diff --git "a/325.jsonl" "b/325.jsonl" new file mode 100644--- /dev/null +++ "b/325.jsonl" @@ -0,0 +1,594 @@ +{"seq_id":"474657549","text":"from flask import Flask,render_template,request,redirect \napp = Flask(__name__)\n@app.route('/') \ndef users():\n return render_template('index.html') \n\n@app.route('/process', methods =['POST']) \ndef processs():\n print(request.form)\n name_from_form = request.form['name']\n city_from_form = request.form['cities']\n selected = request.form.getlist('hobies')\n \n\n \n\n return render_template('info.html', name_on_template =name_from_form, city_on_template =city_from_form, \n language_on_template =request.form['Language'],hobies_on_template = selected, comments = request.form['folks_comments'])\n\nif __name__==\"__main__\": \n app.run(debug=True) ","sub_path":"my_environments/flask/flask_fundamentals/dojo_survey/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"493178499","text":"from os import path\r\nfrom re import findall\r\nfrom tkinter import Tk, Label, Entry, Frame, Button, messagebox\r\n\r\nfrom analysis_tools import analyze, auto\r\nfrom utils import Db\r\n\r\n\r\ndef get_icon(icon):\r\n return path.join('icons', icon)\r\n\r\n\r\n# noinspection PyUnusedLocal\r\nclass Root(Tk):\r\n def __init__(self, *args, **kwargs):\r\n Tk.__init__(self, *args, **kwargs)\r\n\r\n self.attributes(\"-alpha\", 0.0)\r\n self.after(0, self.attributes, \"-alpha\", 1.0)\r\n # Load smoothness upgrade ^\r\n\r\n # Window config\r\n self.iconbitmap(get_icon('hunter.ico'))\r\n self.resizable(False, False)\r\n self.wm_title(\"Clickbait hunter\")\r\n\r\n self.bind('', self.__can_enable)\r\n\r\n # Frames\r\n self.container = Frame(self)\r\n btn_container = Frame(self)\r\n\r\n # Widgets\r\n Label(self.container, text=\"Sentence: \").grid(row=0, column=0,\r\n sticky='e')\r\n self.container.rowconfigure(index=1, weight=1)\r\n self.container.grid_rowconfigure(index=2, weight=1)\r\n Label(self.container, text=\"Error tolerance: \").grid(row=1, column=0,\r\n sticky='e')\r\n Label(self.container, text='Does the sentence sound like clickbait?',\r\n bd=1, relief='sunken').grid(row=2, column=0, pady=3, columnspan=3,\r\n padx=3, sticky='ew')\r\n self.sentence_entry = Entry(self.container, width=50)\r\n self.tolerance_entry = Entry(self.container)\r\n\r\n # Buttons\r\n self.yes_btn = Button(btn_container, text='Yes',\r\n command=self.__choice_yes)\r\n self.no_btn = Button(btn_container, text='No', command=self.__choice_no)\r\n self.you_decide = Button(btn_container, text='You decide',\r\n command=self.__choice_auto)\r\n\r\n # Init\r\n self.sentence_entry.grid(row=0, column=1, sticky='ew')\r\n self.tolerance_entry.grid(row=1, column=1, sticky='ew')\r\n self.container.pack(side='top')\r\n\r\n self.yes_btn.pack(side='left', padx=2, pady=5)\r\n self.no_btn.pack(side='left', padx=2, pady=5)\r\n self.you_decide.pack(side='left', padx=2, pady=5)\r\n\r\n btn_container.pack(side='bottom')\r\n\r\n # state\r\n self.__can_enable()\r\n\r\n def __can_enable(self, event=None):\r\n sentence = self.sentence_entry.get()\r\n tolerance = self.tolerance_entry.get()\r\n\r\n sentence_ok = sentence and not findall(r\"[^a-zA-Z0-9.' 
]\", sentence)\r\n tolerance_ok = not findall(r'[^0-9]', tolerance) and tolerance\r\n\r\n if sentence_ok and tolerance_ok:\r\n self.yes_btn.config(state='active')\r\n self.no_btn.config(state='active')\r\n self.you_decide.config(state='active')\r\n else:\r\n self.yes_btn.config(state='disabled')\r\n self.no_btn.config(state='disabled')\r\n self.you_decide.config(state='disabled')\r\n\r\n def __choice_yes(self):\r\n Db.clb_status = True\r\n Db.sentence = self.sentence_entry.get()\r\n Db.error_tolerance = self.tolerance_entry.get()\r\n analysis_sequence()\r\n\r\n def __choice_no(self):\r\n Db.clb_status = False\r\n Db.sentence = self.sentence_entry.get()\r\n Db.error_tolerance = self.tolerance_entry.get()\r\n analysis_sequence()\r\n\r\n def __choice_auto(self):\r\n Db.sentence = self.sentence_entry.get()\r\n Db.error_tolerance = self.tolerance_entry.get()\r\n auto()\r\n messagebox.showinfo('Success', 'Sentence analysis finished!')\r\n\r\n\r\ndef analysis_sequence():\r\n analyze()\r\n messagebox.showinfo('Success', 'Sentence analysis finished!')\r\n","sub_path":"root_gui.py","file_name":"root_gui.py","file_ext":"py","file_size_in_byte":3765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"144898506","text":"import unittest\nimport random\nfrom model.Geometria import Geometria\nfrom view.View import View\n\nclass test(unittest.TestCase):\n\n # 1 \"setUp\" existe en \"unittest\", define los atributos globales de la clase \"TextFixture\"\n def setUp(self):\n\n print(\"test.setUp\")\n\n #crea un set de números\n self.testAttyibutes = []\n\n # parametrización correcta\n self.testAttyibutes.append([1, 2, 3])\n self.testAttyibutes.append([2.00, 3.00, 4.00])\n self.testAttyibutes.append([-2, -3, -4])\n self._selected_figura = random.randint(1, 8)\n\n # parametrización incorrecta, devolverá algún error en algún \"assert()\"\n #self._selected_figura = random.randint(-55, 55)\n #self.testAttyibutes.append([-2, 'a', -4])\n\n # 2.1 test 1\n def test_Geometria__Init__(self):\n\n print(\"test.test_Geometria__Init__\")\n\n for value in enumerate(self.testAttyibutes):\n\n #print(f'index = {value[0]}')\n #print(f'valor = {value[1][0]}, {value[1][1]}, {value[1][2]}')\n #print(f'index + valor = {value}')\n\n geo = Geometria(value[1][0], value[1][1], value[1][2])\n\n # el constructor no puede devolver NONE\n self.assertIsNotNone(geo)\n\n #print(f'next...')\n\n print(\"test.test_Geometria__Init__ > OK\")\n\n # 2.2 test 2\n def test_set_figuraName(self):\n\n print(\"test.test_set_figuraName\")\n\n for value in enumerate(self.testAttyibutes):\n\n #print(f'index = {value[0]}')\n #print(f'valor = {value[1][0]}, {value[1][1]}, {value[1][2]}')\n #print(f'index + valor = {value}')\n\n geo = Geometria(value[1][0], value[1][1], value[1][2])\n\n #el método siempre devuelve NONE\n self.assertIsNone(geo.set_figuraName(self._selected_figura))\n\n #print(f'next...')\n\n print(\"test.test_set_figuraName > OK\")\n\n # 2.3 test 3\n def test_switch(self):\n\n print(\"test.test_switch\")\n\n for value in enumerate(self.testAttyibutes):\n\n #print(f'index = {value[0]}')\n #print(f'valor = {value[1][0]}, {value[1][1]}, {value[1][2]}')\n #print(f'index + valor = {value}')\n\n geo = Geometria(value[1][0], value[1][1], value[1][2])\n\n for i in range(1, 9):\n\n resul = geo.switch(i)\n print(f'_selected_figura={i}, resul={resul}')\n\n #el método siempre debe devolver un valor\n self.assertIsNotNone(resul)\n\n #el método siempre debe devolver valor mayor de 0\n 
self.assertGreater(resul, 0)\n\n #print(f'next...')\n\n print(\"test.test_switch > OK\")\n\n #\"assertEqual\" compara todos los resultados de \"r\" con los valores esperados en la lista\n #self.assertEqual(r, [-6, -4, -2, 0, 2, 4, 6])\n\n # 2.4 test 4\n def test_view(self):\n\n print(\"test.test_view\")\n\n for value in enumerate(self.testAttyibutes):\n # print(f'index = {value[0]}')\n # print(f'valor = {value[1][0]}, {value[1][1]}, {value[1][2]}')\n # print(f'index + valor = {value}')\n\n geo = Geometria(value[1][0], value[1][1], value[1][2])\n\n view = View.select(self, geo)\n print(view)\n\n #el método siempre devuelve NONE\n self.assertIsNone(view)\n\n # print(f'next...')\n\n print(\"test.test_view > OK\")\n\n # \"assertEqual\" compara todos los resultados de \"r\" con los valores esperados en la lista\n # self.assertEqual(r, [-6, -4, -2, 0, 2, 4, 6])\n\n # 3 \"tearDown\" existe en \"unittest\", se ejecuta al final de la clase \"TextFixture\"\n def tearDown(self):\n\n print(\"test.tearDown\")\n\n del self._selected_figura\n del self.testAttyibutes\n","sub_path":"unit_test.py","file_name":"unit_test.py","file_ext":"py","file_size_in_byte":3815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"88271779","text":"import os\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = ')w^wz5g2c$&v(#3%b$96m$)l=x^+=%qstnxxhcvw8_s5fv_7x)'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = False\n\nTEMPLATE_DEBUG = False\nTEMPLATE_DIRS = (os.path.join(os.path.dirname(__file__),\n 'templates'),)\n\nSECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PHOTO', 'https')\nALLOWED_HOSTS = ['*']\n\n# Application definition\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'trial_task',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nROOT_URLCONF = 'trial_task.urls'\n\nWSGI_APPLICATION = 'trial_task.wsgi.application'\n\n# Database\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n}\n\n# Internationalization\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n\nSTATIC_URL = '/static/'\nSTATIC_ROOT = os.path.join(os.path.dirname(os.path.dirname(__file__)),\n 'static')\n\nSTATICFILES_DIRS = (os.path.join(os.path.dirname(__file__), 'static'), )\n\n# Own variables\n\n# path to YAML file with text descriptions of models\nMODEL_DESC_PATH = os.path.join(os.path.dirname(__file__), 'yaml.conf')\n","sub_path":"trial_task/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":2000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"507502984","text":"import cv2\nimport numpy as np\nimport tensorflow as tf\nimport 
tensorflow_hub as hub\n\ndetector = hub.load(\"https://tfhub.dev/tensorflow/efficientdet/lite2/detection/1\")\n\ncustomDetector = tf.saved_model.load('customDetector/saved_model')\n\nambulance_rectCol = (255, 51, 000)\ncane_rectCol = (204, 51, 000)\nwheelchair_rectCol = (204, 51, 000)\nbaby_carriage_rectCol = (204, 51, 000)\n\n\ndef Detector(image):\n # Convert img to RGB\n rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n # Is optional but i recommend (float conversion and convert img to tensor image)\n rgb_tensor = tf.convert_to_tensor(rgb, dtype=tf.uint8)\n\n # Add dims to rgb_tensor\n rgb_tensor = tf.expand_dims(rgb_tensor, 0)\n\n boxes, scores, classes, num_detections = detector(rgb_tensor)\n\n pred_labels = classes.numpy().astype('int')[0]\n pred_boxes = boxes.numpy()[0].astype('int')\n pred_scores = scores.numpy()[0]\n\n img_result = image\n pos_result = []\n\n # loop throughout the detections and place a box around it\n for score, (ymin, xmin, ymax, xmax), label in zip(pred_scores, pred_boxes, pred_labels):\n if score < 0.5 or label != 1:\n continue\n\n img_result = cv2.rectangle(img_result, (xmin, ymax), (xmax, ymin), (0, 255, 0), 1)\n font = cv2.FONT_HERSHEY_SIMPLEX\n cv2.putText(img_result, 'person', (xmin, ymax - 10), font, 0.5, (255, 0, 0), 1, cv2.LINE_AA)\n\n pos_result.append([xmin, ymax, xmax, ymin])\n\n return img_result, pos_result\n\n\n# image = Pure Image that not drew anything\n# drawOnImg = image that you gonna draw something on\ndef CustomDetector(image, drawOnImg=None):\n # Convert img to RGB\n rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n # Is optional but i recommend (float conversion and convert img to tensor image)\n rgb_tensor = tf.convert_to_tensor(rgb, dtype=tf.uint8)\n\n # Add dims to rgb_tensor\n rgb_tensor = tf.expand_dims(rgb_tensor, 0)\n output_dict = customDetector(rgb_tensor)\n\n num_detections = int(output_dict.pop('num_detections'))\n output_dict = {key: value[0, :num_detections].numpy()\n for key, value in output_dict.items()}\n output_dict['num_detections'] = num_detections\n\n # detection_classes should be ints.\n output_dict['detection_classes'] = output_dict['detection_classes'].astype(np.int64)\n\n pred_labels = output_dict['detection_classes']\n pred_boxes = output_dict['detection_boxes']\n pred_scores = output_dict['detection_scores']\n\n h, w, c = image.shape\n img_result = image\n\n ambulance_pos = []\n cane_pos = []\n wheelchair_pos = []\n baby_carriage_pos = []\n\n if drawOnImg is not None:\n img_result = drawOnImg\n\n # loop throughout the detections and place a box around it\n for score, (ymin, xmin, ymax, xmax), label in zip(pred_scores, pred_boxes, pred_labels):\n if score < 0.3:\n continue\n\n y_min = int(ymin * h)\n x_min = int(xmin * w)\n y_max = int(ymax * h)\n x_max = int(xmax * w)\n\n try:\n label_txt = ''\n\n if label == 1:\n label_txt = 'wheelchair'\n img_result = cv2.rectangle(img_result, (x_min, y_max), (x_max, y_min), wheelchair_rectCol, 3)\n wheelchair_pos.append([x_min, y_max, x_max, y_min])\n\n elif label == 2:\n label_txt = 'baby_carriage'\n img_result = cv2.rectangle(img_result, (x_min, y_max), (x_max, y_min), baby_carriage_rectCol, 3)\n baby_carriage_pos.append([x_min, y_max, x_max, y_min])\n\n elif label == 3:\n label_txt = 'cane'\n img_result = cv2.rectangle(img_result, (x_min, y_max), (x_max, y_min), cane_rectCol, 3)\n cane_pos.append([x_min, y_max, x_max, y_min])\n\n elif label == 4:\n label_txt = 'ambulance'\n img_result = cv2.rectangle(img_result, (x_min, y_max), (x_max, y_min), ambulance_rectCol, 3)\n 
ambulance_pos.append([x_min, y_max, x_max, y_min])\n\n font = cv2.FONT_HERSHEY_SIMPLEX\n cv2.putText(img_result, label_txt, (x_min, y_max - 10), font, 0.5, (255, 0, 0), 3, cv2.LINE_AA)\n\n except Exception as e:\n print(e)\n\n return img_result, ambulance_pos, cane_pos, wheelchair_pos, baby_carriage_pos\n","sub_path":"ImageDetector.py","file_name":"ImageDetector.py","file_ext":"py","file_size_in_byte":4310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"332861341","text":"from flask import Flask\nfrom flask import request\nfrom flask import render_template\nimport pickle as pkl\nfrom working_code.html_writers import write_layer_selector\n\napp = Flask(__name__)\n\nlatent_qtiles = pkl.load(open('data/latent_qtiles.pkl'))\nlatent_tuples = pkl.load(open('data/latent_tuples.pkl'))\ntop_features = pkl.load(open('data/top_feat_per_latent.pkl'))\nchart_data = pkl.load(open('data/chart_data.pkl'))\n\nlayer_selector_code = write_layer_selector(latent_tuples)\n\n## unpickle a saved dictionary object {latent_0:sorted([q1, q2, q3])}\n@app.route('/')\ndef index():\n\t### Create a dictionary of -- {latent_0:sorted([q1, q2, q3])}\n\n return render_template(\"base_template.html\" , latent_qtiles = latent_qtiles\n\t\t\t\t\t\t\t\t\t\t \t, layer_selector_code = layer_selector_code\n\t\t\t\t\t\t\t\t\t\t \t, top_features = top_features\n\t\t\t\t\t\t\t\t\t\t \t, chart_data = chart_data)\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=80, debug=True)\n\n","sub_path":"run_app.py","file_name":"run_app.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"230543505","text":"import csv\nimport pickle\n\nproj_dir = '/home/nick/projects/scraps/'\n\n\n###############################################################################\n# General Utility Functions #\n###############################################################################\n\n\ndef alnumify(string):\n \"\"\" Remove special characters from string.\"\"\"\n return ''.join(c for c in string if c.isalnum())\n\n\ndef read_csv(path, newline_char='', delim_char=',', quote_char='\"'):\n \"\"\" Returns list, data from csv or similarly formatted table.\"\"\"\n data = []\n with open(path, newline=newline_char) as csvfile:\n reader = csv.reader(csvfile, delimiter=delim_char,\n quotechar=quote_char)\n for row in reader:\n for index, element in enumerate(row):\n row[index] = element.lower()\n data.append(row)\n return data\n\n\ndef serialize(obj, path):\n with open(path, 'wb') as f:\n pickle.dump(obj, f)\n\n\ndef load_serial(path):\n with open(path, 'rb') as f:\n obj = pickle.load(f)\n return obj\n\n\n###############################################################################\n# Address Data Normalization #\n###############################################################################\n\n# Directory containing address database files\naddress_db = proj_dir + 'normalizer/database/'\n\n# Directory to store serialize data structures used in address normalization\nserial_path = address_db + 'serialized/'\n\n# Master file containing zip code database\nzip_code_master = 'zip_code_database.csv'\n\n# Master file containing state names and abbreviations\nus_states_master = 'state_table.csv'\n\n# Master file containing street suffixes and their abbreviations\nsuffix_master = 'usps_street_suffixes.csv'\n\n# Filenames for serilized data structures\ns_zip_map = 'zip_map.p'\ns_county_map = 
'county_map.p'\ns_state_map = 'states_map.p'\ns_suffix_map = 'suffix_map.p'\ns_zip_code_finder = 'zip_code_finder.p'\ns_ed_heritage_map = 'ed_heritage_map.p'\n\n\n###################################\n# Builders #\n###################################\ndef zip_code_data():\n \"\"\"Load zip code database into memory.\"\"\"\n path = address_db + zip_code_master\n comma = ','\n data = read_csv(path)\n # Build sublists with in data\n for index, element in enumerate(data):\n if comma in element:\n data[index] = element.split(comma)\n return data\n\n\ndef build_zip_code_map():\n \"\"\" Returns zip_code_map a dictionary mapping zip code to\n tuple (city, state, county).\"\"\"\n zip_index = 0\n city_index = 2\n state_index = 5\n county_index = 6\n out_of_state_code = 'ot'\n zip_code_map = {}\n data = zip_code_data()\n for row in data:\n zip_code = row[zip_index]\n city = row[city_index]\n state = row[state_index]\n if state == 'wy':\n county = row[county_index]\n else:\n county = out_of_state_code\n zip_code_map[zip_code] = (city, state, county)\n return zip_code_map\n\n\ndef build_wy_county_map():\n \"\"\" Returns a dictionary mapping Wyoming zip codes to Wyoming counties.\"\"\"\n zip_code_index = 0\n state_index = 5\n county_index = 6\n data = zip_code_data()\n wy_county_map = {}\n for row in data:\n state = row[state_index]\n if state == 'wy':\n zip_code = row[zip_code_index]\n county = row[county_index]\n wy_county_map[zip_code] = county\n return wy_county_map\n\n\ndef build_state_map():\n \"\"\" Returns a dictionary mapping state name to\n state postal abbreviation.\"\"\"\n path = address_db + us_states_master\n data = read_csv(path, quote_char=\"'\")\n state_name_index = 1\n state_abbrev_index = 2\n\n state_map = {}\n for row in data:\n try:\n state_name = row[state_name_index][2:-2]\n state_abbrev = alnumify(row[state_abbrev_index])\n state_map[state_name] = state_abbrev\n except IndexError:\n continue\n return state_map\n\n\ndef build_zip_code_finder():\n \"\"\" Returns dictionary of dictionaries.\n Primary dictionary key is state abbreviation.\n Subdictionary key is city which maps to zip code.\"\"\"\n city_index = 2\n alt_cities_index = 3\n state_index = 5\n zip_code_index = 0\n\n zip_code_finder = {}\n data = zip_code_data()\n\n # Build set of states\n states = set()\n for row in data:\n states.add(row[state_index])\n # Use set of states to build primary dictionary\n for state in states:\n zip_code_finder[state] = {}\n\n # Build sub-dictionaries\n for row in data:\n city = row[city_index]\n alt_cities = row[alt_cities_index]\n state = row[state_index]\n zip_code = row[zip_code_index]\n\n zip_code_finder[state][city] = zip_code\n for alt_city in alt_cities:\n zip_code_finder[state][alt_city] = zip_code\n return zip_code_finder\n\n\ndef build_suffix_map():\n \"\"\" Returns dictionary mapping common street\n suffix to standardized postal abbreviation.\"\"\"\n path = address_db + suffix_master\n # primary_suffix_index = 0\n common_suffix_index = 1\n standard_abbrev_index = 2\n data = read_csv(path)\n suffix_map = {}\n for row in data:\n common_suffix = row[common_suffix_index]\n standard_abbrev = row[standard_abbrev_index]\n suffix_map[common_suffix] = standard_abbrev\n return suffix_map\n\n\n###############################################################################\n# Ed Heritage Data Normalization #\n###############################################################################\n\ndef build_ed_heritage_map():\n heritage_map = {'les': 'less',\n 'ged': 'ged',\n 'hig': 'high',\n 'voc': 
'voc',\n 'som': 'some',\n 'ass': 'assoc',\n 'bac': 'bach',\n 'mas': 'mast',\n 'doc': 'doc',\n '': 'unk'}\n return heritage_map\n\n\n###################################\n# Serializer #\n###################################\n\ndef serialize_all():\n maps = [build_zip_code_map(),\n build_wy_county_map(),\n build_state_map(),\n build_suffix_map(),\n build_ed_heritage_map()]\n out_files = [s_zip_map,\n s_county_map,\n s_state_map,\n s_suffix_map,\n s_ed_heritage_map]\n for index, out_file in enumerate(out_files):\n path = serial_path + out_file\n obj = maps[index]\n serialize(obj, path)\n\n\n###################################\n# Loaders #\n###################################\n\ndef load_zip_map():\n path = serial_path + s_zip_map\n return load_serial(path)\n\n\ndef load_county_map():\n path = serial_path + s_county_map\n return load_serial(path)\n\n\ndef load_state_map():\n path = serial_path + s_state_map\n return load_serial(path)\n\n\ndef load_suffix_map():\n path = serial_path + s_suffix_map\n return load_serial(path)\n\n\ndef load_ed_heritage_map():\n path = serial_path + s_ed_heritage_map\n return load_serial(path)\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"238941161","text":"#!/usr/bin/env python\nimport sys\nimport rospy\nimport moveit_commander\nfrom moveit_msgs.msg import DisplayTrajectory\nfrom xr_tank.msg import ArmJoyCmd\nfrom control_msgs.msg import FollowJointTrajectoryAction,FollowJointTrajectoryGoal\nfrom trajectory_msgs.msg import JointTrajectoryPoint\nimport geometry_msgs\nimport actionlib\nfrom copy import copy\nfrom std_msgs.msg import Header\nimport math\n\nclass ArmControl():\n def __init__(self):\n moveit_commander.roscpp_initialize(sys.argv)\n\n self.robot = moveit_commander.RobotCommander()\n self.scene = moveit_commander.PlanningSceneInterface()\n self.armGroup = moveit_commander.MoveGroupCommander(\"arm\")\n self.planningFrame = self.armGroup.get_planning_frame()\n self.endEffector = self.armGroup.get_end_effector_link()\n\n self.armJoyCmdSub = rospy.Subscriber(\"/arm_joy_cmd\", ArmJoyCmd, self.ArmJoyCmdCallback)\n self.armAC = actionlib.SimpleActionClient(\"/xr_tank/arm_controller/follow_joint_trajectory\", FollowJointTrajectoryAction)\n self.armAC.wait_for_server(timeout=rospy.Duration(10.0))\n self.jointState = self.armGroup.get_current_joint_values()\n\n #add ground plane\n ground_pose = geometry_msgs.msg.PoseStamped()\n ground_pose.header.frame_id = \"vehicle_base\"\n ground_pose.pose.orientation.w = 1.0\n ground_pose.pose.position.z = -0.05\n ground_name = \"ground\"\n self.scene.add_box(ground_name, ground_pose, size=(1, 1, 0.1))\n\n #self.trajectoryPublisher = rospy.Publisher('/arm/planned_path',DisplayTrajectory, queue_size=20)\n #rospy.loginfo(self.planningFrame)\n #rospy.loginfo(self.endEffector)\n #rospy.loginfo(self.robot.get_group_names())\n #rospy.loginfo(self.armGroup.get_current_joint_values())\n\n def ArmJoyCmdCallback(self,msg):\n #rospy.loginfo(msg)\n if msg.resetPose:\n self.GoToStandbyPose()\n elif msg.randomPose:\n self.GoToRandomPose()\n elif msg.armAOffset != 0 or msg.armBOffset != 0 or msg.gripperBaseOffset != 0:\n #target = self.armGroup.get_current_joint_values()\n #rospy.loginfo(\"%f %f %f %f\" % (target[0],target[1],target[2],target[3]))\n \n\n #send action goal directly from action client to skip motion planning\n goal = FollowJointTrajectoryGoal()\n goal.trajectory.joint_names = 
[\"joint_arm_a\",\"joint_arm_b\",\"joint_gripper_base\"]\n #first point (original pose)\n point = JointTrajectoryPoint()\n point.positions = copy(self.jointState)\n point.time_from_start = rospy.Duration(0)\n goal.trajectory.points.append(point)\n \n #second point(pose after moving by joystick)\n scale = 0.05\n self.jointState[0] += msg.armAOffset*scale\n self.jointState[1] += msg.armBOffset*scale\n self.jointState[2] += msg.gripperBaseOffset*scale\n point = JointTrajectoryPoint()\n point.positions = copy(self.jointState)\n point.time_from_start = rospy.Duration(0.03)\n goal.trajectory.points.append(point)\n \n #rospy.loginfo(goal)\n self.armAC.send_goal(goal)\n #self.armAC.wait_for_result(timeout=rospy.Duration(1))\n \n\n def GoToRandomPose(self):\n rospy.loginfo(\"======go to random pose\")\n #pose_goal = self.armGroup.get_random_pose()\n #self.armGroup.set_pose_target(pose_goal)\n #self.armGroup.go(wait=True)\n\n joint_goal = self.armGroup.get_random_joint_values()\n self.jointState = copy(joint_goal)\n self.armGroup.go(joint_goal, wait=True)\n self.armGroup.stop()\n\n def GoToStandbyPose(self):\n rospy.loginfo(\"======go to standby pose\")\n joint_goal = self.armGroup.get_named_target_values(\"standby\")\n #rospy.loginfo(joint_goal)\n self.jointState = [joint_goal[\"joint_arm_a\"],joint_goal[\"joint_arm_b\"],joint_goal[\"joint_gripper_base\"]]\n self.armGroup.go(joint_goal, wait=True)\n self.armGroup.stop()\n\n def Run(self):\n rospy.spin()\n \"\"\"rate = rospy.Rate(0.2)\n mode = 0\n while not rospy.is_shutdown():\n rate.sleep()\n if mode % 2 == 0:\n self.GoToRandomPose()\n if mode % 2 == 1:\n self.GoToStandbyPose()\n \"\"\"\n\n \n\nif __name__ == '__main__':\n rospy.init_node('arm_control_node')\n rospy.loginfo(\"arm_control_node started\")\n armControl = ArmControl()\n armControl.Run()\n","sub_path":"xr_tank/src/xr_tank/arm_control_node.py","file_name":"arm_control_node.py","file_ext":"py","file_size_in_byte":4585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"164301648","text":"#!/usr/bin/env python\nimport random\n\n\"\"\"\n30/11/2015\nTP1 Cryptographique pour l'embarqué\nGuillaume VINCENT - Paul KERGUILLEC\n2A INFORMATIQUE APPRENTISSAGE - ENSICAEN\n\"\"\"\n\n\"\"\"\nPermet de retourner la fonction booleene des polynomes x1,x2,x3\n\"\"\"\ndef Boolean_function(x1,x2,x3):\n x = x3 ^ (x2 << 1) ^ (x1 << 2)\n return ((226 >> x) & 1)\n\n\"\"\"\nPermet d'afficher la valeur binaire d'un entier sur une taille n définie\nCette fonction nous apporte de l'aide lors de l'implémentation des fonctions\npuisque que nous travaillons sur des bits.\n\"\"\"\ndef binary_string(x,n):\n s = ''\n for i in range(n):\n if ((x >> i) & 1) == 1 : s = \"1\" + s\n else : s = \"0\" + s\n return s\n\n\"\"\"\nAffiche la table de vérité de la fonction f\n\"\"\"\ndef display_truth_table(f,n):\n for i in range(1 << n):\n print (binary_string(i,n), (f >> i) & 1)\n\n\"\"\"\nPermet de vérifier que la fonction f est équilibrée.\nSi la fonction est équilibrée, elle possède le même\nnombre de 0 et 1. 
Si cela est vrai, la fonction retourne\ntrue, sinon false\n\"\"\"\ndef is_balanced(f,n):\n nZero=0\n nOne=0\n for i in range(1 << n):\n m=(f >> i) & 1\n if m == 1:\n nOne =+ 1\n else:\n nZero =+ 1\n\n if (nOne - nZero) == 0:\n return True\n else:\n return False\n\nmasque= lambda x,n: (x&(1<>n\n\n\"\"\"\nPermet de mettre à jour un LFSR à partir de son état, du polynome de retroaction et de la taille max 2^n\nLa fonction garde le LSB (output), effectue les opérations de OU EXCLUSIF. Le registre est ensuite décalé\net le résultat des xor est ajouté en MSB. On obtient alors le nouvel état du lfsr\nLa fonction retourne la sortie et le nouvel état du lfsr.\n\"\"\"\ndef update_LFSR(state, poly, n):\n bit=0\n output=state & 1\n for i in range(0,n):\n bit^=masque(poly,i+1)&masque(state,n-i-1)\n state=(state>>1)\n state=state ^ (bit<<(n-1))\n return output, state\n\n\"\"\"\nPermet de générer un flot de bit à partir d'un lfsr.\nLa fonction prend en paramètre l'état de départ du lfsr(un entier), le polynome de retroaction (un entier),\nn la taille du registre du lfsr, l le nombre de cycle (mis à jour) du lfsr. Chaque cycle de lfsr genere un\nbit de sortie.\nLa fonction renvoie une concatenation de ls sortie du lfsr (flot de bit)\n\"\"\"\ndef generate_keystream_with(state, poly, n, l):\n res = 0\n for i in range(0,l):\n output,state = update_LFSR(state,poly,n)\n #print(\"o:\",output,\"i:\",i)\n\n res = res + (output << i)\n\n return res\n\n\"\"\"\nFonction de generateur pseudo-aleatoire.\nLa fonction prend en paramètre une cle sous la forme d'un liste de trois états différents pour 3 lfsr.\nPar conséquent, on a cle(state1,state2,state3). Pour chaque lsfr, on genere le flot de bit lfsr1,lfsr2,lfsr2\nOn passe en parametre à la fonction booleene chaque bit present à la position n dans lfsr1,lfsr2,lfsr3 et on\nconcatene le résultat retourné par la fonction booleene n fois dans une variable que l'on retourne.\n\"\"\"\ndef Generator(cle,n):\n state1,state2,state3 = cle\n p1 = 1 ^ (1<<3) ^ (1<<10)\n p2 = 1 ^ (1<<2) ^ (1<<11)\n p3 = 1 ^ (1<<1) ^ (1<<4) ^ (1<<6) ^ (1<<12)\n lfsr1=generate_keystream_with(state1,p1,10,n)\n lfsr2=generate_keystream_with(state2,p2,11,n)\n lfsr3=generate_keystream_with(state3,p3,12,n)\n #print(\"lfsr1:\",binary_string(lfsr1,n))\n #print(\"lfsr2:\",binary_string(lfsr2,n))\n #print(\"lfsr3:\",binary_string(lfsr3,n))\n r=0\n for i in range(0,n):\n b=Boolean_function(((lfsr1>>i)&1),((lfsr2>>i)&1),((lfsr3>>i)&1))\n r=r+(b<> i) & 1 != (s2 >> i) & 1:\n res = res + 1\n if res < (l/3):\n return True\n if res > (l/3):\n return False\n\ndef correlation_attack():\n a=random.randint(1,1<<33)\n s1 = a & ((2**10)-1)\n s2 = (a >> 10) & ((2**11)-1)\n s3 = (a >> 21) & ((2**12)-1)\n # la cle est de 33 bits et est une liste composée des trois états\n cle = s1,s2,s3\n print(\"Clé aléatoire (non-connue) :\",s1,s2,s3)\n g=Generator(cle,300)\n #for t_s2 in range(0,2**11-1):\n for t_s1 in range(0,2**10-1):\n lfsr1 = generate_keystream_with(t_s1,1 ^ (1 << 3) ^ (1 << 10),10,300)\n if statistical_test(lfsr1,g,300) == True:\n test_s1 = t_s1\n\n for t_s3 in range(0,2**12-1):\n lfsr3 = generate_keystream_with(t_s3,1 ^ (1<<1) ^ (1<<4) ^ (1<<6) ^ (1<<12),12,300)\n if statistical_test(lfsr3,g,300) == True:\n test_s3 = t_s3\n\n for t_s2 in range(0,2**11-1):\n #print(test_s1,t_s2,test_s3)\n if g == Generator((test_s1,t_s2,test_s3),300):\n test_s2 = t_s2\n return(test_s1,test_s2,test_s3)\n\nf=226\nn=3\nprint(\"---------- Affichage de la table de vérité f=226 et n=3 -------\")\nprint(\"Affichage de la table de vérité 
f=226 et n=2\")\ndisplay_truth_table(226,3)\nprint(\"---------- Fonction is_balanced sur f=226 et n⁼3 -----------\")\nprint(\"is_balanced(226,3): \",is_balanced(f,n))\nprint(\"------------- Fonction update_lfsr -------------------\")\noutput,state = update_LFSR(53,105,7)\nprint(\"update_LFSR(53,105,7), state=53, poly=105, n=7\")\nprint(\"sortie :\",output,\"state :\",state)\nprint(\"------------- Fonction generate_keystream_with -------\")\nprint(\"state:53,poly=105,n=7,l=20\")\nprint(\"generate_keystream_with(53,105,7,20)\")\nprint(\"Résultat en base 10 :\",generate_keystream_with(53,105,7,20))\nprint(\"Résultat en binaire :\",bin(generate_keystream_with(53,105,7,20)))\nprint(\"------------- Generateur pseudo-aleatoire -----------\")\nprint(\"cle=(410,315,961),n=33\")\nprint(\"Generator((410,315,961),40): \", binary_string(Generator((410,315,961),40),40))\nprint(\"------------- Test statistique --------------------\")\nprint(\"statistical_test(143,187,7)=\",statistical_test(143,187,7))\nprint(\"------------- Attaque par correlation -------------\")\nprint(correlation_attack())\n","sub_path":"tp1.py","file_name":"tp1.py","file_ext":"py","file_size_in_byte":6218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"361209734","text":"import os\nfrom flask import Flask, request\nfrom PIL import Image\nfrom fastai.vision.all import *\nimport json\n\nUPLOAD_FOLDER = './upload'\n\napp = Flask(__name__)\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\nmodel = load_learner(\"export.pkl\")\n\n@app.route('/', methods=['GET', 'POST'])\ndef upload_file():\n if(request.method == 'POST'):\n if 'image' not in request.files:\n return 'there is no image in the form!'\n image = Image.open(request.files['image'].stream)\n out = model.predict(tensor(image))\n dictionary = dict(zip(['calling', 'normal', 'reaching', 'texting'], out[2].tolist()))\n return json.dumps(dictionary, indent=4)\n else:\n return \"API operational!\"\n\nif __name__ == '__main__':\n app.run()","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"644617715","text":"import pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt \r\n\r\n#data_name = r'C:\\Users\\YuL_e\\Desktop\\Eidition2.0 - Final_Term\\data_withoutDIR.csv'\r\ndata_name = r'C:\\Users\\YuL_e\\Desktop\\Eidition2.0 - Final_Term\\data_withoutNaN - for correlation_analysis.csv'\r\n\r\ndf = pd.read_csv(data_name,header=None)\r\n\r\n#print(np.isnan(df).any())\r\n#df = df.dropna()\r\n#print(np.isnan(df).any())\r\nprint('df.shape:\\n',df.shape)\r\n# (61223, 19)\r\n#print(df.isnull().sum())\r\n\r\n\r\n\r\nx_dim = 18 #删去了3个方向、湿度等变量 18-6=12\r\n# 划分出测试集和训练集\r\nfrom sklearn.model_selection import train_test_split\r\nX,y = df.iloc[1:101,1:x_dim+1].values, df.iloc[1:101,12].values\r\nX_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.2,random_state=0)\r\n#print(X.shape)\r\n#print(y[0])\r\n\r\n#标准化:均值为0,方差为1\r\nfrom sklearn.preprocessing import StandardScaler\r\nss_x = StandardScaler() \r\nss_y = StandardScaler()\r\nX_train_std = ss_x.fit_transform(X_train) \r\nX_test_std = ss_x.transform(X_test) \r\n#y_train = ss_y.fit_transform(y_train.reshape(-1, 1)) \r\n#y_test = ss_y.transform(y_test.reshape(-1, 1)) \r\n\r\n\r\n#print(X)\r\n#计算协方差矩阵的特征对\r\ncov_mat = np.cov(X_train_std.T) #得到协方差矩阵\r\nprint('cov_mat:\\n',cov_mat)\r\neigen_vals,eigen_vecs = np.linalg.eig(cov_mat) 
#得到特征向量,存入eigen_vecs中\r\nprint('\\nEigenvalues \\n%s' % eigen_vals)\r\n\r\n\r\n\r\n#绘制特征值的方差贡献率图像\r\ntot = sum(eigen_vals) #求和\r\nvar_exp = [(i / tot) for i in sorted(eigen_vals,reverse=True)] #计算每个的方差贡献率\r\n#var_exp = [(i / tot) for i in eigen_vals] #计算每个的方差贡献率\r\ncum_var_exp = np.cumsum(var_exp) #计算出累计方差\r\nprint('var_exp:\\n',var_exp)\r\nprint('cum_var_exp:\\n',cum_var_exp)\r\n\r\n\r\n\r\n\r\n#排列特征对-降序\r\neigen_pairs = [(np.abs(eigen_vals[i]),eigen_vecs[:,i]) for i in range(len(eigen_vals))]\r\neigen_pairs.sort(reverse=True)\r\nprint('eigen_pairs:\\n',eigen_pairs)\r\n\r\n#选取三个个对应的特征值最大的特征向量:得到13*2的映射矩阵\r\nw = np.hstack((eigen_pairs[0][1][:,np.newaxis],eigen_pairs[1][1][:,np.newaxis]))\r\nw = np.hstack((w,eigen_pairs[2][1][:,np.newaxis]))\r\nprint('Matrix W:\\n',w) \r\n\r\n#将124*13的数据集转换到包含三个主成分的子空间上\r\nX_train_pca = X_train_std.dot(w)\r\n\r\n\r\nprint('X_train_pca:\\n',X_train_pca)\r\n\r\nprint(X_train_pca.shape)\r\n","sub_path":"1.5.[preprocessing]PCA & transform_to_new_space.py","file_name":"1.5.[preprocessing]PCA & transform_to_new_space.py","file_ext":"py","file_size_in_byte":2387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"520971937","text":"from copy import deepcopy\nimport pygame\n\nfrom utilities import createPolygonMask\n\n\nclass Obstacle():\n def __init__(self, x, y, radius, room, color = (100,100,100), movable = False, vel = 2, margin = 2, isWall = False, spacing = 15, positionInWall = None):\n self.isWall = isWall\n self.spacing = spacing\n self.positionInWall = positionInWall\n self.room = room\n self.color = color\n self.x = x\n self.y = y\n self.radius = radius\n self.vel = vel\n self.movable = movable\n self.margin = margin\n self.polygonPointsAbsolute = createPolygonMask([0, 0], 10, self.radius + self.margin)\n self.polygonPoints = deepcopy(self.polygonPointsAbsolute)\n\n for i in range(len(self.polygonPoints)):\n self.polygonPoints[i][0] = self.polygonPointsAbsolute[i][0] + self.x\n self.polygonPoints[i][1] = self.polygonPointsAbsolute[i][1] + self.y\n\n self.seen = False\n\n\n def draw(self):\n surface = self.room.surface1\n pygame.draw.circle(surface, self.color, (self.x, self.y), self.radius)","sub_path":"code_2A/pygame/obstacle.py","file_name":"obstacle.py","file_ext":"py","file_size_in_byte":1102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"175120668","text":"import re\n\n# Regex for 12 or 13 digits\n_valid_bc = re.compile(\"^[0-9]{12,13}$\")\n\ndef validateBarcode(barcode):\n \n bc = str(barcode)\n if not _valid_bc.match(bc):\n return False\n \n (a,b) = (0,0)\n \n bclen = len(bc)-1\n checkdigit = bc[bclen]\n \n for i in range(0,bclen):\n d = int(bc[i])\n if i%2:\n a += d\n else:\n b += d\n# print (a,b,i,d)\n \n if bclen % 2:\n b = b * 3\n else:\n a = a * 3\n \n checksum = a + b\n checksum = checksum % 10\n checksum = 10 - checksum\n if checksum == 10:\n checksum = 0\n# print checksum\n \n return str(checksum) == checkdigit\n","sub_path":"Util/Barcode.py","file_name":"Barcode.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"321351706","text":"#\n# @lc app=leetcode.cn id=100 lang=python\n#\n# [100] 相同的树\n#\n# https://leetcode-cn.com/problems/same-tree/description/\n#\n# algorithms\n# Easy (54.98%)\n# Likes: 310\n# Dislikes: 0\n# Total Accepted: 61.9K\n# Total Submissions: 109.5K\n# Testcase Example: '[1,2,3]\\n[1,2,3]'\n#\n# 
给定两个二叉树,编写一个函数来检验它们是否相同。\n# \n# 如果两个树在结构上相同,并且节点具有相同的值,则认为它们是相同的。\n# \n# 示例 1:\n# \n# 输入: 1 1\n# ⁠ / \\ / \\\n# ⁠ 2 3 2 3\n# \n# ⁠ [1,2,3], [1,2,3]\n# \n# 输出: true\n# \n# 示例 2:\n# \n# 输入: 1 1\n# ⁠ / \\\n# ⁠ 2 2\n# \n# ⁠ [1,2], [1,null,2]\n# \n# 输出: false\n# \n# \n# 示例 3:\n# \n# 输入: 1 1\n# ⁠ / \\ / \\\n# ⁠ 2 1 1 2\n# \n# ⁠ [1,2,1], [1,1,2]\n# \n# 输出: false\n# \n# \n#\n\n# @lc code=start\n# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution(object):\n def isSameTree(self, p, q):\n \"\"\"\n :type p: TreeNode\n :type q: TreeNode\n :rtype: bool\n \"\"\"\n # 完全相同,则任意方法遍历结果相同\n return self.scan(p,q)\n\n def scan(self,root1,root2):\n if root1 == None or root2 == None:\n if not root1 == root2:\n return False\n else:\n return True\n elif not root1.val == root2.val:\n return False\n if not self.scan(root1.left,root2.left):\n return False\n if not self.scan(root1.right,root2.right):\n return False\n return True\n \n# @lc code=end\n\n","sub_path":"100.相同的树.py","file_name":"100.相同的树.py","file_ext":"py","file_size_in_byte":1825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"457084966","text":"from django.conf.urls import url\nfrom apps.core import views\nfrom apps.core.utils import actions\n\nurlpatterns = [\n url(r'^$', views.HomeView.as_view(), name='home'),\n url(r'^terms_and_billing/$', views.TermsAndBilling.as_view(), name='terms_and_billing'),\n url(r'^employer_info/$', views.EmployerInfo.as_view(), name='employer_info'),\n url(r'^seeker_info/$', views.SeekerInfo.as_view(), name='seeker_info'),\n\n url(r'^offer_categories/$', views.OfferCategoryList.as_view(), name='offer_categories'),\n url(r'^offer_list/$', views.OfferList.as_view(), name='offer_list'),\n url(r'^resume_categories/$', views.ResumeCategoryList.as_view(), name='resume_categories'),\n url(r'^resume_list/$', views.ResumeList.as_view(), name='resume_list'),\n url(r'^offer_details/(?P[-\\w]+)/$', views.OfferDetails.as_view(), name='offer_details'),\n url(r'^resume_details/(?P[-\\w]+)/$', views.ResumeDetails.as_view(), name='resume_details'),\n\n url(r'^clean_selects/$', actions.clean_selects, name='clean_selects'),\n url(r'^get_skills/$', actions.get_skills, name='get_skills'),\n]\n","sub_path":"apps/core/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"458037422","text":"# This file is part of the lib3to6 project\n# https://gitlab.com/mbarkhau/lib3to6\n#\n# Copyright (c) 2019 Manuel Barkhau (mbarkhau@gmail.com) - MIT License\n# SPDX-License-Identifier: MIT\n\nimport ast\nimport typing as typ\n\nfrom . import common\nfrom . 
import utils\n\n\nclass VersionInfo:\n\n prohibited_until: typ.Optional[str]\n\n def __init__(self, prohibited_until: str = None) -> None:\n self.prohibited_until = prohibited_until\n\n\nclass CheckerBase:\n\n version_info: VersionInfo\n\n def is_prohibited_for(self, version: str) -> bool:\n return (\n self.version_info.prohibited_until is None\n or self.version_info.prohibited_until >= version\n )\n\n def __call__(self, cfg: common.BuildConfig, tree: ast.Module) -> None:\n raise NotImplementedError()\n\n\nclass NoStarImports(CheckerBase):\n\n version_info = VersionInfo()\n\n def __call__(self, cfg: common.BuildConfig, tree: ast.Module) -> None:\n for node in ast.walk(tree):\n if not isinstance(node, ast.ImportFrom):\n continue\n\n for alias in node.names:\n if alias.name == \"*\":\n raise common.CheckError(f\"Prohibited from {node.module} import *.\", node)\n\n\ndef _iter_scope_names(tree: ast.Module) -> typ.Iterable[typ.Tuple[str, ast.AST]]:\n for node in ast.walk(tree):\n if isinstance(node, (ast.FunctionDef, ast.ClassDef)):\n yield node.name, node\n elif isinstance(node, ast.Name) and isinstance(node.ctx, ast.Store):\n yield node.id, node\n elif isinstance(node, (ast.ImportFrom, ast.Import)):\n for alias in node.names:\n name = alias.name if alias.asname is None else alias.asname\n yield name, node\n elif isinstance(node, ast.arg):\n yield node.arg, node\n\n\nclass NoOverriddenFixerImportsChecker(CheckerBase):\n \"\"\"Don't override names that fixers may reference.\"\"\"\n\n version_info = VersionInfo()\n prohibited_import_overrides = {\"itertools\", \"six\", \"builtins\"}\n\n def __call__(self, cfg: common.BuildConfig, tree: ast.Module) -> None:\n for name_in_scope, node in _iter_scope_names(tree):\n is_fixer_import = (\n isinstance(node, ast.Import)\n and len(node.names) == 1\n and node.names[0].asname is None\n and node.names[0].name == name_in_scope\n )\n if is_fixer_import:\n continue\n\n if name_in_scope in self.prohibited_import_overrides:\n msg = f\"Prohibited override of import '{name_in_scope}'\"\n raise common.CheckError(msg, node)\n\n\nclass NoOverriddenBuiltinsChecker(CheckerBase):\n \"\"\"Don't override names that fixers may reference.\"\"\"\n\n version_info = VersionInfo()\n\n def __call__(self, cfg: common.BuildConfig, tree: ast.Module) -> None:\n for name_in_scope, node in _iter_scope_names(tree):\n if name_in_scope in common.BUILTIN_NAMES:\n msg = f\"Prohibited override of builtin '{name_in_scope}'\"\n raise common.CheckError(msg, node)\n\n\nMODULE_BACKPORTS = {\n 'lzma' : ((3, 3), \"backports.lzma\"),\n 'pathlib' : ((3, 4), \"pathlib2\"),\n 'statistics' : ((3, 4), \"statistics\"),\n 'ipaddress' : ((3, 4), \"py2-ipaddress\"),\n 'asyncio' : ((3, 4), None),\n 'selectors' : ((3, 4), None),\n 'enum' : ((3, 4), \"enum34\"),\n 'zipapp' : ((3, 5), None),\n 'typing' : ((3, 5), \"typing\"),\n 'contextvars' : ((3, 7), \"contextvars\"),\n 'dataclasses' : ((3, 7), \"dataclasses\"),\n \"importlib.resources\": ((3, 7), \"importlib_resources\"),\n}\n\n\nclass NoThreeOnlyImports(CheckerBase):\n\n version_info = VersionInfo(prohibited_until=\"2.7\")\n\n def __call__(self, cfg: common.BuildConfig, tree: ast.Module) -> None:\n pass\n\n\nPROHIBITED_OPEN_ARGUMENTS = {\"encoding\", \"errors\", \"newline\", \"closefd\", \"opener\"}\n\n\nclass NoOpenWithEncodingChecker(CheckerBase):\n\n version_info = VersionInfo(prohibited_until=\"2.7\")\n\n def __call__(self, cfg: common.BuildConfig, tree: ast.Module) -> None:\n for node in ast.walk(tree):\n if not isinstance(node, ast.Call):\n 
continue\n\n func_node = node.func\n if not isinstance(func_node, ast.Name):\n continue\n if func_node.id != \"open\" or not isinstance(func_node.ctx, ast.Load):\n continue\n\n mode = \"r\"\n if len(node.args) >= 2:\n mode_node = node.args[1]\n if isinstance(mode_node, ast.Str):\n mode = mode_node.s\n else:\n msg = (\n \"Prohibited value for argument 'mode' of builtin.open. \"\n + f\"Expected ast.Str node, got: {mode_node}\"\n )\n raise common.CheckError(msg, node)\n\n if len(node.args) > 3:\n raise common.CheckError(f\"Prohibited positional arguments to builtin.open\", node)\n\n for kw in node.keywords:\n if kw.arg in PROHIBITED_OPEN_ARGUMENTS:\n msg = f\"Prohibited keyword argument '{kw.arg}' to builtin.open.\"\n raise common.CheckError(msg, node)\n if kw.arg != 'mode':\n continue\n\n mode_node = kw.value\n if isinstance(mode_node, ast.Str):\n mode = mode_node.s\n else:\n msg = (\n \"Prohibited value for argument 'mode' of builtin.open. \"\n + f\"Expected ast.Str node, got: {mode_node}\"\n )\n raise common.CheckError(msg, node)\n\n if \"b\" not in mode:\n msg = (\n f\"Prohibited value '{mode}' for argument 'mode' of builtin.open. \"\n + \"Only binary modes are allowed, use io.open as an alternative.\"\n )\n raise common.CheckError(msg, node)\n\n\nASYNC_AWAIT_NODE_TYPES = (ast.AsyncFor, ast.AsyncWith, ast.AsyncFunctionDef, ast.Await)\n\n\nclass NoAsyncAwait(CheckerBase):\n\n version_info = VersionInfo(prohibited_until=\"3.4\")\n\n def __call__(self, cfg: common.BuildConfig, tree: ast.Module) -> None:\n for node in ast.walk(tree):\n if isinstance(node, ASYNC_AWAIT_NODE_TYPES):\n raise common.CheckError(\"Prohibited use of async/await\", node)\n\n\nclass NoComplexNamedTuple(CheckerBase):\n\n version_info = VersionInfo(prohibited_until=\"3.4\")\n\n def __call__(self, cfg: common.BuildConfig, tree: ast.Module) -> None:\n _typing_module_name : typ.Optional[str] = None\n _namedtuple_class_name: str = \"NamedTuple\"\n\n for node in ast.walk(tree):\n if isinstance(node, ast.Import):\n for alias in node.names:\n if alias.name == 'typing':\n if alias.asname is None:\n _typing_module_name = alias.name\n else:\n _typing_module_name = alias.asname\n\n if isinstance(node, ast.ImportFrom) and node.module == 'typing':\n for alias in node.names:\n if alias.name == 'NamedTuple':\n if alias.asname is None:\n _namedtuple_class_name = alias.name\n else:\n _namedtuple_class_name = alias.asname\n\n if not isinstance(node, ast.ClassDef):\n continue\n\n if not (_typing_module_name or _namedtuple_class_name):\n continue\n\n if not utils.has_base_class(node, _typing_module_name, _namedtuple_class_name):\n continue\n\n for subnode in node.body:\n if isinstance(subnode, ast.Expr) and isinstance(subnode.value, ast.Str):\n # docstring is fine\n pass\n elif isinstance(subnode, ast.AnnAssign):\n if subnode.value:\n tgt = subnode.target\n assert isinstance(tgt, ast.Name)\n msg = (\n f\"Prohibited use of default value \"\n + f\"for field '{tgt.id}' of class '{node.name}'\"\n )\n raise common.CheckError(msg, subnode, node)\n\n elif isinstance(subnode, ast.FunctionDef):\n msg = (\n f\"Prohibited definition of method \"\n + f\"'{subnode.name}' for class '{node.name}'\"\n )\n raise common.CheckError(msg, subnode, node)\n else:\n msg = f\"Unexpected subnode defined for class {node.name}: {subnode}\"\n raise common.CheckError(msg, subnode, node)\n\n\n# NOTE (mb 2018-06-24): I don't know how this could be done reliably.\n# The main issue is that there are objects other than dict, which\n# have methods named items,keys,values 
which this check wouldn't\n# apply to.\n# class NoAssignedDictViews(CheckerBase):\n#\n# check_before = \"3.0\"\n#\n# def __call__(self, cfg: common.BuildConfig, tree: ast.Module):\n# pass\n","sub_path":"src/lib3to6/checkers.py","file_name":"checkers.py","file_ext":"py","file_size_in_byte":9405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"176413761","text":"### ML api ###\nimport pandas as pd \nimport requests\n\nd = {\"grant_type\" : \"client_credentials\"}\nh = {\"Accept\": \"application/json\", \"Accept-Language\": \"en_US\"}\n\nauth_url = \"https://api.makeleaps.com/user/oauth2/token/\"\ncid = \"{Client Id}\"\nsecret = \"{Client Secret}\"\n\nr = requests.post(auth_url, auth=(cid, secret), headers=h, data=d).json()\naccess_token = r['access_token']\nh[\"Authorization\"] = f'Bearer {access_token}'\n\nimport time\ndef getRecord(url,page_num): \n queries = f\"?page={page_num}\"\n # if page_num % 100 == 0:\n # print(f\"sleeping as n_get reached {page_num}\")\n # time.sleep(40)\n print(f\"retrieving page {page_num}\")\n g = requests.get(url+queries, headers=h).json()\n if g['meta']['status'] != 200:\n print(f\"error at page {page_num} with status {g['meta']['status']}\")\n return \"error\"\n else:\n data = pd.DataFrame(g['response'])\n return data\n\ndef load_df(kind):\n g_url = f'https://api.makeleaps.com/api/partner/{MakeLeaps_ID}/{kind}/'\n g_r = requests.get(g_url, headers=h).json()\n df = pd.DataFrame(g_r['response'])\n n_retrieve = -(-g_r['meta']['count'] // 20)\n \n print(f\"downloading {kind}s across {n_retrieve} pages and {g_r['meta']['count']} records\")\n for i in range(2,n_retrieve + 1):\n data = getRecord(g_url,i)\n if type(data) != str:\n df = df.append(data)\n return df.reset_index()\n \nclient_df = load_df(\"client\").drop([\"index\"],axis=1)\nclient_df.to_csv(f\"ML_client.csv\",index=False)\ndocument_df = load_df(\"document\")\ndocument_df.to_csv(f\"ML_document.csv\",index=False)\n\ndef expandLineItems(d_row):\n t_df = pd.DataFrame(d_row.lineitems)\n cols = t_df.columns.to_list()\n t_df[\"document_number\"] = d_row.document_number\n return t_df[[\"document_number\"]+cols]\n\nlineitem_df = expandLineItems(document_df.iloc[0,:]).iloc[0:0]\nfor i in range(len(document_df)):\n t_df = expandLineItems(document_df.iloc[i,:])\n lineitem_df = pd.concat([lineitem_df,t_df])\n print(f\"{i+1}/{len(document_df)} done\")\n\nlineitem_df = lineitem_df.rename_axis('index').reset_index() \nlineitem_df.to_csv(f\"ML_document_lineitem.csv\",index=False)\n","sub_path":"MakeLeaps_API_df/MakeLeaps_API_df.py","file_name":"MakeLeaps_API_df.py","file_ext":"py","file_size_in_byte":2166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"58575499","text":"# 게시물의 총 건수(m)\t페이지당 보여줄 게시물 수(n)\t총 페이지 수\n# 5\t 10\t 1\n# 15\t10\t 2\n# 25\t10\t 3\n# 30\t10\t 3\n\ndef getTotalPage(m, n):\n mantisa, fraction = divmod(m, n)\n return mantisa + (fraction > 0 and 1 or 0)\n\n\ndummy = [(5, 10), (15, 10), (25, 10), (30, 10)]\nfor i, (m, n) in enumerate(dummy):\n print(i, \"total page = %x\" % getTotalPage(m, n))\n","sub_path":"practice-06/paging.py","file_name":"paging.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"155693890","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\nfrom django.conf import settings\nimport 
project.app.models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('app', '0017_auto_20151019_1643'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Massive',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('ide', models.CharField(default=project.app.models.pkgen, unique=True, max_length=40)),\n ('name', models.CharField(max_length=100, verbose_name=b'Nombre del evento')),\n ('place_point', models.CharField(max_length=250, verbose_name=b'D\\xc3\\xb3nde?')),\n ('place_point_lat', models.CharField(max_length=200, null=True, blank=True)),\n ('place_point_lng', models.CharField(max_length=200, null=True, blank=True)),\n ('start_date', models.DateField(help_text=b'Cu\\xc3\\xa1ndo empieza?', verbose_name=b'Fecha de inicio')),\n ('start_time', models.TimeField(null=True, verbose_name=b'Hora de inicio', blank=True)),\n ('end_date', models.DateField(null=True, verbose_name=b'Fecha de termino', blank=True)),\n ('end_time', models.TimeField(null=True, verbose_name=b'Hora de termino', blank=True)),\n ('show_guest', models.BooleanField(default=False, verbose_name=b'Mostrar invitados?')),\n ('photo', models.ImageField(help_text=b'Debe ser menor a 300kb', upload_to=project.app.models.pathphoto_Massive, null=True, verbose_name=b'Foto', blank=True)),\n ('creation_date', models.DateTimeField(auto_now_add=True, verbose_name=b'Fecha de creaci\\xc3\\xb3n')),\n ('modified_date', models.DateTimeField(auto_now=True, verbose_name=b'Ultima modificaci\\xc3\\xb3n')),\n ('administrators', models.ManyToManyField(to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.AlterField(\n model_name='events',\n name='atm',\n field=models.BooleanField(default=False, verbose_name=b' cajero'),\n ),\n migrations.AlterField(\n model_name='events',\n name='bar',\n field=models.BooleanField(default=False, verbose_name=b' bar'),\n ),\n migrations.AlterField(\n model_name='events',\n name='cafe',\n field=models.BooleanField(default=False, verbose_name=b' cafe'),\n ),\n migrations.AlterField(\n model_name='events',\n name='hospital',\n field=models.BooleanField(default=False, verbose_name=b' hospital'),\n ),\n migrations.AlterField(\n model_name='events',\n name='meal_delivery',\n field=models.BooleanField(default=False, verbose_name=b' comida a domicilio'),\n ),\n migrations.AlterField(\n model_name='events',\n name='meal_takeaway',\n field=models.BooleanField(default=False, verbose_name=b' comida para llevar'),\n ),\n migrations.AlterField(\n model_name='events',\n name='name',\n field=models.CharField(help_text=b'Debes darle un nombre a tu ruta', max_length=100, verbose_name=b'Nombre tu ruta'),\n ),\n migrations.AlterField(\n model_name='events',\n name='police',\n field=models.BooleanField(default=False, verbose_name=b' policia'),\n ),\n migrations.AlterField(\n model_name='events',\n name='restaurant',\n field=models.BooleanField(default=False, verbose_name=b' restaurante'),\n ),\n migrations.AlterField(\n model_name='events',\n name='shoe_store',\n field=models.BooleanField(default=False, verbose_name=b' tienda de zapatos'),\n ),\n migrations.AlterField(\n model_name='events',\n name='start_date',\n field=models.DateField(default=datetime.date(2015, 10, 22), help_text=b'Indica la fecha de inicio del evento', verbose_name=b'Fecha de inicio'),\n ),\n migrations.AlterField(\n model_name='events',\n name='start_time',\n field=models.TimeField(default=datetime.time(16, 38, 31, 406453), help_text=b'Indica la hora de inicio del evento', 
verbose_name=b'Hora de inicio'),\n ),\n ]\n","sub_path":"project/app/migrations/0018_auto_20151022_1638.py","file_name":"0018_auto_20151022_1638.py","file_ext":"py","file_size_in_byte":5193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"226541690","text":"class Solution:\n def waysToSplitArray(self, nums) -> int:\n n = len(nums)\n s = sum(nums)\n a = 0\n res = 0\n for i in range(n - 1):\n a += nums[i]\n b = s - a\n if a >= b:\n res += 1\n return res\n\n\ns = Solution()\nprint(s.waysToSplitArray([10, 4, -8, 7]))\n","sub_path":"leetcode/2022/bicontest/bcontest-078/bContest2.py","file_name":"bContest2.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"328771781","text":"from django.urls import path\n\nfrom . import views\n\napp_name = 'apps_accounts'\n\nurlpatterns = [\n path('minha/', views.UpdateProfile.as_view(), name='my_account'),\n path('minha/foto/', views.MyAccountProfilePictureView.as_view(), name='my_account_profile_picture'),\n]\n","sub_path":"project/apps/accounts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"71598898","text":"import os\nimport time\nimport click\nfrom . import procs\n\n\nclass Interpreter:\n\tdef __init__(self, ctx, verbose):\n\t\tself.ctx = ctx\n\t\tself.verbose = verbose\n\t\tself.lines = []\n\t\tself.in_comment = False\n\n\n\tdef feed(self, line):\n\t\tif len(self.lines) > 0:\n\t\t\t# End of multi-line comment\n\t\t\tif self.lines[0].startswith('#==') and line.endswith('==#'):\n\t\t\t\tself.lines = []\n\t\t\t\tself.in_comment = False\n\t\t\t\treturn False\n\n\t\t\treturn True\n\n\t\tstart_time = time.time()\n\t\t# Handle exit command or EOF\n\t\tif line == 'exit':\n\t\t\tself.ctx.exit()\n\t\t# Blank lines\n\t\telif line.strip() == '':\n\t\t\tpass\n\n\t\t# Print debug information\n\t\telif line == 'debug':\n\t\t\tclick.echo('Configuration values:')\n\t\t\tfor key, val in self.ctx.obj.config.items():\n\t\t\t\tclick.echo(f' {key} = {repr(val)}')\n\t\t# cd\n\t\telif line.startswith('cd '):\n\t\t\ttry:\n\t\t\t\tdirname = line[3:].strip()\n\t\t\t\tos.chdir(os.path.expanduser(dirname))\n\t\t\texcept OSError as e:\n\t\t\t\tclick.echo(e, err=True)\n\n\t\t# Start of multiline comments\n\t\telif line.startswith('#=='):\n\t\t\tself.lines.append(line)\n\t\t\tself.in_comment = True\n\t\t\tself.ctx.obj.previous_cmd_duration = 0\n\t\t\treturn True\n\n\t\t# Single-line comments\n\t\telif line.strip()[0] == '#':\n\t\t\tpass\n\n\t\t# Normal commands\n\t\telse:\n\t\t\ttry:\n\t\t\t\twith self.ctx:\n\t\t\t\t\tprocs.run_line(line, echo_args=self.verbose)\n\t\t\texcept FileNotFoundError as e:\n\t\t\t\tclick.echo(f'Command not found: {e.filename}', err=True)\n\n\t\tself.lines = []\n\t\tself.ctx.obj.previous_cmd_duration = time.time() - start_time\n\t\treturn False\n","sub_path":"dish/interpreter.py","file_name":"interpreter.py","file_ext":"py","file_size_in_byte":1464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"473313222","text":"\"\"\"\nQuestion: https://leetcode.com/problems/maximum-depth-of-binary-tree/submissions/\n\nGiven a binary tree, find its maximum depth.\n\nThe maximum depth is the number of nodes along the longest path from the root node down to the farthest leaf node.\n\nNote: A leaf is a node with no children.\n\nExample:\n\nGiven 
binary tree [3,9,20,null,null,15,7],\n\n 3\n / \\\n 9 20\n / \\\n 15 7\nreturn its depth = 3.\n\"\"\"\nfrom implementations.data_structures import TreeNode\n\n\nclass Solution:\n def maxDepth(self, root: TreeNode) -> int:\n return self.first_implementation(root)\n\n def first_implementation(self, root: TreeNode) -> int:\n \"\"\"\n Use a stack to log the depth from the current node down to its children.\n Implementing the preorder traversal with an added index.\n Time Complexity: O(n) as all nodes are visited.\n Space Complexity: O(n)\n \"\"\"\n if not root:\n return 0\n stack = [(1, root)]\n depth = 0\n\n while stack:\n current_depth, root = stack.pop()\n if root is not None:\n depth = max(depth, current_depth)\n stack.append((current_depth + 1, root.left))\n stack.append((current_depth + 1, root.right))\n return depth\n","sub_path":"python/coding_challenges/leet_code/maximum_depth_of_binary_tree.py","file_name":"maximum_depth_of_binary_tree.py","file_ext":"py","file_size_in_byte":1276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"409410630","text":"import numpy as np\n\n\ndef neighbors(x,y):\n dx = np.asarray([-1, -1, -1, 0, 0, 1, 1, 1])\n dy = np.asarray([-1, 0, 1, -1, 1, -1, 0, 1])\n xs = dx + x\n ys = dy + y\n\n return xs, ys\n\n# grid is: -1 if inaccessible. 0 otherwise\ndef flood_fill(x, y, grid):\n grid[x,y] = 1\n xs, ys = neighbors(x, y)\n for i in range(len(xs)):\n x = xs[i]\n y = ys[i]\n if 0 <= x < grid.shape[0] and 0 <= y < grid.shape[1] and grid[x,y] == 0:\n flood_fill(x,y,grid)\n\n","sub_path":"flood_fill.py","file_name":"flood_fill.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"178262045","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 11 19:15:52 2020\n\n@author: S11De\n\"\"\"\n\nimport pandas as pd #work with dataframes\nimport os #for the dataset load path\nimport matplotlib.pyplot as plt #work with plots and visualisation\nfrom numpy import datetime64 as datetime #work with date cells\n\nCSVNAME = 'walmart.csv' #name of the source file with the dataset\nPERCENTDELETION = 0.6 #what share of missing values is acceptable for a variable\n\ndef DownloadDS():\n df = pd.read_csv(os.getcwd()+'/'+CSVNAME) #load from the directory the script was launched from\n return df #load the dataset into a dataframe\n\ndef PreWork(df):\n print('General information:')\n print ('First 5 records:\\n' + str(df.head()) + '\\n') #first 5; the last 5 could also be taken via iloc\n print ('Last 5 records:\\n' + str(df.iloc[len(df)-5:len(df)]) + '\\n') #last 5\n print ('Number of observations:\\n' + str(len(df))) #number of observations\n \n print('Variable names and types:\\n')\n print(df.dtypes) #print the name and the type\n print('\\n')\n \ndef DateToDate(df):\n print('Changing the type of the date variable:')\n df['Date'] = pd.to_datetime(df['Date']) #change the type with pd.to_datetime\n #print('Resulting type of the first cell - ' + str(type(df['Date'][0]))) #check the type of the first\n #print('Resulting type of the last cell - ' + str(type(df['Date'][len(df)-1]))) #and of the last element of the date column\n print('Resulting column type - ' + str(df.Date.dtypes))\n print('\\n')\n return df #get a dataframe whose date column has a datetime type\n \ndef MissingFields(df):\n print('Information about empty fields:')\n \n for column in df: #iterate over the columns\n emptyFCount = df[column].isnull().sum()\n print('Variable ' + column + ' has ' + str(emptyFCount) + ' empty fields') #print info about the missing values\n if (emptyFCount/len(df) > PERCENTDELETION): #if the share of empty rows exceeds PERCENTDELETION, drop the column\n del df[column]\n \n print('\\n')\n return df\n\ndef Sampling(df):\n print('Information about the sample:')\n \n print('Stores in the dataset: ' + str(df.Store.value_counts().size)) #number of stores\n print('Departments in the dataset: ' + str(df.Dept.value_counts().size)) #number of departments\n print('The data covers a span of ' + str((df.Date.max()-df.Date.min()).days) + ' days') #difference between the maximum and minimum dates\n\n print('\\n')\n\n\ndef Dynamics(df):\n df = df[['Date', 'Weekly_Sales']] #drop the unneeded columns\n df = df.groupby('Date', as_index=False).aggregate(sum) #group rows by date, individual store revenue does not matter here\n df.plot(x='Date', y='Weekly_Sales')\n \ndef Corr(df):\n plt.matshow(df.corr()) #show the correlation matrix\n plt.show()\n \ndef TopSupp(df, mainfield, topcount):\n dfSel = df[[mainfield, \"Weekly_Sales\"]] #drop the unneeded columns\n dfSel = dfSel.groupby(mainfield, as_index=False).aggregate(sum) #group the rows per mainfield element\n dfSel.sort_values(by=['Weekly_Sales'], ascending=False, inplace=True, ignore_index=True) #sort in descending order so head() really takes the top\n df = df.loc[df[mainfield].isin(dfSel[mainfield].head(topcount))] #keep only the topcount most profitable mainfield elements in the final df\n\n l = [] #list holding a separate dataframe for every mainfield element\n for i in range(topcount):\n l.append(df[df[mainfield] == dfSel[mainfield][i]]) #select only the matching rows of the base dataframe\n l[i] = l[i][['Date', \"Weekly_Sales\"]] #drop all unneeded columns in each individual dataframe\n l[i] = l[i].groupby('Date', as_index=False).aggregate(sum) #group the rows per mainfield element\n l[i] = [l[i], dfSel[mainfield][i]] #attach a name to every dataframe for the legend\n \n for frame in l:\n plt.plot(frame[0]['Date'], frame[0]['Weekly_Sales'], label = frame[1]) #add each child dataframe's curve to the plot\n plt.legend() #draw the legend\n plt.show() #draw the picture\n \ndef Top5(df):\n TopSupp(df, 'Store', 5)\n\ndef Top10(df):\n df = df[df.Type == 'A']\n df = df[['Weekly_Sales', 'Date', 'Dept']]\n df = df[df.Date >= datetime('2011')]\n df = df[df.Date < datetime('2012')] #filtered out the unneeded rows\n \n TopSupp(df, 'Dept', 10)\n\n \ndef main():\n df = DownloadDS() #load the dataset into a dataframe\n PreWork(df) #first/last 5 observations, how many observations, which variable formats\n df = DateToDate(df) #convert Date to a date format\n df = MissingFields(df) #drop variables with more than 60% of their fields missing\n Sampling(df) #work with the sample: how many stores and departments, over what time period\n Dynamics(df) #sales dynamics: a plot with the date on the X axis and chain-wide sales on the Y axis\n Corr(df) #correlation matrix of the numeric indicators\n Top5(df) #top 5 biggest stores by total sales over the whole period + their sales dynamics on one plot\n Top10(df) #top 10 biggest DEPARTMENTS by total sales for the year 2011(!) among type A(!) stores + a bar chart for them\n\nmain()","sub_path":"WallmartDatasetStatsComp/WallmartDatasetStatsComp.py","file_name":"WallmartDatasetStatsComp.py","file_ext":"py","file_size_in_byte":6978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"9239055","text":"\"\"\"Module provides the capabilities to create,\r\nmanipulate and operate on IPv4 addresses and networks.\"\"\"\r\n#!/usr/local/bin/python3\r\n\r\nMAX_INT = 4294967295\r\n\r\n\r\nclass IllegalArgumentError(ValueError):\r\n \"\"\"Exceptions class\"\"\"\r\n pass\r\n\r\n\r\nclass IPv4Address(object):\r\n \"\"\"Class provides functions to conveniently\r\n create and display IPv4 addresses\"\"\"\r\n @staticmethod\r\n def to_str(addr):\r\n \"\"\"Int(addr) -> str(addr).\"\"\"\r\n pos = 32\r\n octets = []\r\n for _ in range(4):\r\n pos -= 8\r\n octets.append(addr.__rshift__(pos) & 255)\r\n\r\n return '.'.join(str(oc) for oc in octets)\r\n\r\n def __init__(self, value):\r\n self.address = value\r\n\r\n @property\r\n def address(self):\r\n \"\"\"Returns long representation of address\"\"\"\r\n return self.__dict__.get('address')\r\n\r\n @address.setter\r\n def address(self, value):\r\n \"\"\" str -> long\r\n list -> long\r\n tuple -> long \"\"\"\r\n octets = []\r\n\r\n if isinstance(value, str):\r\n octets = [int(x) for x in (value.split('.', 3))[:4]]\r\n elif isinstance(value, list) or isinstance(value, tuple):\r\n octets = [int(x) for x in value[:4]]\r\n elif isinstance(value, int):\r\n if value < 0 or value > MAX_INT:\r\n raise IllegalArgumentError\r\n self.__dict__['address'] = value\r\n return\r\n else:\r\n raise IllegalArgumentError\r\n\r\n for octet in octets:\r\n if octet > 255 or octet < 0:\r\n raise IllegalArgumentError\r\n\r\n ip_long = 0\r\n pos = 32\r\n for octet in octets[:4]:\r\n pos -= 8\r\n ip_long = ip_long.__or__(octet.__lshift__(pos))\r\n\r\n self.__dict__['address'] = ip_long\r\n\r\n def to_binary(self):\r\n \"\"\"Returns binary representation of this address\"\"\"\r\n return bin(self.address)[2:]\r\n\r\n def __str__(self):\r\n return IPv4Address.to_str(self.address)\r\n\r\n def __lt__(self, other):\r\n return self.address < other.address\r\n\r\n def __gt__(self, other):\r\n return self.address > other.address\r\n\r\n def __eq__(self, other):\r\n return self.address == other.address\r\n\r\n def __ne__(self, other):\r\n return not self.__eq__(other)\r\n\r\n\r\nclass IPv4Mask(IPv4Address):\r\n \"\"\"Class provides functions to conveniently\r\n create and display network mask for IPv4 address\"\"\"\r\n def __init__(self, prefixlen):\r\n self.prefixlen = prefixlen\r\n\r\n @property\r\n def prefixlen(self):\r\n \"\"\"Return prefix length\"\"\"\r\n return self.__dict__.get('prefixlen')\r\n\r\n @prefixlen.setter\r\n def prefixlen(self, value):\r\n \"\"\"Set prefixlength value\"\"\"\r\n if isinstance(value, int) and value <= 32 and value >= 0:\r\n self.__dict__['prefixlen'] = value\r\n IPv4Address.__init__(self, MAX_INT - (2 ** (32 - value) - 1))\r\n else:\r\n IPv4Address.__init__(self, value)\r\n self.__dict__['prefixlen'] = len(self.to_binary().rstrip('0'))\r\n\r\n def __str__(self):\r\n return '{address}/{prefixlen}'.format(address=super().__str__(),\r\n prefixlen=self.prefixlen)\r\n\r\n def __eq__(self, other):\r\n return super().__eq__(other)\r\n\r\n def __gt__(self, other):\r\n return super().__gt__(other)\r\n\r\n def __lt__(self, other):\r\n return super().__lt__(other)\r\n\r\n\r\ndef main():\r\n \"\"\"Test functionality\"\"\"\r\n #correct data\r\n ip_address = 
IPv4Address((192, 168, 100, 1))\r\n mask = IPv4Mask([255, 255, 255, 240])\r\n\r\n # ip_address = IPv4Address([192, '168', 220, 78])\r\n # ip_address = IPv4Address((192, 168, 220, 78))\r\n # ip_address = IPv4Address('132.25.63')\r\n\r\n\r\n # #incorrect data\r\n # ip_address = IPv4Address(42949672953)\r\n # ip_address = IPv4Address(-2333)\r\n # ip_address = IPv4Address([192, '168', -220, 78])\r\n # ip_address = IPv4Address((192, '168', -220, 78))\r\n # ip_address = IPv4Address('500.569.12.33')\r\n # ip_address = IPv4Address('212.58.-160.160')\r\n\r\n # print(ip_address.to_str())\r\n # print(mask.prefixlen)\r\n # print(mask.to_str())\r\n # print(ip_address.to_binary())\r\n\r\n print(ip_address)\r\n print(mask)\r\n\r\n ip_address2 = IPv4Address('88.208.58.212')\r\n\r\n print('{0} equals to {1}: {2}'.format(ip_address2,\r\n ip_address, ip_address2 == ip_address))\r\n print('{0} greater than {1}: {2}'.format(ip_address2,\r\n ip_address, ip_address2 > ip_address))\r\n print('{0} less than {1}: {2}'.format(ip_address2,\r\n ip_address, ip_address2 < ip_address))\r\n print('{0} not equals to {1}: {2}'.format(ip_address2,\r\n ip_address, ip_address2 != ip_address))\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"ipv4.py","file_name":"ipv4.py","file_ext":"py","file_size_in_byte":4738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"483157063","text":"'''\nCalculating the mode when the list of numbers may have multiple modes\n'''\nfrom collections import Counter\n\ndef calculate_mode(numbers):\n c = Counter(numbers)\n numbers_freq = c.most_common()\n max_count = numbers_freq[0][1]\n\n modes = []\n for num in numbers_freq:\n if num[1] == max_count:\n modes.append(num[0])\n return modes\n\nif __name__ == '__main__':\n scores =[5,5,5,4,4,4,9,1,3]\n modes = calculate_mode(scores)\n print('The mode(s) of the list of numbers are:')\n for mode in modes:\n print(mode)\n","sub_path":"mode.py","file_name":"mode.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"194209713","text":"import random as rnd\nimport string\n\nclass Pile:\n def __init__(self, name, size):\n self.name = name\n self.size = size\n\n def add_items(self, num_items):\n self.size += num_items\n\n def remove_items(self, num_items):\n if(num_items <= self.size):\n self.size -= num_items\n return(f\"removed {num_items} item{'s' if num_items > 1 else ''} from pile {self.name}\")\n else:\n raise ValueError(\n f\"Couldn't remove {num_items} items from pile {self.name} as it only contains {self.size} items\")\n\n\nclass Nim:\n def __init__(self, misere):\n self.piles = []\n self.i_piles = {}\n self.comp_text = ''\n self.misere = misere\n self.winner = False\n\n def is_game_over(self):\n for p in self.piles:\n if p.size > 0:\n return False\n\n return True\n\n def make_pile(self, name, num_items):\n self.piles.append(Pile(name, num_items))\n\n # calculates the nim-sum of all pile sizes\n def get_total_nim_sum(self):\n # creates an array to hold the size of each pile\n sizes = []\n for p in self.piles:\n sizes.append(p.size)\n\n total = sizes[0]\n # calculate the nim sum of each pile\n for i in range(1, len(sizes)):\n total = self.get_nim_sum(total, sizes[i])\n return total\n\n def get_nim_sum(self, a, b):\n return a ^ b\n\n def index_piles_by_name(self):\n self.i_piles = {}\n for pile in self.piles:\n self.i_piles.update({pile.name: pile})\n\n def get_max_pile_size(self):\n max_pile_size = 0\n 
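# A plain linear scan is enough here; a Nim game typically has only a handful of piles.\n 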
for p in self.piles:\n if p.size > max_pile_size:\n max_pile_size = p.size\n\n return max_pile_size\n\n def make_finishing_misere_move(self):\n max_pile_size = 0\n for p in self.piles:\n if p.size > max_pile_size:\n max_pile = p\n max_pile_size = p.size\n\n count_nonempty_piles = sum(1 for pile in self.piles if pile.size > 0)\n # if there is an odd number of nonempty piles, we want to keep the number of nonempty piles odd\n # to ensure we win\n if count_nonempty_piles % 2 == 1:\n move = max_pile.remove_items(max_pile.size-1)\n if self.is_game_over():\n self.winner = \"You\"\n return \"Computer \"+move\n else:\n # if there is an even number of nonempty piles\n # remove all the items from a pile to make the number of nonempty piles odd\n move = max_pile.remove_items(max_pile.size)\n if self.is_game_over():\n self.winner = \"You\"\n return \"Computer \"+move\n\n # This function is the \"brains\" of the computer player\n # It will make a winning move if one is available, otherwise it will make a completely random move\n # This move is made in O(n) time so the number of piles barely impacts the system solution\n def make_winning_move(self):\n if self.misere:\n # the strategy to win a misere games only differs when there is exactly one pile with a size >= 2\n piles_larger_than_2 = sum(1 for pile in self.piles if pile.size > 1)\n if piles_larger_than_2 == 1:\n return self.make_finishing_misere_move()\n\n n_sum = self.get_total_nim_sum()\n # If the total nim sum is not zero prior to making a move, the computer will win this game\n if n_sum != 0:\n winning_moves = {}\n for p in self.piles:\n # get the nim sum of the total nim sum of all piles and the current pile size\n p_sum = self.get_nim_sum(n_sum, p.size)\n\n # if the result of p_sum is less than the current pile size, reducing this pile will be a winning move\n if p_sum < p.size:\n # we want to reduce this pile's size to the nim sum of its current size and the total nim sum of all pile sizes\n rem = p.size - p_sum\n winning_moves.update({p: rem})\n\n # we pick the largest from the list of winning moves\n # to try and speed things along\n best_rem = 0\n for pile, rem in winning_moves.items():\n if rem > best_rem:\n best_rem = rem\n best_pile = pile\n\n # the nim-sum of all pile sizes is now zero,\n # meaning it's now impossible for the user to win this game\n move = best_pile.remove_items(best_rem)\n if self.is_game_over():\n self.winner = \"You\" if self.misere else \"Computer\"\n else:\n self.comp_text = \"It's no longer possible for you to win this game :)\"\n\n return \"Computer \"+move\n\n else:\n # Since the nim-sum of all pile sizes is zero prior to making a move,\n # it's not possible for the computer to win unless the user makes a mistake\n # So instead, the computer will make a random move\n p = rnd.choice(self.piles)\n while p.size == 0:\n p = rnd.choice(self.piles)\n\n move = p.remove_items(rnd.randint(1, p.size))\n if self.is_game_over():\n self.winner = \"You\" if self.misere else \"Computer\"\n return \"Computer \"+move\n\n","sub_path":"nim_game.py","file_name":"nim_game.py","file_ext":"py","file_size_in_byte":5369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"521781040","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.9-x86_64/egg/DBApps/DbApp.py\n# Compiled at: 2020-05-06 17:23:13\n# Size of source mod 2**32: 5101 
bytes\n\"\"\"\nCreated 2018-VIII-24\n@author: jimk\n\"\"\"\nimport pymysql as mysql\nfrom config.config import DBConfig\nimport os\n\nclass DbApp:\n __doc__ = '\\n Base class for database applications\\n '\n _dbConfig: DBConfig\n _cn: mysql.Connection\n _dbConnection: mysql.Connection\n _expectedColumns = None\n _expectedColumns: list\n\n def __init__(self, dbConfig: DBConfig):\n self.dbConfig = dbConfig\n self.connection = None\n self.ExpectedColumns = []\n\n def start_connect(self) -> None:\n \"\"\"\n Opens a database connection using the DBConfig\n :return: Nothing. Sets class connection property\n \"\"\"\n self.connection = mysql.connect(read_default_file=(self.dbConfig.db_cnf), read_default_group=(self.dbConfig.db_host),\n charset='utf8')\n\n @property\n def ExpectedColumns(self) -> list:\n \"\"\"\n If a subclass returns a result set from a query, you may only want some of the query columns\n If this list is empty, all the data set columns are returned\n :return:\n \"\"\"\n return self._expectedColumns\n\n @ExpectedColumns.setter\n def ExpectedColumns(self, value: list):\n assert isinstance(value, list)\n self._expectedColumns = value\n\n @property\n def dbConfig(self) -> DBConfig:\n return self._dbConfig\n\n @dbConfig.setter\n def dbConfig(self, drsDbConfig: str):\n \"\"\"\n gets dbConfig values for setup\n :param drsDbConfig: in section:file format\n :return:\n \"\"\"\n if drsDbConfig is None:\n self._dbConfig = None\n return\n try:\n args = drsDbConfig.split(':')\n dbName = args[0]\n dbConfigFile = os.path.expanduser(args[1])\n except IndexError:\n raise IndexError('Invalid argument %s: Must be formatted as section:file ' % drsDbConfig)\n\n self._dbConfig = DBConfig(dbName, dbConfigFile)\n\n @property\n def connection(self) -> mysql.Connection:\n return self._cn\n\n @connection.setter\n def connection(self, value):\n self._cn = value\n\n def validateExpectedColumns(self, cursorDescription: list) -> None:\n \"\"\"\n Validates the cursor after a call to the database. Checks for\n the required columns (from member ExpectedColumns) in the output\n\n :param cursorDescription: tuple of tuples\n :return: Throws ValueError on fail\n \"\"\"\n found = False\n for expectedColumn in self.ExpectedColumns:\n found = False\n for cursorTuple in cursorDescription:\n if cursorTuple[0] == expectedColumn:\n found = True\n break\n\n if not found:\n break\n\n if not found:\n raise ValueError('SPROC did not return expected columns')\n\n def GetSprocResults(self, sproc: str, maxWorks: int=200) -> list:\n \"\"\"\n call a sproc using the internal connection,\n validate the result columns with the internal member.\n\n :rtype: list of dictionary objects of results. 
Caller decodes format\n :param sproc: routine to call\n :param maxWorks: limit of return rows\n :returns: a list of dictionary items, each item is a return row\n \"\"\"\n self.start_connect()\n rl = []\n hasNext = True\n with self.connection:\n try:\n workCursor = self.connection.cursor(mysql.cursors.DictCursor)\n print(f\"Calling {sproc} for n = {maxWorks} \")\n workCursor.callproc(f\"{sproc}\", (maxWorks,))\n self.validateExpectedColumns(workCursor.description)\n while hasNext:\n resultRows = workCursor.fetchall()\n rl.append(resultRows)\n hasNext = workCursor.nextset()\n\n finally:\n while hasNext:\n workCursor.fetchall()\n hasNext = workCursor.nextset()\n\n return rl\n\n def CallAnySproc(self, sproc: str, *args):\n \"\"\"\n Calls a routine without analyzing the result\n :param sproc: routine name\n :param args: arguments\n :return: true if there are any results, throws exception otherwise.\n Caller handles\n \"\"\"\n self.start_connect()\n rl = []\n with self.connection:\n workCursor = self.connection.cursor()\n print(f\"Calling {sproc}\")\n workCursor.callproc(f\"{sproc}\", tuple((arg for arg in args)))\n workCursor.fetchall()","sub_path":"pycfiles/DBApps-0.42.4-py3.7/DbApp.cpython-37.py","file_name":"DbApp.cpython-37.py","file_ext":"py","file_size_in_byte":4878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"140303272","text":"\"\"\"\nThis Module contains base routes.\n\nRoute Functions\n home: Renders the home page, where a user can choose a story.\n prompts: Renders a page where the user fills in parts of speech prompts.\n finished_story: Allows the user to review their new story with the option to save it.\n saved_stories: Allows the user to view all their saved stories.\n seed_stories: Used to plant pre-fabricated stories in the database.\n\"\"\"\n\nfrom . import app\nfrom .auth import login_required, login\nfrom .forms import *\nfrom .models import PresetStory, UserStory, db\nfrom .stories import *\nfrom .utilities import *\nfrom flask import render_template, redirect, url_for, session, request, g, flash\nimport os\nimport requests\nfrom sqlalchemy.exc import DBAPIError, InvalidRequestError\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef home():\n \"\"\"\n Renders the home page, where a user can choose from a selection of stories.\n\n Returns\n Home Page: Renders if user is logged in.\n Login Page: Redirects if user is not logged in.\n Prompts Page: Redirects to the prompts page if the user selects a story.\n \"\"\"\n\n if g.user:\n\n form = StorySelect()\n\n if form.validate_on_submit():\n story_id = form.data['stories']\n\n return redirect(url_for('.prompts', id=story_id))\n\n return render_template('home.html', form=form)\n\n return redirect(url_for('.login'))\n\n\n@app.route('/prompts/<id>', methods=['GET'])\n@login_required\ndef prompts(id):\n \"\"\"\n Renders a page where the user fills in parts of speech prompts.\n\n Args\n id: The id of the selected story, passed with the redirect from the home page.\n\n Returns\n Story Page: Redirects to the story page, passing the user's inputs.\n\n \"\"\"\n\n story = PresetStory.query.filter_by(id=id).first()\n\n story_dict = {\n 'title': story.title,\n 'content': story.content,\n 'prompts': story.prompts\n }\n\n stories_new = send_prompts_to_form(story_dict)\n\n return render_template(\n 'prompts.html',\n stories_new=stories_new,\n id=id,\n title=story_dict['title'],\n form=PromptsForm()\n )\n\n\n@app.route('/story/<id>', methods=['GET', 'POST'])\n@login_required\ndef finished_story(id):\n \"\"\"\n Allows the user to review their new story with the option to save it.\n\n Args\n id: The id of the selected story, passed with the redirect from the prompts page.\n\n Returns\n Prompts Page: Redirects user to the prompts page if they arrive at the story page without having entered prompts.\n Saved Stories Page: Renders the Saved Stories page.\n \"\"\"\n if request.method == 'GET':\n return redirect(url_for('.prompts', id=id))\n\n data = request.form.to_dict()\n keylist = []\n\n for key in data:\n arr = data[key].split(' ')\n for i in range(len(arr)):\n arr[i] = '*' + arr[i]\n str = ' '.join(arr)\n data[key] = str\n\n for key in data:\n keylist.append((int(key), data[key]))\n\n story = PresetStory.query.filter_by(id=id).first()\n\n story_dict = {\n 'title': story.title,\n 'content': story.content,\n 'prompts': story.prompts\n }\n\n story_array = array_from_story_string(story_dict['content'])\n new_story_array = replace_words(story_array, keylist)\n new_story = string_from_array(new_story_array)\n\n form_context = {\n 'title': story_dict['title'],\n 'content': new_story\n }\n\n form = FinalStoryForm(**form_context)\n\n return render_template(\n 'story.html',\n form=form,\n id=id,\n new_story=new_story,\n title=story_dict['title'],\n array_from_story_string=array_from_story_string\n )\n\n\n@app.route('/saved', methods=['GET', 'POST'])\n@login_required\ndef saved_stories():\n \"\"\"\n Allows the user to view all their saved stories.\n\n Returns\n Saved Stories Page: Renders the saved stories page.\n Home Page: If there is an error, the user is redirected to the home page.\n\n Exceptions\n DBAPIError: Failure of saving to a database.\n InvalidRequestError: Runtime state or other SQLAlchemy errors.\n \"\"\"\n form = FinalStoryForm()\n\n if form.validate_on_submit():\n try:\n user_story = UserStory(\n title=form.data['title'],\n content=form.data['content'],\n user_id=g.user.id\n )\n\n db.session.add(user_story)\n db.session.commit()\n\n except (DBAPIError, InvalidRequestError):\n flash('I AM GROOT?')\n return render_template('/home.html')\n\n return redirect(url_for('.saved_stories'))\n\n stories = UserStory.query.filter(UserStory.user_id == g.user.id).all()\n return render_template(\n 'saved.html',\n stories=stories,\n array_from_story_string=array_from_story_string,\n len=len\n )\n\n\n@app.route('/delete/<id>', methods=['POST'])\ndef delete_story(id):\n \"\"\"\n Allows the user to delete any of their saved stories.\n\n Returns\n Saved Stories Page: Renders the saved stories page, less the deleted story.\n\n Exceptions\n DBAPIError: Failure of saving to a database.\n InvalidRequestError: Runtime state or other SQLAlchemy errors.\n \"\"\"\n exhiled = UserStory.query.filter_by(id=id).first()\n\n db.session.delete(exhiled)\n db.session.commit()\n\n return redirect(url_for('.saved_stories'))\n\n\n@app.route('/test_stories')\ndef seed_stories():\n \"\"\"\n Plants pre-fabricated stories in the database.\n\n Used for the developers to add additional pre-fabricated stories to the database. Returns a message of successful planting.\n \"\"\"\n\n story1 = convert_dict_to_model_instance(story_one)\n story2 = convert_dict_to_model_instance(story_two)\n story3 = convert_dict_to_model_instance(story_three)\n story4 = convert_dict_to_model_instance(story_four)\n story5 = convert_dict_to_model_instance(story_five)\n\n db.session.add(story1)\n db.session.add(story2)\n db.session.add(story3)\n db.session.add(story4)\n db.session.add(story5)\n db.session.commit()\n\n return 'Stories added to database!'\n\n","sub_path":"src/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":6173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"477509387","text":"from django.conf.urls import url\nimport authapp.views as authapp\n\napp_name = 'auth'\n\nurlpatterns = [\n url( r'^login/$' , authapp.login, name= 'login' ),\n url( r'^logout/$' , authapp.logout, name= 'logout' ),\n url( r'^register/$' , authapp.register, name= 'register' ),\n url( r'^edit/$' , authapp.edit, name= 'edit' ),\n]\n","sub_path":"authapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"395487511","text":"n = int(input())\npalavras = []\nwhile n > 0:\n i = n\n for i in range(n):\n palavras.append(input())\n maior_tamanho = len(max(palavras,key=len))\n for palavra in palavras:\n if palavra == '':\n print(palavra)\n else:\n print(palavra.rjust(maior_tamanho, ' '))\n n=int(input())\n if n != 0:\n palavras=['']","sub_path":"1273.py","file_name":"1273.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"255636434","text":"from spack import *\nimport sys,os\nsys.path.append(os.path.join(os.path.dirname(__file__), '../../common'))\nfrom scrampackage import write_scram_toolfile\n\n\nclass PyCythonToolfile(Package):\n url = 'file://' + os.path.dirname(__file__) + '/../../common/junk.xml'\n version('1.0', '68841b7dcbd130afd7d236afe8fd5b949f017615', expand=False)\n depends_on('py-cython')\n\n def install(self, spec, prefix):\n values = {}\n values['VER'] = spec['py-cython'].version\n values['PFX'] = spec['py-cython'].prefix\n fname = 'cython.xml'\n contents = str(\"\"\"\n\n 
\n \n \n \n \n\n\"\"\")\n write_scram_toolfile(contents, values, fname, prefix)\n","sub_path":"packages/py-cython-toolfile/package.py","file_name":"package.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"294215095","text":"from __future__ import print_function\nfrom airflow.operators.dummy_operator import DummyOperator\nfrom airflow.operators import PythonOperator\nfrom airflow.operators.bash_operator import BashOperator\nfrom airflow.models import DAG\nfrom datetime import datetime\n\nargs = {\n 'owner': 'airflow',\n 'start_date': datetime.now(),\n}\n\ndag = DAG(\n dag_id='image_submission_simulation', default_args=args,\n schedule_interval=None)\n\ndef print_context(i):\n print(i)\n return 'print_context has success {}'.format(i)\n\ntemplated_command = \"\"\"\n\n\npeg scp to-rem pySpark-cluster 1 ~/sample_labelsaa /home/ubuntu/sample_labelsaa\n\npeg sshcmd-node pySpark-cluster 1 \"touch dummy_from_airflow.txt\" \n\npeg sshcmd-node pySpark-cluster 1 \"nohup sh ~/Deep_Images_Hub/src/producer/auto_upload.sh ~/sample_labelsaa \"test\" > ~/Deep_Images_Hub/src/producer/auto_upload.log & \" \n\n\"\"\"\n\n\n\n# peg ssh pySpark-cluster {{ node_number }}\n#\n\n# sh /home/ubuntu/Deep_Images_Hub/src/producer/auto_upload.sh {{ src_lables_file }} {{ src_type }} > /home/ubuntu/Deep_Images_Hub/src/producer/auto_upload.log &\n\n\n\nLabels_prep = \\\nBashOperator(\n task_id='Batch_Image_Submissions',\n bash_command=templated_command,\n params = {'node_number': 1, 'sample_file' : 'sample_labelsaa' , 'src_lables_file' : '~/sample_labelsaa' , 'src_type' : 'test' },\ndag=dag)\n\n\nparent_on_node1 = None\nfor i in range(120):\n '''\n Generate 120 chained image-submission tasks that run\n one after another; each task needs a unique task_id\n '''\n task_on_node1 = \\\n\tBashOperator(\n \t\ttask_id='Batch_Image_Submissions_{}'.format(i),\n \t\tbash_command=templated_command,\n \t\tparams = {'node_number': 1, 'sample_file' : 'sample_labelsaa' , 'src_lables_file' : '~/sample_labelsaa' , 'src_type' : 'test' },\n\t\tdag=dag)\t\n\t\n if parent_on_node1:\n task_on_node1.set_upstream(parent_on_node1)\n\n parent_on_node1 = task_on_node1\n\n\n","sub_path":"src/airflow/workspace/airflow_home/dags/user_images_upload_simulation.py","file_name":"user_images_upload_simulation.py","file_ext":"py","file_size_in_byte":1856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"151877483","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@Author: tushushu\n@Date: 2021-07-07 20:30:45\n@Last Modified by: tushushu\n@Last Modified time: 2021-07-07 20:30:45\n\"\"\"\nfrom typing import List\n\n\nclass Solution:\n def getRow(self, rowIndex: int) -> List[int]:\n if rowIndex == 0:\n return [1]\n if rowIndex == 1:\n return [1, 1]\n if rowIndex == 2:\n return [1, 2, 1]\n result = [1, 2, 1]\n tmp = [1]\n while rowIndex != 2:\n for i in range(0, len(result) - 1):\n tmp.append(result[i] + result[i + 1])\n tmp.append(1)\n result = tmp\n tmp = [1]\n rowIndex -= 1\n return result\n","sub_path":"python/119. 杨辉三角 II.py","file_name":"119. 
杨辉三角 II.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"614803697","text":"import re\nimport argparse\n\ndef trans_dict(line):\n line_list = []\n for word in line.rstrip('\\n').split('\\n'):\n data = re.split('[\\t,]', word)\n word_dict = {'surface' : data[0], 'base' : data[7], 'pos' : data[1], 'pos1' : data[2]}\n line_list.append(word_dict)\n\n return line_list\n\ndef parse():\n parser = argparse.ArgumentParser()\n\n parser.add_argument('file')\n\n args = parser.parse_args()\n\n return args\n\nif __name__ == '__main__':\n args = parse()\n with open(args.file, 'r') as f:\n lines = f.read().split('EOS\\n')\n\n lines = list(filter(lambda x: x != '', lines))\n result = [trans_dict(line) for line in lines]\n","sub_path":"chapter4/Q30.py","file_name":"Q30.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"649488311","text":"# -*- coding: utf-8 -*-#\n\n# -------------------------------------------------------------------------------\n# Name: textcnn\n# Description:\n# Author: orange\n# Date: 2021/7/17\n# -------------------------------------------------------------------------------\n\nimport tensorflow as tf\nfrom tensorflow.keras.layers import Embedding, Conv1D, GlobalAveragePooling1D, Dense, Concatenate, GlobalMaxPooling1D\nfrom tensorflow.keras import Model\n\n\nclass TextCNN(Model):\n\n def __init__(self,\n maxlen,\n max_features,\n embedding_dims,\n class_num,\n kernel_sizes=[1,2,3],\n kernel_regularizer=None,\n last_activation='softmax'\n ):\n '''\n :param maxlen: maximum text length\n :param max_features: vocabulary size\n :param embedding_dims: size of the embedding dimension\n :param kernel_sizes: list of sliding convolution window sizes, eg: [1,2,3]\n :param kernel_regularizer: eg: tf.keras.regularizers.l2(0.001)\n :param class_num:\n :param last_activation:\n '''\n super(TextCNN, self).__init__()\n self.maxlen = maxlen\n self.kernel_sizes = kernel_sizes\n self.class_num = class_num\n self.embedding = Embedding(input_dim=max_features, output_dim=embedding_dims, input_length=maxlen)\n self.conv1s = []\n self.avgpools = []\n for kernel_size in kernel_sizes:\n self.conv1s.append(Conv1D(filters=128, kernel_size=kernel_size, activation='relu', kernel_regularizer=kernel_regularizer))\n self.avgpools.append(GlobalMaxPooling1D())\n self.classifier = Dense(class_num, activation=last_activation, )\n\n def call(self, inputs, training=None, mask=None):\n if len(inputs.get_shape()) != 2:\n raise ValueError('The rank of inputs of TextCNN must be 2, but now is %d' % len(inputs.get_shape()))\n if inputs.get_shape()[1] != self.maxlen:\n raise ValueError('The maxlen of inputs of TextCNN must be %d, but now is %d' % (self.maxlen, inputs.get_shape()[1]))\n\n emb = self.embedding(inputs)\n conv1s = []\n for i in range(len(self.kernel_sizes)):\n c = self.conv1s[i](emb) # (batch_size, maxlen-kernel_size+1, filters)\n c = self.avgpools[i](c) # (batch_size, filters)\n conv1s.append(c)\n x = Concatenate()(conv1s) # (batch_size, len(self.kernel_sizes)*filters)\n output = self.classifier(x)\n return output\n\n def build_graph(self, input_shape):\n '''\n Custom helper; call it before calling model.summary()\n '''\n input_shape_nobatch = input_shape[1:]\n self.build(input_shape)\n inputs = tf.keras.Input(shape=input_shape_nobatch)\n if not hasattr(self, 'call'):\n raise AttributeError(\"User should define 'call' method in sub-class model!\")\n _ = self.call(inputs)","sub_path":"agorithm-items/agorithm-examples/agorithm-tensorflow2-example/learn/course/course003/textcnn.py","file_name":"textcnn.py","file_ext":"py","file_size_in_byte":2967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"188057269","text":"import yfinance as yf\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.linear_model import LinearRegression\nimport datetime\n\nclass ExpoReg:\n def __init__(self):\n pass\n\n def reg_stock(self, stock_ticker, period_years=1, show_plot=False):\n # Define ticker\n ticker = f'{stock_ticker}'\n\n # Draw data from yfinance\n stock = yf.Ticker(f\"{ticker}\")\n hist = stock.history(period=f'{period_years}y', interval='1d')['Close']\n\n # Get Data of ticker\n ticker_data = hist\n df = pd.DataFrame({'Date': ticker_data.index, 'Price': ticker_data.values})\n \n # Drop Rows from Dataframe which contains NaN values from price.\n df = df.dropna()\n\n # Get ordinal Dates into new column\n df['date_ordinal'] = pd.to_datetime(df['Date']).apply(lambda date: date.toordinal())\n\n # Get Ordinal Date Plots ( x axis )\n date_plot = df['date_ordinal'].tolist()\n date_numpy = np.array(date_plot)\n\n # Get Price Plots ( y axis )\n price_plot = df['Price'].tolist()\n price_array = np.array(price_plot)\n price_numpy = price_array\n\n # Get Date Plots ( x axis renamed )\n # convert series timestamps using pandas date time format string, then send to a new list\n rename_date_plot = df['Date'].dt.strftime('%Y-%m-%d').tolist()\n rename_date_array = np.array(rename_date_plot)\n\n # unshaped Ordinal Date Plot\n unshaped_date_numpy = date_plot = np.array(df['date_ordinal'].tolist())\n\n # Create Linear Regression Object\n linreg = LinearRegression()\n\n # Reshape date array to fit linear regression model\n date_numpy = date_numpy.reshape(-1, 1)\n\n # Fit linear regression model into data points.\n linreg.fit(date_numpy, np.log(price_numpy))\n\n # Predict linear regression y values\n price_numpy_pred = linreg.predict(date_numpy)\n\n # Create plot\n plt.plot(date_numpy, price_numpy, markevery=5)\n\n # Plot regression line on scatter plot\n plt.plot(date_numpy, price_numpy_pred, color='red', markevery=5)\n\n # Labels for x and y axis.\n plt.xlabel('Dates')\n plt.ylabel('Close Price [$]')\n\n # y = mx + c\n # Find gradient of lin reg\n gradient = round(float(linreg.coef_[0]), 5)\n\n # Find intercept of lin reg\n intercept = round(linreg.intercept_, 2)\n\n # Find score/ RSquare for exponential regression model\n r_squared = round(linreg.score(date_numpy, np.log(price_numpy), sample_weight=None), 3)\n\n # Set title\n plt.title(f'{ticker} Exponential Regression')\n\n # Get Scored exponential regression\n score_val = round(((1 + gradient)**250 * r_squared) * 250, 2)\n\n\n print(f'''{ticker} Exponential Regression - RSquared : {r_squared}, Score : {score_val}\n---\n ''')\n\n # Add text of r squared, and gradient on plot, change font size to fit the plot.\n font_dict = {'size': 7.5}\n props = dict(boxstyle='round', facecolor='lightblue', alpha=0.9)\n plt.text(date_numpy[-1], price_numpy[-1], f\" Slope = {gradient} \\n R^2 = {r_squared} \\n Score = {score_val}\",\n fontdict=font_dict,\n bbox=props)\n\n # Set new x labels\n # Get unshaped date ord array index, then replace with new dates, followed by rotation for visual effect\n plt.xticks(unshaped_date_numpy, rename_date_array, rotation=45)\n # Show only 15 dates for visual effect.\n plt.locator_params(axis=\"x\", nbins=15)\n
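 # locator_params above caps how many tick labels are drawn; tick_params below only shrinks their font.\n # 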
Lower font size so it fits in one window\n plt.tick_params(axis=\"x\", labelsize=6)\n\n # Show plot on another window yes/no\n if show_plot:\n # Show plot if true\n plt.show()\n else:\n # Do not show plot if false\n pass\n\n # Function returns score value of exponential regression\n return score_val\n","sub_path":"expo_reg.py","file_name":"expo_reg.py","file_ext":"py","file_size_in_byte":3994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"576264264","text":"# coding: utf-8\n\nfrom enum import Enum\nfrom six import string_types, iteritems\nfrom bitmovin_api_sdk.common.poscheck import poscheck_model\nfrom bitmovin_api_sdk.models.analytics_license_custom_data_field_labels import AnalyticsLicenseCustomDataFieldLabels\nimport pprint\nimport six\n\n\nclass AnalyticsLicenseUpdateRequest(object):\n @poscheck_model\n def __init__(self,\n name=None,\n ignore_dnt=None,\n time_zone=None,\n custom_data_field_labels=None):\n # type: (string_types, bool, string_types, AnalyticsLicenseCustomDataFieldLabels) -> None\n\n self._name = None\n self._ignore_dnt = None\n self._time_zone = None\n self._custom_data_field_labels = None\n self.discriminator = None\n\n if name is not None:\n self.name = name\n if ignore_dnt is not None:\n self.ignore_dnt = ignore_dnt\n if time_zone is not None:\n self.time_zone = time_zone\n if custom_data_field_labels is not None:\n self.custom_data_field_labels = custom_data_field_labels\n\n @property\n def openapi_types(self):\n types = {\n 'name': 'string_types',\n 'ignore_dnt': 'bool',\n 'time_zone': 'string_types',\n 'custom_data_field_labels': 'AnalyticsLicenseCustomDataFieldLabels'\n }\n\n return types\n\n @property\n def attribute_map(self):\n attributes = {\n 'name': 'name',\n 'ignore_dnt': 'ignoreDNT',\n 'time_zone': 'timeZone',\n 'custom_data_field_labels': 'customDataFieldLabels'\n }\n return attributes\n\n @property\n def name(self):\n # type: () -> string_types\n \"\"\"Gets the name of this AnalyticsLicenseUpdateRequest.\n\n\n :return: The name of this AnalyticsLicenseUpdateRequest.\n :rtype: string_types\n \"\"\"\n return self._name\n\n @name.setter\n def name(self, name):\n # type: (string_types) -> None\n \"\"\"Sets the name of this AnalyticsLicenseUpdateRequest.\n\n\n :param name: The name of this AnalyticsLicenseUpdateRequest.\n :type: string_types\n \"\"\"\n\n if name is not None:\n if not isinstance(name, string_types):\n raise TypeError(\"Invalid type for `name`, type has to be `string_types`\")\n\n self._name = name\n\n @property\n def ignore_dnt(self):\n # type: () -> bool\n \"\"\"Gets the ignore_dnt of this AnalyticsLicenseUpdateRequest.\n\n\n :return: The ignore_dnt of this AnalyticsLicenseUpdateRequest.\n :rtype: bool\n \"\"\"\n return self._ignore_dnt\n\n @ignore_dnt.setter\n def ignore_dnt(self, ignore_dnt):\n # type: (bool) -> None\n \"\"\"Sets the ignore_dnt of this AnalyticsLicenseUpdateRequest.\n\n\n :param ignore_dnt: The ignore_dnt of this AnalyticsLicenseUpdateRequest.\n :type: bool\n \"\"\"\n\n if ignore_dnt is not None:\n if not isinstance(ignore_dnt, bool):\n raise TypeError(\"Invalid type for `ignore_dnt`, type has to be `bool`\")\n\n self._ignore_dnt = ignore_dnt\n\n @property\n def time_zone(self):\n # type: () -> string_types\n \"\"\"Gets the time_zone of this AnalyticsLicenseUpdateRequest.\n\n\n :return: The time_zone of this AnalyticsLicenseUpdateRequest.\n :rtype: string_types\n \"\"\"\n return self._time_zone\n\n @time_zone.setter\n def time_zone(self, time_zone):\n # type: 
(string_types) -> None\n \"\"\"Sets the time_zone of this AnalyticsLicenseUpdateRequest.\n\n\n :param time_zone: The time_zone of this AnalyticsLicenseUpdateRequest.\n :type: string_types\n \"\"\"\n\n if time_zone is not None:\n if not isinstance(time_zone, string_types):\n raise TypeError(\"Invalid type for `time_zone`, type has to be `string_types`\")\n\n self._time_zone = time_zone\n\n @property\n def custom_data_field_labels(self):\n # type: () -> AnalyticsLicenseCustomDataFieldLabels\n \"\"\"Gets the custom_data_field_labels of this AnalyticsLicenseUpdateRequest.\n\n Labels for CustomData fields\n\n :return: The custom_data_field_labels of this AnalyticsLicenseUpdateRequest.\n :rtype: AnalyticsLicenseCustomDataFieldLabels\n \"\"\"\n return self._custom_data_field_labels\n\n @custom_data_field_labels.setter\n def custom_data_field_labels(self, custom_data_field_labels):\n # type: (AnalyticsLicenseCustomDataFieldLabels) -> None\n \"\"\"Sets the custom_data_field_labels of this AnalyticsLicenseUpdateRequest.\n\n Labels for CustomData fields\n\n :param custom_data_field_labels: The custom_data_field_labels of this AnalyticsLicenseUpdateRequest.\n :type: AnalyticsLicenseCustomDataFieldLabels\n \"\"\"\n\n if custom_data_field_labels is not None:\n if not isinstance(custom_data_field_labels, AnalyticsLicenseCustomDataFieldLabels):\n raise TypeError(\"Invalid type for `custom_data_field_labels`, type has to be `AnalyticsLicenseCustomDataFieldLabels`\")\n\n self._custom_data_field_labels = custom_data_field_labels\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.openapi_types):\n value = getattr(self, attr)\n if value is None:\n continue\n if isinstance(value, list):\n if len(value) == 0:\n continue\n result[self.attribute_map.get(attr)] = [y.value if isinstance(y, Enum) else y for y in [x.to_dict() if hasattr(x, \"to_dict\") else x for x in value]]\n elif hasattr(value, \"to_dict\"):\n result[self.attribute_map.get(attr)] = value.to_dict()\n elif isinstance(value, Enum):\n result[self.attribute_map.get(attr)] = value.value\n elif isinstance(value, dict):\n result[self.attribute_map.get(attr)] = {k: (v.to_dict() if hasattr(v, \"to_dict\") else v) for (k, v) in value.items()}\n else:\n result[self.attribute_map.get(attr)] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, AnalyticsLicenseUpdateRequest):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n","sub_path":"bitmovin_api_sdk/models/analytics_license_update_request.py","file_name":"analytics_license_update_request.py","file_ext":"py","file_size_in_byte":6789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"229126374","text":"import os\nif os.name != 'nt':\n try:\n from twisted.internet import epollreactor\n epollreactor.install()\n except:\n pass\nelse:\n try:\n from twisted.internet import iocpreactor as iocpreactor\n iocpreactor.install()\n except:\n pass\n\nimport glob\nimport importlib\nimport sys\nfrom twisted.internet import reactor\nimport setting\nfrom cloud.log import LOG\nfrom 
cloud.protocol import WebsocketFactory\n\n\ndef add_search_path(_path):\n LOG.debug(\"add search path \" + _path)\n sys.path.insert(0, _path)\n\n\ndef _import(_path):\n LOG.debug(\"import module with path \" + _path)\n return importlib.import_module(_path)\n\n\nclass Config(object):\n \"\"\"docstring for Config\"\"\"\n def __init__(self, root):\n self.projects = []\n self.root = root\n self.search()\n\n def search(self):\n search_path = self.root + \"/*/*_setting.py\"\n _p_path_list = glob.glob(search_path)\n if not _p_path_list:\n LOG.error(\"Not found any project in path \" + self.root)\n return None\n for path in _p_path_list:\n LOG.debug(\"Add project \" + path)\n self.projects.append(Project(path))\n LOG.debug(\"Found %d project in path\" % len(self.projects))\n\n def get_module(self, path):\n path = path[-1] == \"/\" and path[:-1] or path\n path = path[0] == \"/\" and path[1:] or path\n p_name, module = \"/\" in path and path.split(\"/\", 1) or (path, \"\")\n for p in self.projects:\n if p_name == p.name:\n return p, p.get_module(module)\n\n\nclass Project(object):\n def __init__(self, setting_path):\n (p_root, setting_file) = os.path.split(setting_path)\n name = p_root.split(\"/\")[-1:][0]\n add_search_path(p_root)\n module = _import(os.path.splitext(setting_file)[0])\n self.name = name\n self.module = module\n self.urls = module.urls\n self.apps = {}\n\n def add_app(self, key, value):\n self.apps[key] = value\n return value\n\n def get_app(self, key, default=None):\n if key not in self.apps and default is not None:\n self.apps[key] = default\n return self.apps.get(key, None)\n\n def get_conf(self, key):\n print(\"get conf %s and val %s\" %\n (key, self.module.__dict__.get(key, None)))\n return self.module.__dict__.get(key, None)\n\n def get_module(self, module):\n for url in self.urls:\n LOG.info(\"url 0 [ %s ] , module [ %s ]\" % (url[0], module))\n if url[0] == module:\n LOG.debug(\"found it \" + url[1][\"target\"])\n module = _import(url[1][\"target\"])\n return module\n else:\n LOG.error(\"not found module named %s in file setting\" % module)\n\n\nif __name__ == '__main__':\n LOG.info(\"cloudbreath startup\")\n reactor.listenTCP(setting.PORT, WebsocketFactory(Config(setting.webroot)))\n reactor.run()\n","sub_path":"src/cloud/startup.py","file_name":"startup.py","file_ext":"py","file_size_in_byte":2948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"393519391","text":"# Refer to https://developers.google.com/api-client-library/python/\nfrom apiclient.discovery import build\nfrom httplib2 import Http\nfrom oauth2client import file, client, tools\nfrom googleapiclient.http import MediaFileUpload, MediaIoBaseDownload\nimport googleapiclient\nimport mimetypes, io\nfrom pprint import pprint\n\n# NOTE. May throw `googleapiclient.errors.HttpError` with code '403'. This means that the quota usage has exceeded.\n# Refer to \"https://developers.google.com/drive/v3/web/handle-errors#403_daily_limit_exceeded\" and below. There are\n# more than one 403 cases.\n#\n# These error should be handled by the API caller code.\nclass GoogleDriveAPI():\n def __init__(self):\n self.service = BuildService.build('drive', 'v3', ['https://www.googleapis.com/auth/drive.file'])\n\n # `filename` doesn't have to have extension. However, `name` field in `file_metadata` and `filename` parameter\n # for `MediaFileUpload` MUST be the same. 
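#\n # A hypothetical usage sketch (the file name is illustrative, not from this project):\n # api = GoogleDriveAPI()\n # api.create('notes.txt', folder_id='root') # mimetype is guessed from the '.txt' extension\n # 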
(I initially assumed you could pick an arbitrary display name for the upload here, but you cannot.)\n #\n # \`convert_to\` with value 'None' will just 'upload' instead of 'creating a google file type'. For example, use\n # \"application/vnd.google-apps.document\" to 'create' a google document. \n def create(self, target_file, filename=None, folder_id=\"root\", mimetype=None, convert_to=None):\n if filename is None:\n filename = target_file\n\n # Guess mimetype with target_file. So, target_file with extension doesn't need an explicit mimetype.\n if mimetype is None:\n mimetype = mimetypes.guess_type(target_file)[0]\n # mimetype is still \`None\` if, say, target_file has no extension. This will still work but in case of a\n # text file without extension \`.txt\`, you are just uploading a file that needs to be downloaded to view.\n # With the extension however you can view it on Gdrive.\n\n file_metadata = {\n 'name': filename,\n 'mimeType': convert_to,\n 'parents': [folder_id] # Needs to be an array\n }\n media = MediaFileUpload(target_file, mimetype=mimetype, resumable=True)\n request = self.service.files().create(media_body=media, body=file_metadata)\n # Good for confirming and testing what kind of http request is being made.\n # pprint(request.to_json())\n result = request.execute()\n return result\n\n # TODO. \`getGdriveNotes\` is doing this by passing a parameter. I want it to be an option\n # to get a list of trashed items or not.\n # Also account for 'nextPageToken' thing for long responses.\n def list(self, q=None, fields=None, **kwargs):\n request = self.service.files().list(q=q, fields=fields, **kwargs)\n result = request.execute()\n return result\n\n # \`fields\` is a comma separated string of which fields to display. However, \`None\` will return\n # (1) kind (2) id (3) name and (4) mimetype.\n def getMetadata(self, file_id, fields=None):\n request = self.service.files().get(fileId=file_id, fields=fields)\n result = request.execute()\n return result\n\n # Method name could be misleading, but the result is file-like in one aspect: you can\n # get the content by calling \`read\` on it. Hence the \`seek(0)\` before returning.\n # \n # Use \`getMetadata\` to get the target file's mimetype, and convert to a wanted mimetype\n # if necessary BEFORE calling this method. 
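#\n # A minimal sketch of that flow (assuming file_id references a Google Doc):\n # meta = self.getMetadata(file_id) # default fields include the mimeType\n # fh = self.getFile(file_id, 'text/plain') # export the Doc as plain text\n # text = fh.read().decode('utf-8')\n # 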
For example, if your target file is of google\n # document type, convert it to \`text/plain\` and use that mimetype to call this function.\n #\n # Refer to [this][1] document for convertible google MIME types and 'normal' MIME types.\n #\n # [1]: https://developers.google.com/drive/v3/web/manage-downloads\n def getFile(self, file_id, mimetype):\n request = self.service.files().export_media(fileId=file_id, mimeType=mimetype)\n fh = io.BytesIO()\n downloader = MediaIoBaseDownload(fh, request)\n done = False\n while done is False:\n status, done = downloader.next_chunk()\n # print(\"Download {}.\".format(int(status.progress() * 100)))\n\n fh.seek(0)\n \n return fh\n\n # Note that moving this folder into another folder afterwards requires \`moveToFolder\`\n # ALSO, naming the folder \`foo/bar\` won't create nor move 'bar' directory into 'foo'.\n # It creates a directory name called 'foo/bar'.\n #\n # ALSO note that the same directory name is allowed; it won't override, but will create another\n # directory with the same name.\n def createFolder(self, foldername, parent_id=None):\n file_metadata = {\n 'name': foldername,\n 'mimeType': 'application/vnd.google-apps.folder'\n }\n if parent_id is not None:\n file_metadata['parents'] = [parent_id] # attach to the given parent instead of the Drive root\n request = self.service.files().create(body=file_metadata)\n result = request.execute()\n return result\n\n # Note that \`target_id\` can be BOTH file and folder id.\n def moveToFolder(self, target_id, target_parent_id):\n pass\n\n # Returns the path ID of the Gdrive folder if successful. None if not.\n def getGdriveStoragePathId(self, pathname):\n dirs = getGdriveDirectories(pathname)\n\n # Default is \"Nowhere/Gyst/Notes\" but should be able to handle any path \n cur_dir_id = \"root\" # 'id' is root for root.\n flag = False\n # For creating error message.\n split_flag_message = [\"Please provide a valid folder to your Gdrive folder. Its folder id will be retrieved and set as a value for \" + \\\n \"\`GDRIVE_GYSTNOTES_DIR_ID\`. However, setting its value manually won't work.\"]\n for i, d in enumerate(dirs):\n search_dir = d\n # List mimeType of google folders whose parents are \`cur_dir_id\` and have name \`search_dir\`.\n result = self.list(q=\"parents='{}' and name='{}' and \".format(cur_dir_id, search_dir) + \\\n \"mimeType='application/vnd.google-apps.folder'\",\n fields=\"files(id)\")\n result = result[\"files\"]\n\n if len(result) > 1:\n flag = True\n split_flag_message.insert(0, \"More than one directory matches '{}' on your GDrive. The path MUST be unique.\".format(pathname))\n\n elif len(result) < 1:\n flag = True\n default_storage_path = GystConfig.Inst.getGdriveDefaultStoragePath()\n split_flag_message.insert(0, \"Full directory doesn't exist. Create the directory. 
Default is `{}`.\".format(default_storage_path))\n\n # Prematurely exits due to error\n if flag:\n message = \" \".join(split_flag_message)\n # Except this error message then print with `gyst_logger` or something.\n raise InvalidGdrivePathname(message, pathname)\n\n cur_dir_id = result[0][\"id\"] \n\n return cur_dir_id\n\nclass InvalidGdrivePathname(Exception):\n def __init__(self, message, pathname):\n self.message = message\n self.pathname = pathname\n\nclass BuildService():\n def build(service_name, version, scopes):\n bs = BuildService(service_name, version, scopes)\n service = bs.service\n\n return service\n\n # `scopes` could've been `[]` but took it out so that it throws error if not given.\n def __init__(self, service_name, version, scopes):\n self.storage_filepath = 'resources/credentials.json'\n self.secret_filepath = 'resources/client_secret.json'\n self.service_name = service_name\n self.version = version\n self.scopes = scopes\n self.service = None\n\n SCOPES = self.scopes\n storage_filepath = self.storage_filepath\n secret_filepath = self.secret_filepath\n service_name = self.service_name\n version = self.version\n\n store = file.Storage(storage_filepath)\n creds = store.get()\n if not creds or creds.invalid:\n flow = client.flow_from_clientsecrets(secret_filepath, SCOPES)\n creds = tools.run_flow(flow, store)\n\n self.service = build(service_name, version, http=creds.authorize(Http()))\n\n# `path` is directory whether it ends with '/' or not\n# Path MUST NOT have sequence of empty strings as a dir in the path.\n# Path MUST NOT have \"/\" in a directory name; Google drive allows it but for the\n# sake of this method, NOONONONONONONO.\ndef getGdriveDirectories(path):\n if path[-1] == \"/\":\n path = path[:-1]\n\n if path[0] == \"/\":\n path = path[1:]\n\n # GUARANTEES first and second are not empty string, unless \"//troll/path//\"\n dirs = path.split(\"/\")\n\n # Check\n for d in dirs:\n if d.strip() == \"\":\n raise ValueError(\"A directory path in Gdrive MUST NOT have a sequence of empty space as a directory.\")\n\n return dirs\n","sub_path":"src/google_drive_api/google_drive_api.py","file_name":"google_drive_api.py","file_ext":"py","file_size_in_byte":8697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"384153657","text":"#!/usr/bin/env python3\n\nimport json\nimport random\nimport subprocess\nimport serial\nimport sys\n\n#from pyavrutils import AvrGcc\nfrom argparse import ArgumentParser, RawDescriptionHelpFormatter\n\n\ndef main(args):\n\n input_num = int(args.input_num)\n step = int(args.step)\n layer_num = int(args.layer_num)\n repeat = int(args.repeat)\n\n # for step<0, make sure the last layer number is >0\n if (input_num + step * (layer_num-1)) <= 0:\n print('the number of elements in a layer must larger than 0 ')\n sys.exit()\n\n #erase the result file\n# filename = 'result_RAM' + args.input_num + '_' + args.step + '_' + args.layer_num + '_' + args.repeat + '.csv'\n filename = 'result_Flash_' + args.input_num + '_' + args.step + '_' + args.layer_num + '_' + args.repeat + '.csv'\n if filename != None:\n open(filename, 'w').close()\n\n #start\n i = input_num \n while i < (input_num + abs(step) * repeat):\n\n \n\t#create ANN\n# subprocess.call('python3 createANN.py ' + str(i) + '/'+ str(i+step) + ' NeuralNetwork.cc', shell=True)\n\n string = str(i)\n for j in range(1, layer_num):\n string += '/'+str(i+step*j) \n# add activation functions\n#string += '/' + str(i+step*j) + ',sigmoid' \n subprocess.call('python3 
createANN.py ' + string + ' NeuralNetwork.cc', shell=True)\n\n \t#create input vector\n subprocess.call('python3 createInput.py '+ str(i), shell=True)\n\n \t#compile\n subprocess.call('avr-g++ -I\"/home/cyj1699/git/quicknet/quicknet/include\" -Wall -std=c++11 -mmcu=atmega128rfa1 -DF_CPU=16000000UL main.cc NeuralNetwork.cc src/Layer.cc src/Math.cc src/Network.cc src/ses_timer.cc -c', shell=True)\n\n subprocess.call('avr-g++ -Wl,-Map,quicknet.map,--cref -mrelax -Wl,--gc-sections -mmcu=atmega128rfa1 -o \"quicknet.elf\" ./main.o ./NeuralNetwork.o ./Layer.o ./Math.o ./Network.o ./ses_timer.o -L\"/home/cyj1699/git/quicknet/quicknet/lib\" -llcd -luart', shell=True) \n\n subprocess.call('avr-objcopy -R .eeprom -R .fuse -R .lock -R .signature -O ihex quicknet.elf \"quicknet.hex\"', shell=True)\n\n\t#upload\n\n subprocess.call('/usr/bin/avrdude -pm128rfa1 -cstk500 -P/dev/ttyACM0 -Uflash:w:quicknet.hex:a', shell=True)\n\n\n\t#write result to file\n\n while True:\n\n ser = serial.Serial('/dev/ttyUSB0', timeout=None, baudrate=57600) # open serial port\n\n # print('waiting for input...')\n line = ser.readline()\n # print('...received ' + str(line))\n \n with open(filename, 'a') as f:\n f.write(line.decode('ascii'))\n \n break\n \n i=i+abs(step)\n\nif __name__ == '__main__':\n parser = ArgumentParser()\n\n parser.add_argument('input_num', type=str, help='Number of Input')\n parser.add_argument('step', type=str, help='network steps')\n parser.add_argument('layer_num', type=str, help='number of layers')\n parser.add_argument('repeat', type=str, help='repeat times')\n\n main(parser.parse_args())\n\n","sub_path":"quicknet/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":3007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"268463635","text":"# -*- coding: utf-8 -*-\n##############################################################################\n# Copyright (c) 2021-Present IjVine Corporation ()\n\n##############################################################################\nfrom odoo import api,fields,models\n\n\nclass MultiChannelSaleConfig(models.TransientModel):\n\t_name = 'multi.channel.sale.config'\n\t_description = 'Channel Sale Config'\n\t_inherit = 'res.config.settings'\n\n\tcron_import_partner = fields.Many2one('ir.cron','Import Customer Scheduler',readonly=True)\n\tcron_import_category = fields.Many2one('ir.cron','Import Category Scheduler',readonly=True)\n\tcron_import_product = fields.Many2one('ir.cron','Import Product Scheduler',readonly=True)\n\tcron_import_order = fields.Many2one('ir.cron','Import Order Scheduler',readonly=True)\n\tcron_evaluation = fields.Many2one('ir.cron','Cron Evaluation Scheduler',readonly=True)\n\n\tavoid_duplicity = fields.Boolean(\n\t\tstring = 'Avoid Duplicity (Default Code)',\n\t\thelp = \"Check this if you want to avoid the duplicity of the imported products. 
In this case the product with the same default code/sku will not be created again and again.\"\n\t)\n\n\tavoid_duplicity_using = fields.Selection(\n\t\tselection = [\n\t\t\t('default_code','Default Code/SKU'),\n\t\t\t('barcode','Barcode/UPC/EAN/ISBN'),\n\t\t\t('both','Both')\n\t\t],\n\t\tstring = \"Avoid Duplicity Using\",\n\t\tdefault = 'both',\n\t\thelp = \"With the Both option, uniqueness is checked on either the SKU/Default Code or the UPC/EAN/Barcode using an OR operator, and it should always be given high priority\"\n\t)\n\n\tdef set_values(self):\n\t\tsuper(MultiChannelSaleConfig,self).set_values()\n\t\tIrDefault = self.env['ir.default'].sudo()\n\t\tIrDefault.set(\n\t\t\t'multi.channel.sale.config',\n\t\t\t'avoid_duplicity',\n\t\t\tself.avoid_duplicity\n\t\t)\n\t\tIrDefault.set(\n\t\t\t'multi.channel.sale.config',\n\t\t\t'avoid_duplicity_using',\n\t\t\tself.avoid_duplicity_using\n\t\t)\n\t\treturn True\n\n\t@api.model\n\tdef get_values(self):\n\t\tres = super(MultiChannelSaleConfig,self).get_values()\n\t\tIrDefault = self.env['ir.default'].sudo()\n\t\tres.update(\n\t\t\t{\n\t\t\t\t'avoid_duplicity': IrDefault.get('multi.channel.sale.config','avoid_duplicity'),\n\t\t\t\t# Fall back to 'both' when no default value has been stored yet.\n\t\t\t\t'avoid_duplicity_using': IrDefault.get(\n\t\t\t\t\t'multi.channel.sale.config',\n\t\t\t\t\t'avoid_duplicity_using',\n\t\t\t\t) or 'both',\n\t\t\t\t'cron_import_partner': self.env.ref('ijvine_ebay_base.cron_import_partner').id,\n\t\t\t\t'cron_import_category': self.env.ref('ijvine_ebay_base.cron_import_category').id,\n\t\t\t\t'cron_import_product': self.env.ref('ijvine_ebay_base.cron_import_product').id,\n\t\t\t\t'cron_import_order': self.env.ref('ijvine_ebay_base.cron_import_order').id,\n\t\t\t\t'cron_evaluation': self.env.ref('ijvine_ebay_base.cron_evaluation').id,\n\t\t\t}\n\t\t)\n\t\treturn res\n","sub_path":"ijvine_ebay_base/models/core/res_config.py","file_name":"res_config.py","file_ext":"py","file_size_in_byte":2684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"442069047","text":"# https://github.com/xiongzihua/pytorch-YOLO-v1\nimport torch\nimport torchvision\nimport cv2\nimport numpy as np\n\nimport lib.pyt.yolov1.model as yolo_model\nimport lib.pyt.yolov1.loss as yolo_loss\nimport lib.pyt.yolov1.dataset as yolo_data\nimport lib.pyt.yolov1.predict as yolo_predict\nimport lib.utils.logger as logger\nimport lib.utils.Config as cfg\n\nimport lib.utils.ProgressBar as j_bar\n\n\nyoloCfg = cfg.Config(cfgfile='yolov1_voc.yml').CFGData\n\nuseGpu = yoloCfg.CONFIG.USEGPU and torch.cuda.is_available()\n\nlearning_rate = yoloCfg.TRAIN.LEARNING_RATE\n\nnet = yolo_model.resnet50()\nprint('load pre-trained model')\nresnet = torchvision.models.resnet50(pretrained=True)\nnew_state_dict = resnet.state_dict()\ndd = net.state_dict()\nfor k in new_state_dict.keys():\n    print(k)\n    if k in dd.keys() and not k.startswith('fc'):\n        print('yes')\n        dd[k] = new_state_dict[k]\nnet.load_state_dict(dd)\ncriterion = yolo_loss.YoLoV1Loss(config=yoloCfg)\nif useGpu:\n    print('cuda', torch.cuda.current_device(), torch.cuda.device_count())\n    net.cuda()\n\nparams=[]\nparams_dict = dict(net.named_parameters())\nfor key,value in params_dict.items():\n    if key.startswith('features'):\n        params += [{'params':[value],'lr':learning_rate * 1}]\n    else:\n        params += [{'params':[value],'lr':learning_rate}]\noptimizer = torch.optim.SGD(params, lr=learning_rate, momentum=0.9, weight_decay=5e-4)\n\ntrain_dataset = yolo_data.yoloDataset(config=yoloCfg, train=True,transform = [torchvision.transforms.ToTensor()] )\ntrain_loader = 
torch.utils.data.DataLoader(train_dataset,batch_size=yoloCfg.TRAIN.BATCH_SIZE,shuffle=True)\n\ntest_dataset = yolo_data.yoloDataset(config=yoloCfg, train=False,transform = [torchvision.transforms.ToTensor()] )\ntest_loader = torch.utils.data.DataLoader(test_dataset,batch_size=yoloCfg.TEST.TEST_BATCH_SIZE,shuffle=False)\n\nlog = logger.Logger(yoloCfg.LOG.LOG_DIR)\nbar = j_bar.ProgressBar(yoloCfg.TRAIN.EPOCHS, len(train_loader), \"Loss : %.3f; Total Loss : %.3f\")\nbest_test_loss = np.inf\nnet.train()\nfor epoch in range(1, yoloCfg.TRAIN.EPOCHS + 1):\n net.train()\n if epoch == 30:\n learning_rate=0.0001\n if epoch == 40:\n learning_rate=0.00001\n for param_group in optimizer.param_groups:\n param_group['lr'] = learning_rate\n\n print('Learning Rate for this epoch: {}'.format(learning_rate))\n\n #region train\n total_loss = 0.\n torch.cuda.empty_cache()\n for i,(images,target) in enumerate(train_loader):\n images = torch.autograd.Variable(images)\n target = torch.autograd.Variable(target)\n if useGpu:\n images,target = images.cuda(),target.cuda()\n\n pred = net(images)\n loss = criterion(pred,target)\n total_loss += loss.item()\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n bar.show(epoch, loss.item(), total_loss / (i+1))\n #endregion\n\n #region test\n torch.cuda.empty_cache()\n validation_loss = 0.0\n net.eval()\n test_bar = j_bar.ProgressBar(1, len(test_loader), \"vloss:%.3f, total loss:%.3f\")\n for i,(images,target) in enumerate(test_loader):\n images = torch.autograd.Variable(images)\n target = torch.autograd.Variable(target)\n if useGpu:\n images,target = images.cuda(),target.cuda()\n\n pred = net(images)\n loss = criterion(pred,target)\n validation_loss += loss.item()\n test_bar.show(i, loss.item(), validation_loss / (i+1))\n validation_loss /= len(test_loader)\n log.logTofile(\"epoch %d trainloss %.3f valloss %.3f\" % epoch, total_loss / (i+1), validation_loss)\n\n if best_test_loss > validation_loss:\n best_test_loss = validation_loss\n torch.save(net.state_dict(),'outputs/best.pth')\n torch.save(net.state_dict(),'outputs/yolo.pth')\n #endregion\n\n #region detect one image\n predict = yolo_predict.YOLOPredict(net, datatype=\"voc\")\n image = cv2.imread(\"testImages/03.jpg\")\n image = predict.drawBoxes(image, {\"img_id\" : \"03\",\n \"height\" : image.shape[0],\n \"width\" : image.shape[1]\n }, output=\"outputs/yolov1_%03d.jpg\" % epoch)\n\n image = cv2.cvtColor(image,cv2.COLOR_BGR2RGB)\n info = {'loss': total_loss / (i+1)}\n for tag, value in info.items():\n log.scalar_summary(tag, value, epoch)\n imageInfo = {'images': image}\n for tag, value in imageInfo.items():\n log.image_summary(tag, value, epoch)\n #endregion","sub_path":"03.DeepLearning-Detection/01.yolov1/voc_pyt_train.py","file_name":"voc_pyt_train.py","file_ext":"py","file_size_in_byte":4503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"600549745","text":"\"\"\"\nReplace empty cells with replacement string.\n\"\"\"\nimport pandas as pd\nimport csv\ninput_file = \"input.csv\"\noutput_file = \"output.csv\"\nreplacement = \"foobar\"\n\n# Open and read input file\ninput_csv_file = open(input_file, 'r', encoding='utf8')\ninput_csv_reader = csv.reader(input_csv_file)\n\n# Open and write to output file\noutput_csv_file = open(output_file, 'w', encoding='utf8')\noutput_csv_writer = csv.writer(output_csv_file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n\n# Loop through csv rows\nfor row in input_csv_reader:\n # Replace empty cells 
with replacement string\n    for i in range(len(row)):\n        if row[i] == '':\n            row[i] = replacement\n\n    output_csv_writer.writerow(row)\n","sub_path":"other_scripts/csv_empty_cell_modifier/csv_empty_cell_modifier.py","file_name":"csv_empty_cell_modifier.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"383547419","text":"from operator import attrgetter\n\nli = [9,1,8,2,7,3,6,4,5]\nli1 = [-6,-5,-4,1,2,3]\nci = ['History', 'Math', 'Physics', 'CompSci']\ndi = {'name': 'Corey', 'job': 'programming', 'age': None, 'os': 'Mac'}\n\n## print(8 in li)\nfor index, item in enumerate(ci, start=1):\n    print(index, item)\nprint(' - '.join(ci))\nprint(' - '.join(ci).split(' - '))\n# s_li = sorted(li, reverse=True) \"\"\" The sorted function, more flexible \"\"\"\n# li.sort() \"\"\" The sort method \"\"\"\n# print('Sorted Variable:\\t', s_li)\n# print('Sorted Variable:\\t',)\n# print('Original Variable:\\t',)\n# s_di = sorted(di)\n# print('Dict\\t', s_di)\n# print(li1)\n\n\nclass Employee():\n    def __init__(self, name, age, salary):\n        self.name = name\n        self.age = age\n        self.salary = salary\n\n    def __repr__(self):\n        return '({},{},{})'.format(self.name, self.age, self.salary)\n\ne1 = Employee('Carl', 37, 70000)\ne2 = Employee('Sarah', 29, 80000)\ne3 = Employee('John', 43, 90000)\nemployees = [e1,e2,e3]\n\"\"\"Custom function, attrgetter or lambda\"\"\"\n# def e_sort(emp):\n#     return emp.name\n# s_employess = sort(employees, key=e_sort)\n# s_employees = sorted(employees, key=attrgetter('age'))\ns_employees = sorted(employees, key=lambda e: e.name)\n\nprint(s_employees)","sub_path":"sort-li-tu-it.py","file_name":"sort-li-tu-it.py","file_ext":"py","file_size_in_byte":1235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"135872707","text":"import tweepy\nimport sys\nimport csv\n\ndef get_time_line(screen_name, out):\n\tconsumer_key = '40mq26okBuI028ks2OMOvgKgi'\n\tconsumer_secret = 'gth6CVhz1DwfOkUIuA8rYLv47VUFgN6UDFgD1W68UAJ0Xe61Jr'\n\taccess_key = '230900774-gjjICUPMFlwUg0iXNoEywgfXXh3ntNDxc9SGw0jM'\n\taccess_secret = 'NuWG9BVfFlm6QdejcG0JtSnbngJXJ8oGhntNkTryPQni7'\n\n\tauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n\tauth.set_access_token(access_key, access_secret)\n\tapi = tweepy.API(auth)\n\n\tst = []\n\n\tfor status in tweepy.Cursor(api.home_timeline).items(10):\n\t\tst.append([status.text.encode(\"utf-8\")])\n\t\n\twith open(out+'%s.csv' % screen_name, 'wb') as f:\n\t\twriter = csv.writer(f)\n\t\twriter.writerow([\"text\"])\n\t\twriter.writerows(st)\n\t#print st\n\t\n\ndef main():\n\tget_time_line(sys.argv[1], sys.argv[2])\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"twitter/twitter_time_line.py","file_name":"twitter_time_line.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"550794845","text":"import time\n\nimport redis, os\nfrom flask import Flask\n\napp = Flask(__name__)\n\nREDIS_HOST = os.getenv('REDIS_HOST')\nif REDIS_HOST is None: REDIS_HOST = \"redis\" \n\nREDIS_PORT = os.getenv('REDIS_PORT')\nif REDIS_PORT is None: REDIS_PORT = 6379\n\ncache = redis.Redis(host=REDIS_HOST, port=REDIS_PORT)\n\n\ndef get_hit_count():\n    retries = 5\n    while True:\n        try:\n            return cache.incr('hits')\n        except redis.exceptions.ConnectionError as exc:\n            if retries == 0:\n                raise exc\n            retries -= 1\n            time.sleep(0.5)\n\n\n@app.route('/')\ndef hello():\n    count = 
get_hit_count()\n return 'Hello World! I have been seen {} times.\\n'.format(count)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"82872347","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 18 13:59:51 2013\n\n@author: \n\"\"\"\n\n#Importamos los modulos necesarios\nfrom numpy import *\nimport matplotlib \nfrom matplotlib import cm \nfrom matplotlib import pyplot as plt \nfrom mpl_toolkits.mplot3d import Axes3D\n\n#Definimos nuestra area de trbajo\nfig=plt.figure()\nax=fig.add_subplot(111, projection=\"3d\")\n\n#Creamos la rejilla circular\nr_a=0.5\nr_b=5.0\ncirculos=50\nlineas=50\norigen=(0,0)\nr,t=meshgrid(linspace(r_a,r_b,circulos),linspace(0,2*pi,lineas))\nx=r*cos(t)\ny=r*sin(t)\ndr=abs(r[0][1]-r[0][0])\ndt=abs(t[1][0]-t[0][0])\n\n#Damos el valor de p para una malla\nmax=50\np=array([[0.0 for i in range(50)]for i in range (50)])\n\n#Inicializamos la malla en zeros\nfor i in range(50):\n for j in range(50):\n p[i][j]=0.0\n\n#Colocamos valores de frontera\n#La primera entrada es la coordenada angular\n#La segunda entrada es la coordenada radial.\nfor i in range(max):\n p[i][0]=0.0\n p[i][49]=10.0\n \n#Algoritmo de iteracion\nfor k in range(100):\n for i in range (0,max-1):\n for j in range (0,max-1):\n if (j==0):\n p[i][j]=p[i][j]\n elif (j==49):\n p[i][j]=p[i][j]\n else:\n #En caso de dejarlo en cero solo tendremos 2 cilindros concentricos, el interior a 100 y el exterior a -50.\n #p[i][j]=0.0\n \n #Expresion para el laplaciano en coordenadas polares. Sin termino de densidad de corriente tenemos solo la ecuacion de Laplace en coordenadas polares.\n p[i][j]=((dt*dr**2.*r[i][j]**2.)/(2.*dt*r[i][j]**2.+dr**2.))*((p[i][j-1]+p[i][j+1])/dr**2.+(1/(2.*r[i][j]*dr))*(p[i][j+1]-p[i][j-1])+(1/r[i][j]**2.0)*(p[i-1][j]+p[i+1][j]))\n \n #Si agregamos un termino de densidad de corriente tenemos la solucion para la ecuacion de Poisson en coordenadas polares.\n #En este ejemplo una dependencia del inverso del radio al cubo\n #p[i][j]=((dt*dr**2.*r[i][j]**2.)/(2.*dt*r[i][j]**2.+dr**2.))*((2.*r[i][j]**3.)+(p[i][j-1]+p[i][j+1])/dr**2.+(1/(2.*r[i][j]*dr))*(p[i][j+1]-p[i][j-1])+(1/r[i][j]**2.0)*(p[i-1][j]+p[i+1][j]))\n \nz=p\n#Graficamos X,Y,Z y agregamos mapa de color\nsurf=ax.plot_surface(x,y,z, rstride=2, cstride=2, linewidth=0.5, cmap=cm.coolwarm)\n#Colocamos etiquetas en los ejes\nax.set_zlabel(\"Potencial electrico\")\nax.set_xlabel(\"x\")\nax.set_ylabel(\"y\")\nax.set_title(\" Solucion a la ecuacion de Poisson en coordenadas polares\")\n\n#Agregamos una barra indicadora\n#fig.colorbar(surf, shrink=0.5, aspect=5)\n#Dibujamos las lineas de nivel en un plano inferior a la grafica\n#cset=ax.contourf(x,y,z, zdir=\"z\", offset=100, cmap=cm.coolwarm)\n#Delimitamos nuestra grafica\n#ax.set_zlim(-100,100)\n#Mostramos la gráfica\nplt.show()","sub_path":"Codigos Python/ProblemaPoisson (1).py","file_name":"ProblemaPoisson (1).py","file_ext":"py","file_size_in_byte":2774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"312818710","text":"turmas ={}\ndef calcularMedia(dic,nomeTurma,matricula):\n for a in dic[nomeTurma]:\n if a[0] == matricula:\n notas = a[1]\n soma = 0 \n for i in notas:\n soma += i \n media = soma/len(notas)\n a.append(media)\n print(\"Média:%.1f\"%media)\n print()\ndef adicionarTurma(dic,nomeTurma):\n dic[nomeTurma] = []\n print(\"Turma adicionada.\")\n print()\ndef 
adicionarAlunoNotas(dic,nomeTurma,matricula,listaNotas):\n aluno = [matricula,listaNotas]\n dic[nomeTurma].append(aluno)\n print(\"Aluno(a) e notas adicionados.\")\n print()\ndef CalcularMediaTurma(dic,nomeTurma):\n soma = 0\n alunos = 0\n for b in dic[nomeTurma]:\n soma += b[2]\n alunos += 1\n mediaTurma = soma/alunos\n print(\"Média da Turma:%.1f\"%mediaTurma)\n print()\ndef menu():\n opçao = eval(input(\"(1)-Adicionar Turma.\\n(2)-Adicionar Aluno e Notas.\\n(3)-Calcular média de um Aluno.\\n(4)-Calcular média de uma Turma.\\n(5)-Sair.\\nO que você deseja fazer?\"))\n return opçao\nvalor = menu()\nwhile valor >0 and valor <5:\n if valor == 1:\n nomeTurma = input(\"Nome da turma:\")\n adicionarTurma(turmas,nomeTurma)\n valor = menu()\n elif valor == 2:\n nomeTurma = input(\"Digite o nome da turma na qual deseja adicionar esse aluno(a):\")\n matricula = input(\"Digite a matricula do aluno(a):\")\n notas = []\n quantNotas = eval(input(\"Quantas notas o aluno(a) tem?\"))\n for k in range(quantNotas):\n if k == 0:\n nota = float(input(\"Primeira nota:\"))\n notas.append(nota)\n else:\n nota = float(input(\"Próxima nota:\"))\n notas.append(nota)\n adicionarAlunoNotas(turmas,nomeTurma,matricula,notas)\n valor = menu()\n elif valor == 3:\n matricula = input(\"Digite a matricula do aluno(a):\")\n turma = input(\"Digite a turma do aluno(a):\")\n calcularMedia(turmas,turma,matricula)\n valor = menu()\n elif valor == 4:\n print()\n print(\"-->Lembrando que para calcular a média da turma, a média de todos os alunos já deverão ter sido calculadas.\")\n print()\n nomeTurma = input(\"Nome da Turma:\")\n CalcularMediaTurma(turmas,nomeTurma)\n valor = menu()\nprint()\nprint(\"Programa encerrado.\")\n","sub_path":"Questão4.py","file_name":"Questão4.py","file_ext":"py","file_size_in_byte":2135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"568014552","text":"# Copyright 2011 Eldar Nugaev\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport datetime\nimport json\n\nfrom nova.api.openstack import v2\nfrom nova.api.openstack.v2 import extensions\nfrom nova.api.openstack import wsgi\nimport nova.compute\nfrom nova import test\nfrom nova.tests.api.openstack import fakes\nimport nova.utils\n\n\ndt = datetime.datetime.utcnow()\n\n\ndef fake_get_actions(self, _context, instance_uuid):\n return [\n {'action': 'rebuild', 'error': None, 'created_at': dt},\n {'action': 'reboot', 'error': 'Failed!', 'created_at': dt},\n ]\n\n\ndef fake_instance_get(self, _context, instance_uuid):\n return {'uuid': instance_uuid}\n\n\nclass ServerDiagnosticsTest(test.TestCase):\n\n def setUp(self):\n super(ServerDiagnosticsTest, self).setUp()\n self.flags(allow_admin_api=True)\n self.flags(verbose=True)\n self.stubs.Set(nova.compute.API, 'get_actions', fake_get_actions)\n self.stubs.Set(nova.compute.API, 'get', fake_instance_get)\n self.compute_api = nova.compute.API()\n\n self.router = v2.APIRouter()\n ext_middleware = extensions.ExtensionMiddleware(self.router)\n self.app = wsgi.LazySerializationMiddleware(ext_middleware)\n\n def test_get_actions(self):\n uuid = nova.utils.gen_uuid()\n req = fakes.HTTPRequest.blank('/fake/servers/%s/actions' % uuid)\n res = req.get_response(self.app)\n output = json.loads(res.body)\n expected = {'actions': [\n {'action': 'rebuild', 'error': None, 'created_at': str(dt)},\n {'action': 'reboot', 'error': 'Failed!', 'created_at': str(dt)},\n ]}\n self.assertEqual(output, expected)\n","sub_path":"nova/tests/api/openstack/v2/contrib/test_server_action_list.py","file_name":"test_server_action_list.py","file_ext":"py","file_size_in_byte":2220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"544216570","text":"#!/usr/bin/env python3\n\nimport datetime, sys\nnow = datetime.datetime.now()\n\nclass Donor:\n def __init__(self, first_name, last_name, donation_amount):\n self.first = first_name\n self.last = last_name\n self.donation = donation_amount\n\n @property\n def full_name(self):\n return f\"{self.first} {self.last}\"\n\n def add_donation(self, amount):\n return self.donation.append(amount)\n\n def sum_donation(self):\n return sum(self.donation)\n\n\ndef get_name():\n return input(\"Who would you like to thank? If you would like a list of donors, enter 'list'. \")\n\n\ndef get_amount():\n try:\n return float(input(f\"How much did this person donate? \"))\n except ValueError:\n print(\"You did not enter a numeric value. 
Please try again.\")\n sys.exit()\n\n\nclass Donor_Actions:\n def __init__(self, donors):\n if donors is None:\n self.donors = []\n else:\n self.donors = donors\n\n def add_donor(self, donor):\n self.donors.append(donor)\n\n def all_donor_names(self):\n return [donor.full_name for donor in self.donors]\n\n def send_thank_you(self):\n donor_name = get_name()\n if donor_name.lower() == \"list\":\n print(self.all_donor_names())\n donor_name = get_name()\n\n donation_amount = get_amount()\n\n if donor_name not in self.all_donor_names():\n try:\n first, last = donor_name.split(\" \")\n self.add_donor(Donor(first, last, [donation_amount]))\n except ValueError:\n print(\"Please enter the donor's full name\")\n else:\n for donor in self.donors:\n if donor.full_name == donor_name:\n donor.add_donation(donation_amount)\n\n ty_name = donor_name + f\" {now.year}{now.month:0>2d}{now.day:0>2d}\" + \".txt\"\n\n with open(ty_name, \"w\") as f:\n f.write(f\"Dear {donor_name},\\nThank you very much for your donation of ${donation_amount:,.2f}.\\nSincerely,\\nMatt Casali\")\n print(\"A thank you message has been created.\")\n\n def create_report(self):\n reports = []\n for donor in self.donors:\n reports.append([donor.full_name, sum(donor.donation), len(donor.donation), sum(donor.donation)/len(donor.donation)])\n return reports\n\n def print_report(self):\n print(\" Donor Name | Total Given | Num Gifts | Average Gift\\n\")\n\n for donor_report in self.create_report():\n print(\"{:23}${:12.2f}{:10} ${:12.2f}\".format(donor_report[0], donor_report[1], donor_report[2], donor_report[3]))\n\n def send_all_letters(self):\n for donor in self.donors:\n file_name = donor.full_name + '.txt'\n with open(file_name, \"w\") as donor_file:\n donor_file.write(f\"Thank you {donor.full_name}, for your generous donation of ${sum(donor.donation):,.2f}!\")\n print(\"Letters have been created and saved for every donor.\")\n\n\nd1 = Donor(\"William\", \"Gates, III\", [326892.23, 326892.25])\nd2 = Donor(\"Mark\", \"Zuckerberg\", [500.00, 800.00, 2.00])\nd3 = Donor(\"Jeff\", \"Bezos\", [877.33])\nd4 = Donor(\"Paul\", \"Allen\", [750.23, 23.53, 999.99])\nd5 = Donor(\"Dakota\", \"Dakota\", [10.00, 100.00, 1000.00])\n\ndh = Donor_Actions([d1, d2, d3, d4, d5])\n\ndef main():\n choices_dic = {\"1\": dh.send_thank_you, \"2\": dh.print_report, \"3\": dh.send_all_letters}\n while True:\n print(\"Please choose: \\n1: Send Thank You\\n2: Create Report\\n3: Send Letters to Everyone\\n4: Quit\")\n choice = input(\"Choice: \")\n\n try:\n if choice == \"4\":\n break\n choices_dic.get(choice)()\n except TypeError:\n print(\"You have made an invalid choice. 
Goodbye.\")\n break\n\n\nif __name__ == '__main__':\n main()","sub_path":"students/matt_casali/Lesson09/mailroom_9.py","file_name":"mailroom_9.py","file_ext":"py","file_size_in_byte":3815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"445312691","text":"import argparse\nimport asyncio\nimport logging\nfrom typing import Text, Optional\n\nimport rasa.utils.io\nimport rasa.train\nfrom examples.restaurantbot.policy import RestaurantPolicy\nfrom rasa.core.agent import Agent\nfrom rasa.core.interpreter import RasaNLUInterpreter, RegexInterpreter\nfrom rasa.core.policies.memoization import MemoizationPolicy\nfrom rasa.core.policies.mapping_policy import MappingPolicy\n\nlogger = logging.getLogger(__name__)\n\n\nasync def parse(\n text: Text, core_model_path: Text, nlu_model_path: Optional[Text] = None\n):\n if nlu_model_path:\n interpreter = RasaNLUInterpreter(nlu_model_path)\n else:\n logger.warning(\"No NLU model passed, parsing messages using RegexInterpreter.\")\n interpreter = RegexInterpreter()\n\n agent = Agent.load(core_model_path, interpreter=interpreter)\n\n response = await agent.handle_text(text)\n\n logger.info(\"Text: '{}'\".format(text))\n logger.info(\"Response:\")\n logger.info(response)\n\n return response\n\n\nasync def train_core(\n domain_file: Text = \"domain.yml\",\n model_path: Text = \"models/core\",\n training_data_file: Text = \"data/stories.md\",\n):\n agent = Agent(\n domain_file,\n policies=[\n MemoizationPolicy(max_history=3),\n MappingPolicy(),\n RestaurantPolicy(batch_size=100, epochs=400, validation_split=0.2),\n ],\n )\n\n training_data = await agent.load_data(training_data_file)\n agent.train(training_data)\n\n # Attention: agent.persist stores the model and all meta data into a folder.\n # The folder itself is not zipped.\n agent.persist(model_path)\n\n logger.info(\"Model trained. Stored in '{}'.\".format(model_path))\n\n return model_path\n\n\ndef train_nlu(\n config_file=\"config.yml\", model_path=\"models/nlu\", training_data_file=\"data/nlu.md\"\n):\n from rasa.nlu.training_data import load_data\n from rasa.nlu import config\n from rasa.nlu.model import Trainer\n\n training_data = load_data(training_data_file)\n trainer = Trainer(config.load(config_file))\n trainer.train(training_data)\n\n # Attention: trainer.persist stores the model and all meta data into a folder.\n # The folder itself is not zipped.\n model_directory = trainer.persist(model_path)\n\n logger.info(\"Model trained. 
Stored in '{}'.\".format(model_directory))\n\n return model_directory\n\n\nif __name__ == \"__main__\":\n rasa.utils.io.configure_colored_logging(loglevel=\"INFO\")\n\n parser = argparse.ArgumentParser(description=\"Restaurant Bot\")\n\n subparser = parser.add_subparsers(dest=\"subparser_name\")\n train_parser = subparser.add_parser(\"train\", help=\"train a core or nlu model\")\n parse_parser = subparser.add_parser(\"parse\", help=\"parse any text\")\n\n parse_parser.add_argument(\n \"--nlu-model\", default=None, help=\"Path to the nlu model.\"\n )\n parse_parser.add_argument(\n \"--core-model\", default=\"models/core\", help=\"Path to the core model.\"\n )\n parse_parser.add_argument(\"--text\", default=\"hello\", help=\"Text to parse.\")\n\n train_parser.add_argument(\n \"model\",\n choices=[\"nlu\", \"core\"],\n help=\"Do you want to train a NLU or Core model?\",\n )\n args = parser.parse_args()\n\n loop = asyncio.get_event_loop()\n\n # decide what to do based on first parameter of the script\n if args.subparser_name == \"train\":\n if args.model == \"nlu\":\n train_nlu()\n elif args.model == \"core\":\n loop.run_until_complete(train_core())\n elif args.subparser_name == \"parse\":\n loop.run_until_complete(parse(args.text, args.core_model, args.nlu_model))\n","sub_path":"examples/restaurantbot/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":3608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"399714060","text":"import pydot\nimport turtle_ast as AST\n\nfrom multipledispatch import dispatch\n\n# ---------------------------------------------------------------------------- #\nclass CFG(object):\n def __init__(self):\n self.lst = []\n\n def new_node(self, ast_node = None):\n n = CFGNode(idx = len(self.lst), ast_node = ast_node)\n self.lst.append(n)\n return n\n\n def visit_components(self, visitor):\n for node in self.lst:\n node.visited = False\n\n for node in self.lst:\n node.accept(visitor.pre_visit, visitor.post_visit)\n\n def __iter__(self):\n for n in self.lst:\n yield n\n\n# ---------------------------------------------------------------------------- #\nclass CFGNode(object):\n def __init__(self, idx = -1, ast_node = None):\n self.prev = []\n self.succ = []\n self.ast_node = ast_node\n self.visited = False\n self.idx = idx\n\n def accept(self, cfg_pre_visitor, cfg_post_visitor):\n if not self.visited:\n self.visited = True\n\n cfg_pre_visitor(self)\n for succ in self.succ:\n succ.accept(cfg_pre_visitor, cfg_post_visitor)\n cfg_post_visitor(self)\n\n def __repr__(self):\n return str(self)\n\n def __str__(self):\n if self.ast_node is not None:\n return str(self.ast_node)\n else:\n return \"dummy\"\n\n# ---------------------------------------------------------------------------- #\n@dispatch(AST.ExprStmt, CFGNode, CFG)\ndef cfgfy(node, nprev, cfg):\n nexpr = cfg.new_node(node)\n nprev.succ.append(nexpr)\n nexpr.prev.append(nprev)\n return nexpr\n\n@dispatch(AST.Repeat, CFGNode, CFG)\ndef cfgfy(node, nprev, cfg):\n nstart = cfg.new_node(node)\n nstart.prev.append(nprev)\n nprev.succ.append(nstart)\n\n nbody = cfgfy(node.body, nstart, cfg)\n nbody.succ.append(nstart)\n nstart.prev.append(nbody)\n\n nexit = cfg.new_node()\n nexit.prev.append(nstart)\n nstart.succ.append(nexit)\n\n return nexit\n\n@dispatch(AST.Assignment, CFGNode, CFG)\ndef cfgfy(node, nprev, cfg):\n n = cfg.new_node(node)\n n.prev.append(nprev)\n nprev.succ.append(n)\n return n\n\n@dispatch(AST.FunctionDecl, CFGNode, CFG)\ndef cfgfy(node, nprev, 
cfg):\n nhandle = cfg.new_node(node)\n nhandle.prev.append(nprev)\n nprev.succ.append(nhandle)\n\n ndef = cfg.new_node(node)\n cfgfy(node.body, ndef, cfg)\n\n return nhandle\n\n@dispatch(AST.IfStmt, CFGNode, CFG)\ndef cfgfy(node, nprev, cfg):\n ncond = cfg.new_node(node)\n nprev.succ.append(ncond)\n ncond.prev.append(nprev)\n\n join_node = cfg.new_node()\n\n st1 = node._then\n n1 = cfgfy(st1, ncond, cfg)\n n1.succ.append(join_node)\n join_node.prev.append(n1)\n\n st2 = node._else\n if st2 is not None:\n n2 = cfgfy(st2, ncond, cfg)\n n2.succ.append(join_node)\n join_node.prev.append(n2)\n else:\n ncond.succ.append(join_node)\n join_node.prev.append(ncond)\n\n return join_node\n\n@dispatch(AST.ReturnStmt, CFGNode, CFG)\ndef cfgfy(node, nprev, cfg):\n nret = cfg.new_node(node)\n nprev.succ.append(nret)\n nret.prev.append(nprev)\n return nret\n\n@dispatch(AST.Top, CFGNode, CFG)\ndef cfgfy(node, nprev, cfg):\n return cfgfy(node.body, nprev, cfg)\n\n@dispatch(AST.Seq, CFGNode, CFG)\ndef cfgfy(node, nprev, cfg):\n ncurr = nprev\n\n for stmt in node.lst:\n n = cfgfy(stmt, ncurr, cfg)\n if n is not None:\n ncurr = n\n\n return ncurr\n\n@dispatch(AST.End, CFGNode, CFG)\ndef cfgfy(node, nprev, cfg):\n nend = cfg.new_node(node)\n nprev.succ.append(nend)\n nend.prev.append(nprev)\n return nend\n\ndef ast_to_cfg(ast):\n cfg = CFG()\n ntop = cfg.new_node()\n cfgfy(ast, ntop, cfg)\n return cfg\n\n# ---------------------------------------------------------------------------- #\n## Draw graph.\nclass CFGGraphVisitor(object):\n def __init__(self):\n self.graph = pydot.Dot(graph_type = 'digraph')\n\n def pre_visit(self, node):\n label = str(node)\n if node.ast_node is not None:\n shape = \"box\"\n else:\n shape = \"oval\"\n # label = node.idx\n\n self.graph.add_node(pydot.Node(node.idx, label = str(node), shape = shape))\n\n def post_visit(self, node):\n for succ in node.succ:\n self.graph.add_edge(pydot.Edge(node.idx, succ.idx))\n\n def draw(self, filename):\n self.graph.write_png(filename)\n","sub_path":"compiler/turtle_cfg.py","file_name":"turtle_cfg.py","file_ext":"py","file_size_in_byte":4432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"322933081","text":"from rest_framework.generics import get_object_or_404\n\nfrom django.http import HttpRequest\n\nimport access\n\nfrom access.resources import Resources\nfrom api.endpoint.base import BaseEndpoint\nfrom db.models.nodes import ClusterNode\nfrom scopes.permissions.scopes import ScopesPermission\n\n\nclass NodePermission(ScopesPermission):\n SCOPE_MAPPING = access.get_scope_mapping_for(Resources.NODE)\n\n def has_object_permission(self, request: HttpRequest, view, obj) -> bool:\n # This means that we allowed this auth backend on this endpoint\n if self._check_internal_or_ephemeral(request=request):\n return True\n\n return access.has_object_permission(\n resource=Resources.NODE,\n permission=NodePermission,\n request=request,\n view=view,\n obj=obj)\n\n\nclass NodeListEndpoint(BaseEndpoint):\n queryset = ClusterNode.objects.order_by('sequence').filter(is_current=True)\n permission_classes = (NodePermission,)\n AUDITOR_EVENT_TYPES = None\n\n\nclass NodeEndpoint(NodeListEndpoint):\n CONTEXT_KEYS = ('sequence',)\n CONTEXT_OBJECTS = ('node',)\n lookup_field = 'sequence'\n lookup_url_kwarg = 'sequence'\n\n def _initialize_context(self) -> None:\n # pylint:disable=attribute-defined-outside-init\n super()._initialize_context()\n self.node = self.get_object()\n\n\nclass 
NodeResourceEndpoint(NodeListEndpoint):\n CONTEXT_KEYS = ('sequence',)\n CONTEXT_OBJECTS = ('node',)\n\n def enrich_queryset(self, queryset):\n return queryset.filter(cluster_node=self.node)\n\n def _initialize_context(self) -> None:\n # pylint:disable=attribute-defined-outside-init\n super()._initialize_context()\n self.node = get_object_or_404(ClusterNode,\n sequence=self.sequence)\n","sub_path":"platform/core/polyaxon/api/endpoint/node.py","file_name":"node.py","file_ext":"py","file_size_in_byte":1826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"37837315","text":"# Loops through all of the JSONs in the folder; for each one, pulls \"Report_Header:Created_By\" and \"Report_Header:Institution_ID:Value\" for Institution_ID Type=Proprietary, then creates a CSV row for each instance of \"Report_Items:Platform\" with the above data points included\n\n\"\"\"To obtain JSONs, the following goes at the very end of the \"Make API Calls\" section of app.py:\n\nfrom pathlib import Path\ntry:\n Namespace = str(Report_JSON['Report_Header']['Institution_ID'][0]['Value']).split(\":\")[0]\n File_Name = Path('Examples', 'Example_JSONs', f\"{Report_JSON['Report_Header']['Report_ID']}_{Namespace}.json\")\nexcept KeyError:\n try:\n Namespace = str(Report_JSON['Institution_ID'][0]['Value']).split(\":\")[0]\n File_Name = Path('Examples', 'Example_JSONs', f\"{Report_JSON['Report_ID']}_{Namespace}.json\")\n except KeyError:\n continue # Means the JSON returned is for an error\nwith open(File_Name, 'w') as writeJSON:\n json.dump(Master_Report_Response.json(), writeJSON)\n\"\"\"\n\nimport os\nimport json\nfrom pathlib import Path\nimport csv\n\n\n#Section: Create CSV\nCSV_File_Path = Path('Examples', 'Example_JSONs', 'JSON_Keys.csv')\nCSV_File = open(CSV_File_Path, 'w', newline='')\nCSV_File_Writer = csv.DictWriter(CSV_File, [\n 'Source_JSON',\n 'Report_Creator',\n 'Report_Type',\n 'Source_COUNTER_Namespace',\n 'Resource_Platform'\n])\nCSV_File_Writer.writeheader()\n\n\n#Section: Open JSON\n#Subsection: Get list of JSON Files in Folder\nJSON_File_Names = []\nCurrent_Folder = os.path.dirname(os.path.realpath(__file__)) # Goes down to this specific subfolder--getcwd pulls all folders, including git\nfor Folder, Subfolders, Files in os.walk(Current_Folder):\n for File in Files:\n if File.endswith(\".json\"):\n JSON_File_Names.append(File)\n\n#Subsection: Loop Through Opening JSON Files in Folder\nfor File in JSON_File_Names:\n File_Path = os.path.dirname(os.path.realpath(__file__)) + \"\\\\\" + File\n with open(File_Path, encoding='utf8') as JSON_File:\n JSON_Dictionary = json.load(JSON_File)\n\n\n #Section: Read Data from JSON Dictionary\n CSV_Record = {}\n CSV_Record['Source_JSON'] = File\n \n #Subsection: Read Data from Header\n CSV_Record['Report_Creator'] = JSON_Dictionary['Report_Header']['Created_By']\n CSV_Record['Report_Type'] = JSON_Dictionary['Report_Header']['Report_ID']\n\n try:\n for ID in JSON_Dictionary['Report_Header']['Institution_ID']:\n if ID['Type'] == \"Proprietary\":\n CSV_Record['Source_COUNTER_Namespace'] = ID['Value'].split(\":\")[0]\n except KeyError:\n if \":\" in JSON_Dictionary['Report_Header']['Customer_ID']:\n CSV_Record['Source_COUNTER_Namespace'] = JSON_Dictionary['Report_Header']['Customer_ID'].split(\":\")[0]\n else:\n CSV_Record['Source_COUNTER_Namespace'] = \"No COUNTER Namespace\"\n\n #Subsection: Get List of Platforms\n if len(JSON_Dictionary['Report_Items']) == 0: # If the Report_Items section is empty\n 
CSV_Record['Resource_Platform'] = \"Empty report\"\n CSV_File_Writer.writerow(CSV_Record)\n continue\n\n Platform_List = []\n for Platforms in JSON_Dictionary['Report_Items']:\n for Key, Value in Platforms.items():\n if Key == \"Platform\":\n Platform_List.append(Value)\n \n for Found_Platform in Platform_List:\n CSV_Record['Resource_Platform'] = Found_Platform\n CSV_File_Writer.writerow(CSV_Record)","sub_path":"data/Examples/Master_Report_JSONs/Examine_Keys_in_Master_Report_JSONs.py","file_name":"Examine_Keys_in_Master_Report_JSONs.py","file_ext":"py","file_size_in_byte":3585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"497872038","text":"from .local import *\n\nfrom djzbar.settings import INFORMIX_EARL_PROD as INFORMIX_EARL\n\nALLOWED_HOSTS = [\n 'localhost','127.0.0.1','ceres.carthage.edu','www.carthage.edu'\n]\nDEBUG = False\n#DEBUG = True\nTEMPLATES[0]['OPTIONS']['debug'] = DEBUG\nROOT_URL = '/apps/twilio'\nTWILIO_API_URL = 'https://api.twilio.com/2010-04-01/'\nLOGIN_REDIRECT_URL = ROOT_URL\n","sub_path":"djtwilio/settings/production.py","file_name":"production.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"167041333","text":"# Copyright 2022 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\nimport os\nimport mindspore.dataset.vision.c_transforms as c_version\nimport mindspore.dataset.transforms.c_transforms as C\nimport mindspore.dataset as ds\nimport mindspore.common.dtype as mstype\n\n\nDATASET_PATH = \"/home/workspace/mindspore_dataset/animal/mini_animal_12\"\n_R_MEAN = 123.68\n_G_MEAN = 116.78\n_B_MEAN = 103.94\n\n_R_STD = 1\n_G_STD = 1\n_B_STD = 1\n\n\ndef create_dataset(epoch_size=1, batch_size=32, step_size=1, resize_height=224,\n resize_width=224, full_batch=False, scale=1.0, rank_size=1):\n try:\n os.environ['DEVICE_ID']\n except KeyError:\n device_id = 0\n os.environ['DEVICE_ID'] = str(device_id)\n\n if full_batch:\n batch_size = batch_size * rank_size\n\n num_shards = 1\n shard_id = 0\n data_url = DATASET_PATH\n dataset = ds.ImageFolderDataset(data_url, num_parallel_workers=1, num_shards=num_shards,\n shard_id=shard_id, shuffle=False)\n\n # define map operations\n decode_op = c_version.Decode()\n c_version.Normalize(mean=[_R_MEAN, _G_MEAN, _B_MEAN], std=[_R_STD, _G_STD, _B_STD])\n random_resize_op = c_version.Resize((resize_height, resize_width))\n channelswap_op = c_version.HWC2CHW()\n rescale = scale / 255.0\n shift = 0.0\n rescale_op = c_version.Rescale(rescale, shift)\n type_cast_label = C.TypeCast(mstype.float32)\n type_cast_image = C.TypeCast(mstype.int32)\n\n dataset = dataset.map(input_columns=\"label\", operations=C.OneHot(dataset.num_classes()))\n dataset = dataset.map(input_columns=\"label\", operations=type_cast_label, num_parallel_workers=1)\n\n dataset = dataset.map(input_columns=\"image\", 
operations=decode_op, num_parallel_workers=1)\n dataset = dataset.map(input_columns=\"image\", operations=random_resize_op, num_parallel_workers=1)\n dataset = dataset.map(input_columns=\"image\", operations=rescale_op, num_parallel_workers=1)\n dataset = dataset.map(input_columns=\"image\", operations=channelswap_op, num_parallel_workers=1)\n dataset = dataset.map(input_columns=\"image\", operations=type_cast_image, num_parallel_workers=1)\n\n dataset = dataset.batch(batch_size, drop_remainder=True)\n return dataset\n","sub_path":"tests/st/map_parameter/network/src/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":2834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"154894436","text":"# -*- coding: utf-8 -*-\nimport unicodedata\nfrom urllib.parse import parse_qs, unquote, urlsplit\n\nimport pandas as pd\nimport scrapy\nfrom scrapy_selenium import SeleniumRequest\nfrom w3lib.url import add_or_replace_parameter, url_query_cleaner\n\nfrom ..settings import IMAGE_SERVER_URL, SEARCH_URL\n\n\nclass ImgUrlSpider(scrapy.Spider):\n name = 'img_url'\n\n custom_settings = {\n 'COOKIES_ENABLED': False,\n 'DOWNLOAD_DELAY': 3,\n 'DEFAULT_REQUEST_HEADERS': {\n 'Referer': 'https://images.google.com/',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'Accept-Language': 'en',\n 'User-Agent': \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 \"\n \"(KHTML, like Gecko) Chrome/43.0.2357.134 Safari/537.36\",\n },\n }\n\n def start_requests(self):\n img_paths = pd.read_csv('output/img_path.csv')['img_path']\n for img_path in img_paths:\n url = img_path.replace('images', IMAGE_SERVER_URL)\n search_url = add_or_replace_parameter(SEARCH_URL, 'image_url', url)\n meta = {\n 'img_path': img_path,\n }\n yield SeleniumRequest(url=search_url, meta=meta, callback=self.parse_result)\n\n def parse_result(self, response):\n all_size_url = response.xpath('//div[@class=\"card-section\"]//a[contains(text(), \"全部尺寸\")]/@href').extract_first()\n all_size_url = response.urljoin(all_size_url)\n meta = {\n 'img_path': response.meta['img_path'],\n }\n if all_size_url:\n yield SeleniumRequest(url=all_size_url, meta=meta, callback=self.parse_img_urls)\n\n similar_url = response.xpath('//a[text()=\"外观类似的图片\"]/@href').extract_first()\n similar_url = response.urljoin(similar_url)\n if similar_url:\n meta['similar'] = True\n yield SeleniumRequest(url=similar_url, meta=meta, callback=self.parse_img_urls)\n\n def parse_img_urls(self, response):\n similar = response.meta.get('similar')\n images = response.xpath('//a[@class=\"rg_l\"]')\n urls = images.xpath('./@href').extract()\n urls = list(response.urljoin(url) for url in urls)\n urls = list(unquote(url) for url in urls)\n urls = list(parse_qs(urlsplit(url).query).get('imgurl', [None])[0] for url in urls)\n urls = list(url_query_cleaner(url) for url in urls if url)\n if similar:\n urls = urls[0:8]\n for url in urls:\n yield {\n 'img_path': response.meta['img_path'],\n 'url': url,\n 'similar': 1,\n }\n sizes = images.xpath('.//span[@class=\"rg_an\"]/text()').extract()\n sizes = list(unicodedata.normalize(\"NFKD\", s) for s in sizes)\n if not urls:\n return\n max_size = 0\n max_idx = -1\n for idx, size in enumerate(sizes):\n tmp = size.split(' ')\n width = int(tmp[0])\n height = int(tmp[2])\n cur_size = width * height\n if cur_size > max_size:\n max_size = cur_size\n max_idx = idx\n max_size_url = urls[max_idx]\n yield {\n 'img_path': response.meta['img_path'],\n 'url': 
max_size_url,\n 'similar': 0,\n }\n","sub_path":"damage_car_spider/spiders/img_url.py","file_name":"img_url.py","file_ext":"py","file_size_in_byte":3383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"448404612","text":"# coding=utf-8\n\"\"\" 03. 直方图\n\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nnp.random.seed(19680801)\n\nmu1, sigma1 = 100, 15\nmu2, sigma2 = 80, 15\nx1 = mu1 + sigma1 * np.random.randn(10000)\nx2 = mu2 + sigma2 * np.random.randn(10000)\n\n# the histogram of the data\n# 50:将数据分成50组\n# facecolor:颜色;alpha:透明度\n# density:是密度而不是具体数值\nn1, bins1, patches1 = plt.hist(x1, 50, density=True, facecolor='g', alpha=1)\nn2, bins2, patches2 = plt.hist(x2, 50, density=True, facecolor='r', alpha=0.2)\n\n# n:概率值;bins:具体数值;patches:直方图对象。\n\nplt.xlabel('Smarts')\nplt.ylabel('Probability')\nplt.title('Histogram of IQ')\n\nplt.text(110, .025, r'$\\mu=100,\\ \\sigma=15$')\nplt.text(50, .025, r'$\\mu=80,\\ \\sigma=15$')\n\n# 设置x,y轴的具体范围\nplt.axis([40, 160, 0, 0.03])\nplt.grid(True)\nplt.show()\n","sub_path":"compute/chart/histogram.py","file_name":"histogram.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"546965377","text":"import requests\nimport base64\nimport spotipy\nfrom spotify_auth import client_public_token, client_secret_token\n\n\nclass TopSpotify:\n\n \n\n def discover(self):\n\n # Request 15 tracks from spotify 'Today top hits' playlist\n access_token = self.get_access_token()\n self.media = []\n payload = {'limit': 20 }\n headers = {'Authorization': 'Bearer ' + access_token} \n r = requests.get('https://api.spotify.com/v1/users/spotify/playlists/18pDbI9hwBndkj04AmqkuS/tracks', params=payload, headers=headers).json()\n \n for track in r['items']:\n self.media.append({'name' : track['track']['name'], 'link':track['track']['uri'], 'description':''})\n\n return self.media\n\n def get_access_token(self):\n\n payload = {'grant_type': 'client_credentials'}\n\n headers = {'Authorization': 'Basic '\n + base64.standard_b64encode(client_public_token\n + ':' + client_secret_token)}\n\n r = requests.post('https://accounts.spotify.com/api/token',\n data=payload, headers=headers).json()\n access_token = r['access_token']\n\n return access_token","sub_path":"Processing/top_spotify.py","file_name":"top_spotify.py","file_ext":"py","file_size_in_byte":1184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"247417532","text":"#!/usr/bin/python\n\nimport json\n\nclass Robot:\n def __init__(self):\n self.id = 'robot_5'\n self.pose = { 'x': 0.1, 'y': 0.2, 'orientation': 10 }\n self.state = 'Wandering'\n self.batteryLevel = 12.6\n self.internalState = {'alpha': 12, 'nextTarget': 'somewhere', 'ir': [0.3, 0.6, 0.1, 0.05]}\n\n def toJson(self):\n return json.dumps({\n 'id': self.id,\n 'state': 'Wandering',\n 'batteryLevel': self.batteryLevel,\n 'internalState': self.internalState,\n 'someString' : 'Some new String'\n })\n\ntestRobot = Robot()\nprint(testRobot.toJson())\n\nimport socket\nfrom math import cos, sin\nfrom time import sleep\n\nhostName = ''\nhostPort = 8888\n\ns = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\ns.connect((hostName, hostPort))\n\nwhile True:\n \n testRobot.pose['orientation'] += 1\n if testRobot.pose['orientation'] > 360:\n testRobot.pose['orientation'] -= 360\n testRobot.pose['x'] = 0.5 + 0.4*sin(testRobot.pose['orientation']*(3.14159/180))\n 
testRobot.pose['y'] = 0.5 - 0.4*cos(testRobot.pose['orientation']*(3.14159/180))\n\n try:\n s.send(testRobot.toJson())\n except:\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect((hostName, hostPort))\n s.send(testRobot.toJson())\n\n sleep(0.1)\n \n","sub_path":"testDataSource.py","file_name":"testDataSource.py","file_ext":"py","file_size_in_byte":1389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"116775359","text":"import logging\nimport time\nfrom constants import SPEED_OF_MOTOR, SECS, ITERS\nfrom motor_control.differential_motor_control import DifferentialMotorControl\nfrom motor_control.interval_thread import IntervalThread\nfrom motor_control.maxon.maxon_communicator import MaxonCommunicator, WARNING_EMERGENCY_STOP, WARNING_FAR, WARNING_CLOSE\n\nfrom logging import getLogger\n\nlogger = getLogger(__name__)\nFLIP = False # flip motors\nCHECK_WARNING_INTERVAL = 0.1\n\nclass MaxonMotorControl(DifferentialMotorControl):\n def __init__(self, sensor=None):\n DifferentialMotorControl.__init__(self, sensor)\n self.motor_left = MaxonCommunicator(dev_index=0)\n self.motor_right = MaxonCommunicator(dev_index=1)\n\n self.motor_left.open_device()\n self.motor_right.open_device()\n\n self.stopping = False\n\n def move(self, v_l, v_r):\n if self.stopping:\n return\n\n max_speed = 0.9 # m/s\n if max(abs(v_l),abs(v_r)) > max_speed:\n r_f = max(abs(v_l), abs(v_r)) / max_speed\n v_l /= r_f\n v_r /= r_f\n #print(max_speed, v_l, v_r, r_f)\n # left motor moves backwards\n #print('speed is', v_r, v_l)\n if FLIP:\n self.motor_left.set_speed(-v_r)\n self.motor_right.set_speed(v_l)\n else:\n self.motor_left.set_speed(v_l)\n self.motor_right.set_speed(-v_r)\n\n def stop(self):\n self.stopping = True\n self.motor_left.quickstop()\n self.motor_right.quickstop()\n time.sleep(1)\n self.motor_left.disable()\n self.motor_right.disable()\n self.stopping = False\n\n def start(self):\n self.motor_left.enable()\n self.motor_right.enable()\n\n def close(self):\n super(MaxonMotorControl, self).close()\n # close motor right first, as it is passed via motor left\n self.motor_right.close_device()\n self.motor_left.close_device()\n\n\nif __name__ == '__main__':\n logger.addHandler(logging.StreamHandler())\n\n #time.sleep(7)\n speed = SPEED_OF_MOTOR\n control = MaxonMotorControl()\n secs = SECS\n iters = ITERS\n for i in range(iters):\n for i in range(10*secs):\n control.move(v_l=speed, v_r=speed)\n time.sleep(0.1)\n\n for i in range(10*secs):\n control.move(v_l=-speed, v_r=-speed)\n time.sleep(0.1)\n\n control.stop()\n\n control.close()\n","sub_path":"motor_control/maxon/maxon_control.py","file_name":"maxon_control.py","file_ext":"py","file_size_in_byte":2426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"246692845","text":"\"\"\"\nhandlers.place\n\nCreated on Dec 19, 2012\n\"\"\"\n\n__author__ = 'steven@eyeballschool.com (Steven)'\n\n\nimport json\n\nfrom string import capwords\n\nfrom google.appengine.ext import webapp\nfrom google.appengine.api import users\n\nfrom handlers.abstracts import baseapp\nfrom classes import placedlit\nfrom classes import user_request\n\n\nclass AddPlacesHandler(baseapp.BaseAppHandler):\n def post(self):\n place_data = json.loads(self.request.body)\n place_data['user'] = users.get_current_user()\n place_key = placedlit.PlacedLit.create_or_update_from_dict(place_data)\n\n agent = self.request.headers['User-Agent']\n user_request.UserRequest.create(ua=agent, 
user_loc=place_key)\n\n    response_message = '%s by %s added at location: (%s, %s) thanks.' % (\n      place_data['title'], place_data['author'], place_data['latitude'],\n      place_data['longitude'])\n    response_json = {\n      'message': response_message,\n      'geopt': { 'lat': place_data['latitude'], 'lng': place_data['longitude']}\n    }\n\n    self.output_json(response_json)\n\n\nclass GetPlacesHandler(baseapp.BaseAppHandler):\n  def get(self):\n    places = placedlit.PlacedLit.get_all_places()\n    loc_json = []\n    for place in places:\n      geo_pt = place.location\n      key = place.key()\n      loc = {\n        'latitude': geo_pt.lat,\n        'longitude': geo_pt.lon,\n        'title': place.title,\n        'author': place.author,\n        'db_key': key.id()}\n      loc_json.append(loc)\n    self.output_json(loc_json)\n\n\nclass RecentPlacesHandler(baseapp.BaseAppHandler):\n  def get(self):\n    places = placedlit.PlacedLit.get_newest_places(limit=10)\n    loc_json = []\n    for place in places:\n      date_added = place.ts.strftime('%m-%d-%Y')\n      geo_pt = place.location\n      key = place.key()\n      loc = {\n        'latitude': geo_pt.lat,\n        'longitude': geo_pt.lon,\n        'title': place.title,\n        'author': place.author,\n        'date_added': date_added,\n        'db_key': key.id()}\n      if place.scenelocation:\n        loc['location'] = capwords(place.scenelocation)\n      loc_json.append(loc)\n    self.output_json(loc_json)\n\n\nclass InfoHandler(baseapp.BaseAppHandler):\n  def get(self, place_id):\n    place = placedlit.PlacedLit.get_place_from_id(place_id)\n    if place:\n      date_added = place.ts.strftime('%m-%d-%Y')\n      place_info = {\n        'id': place_id,\n        'title': place.title,\n        'author': place.author,\n        'place_name': place.scenelocation,\n        'scenetime': place.scenetime,\n        'actors': place.actors,\n        'symbols': place.symbols,\n        'description': place.scenedescription,\n        'notes': place.notes,\n        'date_added': date_added,\n        'visits': place.checkins,\n      }\n      if place.image_url:\n        place_info['image'] = place.image_url.replace('http://', '')\n      self.output_json(place_info)\n\n\nclass ExportPlacesHandler(baseapp.BaseAppHandler):\n  def get(self):\n    places = placedlit.PlacedLit.get_all_places()\n    row_id = 1\n    loc_csv = '\"id\",\"title\",\"author\",\"location\",\"time\",\"actors\",\"symbols\",'\n    loc_csv += '\"description\",\"notes\",\"latitude\",\"longitude\"\\n'\n    for place in places:\n      geo_pt = place.location\n      loc_csv += '\"{}\",\"{}\",\"{}\",\"{}\",\"{}\",\"{}\",\"{}\",\"{}\",\"{}\",\"{}\",\"{}\"\\n'.format(\n        row_id, place.title, place.author, place.scenelocation, place.scenetime,\n        place.actors, place.symbols, place.scenedescription, place.notes,\n        geo_pt.lat, geo_pt.lon, place.ts)\n      row_id += 1\n    self.response.headers['Content-Type'] = 'text/csv'\n    self.response.out.write(loc_csv)\n\n\nclass PlacesVisitHandler(baseapp.BaseAppHandler):\n  def get(self, place_id):\n    place = placedlit.PlacedLit.get_place_from_id(place_id)\n    place.update_visit_count()\n    info_path = '/places/info/' + place_id\n    self.redirect(info_path)\n\n\nclass CountPlacesHandler(baseapp.BaseAppHandler):\n  def get(self):\n    count_data = {\n      'count': placedlit.PlacedLit.count()\n    }\n    self.output_json(count_data)\n\n\nurls = [\n  ('/places/add', AddPlacesHandler),\n  ('/places/show', GetPlacesHandler),\n  ('/places/info/(.*)', InfoHandler),\n  ('/places/visit/(.*)', PlacesVisitHandler),\n  ('/places/recent', RecentPlacesHandler),\n  ('/places/export', ExportPlacesHandler),\n  ('/places/count', CountPlacesHandler)\n]\n\napp = webapp.WSGIApplication(urls, debug=True)\n","sub_path":"tests/handlers/place.py","file_name":"place.py","file_ext":"py","file_size_in_byte":4320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"265585562","text":"\nfrom nineml.abstraction_layer.dynamics import ComponentClass\nfrom nineml.abstraction_layer.dynamics.testing_utils import RecordValue\n\nimport coba_synapse\nimport iaf\n\n\nclass ComponentMetaData(object):\n is_neuron_model = False\n\n supports_test_pynn_neuron_std = True\n\n parameters = {\n 'iaf.cm': 1.0,\n 'iaf.gl': 50.0,\n 'iaf.taurefrac': 5.0,\n 'iaf.vrest': -65.0,\n 'iaf.vreset': -65.0,\n 'iaf.vthresh': -50.0,\n 'cobaExcit.tau': 2.0,\n 'cobaInhib.tau': 5.0,\n 'cobaExcit.vrev': 0.0,\n 'cobaInhib.vrev': -70.0,\n }\n\n initial_values = {\n 'iaf_V': parameters['iaf.vrest'],\n 'tspike': -1e99,\n 'regime': 1002,\n }\n\n synapse_components = [\n ('cobaInhib', 'q'),\n ('cobaExcit', 'q'),\n ]\n\n records = [\n RecordValue(what='iaf_V', tag='Voltage [mV]', label='Membrane Voltage'),\n RecordValue(what='cobaInhib_g', tag='Conductance [ns]', label='cobaInhib-g'),\n RecordValue(what='cobaExcit_g', tag='Conductance [ns]', label='cobaExcit-g'),\n RecordValue(what='regime', tag='Regime', label='Regime'),\n ]\n\n\ndef get_component():\n\n # Create a model, composed of an iaf neuron, and\n iaf_2coba_model = ComponentClass(\n name=\"iaf_2coba\",\n subnodes={\"iaf\": iaf.get_component(),\n \"cobaExcit\": coba_synapse.get_component(),\n \"cobaInhib\": coba_synapse.get_component()})\n\n # Connections have to be setup as strings, because we are deep-copying objects.\n iaf_2coba_model.connect_ports(\"iaf.V\", \"cobaExcit.V\")\n iaf_2coba_model.connect_ports(\"iaf.V\", \"cobaInhib.V\")\n iaf_2coba_model.connect_ports(\"cobaExcit.I\", \"iaf.ISyn\")\n iaf_2coba_model.connect_ports(\"cobaInhib.I\", \"iaf.ISyn\")\n\n return iaf_2coba_model\n","sub_path":"lib9ml/python/test/unit/data/sample_components/hierachical_iaf_2coba.py","file_name":"hierachical_iaf_2coba.py","file_ext":"py","file_size_in_byte":1824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"201838548","text":"from tkinter import (Button, Entry, Frame, Label, \n Toplevel)\n\nfrom pytimer.split_handler import SplitHandler\nfrom .time_entry_widget import TimeEntryWidget\n\n\nclass NewSplitEntryBox(Toplevel):\n \"\"\"\n \"\"\"\n\n def __init__(self, controller, split_data = None):\n super().__init__()\n self.title(\"Enter New Split Information\")\n self.controller = controller\n\n self._create(split_data)\n self._arrange()\n\n def _create(self, split_data):\n self.title_label = Label(self, text = \"Title\")\n self.title_entry = Entry(self)\n self.new_segment_button = Button(self, text = \"New Segment\",\n command = self._add_segment) \n self.segment_area = Frame(self)\n self.segments = []\n\n if split_data is not None:\n self._add_split_data(split_data)\n\n self.confirm_button = Button(self, \n text = \"Confirm\",\n command = lambda: \n self.controller.new_split_callback(self._retrieve_and_close))\n self.bind(\"\", lambda event: \n self.controller.new_split_callback(self._retrieve_and_close))\n\n self.cancel_button = Button(self, \n text = \"Cancel\", \n command = lambda: self.destroy())\n self.bind(\"\", lambda event: self.destroy())\n\n def _arrange(self):\n self.title_label.grid(row = 0, column = 0)\n self.title_entry.grid(row = 0, column = 1)\n self.new_segment_button.grid(row = 1, column = 0)\n self.segment_area.grid(row = 2, column = 0, columnspan = 2)\n self.cancel_button.grid(row = 3, column = 0)\n self.confirm_button.grid(row = 3, column = 1)\n for segment in self.segments:\n segment.pack()\n\n def _add_split_data(self, split_data):\n \"\"\"\n Parse the 
split data into segments, create widgets for those segments.\n \"\"\"\n self.title_entry.insert(0, split_data[SplitHandler.TITLE_KEY])\n for i, segment in enumerate(split_data[SplitHandler.SEGMENTS_KEY]):\n segment_label = segment[SplitHandler.Segment.LABEL_KEY]\n segment_time = segment[SplitHandler.Segment.BEST_TIME_KEY]\n new_segment_frame = self.AddSegmentFrame(self.segment_area, i)\n new_segment_frame.update(segment_label, segment_time)\n self.segments.append(new_segment_frame)\n\n def _add_segment(self):\n new_segment = self.AddSegmentFrame(self.segment_area, \n len(self.segments))\n new_segment.pack()\n self.segments.append(new_segment)\n\n def _retrieve_and_close(self):\n data_dict = {\n SplitHandler.VERSION_KEY: SplitHandler.CURRENT_FILE_VERSION,\n SplitHandler.TITLE_KEY: self.title_entry.get(),\n SplitHandler.SEGMENTS_KEY: [segment.get_data() \n for segment in self.segments\n if not segment.removed] }\n self.destroy()\n return data_dict\n\n\n class AddSegmentFrame(Frame):\n \"\"\"\n \"\"\"\n\n def __init__(self, root, index):\n super().__init__(root)\n self.removed = False\n\n self._create()\n self._arrange()\n\n def _create(self):\n self.name_label = Label(self, text = \"Name\")\n self.name_entry = Entry(self)\n\n self.time_label = Label(self, text = \"Best Time\")\n self.time_entry = TimeEntryWidget(self)\n\n self.delete_button = Button(self, text = \"Remove Segment\", \n command = self._remove)\n\n def _arrange(self):\n self.name_label.grid(row = 0, column = 0)\n self.name_entry.grid(row = 1, column = 0)\n\n self.time_label.grid(row = 0, column = 1)\n self.time_entry.grid(row = 1, column = 1)\n\n self.delete_button.grid(row = 1, column = 2)\n\n def update(self, title, time):\n self.name_entry.insert(0, title)\n self.time_entry.insert(time)\n\n def _remove(self):\n self.removed = True\n self.destroy()\n\n def get_data(self):\n data_dict = {\n SplitHandler.Segment.LABEL_KEY: \n self.name_entry.get(),\n SplitHandler.Segment.BEST_TIME_KEY: \n self.time_entry.get_time_in_ms()\n }\n return data_dict\n","sub_path":"pytimer/new_split_entry_box.py","file_name":"new_split_entry_box.py","file_ext":"py","file_size_in_byte":4680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"342387406","text":"from isBinaryTree import Node\nfrom Traversal import TreeTraversal\nimport collections\n\n\nclass TreeNode:\n def __init__(self, val, left=None, right=None):\n self.left = left\n self.right = right\n self.parent = None\n self.val = val\n\n\ndef lca(root: TreeNode, x: TreeNode, y: TreeNode) -> TreeNode:\n if not root:\n return\n if root.val == x.val or root.val == y.val:\n return root\n\n left_search = lca(root.left, x, y)\n right_search = lca(root.right, x, y)\n if left_search is not None and right_search is not None:\n return root\n if left_search is None and right_search is None:\n return None\n\n return left_search if left_search else right_search\n\n\ndef tree_height(root: TreeNode) -> int:\n if not root:\n return -1\n\n left_height = tree_height(root.left) + 1\n right_height = tree_height(root.right) + 1\n\n return max(right_height, left_height)\n\n\ndef isSymmetric(root: TreeNode) -> bool:\n return helper(root, root)\n\n\ndef helper(root1: TreeNode, root2: TreeNode) -> bool:\n if not root1 and not root2:\n return True\n if not root1 or not root2:\n return False\n return root1.val == root2.val and helper(root1.left, root2.right) and helper(root1.right, root2.left)\n\n\ndef revert_binary(root: TreeNode) -> TreeNode:\n if not root:\n 
return\n temp = root.left\n root.left = root.right\n root.right = temp\n\n revert_binary(root.left)\n revert_binary(root.right)\n return root\n\n\ndef right_view(root: TreeNode):\n if not root:\n return\n res = []\n stack = collections.deque()\n stack.append(root)\n while stack:\n row = []\n rowSize = len(stack)\n while rowSize > 0:\n current_node = stack.popleft()\n if current_node.left is not None:\n stack.append(current_node.left)\n if current_node.right is not None:\n stack.append(current_node.right)\n row.append(current_node.val)\n rowSize -= 1\n res.append(row[-1])\n\n return res\n\n\ndef zigzag_travelsal(root: TreeNode) -> None:\n if not root:\n return\n\n stack = collections.deque()\n stack.append(root)\n res = []\n result = []\n count = -1\n\n while stack:\n rowSize = len(stack)\n row = []\n while rowSize > 0:\n current_node = stack.popleft()\n if current_node.left is not None:\n stack.append(current_node.left)\n if current_node.right is not None:\n stack.append(current_node.right)\n\n row.append(current_node.val)\n rowSize -= 1\n result.append(row[0])\n count += 1\n if count % 2 == 0:\n res.append([count,row])\n elif count % 2 != 0:\n res.append([count, row[::-1]])\n print(\"Tree level:\", count, \"level\")\n return res\n\n\nif __name__ == \"__main__\":\n # root = TreeNode('a')\n # root.left = TreeNode('b')\n # root.left.parent = root\n # root.right = TreeNode('c')\n # root.right.parent = root\n # a = root.right.left = TreeNode('d')\n # root.right.left.parent = root.right\n # b = root.right.right = TreeNode('e')\n # root.right.right.parent = root.right\n #\n # res = lowest_common_ancestor(root, TreeNode('b'), TreeNode('e'))\n # print(res.val)\n\n tree = TreeNode('a')\n tree.left = TreeNode('b')\n tree.right = TreeNode('c')\n tree.left.left = TreeNode('f')\n tree.right.left = TreeNode('d')\n tree.right.right = TreeNode('e')\n tree.right.right.right = TreeNode('l')\n tree.right.right.left = TreeNode('m')\n tree.right.right.right.right = TreeNode('n')\n tree.right.right.right.left = TreeNode('o')\n\n result = lca(tree, TreeNode('f'), TreeNode('m'))\n print(result.val)\n print(tree_height(tree))\n\n root = TreeNode(1, TreeNode(5, TreeNode(7), TreeNode(6)), TreeNode(8, TreeNode(9), TreeNode(10)))\n root_revert = revert_binary(root)\n\n TreeTraversal().print_tree(root)\n\n print(\"Right view: \", right_view(root))\n\n TreeTraversal().print_tree(tree)\n print(zigzag_travelsal(tree))\n","sub_path":"BinaryTree/LCA-BinaryTree.py","file_name":"LCA-BinaryTree.py","file_ext":"py","file_size_in_byte":4106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"606780358","text":"import socket\nfrom flask import views, jsonify, g, request\nfrom webargs.flaskparser import use_args\nfrom webargs import fields\nfrom marshmallow import Schema\nfrom .db import db, DdnsSchema\nfrom .auth import auth\n\n\ndef validate_ipadress(address, v=4):\n if v is 6:\n try:\n socket.inet_pton(socket.AF_INET6, address)\n except socket.error: # not a valid address\n return False\n return True\n if v is 4:\n try:\n socket.inet_pton(socket.AF_INET, address)\n except AttributeError: # no inet_pton here, sorry\n try:\n socket.inet_aton(address)\n except socket.error:\n return False\n return address.count('.') == 3\n except socket.error: # not a valid address\n return False\n return True\n\n\nclass DdnsGetModel(Schema):\n ip = fields.Str()\n\n class Meta:\n strict = False\n\n\nclass PublicView(views.MethodView):\n @auth.login_required\n @use_args(DdnsGetModel)\n def 
get(self, args):\n if 'ip' in args.keys():\n if validate_ipadress(args['ip'], 4):\n g.device.ip4_give = args['ip']\n g.device.ip6_give = None\n if validate_ipadress(args['ip'], 6):\n g.device.ip6_give = args['ip']\n g.device.ip4_give = None\n if validate_ipadress(request.headers['X-Real-Ip'], 4):\n g.device.ip4_send = request.headers['X-Real-Ip']\n g.device.ip6_send = None\n if validate_ipadress(request.headers['X-Real-Ip'], 6):\n g.device.ip6_send = request.headers['X-Real-Ip']\n g.device.ip4_send = None\n g.device.updated_on = db.func.now()\n db.session.commit()\n return jsonify(DdnsSchema().dump(g.device).data)\n","sub_path":"app/public_view.py","file_name":"public_view.py","file_ext":"py","file_size_in_byte":1798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"308522111","text":"\n\n# class header\nclass _BOUT():\n\tdef __init__(self,): \n\t\tself.name = \"BOUT\"\n\t\tself.definitions = [u'a short period of illness or involvement in an activity: ', u'a boxing or wrestling match: ']\n\n\t\tself.parents = []\n\t\tself.children = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_bout.py","file_name":"_bout.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"432917154","text":"MOD = 1000000007\nMAXN = 101\n\ncache = [0] * MAXN\ncache[1] = 1\ncache[2] = 2\n\nfor i in range(3, MAXN):\n cache[i] = cache[i-1]+cache[i-2]\n\ndef asymtiling(n):\n if n == 1 or n == 2: return 0\n half = n // 2\n ans = cache[n] - cache[half]\n if n % 2 == 0:\n ans = ans - cache[half-1]\n return ans % MOD\n\ndef main():\n C = int(input())\n for _ in range(C):\n n = int(input())\n print(asymtiling(n))\n\nif __name__ == '__main__':\n main()\n","sub_path":"asymtiling/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"51590041","text":"from flask import Flask, render_template, url_for, request, redirect, jsonify\nfrom flask_sqlalchemy import SQLAlchemy\nfrom datetime import datetime\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///test.db'\ndb = SQLAlchemy(app)\n\n@app.route(\"/\", methods=['POST','GET'])\ndef index():\n from models import Writing\n if request.method == \"POST\":\n from wtform_fields import WritingForm\n form = WritingForm(request.form)\n if form.validate():\n content = Writing(content = request.form[\"content\"])\n db.session.add(content)\n db.session.commit()\n return jsonify({\"status\" : \"success\"}), 200\n else:\n respr = jsonify({\"status\" : \"fail\"})\n respr.status_code = 500\n return respr\n\n else:\n quotes = Writing.query.order_by(Writing.date_created).all()\n return render_template(\"index.html\", quotes=quotes, clink = \"/\")\n\n@app.route(\"/delete/<int:id>\")\ndef delete(id):\n from models import Writing\n task_to_delete = Writing.query.get_or_404(id)\n\n try:\n db.session.delete(task_to_delete)\n db.session.commit()\n return redirect(\"/\")\n except:\n return 'There was a problem deleting that task'\n\n@app.route(\"/update/<int:id>\", methods=['POST','GET'])\ndef update(id):\n from models import Writing\n data = Writing.query.get_or_404(id)\n if request.method == \"POST\":\n from wtform_fields import WritingForm\n form = WritingForm(request.form)\n if
form.validate():\n data.content = request.form[\"content\"]\n db.session.commit()\n return redirect(\"/\")\n else:\n respr = jsonify({\"status\" : \"fail\"})\n respr.status_code = 500\n return respr\n else:\n return render_template('update.html', task = data, clink = data.id)\n\napp.config.update(\n DEBUG=True,\n SECRET_KEY='This key must be secret!',\n WTF_CSRF_ENABLED=False,\n)\n\nif __name__ == \"__main__\":\n from models import *\n db.create_all()\n app.run()","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"231026495","text":"from django.test import TestCase\n\n_models = __import__(\"MVC Structure.Model.models\")\n_models = _models.Model.models\n\nclass ProductTest(TestCase):\n def setUp(self):\n self.category = _models.Category()\n self.category.title = \"Smartphone\"\n self.category.save()\n\n self.product = _models.Product()\n self.product.name = \"Iphone 12 pro max\"\n self.product.category = self.category\n self.product.preview_text = \"Iphone 12 pro max for sell\"\n self.product.detail_text = \"Iphone 12 pro max with 12 gb ram\"\n self.product.price = 100000\n self.product.save()\n\n def test_oldPrice(self):\n self.assertEqual(self.product.old_price, 0, \"Default old price should be 0\")\n\n def test_str(self):\n expected_string = \"Iphone 12 pro max\"\n self.assertEqual(self.product.__str__(), expected_string, \"String repsentation of a product should be same as product name\")\n","sub_path":"TestCode/ProductTest.py","file_name":"ProductTest.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"183192463","text":"import copy\nimport random\nimport sys\n\nimport networkx as nx\n\nimport graph\n\n\nclass Solution:\n \"\"\"\n Contains solution of the assignment problem.\n \"\"\"\n def __init__(self, nx_graph: nx.Graph):\n self.nxGraph = nx_graph\n\n if not nx.is_bipartite(nx_graph):\n raise (Exception, 'Provided graph must be bipartite.')\n\n part_1, part_2 = nx.bipartite.sets(nx_graph)\n part_1_len, part_2_len = len(part_1), len(part_2)\n\n if part_1_len < part_2_len:\n self.l_part, self.g_part = list(part_1), list(part_2)\n self.l_part_len, self.g_part_len = part_1_len, part_2_len\n else:\n self.l_part, self.g_part = list(part_2), list(part_1)\n self.l_part_len, self.g_part_len = part_2_len, part_1_len\n\n # initialize random solution\n self.solution = self.g_part[0:self.l_part_len]\n self.remaining = self.g_part[self.l_part_len:self.g_part_len]\n\n def get_estimate(self):\n \"\"\"\n Returns value estimate of the solution.\n \"\"\"\n estimate = 0.0\n for i in range(0, self.l_part_len):\n estimate += self.__get_weight(self.l_part[i], self.solution[i])\n return estimate\n\n def get_neighbour(self):\n \"\"\"\n Returns a neighbour solution (with only one edge swapped).\n \"\"\"\n neighbour = copy.copy(self)\n\n neighbour.solution = copy.deepcopy(self.solution)\n neighbour.remaining = copy.deepcopy(self.remaining)\n\n neighbour.__change_edge_random()\n\n return neighbour\n\n def draw(self):\n \"\"\"\n Draws the solution.\n \"\"\"\n graph.Graph.draw_bipartite(self.nxGraph, list(zip(self.l_part, self.solution)))\n\n def __get_weight(self, v_1, v_2):\n \"\"\"\n Returns weight of the edge v_1:v_2.\n \"\"\"\n if self.nxGraph.has_edge(v_1, v_2) and 'weight' in self.nxGraph[v_1][v_2]:\n return self.nxGraph[v_1][v_2]['weight']\n else:\n return sys.maxsize\n\n def 
__change_edge_random(self):\n \"\"\"\n Randomly changes an edge in the solution.\n \"\"\"\n idx_1, idx_2 = random.sample(range(0, self.g_part_len - 1), 2)\n self.__change_edge(idx_1, idx_2)\n\n def __change_edge(self, idx_1, idx_2):\n \"\"\"\n If vertices[idx_1] and vertices[idx_2] are connected then swaps its edges.\n If only one of them is connected than connecting edge is removed and vertices[idx_1]:vertices[idx_2] edge added.\n If both are disconnected then does not change edges state.\n \"\"\"\n if idx_1 > self.g_part_len:\n raise (Exception, 'Index 1 must be lower than the number of vertices in greater bipartite.')\n\n if idx_2 > self.g_part_len:\n raise (Exception, 'Index 2 must be lower than the number of vertices in greater bipartite.')\n\n v_1 = self.solution[idx_1] if idx_1 < self.l_part_len else self.remaining[idx_1 - self.l_part_len]\n v_2 = self.solution[idx_2] if idx_2 < self.l_part_len else self.remaining[idx_2 - self.l_part_len]\n\n if idx_2 < self.l_part_len:\n self.solution[idx_2] = v_1\n else:\n self.remaining[idx_2 - self.l_part_len] = v_1\n\n if idx_1 < self.l_part_len:\n self.solution[idx_1] = v_2\n else:\n self.remaining[idx_1 - self.l_part_len] = v_2\n","sub_path":"solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":3357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"8910105","text":"import textwrap\nimport os\nimport subprocess\nimport sys\nimport signal\nimport glob\nimport itertools\nimport pathlib\n\n\nclass PathReader:\n @staticmethod\n def _read_file(filename):\n root = os.path.dirname(filename)\n return (\n os.path.join(root, path.rstrip())\n for path in open(filename)\n if path.strip()\n and not path.startswith('#')\n and not path.startswith('import ')\n )\n\n @classmethod\n def _read(cls, target):\n \"\"\"\n As .pth files aren't honored except in site dirs,\n read the paths indicated by them.\n \"\"\"\n pth_files = glob.glob(os.path.join(target, '*.pth'))\n file_items = map(cls._read_file, pth_files)\n return itertools.chain.from_iterable(file_items)\n\n\ndef _inject_sitecustomize(target):\n \"\"\"\n Create a sitecustomize file in the target that will install\n the target as a sitedir.\n \"\"\"\n hook = textwrap.dedent(\n f\"\"\"\n import site\n site.addsitedir({target!r})\n \"\"\"\n ).lstrip()\n sc_fn = pathlib.Path(target) / 'sitecustomize.py'\n sc_fn.write_text(hook)\n\n\ndef _pythonpath():\n return 'JYTHONPATH' if sys.platform.startswith('java') else 'PYTHONPATH'\n\n\ndef _build_env(target):\n \"\"\"\n Prepend target and .pth references in target to PYTHONPATH\n \"\"\"\n key = _pythonpath()\n env = dict(os.environ)\n suffix = env.get(key)\n prefix = (target,)\n items = itertools.chain(\n prefix, PathReader._read(target), (suffix,) if suffix else ()\n )\n joined = os.pathsep.join(items)\n env[key] = joined\n return env\n\n\ndef _setup_env(target):\n _inject_sitecustomize(target)\n return _build_env(target)\n\n\ndef with_path(target, params):\n \"\"\"\n Launch Python with target on the path and params\n \"\"\"\n\n def null_handler(signum, frame):\n pass\n\n signal.signal(signal.SIGINT, null_handler)\n cmd = [sys.executable] + params\n return subprocess.Popen(cmd, env=_setup_env(target)).wait()\n\n\ndef with_path_overlay(target, params):\n \"\"\"\n Overlay Python with target on the path and params\n \"\"\"\n cmd = [sys.executable] + params\n os.execve(sys.executable, cmd, 
_setup_env(target))\n","sub_path":"pip_run/launch.py","file_name":"launch.py","file_ext":"py","file_size_in_byte":2215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"69556634","text":"# Copyright (c) 2014 Mirantis, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\nclass WorkflowManagementForm(object):\n name = 'workflowManagement'\n field_specs = [{\n 'widgetMedia':\n {'css':\n {'all': ('muranodashboard/css/checkbox.css',\n 'muranodashboard/css/hide_app_name.css')\n }\n },\n 'name': 'StayAtCatalog',\n 'initial': False,\n 'description': 'If checked, you will be returned to the '\n 'Application Catalog page. If not - to the '\n 'Environment page, where you can deploy'\n ' the application.',\n 'required': False,\n 'type': 'boolean',\n 'label': 'Add more applications to the environment'}]\n validators = []\n","sub_path":"muranodashboard/catalog/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"293729792","text":"import datetime\nimport logging\nimport urllib\nfrom bulkmail.api.campaign.views import get_open_raw\n\nfrom django import http\nfrom django.conf import settings\nfrom django.core.context_processors import csrf\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom ..shortcuts import render_tpl, ok\nfrom ..auth import super_admin_required, staff_required\n\nfrom .models import ApiKey, Campaign, generate_key\nfrom .forms import ApiKeyForm\nfrom ..tracking.models import Stats, Track\n\nfrom google.appengine.api import taskqueue\nfrom google.appengine.ext import ndb\nfrom google.appengine.api import urlfetch\n\n@super_admin_required\ndef key_list (request):\n c = {\n 'keys': ApiKey.query()\n }\n return render_tpl(request, 'api/key_list.html', c)\n \ndef key_add (request):\n return key_edit_view(request)\n \n@super_admin_required\ndef key_edit_view (request, kid=None):\n instance = None\n verb = 'Add'\n form = ApiKeyForm(request.POST or None)\n \n if request.method == 'POST':\n if form.is_valid():\n akey = ApiKey(\n name=form.cleaned_data['name'],\n akey=generate_key(),\n created_by=request.user.user\n )\n akey.put()\n return http.HttpResponseRedirect('../')\n \n c = {\n 'instance': instance,\n 'verb': verb,\n 'form': form,\n }\n c.update(csrf(request))\n \n return render_tpl(request, 'api/key_edit.html', c)\n \n@csrf_exempt\n@staff_required\ndef force_compile_stats (request):\n key = request.POST.get('key', '')\n cmpgn = ndb.Key(urlsafe=key).get()\n taskqueue.add(url='/api/compile-stats', params={'list_id': cmpgn.list_id, 'campaign_id': cmpgn.campaign_id}, queue_name='stats')\n return ok()\n \n@csrf_exempt\ndef compile_stats (request):\n list_id = request.POST.get('list_id', '')\n campaign_id = request.POST.get('campaign_id', '')\n key = request.POST.get('key', '')\n cursor = request.POST.get('cursor', None)\n \n if list_id and campaign_id:\n if key:\n stat = ndb.Key(urlsafe=key).get()\n \n 
else:\n stat = Stats.query(Stats.list_id == list_id, Stats.campaign_id == campaign_id).get()\n if not stat:\n stat = Stats(list_id=list_id, campaign_id=campaign_id)\n \n stat.process(cursor=cursor)\n\n \n else:\n old = datetime.datetime.now() - datetime.timedelta(days=settings.COMPILE_STATS_PERIOD)\n for cmpgn in Campaign.query(Campaign.sent >= old).fetch():\n taskqueue.add(url='/api/compile-stats', params={'list_id': cmpgn.list_id, 'campaign_id': cmpgn.campaign_id}, queue_name='stats')\n \n return ok()\n\n@staff_required\ndef campaign_stats (request, list_id, campaign_id):\n campaign = Campaign.query(Campaign.list_id == list_id, Campaign.campaign_id == campaign_id).get()\n if campaign:\n c = {\n 'list_id': list_id,\n 'campaign_id': campaign_id,\n 'campaign': campaign,\n }\n return render_tpl(request, 'api/stats/campaign.html', c)\n \n raise http.Http404\n\n@csrf_exempt\ndef process_open (request):\n email = request.POST.get('email', '')\n list_id = request.POST.get('list_id', '')\n #campaign_id = request.POST.get('campaign_id', '')\n\n results = get_open_raw(email, list_id)\n\n if results:\n\n form_data = {'email': email, 'opens': results['opens'], 'last_open': results['last_open'], 'list_id': list_id,}\n form_data.update(settings.REPORT_PAYLOAD)\n form_data = urllib.urlencode(form_data)\n result = urlfetch.fetch(url=settings.REPORT_OPEN_URL, payload=form_data, method=urlfetch.POST, headers={'Content-Type': 'application/x-www-form-urlencoded'})\n logging.info('Open Report Status: ' + str(result.status_code))\n\n else:\n logging.info('Open Report Status: None found for %s and list %s' % (email, list_id))\n\n return ok()","sub_path":"bulkmail/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"440960637","text":"from socket import *\n\ndef Socket_client():\n try:\n client = socket() # define the protocol type (i.e. declare the socket type) and create the connection object in one step\n client.connect(('192.168.1.3',8000))\n while True:\n msg = input(\">>>\").strip()\n if len(msg) ==0:\n continue\n client.send(msg.encode(\"utf-8\"))\n data = client.recv(1024) # receive buffer size in bytes (1 KB here)\n print(\"recv:>\",data.decode())\n client.close()\n except ConnectionError as ex:\n print(ex)\n \nif __name__ == \"__main__\":\n Socket_client()\n","sub_path":"socket_client.py","file_name":"socket_client.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"526930562","text":"import datetime\nfrom itertools import zip_longest\nfrom django import template\nfrom django.utils.safestring import mark_safe\nfrom django.template.defaultfilters import stringfilter\n\nfrom ..models import (AddOnBenefitRow, AlaCarteBenefitRow, Benefit, BenefitApplies, ExplanationRow,\n SponsorCategory, SponsorPackage, SponsorLevel)\n\nregister = template.Library()\n\n@register.filter()\ndef get_all_applies(benefit, levels):\n return benefit.list_all_applies(levels)\n\n@register.simple_tag(takes_context=True)\ndef load_extra_benefits(context):\n alacarte = AlaCarteBenefitRow.objects.all().order_by('order')\n addon = AddOnBenefitRow.objects.all().order_by('order')\n context['extra_benefits'] = zip_longest(addon, alacarte)\n return ''\n\n@register.simple_tag(takes_context=True)\ndef load_sponsor_cats_levels_packages(context):\n context['sponsor_cats'] = SponsorCategory.objects.all().order_by('order')\n context['sponsor_packages'] = SponsorPackage.objects.all().order_by('order')\n context['sponsor_levels'] = SponsorLevel.objects.all().order_by('order')\n return ''\n\n@register.simple_tag(takes_context=True)\ndef load_explanations(context):\n context['explanations'] = ExplanationRow.objects.all().order_by('order')\n return ''\n\n@register.filter\n@stringfilter\ndef checkmark(text):\n checkmark = ''\n output = checkmark if text.lower() == 'yes' else '{}'.format(text)\n return mark_safe(output)\n\n@register.filter\n@stringfilter\ndef comma2br(text):\n output = \"<br>
\".join(text.split(\",\"))\n return mark_safe(output)\n\n@register.filter\n@stringfilter\ndef count_content_lines(text):\n output = [line for line in text.strip().split(\"\\n\")[:-1] if line.strip()]\n return len(output)\n","sub_path":"sponsorbenefits/templatetags/benefit_tags.py","file_name":"benefit_tags.py","file_ext":"py","file_size_in_byte":1780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"185242661","text":"# Specific to 1cake 2stone examples.\n# It seems like we can get away with loglogn permutations!!\n\n\nfrom itertools import combinations, count\nfrom math import factorial as fac\nbinom = lambda n, k : fac(n)//fac(n-k)//fac(k)\nfrom find_designs import combinations2, hits\n\ndef bits_needed(n):\n # Find number of bits needed to have enough half-bit ones\n return next(l for l in count() if l%2==0 and binom(l,l//2) >= 2*n)\n\ndef find_pieces(n):\n length = bits_needed(n)\n for i in range(2**length):\n if bin(i).count('1') == length//2:\n inv = ~i & (1< 0: return pieces[n-1][0]\n if n < 0: return pieces[-n-1][1]\n for i in range(bits_needed(n)):\n yield [[piece_map(n)>>i&1 for n in row] for row in matrix]\n\ndef matrix_to_perm(matrix):\n n = len(matrix[0])\n return sorted(range(n), key=lambda i: matrix[i].count(1))\n\ndef test_solution(seqs, n):\n for xs in combinations(range(n), 3):\n for y in range(3):\n if not any(seq.index(xs[y]) == min(seq.index(x) for x in xs)\n for seq in seqs):\n return False\n return True\n\nif __name__ == '__main__':\n import numpy as np\n for n in range(4):\n print(np.array(make_matrix(n)))\n for matrix in make_perm_matrices(n):\n print(np.array(matrix))\n print(matrix_to_perm(matrix))\n perms = list(map(matrix_to_perm, make_perm_matrices(n)))\n print(2**n, len(perms), test_solution(perms, 2**n))\n","sub_path":"done/minhash/code/find_designs2.py","file_name":"find_designs2.py","file_ext":"py","file_size_in_byte":2233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"266831851","text":"from datetime import date\nfrom financial_models import utils\n\nclass Valuations(object):\n def __init__(self, financial_statements, drivers):\n self.financial_statements = financial_statements\n\n self.terminal_ebitda_multiple = drivers['terminal_ebitda_multiple']\n self.beta = drivers['beta']\n\n self.tax_rate = drivers['tax_rate_pct'] / 100.0\n self.risk_free_rate = drivers['risk_free_rate_pct'] / 100.0\n self.market_risk_premium = drivers['market_risk_premium_pct'] / 100.0\n self.cost_of_debt = drivers['cost_of_debt_pct'] / 100.0\n self.debt_to_equity = drivers['debt_to_equity_pct'] / 100.0\n self.gdp_growth_rate = drivers['gdp_growth_rate_pct'] / 100.0\n\n @property\n def cost_of_equity(self):\n return self.risk_free_rate + (\n self.beta * self.market_risk_premium\n )\n\n @property\n def equity_to_debt(self):\n return 1 - self.debt_to_equity\n\n @property\n def wacc(self):\n return (\n self.cost_of_equity * self.equity_to_debt\n ) + (\n self.cost_of_debt *\n (1 - self.tax_rate) *\n self.debt_to_equity\n )\n\n def get_terminal_value(self, year):\n ebitda = self.financial_statements.get_ebitda(year)\n return self.terminal_ebitda_multiple * ebitda\n\n def get_pv_of_terminal_value(self, year):\n rate = self.wacc\n dates = [date(y, 1, 1) for y in range(\n self.financial_statements.starting_year + 1,\n year + 1\n )]\n values = [0 for d in dates]\n values[-1] = self.get_terminal_value(year)\n return utils.xnpv(rate, zip(dates, values))\n\n def 
get_pv_of_unlevered_fcf(self, year):\n rate = self.wacc\n dates = []\n values = []\n for y in range(self.financial_statements.starting_year + 1, year + 1):\n dates.append(date(y, 1, 1))\n values.append(self.financial_statements.get_unlevered_fcf(y))\n return utils.xnpv(rate, zip(dates, values))\n\n def get_dcf_firm_value_terminal_method(self, year):\n return (\n self.get_pv_of_unlevered_fcf(year) + \n self.get_pv_of_terminal_value(year)\n )\n\n def get_gordon_growth_terminal_value(self, year):\n unlevered_fcf = self.financial_statements.get_unlevered_fcf(year)\n return (\n unlevered_fcf * (1 + self.gdp_growth_rate) /\n (self.wacc - self.gdp_growth_rate)\n )\n\n def get_dcf_firm_value_gordon_growth(self, year):\n return (\n self.get_pv_of_unlevered_fcf(year) + \n self.get_gordon_growth_terminal_value(year)\n )\n\n","sub_path":"financial_models/valuations.py","file_name":"valuations.py","file_ext":"py","file_size_in_byte":2661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"402515537","text":"\"\"\"\nGiven a string, your task is to count how many palindromic substrings in this string.\n\nThe substrings with different start indexes or end indexes are counted as different substrings even they\nconsist of same characters.\n\nExample 1:\n\nInput: \"abc\"\nOutput: 3\nExplanation: Three palindromic strings: \"a\", \"b\", \"c\".\n\n\nExample 2:\n\nInput: \"aaa\"\nOutput: 6\nExplanation: Six palindromic strings: \"a\", \"a\", \"a\", \"aa\", \"aa\", \"aaa\".\n\n\nNote:\n\nThe input string length won't exceed 1000.\n\"\"\"\n\n\n# center expand, time O(n^2), space O(1)\nclass Solution(object):\n def countSubstrings(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n n = len(s)\n res = 0\n for c in xrange(2 * n - 1):\n l = c / 2\n r = l + c % 2\n while l >= 0 and r < n and s[l] == s[r]:\n res += 1\n l -= 1\n r += 1\n return res\n\n\n# DP, time O(n^2), space O(n^2)\nclass Solution2(object):\n def countSubstrings(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n if not s:\n return 0\n\n n = len(s)\n dp = [[False for x in range(n)] for y in range(n)]\n cnt = 0\n\n # Check for a window of size 1\n for i in range(n):\n dp[i][i] = True\n cnt += 1\n\n # Check for a window of size 2\n for i in range(n-1):\n if s[i] == s[i+1]:\n dp[i][i+1] = True\n cnt += 1\n\n # Check windows of size 3\n for k in range(3, n+1):\n for i in range(n-k+1):\n j = i+k-1\n if dp[i+1][j-1] and s[i] == s[j]:\n dp[i][j] = True\n cnt += 1\n\n return cnt\n\n\n# time O(n^3)\nclass Solution3 (object):\n def countSubstrings(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n cnt = 0\n n = len(s)\n for i in range(n + 1):\n for j in range(i, n + 1):\n tmp = s[i:j]\n if tmp == '':\n continue\n if tmp == tmp[::-1]:\n cnt += 1\n\n return cnt\n\n\nif __name__ == '__main__':\n mySol = Solution()\n x = 'abc'\n print(\"inputs: %s\" % x)\n print('outputs: %s' % mySol.countSubstrings(x))\n","sub_path":"python/palindromic_substrings.py","file_name":"palindromic_substrings.py","file_ext":"py","file_size_in_byte":2303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"648294386","text":"from __future__ import unicode_literals\n\nimport datetime\n\nfrom django.db import models\n\n\nCONDITION = (\n ('New', 'New'),\n ('Good', 'Good'),\n ('Old', 'Old'),\n)\n\nSHELF_LOCATION = (\n ('1A', '1A'),\n ('1B', '1B'),\n ('1C', '1C'),\n)\n\n\nclass Book(models.Model):\n\n YEAR_CHOICES = [(r, r)\n for r in range(1984, 
datetime.date.today().year + 1)]\n\n title = models.CharField(max_length=50)\n publisher = models.CharField(max_length=255)\n publication_date = models.DateField(\n blank=True, null=True, help_text='dd/mm/yy')\n author = models.CharField(max_length=255, blank=True)\n editor = models.CharField(max_length=255, blank=True)\n edition = models.CharField(max_length=255, blank=True)\n\n year = models.IntegerField('year',\n choices=YEAR_CHOICES,\n default=datetime.datetime.now().year)\n\n pages = models.IntegerField(blank=True, null=True)\n\n condition = models.CharField('condition',\n max_length=10,\n choices=CONDITION,\n default='choose condition')\n\n shelf_location = models.CharField('shelf_location',\n max_length=10,\n choices=SHELF_LOCATION,\n default='choose location')\n\n notes = models.TextField()\n copies = models.IntegerField(default=1)\n","sub_path":"vitabu/books/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"124714531","text":"# -*- coding: utf-8 -*-\n\n\"\"\" Web report.\"\"\"\n\nimport logging\n\nimport matplotlib.pyplot as pyplot\n\nfrom util.dataloader import GetSortedPriceDataFromDB\nfrom util.zoolike import ZooLike\nfrom util.gchart_util import GetGLineUrl\nfrom util.gchart_util import OpenWeb\n\nclass WebReport(object):\n pass\n\nclass WebReportSSLS(WebReport):\n def __init__(self, pivot_ticker, custom_ticker):\n \"\"\"\n Veriables:\n kospi_data: A list [(datetime(d), last_price(f)),]\n custom_data: A list [(datetime(d), last_price(f)),]\n \"\"\"\n \n self.kospi_ticker = pivot_ticker\n self.custom_ticker = custom_ticker\n \n self.pvalue = 0.0\n self.beta = 0.0\n self.constant = 0.0\n self.last_date = None\n self.cointegrated = False\n \n self.zoo_data = None\n \n def __ConvertToDict(self, list):\n dict = {}\n for row in list:\n dict[row[0]] = row[1]\n return dict\n\n def __CreateZoo(self):\n # load pivot data\n sorted_pivot_data_list = GetSortedPriceDataFromDB(self.kospi_ticker)\n sorted_date_list = [row[0] for row in sorted_pivot_data_list]\n pivot_data_dict = self.__ConvertToDict(sorted_pivot_data_list)\n \n # set pivot data\n zoo = ZooLike(sorted_date_list)\n zoo.AddMarketData(self.kospi_ticker, pivot_data_dict)\n \n # add market data\n sorted_ticker_data_list = \\\n GetSortedPriceDataFromDB(self.custom_ticker)\n ticker_data_dict = self.__ConvertToDict(sorted_ticker_data_list)\n zoo.AddMarketData(self.custom_ticker, ticker_data_dict)\n \n return zoo\n\n def Show(self):\n # Create zoo.\n self.zoo_data = self.__CreateZoo()\n \n # Show chart.\n diff_list = []\n start_index = self.zoo_data.GetIntersectionStartIndex()\n count_total = self.zoo_data.GetDateCountTotal()\n \n beta = float(self.beta)\n \n for index in range(start_index, count_total):\n kospi_data = self.zoo_data.GetDataFromIndex(self.kospi_ticker, index)\n custom_data = self.zoo_data.GetDataFromIndex(self.custom_ticker, index)\n \n if kospi_data is None or custom_data is None:\n continue\n \n diff = kospi_data[0] - (beta * custom_data[0] + float(self.constant))\n diff_list.append(diff)\n \n pyplot.plot(diff_list)\n pyplot.show()\n","sub_path":"kb_codes/sandbox/python/market_boy/src/report/web_report.py","file_name":"web_report.py","file_ext":"py","file_size_in_byte":2242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"542747253","text":"import os\nimport re\nimport subprocess\nfrom loguru import 
logger\n\n\"\"\"Device information collection for Android/IOS\n1. Get the device brand\n2. Get the device model\n3. Get the device name\n4. Get the device system version\n5. Get the device resolution\n6. Get the device's maximum memory\n7. Get the number of CPU cores\n8. Get the device's current free memory\n\"\"\"\n\n\ndef eliminate_special_symbols(func):\n # strip newline characters from the wrapped function's result\n def perform_func(*args, **kwargs):\n results: str = func(*args, **kwargs)\n return results.replace('\\n', '') if results else None\n return perform_func\n\n\ndef eliminate_blank_space(func):\n # strip redundant spaces from the wrapped function's result\n def perform_func(*args, **kwargs):\n results: str = func(*args, **kwargs)\n return results.replace(' ', '') if results else None\n return perform_func\n\n\nclass AndroidInfo:\n\n _device_id = None\n\n def __init__(self, device_id):\n self.device_id = device_id\n\n @property\n def device_id(self):\n return self._device_id\n\n @device_id.setter\n def device_id(self, device_id):\n # Lock.acquire()\n self._device_id = device_id\n # Lock.release()\n\n def us_cmd(self, cmd):\n # results = os.system(\"adb \" + cmd)\n results = subprocess.Popen(\"adb \" + cmd, shell=True, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE).stdout.readlines()\n return results\n\n @eliminate_special_symbols\n def get_device_brand(self) -> str:\n # get the device brand\n cmd = \"-s %s shell getprop ro.product.brand\" % (self.device_id)\n results = self.us_cmd(cmd)[0]\n return results.decode()\n\n # def get_mobile_system_information(self, devices_id):\n # # Android: get the phone's system version\n # adb_commod = \"adb -d -s \" + devices_id + \" shell getprop ro.build.version.release \"\n # devices_system_data = os.popen(adb_commod)\n # devices_system_msg = devices_system_data.read()\n # devices_system_data.close()\n # return float(devices_system_msg)\n\n @eliminate_special_symbols\n def get_model_device(self) -> str:\n # get the device model\n cmd = \"-s %s shell getprop ro.product.model\" % (self.device_id)\n results = self.us_cmd(cmd)[0]\n return results.decode()\n\n @eliminate_special_symbols\n def get_device_name(self) -> str:\n # get the device name\n cmd = \"-s %s shell getprop ro.product.device\" % (self.device_id)\n results = self.us_cmd(cmd)[0]\n return results.decode()\n\n @eliminate_special_symbols\n def get_device_system_version(self) -> str:\n # get the system version\n cmd = \"-s %s shell getprop ro.build.version.release\" % (self.device_id)\n results = self.us_cmd(cmd)[0]\n return results.decode()\n\n @eliminate_blank_space\n def get_device_resolution(self) -> str:\n # get the device resolution\n # Qualcomm platform\n cmd = \"-s %s shell wm size\" % (self.device_id)\n results = self.us_cmd(cmd)[0]\n results = re.search(r\"Physical size:(.*)\", results.decode())\n if results:\n results = results.group(1)\n return results\n\n @eliminate_blank_space\n def get_maximum_memory(self) -> str:\n # get the maximum memory\n cmd = \"-s %s shell cat /proc/meminfo\" % (self.device_id)\n results = self.us_cmd(cmd)[0]\n results = re.search(r\"MemTotal:(.*)\", results.decode())\n if results:\n results = results.group(1)\n return results\n\n @eliminate_blank_space\n def get_number_cpu_cores(self):\n # get the number of CPU cores\n cmd = \"-s %s shell cat /proc/cpuinfo\" % (self.device_id)\n results = self.us_cmd(cmd)\n cpu_core_num = None\n for item in results:\n item = item.decode()\n cpu_core_num = re.search(r\"CPU architecture:(.*)\", item)\n if cpu_core_num:\n cpu_core_num = cpu_core_num.group(1)\n break\n\n return cpu_core_num\n\n @eliminate_special_symbols\n def get_ipaddress(self) -> str:\n # get the device IP address\n cmd = \"-s %s shell ifconfig wlan0 | grep 'inet addr'\" % (self.device_id)\n results = self.us_cmd(cmd)[0]\n return results.decode()\n\n def get_current_remaining_memory(self) -> str:\n # get the device's current free memory (note: this command actually queries ro.product.model, duplicating get_model_device)\n cmd = \"-s %s shell getprop ro.product.model\" % (self.device_id)\n results
= self.us_cmd(cmd)[0]\n return results.decode()\n\n\nclass IosInfo:\n # todo:实现IOS提取设备信息\n pass\n\n\nclass PhoneInfo:\n __Android_devices = AndroidInfo\n __Ios_devices = IosInfo\n\n def __init__(self, device_type, device_id):\n self.device_info = {}\n if device_type == 'android':\n self.phone_device = self.__Android_devices(device_id)\n self.device_info['platformName'] = 'Android'\n else:\n self.phone_device = self.__Ios_devices()\n self.device_info['platformName'] = 'IOS'\n\n def get_device_info(self):\n # todo: 根据type返回设备信息, all返回全部设备信息\n # todo: 根据device判断并从对应的设备平台获取信息\n device_brand = self.phone_device.get_device_brand()\n model_device = self.phone_device.get_model_device()\n device_name = self.phone_device.get_device_name()\n system_version = self.phone_device.get_device_system_version()\n device_resolution = self.phone_device.get_device_resolution()\n maximum_memory = self.phone_device.get_maximum_memory()\n cpu_cores = self.phone_device.get_number_cpu_cores()\n # ipaddress = self.phone_device.get_ipaddress()\n device_info = {\n \"device_brand\": device_brand,\n \"model_device\": model_device,\n \"device_name\": device_name,\n \"platformVersion\": system_version,\n \"device_resolution\": device_resolution,\n \"maximum_memory\": maximum_memory,\n \"cpu_cores\": cpu_cores,\n # \"ipaddress\": ipaddress\n }\n return device_info\n\n\nif __name__ == '__main__':\n phone_info = PhoneInfo('android', '966d5386')\n print(phone_info.get_device_info())","sub_path":"base/BasePhoneInfo.py","file_name":"BasePhoneInfo.py","file_ext":"py","file_size_in_byte":6129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"280607875","text":"__author__ = 'tinglev@kth.se'\n\nimport unittest\nimport mock\nfrom test import mock_test_data # pylint: disable=C0411\nfrom modules.steps.first_conditional_stop import FirstConditionalStop\nfrom modules.util import data_defs, cache_defs\n\nclass TestFirstConditionalStop(unittest.TestCase):\n\n def test_service_uses_semver(self):\n pipeline_data = mock_test_data.get_pipeline_data()\n step = FirstConditionalStop()\n result = step.service_uses_semver(pipeline_data)\n self.assertTrue(result)\n pipeline_data[data_defs.SERVICES][1][data_defs.S_IMAGE][data_defs.IMG_IS_SEMVER] = False\n result = step.service_uses_semver(pipeline_data)\n self.assertFalse(result)\n\n def test_caches_are_equal(self):\n pipeline_data = {data_defs.STACK_FILE_DIR_HASH: 'abc123'}\n pipeline_data[data_defs.CACHE_ENTRY] = None\n step = FirstConditionalStop()\n result = step.caches_are_equal(pipeline_data)\n self.assertFalse(result)\n pipeline_data[data_defs.CACHE_ENTRY] = {cache_defs.DIRECTORY_MD5: '123abc'}\n result = step.caches_are_equal(pipeline_data)\n self.assertFalse(result)\n pipeline_data[data_defs.CACHE_ENTRY] = {cache_defs.DIRECTORY_MD5: 'abc123'}\n result = step.caches_are_equal(pipeline_data)\n self.assertTrue(result)\n\n def test_run_step(self):\n pipeline_data = mock_test_data.get_pipeline_data()\n pipeline_data[data_defs.CACHE_ENTRY] = None\n step = FirstConditionalStop()\n step.stop_pipeline = mock.Mock()\n # semver usage + changed hash: no stop\n step.run_step(pipeline_data)\n step.stop_pipeline.assert_not_called()\n pipeline_data[data_defs.CACHE_ENTRY] = {cache_defs.DIRECTORY_MD5: 'alejfbabovudbasepvbsoev'}\n step.stop_pipeline.reset_mock()\n # semver usage + equal hash: no stop\n step.run_step(pipeline_data)\n step.stop_pipeline.assert_not_called()\n pipeline_data[data_defs.SERVICES][1][data_defs.S_IMAGE][data_defs.IMG_IS_SEMVER] = 
False\n step.stop_pipeline.reset_mock()\n # no semver usage + equal hash: stop\n step.run_step(pipeline_data)\n step.stop_pipeline.assert_called_once()\n pipeline_data[data_defs.STACK_FILE_DIR_HASH] = 'not_equal'\n step.stop_pipeline.reset_mock()\n # no semver usage + changed hash: no stop\n step.run_step(pipeline_data)\n step.stop_pipeline.assert_not_called()\n","sub_path":"test/unit/test_first_conditional_stop.py","file_name":"test_first_conditional_stop.py","file_ext":"py","file_size_in_byte":2449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"507593704","text":"'''\nradixSort(listIn):\n Entrada: Lista de enteros\n Salida: Retorna las lista ordenada de forma ascendente\n Restricciones: -\n'''\n\ndef radixSort(listIn):\n n = 0\n biggest = 0\n for elem in listIn:\n if elem > biggest:\n biggest = elem\n n += 1\n biggest = len(str(biggest))\n print(biggest)\n print(listIn)\n\n for x in range(biggest):\n print(\"###########################\")\n listOut = []\n for i in range(len(listIn)):\n listOut += [0]\n digit = 10 ** x\n integers = []\n for i in range(10):\n integers += [0]\n for elem in listIn:\n dig = (elem // digit) % 10\n # print(dig)\n integers[dig] += 1\n for i in range(len(integers)):\n if i != 0:\n integers[i] += integers[i - 1]\n # print(integers)\n print(listIn)\n for i in range(len(listIn)):\n n = len(listIn) - (i + 1)\n dig = (listIn[n] // digit) % 10\n integers[dig] -= 1\n num = listIn[n]\n listOut[integers[dig]] = num\n print(listOut)\n listIn = listOut\n return listOut\n\n\n'''\ninsertionSort(listOrd):\n Entrada: Lista de elementos comparables\n Salida: Lista ordenada de forma descendente\n Restricciones: -\n'''\n\n\ndef insertion_Sort(listOrd):\n print(\"desc org\")\n position = 1\n while position < len(listOrd):\n ordered = False\n i = position\n while not ordered:\n if listOrd[i] > listOrd[i - 1] and i != 0:\n tmp = listOrd[i]\n listOrd[i] = listOrd[i - 1]\n listOrd[i - 1] = tmp\n i -= 1\n else:\n ordered = True\n position += 1\n print(listOrd)\n return listOrd\n\n\n'''\nshellSort(listOrd):\n Entrada: Lista de elementos comparables\n Salida: Lista ordenada de forma ascendente\n Restricciones: -\n'''\ndef shell_Sort(listOrd):\n print(\"asc org\")\n ordered = False\n gap = len(listOrd)\n while not ordered:\n gap = gap // 2\n i = 0\n f = gap\n print(gap)\n if gap > 1:\n while f != len(listOrd):\n if listOrd[i] > listOrd[f]:\n tmp = listOrd[i]\n listOrd[i] = listOrd[f]\n listOrd[f] = tmp\n i += 1\n f += 1\n\n print(gap, listOrd)\n else:\n i = 1\n position = 1\n while position < len(listOrd):\n orderedf = False\n i = position\n while not orderedf:\n if listOrd[i] < listOrd[i - 1] and i != 0:\n tmp = listOrd[i]\n listOrd[i] = listOrd[i - 1]\n listOrd[i - 1] = tmp\n i -= 1\n else:\n orderedf = True\n position += 1\n ordered = True\n print(listOrd)\n return listOrd","sub_path":"Algoritmo_de_ordenamiento.py","file_name":"Algoritmo_de_ordenamiento.py","file_ext":"py","file_size_in_byte":3016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"400495305","text":"# Copyright (c) 2020 SMHI, Swedish Meteorological and Hydrological Institute.\n# License: MIT License (see LICENSE.txt or http://opensource.org/licenses/mit).\n\"\"\"\nCreated on 2020-04-07 10:28\n\n@author: a002028\n\"\"\"\nimport os\nimport numpy as np\nfrom collections import Mapping\nfrom datetime import datetime\nimport shutil\nfrom decimal import Decimal, ROUND_HALF_UP\n\n\ndef check_path(path):\n \"\"\"Make 
directory.\"\"\"\n if not os.path.exists(path):\n os.makedirs(path)\n\n\ndef convert_string_to_datetime_obj(x, fmt):\n \"\"\"Return datetime object.\n\n Args:\n x: can be any kind of date/time related string format\n fmt: format of output\n \"\"\"\n if type(x) == str:\n return datetime.strptime(x, fmt)\n else:\n return ''\n\n\ndef copyfile(src, dst):\n \"\"\"Copy file to destination.\"\"\"\n shutil.copy2(src, dst)\n\n\ndef copytree(src, dst, symlinks=False, ignore=None):\n \"\"\"Copy folder tree to destination.\"\"\"\n for item in os.listdir(src):\n s = os.path.join(src, item)\n d = os.path.join(dst, item)\n if os.path.isdir(s):\n shutil.copytree(s, d, symlinks, ignore)\n else:\n shutil.copy2(s, d)\n\n\ndef create_directory_structure(dictionary, base_path):\n \"\"\"Create directory based on nested dictionary.\n\n Args:\n dictionary: Nested dictionary\n base_path: Base folder\n \"\"\"\n if len(dictionary) and not isinstance(dictionary, str):\n for direc in dictionary:\n if isinstance(direc, str):\n if '.' not in direc:\n create_directory_structure(dictionary[direc], os.path.join(base_path, direc))\n elif isinstance(direc, dict):\n create_directory_structure(dictionary[direc], os.path.join(base_path, direc))\n else:\n os.makedirs(base_path)\n\n\ndef decdeg_to_decmin(pos: (str, float), string_type=True, decimals=2) -> (str, float):\n \"\"\"Convert position from decimal degrees into degrees and decimal minutes.\"\"\"\n pos = float(pos)\n deg = np.floor(pos)\n minute = (pos - deg) * 60.0 # fractional part as minutes; avoids modulo by zero when deg == 0\n if string_type:\n if decimals:\n output = ('%%2.%sf'.zfill(7) % decimals % (float(deg) * 100.0 + minute))\n else:\n output = (str(deg * 100.0 + minute))\n\n if output.index('.') == 3:\n output = '0' + output\n else:\n output = (deg * 100.0 + minute)\n return output\n\n\ndef decmin_to_decdeg(pos, string_type=True, decimals=4):\n \"\"\"Convert position from degrees and decimal minutes into decimal degrees.\"\"\"\n pos = float(pos)\n\n output = np.floor(pos / 100.)
+ (pos % 100) / 60.\n output = round_value(output, nr_decimals=decimals)\n if string_type:\n return output\n else:\n return float(output)\n\n\ndef find_key(key, dictionary):\n \"\"\"Find key in nested dictionary.\n\n Generator to find an element of a specific key.\n Note that a key can occur multiple times in a nested dictionary.\n \"\"\"\n if isinstance(dictionary, list):\n\n for d in dictionary:\n for result in find_key(key, d):\n yield result\n else:\n for k, v in dictionary.items():\n if k == key:\n yield v\n elif isinstance(v, dict):\n for result in find_key(key, v):\n yield result\n elif isinstance(v, list):\n for d in v:\n for result in find_key(key, d):\n yield result\n\n\ndef get_file_name(file_path):\n \"\"\"Return filename without extension.\"\"\"\n return os.path.splitext(os.path.basename(file_path))[0]\n\n\ndef generate_filepaths(directory, endswith=''):\n \"\"\"Generate file paths.\"\"\"\n for path, _, fids in os.walk(directory):\n for f in fids:\n if f.endswith(endswith):\n yield os.path.abspath(os.path.join(path, f))\n\n\ndef get_subdirectories(directory):\n \"\"\"Return subdirectories in the given path.\"\"\"\n return [subdir for subdir in os.listdir(directory)\n if os.path.isdir(os.path.join(directory, subdir))]\n\n\ndef get_filepaths_from_directory(directory):\n \"\"\"Generate file paths.\"\"\"\n return [os.path.join(directory, fid) for fid in os.listdir(directory)\n if not os.path.isdir(directory + fid)]\n\n\ndef get_datetime(date_string, time_string):\n \"\"\"Get datetime object for date and time.\"\"\"\n if ' ' in date_string:\n date_string = date_string.split(' ')[0]\n if len(time_string) == 8:\n return datetime.strptime(date_string + ' ' + time_string, '%Y-%m-%d %H:%M:%S')\n elif len(time_string) == 5:\n return datetime.strptime(date_string + ' ' + time_string, '%Y-%m-%d %H:%M')\n else:\n return None\n\n\ndef get_datetime_now(fmt='%Y-%m-%d %H:%M:%S'):\n \"\"\"Get datetime object according to the given format for time right NOW.\"\"\"\n return datetime.now().strftime(fmt)\n\n\ndef get_export_folder():\n \"\"\"Return path to export folder.\"\"\"\n date_str = get_datetime_now(fmt='%Y%m%d')\n export_path = os.path.abspath(os.path.join('C:/sirena_exports', date_str))\n if not os.path.isdir(export_path):\n os.makedirs(export_path)\n return export_path\n\n\ndef get_file_list_based_on_suffix(file_list, suffix):\n \"\"\"Get filenames ending with \"suffix\".\"\"\"\n match_list = []\n for fid in file_list:\n if '~$' in fid:\n # memory prefix when a file is open\n continue\n elif fid.endswith(suffix):\n match_list.append(fid)\n\n return match_list\n\n\ndef is_sequence(arg):\n \"\"\"Check if an object is iterable (you can loop over it) and not a string.\"\"\"\n return not hasattr(arg, \"strip\") and hasattr(arg, \"__iter__\")\n\n\ndef recursive_dict_update(d, u):\n \"\"\"Recursive dictionary update.\n\n Copied from:\n http://stackoverflow.com/questions/3232943/update-\n value-of-a-nested-dictionary-of-varying-depth\n via satpy\n \"\"\"\n if isinstance(u, dict):\n for k, v in u.items():\n if isinstance(v, Mapping):\n r = recursive_dict_update(d.get(k, {}), v)\n d[k] = r\n # d.setdefault(k, r)\n else:\n d[k] = u[k]\n # d.setdefault(k, u[k])\n return d\n\n\ndef round_value(value: (str, int, float), nr_decimals=3) -> str:\n \"\"\"Calculate rounded value.\"\"\"\n return str(Decimal(str(value)).quantize(Decimal('%%1.%sf' % nr_decimals % 1), 
rounding=ROUND_HALF_UP))\n","sub_path":"sirena/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"80013965","text":"import cv2\nimport mediapipe as mp\nimport numpy as np\nmp_drawing = mp.solutions.drawing_utils\nmp_hands = mp.solutions.hands #hand solution\nclass HandSign:\n gesture = {\n 0:'fist', 1:'one', 2:'two', 3:'three', 4:'four', 5:'five',\n 6:'six', 7:'rock', 8:'spiderman', 9:'yeah', 10:'ok',\n }\n numbers = {\n 0:'zero', 1:'one', 2:'two', 3:'three', 4:'four', 5:'five',\n 6:'ready', 9:'two', 10:'three',\n }\n rps_gesture = {0:'rock', 5:'paper', 9:'scissors'}\n def __init__(self,model):\n self.model_file=model\n self.model=self.load_model(model)\n def watch(self,sign=None,detection_conf=0.8,tracking_conf=0.6):\n if sign is None:\n sign=self.rps_gesture\n knn=self.model\n hands=self.hand_recognition(2,detection_conf,tracking_conf)\n cap = cv2.VideoCapture(0)\n window_name=\"Test Hand Recognition\"\n while cap.isOpened():\n ret, img = cap.read() #video read\n if not ret:\n continue #not ready yet\n # ----------------------------------------------\n cv2.namedWindow(window_name, cv2.WND_PROP_FULLSCREEN)\n cv2.setWindowProperty(window_name,cv2.WND_PROP_FULLSCREEN,\n cv2.WINDOW_FULLSCREEN)\n img=self.preprocess_img(img)\n result=hands.process(img)\n img=self.postprocess_img(img)\n #\n width=img.shape[1]\n height=img.shape[0]\n self.print_head(img,\"Taekyung Kim, KWU, 2021\",x=width*0.02,y=height*0.05)\n self.print_head(img,\"Type q to exit\",x=width*0.02,y=height*0.1)\n #self.\n #\n if result.multi_hand_landmarks is not None:\n for res in result.multi_hand_landmarks:\n mp_drawing.draw_landmarks(img, res, mp_hands.HAND_CONNECTIONS)\n idx=self.get_label(knn,res)\n # Draw gesture result\n if idx in sign.keys():\n cv2.putText(img,\n text=sign[idx].upper(),\n org=(int(res.landmark[0].x * img.shape[1]), #width\n int(res.landmark[0].y * img.shape[0] + 20)), #height\n fontFace=cv2.FONT_HERSHEY_SIMPLEX, \n fontScale=1, \n color=(255, 255, 255), \n thickness=2)\n # Other gestures\n cv2.putText(img, \n text=self.gesture[idx].upper(), \n org=(int(res.landmark[0].x * img.shape[1]), \n int(res.landmark[0].y * img.shape[0]*1.1 + 20)), \n fontFace=cv2.FONT_HERSHEY_SIMPLEX, \n fontScale=1, color=(0, 255, 0), \n thickness=2)\n cv2.imshow(window_name, img)\n if cv2.waitKey(1) == ord('q'):\n cv2.destroyAllWindows() #close the image window\n cap.release()\n break \n def hand_recognition(self,max_num_hands=1,detection_conf=0.5,tracking_conf=0.5):\n hands = mp_hands.Hands(\n max_num_hands=max_num_hands,\n min_detection_confidence=detection_conf,\n min_tracking_confidence=tracking_conf)\n return hands\n def train_model(self,data_file,save='hand_recognition_model.xml'):\n file = np.genfromtxt(data_file, delimiter=',')\n angle = file[:,:-1].astype(np.float32)\n label = file[:, -1].astype(np.float32)\n knn = cv2.ml.KNearest_create()\n knn.train(angle, cv2.ml.ROW_SAMPLE, label)\n knn.save(save)\n def load_model(self,file_name):\n return cv2.ml.KNearest_load(file_name)\n def print_head(self,img,text,x=50,y=50):\n cv2.putText(img, \n text, org=(int(x),int(y)),\n fontFace=cv2.FONT_HERSHEY_SIMPLEX, \n fontScale=0.5, \n color=(255, 255, 255),\n thickness=1)\n def finger_vectors(self,landmarks):\n joint=np.zeros((21,3,))\n for j, lm in enumerate(landmarks): #landmark data\n joint[j] = [lm.x, lm.y, lm.z]\n N=len(landmarks)\n v1 = joint[[0,1,2,3,0,5,6,7,0,9,10,11,0,13,14,15,0,17,18,19],:] # Parent 
joint\n v2 = joint[[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20],:] # Child joint\n v = v2 - v1 # [20,3] #vector (x,y,z)\n v = v / np.linalg.norm(v, axis=1)[:, np.newaxis] #normalize, 길이로 나눈다. Norm\n return v\n def cal_angles(self,finger_vecs):\n angle = np.arccos(np.einsum('ij,ij->i', #row sum\n finger_vecs[[0,1,2,4,5,6,8,9,10,12,13,14,16,17,18],:], \n finger_vecs[[1,2,3,5,6,7,9,10,11,13,14,15,17,18,19],:])) # [15,]\n angle = np.degrees(angle) # Convert radian to degree\n data = np.array([angle], dtype=np.float32)\n return data\n\n def get_label(self,knn_model,res):\n v=self.finger_vectors(res.landmark)\n data=self.cal_angles(v)\n _, results, _, _ = knn_model.findNearest(data, 3) #res,result,neighbors,distances\n try:\n idx = int(results[0][0])\n except:\n idx=None\n return idx\n def preprocess_img(self,img):\n img = cv2.flip(img, 1)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) #색공간 변경\n return img\n def postprocess_img(self,img):\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n return img\n\nif __name__==\"__main__\":\n hand_sign=HandSign('hand_recognition_model.xml')\n hand_sign.watch(HandSign.numbers,detection_conf=0.5,tracking_conf=0.5)","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":5791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"454355347","text":"# %load q03_t_test/build.py\n# Default imports\nimport scipy.stats as stats\nimport pandas as pd\nfrom statsmodels.stats.weightstats import ztest\n\ndf = pd.read_csv('data/house_pricing.csv')\n\n# Enter Code Here\ndef t_statistic(df):\n z_statistic, p_value = ztest(x1=df[df['Neighborhood'] == 'OldTown']['GrLivArea'], value=df['GrLivArea'].mean())\n pvalue=stats.ttest_1samp(a= df[df['Neighborhood'] == 'OldTown']['GrLivArea'],popmean= df['GrLivArea'].mean())\n test_result=pvalue[1] < p_value\n return pvalue[1],test_result\n\nt_statistic(df)\n\n\n\n","sub_path":"q03_t_test/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"460299436","text":"import re\nimport os\n\nall_nodes = []\n\nfor filename in os.listdir(\"../../Sample Dataset/NER_XML_Old/\"):\n if filename.endswith(\".xml\"):\n with open('../../Sample Dataset/NER_XML_Old/'+filename) as f:\n nodes_of_one_xml_file = re.findall('^<*.[^\\s]+\\s', f.read(), re.MULTILINE)\n all_nodes.append(nodes_of_one_xml_file)\n\nb = sum(all_nodes, [])\n\nfor i in range(len(b)):\n pre = b[i]\n pre = pre.split('<')[-1]\n pre = pre.split('')[0]\n pre = pre.split('/>')[0]\n pre = pre.split('/')[-1]\n pre = pre.replace(\" \", \"\")\n b[i] = pre\n\nb = set(b)\n\n\nwith open('1.txt', 'w') as f1:\n for i in b:\n f1.write(i)\n f1.write(\"\\n\")\n\n\n","sub_path":"pre_final.py","file_name":"pre_final.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"651566275","text":"# is是用来判断内存地址是否一样,一般用来判断None\nclass Gun(object):\n def __init__(self,model):\n #枪的型号\n self.model=model\n #子弹数量\n self.bullet_count=0\n def add_bullet(self,count):\n self.bullet_count+=count;\n def shoot(self):\n #判断子弹数量\n if self.bullet_count<=0:\n print(\"%s没有子弹了...\"%self.model)\n return\n #发射子弹-1\n self.bullet_count-=1;\n #提示发射信息\n print(\"[%s]突突突...[%d]\"%(self.model,self.bullet_count))\n\nclass Soldier(object):\n def __init__(self,name):\n #新兵姓名,枪的属性\n self.name=name\n self.gun=None;\n def fire(self):\n #判断有没有枪\n if self.gun 
\n","sub_path":"basic/soldier.py","file_name":"soldier.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"93110059","text":"import time\nimport os\nimport os.path\nfrom os import path\nfrom selenium import webdriver\nfrom opencc import OpenCC\n\noptions=webdriver.ChromeOptions()\nprefs={\n    'profile.default_content_setting_values': {\n        'images': 2,\n        'javascript':2\n    }\n}\noptions.add_experimental_option('prefs',prefs)\n\n# OpenCC conversion model: Simplified to Traditional (Taiwan) Chinese\ncc = OpenCC('s2tw')\n\ndef illSynRemover(string):\n    # replace characters that are illegal in Windows filenames with full-width equivalents\n    return string.replace('\\\\','_').replace('/','_').replace(':',':').replace('*','*').replace('?','?').replace('\"','_').replace('<','<').replace('>','>').replace('|','|')\n\nurl = input('Input the url of the book :')\n\nbook_url_mode = True\nbook_index = str()\nif (url[-1] == 'm'):\n    book_url_mode = False\n    book_index = url.split('/')[-2]\nelse:\n    book_index = url.split('=')[-1]\n\ndriver = webdriver.Chrome(chrome_options=options)\ndriver.get(url)\n\n# find all elements whose class contains \"css\"\nuti_path = '//td[contains(@class,\"css\")]'\nbookname_path = '//*[@id=\"title\"]'\nclass_link = driver.find_elements_by_xpath(uti_path)\nbookname_link = driver.find_elements_by_xpath(bookname_path)\n\nbook_name_tw = cc.convert(bookname_link[0].get_attribute('innerHTML'))\n\nclass_order = []\ntitle_order_tw = []\n\nfor i in range(len(class_link)):\n    spec_class = class_link[i].get_attribute('class')\n    if (spec_class == 'ccss'):\n        if (class_link[i].get_attribute('innerHTML') != ' '):\n            class_order.append(spec_class)\n    if (spec_class == 'vcss'):\n        class_order.append(spec_class)\n        title_order_tw.append(cc.convert(class_link[i].get_attribute('innerHTML')))\n\nclass_order.append('vcss')\n\n# find the chapter count of each book\nccount_book = []\nchapter_count = int(0)\nfor i in range(len(class_order)):\n    if (class_order[i] != 'vcss'):\n        chapter_count += 1\n    else:\n        ccount_book.append(chapter_count)\n        chapter_count = 0\n\nccount_book.remove(0)\nclass_order.clear()\n\n# find the real chapter titles (class = 'ccss')\n\nregular_path = '//td[contains(@class,\"ccss\")]//a'\nchapter_link = driver.find_elements_by_xpath(regular_path)\n\nchapter_title_tw = []\ntitle_link = []\n\nfor i in range(len(chapter_link)):\n    chapter_title_tw.append(cc.convert(chapter_link[i].get_attribute('text')))\n    title_link.append(chapter_link[i].get_attribute('href'))\n\ndriver.quit()\n\n# Specific character removal\n\nbook_name_tw = illSynRemover(book_name_tw)\nfor i in range(len(title_order_tw)):\n    title_order_tw[i] = illSynRemover(title_order_tw[i])\n\nloc_link = int(0)\nfirst_links = []\nfor i in ccount_book:\n    if (book_url_mode == False):\n        first_links.append(title_link[loc_link].split('/')[-1].split('.')[0])\n    else:\n        first_links.append(title_link[loc_link].split('=')[-1])\n    loc_link += i\n\ntitle_link.clear()\n\ncmd1 = 'explorer \"http://dl.wenku8.com/packtxt.php?aid=' + str(book_index) + '&vid='\ncmd2 = '&charset=big5'\n\nfolder = 'D:\\\\' + str(book_name_tw)\nif not os.path.isdir(folder):\n    os.mkdir(folder)\n\n\ndownload_link = 'C:\\\\Users\\\\lenovo\\\\Downloads\\\\'\nfor i in range(len(first_links)):\n    os.system(cmd1 + first_links[i] + cmd2)
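\n    # Wait for the Big5 text download to land in the Downloads folder, then re-encode it as UTF-8.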
\n    while (path.exists(download_link + first_links[i] + ' big5.txt') == False):\n        time.sleep(0.1)\n    old_file = open(download_link + first_links[i] + ' big5.txt', 'r', encoding ='utf-8', errors = 'replace')\n    txtInnerText = old_file.read()\n    old_file.close()\n    new_file = open(folder + '\\\\' + str(i + 1) + title_order_tw[i] + '.txt', 'w', encoding ='utf-8')\n    new_file.write(txtInnerText)\n    new_file.close()\n    os.remove(download_link + first_links[i] + ' big5.txt')\n\nos.system('taskkill /f /im msedge.exe')\nprint('Book Set Complete!')\n","sub_path":"Aux crawler/Crack crawler.py","file_name":"Crack crawler.py","file_ext":"py","file_size_in_byte":3601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"260664410","text":"import sys\nimport re\nfrom model import *\nfrom utils import *\n\ndef load_model():\n    vocab = load_vocab(sys.argv[2])\n    model = cbow(len(vocab))\n    if CUDA:\n        model = model.cuda()\n    print(model)\n    load_checkpoint(sys.argv[1], model)\n    return model, vocab\n\ndef run_model(model, data):\n    batch = []\n    z = len(data)\n    while len(data) < BATCH_SIZE:\n        data.append([\"\", UNK_IDX])\n    for x in data:\n        batch.append(x[1])\n    result = model.embed(LongTensor(batch))\n    for i in range(z):\n        data[i].append(result[i].data)\n    return data[:z]\n\ndef evaluate():\n    k = 20\n    y = sys.argv[3]\n    data = []\n    result = []\n    model, vocab = load_model()\n    if y in vocab:\n        data.append([y, vocab[y]])\n        del vocab[y]\n    else:\n        data.append([y, UNK_IDX])\n    for w in vocab:\n        data.append([w, vocab[w]])\n        if len(data) == BATCH_SIZE:\n            result.extend(run_model(model, data))\n            data = []\n    if len(data):\n        result.extend(run_model(model, data))\n    for x in result:\n        x.append(torch.dist(x[2], result[0][2]))\n    for x in sorted(result, key = lambda x: x[3])[:k]:\n        print(x[0], x[3])\n\nif __name__ == \"__main__\":\n    if len(sys.argv) != 4:\n        sys.exit(\"Usage: %s model vocab word\" % sys.argv[0])\n    print(\"cuda: %s\" % CUDA)\n    evaluate()\n","sub_path":"evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"420339742","text":"from doubly_linked_list import DoublyLinkedList\n\n\nclass RingBuffer:\n    def __init__(self, capacity):\n        self.capacity = capacity\n        self.current = None\n        self.storage = DoublyLinkedList()\n\n    def append(self, item):\n        if self.storage.length < self.capacity:\n            self.storage.add_to_tail(item)\n            self.current = self.storage.tail\n        elif self.current == self.storage.tail:\n            self.storage.head.value = item\n            self.current = self.storage.head\n        else:\n            self.current.next.value = item\n            self.current = self.current.next\n\n\n    def get(self):\n        # Note: This is the only [] allowed\n        list_buffer_contents = []\n        node = self.storage.head\n        while node is not None:\n            list_buffer_contents.append(node.value)\n            node = node.next\n        return list_buffer_contents
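\n\n# Usage sketch (illustrative): with capacity 3, a fourth append overwrites the\n# oldest element in place:\n#     buf = RingBuffer(3)\n#     for x in [1, 2, 3, 4]:\n#         buf.append(x)\n#     buf.get()  # -> [4, 2, 3]\n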
# passes all but 2 tests. good enough for MVP.\n# I'll hopefully come back to this later.\n\n# ----------------Stretch Goal-------------------\n\n\nclass ArrayRingBuffer:\n    def __init__(self, capacity):\n        self.capacity = capacity\n        self.current = 0\n        self.storage = [None] * capacity\n\n    def append(self, item):\n        if len(self.storage) == self.capacity:\n            self.storage[self.current] = item\n            if self.current == len(self.storage) - 1:\n                self.current = 0\n            else:\n                self.current += 1\n        else:\n            self.storage.append(item)\n\n    def get(self):\n        return [x for x in self.storage if x is not None]\n\n# All tests pass instantly. Yeah!\n","sub_path":"ring_buffer/ring_buffer.py","file_name":"ring_buffer.py","file_ext":"py","file_size_in_byte":1615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"346115058","text":"from django.db import models\nimport django.db.models.options as options\nfrom django.contrib.auth.models import Group\n\n# Create your models here.\n\nclass Workflow(models.Model):\n\n    groups = models.ManyToManyField(Group, blank=True)\n    name = models.CharField(max_length=255, blank=True, null=True)\n    description = models.TextField(blank=True, null=True)\n    hide = models.TextField(blank=True, null=True)\n    workflow_type = models.CharField(max_length=40, blank=True, null=True)\n\n    def isAuthorized(self, user):\n        belong=False\n        for group in self.groups.all():\n            if group in user.groups.all():\n                belong=True\n                break\n        return belong\n\n    class Meta:\n        ordering = ('id',)","sub_path":"workflow/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"330478001","text":"from sys import version_info, path\nfrom os import system, environ\n\n# APPEND CAFFE TO YOUR PATH\npath.append(\"./caffe/python\") # export PYTHONPATH=\"/home/evann/dev/perso/Projet RecVis/caffe/python\":$PYTHONPATH\nenviron['GLOG_minloglevel'] = '2'\n\nimport skimage.io as io\nimport matplotlib.pyplot as plt\nimport caffe\nfrom loadFeatures import *\nfrom CCA_search import imageToTagSearch\n\nuser_input = input if version_info[0] > 2 else raw_input\n\n\nprint(\"Loading Caffe model\")\ncaffe_root = './caffe/'\nmodel_prototxt = caffe_root + 'models/bvlc_googlenet/deploy.prototxt'\nmodel_trained = caffe_root + 'models/bvlc_googlenet/bvlc_googlenet.caffemodel'\nmean_path = caffe_root + 'python/caffe/imagenet/ilsvrc_2012_mean.npy'\nlayer_name = 'pool5/7x7_s1'\n\ncaffe.set_mode_cpu()\n\nnet = caffe.Net(model_prototxt, model_trained, caffe.TEST)\ntransformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})\ntransformer.set_mean('data', np.load(mean_path).mean(1).mean(1))\ntransformer.set_transpose('data', (2, 0, 1))\ntransformer.set_channel_swap('data', (2, 1, 0)) # if using RGB instead of BGR\ntransformer.set_raw_scale('data', 255.0)\nnet.blobs['data'].reshape(1, 3, 224, 224)\n\n\ndef urlToVec(image):\n    net.blobs['data'].data[...]
= transformer.preprocess('data', caffe.io.load_image(image))\n output = net.forward()\n imageVec = net.blobs[layer_name].data[0].reshape(1, -1)\n return imageVec[0]\n\n\ndef loadCCA(num=1, name='COCO'):\n print(\"Loading CCA %d (%s DB)...\" % (num, name))\n load_features(name)\n imIds = get_images_id()\n W_T, W_V, phi_T, phi_V, D = np.load('Computed_CCA/CCA_{0}.npy'.format(num), encoding='latin1')\n return W_T, W_V, phi_T, phi_V, D, imIds\n\n\nW_T, W_V, phi_T, phi_V, D, imIds = loadCCA()\n\nurl = ''\nwhile(url != 'EXIT'):\n system(\"clear\")\n url = user_input(\"Image URL: \")\n\n if (url[:2] == 'DB'):\n W_T, W_V, phi_T, phi_V, D, imIds = loadCCA(int(url[2:]))\n elif (url != 'EXIT'):\n image = io.imread(url)\n imageVec = urlToVec(url)\n tags, counts = imageToTagSearch(imageVec, W_V, D, 15, phi_T, W_T, imIds)\n print('\\nCorresponding tags:')\n print(zip(tags, counts))\n plt.axis('off')\n plt.imshow(image)\n plt.show()\n","sub_path":"I2T.py","file_name":"I2T.py","file_ext":"py","file_size_in_byte":2217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"222382969","text":"# -*- coding: utf-8 -*-\nimport numpy as np\nfrom skimage import io\nimport os\n\n\ndef convert(fname):\n image = io.imread(fname, as_grey=True)\n unique_dict = {k:i for i, k in enumerate(np.unique(image))}\n outs = [np.zeros(image.shape, dtype=np.uint8) for _ in range(len(unique_dict))]\n for i in range(image.shape[0]):\n for j in range(image.shape[1]):\n channel_index = unique_dict[image[i,j]]\n outs[channel_index][i,j] = 255\n\n dname = os.path.dirname(fname)\n for i, out in enumerate(outs):\n outname = os.path.join(dname, \"class%d.tif\" % i)\n print(\"Saving file %s\" % outname)\n io.imsave(outname, out, plugin='tifffile')\n\nif __name__ == \"__main__\":\n parent = \"/Users/joshuaarnold/Documents/Papers/VU_SEM/analysis/\" \\\n \"SEM-EDX-DATA/BG2/\"\n print(os.listdir(parent))\n\n for i in range(1,13):\n fname = os.path.join(parent,\n \"soi_%s/Classified image.tif\" % str(i).zfill(3)\n )\n convert(fname)","sub_path":"semclassify/label2masks.py","file_name":"label2masks.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"299911785","text":"import calendar\nfrom datetime import date, datetime\nfrom PyQt5.QtCore import QDate\nfrom PyQt5.QtGui import QFont, QStandardItemModel, QStandardItem, QIcon\nfrom PyQt5.QtWidgets import QWidget, QVBoxLayout, QCheckBox, QSpacerItem, QSizePolicy, QLabel, QComboBox, QLineEdit, \\\n QPushButton, QMessageBox, QCalendarWidget, QListView, QGridLayout\nfrom appuntamentotampone.model.AppuntamentoTampone import AppuntamentoTampone\nfrom appuntamentotampone.view.VistaAppuntamentoTampone import VistaAppuntamentoTampone\n\n\nclass VistaInserisciAppuntamentoTampone(QWidget):\n\n def __init__(self, controller):\n super(VistaInserisciAppuntamentoTampone, self).__init__(parent=None)\n\n self.controller = controller\n self.info = {}\n\n self.v_layout = QVBoxLayout()\n self.get_form_entry(\"Nome*\")\n self.get_form_entry(\"Cognome*\")\n self.get_form_entry(\"Data di nascita (dd/mm/YYYY)*\")\n self.get_form_entry(\"Codice Fiscale*\")\n self.get_form_entry(\"Indirizzo*\")\n self.get_form_entry(\"Telefono*\")\n\n self.data_selezionata = \" \"\n self.orario_selected = \" \"\n self.calendar_layout = QGridLayout()\n self.calendario_appuntamento = self.init_calendario()\n\n self.calendar_layout.addWidget(QLabel(\"Data appuntamento*\"), 0, 0)\n 
self.calendar_layout.addWidget(self.calendario_appuntamento, 1, 0)\n\n        self.calendario_appuntamento.selectionChanged.connect(self.calendar_date)\n\n        self.label = QLabel('')\n        self.label_orario = QLabel('')\n        self.calendar_layout.addWidget(QLabel(\"Fascia oraria appuntamento*\"), 0, 1)\n\n        self.list_view_orario = QListView()\n\n        self.update_ui()\n\n        self.calendar_layout.addWidget(self.list_view_orario, 1, 1)\n\n        self.list_view_orario.selectionModel().currentChanged.connect(self.show_selected_orario)\n\n        self.calendar_layout.addWidget(self.label, 2, 0)\n        self.calendar_layout.addWidget(self.label_orario, 2, 1)\n        self.v_layout.addLayout(self.calendar_layout)\n\n        self.drive_through = QCheckBox(\"Presenta sintomi o ha avuto contatti con persone positive o è attualmente positivo\")\n\n        self.v_layout.addWidget(self.drive_through)\n        self.v_layout.addItem(QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding))\n\n        self.v_layout.addWidget(QLabel(\"Tipologia di tampone da effettuare*\"))\n        self.tipo_tampone = QComboBox()\n        self.tipo_tampone.addItems([\" \", \"Antigenico Rapido\", \"Molecolare\", \"Sierologico\"])\n\n        self.v_layout.addWidget(self.tipo_tampone)\n        self.v_layout.addItem(QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding))\n\n        btn_ok = QPushButton(\"OK\")\n        btn_ok.clicked.connect(self.add_appuntamento)\n        self.v_layout.addWidget(btn_ok)\n\n        self.setLayout(self.v_layout)\n        self.setWindowTitle(\"Nuovo Appuntamento\")\n        self.setFont(QFont('Arial Nova Light', 12))\n        self.setWindowIcon(QIcon('appuntamentovaccino/data/CovidFree_Clinica.png'))\n\n        self.setMaximumSize(620, 700)\n        self.resize(620, 700)\n        self.move(90, 0)\n\n    # Builds one labelled form field and registers its QLineEdit in self.info.\n    def get_form_entry(self, tipo):\n        self.v_layout.addWidget(QLabel(tipo))\n        current_text_edit = QLineEdit(self)\n        self.v_layout.addWidget(current_text_edit)\n        self.info[tipo] = current_text_edit\n\n    # Validates the form and, if every check passes, adds the appointment.\n    def add_appuntamento(self):\n        nome = self.info[\"Nome*\"].text()\n        cognome = self.info[\"Cognome*\"].text()\n        data_nascita = self.info[\"Data di nascita (dd/mm/YYYY)*\"].text()\n        cf = self.info[\"Codice Fiscale*\"].text()\n        indirizzo = self.info[\"Indirizzo*\"].text()\n        telefono = self.info[\"Telefono*\"].text()\n        tipo_tampone = self.tipo_tampone.currentText()\n        ok = True\n\n        if nome == \"\" or cognome == \"\" or data_nascita == \"\" or cf == \"\" or indirizzo == \"\" or telefono == \"\" or tipo_tampone == ' ' or self.orario_selected == \" \" or self.data_selezionata == \" \":\n            QMessageBox.critical(self, 'Errore', 'Per favore, completa tutti i campi', QMessageBox.Ok, QMessageBox.Ok)\n            ok = False\n\n        if ok is True:\n            try:\n                data_inserita = datetime.strptime(self.info[\"Data di nascita (dd/mm/YYYY)*\"].text(), '%d/%m/%Y')\n            except ValueError:\n                QMessageBox.critical(self, 'Errore', 'Inserisci la data nel formato richiesto: dd/MM/yyyy',\n                                     QMessageBox.Ok, QMessageBox.Ok)\n                ok = False\n            if ok is True and date.today().year < data_inserita.year:\n                QMessageBox.critical(self, 'Errore', 'La data di nascita inserita non è valida',\n                                     QMessageBox.Ok, QMessageBox.Ok)\n                ok = False\n\n        if ok is True:\n            d = datetime.strptime(self.data_selezionata, '%Y-%m-%d')\n            if date.today().day > d.day and date.today().month == d.month:\n                QMessageBox.critical(self, 'Errore', 'La data selezionata per l\\' appuntamento non è valida',\n                                     QMessageBox.Ok, QMessageBox.Ok)\n                ok = False
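\n\n        # Availability checks follow: the clinic must have swabs in stock at all,\n        # and the selected swab type must itself still be available.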
\n        if ok is True and not self.controller.get_tamponi_presenti():\n            QMessageBox.critical(self, 'Errore',\n                                 'Siamo spiacenti, ma attualmente non c\\'è alcuna disponibilità di tamponi da poter eseguire',\n                                 QMessageBox.Ok, QMessageBox.Ok)\n            ok = False\n\n        if ok is True and not self.controllo_disponibilita():\n            QMessageBox.critical(self, 'Errore', 'Siamo spiacenti, ma attualmente non è disponibile la tipologia di tampone selezionata',\n                                 QMessageBox.Ok, QMessageBox.Ok)\n            ok = False\n\n        if ok is True:\n            contatore_data = 0\n            contatore_ora = 0\n            for appuntamento in self.controller.get_elenco_appuntamenti():\n                if appuntamento.data_appuntamento == self.data_selezionata:\n                    contatore_data = contatore_data + 1\n                    if appuntamento.fascia_oraria == self.orario_selected:\n                        contatore_ora = contatore_ora + 1\n            if contatore_data > 39:\n                QMessageBox.critical(self, 'Errore', 'Siamo spiacenti, il giorno selezionata è al completo.', QMessageBox.Ok, QMessageBox.Ok)\n                ok = False\n            elif contatore_ora > 3:\n                QMessageBox.critical(self, 'Errore', 'Siamo spiacenti, la fascia oraria selezionata è al completo.', QMessageBox.Ok, QMessageBox.Ok)\n                ok = False\n\n        if ok is True:\n            is_drive_through = False\n            if self.drive_through.isChecked():\n                is_drive_through = True\n\n            appuntamento_tampone = AppuntamentoTampone(nome, cognome, cf, telefono, indirizzo, data_nascita, self.data_selezionata, self.orario_selected, is_drive_through, tipo_tampone)\n\n            self.controller.aggiungi_appuntamento(appuntamento_tampone)\n            self.vista_riepilogo = VistaAppuntamentoTampone(appuntamento_tampone)\n            self.vista_riepilogo.show()\n            self.close()\n\n    # Used by one of the checks in add_appuntamento to verify that the selected swab type is available.\n    def controllo_disponibilita(self):\n        self.controller.lettura_magazzino()\n        return self.controller.prenota_tampone(self.tipo_tampone.currentText())\n\n    # Initializes the GUI calendar from which the appointment date is selected.\n    def init_calendario(self):\n        calendario = QCalendarWidget(self)\n        currentMonth = datetime.now().month\n        currentYear = datetime.now().year\n        calendario.setMinimumDate(QDate(currentYear, currentMonth, 1))\n        calendario.setMaximumDate(\n            QDate(currentYear + 1, currentMonth, calendar.monthrange(currentYear, currentMonth)[1]))\n        calendario.setSelectedDate(QDate(currentYear, currentMonth, 1))\n        calendario.setStyleSheet('background-color: lightblue')\n        calendario.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)\n        calendario.setGeometry(200, 200, 300, 200)\n        calendario.setGridVisible(True)\n        return calendario\n\n    # Builds the list of selectable time slots.\n    def update_ui(self):\n        self.list_view_orario_model = QStandardItemModel(self.list_view_orario)\n        self.orari = [\"9:00-10:00\", \"10:00-11:00\", \"11:00-12:00\", \"12:00-13:00\", \"13:00-14:00\", \"14:00-15:00\",\n                      \"15:00-16:00\", \"16:00-17:00\", \"17:00-18:00\", \"18:00-19:00\"]\n        for fascia in self.orari:\n            item = QStandardItem()\n            item.setText(fascia)\n            item.setEditable(False)\n            font = item.font()\n            font.setPointSize(12)\n            item.setFont(font)\n            self.list_view_orario_model.appendRow(item)\n        self.list_view_orario.setModel(self.list_view_orario_model)\n\n    # Returns the date selected on the calendar.\n    def calendar_date(self):\n        dateselected = self.calendario_appuntamento.selectedDate()\n        self.data_selezionata = str(dateselected.toPyDate())\n        self.label.setText(\"Data selezionata* : \" + self.data_selezionata)\n        return self.data_selezionata\n\n    # Returns the selected time slot.\n    def show_selected_orario(self,
current):\n if self.list_view_orario.selectedIndexes():\n self.orario_selected = self.orari[current.row()]\n self.label_orario.setText(\"Fascia oraria selezionata* : \" + self.orario_selected)\n return self.orario_selected","sub_path":"calendariotamponi/view/VistaInserisciAppuntamentoTampone.py","file_name":"VistaInserisciAppuntamentoTampone.py","file_ext":"py","file_size_in_byte":9742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"67870530","text":"from django.db import models\nfrom django.contrib.auth.models import AbstractUser\n\nfrom account.managers import CustomAccountManager\n\n\nclass CustomAccount(AbstractUser):\n \"\"\" Define a custom user which is used to login and store user credentials \"\"\"\n\n username = None\n email = models.EmailField(max_length=255, unique=True)\n first_name = models.CharField(max_length=255, blank=False, null=False)\n\n objects = CustomAccountManager()\n\n USERNAME_FIELD = 'email'\n REQUIRED_FIELDS = ['first_name']\n\n def __str__(self):\n return self.email\n","sub_path":"api/account/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"443756763","text":"if __name__ == \"__main__\":\n import pandas as pd\n data=pd.read_csv('.//data//CRDC2013_14.csv',encoding='Latin-1')\n data['total_enrollment'] = data['TOT_ENR_M'] + data['TOT_ENR_F']\n all_enrollment = data['total_enrollment'].sum()\n enrol = data.loc[:, data.columns.str.contains(r'^SCH_ENR_[A-Z]{2}_[FM]$')]\n enrol_sum = enrol.sum()\n race_gender_ratio = enrol_sum/all_enrollment\n print(race_gender_ratio)\n \n ","sub_path":"Data analysis/Exploring-data/enrollment.py","file_name":"enrollment.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"490471450","text":"import cv2\nimport numpy as np\nimport logging\n\nclass Utils(object):\n\n def __init__(self, controller):\n self.threshold = 0.2\n self.object_real_world_mm = 50\n self.controller = controller\n self.camera_calibration_data = self.controller.get_camera_calibration_data()\n self.overlay_vertices = np.float32([[0, 0, 0], [0, 1, 0], [1, 1, 0], [1, 0, 0],\n [0.5, 0.5, 4]])\n\n def compute_object_vertices(self, img, tracked):\n start_x, start_y, end_x, end_y = tracked.target.rect\n quad_3d = np.float32([[start_x, start_y, 0], [end_x, start_y, 0],\n [end_x, end_y, 0], [start_x, end_y, 0]])\n h, w = img.shape[:2]\n K = np.float64([[w, 0, 0.5 * (w - 1)],\n [0, w, 0.5 * (h - 1)],\n [0, 0, 1.0]])\n dist_coef = np.zeros(4)\n ret, rvec, tvec = cv2.solvePnP(quad_3d, tracked.quad, K, dist_coef)\n verts = self.overlay_vertices * [(end_x - start_x), (end_y - start_y),\n -(end_x - start_x) * 0.3] + (start_x, start_y, 0)\n verts = cv2.projectPoints(verts, rvec, tvec, K, dist_coef)[0].reshape(-1, 2)\n\n verts_floor = np.int32(verts).reshape(-1, 2)\n return (verts, verts_floor)\n\n def draw_object(self, img, object_vertices, texts):\n (start_x, start_y), (end_x, end_y) = object_vertices[0], object_vertices[2]\n cv2.rectangle(img, (start_x, start_y), (end_x, end_y), (255, 255, 255), cv2.FILLED, 8, 0);\n font = cv2.FONT_HERSHEY_SIMPLEX\n deficit = 15\n for text in texts:\n cv2.putText(img, text, (start_x + 10, start_y + deficit), font, 0.35, (0, 0, 0), 1, cv2.LINE_AA)\n deficit = deficit+10\n\n def calculate_distance_to_object(self, real_world_template_size_mm, image_size):\n if 
self.camera_calibration_data is None:\n            self.camera_calibration_data = self.controller.get_camera_calibration_data()\n\n        distance_mm = 0\n\n        if self.camera_calibration_data is None:\n            return distance_mm\n\n        x = (self.camera_calibration_data.m * self.camera_calibration_data.width) / self.camera_calibration_data.width\n        size_object_on_image_sensor = image_size / x\n\n        if size_object_on_image_sensor != 0:\n            distance_mm = real_world_template_size_mm * self.camera_calibration_data.focal_length / size_object_on_image_sensor\n        return distance_mm\n\n    def get_canny_edge_image(self, image):\n\n        if image is None:\n            return\n        try:\n            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n            if gray is not None:\n                gaussian_blur = cv2.GaussianBlur(gray, (5, 5), 0)\n                canny_image = cv2.Canny(gaussian_blur, 100, 200)\n                return canny_image\n        except Exception:\n            logging.exception(\"Something went wrong\")\n            return None\n        return None\n\n    # Returns the average contour area over all contours.\n    def calculate_all_contour_area(self, im, contours):\n        contourArea = 0\n\n        for contour in contours:\n            contourArea = contourArea + cv2.contourArea(contour)\n\n        if len(contours) > 0:\n            contourArea = contourArea / len(contours)\n\n        return contourArea\n\n    # Returns the average contour perimeter (arc length) over all contours.\n    def calculate_all_contour_perimeter(self, contours):\n        contourPerimeter = 0\n        for contour in contours:\n            contourPerimeter = contourPerimeter + cv2.arcLength(contour, True)\n\n        if len(contours) > 0:\n            contourPerimeter = contourPerimeter / len(contours)\n        return contourPerimeter","sub_path":"Utils.py","file_name":"Utils.py","file_ext":"py","file_size_in_byte":3599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"601411156","text":"import os\nimport random\n\nimport requests\nfrom flask import Flask, render_template, request\n\nfrom MemeGenerator.meme_engine import MemeEngine\nfrom Ingestor import Ingestor\n\napp = Flask(__name__, static_folder='./tmp')\n\nmeme = MemeEngine('./tmp/static')\n\n\ndef setup():\n    \"\"\" Load all resources \"\"\"\n\n    quote_files = ['./_data/DogQuotes/DogQuotesTXT.txt',\n                   './_data/DogQuotes/DogQuotesDOCX.docx',\n                   './_data/DogQuotes/DogQuotesPDF.pdf',\n                   './_data/DogQuotes/DogQuotesCSV.csv']\n\n    quotes = []\n\n    for values in quote_files:\n        # accumulate the quotes from every file instead of overwriting the list\n        quotes.extend(Ingestor.parse(values))\n\n    images_path = \"./_data/photos/dog/\"\n\n    # image filenames within the images_path directory\n    imgs = os.listdir(images_path)\n    return quotes, imgs
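\n\n# Load quotes and images once at import time; the routes below reuse them.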
\nquotes, imgs = setup()\n\n\n@app.route('/')\ndef meme_rand():\n    \"\"\" Generate a random meme \"\"\"\n\n    # Use the random python standard library class to:\n    # 1. select a random image from imgs array\n    # 2. select a random quote from the quotes array\n\n    img = os.getcwd() + \"\\\\_data\\\\photos\\\\dog\\\\\" + random.choice(imgs)\n    quote = random.choice(quotes)\n    path = meme.make_meme(img, quote.body, quote.author)\n    return render_template('meme.html', path=path)\n\n\n@app.route('/create', methods=['GET'])\ndef meme_form():\n    \"\"\" User input for meme information \"\"\"\n    return render_template('meme_form.html')\n\n\n@app.route('/create', methods=['POST'])\ndef meme_post():\n    \"\"\" Create a user defined meme \"\"\"\n\n    # 1. Use requests to save the image from the image_url\n    #    form param to a temp local file.\n    # 2. Use the meme object to generate a meme using this temp\n    #    file and the body and author form parameters.\n    # 3. Remove the temporary saved image.\n\n    image_url = request.form['image_url']\n    body = request.form['body']\n    author = request.form['author']\n    r = requests.get(image_url, allow_redirects=True)\n    tmp = f'./tmp/{random.randint(0, 100000000)}.png'\n    with open(tmp, 'wb') as img_file:\n        img_file.write(r.content)\n\n    path = meme.make_meme(tmp, body, author)\n    os.remove(tmp)  # step 3: clean up the temporary download\n    return render_template('meme.html', path=path)\n\n\nif __name__ == \"__main__\":\n    app.run(debug=True)\n","sub_path":"src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"516840921","text":"from ...common.decorators import *\nfrom ...common.errors import *\nfrom enum import IntEnum\nfrom .data_element import DataElement\nfrom decimal import *\nimport json\nimport dateutil.parser\n\nclass InActionQueryValue:\n    def __init__(self, value=None, datatype=None, textsearch=False, position=None):\n        if type(value) == DataElement:\n            self.dataelement = value\n        else:\n            self.dataelement = DataElement(value=value, datatype=datatype)\n        self.textsearch = textsearch\n        self.position = position\n\n    @staticmethod\n    def from_dict(dct):\n        val = None\n        try:\n            de = DataElement.from_dict(dct)\n            val = InActionQueryValue(value=de,\n                                     textsearch=dct.get('textsearch', False),\n                                     position=dct.get('position', None))\n        except Exception as e:\n            raise DeserializationError(InActionQueryValue, 'dict', dct) from e\n        return val\n\n    def to_dict(self):\n        dct = {}\n        try:\n            dct = self.dataelement.to_dict()\n            dct['textsearch'] = self.textsearch\n            if self.position is not None:\n                dct['position'] = self.position\n        except Exception as e:\n            raise SerializationError(InActionQueryValue, 'dict') from e\n        return dct
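\n\n# The InActionQueryResult values are powers of two, so they act as bit flags and\n# can be combined to request more than one kind of result at once.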
\nclass InActionQueryResult(IntEnum):\n    INSTANCES = 1\n    ACTIONS = 2\n    TOPOLOGIES = 4\n\n\nclass InActionQuery:\n    def __init__(self, and_set=None, or_set=None, not_set=None, action_filter=None, topology_filter=None, query_results=InActionQueryResult.INSTANCES):\n        self.and_set = and_set\n        self.or_set = or_set\n        self.not_set = not_set\n        self.action_filter = action_filter\n        self.topology_filter = topology_filter\n        self.query_results = query_results\n\n    @staticmethod\n    def from_dict(dct):\n        retval = None\n        try:\n            if type(dct).__name__ == 'list':\n                retval = []\n                for d in dct:\n                    retval.append(InActionQuery.from_dict(d))\n            else:\n                and_set = list(map(InActionQueryValue.from_dict, dct['and'])) if 'and' in dct else None\n                or_set = list(map(InActionQueryValue.from_dict, dct['or'])) if 'or' in dct else None\n                not_set = list(map(InActionQueryValue.from_dict, dct['not'])) if 'not' in dct else None\n\n                retval = InActionQuery(and_set, or_set, not_set,\n                                       dct.get('action_id', None),\n                                       dct.get('topology_id', None))\n        except Exception as e:\n            raise DeserializationError(InActionQuery, 'dict', dct) from e\n        return retval\n\n\n    @staticmethod\n    def from_json(json_string):\n        query = None\n        try:\n            data = json.loads(json_string)\n            query = InActionQuery.from_dict(data)\n        except Exception as e:\n            raise DeserializationError(InActionQuery, 'json', json_string) from e\n        return query\n\n    def to_dict(self):\n        dct = None\n        try:\n            dct = {}\n            if self.and_set is not None:\n                dct['and'] = list(map(lambda o: o.to_dict(), self.and_set))\n            if self.or_set is not None:\n                dct['or'] = list(map(lambda o: o.to_dict(), self.or_set))\n            if self.not_set is not None:\n                dct['not'] = list(map(lambda o: o.to_dict(), self.not_set))\n            if self.action_filter is not None:\n                dct['action_id'] = self.action_filter\n            if self.topology_filter is not None:\n                dct['topology_id'] = self.topology_filter\n        except Exception as e:\n            raise SerializationError(InActionQuery, 'dict') from e\n        return dct\n\n    def to_json(self):\n        json_string = \"\"\n        try:\n            dct = self.to_dict()\n            json_string = json.dumps(dct)\n        except Exception as e:\n            raise SerializationError(InActionQuery, 'json') from e\n        return json_string
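\n\n# Round-trip sketch (illustrative; assumes DataElement(value=..., datatype=...) from this package):\n#     q = InActionQuery(and_set=[InActionQueryValue(value='foo', textsearch=True)], action_filter=7)\n#     s = q.to_json()               # e.g. '{\"and\": [...], \"action_id\": 7}'\n#     InActionQuery.from_json(s)    # rebuilds an equivalent query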
\"\n\t\t\t\t\n\t\t\t\t# aff position\n\t\t\t\tds = lambda t: \"%02d:%02d\" % (t / 60, t % 60)\n\t\t\t\ts, t, p = self.mpd.getSongPosition()\n\t\t\t\tyield \"\"\"\n\t\t\t\t \n\t\t\t\t\t\n\t\t\t\t\t \n\t\t\t\t\t \n\t\t\t\t\t\n\t\t\t\t
\n\t\t\t\t\t\t\n\t\t\t\t\t\t\t
\n\t\t\t\t\t\t
\n\t\t\t\t\t
\n\t\t\t\t\t\t%d %% - %s/%s\n\t\t\t\t\t
\"\"\" % (int(p * 2), int(p), ds(s), ds(t))\n\n\t\tyield \"\"\"\n\t\t\n\t\t\"\"\"\n\t\tif stat.state != 2:\n\t\t\tyield \"\"\" \"\"\"\n\t\telse:\n\t\t\tyield \"\"\" \"\"\"\n\t\tif stat.state != 1:\n\t\t\tyield \"\"\" \"\"\"\n\t\tyield \"\"\"\n\t\t\n\t\t\"\"\"\n\n\t\tif stat.state != 0:\n\t\t\tyield \"\"\"\n\t\t\t\n\t\t\t\n\t\t\t\n\t\t\t\"\"\"\n\t\t\tyield str(stat.volume)\n\t\t\tyield \"%\"\n\n\t\tif isForced or self.mpd.needRedrawPlaylist():\n\t\t\tidx, tot = self.mpd.getPlaylistPosition()\n\t\t\tyield \"[[zonePlayList]]\"\n\t\t\tyield \"\"\"\n\t\t\t

Playlist (%d)\n\t\t\t\n\t\t\t\n\t\t\t\n\t\t\t

\n\t\t\t\"\"\" % tot\n\n\t\t\tl = self.mpd.playlist()\n\t\t\tfor s in l:\n\t\t\t\ti = l.index(s)\n\n\t\t\t\tif i + 1 == idx:\n\t\t\t\t\tclasse = \" class='s'\"\n\t\t\t\telse:\n\t\t\t\t\tclasse = i % 2 == 0 and \" class='p'\" or ''\n\n\t\t\t\tif s.path.lower().startswith(\"http://\"):\n\t\t\t\t\ttitle = s.path\n\t\t\t\telse:\n\t\t\t\t\ttitle = self.mpd.display(s, config.TAG_FORMAT)\n\n\t\t\t\tyield \"\" % classe\n\t\t\t\tyield \"%03d\" % (i + 1)\n\t\t\t\tyield \"\"\"X\"\"\"\n\t\t\t\tyield \"\"\"\"\"\" + title + \"\"\"\"\"\"\n\t\t\t\tyield \"\"\n\t\n\t\n\tdef ajax_ope(self, op, idx=None):\n\t\tif op == \"play\":\n\t\t\tif idx:\n\t\t\t\tself.mpd.play(int(idx))\n\t\t\telse:\n\t\t\t\tself.mpd.play()\n\t\telif op == \"delete\":\n\t\t\tself.mpd.delete([int(idx), ])\n\t\telif op == \"next\":\n\t\t\tself.mpd.next()\n\t\telif op == \"prev\":\n\t\t\tself.mpd.prev()\n\t\telif op == \"play\":\n\t\t\tself.mpd.play()\n\t\telif op == \"pause\":\n\t\t\tself.mpd.pause()\n\t\telif op == \"playpause\":\n\t\t\tstat = self.mpd.status()\n\t\t\tif stat.state != 2:\n\t\t\t\tself.mpd.play()\n\t\t\telse:\n\t\t\t\tself.mpd.pause()\n\t\telif op == \"stop\":\n\t\t\tself.mpd.stop()\n\t\telif op == \"clear\":\n\t\t\tself.mpd.clear()\n\t\telif op == \"clear_old\":\n\t\t\tidx, tot = self.mpd.getPlaylistPosition()\n\t\t\tself.mpd.delete([[0, max(0, idx-2)]])\n\t\telif op == \"shuffle\":\n\t\t\tself.mpd.shuffleIt()\n\t\telif op == \"seek\":\n\t\t\tself.mpd.seek(percent=int(idx))\n\t\telif op == \"volup\":\n\t\t\tself.mpd.volumeUp()\n\t\telif op == \"voldown\":\n\t\t\tself.mpd.volumeDown()\n\t\telif op == \"mute\":\n\t\t\tself.mpd.mute()\n\t\telif op == \"changeDisplay\":\n\t\t\tself.mpd.changeDisplay(int(idx))\n\t\telse:\n\t\t\traise \"ERROR:\" + op + \",\" + str(idx)\n\t\treturn self.ajax_player()","sub_path":"wymypy/plugins/player/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"394557223","text":"def uploadFormData(request, session_id, auth_token):\n \n logr.info(\"Incoming request to upload form-media files by user:%s\"%(request.user))\n resp = {}\n\n if request.method != 'POST':\n return _return_json_error(\"Only POST method supported\")\n\n # Fetch the candidate from the request.user\n try:\n candidate = Candidate.objects.get(user_id = request.user.id)\n except:\n return _return_json_error(\"Not Authenticated!\")\n\n # Check for valid session\n try:\n appSession = ApplicationSession.objects.get(id = session_id) \n except Exception:\n return _return_json_error(\"Candidate Application Session doesn't exist!\")\n\n # Check the session_id belongs to same candidate.\n if appSession.applicant.candidate != candidate:\n return _return_json_error(\"Invalid Candidate Application Flow!\")\n\n # Check the status of appSession -- allow upload only if the status is open\n if appSession.status != \"open\":\n return _return_json_error(\"Candidate Application Flow is closed!\")\n\n # Make a token out of the candidate_id, session_id and token. \n expected_auth_token = _make_media_token(session_id, candidate.id)\n\n # Verify with the token and save the file.\n if expected_auth_token == auth_token:\n f = request.FILES['file']\n split_name = f.name.split('.') #To distinguish between the '.' 
in filename and extension if any, last one will be the extension\n extension = split_name[-1]\n\n _map_extensions = {\n \"mp3\": \"audio\",\n \"wav\": \"audio\",\n \"mp4\": \"video\",\n \"webm\": \"video\",\n \"png\": 'photo',\n \"jpg\": 'photo',\n \"jpeg\": 'photo',\n }\n\n # Fetch the candidate profile to save the filenames. \n profile = Profile.objects.get(candidate_id = candidate.id)\n\n # Build the filename based on the extension\n if _map_extensions[extension] == 'photo':\n filename = 'candidate_%d.%s'%(candidate.id, extension)\n profile.photo_filename = filename\n if _map_extensions[extension] == 'audio':\n filename = 'audio_resume_%d.%s'%(candidate.id, extension)\n profile.audio_profile = filename\n elif _map_extensions[extension] == 'video':\n filename = 'video_resume_%d.%s'%(candidate.id, extension)\n profile.video_profile = filename\n\n path = '/content/candidates/resume/%s'%filename\n o_file = open(path, 'w')\n\n for chunk in f.chunks():\n o_file.write(chunk)\n o_file.close()\n profile.save()\n\n resp[\"status\"] = \"success\"\n resp[\"path\"] = path.replace('/content', './images/content/s3content')\n\n else:\n resp[\"status\"] = \"failed\"\n resp[\"errStr\"] = \"Auth Token Mismatch\"\n\n return JsonResponse(resp)\n# End of UploadFormData()\n","sub_path":"code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":3415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"642517946","text":"import pygame, sys\nfrom pygame.locals import *\nfrom config import *\nfrom colors import *\nimport game\n\npygame.init()\n\ndisplay = pygame.display.set_mode(WINDOW_SIZE)\npygame.display.set_caption(WINDOW_NAME)\nclock = pygame.time.Clock()\n\nmouse_x = 0\nmouse_y = 0\nmouse_clicked = False\ngame_board = game.generate_board(WINDOW_SIZE[0], WINDOW_SIZE[1])\n\nwhile True:\n display.fill(THEME_BG)\n\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n elif event.type == MOUSEMOTION:\n (mouse_x, mouse_y) = event.pos\n elif event.type == MOUSEBUTTONUP:\n (mouse_x, mouse_y) = event.pos\n mouse_clicked = True\n\n for card in game_board:\n card.render(display)\n\n pygame.draw.circle(display, RED, (mouse_x, mouse_y), 10, 2)\n\n pygame.display.update()\n clock.tick(FPS)","sub_path":"chapter-3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"551740919","text":"import tensorflow as tf\nimport tensorflow.keras as k\n\ndef lrelu(x):\n return tf.keras.activations.relu(x, alpha=0.1)\n\ndef keras_squeeze_module(incoming_layer, sx, ex1, ex2, p, sc, ec):\n layer_1 = k.layers.Conv3D(sc, sx, padding='SAME', strides=1, activation=lrelu, name=\"s1\" + str(p))(incoming_layer)\n layer_2 = k.layers.Conv3D(ec, ex1, padding='SAME', strides=1, activation=lrelu, name=\"s2\" + str(p))(layer_1)\n # layer_3 = k.layers.Conv3D(ec, ex2, padding='SAME', strides=1, activation=lrelu, name=\"s3\" + str(p))(layer_1)\n return layer_2\n\ndef kerasVoxelExtractor(im):\n layer_1 = k.layers.Conv3D(64, 3, strides=2, activation=lrelu,name='convfirst')(im)\n layer_2 = keras_squeeze_module(layer_1, 1, 3, 3, 1, 16, 64)\n ll = k.layers.BatchNormalization(name='bbn0')(layer_2)\n layer_3 = keras_squeeze_module(ll, 1, 3, 3, 2, 16, 64)\n layer_4 = keras_squeeze_module(layer_3, 1, 3, 3, 3, 32, 96)\n ll = k.layers.BatchNormalization(name='bbn0.2')(layer_4)\n layer_5 = k.layers.MaxPool3D(2, 2, 'VALID',name='maxps')(ll)\n layer_6 = 
keras_squeeze_module(layer_5, 1, 3, 3, 4, 32, 128)\n layer_7 = keras_squeeze_module(layer_6, 1, 3, 3, 5, 48, 128)\n ll = k.layers.BatchNormalization(name='bbn1')(layer_7)\n layer_8 = keras_squeeze_module(ll, 1, 3, 3, 6, 48, 192)\n layer_9 = keras_squeeze_module(layer_8, 1, 3, 3, 7, 64, 128)\n ll = k.layers.BatchNormalization(name='bbn1.2')(layer_9)\n layer_10 = k.layers.AveragePooling3D(2, 2, 'VALID', name='avgps')(ll)\n return k.layers.Flatten(name='flat')(layer_10)\n","sub_path":"rldock/voxel_policy/utils_tf2.py","file_name":"utils_tf2.py","file_ext":"py","file_size_in_byte":1543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"391570450","text":"targets = input().split()\ntargets = [int(i) for i in targets]\ncommand = input()\n\nwhile not command == \"End\":\n action, index, value = command.split()\n index = int(index)\n value = int(value)\n if action == \"Shoot\":\n if 0 <= index < len(targets):\n targets[index] -= value\n else:\n command = input()\n continue\n if targets[index] <= 0:\n targets.pop(index)\n elif action == \"Add\":\n if index >= len(targets) or index < 0:\n print(f\"Invalid placement!\")\n else:\n targets.insert(index, value)\n elif action == \"Strike\":\n if len(targets) < index + value or index < value:\n print(f\"Strike missed!\")\n command = input()\n continue\n else:\n del targets[index-value:index+value+1]\n command = input()\ntargets = [str(i) for i in targets]\ntargets_as_str = \"|\".join(targets)\nprint(targets_as_str)","sub_path":"list,advanced/Moving Target.py","file_name":"Moving Target.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"116751051","text":"#!/usr/bin/env python\nimport roslib; roslib.load_manifest('cob_voltage_control')\nimport rospy\nimport time\nimport csv\nfrom std_msgs.msg import Float64\n#from pr2_msgs.msg import PowerState\n\n#starttime = 1\n\ndef callback(data):\n\twriter.writerow( ( round((rospy.Time.now() - starttime).to_sec(),5), data.data) )\n\n#def timer_callback(data):\n#\trospy.loginfo(data.data)\n\ndef record():\n\trospy.init_node('record_voltage')\n\tglobal starttime\n\tstarttime = rospy.Time.now()\n\n\tglobal f\n\tglobal writer\n\tfilename = rospy.get_param(\"/record_voltage/filename\")\n\tf = open(filename, 'wt', 1)\n\twriter = csv.writer(f)\n\n\trospy.Subscriber(\"/power_board/voltage\", Float64, callback)\n\t\n\n\twhile not rospy.is_shutdown():\n\t\t#pub_em_stop.publish(msg_em)\n\t\t#pub_power.publish(msg_power) comes already out of gazebo\n\t\trospy.sleep(1.0)\n\nif __name__ == '__main__':\n\trecord()\n\n","sub_path":"cob_voltage_control/ros/src/record_voltage.py","file_name":"record_voltage.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"578168624","text":"import datetime\nimport re\n\nfrom django.conf import settings\nfrom django.contrib.auth import logout\nfrom django.shortcuts import redirect\nfrom django.utils.translation import LANGUAGE_SESSION_KEY\n\nfrom saleboxdjango.lib.basket import SaleboxBasket\n\n\nclass SaleboxMiddleware:\n def __init__(self, get_response):\n self.get_response = get_response\n\n def __call__(self, request):\n # kick out inactive (i.e. 
banned) users\n if request.user.is_authenticated and not request.user.is_active:\n request.session['saleboxbasket'] = None\n logout(request)\n return redirect('/')\n\n # init shopping basket\n sb = SaleboxBasket(request)\n\n # set product_list_order\n request.session.setdefault(\n 'product_list_order',\n settings.SALEBOX['SESSION']['DEFAULT_PRODUCT_LIST_ORDER']\n )\n if 'product_list_order' in request.GET:\n valid_orders = [\n 'bestseller_low_to_high',\n 'bestseller_high_to_low',\n 'price_low_to_high',\n 'price_high_to_low',\n 'rating_high_to_low',\n 'rating_low_to_high',\n ]\n if request.GET['product_list_order'] in valid_orders:\n request.session['product_list_order'] = request.GET['product_list_order']\n if re.search(r'\\d+\\/$', request.path):\n return redirect(re.sub(r'\\d+\\/$', '', request.path))\n\n # create response\n response = self.get_response(request)\n if sb.get_cookie_action(request) == 'add':\n response.set_cookie(\n 'psessionid',\n value=request.session.session_key,\n max_age=60 * 60 * 24 * 365\n )\n elif sb.get_cookie_action(request) == 'remove':\n response.delete_cookie('psessionid')\n return response\n\n\n\"\"\"\nThis middleware sets whatever language is in the URL, e.g. /en/about-us = 'en'\nand stores it in the session[LANGUAGE_SESSION_KEY]. This means django\ncan 'remember' the language of the non-language specific URLs, e.g. /basket/\n\"\"\"\nclass SaleboxI18NSessionStoreMiddleware:\n def __init__(self, get_response):\n self.get_response = get_response\n\n def __call__(self, request):\n available_languages = [l[0].lower() for l in settings.LANGUAGES]\n key = getattr(settings, 'LANGUAGE_SESSION_KEY', LANGUAGE_SESSION_KEY)\n prev_language = request.session.get(key, None)\n curr_language = prev_language\n\n # set a default language if none exists\n if curr_language is None:\n curr_language = available_languages[0]\n\n # attempt to set the language from the path\n try:\n prefix = request.path.lower().strip('/').split('/')[0]\n if prefix in available_languages:\n curr_language = prefix\n except:\n pass\n\n # update the session if applicable\n if prev_language != curr_language:\n request.session[key] = curr_language\n\n # create response\n return self.get_response(request)\n","sub_path":"saleboxdjango/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":3130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"55341009","text":"# -*- encoding: utf-8 -*-\n\"\"\"\nReceives a POST request to ingest a bag giving the URL and publishes the\narchive event to an SNS topic.\n\nQuoting from RFC 002 at commit ea310c1 on master:\n\n POST /ingests\n Content-Type: application/json\n\n {\n \"type\": \"Ingest\",\n \"ingestType\": {\n \"id\": \"create\",\n \"type\": \"IngestType\"\n },\n \"uploadUrl\": \"s3://source-bucket/source-path/source-bag.zip\",\n \"callbackUrl\": \"https://workflow.wellcomecollection.org/callback?id=b1234567\",\n }\n\n Response:\n\n 202 ACCEPTED\n\n\"\"\"\n\nimport os\nimport uuid\n\nimport boto3\nimport daiquiri\n\nfrom urllib.parse import urlparse\nfrom wellcome_aws_utils.lambda_utils import log_on_error\nfrom wellcome_aws_utils.sns_utils import publish_sns_message\n\ndaiquiri.setup(level=os.environ.get('LOG_LEVEL', 'INFO'))\nlogger = daiquiri.getLogger()\n\n\ndef post_ingest_request(event, sns_client, topic_arn):\n request = event['body']\n path = event.get('path', '')\n\n try:\n upload_url = request['uploadUrl']\n callback_url = request.get('callbackUrl', None)\n except TypeError:\n 
raise TypeError(f\"[BadRequest] Invalid request not json: {request}\")\n except KeyError as keyError:\n raise KeyError(f\"[BadRequest] Invalid request missing '{keyError.args[0]}' in {request}\")\n\n ingest_request_id = str(uuid.uuid4())\n logger.debug('ingest_request_id: %r', ingest_request_id)\n\n message = archive_bag_message(ingest_request_id, upload_url, callback_url)\n logger.debug(\"sns-message: %r\", message)\n\n topic_name = topic_arn.split(\":\")[-1]\n\n publish_sns_message(\n sns_client=sns_client,\n topic_arn=topic_arn,\n message=message,\n subject=f\"source: archive_ingest ({topic_name})\"\n )\n logger.debug(\"published: %r to %r\", message, topic_arn)\n\n return {\n 'id': ingest_request_id,\n 'location': join_url((path, ingest_request_id))\n }\n\n\ndef archive_bag_message(archive_request_id, bag_url, callback_url):\n \"\"\"\n Generates bag archive messages.\n \"\"\"\n url = urlparse(bag_url)\n if url.scheme == 's3':\n bucket = url.netloc\n key = url.path.lstrip('/')\n msg = {\n 'archiveRequestId': archive_request_id,\n 'bagLocation': {\n 'namespace': bucket,\n 'key': key\n }\n }\n if callback_url:\n msg['callbackUrl'] = callback_url\n return msg\n else:\n raise ValueError(f\"[BadRequest] Unrecognised url scheme: {bag_url}\")\n\n\ndef join_url(path_segments):\n return '/' + '/'.join(path_segment.strip('/') for path_segment in path_segments)\n\n\n@log_on_error\ndef main(event, context=None, sns_client=None):\n logger.debug('received %r', event)\n\n request_method = event['request_method']\n if request_method != 'POST':\n raise ValueError(\n 'Expected request_method=POST, got %r' % request_method\n )\n\n topic_arn = os.environ['TOPIC_ARN']\n sns_client = sns_client or boto3.client('sns')\n\n return post_ingest_request(\n event,\n sns_client=sns_client,\n topic_arn=topic_arn\n )\n","sub_path":"archive/archive_request_ingest/src/archive_request_ingest.py","file_name":"archive_request_ingest.py","file_ext":"py","file_size_in_byte":3126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"331347702","text":"from board import *\nfrom pieces import *\n\nclass Player(object):\n \"\"\"docstring for Player.\"\"\"\n\n def __init__(self, color, board, model=None, training=None):\n super(Player, self).__init__()\n self.model = model\n self.color = color\n self.color_name = [\"white\",\"black\"][color]\n self.board = board\n if not model:\n self.human = True\n else:\n self.human = False\n if self.human:\n self.name = input(\"What is your name, {} player?\".format(self.color_name))\n else:\n self.name = str(model)\n self.training = training\n\n\n\n def model_move(self, all_possible_moves):\n possible_moves_tensor = torch.zeros(NUM_ALL_MOVES, dtype=torch.uint8)\n for piece, old_pos, new_pos in all_possible_moves:\n move_num = all_moves_map[(old_pos, new_pos)]\n possible_moves_tensor[move_num] = 1\n #return random.sample(all_possible_moves,1)[0]\n boardstate = self.board.to_model_input()\n all_info = torch.cat((boardstate,possible_moves_tensor))\n return self.model(all_info)\n\n def players_move(self, all_possible_moves):\n '''Accepts input in the form \"x1,y1.x2,y2\", where x1,y1 is the old pos\n and x2,y2 is the new pos'''\n # TODO: ADD Castling Support\n move_map = {\"{},{}.{},{}\".format(m[1][0],m[1][1],m[2][0],m[2][1]):m for m in all_possible_moves}\n limbo = True\n while limbo:\n move_input = input('Choose a move!')\n if move_map.get(move_input):\n limbo = False\n return move_map.get(move_input)\n else:\n print(\"No move found. 
Select from the list:\")\n                pprint(list(move_map.keys()))\n\n    def _get_training_move(self,all_possible_moves):\n        raise NotImplementedError\n\n    def get_enemy_king(self):\n        return [self.board.black_king,self.board.white_king][self.color]\n\n    def get_next_move(self):\n        # self.board.print()\n        enemy_king = self.get_enemy_king()\n        enemy_king.in_check = False\n\n        all_possible_moves = set()\n        for row in self.board.board:\n            for s in row:\n                if s and s.color == self.color:\n                    s.update_moves()\n                    for m in s.moves:\n                        all_possible_moves.add((s, (s.x,s.y), m))\n                    for a in s.attacks:\n                        all_possible_moves.add((s, (s.x,s.y), a))\n                        if a == (enemy_king.x,enemy_king.y):\n                            enemy_king.in_check = True\n\n        if self.board.is_castling_legal(self.color, 'Kingside'):\n            all_possible_moves.add('O-O')\n        if self.board.is_castling_legal(self.color, 'Queenside'):\n            all_possible_moves.add('O-O-O')\n\n        if self.training:\n            return self._get_training_move(all_possible_moves)\n        if self.human:\n            return self.players_move(all_possible_moves)\n        else:\n            return self.model_move(all_possible_moves)\n\nclass TrainingPlayer(Player):\n    \"\"\"docstring for TrainingPlayer.\"\"\"\n\n    def __init__(self, color, board, winner, move_gen):\n        super(TrainingPlayer, self).__init__(color, board, model=\"Virtual\", training=True)\n        self.move_gen = move_gen\n        if winner==color:\n            self.learn = True\n        else:\n            self.learn = False\n\n    def _get_training_move(self,all_possible_moves):\n        possible_moves_tensor = torch.zeros(NUM_ALL_MOVES, dtype=torch.uint8)\n        for move in all_possible_moves:\n            if isinstance(move, str):\n                move_num = all_moves_map[move]\n                possible_moves_tensor[move_num] = 1\n            else:\n                piece, old_pos, new_pos = move\n                move_num = all_moves_map[(old_pos, new_pos)]\n                possible_moves_tensor[move_num] = 1\n        boardstate = self.board.to_model_input()\n        #all_info = torch.cat((boardstate,possible_moves_tensor))\n        all_info = boardstate\n\n        try:\n            new_move = next(self.move_gen)\n        except StopIteration:\n            return None\n\n        if type(new_move) == str:\n            move_for_production = new_move\n        else:\n            move_for_production = (self.board.get(new_move[0]), new_move[0], new_move[1])\n\n        if self.learn:\n            new_move_tensor = torch.zeros(NUM_ALL_MOVES)\n            move_num = all_moves_map[new_move]\n            new_move_tensor[move_num] = 1\n            training_examples.append((all_info,new_move_tensor))\n\n        return move_for_production
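\n\n# Game wires two players to a fresh board: training_winner == -1 means live play\n# (human and/or model players); otherwise both sides are TrainingPlayers that\n# replay a recorded game so the winner's moves become training examples.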
\nclass Game(object):\n    \"\"\"docstring for Game.\"\"\"\n\n    def __init__(self, interactive=False, models=None, training_winner=-1):\n        super(Game, self).__init__()\n        self.interactive = interactive\n        self.b = Board()\n        self.models = models\n        self.ongoing = True\n        self.turn_number = 1\n        self.player_turn = 0\n        self.training_winner = training_winner\n\n        # INIT players\n        self.player1 = None\n        self.player2 = None\n        if training_winner == -1:\n            if type(models) == list:\n                num_models = len(models)\n                if num_models == 1:\n                    self.model = models[0]\n                if interactive:\n                    model = models[0]\n            else:\n                self.model = models\n                num_models = 1\n            if interactive:\n                while not self.player1:\n                    player_color = input(\"Which color will you be? (w/b)\")\n                    if player_color.lower().startswith('w'):\n                        self.player1 = Player(0, self.b)\n                        self.player2 = Player(1, self.b, self.model)\n                    elif player_color.lower().startswith('b'):\n                        self.player1 = Player(0, self.b, self.model)\n                        self.player2 = Player(1, self.b)\n                    else:\n                        print(\"I didn't understand that.\")\n            else:\n                if num_models > 2:\n                    print(\"Too many models were included! All but the first two will be discarded.\")\n                elif num_models == 2:\n                    self.player1 = Player(0, self.b, models[0])\n                    self.player2 = Player(1, self.b, models[1])\n                elif num_models == 1:\n                    self.player1 = Player(0, self.b, self.model)\n                    self.player2 = Player(1, self.b, self.model)\n        else:\n            self.player1 = TrainingPlayer(0, self.b, self.training_winner, models[0])\n            self.player2 = TrainingPlayer(1, self.b, self.training_winner, models[1])\n\n        #INIT boardstate\n        for (piece, color, pos) in starting_board:\n            piece(color, self.b, pos)\n\n    def apply(self, move):\n        try:\n            if isinstance(move, str):\n                if move in ['O-O','0-0']:\n                    self.b.castle(self.player_turn, 'Kingside')\n                elif move in ['O-O-O','0-0-0']:\n                    self.b.castle(self.player_turn, 'Queenside')\n            else:\n                piece, old_pos, new_pos = move\n                # if self.interactive:\n                #     pprint(piece.__dict__)\n                piece._move_to(new_pos)\n        except:\n            self.b.print()\n            print(move)\n            print(\"Turn {}{}\".format(self.turn_number,['a','b'][self.player_turn]))\n            pprint(move[0].__dict__)\n\n    def start(self):\n        while self.ongoing:\n            if self.interactive:\n                print()\n                self.b.print()\n                print(\"Turn {}{}\".format(self.turn_number,['a','b'][self.player_turn]))\n            current_player = [self.player1,self.player2][self.player_turn]\n\n            next_move = current_player.get_next_move()\n            if not next_move:\n                self.ongoing = False\n                break\n\n            self.apply(next_move)\n\n            if self.b.won:\n                self.ongoing = False\n\n            if self.ongoing:\n                self.player_turn = 1 - self.player_turn\n                if self.player_turn == 0:\n                    self.turn_number += 1\n        if self.training_winner == -1:\n            print(\"{} is the winner!\".format([self.player1.name,self.player2.name][self.player_turn]))\n\n\nif __name__ == '__main__':\n    g = Game(True, ['a'])\n    g.start()\n","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":8418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"198755242","text":"#!/usr/bin/env python3\nimport re\nimport math\nfrom collections import defaultdict, Counter\n\ndef process(message):\n    nonAlpha = re.compile('[^a-z\\s]')\n    message = message.lower()\n    message = message.replace('\\n', ' ')\n    message = nonAlpha.sub('', message)\n\n    parsedList = []\n    for word in message.split(' '):\n        if len(word) > 2:\n            parsedList.append(word)\n\n    # extract letter triples (trigrams)\n    triplesList = defaultdict(int)\n    for word in parsedList:\n\n        for i in range(len(word)):\n            if i + 3 > len(word):\n                break\n            try:\n                triplesList[word[i:i+3]] += 1\n            except KeyError:\n                triplesList[word[i:i+3]] = 1\n\n    triplesList = Counter(triplesList)\n    return triplesList\n\n\ndef calculate(triples):\n    H_P3 = 0\n    total = sum(triples.values())\n    for trigram, val in triples.items():\n        probability = val / total * math.log((val/total), 10)\n        H_P3 += probability\n\n    print(-1*H_P3)\n\n\ndef main():\n    infile = \"infile.txt\"\n    message = \"\"\n    with open(infile, 'r') as f:\n        message = f.read()\n\n    calculate(process(message))\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"CS5602_Crpytography/Final/FinalZip/ANDERSONQ3.py","file_name":"ANDERSONQ3.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"409889215","text":"# -*- coding: utf-8 -*-\nimport scrapy\nimport cx_Oracle\nfrom scrapy_splash import SplashRequest\n\n\nclass TvsiFinancialRatioSpider(scrapy.Spider):\n    name = 'tvsi_financial_ratio'\n    download_delay = 1\n    allowed_domains = ['finance.tvsi.com.vn']\n\n    def start_requests(self):\n\n        con =
        con = cx_Oracle.connect('stock/password1@127.0.0.1/xe')\n        cur = con.cursor()\n\n        stockyears = ['2018', '2017', '2016', '2015', '2014', '2013', '2012', '2011', '2010', '2009', '2008']\n        #stockyears = ['2013', '2012', '2011', '2010', '2009', '2008']\n\n        runindustry = 'Building, Construction'\n\n        stockperiod = '1'\n\n        sqlstatement = 'select stock_code from tvsi_stock_entity where stock_industry in (' + \"'\" + runindustry + \"'\" ')'\n        curstatement = cur.execute(sqlstatement)\n        stockcodes = curstatement.fetchall()\n        stocklists = []\n\n        for stockcode in stockcodes:\n            stocklists.append(stockcode[0])\n\n        for stockyear in stockyears:\n            for stocklist in stocklists:\n                url = 'http://finance.tvsi.com.vn/Enterprises/chitieutaichinh?symbol=' + stocklist + '&YearView=' + stockyear + '&period=' + stockperiod + '&donvi=1000000'\n                yield scrapy.Request(url, self.parse, meta={'splash': {'endpoint': 'render.html',}, 'stockyear': stockyear, 'runindustry': runindustry, 'stockperiod': stockperiod}, cookies={'fp_tvsi_lang':'en-US'})\n\n        cur.close()\n        con.close()\n\n    def parse(self, response):\n\n        con = cx_Oracle.connect('stock/password1@127.0.0.1/xe')\n        cur = con.cursor()\n\n        stockyear = response.meta.get('stockyear')\n        runindustry = response.meta.get('runindustry')\n        stockperiod = response.meta.get('stockperiod')\n\n        f = open(\"tvsi-\" + runindustry + \"-\" + stockyear + \".csv\", \"a\")\n\n        rows = response.xpath('//table[@id=\"table_cttc\"]/tbody/tr[@data-level=2]')\n\n        for row in rows:\n            stock_code = response.url.split(\"?symbol=\")[1].split(\"&YearView=\")[0]\n            stock_factor = row.xpath('./td[1]/div[@class=\"label\"]/text()').extract()[0]\n\n            # The header row holds one date per data column (td[3] through td[8]);\n            # each column yields one quarter/year/value record.\n            for col in range(3, 9):\n                data_date = response.xpath('//table[@id=\"table_cttc\"]/tbody/tr[@data-level=\"header\"]/td[{}]/text()'.format(col)).extract()\n                if data_date:\n                    if stockperiod == '1':\n                        data_quarter = data_date[0].split(\" \")[0]\n                        data_year = data_date[0].split(\" \")[0]\n                    elif stockperiod == '2':\n                        data_quarter = data_date[0].split(\" \")[0]\n                        data_year = data_date[0].split(\" \")[1]\n                    stock_data = row.xpath('./td[{}]/text()'.format(col)).extract()\n                    if stock_data:\n                        stock_data = stock_data[0]\n                    else:\n                        stock_data = ''\n                    yield {\n                        'stock_code': stock_code,\n                        'stock_factor': stock_factor,\n                        'stock_quarter': data_quarter,\n                        'stock_year': data_year,\n                        'stock_data': stock_data,\n                    }\n                    sqlstatement = 'insert into tvsi_financial_ratio (stock_code, stock_factor, stock_quarter, stock_year, stock_data) values (:1, :2, :3, :4, :5)'\n                    cur.execute(sqlstatement, (stock_code, stock_factor, data_quarter, data_year, stock_data))\n                    con.commit()\n\n                    f.write(stock_code + ', ' + stock_factor + ', ' + data_quarter + ', ' + data_year + ', ' + stock_data + '\\n')\n\n        f.close()\n        cur.close()\n        con.close()\n","sub_path":"scraperExpress/spiders/tvsi_financial_ratio.py","file_name":"tvsi_financial_ratio.py","file_ext":"py","file_size_in_byte":10441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"24871401","text":"# https://www.codewars.com/kata/52210226578afb73bd0000f1/train/python\r\n\r\ndef electrons_around_the_cores(dice):\r\n    # Just so you can try some numbers\r\n
ans","sub_path":"gameElectron/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"610521245","text":"import random\ndef matrix_reloaded():\n size = random.randint(2, 10)\n matrix = [[random.randint(0,1) for y in range(size)] for x in range(size)]\n rowindices = [0]\n columnindices = [0]\n highcolumn = 0\n for i in range(len(matrix)):\n if matrix[i].count(1) > matrix[rowindices[-1]].count(1):\n rowindices.clear()\n rowindices.insert(0, i)\n elif matrix[i].count(1) == matrix[rowindices[-1]].count(1):\n if i != rowindices[-1]:\n rowindices.append(i)\n tempcount = 0\n for j in range(len(matrix)):\n if matrix[j][i] == 1:\n tempcount += 1\n if tempcount > highcolumn:\n columnindices.clear()\n columnindices.insert(0, i)\n highcolumn = tempcount\n elif tempcount == highcolumn:\n if i != columnindices[-1]:\n columnindices.append(i)\n for i in matrix:\n print(i)\n print(\"The largest row index: \" + \"\".join(str(rowindices)))\n print(\"The larget column index: \" + \"\".join(str(columnindices)))\n\nmatrix_reloaded()","sub_path":"Class Exercises 1/matrix_reloaded.py","file_name":"matrix_reloaded.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"537412052","text":"# -*- coding: utf-8 -*-\nimport random\nimport bisect\n\n# Returns a random value, considering the weights of each item.\nclass WeightedChoice(object):\n def __init__(self, weights):\n self.totals = []\n self.weights = weights\n running_total = 0\n\n for w in weights:\n running_total += w[1]\n self.totals.append(running_total)\n\n def next(self):\n rnd = random.random() * self.totals[-1]\n i = bisect.bisect_right(self.totals, rnd)\n return self.weights[i][0]\n\n\n\ngame_list = (\n ('dota', 10),\n ('dota 2', 20),\n ('csgo', 15),\n ('hots', 40),\n ('dont starve', 10),\n ('dead icland', 10),\n ('dead icland addon', 10),\n ('civ', 7),\n ('majesty', 10),\n ('eve lou', 7),\n ('war 3', 7),\n ('war 3 custom', 10),\n ('fabel 3', 10),\n ('l4d2', 10),\n ('start bound', 5),\n ('danger defenders', 10),\n)\n\nweightedChoice = WeightedChoice(game_list)\n\nprint(weightedChoice.next())\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"65348643","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n日志\n\"\"\"\n\nimport logging\n\nLOG_LEVELS = {\"5\": logging.DEBUG,\n \"4\": logging.INFO,\n \"3\": logging.WARNING,\n \"2\": logging.ERROR,\n \"1\": logging.CRITICAL,\n }\n\n\ndef init_logger(loglevel,\n logfile,\n logformatter=None,\n logname=\"applogger\"):\n \"\"\"初始化日志记录器\n :logname : 名称\n :loglevel : 日志等级\n :logfile : 日志文件\n :logformatter: 日志格式\n \"\"\"\n # 设置默认的日志输出格式\n formatter_default = (\"%(asctime)s - \\\n %(name)s - \\\n %(levelname)s - \\\n %(message)s\")\n if logformatter:\n formatter_default = logformatter\n\n # 日志等级\n level = LOG_LEVELS.get(loglevel, logging.NOTSET)\n\n # 全局配置\n logging.basicConfig(level=level,\n format=formatter_default,\n )\n\n # 创建一个logger\n logger = logging.getLogger(logname)\n\n # 创建一个handler,用于写入日志文件\n fh = logging.FileHandler(logfile)\n\n # 创建一个handler,用于输出到控制台\n ch = logging.StreamHandler()\n\n # 给logger添加handler\n logger.addHandler(fh)\n logger.addHandler(ch)\n\n return 
logger\n","sub_path":"examples/spider/logginger.py","file_name":"logginger.py","file_ext":"py","file_size_in_byte":1362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"519418357","text":"# Author : Abhyuday Tripathi\nimport sys\nfrom termcolor import colored, cprint\nfrom copy import deepcopy\nfrom queue import PriorityQueue as pq\nfrontier = pq()\nexplored = dict()\nimport time\nclass board: \n def __init__(self,state):\n self.state= state \n self.parent =None\n self.level =0\n self.ch=[]\n self.loners=0\n self.actions =0\n self.gots=0\n self.hvalue=0\n def isolation(self):\n check=False\n \n for i in range(7):\n for j in range(7):\n h1=False\n h2=False\n v1 =False\n v2 = False\n if(self.state[i][j]==1):\n if(j+2<7):\n if(self.state[i][j+1]!=1 and self.state[i][j+2]!=1):\n h1=True\n else:\n h1=True\n\n\n if(j-2>=0):\n if(self.state[i][j-1]!=1 and self.state[i][j-2]!=1):\n h2=True\n else:\n h2=True\n if(i+2<7):\n if(self.state[i+1][j]!=1 and self.state[i+2][j]!=1):\n v1=True\n else:\n v1=True\n if(i-2>=0):\n if(self.state[i-1][j]!=1 and self.state[i-2][j]!=1):\n v2=True\n else:\n v2=True\n if(h1 and h2 and v1 and v2):\n self.loners+=1\n \n \n \n\n def moves(self):\n \n\n for i in range(7):\n for j in range(7):\n if(self.state[i][j]==1):\n self.gots+=1\n if(j+2<7):\n if(self.state[i][j+1]==1 and self.state[i][j+2]==0):\n x =deepcopy(self.state)\n x[i][j]=0\n x[i][j+1]=0\n x[i][j+2]=1\n b = board(x)\n b.parent = self\n b.level = self.level+1\n self.ch.append(b)\n self.actions+=1\n \n if(j-2>0):\n if(self.state[i][j-1]==1 and self.state[i][j-2]==0):\n x =deepcopy(self.state)\n x[i][j]=0\n x[i][j-1]=0\n x[i][j-2]=1\n b = board(x)\n b.parent = self\n b.level = self.level+1\n self.ch.append(b)\n self.actions+=1\n \n \n\n if(i+2<7):\n if(self.state[i+1][j]==1 and self.state[i+2][j]==0):\n x =deepcopy(self.state)\n x[i][j]=0\n x[i+1][j]=0\n x[i+2][j]=1\n b = board(x)\n b.parent = self\n b.level = self.level+1\n self.ch.append(b)\n self.actions+=1\n \n if(i-2>0):\n if(self.state[i-1][j]==1 and self.state[i-2][j]==0):\n x =deepcopy(self.state)\n x[i][j]=0\n x[i-1][j]=0\n x[i-2][j]=1\n b = board(x)\n b.parent = self\n b.level = self.level+1\n self.ch.append(b)\n self.actions+=1\n \n self.isolation()\n self.hvalue=hval(self)\n \n\n\n def is_goal_state(self,other):\n for i in range(7):\n for j in range(7):\n if(self.state[i][j]!=other[i][j]):\n return False\n return True\n\n def __lt__(self,other):\n return self.hvalue=2) or (i.loners>=5) ):\n pass\n else:\n \n frontier.put((i.hvalue-i.level,i))\n\n \n\n\n\n\n\n","sub_path":"English_PegSolataire_Astar.py","file_name":"English_PegSolataire_Astar.py","file_ext":"py","file_size_in_byte":6524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"85762781","text":"# -*- coding: utf-8 -*-\nimport sys\nfrom io import StringIO\nsys.stdin = StringIO(\"\"\"Hallo,\tdies ist eine \nziemlich lange Zeile, die in Html\naber nicht umgebrochen wird.\n
\nZwei

produzieren zwei Newlines. \nEs gibt auch noch das tag
was einen Trenner darstellt.\nZwei

produzieren zwei Horizontal Rulers.\nAchtung mehrere Leerzeichen irritieren\n\nHtml genauso wenig wie\n\n\nmehrere Leerzeilen.\"\"\")\n\ndef printer(is_last = False):\n\tglobal word, word_length, line, line_length\n\tif word == \"
\":\n\t\tif line_length:\n\t\t\tprint(line)\n\t\telse:\n\t\t\tprint()\n\t\tclear_word()\n\t\tclear_line()\n\telif word == \"
\":\n\t\tif line_length:\n\t\t\tprint(line)\t\t\n\t\tprint(\"-\"*80)\n\t\tclear_word()\n\t\tclear_line()\n\telif word_length:\n\n\t\t# word\n\t\tif line_length + word_length <= 80:\n\t\t\tline += word\n\t\t\tline_length += word_length\t\t\t\n\t\t\tclear_word()\n\t\telse:\n\t\t\tprint(line)\n\t\t\tline = word\n\t\t\tline_length = word_length\n\t\t\tclear_word()\n\n\t\t# space\n\t\tif line_length + 1 <= 80:\n\t\t\tline += \" \"\n\t\t\tline_length += 1\n\t\telse:\n\t\t\tprint(line)\n\t\t\tline = \" \"\n\t\t\tline_length = 1\n\n\tif is_last and line_length:\n\t\tprint(line)\n\ndef clear_word():\n\tglobal word, word_length\n\tword = \"\"\n\tword_length = 0\n\ndef clear_line():\n\tglobal line, line_length\n\tline = \"\"\n\tline_length = 0\n\nword = \"\"\nword_length = 0\nline = \"\"\nline_length = 0\n\ncursor = sys.stdin.read(1)\nwhile True:\n\t# print(\"DEBUG :: \", \"word : \",word, \"word_length : \",word_length, \"line : \",line, \"line_length : \",line_length, \"cursor : \",cursor)\n\tif cursor == \" \" or cursor == \"\\t\" or cursor == \"\\n\":\n\t\tprinter()\n\t\twhile (cursor == \" \" or cursor == \"\\t\" or cursor == \"\\n\"):\n\t\t\tcursor = sys.stdin.read(1)\n\telif cursor == \"\":\n\t\tprinter(is_last = True)\n\t\tbreak\n\telse:\n\t\tword += cursor\n\t\tword_length += 1\n\t\tcursor = sys.stdin.read(1)","sub_path":"season_2/week05/6581_kim.py","file_name":"6581_kim.py","file_ext":"py","file_size_in_byte":1748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"324931797","text":"import discord\nimport random\nimport math\nimport asyncio\nimport voxelbotutils as vbu\n\n\n# Does all the xp stuff\nasync def xp_finder_adder(bot, user: discord.User, played_with_fish):\n # ranges of how much will be added\n total_xp_to_add = random.randint(1, 25)\n\n # initial acquired fish data\n async with bot.database() as db:\n fish_rows = await db(\"\"\"SELECT * FROM user_fish_inventory WHERE user_id = $1 AND fish_name = $2\"\"\", user.id, played_with_fish)\n\n # level increase xp calculator\n xp_per_level = math.floor(25 * fish_rows[0]['fish_level'] ** 1.5)\n\n # for each tick of xp...\n for i in range(total_xp_to_add):\n\n # if the xp is higher or equal to the xp recquired to level up...\n if fish_rows[0]['fish_xp'] >= fish_rows[0]['fish_xp_max']:\n\n # update the level to increase by one, reset fish xp, and set fish xp max to the next level xp needed\n async with bot.database() as db:\n await db(\"\"\"UPDATE user_fish_inventory SET fish_level = fish_level + 1 WHERE user_id = $1 AND fish_name = $2\"\"\", user.id, played_with_fish)\n await db(\"\"\"UPDATE user_fish_inventory SET fish_xp = 0 WHERE user_id = $1 AND fish_name = $2\"\"\", user.id, played_with_fish)\n await db(\"\"\"UPDATE user_fish_inventory SET fish_xp_max = $1 WHERE user_id = $2 AND fish_name = $3\"\"\", int(xp_per_level), user.id, played_with_fish)\n\n # adds one xp regets new fish_rows\n async with bot.database() as db:\n await db(\"\"\"UPDATE user_fish_inventory SET fish_xp = fish_xp + 1 WHERE user_id = $1 AND fish_name = $2\"\"\", user.id, played_with_fish)\n fish_rows = await db(\"\"\"SELECT * FROM user_fish_inventory WHERE user_id = $1 AND fish_name = $2\"\"\", user.id, played_with_fish)\n\n return total_xp_to_add\n\n\ndef get_fixed_field(field):\n \"\"\"\n Return a list of tuples for the rarity-level in the pagination to fix fields that are too large\n \"\"\"\n\n fish_string_split = field[1].split('\\n')\n fixed_field = []\n current_string = \"\"\n fish_character_sum = 0\n\n 
for index, fish_string in enumerate(fish_string_split):\n fish_character_sum += len(\"\\n\" + fish_string)\n if fish_character_sum < 1020:\n current_string += \"\\n\" + fish_string\n if index == len(fish_string_split) - 1:\n fixed_field.append((field[0], current_string))\n else:\n fixed_field.append((field[0], current_string))\n current_string = fish_string\n fish_character_sum = 0\n\n if not fixed_field:\n fixed_field = [field]\n\n return fixed_field\n\n\ndef create_bucket_embed(user, field, custom_title=None):\n \"\"\"\n Creates the embed for the pagination page for the fishbucket\n \"\"\"\n embed = discord.Embed() # Create a new embed to edit the message\n embed.title = custom_title or f\"**{user.display_name}'s Fish Bucket**\\n\"\n embed.add_field(name=f\"__{field[0]}__\", value=field[1], inline=False)\n return embed\n\n\n# def create_info_embed(field):\n# embed = discord.Embed() # Create a new embed to edit the message\n# embed.title = \"Commands (anything in quotes is a variable, and the quotes may or may not be needed)\"\n# embed.add_field(name=f\"__{field[0]}__\", value=field[1], inline=False)\n# return embed\n\n\nasync def paginate(ctx, fields, user, custom_str=None):\n bot = ctx.bot\n curr_index = 1\n curr_field = fields[curr_index - 1]\n embed = create_bucket_embed(user, curr_field, custom_str)\n\n # Set up the buttons\n left = vbu.Button(custom_id=\"left\", emoji=\"◀️\", style=vbu.ButtonStyle.PRIMARY)\n right = vbu.Button(custom_id=\"right\", emoji=\"▶️\", style=vbu.ButtonStyle.PRIMARY)\n stop = vbu.Button(custom_id=\"stop\", emoji=\"⏹️\", style=vbu.ButtonStyle.DANGER)\n numbers = vbu.Button(custom_id=\"numbers\", emoji=\"🔢\", style=vbu.ButtonStyle.PRIMARY)\n\n valid_buttons = [left, right, stop]\n if len(fields) > 1:\n valid_buttons.append(numbers)\n\n # Put the buttons together\n components = vbu.MessageComponents(\n vbu.ActionRow(*valid_buttons)\n )\n\n fish_message = await ctx.send(embed=embed, components=components)\n\n def button_check(payload):\n\n if payload.message.id != fish_message.id:\n return False\n\n if payload.component.custom_id in [left.custom_id, right.custom_id, stop.custom_id, numbers.custom_id]:\n bot.loop.create_task(payload.ack())\n\n return payload.user.id == ctx.author.id\n\n while True: # Keep paginating until the user clicks stop\n try:\n chosen_button_payload = await bot.wait_for('component_interaction', timeout=60.0, check=button_check)\n chosen_button = chosen_button_payload.component.custom_id.lower()\n except asyncio.TimeoutError:\n chosen_button = \"stop\"\n\n index_chooser = {\n 'left': max(1, curr_index - 1),\n 'right': min(len(fields), curr_index + 1)\n }\n\n if chosen_button in index_chooser.keys():\n curr_index = index_chooser[chosen_button] # Keep the index in bounds\n curr_field = fields[curr_index - 1]\n\n await fish_message.edit(embed=create_bucket_embed(user, curr_field, custom_str))\n\n elif chosen_button == \"stop\":\n await fish_message.edit(components=components.disable_components())\n break # End the while loop\n\n elif chosen_button == \"numbers\" and len(fields) > 1:\n number_message = await ctx.send(f\"What page would you like to go to? 
(1-{len(fields)}) \")\n\n # Check for custom message\n def message_check(message):\n return message.author == ctx.author and message.channel == fish_message.channel and message.content.isdigit()\n\n user_message = await bot.wait_for('message', check=message_check)\n user_input = int(user_message.content)\n\n curr_index = min(len(fields), max(1, user_input))\n curr_field = fields[curr_index - 1]\n\n await fish_message.edit(embed=create_bucket_embed(user, curr_field, custom_str))\n await number_message.delete()\n await user_message.delete()\n\n\ndef seconds_converter(time):\n if 5_400 > time >= 3_600:\n form = 'hour'\n time /= 60 * 60\n elif time > 3_600:\n form = 'hours'\n time /= 60 * 60\n elif 90 > time >= 60:\n form = 'minute'\n time /= 60\n elif time >= 60:\n form = 'minutes'\n time /= 60\n elif time < 1.5:\n form = 'second'\n else:\n form = 'seconds'\n return f\"{round(time)} {form}\"\n","sub_path":"cogs/utils/misc_utils.py","file_name":"misc_utils.py","file_ext":"py","file_size_in_byte":6603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"598582099","text":"import os\nfrom Bio.PDB import PDBList\n\n\ndef downloadPDBFiles(pdbs, dir):\n \"\"\"Downloads PDB files from a list of PDB codes.\n\n Parameters\n ----------\n pdbs : list of str\n list containing all PDB codes (str) for download\n dir : str\n output directory\n \"\"\"\n pdb_list = PDBList()\n for i in pdbs:\n pdb_id = i[:4]\n pdb_list.retrieve_pdb_file(pdb_id, file_format=\"pdb\", pdir=dir, obsolete=False)\n\n # Delete unused folder obsolete\n os.system(\"rm -d obsolete\")\n\n files = os.listdir(dir)\n for index, file in enumerate(files):\n if file.endswith(\".ent\"):\n new_name = file.replace('pdb', '')\n os.rename(os.path.join(dir, file), os.path.join(dir, f'{new_name.split(\".\")[0]}.pdb'))\n","sub_path":"Last Version/build/lib/MacroFlexEngine/Modeller/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"377764802","text":"\nclass Solution:\n \n\n # def transpose(self, A):\n # \tl, w = len(A), len(A[0])\n # \tres = [[0] * l for _ in range(w)]\n\n # \tfor i in range(l):\n # \t\tfor j in range(w):\n # \t\t\tres[j][i] = A[i][j]\n # \treturn res\n\n\n def transpose(self, A):\n r, c = len(A), len(A[0])\n total = r * c\n res = [[0] * r for _ in range(c)]\n\n for i in range(total):\n row = i // r\n col = i % r\n res[row][col] = A[col][row]\n return res\n\n\n\n # def transpose(self, A):\n # if len(A) < 0: return []\n\n # l, w = len(A), len(A[0])\n\n # return [[A[i][j] for i in range(l)] for j in range(w)]\n\n\n # def transponse(self, A):\n # A[::] = zip(*A)\n # return A\n\n \nif __name__ == '__main__':\n \n sol = Solution()\n A = [[1,2,3],[4,5,6]]\n # A = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]\n print(sol.transpose(A))\n","sub_path":"867_转置矩阵.py","file_name":"867_转置矩阵.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"157234087","text":"def fn():\r\n n = int(input().strip())\r\n tmp = list(map(int,input().strip().split()))\r\n m = [[0]*n for i in range(n)]\r\n count = 0\r\n for i in range(n):\r\n for j in range(n):\r\n m[i][j] = tmp[count]\r\n count += 1\r\n \r\n a = 0\r\n b = n - 1\r\n\r\n while a != b:\r\n if m[a][b]:\r\n a += 1\r\n else:\r\n b -= 1\r\n \r\n row = a\r\n\r\n for i in range(n):\r\n if m[row][i] != 0:\r\n return -1\r\n if i != row:\r\n if 
m[i][row] == 0:\r\n                return -1\r\n    return row\r\n\r\n\r\n\r\n\r\n\r\n\r\nfor _ in range(int(input().strip())):\r\n    # fn() returns the celebrity's row index (which may be 0) or -1 if none exists.\r\n    if fn() != -1:\r\n        print(\"1\")\r\n    else:\r\n        print(\"-1\")\r\n\r\n\r\n","sub_path":"python/the_celebrity_problem_using_pointers.py","file_name":"the_celebrity_problem_using_pointers.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"357029101","text":"#!/bin/python3\n#Script to pull blob data from Azure Storage Account\n#Author: Vinod.N K\n##########################\n#Prerequisites\n#Usage: Python3, Python3-pip \n#Install the Azure Storage Blobs client library for Python with pip:\n### pip3 install azure-storage-blob --pre ###\n#Distro : Linux -Centos, Rhel, and any fedora\n#####################\n\n\n# Azure Storage Blobs client library for Python\nfrom azure.storage.blob import BlockBlobService\n\nfrom azure.storage.blob import PublicAccess\n\nimport os\n\n#name of your storage account and the access key from Settings->AccessKeys->key1\nblock_blob_service = BlockBlobService(account_name=\"TestBlobStorageAC\",account_key=\"ABCDEFGHIJKLMNOPKQRSTUVWXYZHcQ4kmNm293q3zx+IdQ2685hj8HUQO1Qg0ZTQMH4HtUZ6NWwLy==\")\n\n#Function to sort and clear JSON\ndef createJsonArray(path=\"\"):\n    file=open(path,'r')\n    lines = file.readlines()\n    single_json_list = []\n    \n    json_arr = \"[\"\n    delimiter=\"\"\n    \n    for line in lines:\n        if line.strip():\n            json_arr = json_arr + delimiter + line.strip() + \"\\n\"\n            delimiter=\",\"\n    json_arr = json_arr + \"]\"\n    file1 = open(path+\"_arr.json\",'w')\n    file1.write(json_arr)\n    file1.close()\n    os.remove(path)\n    \n\n#name of the container\ngenerator = block_blob_service.list_blobs('BlobContainerName')\n\n#code below lists all the blobs in the container and downloads them one after another\nfor blob in generator:\n    print(blob.name)\n    print(\"{}\".format(blob.name))\n    #check if the path contains a folder structure, create the folder structure\n    if \"/\" in \"{}\".format(blob.name):\n        print(\"there is a path in this\")\n        #extract the folder path and check if that folder exists locally, and if not create it\n        head, tail = os.path.split(\"{}\".format(blob.name))\n        print(head)\n        print(tail)\n        print(os.getcwd()+ \"/\" )\n        if (os.path.isdir(os.getcwd()+ \"/\" + head)):\n            # #download the files to this directory\n            print(\"directory and sub directories exist\")\n            block_blob_service.get_blob_to_path('BlobContainerName',blob.name,os.getcwd()+ \"/\" + head + \"/\" + tail)\n            createJsonArray(path=os.getcwd()+ \"/\" + head + \"/\" + tail)\n            \n        else:\n            # create the directory and download the file to it\n            print(\"directory doesn't exist, creating it now\")\n            print(\" dir -> \"+os.getcwd()+ \"/\" + head)\n            os.makedirs(name=os.getcwd()+ \"/\" + head, exist_ok=True)\n            #os.mkdir(os.getcwd()+ \"/\" + head)\n            print(\"directory created, download initiated\")\n            block_blob_service.get_blob_to_path('BlobContainerName',blob.name,os.getcwd()+ \"/\" + head + \"/\" + tail)\n            createJsonArray(path=os.getcwd()+ \"/\" + head + \"/\" + tail)\n            \n# block_blob_service.get_blob_to_path('cober-test-stand',blob.name,blob.name,blob.name,blob.name \n    else:\n        # Blobs without a path separator are downloaded under their own name.\n        block_blob_service.get_blob_to_path('BlobContainerName',blob.name,blob.name)\n        print()\n","sub_path":"AzureBlobAC.py","file_name":"AzureBlobAC.py","file_ext":"py","file_size_in_byte":3092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"489479620","text":"from MoodboardColorPicker import cluster_colors, load_images\nimport numpy as np\nimport sys\nimport os\nimport argparse\nfrom skimage.transform import resize\nimport matplotlib.pyplot as plt\n\n\ndef get_image_colors(im, rescale_size):\n    colors = np.array(im)\n    if rescale_size:\n        colors = resize(colors, (rescale_size, rescale_size), anti_aliasing=True)\n    if colors.dtype == np.uint8:\n        colors = colors.astype(np.float64) / 255\n    return colors\n\ndef get_cluster_labels(image, num_clusters, rescale_size):\n    colors = get_image_colors(image, rescale_size)\n    original_shape = colors.shape\n    colors = colors.reshape((colors.shape[0] * colors.shape[1], 3))\n    cluster_labels = cluster_colors(colors, num_clusters)\n    cluster_labels = cluster_labels.reshape((original_shape[0], original_shape[1]))\n    colors = colors.reshape((original_shape[0], original_shape[1], 3))\n    return cluster_labels, colors\n\n# replacement_val is the value painted over removed clusters; images are float in [0, 1],\n# so the default 1.0 paints them white (callers may omit it).\ndef remove_colors(image, labels, to_remove, replacement_val=1.0):\n    num = np.max(labels) + 1\n    new_image = image.copy()\n    labels_sorted = np.asarray(list(to_remove) + [x for x in np.arange(num) if x not in to_remove])\n    labels_map = np.argsort(labels_sorted)\n    new_labels = labels_map[labels]\n    new_image[new_labels < len(to_remove)] = replacement_val\n    return new_image\n\ndef main():\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"--image_dir\", type=str)\n    parser.add_argument(\"--num_clusters\", type=int, default=10)\n    parser.add_argument(\"--rescale_size\", type=int, default=64)\n    args = parser.parse_args()\n\n    image_dir = args.image_dir\n    num_clusters = args.num_clusters\n    rescale_size = args.rescale_size\n\n    images = load_images(args.image_dir)\n    for image in images:\n        labels, colors = get_cluster_labels(image, args.num_clusters, rescale_size)\n        new_image = remove_colors(colors, labels, [1,2,3,4,5])\n        plt.imshow(new_image)\n        plt.axis('off')\n        plt.show()\n\nif __name__ == \"__main__\":\n    main()","sub_path":"clustering/ColorRemover.py","file_name":"ColorRemover.py","file_ext":"py","file_size_in_byte":1990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"565991305","text":"#coding=utf-8\r\nfrom django.db import models\r\n\r\n# Create your models here.\r\nclass Address(models.Model):\r\n    name = models.CharField('Name', max_length=6, unique=True)\r\n    gender = models.CharField('Sex', choices=(('M', 'Male'), ('F', 'Female')), max_length=1)\r\n    telphone = models.CharField('Telphone', max_length=20)\r\n    mobile = models.CharField('Cellphone', max_length=11)\r\n    room = models.CharField('Room', max_length=10)\r\n    def __unicode__(self):\r\n        return self.name\r\n\r\nfrom django.contrib import admin\r\n\r\nclass AddressAdmin(admin.options.ModelAdmin):\r\n    model=Address\r\n    radio_fields = {'gender':admin.VERTICAL}\r\n\r\nadmin.site.register(Address,AddressAdmin)","sub_path":"python/webpy/mysite/mysite/address/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"327625074","text":"import copy\nimport math\nimport random\nimport ai2thor\nimport pdb\nimport ai2thor.fifo_server\n\n\n### CONSTANTS\n\n\n\nADITIONAL_ARM_ARGS = {\n    'disableRendering': True,\n    'returnToStart': True,\n    'speed': 
1,\n}\n\nARM_MOVE_CONSTANT = 0.05\n\nSCENE_INDICES = [i + 1 for i in range(30)] +[i + 1 for i in range(200,230)] +[i + 1 for i in range(300,330)] +[i + 1 for i in range(400,430)]\nSCENE_NAMES = ['FloorPlan{}_physics'.format(i) for i in SCENE_INDICES]\n\n\nENV_ARGS = dict(gridSize=0.25,\n width=224, height=224, agentMode='arm', fieldOfView=100,\n agentControllerType='mid-level',\n server_class=ai2thor.fifo_server.FifoServer,\n useMassThreshold = True, massThreshold = 10,\n autoSimulation=False, autoSyncTransforms=True,\n )\n\n#Functions\n\ndef is_object_at_position(controller, action_detail):\n objectId = action_detail['objectId']\n position = action_detail['position']\n current_object_position = get_object_details(controller, objectId)['position']\n return two_dict_equal(dict(position=position), dict(position=current_object_position))\n\ndef is_agent_at_position(controller, action_detail):\n # dict(action='TeleportFull', x=initial_location['x'], y=initial_location['y'], z=initial_location['z'], rotation=dict(x=0, y=initial_rotation, z=0), horizon=horizon, standing=True)\n target_pose = dict(\n position={'x': action_detail['x'], 'y': action_detail['y'], 'z': action_detail['z'], },\n rotation=action_detail['rotation'],\n horizon=action_detail['horizon']\n )\n current_agent_pose = controller.last_event.metadata['agent']\n current_agent_pose = dict(\n position=current_agent_pose['position'],\n rotation=current_agent_pose['rotation'],\n horizon=current_agent_pose['cameraHorizon'],\n )\n return two_dict_equal(current_agent_pose, target_pose)\n\n\ndef get_object_details(controller, obj_id):\n return [o for o in controller.last_event.metadata['objects'] if o['objectId'] == obj_id][0]\n\ndef initialize_arm(controller, scene_starting_cheating_locations):\n # for start arm from high up as a cheating, this block is very important. 
never remove\n scene = controller.last_event.metadata['sceneName']\n initial_pose = scene_starting_cheating_locations[scene]\n event1 = controller.step(dict(action='TeleportFull', standing=True, x=initial_pose['x'], y=initial_pose['y'], z=initial_pose['z'], rotation=dict(x=0, y=initial_pose['rotation'], z=0), horizon=initial_pose['horizon']))\n event2 = controller.step(dict(action='MoveMidLevelArm', position=dict(x=0.0, y=0, z=0.35), **ADITIONAL_ARM_ARGS))\n event3 = controller.step(dict(action='MoveMidLevelArmHeight', y=0.8, **ADITIONAL_ARM_ARGS))\n return event1, event2, event3\n\ndef make_all_objects_unbreakable(controller):\n all_breakable_objects = [o['objectType'] for o in controller.last_event.metadata['objects'] if o['breakable'] is True]\n all_breakable_objects = set(all_breakable_objects)\n for obj_type in all_breakable_objects:\n controller.step(action='MakeObjectsOfTypeUnbreakable', objectType=obj_type)\n\n\ndef reset_the_scene_and_get_reachables(controller, scene_name=None, scene_options=None):\n if scene_name is None:\n if scene_options is None:\n scene_options = SCENE_NAMES\n scene_name = random.choice(scene_options)\n controller.reset(scene_name)\n controller.step(action='MakeAllObjectsMoveable')\n controller.step(action='MakeObjectsStaticKinematicMassThreshold')\n make_all_objects_unbreakable(controller)\n return get_reachable_positions(controller)\n\ndef only_reset_scene(controller, scene_name):\n controller.reset(scene_name)\n controller.step(action='MakeAllObjectsMoveable')\n controller.step(action='MakeObjectsStaticKinematicMassThreshold')\n make_all_objects_unbreakable(controller)\n\ndef transport_wrapper(controller, target_object, target_location):\n action_detail_list = []\n transport_detail = dict(action = 'PlaceObjectAtPoint', objectId=target_object, position=target_location, forceKinematic=True)\n event = controller.step(**transport_detail)\n action_detail_list.append(transport_detail)\n # controller.step('PhysicsSyncTransforms')\n advance_detail = dict(action='AdvancePhysicsStep', simSeconds=1.0)\n controller.step(**advance_detail)\n action_detail_list.append(advance_detail)\n return event, action_detail_list\n\ndef is_object_in_receptacle(event,target_obj,target_receptacle):\n all_containing_receptacle = set([])\n parent_queue = [target_obj]\n while(len(parent_queue) > 0):\n top_queue = parent_queue[0]\n parent_queue = parent_queue[1:]\n if top_queue in all_containing_receptacle:\n continue\n current_parent_list = event.get_object(top_queue)['parentReceptacles']\n if current_parent_list is None:\n continue\n else:\n parent_queue += current_parent_list\n all_containing_receptacle.update(set(current_parent_list))\n return target_receptacle in all_containing_receptacle\n\ndef get_reachable_positions(controller):\n event = controller.step('GetReachablePositions')\n reachable_positions = event.metadata['reachablePositions']\n if reachable_positions is None or len(reachable_positions) == 0:\n reachable_positions = event.metadata['actionReturn']\n if reachable_positions is None or len(reachable_positions) == 0:\n print('Scene name', controller.last_event.metadata['sceneName'])\n pdb.set_trace()\n return reachable_positions\ndef execute_command(controller, command,action_dict_addition):\n\n base_position = get_current_arm_state(controller)\n change_height = ARM_MOVE_CONSTANT\n change_value = change_height\n action_details = {}\n\n if command == 'w':\n base_position['z'] += change_value\n elif command == 'z':\n base_position['z'] -= change_value\n elif command == 's':\n 
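        # 'a'/'s' step the arm target along x (one ARM_MOVE_CONSTANT per key press), mirroring 'w'/'z' for z and '3'/'4' for y.\n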
base_position['x'] += change_value\n elif command == 'a':\n base_position['x'] -= change_value\n elif command == '3':\n base_position['y'] += change_value\n elif command == '4':\n base_position['y'] -= change_value\n elif command == 'u':\n base_position['h'] += change_height\n elif command == 'j':\n base_position['h'] -= change_height\n elif command == '/':\n action_details = dict('')\n pickupable = controller.last_event.metadata['arm']['PickupableObjectsInsideHandSphere']\n print(pickupable)\n elif command == 'd':\n event = controller.step(action='ReleaseObject')\n action_details = dict(action='ReleaseObject')\n elif command == 'm':\n action_dict_addition = copy.deepcopy(action_dict_addition)\n event = controller.step(action='MoveAgent', ahead=0.2,**action_dict_addition)\n action_details = dict(action='MoveAgent', ahead=0.2,**action_dict_addition)\n\n elif command == 'r':\n action_dict_addition = copy.deepcopy(action_dict_addition)\n event = controller.step(action='RotateAgent', degrees = 45,**action_dict_addition)\n action_details = dict(action='RotateAgent', degrees = 45,**action_dict_addition)\n elif command == 'l':\n action_dict_addition = copy.deepcopy(action_dict_addition)\n event = controller.step(action='RotateAgent', degrees = -45,**action_dict_addition)\n action_details = dict(action='RotateAgent', degrees = -45,**action_dict_addition)\n elif command == 'p':\n event = controller.step(action='PickupObject')\n action_details = dict(action='PickupObject')\n elif command == 'q':\n action_details = {}\n else:\n action_details = {}\n\n if command in ['w', 'z', 's', 'a', '3', '4']:\n\n event = controller.step(action='MoveArm', position=dict(x=base_position['x'], y=base_position['y'], z=base_position['z']),**action_dict_addition)\n action_details=dict(action='MoveArm', position=dict(x=base_position['x'], y=base_position['y'], z=base_position['z']),**action_dict_addition)\n success = event.metadata['lastActionSuccess']\n\n\n elif command in ['u', 'j']:\n\n event = controller.step(action='MoveArmBase', y=base_position['h'],**action_dict_addition)\n action_details=dict(action='MoveArmBase', y=base_position['h'],**action_dict_addition)\n\n success = event.metadata['lastActionSuccess']\n\n return action_details\n\ndef get_current_arm_state(controller):\n h_min = 0.450998873\n h_max = 1.8009994\n agent_base_location = 0.9009995460510254\n event = controller.last_event\n offset = event.metadata['agent']['position']['y'] - agent_base_location\n h_max += offset\n h_min += offset\n joints=(event.metadata['arm']['joints'])\n arm=joints[-1]\n assert arm['name'] == 'robot_arm_4_jnt'\n xyz_dict = copy.deepcopy(arm['rootRelativePosition'])\n height_arm = joints[0]['position']['y']\n xyz_dict['h'] = (height_arm - h_min) / (h_max - h_min)\n # print_error([x['position']['y'] for x in joints])\n return xyz_dict\n\ndef two_list_equal(l1, l2):\n dict1 = {i: v for (i,v) in enumerate(l1)}\n dict2 = {i: v for (i,v) in enumerate(l2)}\n return two_dict_equal(dict1, dict2)\n\n\ndef get_current_full_state(controller):\n return {'agent_position':controller.last_event.metadata['agent']['position'], 'agent_rotation':controller.last_event.metadata['agent']['rotation'], 'arm_state': controller.last_event.metadata['arm']['joints'], 'held_object': controller.last_event.metadata['arm']['HeldObjects']}\n\n\ndef two_dict_equal(dict1, dict2, threshold=0.001, ignore_keys=[]):\n if len(dict1) != len(dict2):\n print('different len', dict1, dict2)\n return False\n # assert len(dict1) == len(dict2), print('different len', dict1, 
dict2)\n equal = True\n for k in dict1:\n if k in ignore_keys:\n continue\n val1 = dict1[k]\n val2 = dict2[k]\n if not (type(val1) == type(val2) or (type(val1) in [int, float] and type(val2) in [int, float])):\n print('different type', dict1, dict2)\n return False\n # assert type(val1) == type(val2) or (type(val1) in [int, float] and type(val2) in [int, float]), ()\n if type(val1) == dict:\n equal = two_dict_equal(val1, val2)\n elif type(val1) == list:\n equal = two_list_equal(val1, val2)\n # elif val1 != val1: # Either nan or -inf\n # equal = val2 != val2\n elif type(val1) == float:\n equal = abs(val1 - val2) < threshold\n else:\n equal = (val1 == val2)\n if not equal:\n print('not equal', 'key', k, 'values', val1, val2)\n return equal\n return equal\n\n","sub_path":"scripts/jupyter_helper.py","file_name":"jupyter_helper.py","file_ext":"py","file_size_in_byte":10683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"272805202","text":"import requests\nimport json\nimport time\n\nbase_url = \"https://api.assemblyai.com/v2\"\n\nheaders = {\n \"authorization\": \"513f45417677479780ea2db749f01b46\" \n}\n\nwith open(f\"./episode_list.json\", \"r\") as f:\n episodes = json.load(f)\n\n\nfor episode in episodes:\n print(f\"Doing {episode.filename}\")\n upload_url = f\"https://fatalerror.fm/attachments/{episode.filename}\" \n\n data = {\n \"audio_url\": upload_url,\n \"speaker_labels\": True,\n \"speakers_expected\": episode.speaker_count,\n }\n\n url = base_url + \"/transcript\"\n response = requests.post(url, json=data, headers=headers)\n\n transcript_id = response.json()['id']\n polling_endpoint = f\"https://api.assemblyai.com/v2/transcript/{transcript_id}\"\n\n while True:\n transcription_result = requests.get(polling_endpoint, headers=headers).json()\n\n if transcription_result['status'] == 'completed':\n break\n\n elif transcription_result['status'] == 'error':\n raise RuntimeError(f\"Transcription failed: {transcription_result['error']}\")\n\n else:\n time.sleep(3)\n \n print(polling_endpoint)\n\n # Serializing json\n json_object = json.dumps(transcription_result, indent=4)\n \n # Writing to sample.json\n with open(f\"{filename}.json\", \"w\") as outfile:\n outfile.write(json_object)\n","sub_path":"transcription/assembly_ai.py","file_name":"assembly_ai.py","file_ext":"py","file_size_in_byte":1313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"291604802","text":"from Helper.Train.Train import Train\nfrom sklearn.metrics import confusion_matrix, classification_report\nfrom Helper.Visual.Train.construct_confusion_matrix import annotate_heatmap, heatmap\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nclass VClassification:\n def __init__(self, Train):\n self.train = Train\n self.load = self.train.load\n\n def construct_confuation_matrix(self):\n plt.rcParams[\"figure.figsize\"] = (20,10)\n preds = self.train.clf.predict(self.load.partition.X_test)\n cm = confusion_matrix(self.load.partition.y_test.values, preds)\n class_ = np.unique(self.load.data[self.load.outputs].values)\n fig, ax = plt.subplots()\n im, cbar = heatmap(cm, class_, class_, ax, cmap=\"terrain\", cbarlabel=\"Support\")\n texts = annotate_heatmap(im, valfmt=\"{x:.1f} t\")\n fig.tight_layout()\n 
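        # Render the annotated confusion-matrix heatmap for the held-out test split.\n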
plt.show()\n","sub_path":"KEEL/Projects/Supervised/Standard/connect4/Helper/Visual/Train/.ipynb_checkpoints/classification-checkpoint.py","file_name":"classification-checkpoint.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"470895020","text":"# coding=utf-8\nimport datetime\n\nimport urllib\nimport json\nimport time\nimport random\nfrom binascii import b2a_hex, a2b_hex\nfrom Crypto.Cipher import DES\n\nfrom django.http.response import HttpResponse, HttpResponseRedirect, Http404\nfrom django.core.urlresolvers import reverse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.db.models import ObjectDoesNotExist\nfrom django.contrib.auth.decorators import login_required\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.contrib.auth.views import auth_login\nfrom django.contrib.auth import authenticate\nfrom app.core.models import CustomerOrder, CoreLog, AliCustomer, Prefs, Customer, CustomerDomain, CustomerMailbox, \\\n    Manager, Services\nfrom alipay import create_direct_pay_by_user, notify_verify\nfrom app.wechat.models import WeixinLog\nfrom alipay_sdk import AliPay\nfrom lib.tools import gen_len_chars, get_random_string, get_sys_smtp_mailbox\nfrom django_redis import get_redis_connection\nfrom lib.pushcrew import pushcrew_notice\nfrom django.utils import timezone\nfrom django.utils.translation import ugettext_lazy as _\n\nALI = AliPay(appid=settings.ALI_APPID, web_private_key_path=settings.ALI_PRIVATE_KEY_PATH,\n             web_alipay_public_key_path=settings.ALI_PUBLIC_KEY_PATH, return_url=settings.ALI_RETURN_URL)\n# Confirm payment\n@login_required\ndef pre_pay(request):\n    id = request.GET.get('id', '')\n    try:\n        order = CustomerOrder.objects.get(orderno=id, customer=request.user)\n    except ObjectDoesNotExist:\n        raise Http404\n    orderno = order.orderno\n    subject = order.product_desc\n    body = order.product_detail\n    bank = ''\n    tf = order.fee\n    url = create_direct_pay_by_user(orderno, subject, body, bank, tf)\n    # Redirect to the payment page\n    return HttpResponseRedirect(url)\n\n\n# Alipay asynchronous notification\n@csrf_exempt\ndef alipay_notify_url(request):\n    data = request.POST\n    if request.method == 'POST':\n        # Merchant-site order number\n        out_trade_no = data.get('out_trade_no', '')\n        # Alipay transaction number\n        trade_no = data.get('trade_no', '')\n        # Returned payment status\n        trade_status = data.get('trade_status', '')\n        # Payer id\n        buyer_id = data.get('buyer_id', '')\n        # Payer email\n        buyer_email = data.get('buyer_email', '')\n        # Notification time\n        notify_time = data.get('notify_time')\n        is_sign = notify_verify(data)\n        WeixinLog.objects.create(type='ali_notify', body=str(data), is_sign=is_sign)\n        if is_sign:\n            order = CustomerOrder.objects.get(orderno=out_trade_no)\n            order.transaction_id = trade_no\n            order.dt_pay = datetime.datetime.strptime(notify_time, '%Y-%m-%d %H:%M:%S')\n            order.openid = buyer_id\n            order.buyer = buyer_email\n            if trade_status == 'TRADE_SUCCESS':\n                order.status = 'paied'\n                # Top up the purchased points\n                fee = order.fee\n                qty_buy = order.qty_buy\n                user = order.customer\n                service = user.service()\n                service.qty_count += qty_buy\n                service.qty_valid += qty_buy\n                service.qty_buytotal += qty_buy\n                # Re-enable the service if it was disabled\n                if service.disabled == \"1\":\n                    service.disabled = '0'\n                service.save()\n\n                # Notify\n                redis = get_redis_connection()\n                redis.rpush('edm_web_notice_queue', json.dumps(\n                    {\n                        \"type\": \"2\",\n                        'customer_id': '{}'.format(order.customer_id),\n                        \"area\": '',\n                        'point': '{}'.format(int(qty_buy)),\n                        'domain': '',\n                        'task': '',\n                    }\n                ))\n\n                # Convert a self-registered (Alipay) user into a regular Alipay user\n                if service.server_type == '6':\n                    service.server_type = '0'\n                    service.is_verify = '1'\n                    service.save()\n\n                # Log\n                CoreLog.objects.create(user=user, user_type='users', target=user, target_name=user,\n                                       action='c_update_count', desc=u'{} yuan / {} points (Alipay top-up)'.format(fee, qty_buy))\n            order.save()\n            return HttpResponse(\"success\")\n    return HttpResponse(\"fail\")\n\n\n# Synchronous notification\ndef alipay_return_url(request):\n    data = request.GET\n    is_sign = notify_verify(data)\n    WeixinLog.objects.create(type='ali_return', body=str(data), is_sign=is_sign)\n    if notify_verify(request.GET):\n        orderno = request.GET.get('out_trade_no')\n        return HttpResponseRedirect('{}?id={}'.format(reverse('pay_success'), orderno))\n    return HttpResponseRedirect(\"/\")\n\n\ndef ali_login(request):\n    key = request.GET.get('key', 'login')\n    return HttpResponseRedirect(ALI.user_info_auth(state=key))\n\n\ndef ali_login_return(request):\n    is_allow_register = False\n    customer = None\n    msg = ''\n    try:\n        auth_code = request.GET.get('auth_code', '')\n        state = request.GET.get('state', '')\n        token_url = ALI.system_oauth_token(auth_code)\n        res = json.loads(urllib.urlopen(token_url).read())\n        data = res.get('alipay_system_oauth_token_response', {})\n        access_token = data['access_token']\n        user_info_url = ALI.user_info_share(access_token)\n        user_info = json.loads(urllib.urlopen(user_info_url).read()).get('alipay_user_info_share_response', {})\n        user_id = user_info['user_id']\n        ali_customer, _created = AliCustomer.objects.get_or_create(user_id=user_id)\n        # Alipay login\n        if state == 'login':\n            users = Customer.objects.filter(ali_customer=ali_customer, disabled=0)\n            if users:\n                user = users[0]\n                user = authenticate(username=user.username, password='', t_password=user.password)\n                auth_login(request, user)\n                # Login log\n                user.save_fast_login_log(request, mode='ali')\n                user.last_login = timezone.now()\n                user.save(update_fields=['last_login'])\n\n                # Notify sales of the Alipay login\n                sss = user.service()\n                if sss and sss.is_pushcrew:\n                    action = \"service\"\n                    title = u\"Alipay login reminder\"\n                    message = u\"{} (ID: {}) logged into the platform at {}\".format(\n                        user.company, user.id, datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"))\n                    pushcrew_notice(action, title, message)\n\n                return HttpResponseRedirect('http://{}{}'.format(settings.HOSTNAME, reverse('home')))\n            else:\n                messages.add_message(request, messages.ERROR, _(u'This Alipay account is not bound to a platform account yet; please bind one first!'))\n                return HttpResponseRedirect('http://{}{}'.format(settings.HOSTNAME, reverse('home')))\n\n        elif state == 'bind':\n            customer = request.user\n        else:\n            # Zhima (Sesame) Credit verification\n            cryp_obj = DES.new(settings.CRYP_KEY)\n            customer_id, expired_time = cryp_obj.decrypt(a2b_hex(state)).split('----')\n            customer = Customer.objects.get(id=customer_id)\n\n        for field in ['avatar', 'city', 'gender', 'is_certified', 'is_student_certified', 'province', 'user_status',\n                      'user_type']:\n            setattr(ali_customer, field, user_info.get(field, ''))\n        transaction_id = time.strftime('%Y%m%d%H%M%S') + get_random_string(4).upper()\n        try:\n            min_score = int(Prefs.objects.get(name='register_credit').value)\n        except Exception:\n            min_score = 630\n        zhima_url = ALI.zhima_credit_score(access_token, transaction_id, user_id, min_score)\n        zhima_info = json.loads(urllib.urlopen(zhima_url).read()).get('zhima_credit_score_brief_get_response', {})\n        is_admittance = zhima_info.get('is_admittance', '')\n        ali_customer.is_admittance = is_admittance\n        ali_customer.save()\n        if ali_customer.user_type == '1' and ali_customer.user_status == 'T':\n            is_allow_register = True\n        elif is_admittance == \"N\":\n            msg = _(u'Sorry, your Zhima Credit does not meet the quick-registration requirement. For a trial, please contact support by phone/corporate QQ: 400-8181-568!')\n        elif is_admittance == \"Y\":\n            is_allow_register = True\n        else:\n            msg = _(u'Sorry, we failed to retrieve your Zhima Credit, so quick registration/login is unavailable!')\n    except Exception:\n        msg = _(u'The Alipay interface returned an error; please try again later!')\n\n    if customer and is_allow_register and not customer.is_bind_ali:\n        try:\n            register_point = int(Prefs.objects.get(name='register_point').value)\n        except Exception:\n            register_point = 100\n\n        if customer.is_register:\n            register_manager_lists = ['register_manager_1', 'register_manager_2', 'register_manager_3']\n            redis = get_redis_connection()\n            find_next_manager = 'register_manager_1'\n            find_next_count = None\n            for key in register_manager_lists:\n                count = redis.hget(\"edm_web_register_manager_hash\", key=key)\n                count = int(count) if count else 0\n                if find_next_count is None or find_next_count >= count:\n                    find_next_manager = key\n                    find_next_count = count\n            redis.hincrby(\"edm_web_register_manager_hash\", find_next_manager, 1)\n\n            # manager, _created = Prefs.objects.get_or_create(name=random.choice(register_manager_lists))\n            manager, _created = Prefs.objects.get_or_create(name=find_next_manager)\n            manager_id = int(manager.value) if manager.value else None\n            if not manager_id:\n                manager = Manager.objects.first()\n                manager_id = manager.id\n            customer.disabled = \"0\"\n            customer.manager_id = manager_id\n            service = customer.service()\n\n            service.qty_valid += register_point\n            service.qty_count += register_point\n            customer.is_bind_ali = True\n            service.save()\n\n            # Notify sales of the Alipay registration\n            action = \"sale\"\n            title = u\"Registration reminder (verification passed)\"\n            message = u\"{} (ID: {}) registered and logged into the platform at {}\".format(\n                customer.company, customer.id, datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"))\n            pushcrew_notice(action, title, message, customer_id=customer.id)\n\n            msg = _(u'Congratulations, Zhima Credit verification passed and registration succeeded!')\n        customer.ali_customer = ali_customer\n        customer.save()\n    if state == 'bind':\n        messages.add_message(request, messages.SUCCESS, _(u'Alipay account bound successfully'))\n        return HttpResponseRedirect(reverse('account'))\n    else:\n        messages.add_message(request, messages.INFO, msg)\n        return HttpResponseRedirect(\n            '{}?result={}&key={}'.format(reverse('register_new_step3'), is_allow_register, state))\n","sub_path":"e/edm/app/ali/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"373310982","text":"import asyncio, io, threading, sys\nfrom contextlib import contextmanager\nfrom wsgiref.util import is_hop_by_hop\n\n\nclass WSGIResponse:\n\n    __slots__ = (\"application\", \"request\", \"response\",)\n\n    def __init__(self, application, request):\n        self.application = application\n        self.request = request\n        self.response = None\n\n    def start_response_threadsafe(self, status, headers, exc_info=None):\n        # Start the response.\n        try:\n            # Parse the status.\n            assert isinstance(status, str), \"Response status should be str\"\n            status_code, reason = status.split(None, 1)\n            status_code = int(status_code)\n            # Get the response writer.\n            self.response = self.request.start_response(status_code, reason, headers, exc_info)\n        finally:\n            exc_info = None\n        return self.write_threadsafe\n\n    def write_threadsafe(self, data):\n        with self.application.lock_for_write():\n            self.response.write(data)\n\n\nclass WSGIApplication:\n\n    def __init__(self, application, *, executor=None, loop=None):\n        self.application = application\n        self.executor = executor\n        self.loop = loop or asyncio.get_event_loop()\n        # The write lock mechanism.\n        self.write_lock = threading.Lock()\n        self.write_ready = threading.Event()\n\n    def pause_loop(self):\n        self.write_ready.set()\n        with self.write_lock:\n
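            # Re-acquiring write_lock means the writer has finished; clear the event so the next writer waits for the loop to pause again.\n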
self.write_ready.clear()\n\n @contextmanager\n def lock_for_write(self):\n with self.write_lock:\n # Have the event loop signal it's ready to pause, and then\n # wait for the write lock again.\n self.loop.call_soon_threadsafe(self.pause_loop)\n # Write the data when the write is ready.\n self.write_ready.wait()\n yield\n\n @asyncio.coroutine\n def run_in_executor(self, task, *args):\n return (yield from self.loop.run_in_executor(self.executor, task, *args))\n\n def __call__(self, request):\n # Create the environ.\n environ = {\n \"REQUEST_METHOD\": request.method,\n \"SCRIPT_NAME\": \"\",\n \"PATH_INFO\": request.path,\n \"QUERY_STRING\": request.query_string,\n \"CONTENT_TYPE\": request.headers.get(\"Content-Type\", \"\"),\n \"CONTENT_LENGTH\": str(len(request.body)),\n \"SERVER_NAME\": request.server_name,\n \"SERVER_PORT\": str(request.server_port),\n \"REMOTE_ADDR\": request.remote_addr,\n \"REMOTE_HOST\": request.remote_host,\n \"REMOTE_PORT\": str(request.remote_port),\n \"SERVER_PROTOCOL\": request.protocol,\n \"wsgi.version\": (1, 0),\n \"wsgi.url_scheme\": request.url_scheme,\n \"wsgi.input\": io.BytesIO(request.body),\n \"wsgi.errors\": sys.stdout,\n \"wsgi.multithread\": True,\n \"wsgi.multiprocess\": False,\n \"wsgi.run_once\": False,\n }\n # Add in additional HTTP headers.\n for header_name, header_value in request.headers.items():\n header_name = header_name.upper()\n if not(is_hop_by_hop(header_name)) and not header_name in (\"CONTENT-LENGTH\", \"CONTENT-TYPE\"):\n environ[\"HTTP_\" + header_name.replace(\"-\", \"_\")] = header_value\n # Run the application.\n response = WSGIResponse(self, request)\n body_iterable = (yield from self.run_in_executor(self.application, environ, response.start_response_threadsafe))\n try:\n body_iter = iter(body_iterable)\n # Run through all the data.\n while True:\n data = (yield from self.run_in_executor(next, body_iter))\n if data is None:\n break\n if data:\n assert response.response is not None, \"start_response() was not called by application\"\n response.response.write(data)\n yield from response.response.drain()\n finally:\n # Close the body.\n if hasattr(body_iterable, \"close\"):\n yield from self.run_in_executor(body_iterable.close)\n","sub_path":"jutsu/wsgi.py","file_name":"wsgi.py","file_ext":"py","file_size_in_byte":4154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"373310982","text":"import numpy as np\nimport math\nimport matplotlib.pyplot as plt\n\n\ntimesteps = 50\ndt = 0.5\n# timesteps per rotation\nsample_rate = 10\n# covariance of lighthouse states\nP_l = np.identity(3)\n\nx_l0 = 0\ny_l0 = 0\n\niterations = 1000\nplot_run = False\nerror = np.empty((0))\n\nfor iteration in range(iterations):\n\t###BRIAN: This generates the lighthouse trajectory, for this iteration of simulation#######\n x_l_traj = 0 * np.cos(np.linspace(0, timesteps, num=timesteps)/100)\n y_l_traj = 3 * np.sin(dt*np.linspace(0, timesteps, num=timesteps))\n #####################################################\n\n #y_l_traj = 2 * (unidrnd(2*ones(1,timesteps))-1.5);\n #x0 = 3\n #y0 = 0\n \n x0 = np.random.rand() * 5 - 2.5\n y0 = np.random.rand() * 5 - 2.5\n \n # x0 = 3.4080;\n # y0= 1.888;\n varx = [np.power(.3, 2)]\n vary = [np.power(.3, 2)]\n\n x_a = np.array([x0, y0])[:, None]\n P_m = np.empty((2, 2, 0))\n P_m = np.append(P_m, np.array([[varx[0], 0], [0, vary[0]]])[:,:,None], axis=2)\n # initial p_m\n x_m = np.array([x0 + np.random.randn() * np.sqrt(varx[0]), y0 + np.random.randn() * 
np.sqrt(vary[0])])[:, None]\n K_rx = [0]\n K_ry = [0]\n K_lx = [0]\n K_ly = [0]\n measurement = np.zeros((2, 1))\n\n # noise stds\n sig1 = .05\n sig2 = .05\n sig3 = 1.5 * 3.1415 / 180\n sig4 = 10\n\n P_l = np.diag([sig1**2, sig2**2, sig3**2]) # covariance of lighthouse states \n\n D, V = np.linalg.eig(P_m[:,:,0])\n D = np.diag(D)[:,:,None]\n V = V[:,:,None]\n\n \n # control vectors for lighthouse\n theta = 3.14 / 8\n rot = np.array([[np.cos(theta), - np.sin(theta)],\n [np.sin(theta), np.cos(theta)]])\n u_primative = np.array([[1, 0, 0, -1], \n [0, 1, -1, 0]])\n # u_l = [1,0;0,1;0,-1;-1,0;1,1;1,-1;-1,1;-1,-1]' * 1;\n u_l = np.concatenate((u_primative, u_primative * 0.5, rot @ u_primative, rot @ u_primative * .5, rot @ rot @ u_primative, rot @ rot @ u_primative * .5), axis=1)\n x_l = [0]\n y_l = [0]\n X_l = np.array([x_l[0], y_l[0]])[:, None]\n last_direction = np.array([0, 0])[:, None]\n r_diffx = []\n r_diffy = []\n x_p = np.zeros((2, 1))\n P_p = np.zeros((2,2,1))\n\n # Begin for loop \n for i in range(1, timesteps):\n # step dynamics forward \n x_a = np.append(x_a, x_a[:, i-1][:, None], axis=1)\n \n max_idx = 1\n # lighthouse location control \n\n #WHEN IS THIS SUPPOSED TO GO INTO THIS IF STATEMENT\n if i < 1:\n gain = np.array([]) \n for cont in range(0, 9):\n \t#BRIAN: this assess 9 different direcions the lighthouse could go and chooses the one that maximizes the fischer information matrix. \n \t#I don't really use this method anymore#########\n Rp = np.diag([np.power(sig1, 2), np.power(sig4, 2)])\n del_Xl_prop = u_l[:, cont]\n del_x = del_Xl_prop[0]\n del_y = del_Xl_prop[1]\n d = np.linalg.norm(x_m[:, i-1]-(X_l[:,i-1] + del_Xl_prop))\n angle = np.arctan2(x_m[1,i-1]-(y_l[i-1]+del_y), x_m[0,i-1]-(x_l[i-1]+del_x))\n # Hp = [-(x_m(2,i-1)-(y_l(i-1)+del_y))/norm(x_m(:,i-1)-(X_l(:,i-1)+del_Xl_prop))^2 , (x_m(1,i-1)-(x_l(i-1)+del_x))/norm(x_m(:,i-1)-(X_l(:,i-1)+del_Xl_prop))^2;\n # -10*(x_m(1,i-1)-(x_l(i-1)+del_x))/(log(10)* norm(x_m(:,i-1)-(X_l(:,i-1) + del_Xl_prop))^2), -10*(x_m(2,i-1)-(y_l(i-1) + del_y))/(log(10)* norm(x_m(:,i-1)-(X_l(:,i-1)+del_Xl_prop))^2)];\n Hp = (1/d) * np.array([[np.sin(angle) , -np.cos(angle)]])\n # 10*(x_m(1,i-1)-(x_l(i-1)+del_x))/(log(10)* d), 10*(x_m(2,i-1)-(y_l(i-1) + del_y))/(log(10)* d)];\n Rp = np.linalg.inv(Rp)\n # using least squares here instead of matrix right division\n fim = np.matmul(Hp.T / np.power(sig1, 2), Hp)\n area = np.linalg.det(fim)\n d, v = np.linalg.eig(fim)\n d = np.diag(d)\n gain = np.append(gain, area)\n # gain(cont) = d(1,1);\n\n argvalue = np.max(gain)\n max_idx = np.argmax(gain)\n d = np.linalg.norm(x_m[:,i-1] - X_l[:,i-1])\n angle = np.arctan2(x_m[1,i-1] - y_l[i-1], x_m[0,i-1] - x_l[i-1])\n Hp = (1/d) * np.array([[np.sin(angle), -np.cos(angle)]])\n fim = np.matmul(Hp.T / np.power(sig1,2), Hp)\n d, v = np.linalg.eig(fim)\n d = np.diag(d)\n if d[0, 0] > d[1, 1]:\n direction = v[:, 0]\n else:\n direction = v[:, 1]\n\n last_direction = direction\n else:\n \t#BRIAN: chooses lighthouse directory based on the direciton that maximizes the first eigenvalue\n \t#of the fischer information matrix###################################\n d = np.linalg.norm(x_m[:,i-1] - X_l[:,i-1])\n angle = np.arctan2(x_m[1, i-1] - y_l[i-1], x_m[0, i-1] - x_l[i-1])\n Hp = (1/d) * np.array([[np.sin(angle), -np.cos(angle)]])\n # -10*(x_m(1,i-1)-(x_l(i-1)))/(log(10)* d), -10*(x_m(2,i-1)-(y_l(i-1)))/(log(10)* d)];\n Rp = np.diag([np.power(sig1, 2)])\n fim = np.matmul(Hp.T / np.linalg.inv(Rp), Hp)\n lam, v = np.linalg.eig(fim)\n lam = np.diag(lam)\n if lam[0, 0] >= lam[1, 
1]:\n direction = v[:, 0]\n else:\n direction = v[:, 1]\n \n dot = np.matmul(np.transpose(direction), last_direction)\n \n if (np.matmul(np.transpose(direction), last_direction)) < 0:\n direction = -direction\n\n last_direction = direction\n\n # x_l(i) = x_l(i-1)+ u_l(1,max_idx);\n # y_l(i) = y_l(i-1)+ u_l(2,max_idx);\n x_l = np.append(x_l, x_l_traj[i])\n y_l = np.append(y_l, y_l_traj[i])\n \n x_l[i] = x_l[i-1] + direction[0]\n y_l[i] = y_l[i-1] + direction[1]\n X_l = np.append(X_l, np.array([x_l[i], y_l[i]])[:,None], axis=1)\n\n #BRIAN: this is the prediction step of the anchor estimator and should be ported to the python simulator \n # as a part of the robot class\n # prediction step\n x_p = np.append(x_p, x_m[:, i-1][:, None], axis=1)\n P_p = np.append(P_p, P_m[:, :, i-1][:,:,None], axis=2)\n ########################################################\n\n #BRIAN: this noise generation should be ported over to the python simulator as a part of the robot class\n # generate noise\n w1 = np.random.randn() * sig1\n w2 = np.random.randn() * sig2\n w3 = np.random.randn() * sig3\n # w4 = (randn(1) * sig4);\n # w4 = sig4+max(-exprnd(sig4),-90);\n w4 = -np.random.rayleigh(sig4 / np.sqrt((4-3.14)/2)) # rayleigh fading\n ########################################################\n\n #BRIAN: this part of measurement generation should probably be a part of the overarching simulator\n # generate measurments \n z = np.array([[np.arctan2(x_a[1,i] - (y_l[i] + w1), x_a[0, i] - (x_l[i] + w2)) + w3],\n [-10 * np.log10(np.linalg.norm(x_a[:,i] - np.array([[x_l[i]], [y_l[i]]]))) + w4]])\n # propogate prediction through measurment model\n # z\n h = np.array([[np.arctan2(x_p[1, i] - y_l[i], x_p[0, i] - x_l[i])],\n [-10 * np.log10(np.linalg.norm(x_p[:, i] - X_l[:, i]))]])\n ##########################################################\n\n #BRIAN: this is the measurement step of the anchor simulator and \n #should be implemented in the python simulator as a part of the robot object.\n # measurement step\n\n if abs(z[0] - h[0]) < 3.14:\n \t#BRIAN: the above if statement is to avoid angle wrapping issues that will break the estimator\n \t#the lighthouse EKF version of this simulation does a much better job of handling this angle problem\n \t#so use that angle wrapping method rather than this one.\n r = np.linalg.norm(x_p[:, i] - X_l[:, i])\n angle = np.arctan2(x_p[1, i] - y_l[i], x_p[0, i] - x_l[i])\n H = (1/r) * np.array([[-np.sin(angle), np.cos(angle)],\n [-10 * (x_p[0, i] - x_l[i]) / (np.log(10) * r), -10 * (x_p[1, i] - y_l[i]) / (np.log(10) * r)]])\n # H = [-(x_p(2,i)-y_l(i))/norm(x_p(:,i)-X_l(:,i))^2 , (x_p(1,i)-x_l(i))/norm(x_p(:,i)-X_l(:,i))^2;\n # -10*(x_p(1,i)-x_l(i))/(log(10)* norm(x_p(:,i)-X_l(:,i))^2), -10*(x_p(2,i)-y_l(i))/(log(10)* norm(x_p(:,i)-X_l(:,i))^2)];\n\n\n W = np.array([[(x_p[1, i] - y_l[i]) / np.power(np.linalg.norm(x_p[:, i] - X_l[:, i]), 2), -(x_p[0, i] - x_l[i]) / np.power(np.linalg.norm(x_p[:, i] - X_l[:, i]), 2), 1, 0],\n [10 * (x_p[0, i] - x_l[i]) / (np.log(10) * np.power(np.linalg.norm(x_p[:, i] - X_l[:, i]), 2)), 10*(x_p[1, i]-y_l[i]) / (np.log(10) * np.power(np.linalg.norm(x_p[:, i] - X_l[:, i]), 2)), 0 ,1]])\n\n R = np.array([np.append(P_l[0,:],[0]),\n np.append(P_l[1,:], [0]),\n np.append(P_l[2,:], [0]),\n [0,0,0,sig4**2]])\n\n\n\n K = P_p[:,:,i] @ H.T @ np.linalg.inv(H @ P_p[:,:,i] @ H.T + W @ R @ W.T)\n\n # is the kalman gain helpful?\n\n K = np.array([[K[0, 0], 0],\n [K[1, 0], 0]])\n # K = [0,K(1,2); 0,K(2,2)];\n\n\n\n # K*H\n\n # K;\n # H;\n # z-h;\n # z;\n\n x_m = np.append(x_m, 
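# EKF measurement update: x_m = x_p + K (z - h)\n                          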
np.array(x_p[:, i][:,None] + K @ (z-h)), axis=1)\n P_m = np.append(P_m, np.array((np.identity(2) - K @ H) @ P_p[:,:,i])[:,:,None], axis=2)\n varx = np.append(varx, P_m[0,0,i])\n vary = np.append(vary, P_m[1,1,i])\n measurement = np.append(measurement, z, axis=1)\n\n K_rx = np.append(K_rx, K[0,1])\n K_ry = np.append(K_ry, K[1,1])\n K_lx = np.append(K_lx, K[0,0])\n K_ly = np.append(K_ly, K[1,0])\n\n r_diffx = np.append(r_diffx, K[0,1]*(z[1]-h[1]))\n r_diffy = np.append(r_diffy, K[1,1]*(z[1]-h[1]))\n tempD, tempV = np.linalg.eig(P_m[:,:,i])\n V = np.append(V, tempV)\n D = np.append(D, np.diag(tempD))\n else:\n x_m = np.append(x_m, x_m[:,i-1][:,None], axis=1)\n P_m = np.append(P_m, P_m[:,:,i-1][:,:,None], axis=2)\n varx = np.append(varx, P_m[0,0,i])\n vary = np.append(vary, P_m[1,1,i])\n # varx(i) = D(1,1,i);\n # vary(i) = D(2,2,i);\n measurement = np.append(measurement, z, axis=1)\n K_rx = np.append(K_rx, K_rx[i-1])\n K_ry = np.append(K_ry, K_ry[i-1])\n r_diffx = np.append(r_diffx, 0)\n r_diffy = np.append(r_diffy, 0)\n tempD, tempV = np.linalg.eig(P_m[:,:,i])\n V = np.append(V, tempV)\n D = np.append(D, np.diag(tempD))\n ############################################################################## \n error = np.append(error, np.linalg.norm(x_a[:,0]-x_m[:,-1]))\n \n # Plot Runs NOT ORIGINALLY COMMENTED OUT BUT PLOT RUN IS FALSE SO TESTING\n # if plot_run:\n # linewidth = 2;\n # x_m(:,timesteps)\n # P_m(:,:,timesteps)\n # plot([1:timesteps],x_m(1,:),[1:timesteps],x_m(2,:))\n # set(findall(gca, 'Type', 'Line'),'LineWidth',linewidth);\n # title('Estimated Anchor Location')\n\n # xlabel('Measurement Number')\n # ylabel('Location (m)')\n # set(gca,'fontsize',20)\n # hold\n\n # plot([1:timesteps],ones(1,timesteps)*x_a(1,1),'--b',[1:timesteps],ones(1,timesteps)*x_a(2,2),'--r')\n # legend('X','Y','X truth','Y truth')\n # xlim([0,100])\n # figure\n\n # subplot(1,2,1)\n # plot([1:timesteps],abs(x_m(1,:)-x_a(1,1)))\n # set(findall(gca, 'Type', 'Line'),'LineWidth',linewidth);\n # title('X Location Error')\n # set(gca,'fontsize',20)\n # xlabel('Measurement Number')\n # ylabel('Error (m)')\n # set(gca,'YScale','log')\n # xlim([0,100])\n\n # subplot(1,2,2)\n # plot([1:timesteps],abs(x_m(2,:)-x_a(2,1)))\n # set(findall(gca, 'Type', 'Line'),'LineWidth',linewidth);\n # title('Y Location Error')\n # xlabel('Measurement Number')\n # ylabel('Error (m)')\n # set(gca,'fontsize',20)\n # set(gca,'YScale','log')\n # xlim([0,100])\n\n # figure\n\n # plot([1:timesteps],varx,[1:timesteps],vary)\n # set(findall(gca, 'Type', 'Line'),'LineWidth',linewidth);\n # title('Anchor Location Variance')\n # legend('Axis 1','Axis 2')\n # xlabel('Measurement Number')\n # ylabel('Location (m)')\n # set(gca,'YScale','log')\n # set(gca,'fontsize',20)\n # xlim([0,100])\n\n\n # % %ylim([0,100])\n # % xlim([0,100])\n # % \n # figure\n # plot([2:timesteps],abs(K_rx(2:end)),[2:timesteps],abs(K_ry(2:end)))\n # set(findall(gca, 'Type', 'Line'),'LineWidth',linewidth);\n # title('Kalman Gain of RSSI Measurements')\n # legend('X Gain','Y Gain')\n # xlabel('Measurement Number')\n # ylabel('Gain')\n # set(gca,'YScale','log')\n # set(gca,'fontsize',20)\n # xlim([0, 100])\n\n # figure\n # plot([1:timesteps],r_diffx,[1:timesteps],r_diffy)\n # set(findall(gca, 'Type', 'Line'),'LineWidth',linewidth);\n # title('State Correction of RSSI Measurements')\n # legend('X','Y')\n # xlabel('Measurement Number')\n # ylabel('Location (m)')\n # set(gca,'fontsize',20)\n\n # figure\n # plot([1:timesteps],y_l)\n # set(findall(gca, 'Type', 
'Line'),'LineWidth',linewidth);\n # title('Lighthouse Y Location')\n # xlabel('Measurement Number')\n # ylabel('Location (m)')\n # set(gca,'fontsize',20)\n # xlim([0, 100])\n \n # figure\n # plot([1:timesteps],x_l)\n # set(findall(gca, 'Type', 'Line'),'LineWidth',linewidth);\n # title('Lighthouse X Location')\n # xlabel('Measurement Number')\n # ylabel('Location (m)')\n # set(gca,'fontsize',20)\n # xlim([0, 100])\n\n \n \n # endpoint = 10;\n # figure\n # plot([1:timesteps],measurement(1,:))\n # set(findall(gca, 'Type', 'Line'),'LineWidth',linewidth);\n # figure\n # scatter(x_l(1:endpoint),y_l(1:endpoint))\n # set(findall(gca, 'Type', 'Line'),'LineWidth',linewidth);\n # legend('Actual Lighthouse Location')\n # hold\n\n # scatter(x_m(1,1:endpoint),x_m(2,1:endpoint))\n # legend('Estimated Anchor Position')\n # scatter(x_a(1,1), x_a(2,1))\n # legend('Actual Anchor Position')\n # %hold\n # a = [1:endpoint]'; b = num2str(a); c = cellstr(b);\n # dx = 0.1; dy = 0.1; % displacement so the text does not overlay the data points\n # text(x_m(1,1:endpoint)+dx, x_m(2,1:endpoint)+dy, c);\n\n # text(x_l(1:endpoint)+dx, y_l(1:endpoint)+dy, c);\n # title('Anchor and Lighthouse Location')\n\n # xlabel('X (m)')\n # ylabel('Y (m)')\n # set(gca,'fontsize',20)\n # xlim([-6, 6])\n # ylim([-6, 6])\n\n \n'''\n% figure\n% plot([2:timesteps],measurement(2,2:end))\n% set(findall(gca, 'Type', 'Line'),'LineWidth',linewidth);\n% title('RSSI Measurements')\n% xlabel('Measurement Number')\n% ylabel('RSSI')\n% set(gca,'fontsize',20)\n% xlim([0, 100])\n% \n% figure\n% linewidth = 4;\n% lims = 1\n% subplot(2,2,1)\n% vectors = V(:,:,1)*sqrtm(D(:,:,1))\n% plotv([vectors,-1*vectors])\n% xlim([-lims,lims])\n% ylim([-lims,lims])\n% set(findall(gca, 'Type', 'Line'),'LineWidth',linewidth);\n% title('Measurement 1 ')\n% xlabel('X (m)')\n% ylabel('Y (m)')\n% set(gca,'fontsize',20)\n% \n% \n% subplot(2,2,2)\n% vectors = V(:,:,2)*sqrtm(D(:,:,2))\n% plotv([vectors,-1*vectors])\n% xlim([-lims,lims])\n% ylim([-lims,lims])\n% set(findall(gca, 'Type', 'Line'),'LineWidth',linewidth);\n% title('Measurement 2 ')\n% xlabel('X (m)')\n% ylabel('Y (m)')\n% set(gca,'fontsize',20)\n% \n% subplot(2,2,3)\n% vectors = V(:,:,3)*sqrtm(D(:,:,3))\n% plotv([vectors,-1*vectors])\n% xlim([-lims,lims])\n% ylim([-lims,lims])\n% set(findall(gca, 'Type', 'Line'),'LineWidth',linewidth);\n% title('Measurement 3 ')\n% xlabel('X (m)')\n% ylabel('Y (m)')\n% set(gca,'fontsize',20)\n% \n% subplot(2,2,4)\n% vectors = V(:,:,4)*sqrtm(D(:,:,4))\n% plotv([vectors,-1*vectors])\n% xlim([-lims,lims])\n% ylim([-lims,lims])\n% set(findall(gca, 'Type', 'Line'),'LineWidth',linewidth);\n% title('Measurement 4 ')\n% xlabel('X (m)')\n% ylabel('Y (m)')\n% set(gca,'fontsize',20)\n end\n'''\n\n\nqs, counts = np.unique(error, return_counts=True)\ncumulative_prob = np.cumsum(counts).astype(np.double) / error.size\n\nplt.figure(1)\n\nplt.hist(error,100)\nprint(np.std(error))\nprint(np.mean(error))\nprint(np.median(error))\nplt.title('Error After 50 Measurements', fontsize = 20)\nplt.xlabel('L2 Norm Error (m)', fontsize = 16)\nplt.ylabel('Count', fontsize=16)\n\nplt.figure(2)\nplt.plot(qs, cumulative_prob)\nplt.title('Error After 50 Measurements', fontsize=20)\nplt.xlabel('L2 Norm Error (m)', fontsize=16)\nplt.ylabel('CDF', fontsize=16)\n\nplt.show()\n","sub_path":"anchor_sim_python.py","file_name":"anchor_sim_python.py","file_ext":"py","file_size_in_byte":17755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"428244648","text":"from 
django.db import models\nfrom django.utils.translation import ugettext as _\n\n\nclass Stage(models.Model):\n    name = models.CharField(\n        max_length=200,\n    )\n    distance = models.DecimalField(\n        help_text=_('distance in kilometers'),\n        max_digits=5,\n        decimal_places=3,\n    )\n\n    def __str__(self):\n        return '%s (%s km)' % (self.name, self.distance)\n\n\nclass Event(models.Model):\n    name = models.CharField(\n        max_length=100,\n    )\n    date = models.DateField()\n    stages = models.ManyToManyField(\n        Stage,\n        through='EventStage',\n    )\n\n    def __str__(self):\n        return self.name\n\n\nclass EventStage(models.Model):\n    stage = models.ForeignKey(\n        Stage,\n        on_delete=models.CASCADE,\n    )\n    event = models.ForeignKey(\n        Event,\n        on_delete=models.CASCADE,\n    )\n    ordering = models.PositiveSmallIntegerField()\n\n    def __str__(self):\n        return '%s, %s. %s' % (self.event, self.ordering, self.stage)\n\n    class Meta:\n        ordering = ['ordering']\n\n\nclass Team(models.Model):\n    name = models.CharField(\n        max_length=100,\n    )\n    event = models.ForeignKey(\n        Event,\n        on_delete=models.CASCADE,  # required on ForeignKey since Django 2.0\n    )\n\n    def __str__(self):\n        return '%s - %s' % (self.name, self.event)\n\n\nclass Runner(models.Model):\n    name = models.CharField(\n        max_length=100,\n    )\n    time_estimated = models.DurationField(\n        help_text=_('hh:mm:ss'),\n    )\n    team = models.ForeignKey(\n        Team,\n        on_delete=models.CASCADE,  # required on ForeignKey since Django 2.0\n    )\n    event_stage = models.ForeignKey(\n        EventStage,\n        on_delete=models.CASCADE,\n    )\n\n    def __str__(self):\n        return self.name\n","sub_path":"relay/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"48183254","text":"import boto3\r\nimport urllib3\r\nfrom prettytable import PrettyTable\r\nimport sys\r\n\r\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\r\n\r\nregions = ['us-west-2']\r\n\r\nresult = PrettyTable(['Region', 'InstanceType', 'Instances', 'InstanceCPU', 'TotalCPU'])\r\ntotalaz_sum = PrettyTable(['Region', 'AZ', 'Instances'])\r\ntotalcpu_sum = PrettyTable(['Region', 'SumTotalCPU', 'Instances'])\r\n\r\n\r\nec2_cap = []\r\nec2_azcap = []\r\n\r\ntotalcpu_counter = 0\r\ntotalinstance_counter = 0\r\n\r\nfor region in regions:\r\n\r\n    client = boto3.session.Session().client('ec2', region_name = region)\r\n    response = client.describe_instances()\r\n\r\n    if len(response['Reservations']) > 0:\r\n\r\n        for reservation in response['Reservations']:\r\n\r\n            for instance in reservation['Instances']:\r\n\r\n                try:\r\n\r\n                    if instance['State']['Name'] == 'running':\r\n\r\n                        ec2_occurances = False\r\n                        az_occurances = False\r\n\r\n                        vcpu = client.describe_instance_types(InstanceTypes=[instance['InstanceType']])['InstanceTypes'][0]['VCpuInfo']['DefaultVCpus']\r\n                        #vcpu = instance['CpuOptions']['CoreCount'] * instance['CpuOptions']['ThreadsPerCore']\r\n\r\n                        for compute in ec2_cap:\r\n\r\n                            if compute['InstanceType'] == instance['InstanceType']:\r\n\r\n                                compute['Instances'] += 1\r\n                                compute['TotalCPU'] += vcpu\r\n\r\n                                # flag the match so a duplicate row is not appended below\r\n                                ec2_occurances = True\r\n                                break\r\n\r\n                        if not ec2_occurances:\r\n\r\n                            ec2_cap.append({'Region': region, 'InstanceType': instance['InstanceType'], 'Instances': 1, 'TotalCPU': vcpu})\r\n\r\n                        for az in ec2_azcap:\r\n\r\n                            if az['AZ'] == instance['Placement']['AvailabilityZone']:\r\n\r\n                                az['Region'] = region\r\n                                az['Instances'] += 1\r\n\r\n                                az_occurances = True\r\n\r\n                                break\r\n\r\n                        if not az_occurances:\r\n\r\n                            ec2_azcap.append({'Region': region, 'AZ': instance['Placement']['AvailabilityZone'], 'Instances': 1})\r\n\r\n                except Exception as e:\r\n                    print(\"Exiting the script. Something is not right, 
please check the Exception below\")\r\n print(e)\r\n sys.exit()\r\nfor compute in ec2_cap:\r\n\r\n result.add_row([region, compute['InstanceType'], compute['Instances'], int(compute['TotalCPU']/compute['Instances']), compute['TotalCPU']])\r\n\r\n totalcpu_counter += compute['TotalCPU']\r\n totalinstance_counter += compute['Instances']\r\n\r\nif totalcpu_counter > 0:\r\n\r\n totalcpu_sum.add_row([region, totalcpu_counter, totalinstance_counter])\r\n\r\nfor az in ec2_azcap:\r\n\r\n totalaz_sum.add_row([region, az['AZ'], az['Instances']])\r\n\r\nresult.sortby = \"InstanceType\"\r\ntotalaz_sum.sortby = \"Instances\"\r\ntotalcpu_sum.sortby = \"Region\"\r\n\r\nprint(result)\r\nprint(totalaz_sum)\r\nprint(totalcpu_sum)\r\n","sub_path":"instance.py","file_name":"instance.py","file_ext":"py","file_size_in_byte":3052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"68020509","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport time\r\n\r\nt = float( np.loadtxt(\"t.txt\") )\r\ndt = float( np.loadtxt(\"dt.txt\") )\r\nx = np.loadtxt(\"x.txt\")\r\na = min(x)\r\nb = max(x)\r\nnTimesteps = int( np.loadtxt(\"nTimesteps.txt\") )\r\ndelt = 20\r\n\r\nplt.ion()\r\nfor i in np.arange(0,nTimesteps+1,delt) :\r\n rho = np.loadtxt('./snapshots/'+str(i).zfill(6)+'.txt')\r\n plt.plot( x, rho )\r\n plt.axis( [a,b,-1.2,1.2] )\r\n plt.title( '{0:02.3f}'.format(t) )\r\n t = t + delt*dt\r\n plt.waitforbuttonpress()\r\n plt.cla()\r\n\r\nplt.ioff()\r\nplt.plot( x, rho - np.exp(-10*x**2) )\r\n#plt.plot( x, rho - np.cos(np.pi*x) )\r\n#plt.plot( x, rho )\r\nplt.show()\r\n","sub_path":"discontinuousGalerkin/2d/doublyPeriodicTransport/plottingScript.py","file_name":"plottingScript.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"304473568","text":"from django.contrib.messages.api import error\nfrom django.db import models\nfrom django.shortcuts import render, redirect\nfrom .models import *\nfrom django.contrib import messages\n\n# Create your views here.\ndef index(request):\n return render(request, 'index.html')\n\ndef login(request):\n if request.method == \"POST\":\n\n if request.POST['operation'] == \"login\":\n errors = User.objects.validator_login(request.POST)\n\n if len(errors) > 0:\n for key, value in errors.items():\n messages.error(request, value)\n return redirect('/')\n else:\n user_details = get_user_details(request.POST['email'])\n for key, value in user_details.items():\n request.session[key] = value\n\n messages.success(request, \"Logged in successfully\")\n return redirect(f\"/success\")\n\n if request.POST['operation'] == \"register\":\n request.session.clear()\n errors = User.objects.validator_registeration(request.POST)\n if len(errors) > 0 :\n for key, value in errors.items():\n messages.error(request, value)\n return redirect('/')\n else:\n create_user(request.POST)\n messages.success(request, \"user successfully created\")\n request.session['first_name'] = request.POST['first_name']\n return redirect('/success')\n return redirect('/')\n \n\ndef show_user_page(request):\n if 'email' in request.session:\n return render(request, 'success.html')\n\n if 'first_name' in request.session:\n return render(request, 'success.html')\n \n else:\n return redirect('/')\n\ndef logout(request):\n request.session.clear()\n return 
redirect('/')","sub_path":"django/django_fullstack/login_registration/login/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"435716537","text":"\n# coding: utf-8\n\n# In[1]:\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import svm, datasets\n# get_ipython().run_line_magic('matplotlib', 'inline')\nfrom matplotlib.pylab import rcParams\nrcParams['figure.figsize'] = 10, 5\n\n\n# In[2]:\n\n\nfrom myfun import plot_decision_regions\ndef svm_example(plot_dict):\n # import some data to play with\n iris = datasets.load_iris()\n X = iris.data[:, :2] # we only take the first two features.\n y = iris.target\n\n # data since we want to plot the support vectors\n gamma = 0.7\n degree = 3\n C= 1\n # Plot the decision boundary. For that, we will assign a color to each\n for kernel in plot_dict:\n models = svm.SVC(kernel=kernel, C=C, degree=degree, gamma=gamma)\n models.fit(X,y)\n accuracy = models.score(X,y)\n #prediction = models.predict(y)\n plt.subplot(plot_dict[kernel])\n plt.tight_layout()\n #plt.figure()\n plot_decision_regions(X, y, models)\n # Plot also the training points TIGER edgecolors\n plt.scatter(X[:, 0], X[:, 1], c=y, edgecolors='k', cmap=plt.cm.Paired)\n plt.xlabel('Sepal length')\n plt.ylabel('Sepal width')\n plt.title('Plot for %s'%kernel + '\\n' + 'Accuracy:%.2f'%accuracy)\n\n# edgecolors : color or sequence of color, optional, default: 'face'\n# The edge color of the marker. Possible values:\n# \n# - 'face': The edge color will always be the same as the face color.\n# - 'none': No patch boundary will be drawn.\n# - A matplotib color.\n# \n# For non-filled markers, the *edgecolors* kwarg is ignored and\n# forced to 'face' internally.\n \n# # Kernel Comparison\n\n# In[3]:\n\n\nplot_dict= {'linear':131, 'poly':132, 'rbf':133}\nsvm_example(plot_dict)\nplt.show()\n\n","sub_path":"samples-labs-exercises/samples/z_aia/0908/CH4 SVM/1-SVM_Kernel_Comparison_Example_iris.py","file_name":"1-SVM_Kernel_Comparison_Example_iris.py","file_ext":"py","file_size_in_byte":1831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"598918516","text":" #Sign your name:Will Fuchs\nimport time\n'''\n 1. Make the following program work.\n ''' \n# print(\"This program takes three numbers and returns the sum.\")\n# total = 0\n#\n# for i in range(3):\n# x = float(input(\"Enter a number: \"))\n# total = total + x\n# print(\"The total is:\", total)\n \n\n\n'''\n 2. Write a Python program that will use a FOR loop to print the even\n numbers from 2 to 100, inclusive.\n'''\n# for i in range(2, 102, 2):\n# print(i)\n\n\n\n\n'''\n 3. Write a program that will use a WHILE loop to count from\n 10 down to, and including, 0. Then print the words Blast off! Remember, use\n a WHILE loop, don't use a FOR loop.\n'''\n# i = 10\n# while i > -1:\n# time.sleep(1)\n# print(i)\n# i -= 1\n# print(\"Blast Off!\")\n\n\n\n'''\n 4. Write a program that prints a random integer from 1 to 10 (inclusive).\n'''\n# import random\n# number = random.randrange(0,11)\n# print(number)\n\n\n\n'''\n 5. Write a Python program that will:\n \n * Ask the user for seven numbers\n * Print the total sum of the numbers\n * Print the count of the positive entries, the count of entries equal to zero,\n and the count of negative entries. 
Use an if, elif, else chain, not just three\n if statements.\n \n'''\ntotal = 0\neven = 0\nzero = 0\nnegative = 0\n\nfor i in range(7):\n number = int(input(\"Give me a number: \"))\n total += number\n if number < 0:\n negative += 1\n elif number == 0:\n zero += 1\n elif number % 2 == 0:\n even += 1\n else:\n total += 0\nprint()\nprint()\nprint(\"The sum of these numbers is: \", total)\nprint(\"There are a total of\", negative, \"Negative numbers\")\nprint(\"There are a total of\", zero, \"zeros\")\nprint(\"There are a total of\", even, \"even numbers\")","sub_path":"5.0_Jedi_Training.py","file_name":"5.0_Jedi_Training.py","file_ext":"py","file_size_in_byte":1735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"42953588","text":"salario = float(input())\n\nif salario <= 400:\n aumento = 15\nelif salario <= 800:\n aumento = 12\nelif salario <= 1200:\n aumento = 10\nelif salario <= 2000:\n aumento = 7\nelse:\n aumento = 4\n\nreajuste = salario*(aumento/100)\n\nprint('Novo salario: {:.2f}'.format(salario+reajuste))\nprint('Reajuste ganho: {:.2f}'.format(reajuste))\nprint('Em percentual: {} %'.format(aumento))","sub_path":"Iniciante/1048.py","file_name":"1048.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"602891733","text":"from urllib.parse import parse_qs, urlencode, urlparse\nfrom urllib.request import urlopen, urlretrieve\nimport argparse\nimport codecs\nimport json\n\n\nclass ArgumentParser(argparse.ArgumentParser):\n def __init__(self):\n super(ArgumentParser, self).__init__()\n self.add_argument('command', action='store')\n self.add_argument('uid', action='store')\n self.add_argument('format_string', action='store')\n\n\nclass Application(object):\n APP_ID = 4301930\n OAUTH_URL = 'https://oauth.vk.com/authorize'\n AUDIOS_URL = 'https://api.vk.com/method/audio.get'\n REDIRECT_BLANK = 'https://oauth.vk.com/blank.html'\n\n\nclass AuthDisplay(object):\n PAGE = 'page'\n\n\nclass VkPermission(object):\n AUDIO = 8\n\n\nclass ResponseType(object):\n TOKEN = 'token'\n\n\ndef download_audios(uid, format_string):\n auth_url = '{}?{}'.format(\n Application.OAUTH_URL,\n urlencode(dict(\n client_id=Application.APP_ID,\n redirect_uri=Application.REDIRECT_BLANK,\n display=AuthDisplay.PAGE,\n scope=VkPermission.AUDIO,\n response_type=ResponseType.TOKEN,\n ))\n )\n redirect_url = input(\n 'Please authenticate at following URL and enter address bar '\n 'contents below\\n\\n{}\\n\\n> '.format(auth_url))\n access_token = parse_qs(urlparse(redirect_url).fragment)['access_token'][0]\n audios_url = '{}?{}'.format(\n Application.AUDIOS_URL,\n urlencode(dict(\n access_token=access_token,\n owner_id=uid,\n ))\n )\n print(audios_url)\n with urlopen(audios_url) as response:\n audios_info = json.load(codecs.getreader('utf-8')(response))\n audios_count = audios_info['response'][0]\n audios = audios_info['response'][1:]\n print('Count in response is {}, accessible count is {}'.format(\n audios_count, len(audios)))\n for i, audio in enumerate(audios):\n destination = format_string.format(i)\n print('({}/{}) {} - {}\\n <- {}\\n -> {}'.format(\n i, len(audios), audio['artist'], audio['title'], audio['url'],\n destination))\n urlretrieve(audio['url'], destination)\n\n\ndef main():\n args = ArgumentParser().parse_args()\n if args.command == 'download-audios':\n download_audios(args.uid, args.format_string)\n else:\n print('Unknown command 
\"{}\"'.format(args.command))\n","sub_path":"vkbelt/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"125113956","text":"import time\n\nclass Main:\n def calcthisshit(fileinfo1, fileinfo2):\n list1 = [5,10,15,20,25,30,35,40,45,50,55,60,65,70,75,80,85,90,95,100]\n l2 = \"[~~~~~~~~~~~~~~~~~~~~]\"\n percent = fileinfo1/fileinfo2*100\n percent = float(\"{:.2f}\".format(percent))\n i = 0\n for x in list1:\n if x > percent:\n i = list1.index(x) + 1\n while i != 0:\n l2 = list(l2)\n l2[i] = \"#\"\n l2 = Main.listtostring(l2)\n i -= 1\n break\n if percent > 99:\n l2 = \"[####################]\"\n l2 = l2, \"\" + str(percent) + \"%\"\n return Main.listtostring(l2)\n\n def listtostring(list):\n str = \"\"\n for x in list:\n str += x\n return str","sub_path":"download_scripts/extrafunc.py","file_name":"extrafunc.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"394148544","text":"class Folder():\n def __init__(self,name):\n self.name = name\n self.contents = []\n\n def add(self,item):\n self.contents.append(item)\n\nclass File():\n def __init__(self,name):\n self.name = name\n\ndef create_data_map():\n data_source = Folder(\"data\")\n\n training = Folder(\"training\")\n testing = Folder(\"testing\")\n \n train_images = Folder(\"images\")\n train_labels = Folder(\"labels\")\n \n test_images = Folder(\"images\")\n test_labels = Folder(\"labels\")\n\n train_image_file = File(\"train-images-idx3-ubyte.gz\")\n train_label_file = File(\"train-labels-idx1-ubyte.gz\")\n test_image_file = File(\"t10k-images-idx3-ubyte.gz\")\n test_label_file = File(\"t10k-labels-idx1-ubyte.gz\")\n\n train_images.add(train_image_file)\n train_labels.add(train_label_file)\n test_images.add(test_image_file)\n test_labels.add(test_label_file)\n\n\n training.add(train_images)\n training.add(train_labels)\n\n testing.add(test_images)\n testing.add(test_labels)\n\n \n data_source.add(training)\n data_source.add(testing)\n\n return data_source\n\ndef build_data_map(data_source, data_structure, counts):\n depth = len(counts)\n for i in range(len(counts)-1):\n if counts[i] == -1:\n data_structure.write(\"\\t\"*(depth-1))\n else:\n data_structure.write(\"|\\t\"*(depth-1))\n if depth > 0 :\n data_structure.write(\"|---\")\n data_structure.write(data_source.name+\"\\n\")\n \n for count, element in enumerate(data_source.contents):\n if type(element) == Folder:\n build_data_map(element, data_structure, counts + [count-len(data_source.contents)])\n elif type(element) == File:\n for i in range(len(counts)):\n if counts[i] == -1:\n data_structure.write(\"\\t\"*(depth-1))\n else:\n data_structure.write(\"|\\t\"*(depth-1))\n if depth > 0 :\n data_structure.write(\"|---\")\n data_structure.write(element.name+\"\\n\")\n\n\ndata_source = create_data_map()\nwith open(\"data_structure.txt\", \"w\") as data_structure:\n build_data_map(data_source,data_structure, [])\n\n \n","sub_path":"Linear_Network/data_mapper.py","file_name":"data_mapper.py","file_ext":"py","file_size_in_byte":2204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"371692990","text":"def snake(n):\n res = [[0] * n for i in range(n)]\n k = 0\n for i in range(n):\n for j in range(n):\n res[i][j] = j + (n * k) + 1\n k += 1\n for i in range(1, n, 2):\n res[i].sort(reverse=True)\n return 
res\n\n\nsnake_in = open('snake.in', 'r')\nsnake_out = open('snake.out', 'w')\n\nn = int(snake_in.readline())\nsnake_in.close()\n\nans = snake(n)\nfor i in ans:\n print(' '.join([str(j) for j in i]), file=snake_out)\nsnake_out.close()\n","sub_path":"lKSH/day05/snake.py","file_name":"snake.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"444238887","text":"\"\"\"\n535\nencode and decode TinyURL\nmedium\n\nNote: This is a companion problem to the System Design problem:\nDesign TinyURL.\nTinyURL is a URL shortening service where you enter a URL such\nas https://leetcode.com/problems/design-tinyurl and it returns\na short URL such as http://tinyurl.com/4e9iAk. Design a class\nto encode a URL and decode a tiny URL.\n\nThere is no restriction on how your encode/decode algorithm\nshould work. You just need to ensure that a URL can be encoded\nto a tiny URL and the tiny URL can be decoded to the original URL.\n\nImplement the Solution class:\n\nSolution() Initializes the object of the system.\nString encode(String longUrl) Returns a tiny URL for the given longUrl.\nString decode(String shortUrl) Returns the original long URL for\nthe given shortUrl. It is guaranteed that the given shortUrl was\nencoded by the same object.\n\"\"\"\n\nfrom random import choices\n\nclass Codec:\n\n def __init__(self):\n self.long2short = {}\n self.short2long = {}\n self.alphabet = \"abcdefghijklmnopqrstuvwxyz1234567890\"\n\n def encode(self, longUrl: str) -> str:\n \"\"\"Encodes a URL to a shortened URL.\n \"\"\"\n if longUrl in self.long2short:\n return self.long2short[longUrl]\n while(1):\n short = \"\".join(choices(self.alphabet, k=8))\n if \"http://tinyurl.com/\"+short not in self.short2long:\n self.long2short[longUrl] = \"http://tinyurl.com/\"+short\n self.short2long[\"http://tinyurl.com/\"+short] = longUrl\n break\n return \"http://tinyurl.com/\"+short\n\n def decode(self, shortUrl: str) -> str:\n \"\"\"Decodes a shortened URL to its original URL.\n \"\"\"\n if shortUrl in self.short2long:\n return self.short2long[shortUrl]\n\n# Your Codec object will be instantiated and called as such:\n# codec = Codec()\n# codec.decode(codec.encode(url))\n\ncodec = Codec()\nurl = \"https://leetcode.com/problems/design-tinyurl\"\nprint(codec.encode(url))\n\n\n\n\n\n\n\n","sub_path":"Q535.py","file_name":"Q535.py","file_ext":"py","file_size_in_byte":1990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"635922727","text":"from pandas import read_csv\r\nimport numpy as np\r\nfrom sklearn.linear_model import LinearRegression\r\nfrom matplotlib import pyplot as plt\r\nfrom scipy import stats\r\nfrom scipy.optimize import curve_fit\r\n\r\n# load the dataset\r\ntest1 = read_csv('merged_dataset_BearingTest_1.csv')\r\ntest2 = read_csv('merged_dataset_BearingTest_2.csv')\r\ntest3 = read_csv('merged_dataset_BearingTest_3.csv')\r\n\r\ndata131 = np.array(test1['Bearing 3 C1'].values)\r\ndata132 = np.array(test1['Bearing 3 C2'].values)\r\ndata21 = np.array(test2['Bearing 1'].values)\r\ndata33 = np.array(test3['Bearing 3'].values)\r\n\r\n# train and test\r\ntest1 = data132[200:]\r\ntest2 = data21\r\ntest3 = data33\r\n\r\n# Time\r\nN1 = test1.shape[0]\r\ntime1 = np.linspace(0, N1/6, N1)\r\nN2 = test2.shape[0]\r\ntime2 = np.linspace(0, N2/6, N2)\r\nN3 = test3.shape[0]\r\ntime3 = np.linspace(0, N3/6, N3)\r\n\r\n######################## 1\r\nMSE = 0\r\ni = 1\r\ndifference = 0\r\nwhile 
difference <= 0.012:\r\n linear_reg = LinearRegression().fit(time1.reshape(-1, 1)[0:i], test1[0:i])\r\n difference = test1[i+1] - linear_reg.predict(np.array([[time1[i+1]]]))\r\n i = i + 1\r\n\r\ndef exponential(x, a, b, c):\r\n return a * np.exp(b * x) + c\r\n\r\nlinear_regression = linear_reg.predict(time1[0:i+1].reshape(-1,1))\r\n\r\nparam1, param_cov1 = curve_fit(exponential, time1[0:len(time1)-i-15], test1[i:len(test1)-15], p0=[1, 0.6, 0.3], maxfev=2000)\r\nans1 = param1[0]*(np.exp(param1[1]*time1[0:len(time1)-i+5])) + param1[2]\r\n\r\nfig, axs = plt.subplots(3, figsize=(12,10))\r\nfig.text(0.1, 0.1, 'RMS', va='center', rotation='vertical', fontsize=14)\r\n\r\naxs[0].plot(time1[280*6:i+1], test1[280*6:i+1], '.', color='purple', markersize=5, label = 'Real Data')\r\naxs[0].plot(time1[280*6:i+1], linear_regression[280*6:], '-', color='cyan', linewidth=2, label='Linear Prediction')\r\naxs[0].plot(time1[i:], test1[i:], '.', color='purple', markersize=5)\r\naxs[0].plot(time1[280*6:], 0.3*np.ones_like(time1[280*6:]), '--')\r\n\r\nnew_t1 = np.linspace(0, (N1+5)/6, N1+5)\r\naxs[0].plot(new_t1[i:-20], ans1[:-20], '-', color='chartreuse', linewidth=2, label='Exponential Train')\r\naxs[0].plot(new_t1[-21:], ans1[-21:], '-', color='tomato', linewidth=2, label='Exponential Prediction')\r\n\r\n####### Confidence region\r\nci = 0.95\r\npp = (1. + ci) / 2.\r\nnstd = stats.norm.ppf(pp)\r\n\r\nexponential_region = time1[0:len(time1)-i+30]\r\n\r\nperr = np.sqrt(np.diag(param_cov1))\r\npopt_up = param1 + nstd * perr\r\npopt_dw = param1 - nstd * perr\r\n\r\nexponential_region_prediction_up = popt_up[0]*(np.exp(popt_up[1]*exponential_region)) + popt_up[2]\r\nexponential_region_prediction_down = popt_dw[0]*(np.exp(popt_dw[1]*exponential_region)) + popt_dw[2]\r\n\r\nnew_t1 = np.linspace(0, (N1+30)/6, N1+30)\r\naxs[0].plot(new_t1[i:-57], exponential_region_prediction_up[:-57], '--', color='red', label='95% Confidence Interval')\r\naxs[0].plot(new_t1[i:], exponential_region_prediction_down, '--', color='red')\r\n\r\naxs[0].legend()\r\naxs[0].set_title('Test 1 Bearing 3 Channel 2', fontsize=11)\r\n\r\n###################### 2\r\nMSE = 0\r\ni = 1\r\ndifference = 0\r\nwhile difference <= 0.012:\r\n linear_reg = LinearRegression().fit(time2.reshape(-1, 1)[0:i], test2[0:i])\r\n difference = test2[i+1] - linear_reg.predict(np.array([[time2[i+1]]]))\r\n i = i + 1\r\n\r\ndef exponential(x, a, b, c):\r\n return a * np.exp(b * x) + c\r\n\r\nlinear_regression = linear_reg.predict(test2[0:i+1].reshape(-1,1))\r\n\r\nparam2, param_cov2 = curve_fit(exponential, time2[0:len(time2)-i-15], test2[i:len(test2)-15], p0=[1, 0.6, 0.3], maxfev=2000)\r\nans2 = param2[0]*(np.exp(param2[1]*time2[0:len(time2)-i+5])) + param2[2]\r\n\r\naxs[1].plot(time2[480:i+1], test2[480:i+1], '.', color='purple',markersize=5, label = 'Linear Real')\r\naxs[1].plot(time2[480:i+1], linear_regression[480:], 'c-', linewidth = 2, label = 'Linear Predict')\r\naxs[1].plot(time2[i:], test2[i:], '.', color='purple',markersize=5, label = 'Exponential Real')\r\naxs[1].plot(time2[480:], 0.3*np.ones_like(time2[480:]), '--')\r\n\r\nnew_t2 = np.linspace(0, (N2+5)/6, N2+5)\r\naxs[1].plot(new_t2[i:-20], ans2[:-20], '-', color='chartreuse',linewidth = 2, label = 'Exponential Train')\r\naxs[1].plot(new_t2[-21:], ans2[-21:], '-', color='tomato', linewidth=2, label='Exponential Predict')\r\n\r\n####### Confidence region\r\nci = 0.95\r\npp = (1. 
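# two-sided interval: a CI level ci maps to an upper-tail probability of (1 + ci) / 2\r\n           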
+ ci) / 2.\r\nnstd = stats.norm.ppf(pp)\r\n\r\nexponential_region = time2[0:len(time2)-i+15]\r\n\r\nperr = np.sqrt(np.diag(param_cov2))\r\npopt_up = param2 + nstd * perr\r\npopt_dw = param2 - nstd * perr\r\n\r\nexponential_region_prediction_up = popt_up[0]*(np.exp(popt_up[1]*exponential_region)) + popt_up[2]\r\nexponential_region_prediction_down = 1e-16*(np.exp(popt_dw[1]*exponential_region)) + popt_dw[2]\r\n\r\nnew_t2 = np.linspace(0, (N2+15)/6, N2+15)\r\naxs[1].plot(new_t2[i:-50], exponential_region_prediction_up[:-50], '--', color='red', label='95% Confidence Interval')\r\naxs[1].plot(new_t2[i:], exponential_region_prediction_down, '--', color='red')\r\n\r\naxs[1].set_title('Test 2 Bearing 1', fontsize=11)\r\n\r\n####################### 3\r\nMSE = 0\r\ni = 1\r\ndifference = 0\r\nwhile difference <= 0.012:\r\n linear_reg = LinearRegression().fit(time3.reshape(-1, 1)[0:i], test3[0:i])\r\n difference = test3[i+1] - linear_reg.predict(np.array([[time3[i+1]]]))\r\n i = i + 1\r\n\r\ndef exponential(x, a, b, c):\r\n return a * np.exp(b * x) + c\r\n\r\nlinear_regression = linear_reg.predict(test3[0:i+1].reshape(-1,1))\r\n\r\nparam3, param_cov3 = curve_fit(exponential, time3[0:len(time3)-i-15], test3[i:len(test3)-15], p0=[1, 0.6, 0.3], maxfev=2000)\r\nans3 = param3[0]*(np.exp(param3[1]*time3[0:len(time3)-i+5])) + param3[2]\r\n\r\naxs[2].plot(time3[980*6:i+1], test3[980*6:i+1], '.', color='purple',markersize=5, label = 'Linear Real')\r\naxs[2].plot(time3[980*6:i+1], linear_regression[980*6:], 'c-', linewidth = 2, label = 'Linear Predict')\r\naxs[2].plot(time3[i:], test3[i:], '.', color='purple',markersize=5, label = 'Exponential Real')\r\naxs[2].plot(time3[980*6:], 0.3*np.ones_like(time3[980*6:]), '--')\r\n\r\nnew_t3 = np.linspace(0, (N3+5)/6, N3+5)\r\naxs[2].plot(new_t3[i:-20], ans3[:-20], '-', color='chartreuse', linewidth = 2, label = 'Exponential Train')\r\naxs[2].plot(new_t3[-21:], ans3[-21:], '-', color='red',linewidth=2, label='Exponential Predict')\r\n\r\n####### Confidence region\r\nci = 0.95\r\npp = (1. 
+ ci) / 2.\r\nnstd = stats.norm.ppf(pp)\r\n\r\nexponential_region = time3[0:len(time3)-i]\r\n\r\nperr = np.sqrt(np.diag(param_cov3))\r\npopt_up = param3 + nstd * perr\r\npopt_dw = param3 - nstd * perr\r\n\r\nexponential_region_prediction_up = popt_up[0]*(np.exp(popt_up[1]*exponential_region)) + popt_up[2]\r\nexponential_region_prediction_down = popt_dw[0]*(np.exp(popt_dw[1]*exponential_region)) + popt_dw[2]\r\n\r\naxs[2].plot(time3[i:-45], exponential_region_prediction_up[:-45], '--', color='red', label='upper confidence interval')\r\naxs[2].plot(time3[i:], exponential_region_prediction_down, '--', color='red', label='lower confidence interval')\r\n\r\naxs[2].set_xlabel('Time [hr]', fontsize=14)\r\naxs[2].set_title('Test 3 Bearing 3', fontsize=11)\r\nplt.show()\r\n\r\n# plt.savefig('Combined_zoom.png')","sub_path":"two_stage_BY_zoom.py","file_name":"two_stage_BY_zoom.py","file_ext":"py","file_size_in_byte":6933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"290263177","text":"from pytube import Playlist\r\nimport streamlit as st\r\nfinal = ''\r\nst.title('YouTube Playlist Video URL Generator')\r\ntry:\r\n    \r\n    with st.form(key='form'):\r\n        playlist = st.text_input('Playlist URL')\r\n        enviar = st.form_submit_button('Generate')\r\n        p = Playlist(playlist)\r\n        if enviar:\r\n            for v in p.video_urls:\r\n                final = final + v+\"\\n\"\r\n            \r\n            st.text_area('Playlist URLs',final,300)\r\nexcept:\r\n    st.error('Invalid URL, the URL must be a YouTube playlist')","sub_path":"str.py","file_name":"str.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"76329280","text":"'''\nDepth-first traversals (pre-order, in-order, post-order) of a binary tree\n'''\n\n# class TreeNode:\n#     def __init__(self, x):\n#         self.val = x\n#         self.left = None\n#         self.right = None\n\nclass TreeToSequence:\n    def __init__(self):\n        self.pres=[]\n        self.mid=[]\n        self.back=[]\n\n    def convert(self, root):\n        pre = self.pre_order(root)\n        mid = self.mid_order(root)\n        back =self.back_order(root)\n        res = []\n        res.append(pre)\n        res.append(mid)\n        res.append(back)\n        return res\n\n    def pre_order(self,root):\n        if not root:\n            return\n        self.pres.append(root.val)\n        self.pre_order(root.left)\n        self.pre_order(root.right)\n        return self.pres\n\n    def mid_order(self,root):\n        if not root:\n            return\n        self.mid_order(root.left)\n        self.mid.append(root.val)\n        self.mid_order(root.right)\n        return self.mid\n\n    def back_order(self,root):\n        if not root:\n            return\n        self.back_order(root.left)\n        self.back_order(root.right)\n        self.back.append(root.val)\n        return self.back\n\n\n","sub_path":"data_structure_and_algorithms/binary_tree/DFS_recursive.py","file_name":"DFS_recursive.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"55965641","text":"#1. Write a function that takes two positive integers as parameters and returns a tuple whose first element is the greatest common divisor and whose second element is the least common multiple.\ndef reserve(num1, num2):\n    if num1>num2:\n        num3 = num1\n        num1 = num2\n        num2 = num3\n    mingbs = num1 * num2\n    while num1!=0:\n        num4 = num2 % num1\n        num2 = num1\n        num1 = num4\n    maxgys = int(mingbs/num2)\n    print('The greatest common divisor is {}\\nThe least common multiple is {}\\n'.format(num2, maxgys))\n    return (num2, maxgys)  # num2 now holds the GCD; maxgys the LCM (product // GCD)\nnumber1 = input(\"Please enter the first integer:\\n\")\nnum1 = int(number1)\nnumber2 = input(\"Please enter the second integer:\\n\")\nnum2 = int(number2)\nresult = reserve(num1,num2)\n# Sample run\n# Please enter the first integer:\n# 2\n# Please enter the second integer:\n# 3\n# The greatest common divisor is 1\n# The least common multiple is 6\n\n#2. Write a function that takes a string as a parameter and counts and prints the number of digits, letters, spaces, and other characters in it.\ndef static(str):\n    numbers = 0\n    letters = 0\n    spaces = 0\n    others = 
0\n    for i in str:\n        if i.isdigit():\n            numbers += 1\n        elif i.isalpha():\n            letters += 1\n        elif i.isspace():\n            spaces += 1\n        else:\n            others += 1\n    print('Number of digits: {}\\nNumber of letters: {}\\nNumber of spaces: {}\\nNumber of others: {}\\n'.format(numbers, letters, spaces, others))\nstr = input(\"Please enter a string: \")\nstatic(str)\n# Sample run\n# Please enter a string: 123abc ,,,\n# Number of digits: 3\n# Number of letters: 3\n# Number of spaces: 3\n# Number of others: 3","sub_path":"homework7/Group8/hw7_1720389.py","file_name":"hw7_1720389.py","file_ext":"py","file_size_in_byte":1512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"86250687","text":"#\n# File: second_largest.py\n# Purpose: HackerRank Python Track, Find the Second Largest Number\n# Author: Lucas Brown\n# Date: October 19, 2017\n# Python: 3.5.3\n#\n# Some challenges have code provided that is necessary to complete the challenge. Any code provided by HackerRank and not written by Lucas Brown will be identified.\n#\n#\n# Task\n# -----\n# You are given n numbers. Store them in a list and find the second largest number.\n#\n#\n# Input Format\n# -------------\n# The first line contains n. The second line contains an array A[] of n integers each separated by a space.\n#\n#\n# Output Format\n# --------------\n# Print the value of the second largest number.\n#\n#\n# Constraints\n# ------------\n# 2 <= n <= 10\n# -100 <= A[i] <= 100\n#\n#\n# Sample Input\n# -------------\n# 5\n# 2 3 6 6 5\n#\n#\n# Sample Output\n# --------------\n# 5\n#\n\nif __name__ == \"__main__\":\n\n    n = int( input() ) \n    A = [ int(x) for x in input().split() ]\n\n    A.sort()\n\n    i = n - 2 # No need to start with last element that we're comparing to\n    while( A[i] == A[n-1] and i != 0 ):\n        i -= 1\n\n    print( A[i] )","sub_path":"LanguageTrack_Python/BasicDataTypes/second_largest.py","file_name":"second_largest.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"70758110","text":"import tensorflow as tf\n\nfrom transformers_keras.transformer.encoder import EncoderLayer, Encoder\n\n\nclass EncoderTest(tf.test.TestCase):\n\n    def testEncoderLayer(self):\n        sample_encoder_layer = EncoderLayer(512, 8, 2048)\n\n        output, attn_weights = sample_encoder_layer(\n            tf.random.uniform((64, 43, 512)), False, None)\n\n        self.assertAllEqual(output.shape, [64, 43, 512])  # (batch_size, input_seq_len, d_model)\n        self.assertAllEqual(attn_weights.shape, [64, 8, 43, 43])  # (batch_size, num_heads, seq_length, seq_length)\n\n    def testEncoder(self):\n        sample_encoder = Encoder(num_layers=2, d_model=512, num_heads=8, dff=2048, input_vocab_size=8500)\n        output, attn_weights = sample_encoder(tf.random.uniform((64, 62)), training=False, mask=None)\n\n        self.assertAllEqual(output.shape, [64, 62, 512])  # (batch_size, input_seq_len, d_model)\n\n        # each layer's attention weights\n        for k, v in attn_weights.items():\n            self.assertAllEqual(v.shape, [64, 8, 62, 62])\n            print(k, v.shape)\n\n\nif __name__ == '__main__':\n    tf.test.main()\n","sub_path":"transformers_keras/transformer/encoder_test.py","file_name":"encoder_test.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"283470259","text":"import arcade\nimport random\n\nWIDTH = 800\nHEIGHT = 800\nTITLE = \"Sprites\"\n\nclass Game(arcade.Window):\n    def __init__(self):\n        super().__init__(WIDTH, HEIGHT, TITLE)\n        self.player_list = None\n        self.coin_list = None\n        self.score = None\n        arcade.set_background_color(arcade.color.AIR_FORCE_BLUE)\n        
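# hide the OS cursor; the player sprite tracks the mouse instead\n        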
self.set_mouse_visible(False)\n\n def setup(self):\n self.score = 0\n self.player_list = arcade.SpriteList()\n self.coin_list = arcade.SpriteList()\n self.player = arcade.Sprite(\"character_idle.png\", 0.7)\n self.player.center_x = 200\n self.player.center_y = 100\n self.player_list.append(self.player)\n for i in range(50):\n coin = arcade.Sprite('coin.png', 0.4)\n coin.center_x = random.randint(0, WIDTH)\n coin.center_y = random.randint(0, HEIGHT)\n self.coin_list.append(coin)\n \n def update(self, delta_time):\n self.player_list.update()\n self.coin_list.update()\n coins_touching = arcade.check_for_collision_with_list(self.player, self.coin_list)\n for coin in coins_touching:\n self.score += 1\n coin.kill()\n if len(self.coin_list) == 0:\n self.setup()\n\n def on_draw(self):\n arcade.start_render()\n arcade.draw_text(str(self.score), WIDTH/2, HEIGHT/2, arcade.color.BLACK, 70)\n self.player_list.draw()\n self.coin_list.draw()\n\n \n def on_mouse_motion(self, x, y, dx, dy):\n self.player.center_x = x\n self.player.center_y = y\n\nmy_game = Game()\nmy_game.setup()\narcade.run()","sub_path":"sprites.py","file_name":"sprites.py","file_ext":"py","file_size_in_byte":1612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"163371679","text":"import pyglet\nfrom multiprocessing import Pool\nfrom multiprocessing import Process\nfrom view import Renderer\nfrom messages import Messenger\nfrom gui_controls import GUIcontrols\nfrom ai_controls import AIcontrols, AItype\nimport messages\nimport argparse\nimport datetime\nfrom objects import Objects\nfrom obj_def import *\nimport gc\n\nclass GameState:\n Start, ActiveGame, Menu, Exit, Pause = range(5)\n\n\nclass Game:\n def __init__(self, screen_width, screen_height, history_path=None, train_mode=False, prefix=None, tries=5):\n gc.disable()\n self.game_state = GameState.Start\n self.screen_width = screen_width\n self.screen_height = screen_height\n self.train_mode = train_mode\n self.battle_field_size = (1000, 1000)\n self.radiant_bots = 0\n self.dire_bots = 0\n self.is_player1_play = 1\n self.is_player2_play = 1\n self.radiant = self.radiant_bots + self.is_player1_play\n self.dire = self.dire_bots + self.is_player2_play\n if history_path is None:\n now_time = datetime.datetime.now()\n self.history_path = now_time.strftime(\"%Y_%m_%d_%H_%M_%S\")+'.txt'\n #self.history_path = 'delete_me_pls.txt'\n if prefix:\n self.history_path = '{}_{}'.format(prefix, self.history_path)\n self.clear_file(self.history_path)\n self.is_it_move_from_history = False\n else:\n self.history_path = history_path\n self.is_it_move_from_history = True\n # self.fps_display = pyglet.clock.ClockDisplay()\n self.playtime = 0\n self.framerate = 60\n self.configuration = {ObjectType.FieldSize: [],\n ObjectType.Bot1: [],\n ObjectType.Player1: [],\n ObjectType.Bot2: [],\n ObjectType.Player2: []}\n self.configuration[ObjectType.FieldSize].append(self.battle_field_size)\n self.prepare_config(self.radiant_bots, self.dire_bots, self.is_player1_play, self.is_player2_play,\n self.battle_field_size[0], self.battle_field_size[1])\n self.messenger = Messenger()\n if self.train_mode:\n self.ai_controls = AIcontrols(self.configuration, messenger=self.messenger, train_mode=True)\n self.Objects = Objects(self.configuration, self.radiant, self.dire, history_path=self.history_path,\n messenger=self.messenger, ai_controls=self.ai_controls, tries=tries)\n else:\n self.ai_controls = AIcontrols(self.configuration, messenger=self.messenger)\n self.Objects 
= Objects(self.configuration, self.radiant, self.dire, history_path=self.history_path,\n messenger=self.messenger)\n self.gui_controls = GUIcontrols(self.messenger)\n self.renderer = Renderer(self.screen_width, self.screen_height, self.battle_field_size)\n self.game_window = None\n self.objects = None\n self.history_list = []\n self.functions = {messages.Game.Quit: self.quit,\n messages.Game.UpdateObjects: self.update_objects,\n messages.Game.Pause: self.game_pause_simulation,\n messages.Game.Polar_grid: self.show_polar_grid,\n messages.Game.ActiveGame: self.game_unpaused}\n self.run_game()\n\n def prepare_config(self, bot1, bot2, player1, player2, sizeX, sizeY):\n pos1 = sizeX / (bot1 + player1 + 1)\n\n pos2 = sizeX / (bot2 + player2 + 1)\n if player1:\n self.configuration[ObjectType.Player1].append((pos1 + np.random.randint(-50, 50), 50 + np.random.randint(50),\n 90, ObjectSubtype.Drone, Constants.DefaultObjectRadius))\n if player2:\n self.configuration[ObjectType.Player2].append((pos2 + np.random.randint(-50, 50), sizeY - 50 - np.random.randint(50),\n 270, ObjectSubtype.Drone, Constants.DefaultObjectRadius))\n\n for i in range(1, bot1 + 1):\n self.configuration[ObjectType.Bot1].append(\n (pos1 * (i + player1) + np.random.randint(-50, 50), 50 + np.random.randint(50),\n 90, ObjectSubtype.Plane, Constants.DefaultObjectRadius, AItype.DumbAi))\n\n for i in range(1, bot2 + 1):\n self.configuration[ObjectType.Bot2].append(\n (pos2 * (i + player2) + np.random.randint(-50, 50), sizeY - 50 - np.random.randint(50),\n 270, ObjectSubtype.Plane, Constants.DefaultObjectRadius, AItype.DumbAi))\n\n\n def clear_file(self, file_path):\n with open(file_path, \"w\") as file: # just to open with argument which clean file\n pass\n\n def quit(self):\n self.game_state = GameState.Exit\n self.messenger.shutdown()\n pyglet.app.exit()\n\n def game_pause_simulation(self):\n self.game_state = GameState.Pause\n\n def game_unpaused(self):\n self.game_state = GameState.ActiveGame\n\n def show_polar_grid(self):\n if(self.game_window.width == self.screen_width):\n self.game_window.set_size(self.screen_width + 500, self.screen_height)\n self.renderer.show_polar_grid()\n else:\n self.game_window.set_size(self.screen_width, self.screen_height)\n self.renderer.show_polar_grid()\n\n def read_messages(self, dt):\n while True:\n data = self.messenger.get_message(messages.Game)\n if not data:\n return\n self.functions[data['func']](**data['args']) if 'args' in data else self.functions[data['func']]()\n\n def update_graphics(self, dt):\n if self.game_state != GameState.Pause:\n self.renderer.update_graphics()\n self.game_window.clear()\n self.renderer.batch.draw()\n\n def update_objects(self, objects_copy):\n if self.game_state != GameState.Pause:\n self.objects = objects_copy\n self.renderer.update_objects(objects_copy)\n self.renderer.update_graphics()\n\n def run_game(self):\n if self.train_mode:\n pyglet.clock.schedule_interval(self.read_messages, 1.0 / 2)\n pyglet.app.run()\n return 0\n self.game_window = pyglet.window.Window(self.screen_width, self.screen_height,resizable=True)\n pyglet.gl.glClearColor(0.9, 0.9, 0.9, 0)\n self.game_window.set_location(200, 50)\n self.game_state = GameState.ActiveGame\n if self.is_it_move_from_history:\n self.messenger.objects_run_from_file_simulation()\n else:\n self.messenger.objects_run_simulation()\n self.messenger.ai_start_game()\n\n #@self.game_window.event\n #def on_draw():\n # if self.game_state != GameState.Pause:\n # self.fps_display.draw()\n\n @self.game_window.event\n def 
on_key_press(key, modif):\n self.messenger.controls_handle_key(True, key)\n\n @self.game_window.event\n def on_key_release(key, modif):\n self.messenger.controls_handle_key(False, key)\n\n @self.game_window.event\n def on_close():\n self.quit()\n\n # we need to remember about hard nailed graphics. much later we should fix it somehow\n pyglet.clock.schedule_interval(self.read_messages, 1.0 / self.framerate)\n pyglet.clock.schedule_interval(self.update_graphics, 1.0 / self.framerate)\n pyglet.app.run()\n\n\nif __name__ == \"__main__\":\n ap = argparse.ArgumentParser()\n ap.add_argument(\"-f\", \"--history_path\", type=str, required=False,\n help=\"path to history file\")\n ap.add_argument(\"-t\", \"--train_mode\", required=False, action='store_true',\n help=\"training mode\")\n ap.add_argument(\"-p\", '--prefix', type=str, required=False,\n help='prefix for history file')\n ap.add_argument(\"-m\", '--tries', type=int, required=False,\n help='number of total retries in one session')\n args = vars(ap.parse_args())\n args[\"screen_width\"] = 1000\n args[\"screen_height\"] = 1000\n print(\"{}\".format(args))\n Game(**args)\n #for index in range(0, 1):\n # proc_arr.append(Process(target=Game, args=args_for_game))\n # proc_arr[index].start()\n\n #for index in range(0, 1):\n # proc_arr[index].join()","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":8467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"453157484","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n Name: TOFILL\\n\n Description: TOFILL\n\"\"\"\n\n\"\"\"PySimpleFrame\n Author: Miguel Silva\n License: Check LICENSE file\n\"\"\"\n\n\n## System imports ##\nfrom enum import Enum, IntEnum, unique\n\n## Library imports ##\n\n## Application imports ##\nimport Input\nfrom KeyCodes import KeyCodes\n\n\n## Print state\nprint(\"Listening input (press CTRL+C twice to stop)\")\n\n\n## Capture key inputs continuously\nlastInput = None\nwhile True:\n\t## Wait for input and set\n\tinput = Input.GetInput()\n\t\n\t## Print input\n\tprint(f\"Inputed: {input}\")\n\t\n\t## Break if CTRL + C was pressed now and before\n\tif lastInput == KeyCodes.COPYCANCEL.keycode() and input == KeyCodes.COPYCANCEL.keycode():\n\t\tprint(\"Input listening canceled\")\n\t\tbreak\n\t\n\t## Set last input as current input\n\tlastInput = input\n\t\n\t","sub_path":"pysimpleframe/interface/input/windows/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"296856659","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Category',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('category_name', models.CharField(default=b'', max_length=200)),\n ],\n options={\n 'db_table': 'categories',\n 'verbose_name': 'category',\n 'verbose_name_plural': 'categories',\n },\n bases=None,\n managers=None,\n ),\n migrations.CreateModel(\n name='Item',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('item_name', models.CharField(default=b'', max_length=200)),\n ('item_description', models.TextField(verbose_name=b'Item description')),\n ('item_price', 
models.DecimalField(default=0, max_digits=10, decimal_places=2)),\n ('item_image', models.ImageField(default=b'', upload_to=b'img')),\n ('item_category', models.ForeignKey(to='shopping.Category')),\n ],\n options={\n 'db_table': 'items',\n 'verbose_name': 'item',\n 'verbose_name_plural': 'items',\n },\n bases=None,\n managers=None,\n ),\n ]\n","sub_path":"ShopGame/shopping/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"157737369","text":"import io\nimport json\nimport os\n\nfrom google.cloud import vision\n\ndef detect_text(path,text_path,client):\n \"\"\"Detects text in the file.\"\"\"\n # client = vision.ImageAnnotatorClient()\n\n with io.open(path, 'rb') as image_file:\n content = image_file.read()\n\n image = vision.types.Image(content=content)\n\n # response = client.text_detection(image=image)\n # response = client.text_detection(image=image)\n response = client.document_text_detection(image=image)\n texts = response.text_annotations\n document = response.full_text_annotation\n print(type(document))\n print(dir(document))\n print(type(document.pages))\n print(dir(document.pages))\n file = open(text_path,'w',encoding='utf-8')\n print(document.pages,file=file)\n file.close()\n # with open(text_path,'w',encoding='utf-8')as f:\n # f.write(document.pages)\n\n\n\n # if len(texts) > 0:\n # with open(text_path,'w',encoding='utf-8')as f:\n # for text in texts:\n # line_content = text.description\n # vertices = (['({},{})'.format(vertex.x, vertex.y)\n # for vertex in text.bounding_poly.vertices])\n #\n # write_str = '{} {}'.format(line_content,','.join(vertices))\n # f.write(write_str)\n # f.write('\\n')\n\nif __name__ == '__main__':\n test_image = r'D:\\ProgramData\\Python_Project\\OCR云测\\google\\google_image_dete\\book_computer\\IMG_20180412_111449.jpg'\n test_text = 'abc.txt'\n\n image_dir = 'google_image_dete'\n client = vision.ImageAnnotatorClient()\n # for root,dirs,files in os.walk(image_dir):\n # try:\n # for file in files:\n # if file.endswith('.jpg'):\n # image_path = os.path.join(root,file)\n # text_path = image_path.replace('.jpg','.txt')\n # print(image_path,text_path)\n # if os.path.exists(text_path):\n # continue\n # else:\n # detect_text(image_path,text_path,client)\n # except Exception as e:\n # print(e)\n # continue\n\n detect_text(test_image,test_text,client)","sub_path":"others/google/v3_dete.py","file_name":"v3_dete.py","file_ext":"py","file_size_in_byte":2212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"193637281","text":"#! 
/usr/bin/env python\n# -*- coding: utf-8 -*_\n# Author: Yunlong Feng \n\nfrom packaging import version\nfrom argparse import ArgumentParser\n\nimport torch\nfrom torch import nn\nfrom torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\nfrom transformers import AutoModel\nfrom ltp.nn import (\n SharedDropout, MLP, LSTM, Bilinear, BaseModule, Triaffine,\n MFVISemanticDependency, LBPSemanticDependency,\n)\nfrom ltp.transformer_biaffine import GraphResult\n\ntorch_version = version.parse(torch.__version__)\npack_padded_sequence_cpu_version = version.parse('1.7.0')\n\n\nclass ViClassifier(nn.Module):\n def __init__(self, input_size, label_num, dropout,\n lstm_hidden_size=600, lstm_num_layers=3, bin_hidden_size=150,\n arc_hidden_size=600, rel_hidden_size=600, loss_interpolation=0.4, inference='mfvi', max_iter=3):\n super().__init__()\n self.label_num = label_num\n self.loss_interpolation = loss_interpolation\n\n if lstm_num_layers > 0:\n self.lstm = LSTM(\n input_size=input_size,\n hidden_size=lstm_hidden_size,\n num_layers=lstm_num_layers,\n bidirectional=True,\n dropout=dropout\n )\n self.lstm_dropout = SharedDropout(p=dropout)\n hidden_size = lstm_hidden_size * 2\n else:\n self.lstm = None\n hidden_size = input_size\n\n self.mlp_bin_d = MLP([hidden_size, bin_hidden_size], output_dropout=dropout)\n self.mlp_bin_h = MLP([hidden_size, bin_hidden_size], output_dropout=dropout)\n self.mlp_bin_g = MLP([hidden_size, bin_hidden_size], output_dropout=dropout)\n self.mlp_arc_h = MLP([hidden_size, arc_hidden_size], output_dropout=dropout)\n self.mlp_arc_d = MLP([hidden_size, arc_hidden_size], output_dropout=dropout)\n self.mlp_rel_h = MLP([hidden_size, rel_hidden_size], output_dropout=dropout)\n self.mlp_rel_d = MLP([hidden_size, rel_hidden_size], output_dropout=dropout)\n\n self.sib_attn = Triaffine(bin_hidden_size, bias_x=True, bias_y=True)\n self.cop_attn = Triaffine(bin_hidden_size, bias_x=True, bias_y=True)\n self.grd_attn = Triaffine(bin_hidden_size, bias_x=True, bias_y=True)\n self.arc_atten = Bilinear(arc_hidden_size, arc_hidden_size, 1, bias_x=True, bias_y=True, expand=True)\n self.rel_atten = Bilinear(rel_hidden_size, rel_hidden_size, label_num, bias_x=True, bias_y=True, expand=True)\n\n self.vi = (MFVISemanticDependency if inference == 'mfvi' else LBPSemanticDependency)(max_iter)\n\n def forward(self, input, attention_mask=None, word_index=None, word_attention_mask=None, head=None, labels=None,\n is_processed=False):\n if not is_processed:\n assert word_attention_mask is not None\n input = input[:, :-1, :]\n if word_index is not None:\n input = torch.cat([input[:, :1, :], torch.gather(\n input[:, 1:, :], dim=1, index=word_index.unsqueeze(-1).expand(-1, -1, input.size(-1))\n )], dim=1)\n\n if self.lstm is not None:\n lengths = word_attention_mask.sum(1) + 1 # +cls\n if torch_version >= pack_padded_sequence_cpu_version:\n lengths = lengths.cpu()\n input = pack_padded_sequence(input, lengths, True, False)\n input, _ = self.lstm(input)\n input, _ = pad_packed_sequence(input, True, total_length=word_attention_mask.shape[1] + 1)\n input = self.lstm_dropout(input)\n\n bin_d = self.mlp_bin_d(input)\n bin_h = self.mlp_bin_h(input)\n bin_g = self.mlp_bin_g(input)\n\n arc_h = self.mlp_arc_h(input)\n arc_d = self.mlp_arc_d(input)\n\n rel_h = self.mlp_rel_h(input)\n rel_d = self.mlp_rel_d(input)\n\n # [batch_size, seq_len, seq_len, n_labels]\n s_sib = self.sib_attn(bin_d, bin_d, bin_h).triu_()\n s_sib = (s_sib + s_sib.transpose(-1, -2)).permute(0, 3, 1, 2)\n # [batch_size, seq_len, 
seq_len, n_labels]\n s_cop = self.cop_attn(bin_h, bin_d, bin_h).permute(0, 3, 1, 2).triu_()\n s_cop = s_cop + s_cop.transpose(-1, -2)\n # [batch_size, seq_len, seq_len, n_labels]\n s_grd = self.grd_attn(bin_g, bin_d, bin_h).permute(0, 3, 1, 2)\n\n # [batch_size, seq_len, seq_len]\n s_arc = self.arc_atten(arc_d, arc_h).squeeze_(1)\n # [batch_size, seq_len, seq_len, n_labels]\n s_rel = self.rel_atten(rel_d, rel_h).permute(0, 2, 3, 1)\n\n loss = None\n\n # cat cls\n mask = torch.cat([word_attention_mask[:, :1], word_attention_mask], dim=1)\n mask = mask.unsqueeze(1) & mask.unsqueeze(2)\n mask[:, 0] = 0\n\n if labels is not None:\n rel_loss = nn.CrossEntropyLoss()\n\n head = torch.cat([torch.zeros_like(head[:, :1, :], device=head.device), head], dim=1)\n arc_mask = head.gt(0) & mask\n arc_loss, arc_logits = self.vi((s_arc, s_sib, s_cop, s_grd), mask, head)\n\n labels = torch.cat([torch.zeros_like(labels[:, :1, :], device=labels.device), labels], dim=1)\n rel_loss = rel_loss(s_rel[arc_mask], labels[arc_mask])\n loss = self.loss_interpolation * rel_loss + (1 - self.loss_interpolation) * arc_loss\n else:\n arc_logits = self.vi((s_arc, s_sib, s_cop, s_grd), mask)\n\n return GraphResult(loss=loss, arc_logits=arc_logits, rel_logits=s_rel, src_arc_logits=s_arc)\n\n\nclass TransformerVi(BaseModule):\n def __init__(self, hparams, config=None):\n super().__init__()\n self.save_hyperparameters(hparams)\n if config is None:\n self.transformer = AutoModel.from_pretrained(self.hparams.transformer)\n else:\n self.transformer = AutoModel.from_config(config)\n self.dropout = nn.Dropout(self.hparams.dropout)\n hidden_size = self.transformer.config.hidden_size\n self.classifier = ViClassifier(\n input_size=hidden_size,\n label_num=self.hparams.num_labels,\n dropout=self.hparams.dropout,\n lstm_num_layers=self.hparams.lstm_num_layers,\n lstm_hidden_size=self.hparams.lstm_hidden_size,\n bin_hidden_size=self.hparams.bin_hidden_size,\n arc_hidden_size=self.hparams.arc_hidden_size,\n rel_hidden_size=self.hparams.rel_hidden_size,\n loss_interpolation=self.hparams.loss_interpolation,\n inference=self.hparams.inference,\n max_iter=self.hparams.max_iter,\n )\n\n @staticmethod\n def add_model_specific_args(parent_parser):\n parser = ArgumentParser(parents=[parent_parser], add_help=False, conflict_handler='resolve')\n parser.add_argument('--transformer', type=str, default=\"hfl/chinese-electra-base-discriminator\")\n parser.add_argument('--lstm_num_layers', type=int, default=0)\n parser.add_argument('--lstm_hidden_size', type=int, default=600)\n parser.add_argument('--arc_hidden_size', type=int, default=600)\n parser.add_argument('--rel_hidden_size', type=int, default=600)\n parser.add_argument('--bin_hidden_size', type=int, default=150)\n parser.add_argument('--loss_interpolation', type=float, default=0.1)\n parser.add_argument('--dropout', type=float, default=0.1)\n parser.add_argument('--num_labels', type=int)\n parser.add_argument('--inference', type=str, default='mfvi')\n parser.add_argument('--max_iter', type=int, default=3)\n return parser\n\n def forward(\n self,\n input_ids=None,\n logits_mask=None,\n attention_mask=None,\n word_index=None,\n word_attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n head=None,\n labels=None\n ) -> GraphResult:\n hidden_states = self.transformer(\n input_ids,\n attention_mask,\n token_type_ids,\n position_ids,\n head_mask,\n inputs_embeds,\n output_attentions=False,\n output_hidden_states=False,\n return_dict=False,\n )\n 
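# with return_dict=False the transformers model returns a plain tuple;\n        # element 0 is the last hidden state (batch, seq_len, hidden_size)\n        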
sequence_output = hidden_states[0]\n sequence_output = self.dropout(sequence_output)\n\n return self.classifier(\n input=sequence_output,\n attention_mask=attention_mask,\n word_index=word_index,\n word_attention_mask=word_attention_mask,\n head=head,\n labels=labels\n )\n","sub_path":"ltp/transformer_vi.py","file_name":"transformer_vi.py","file_ext":"py","file_size_in_byte":8616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"57536758","text":"import tkinter as tk\n\nfrom common import *\nfrom Gomoku.Board import Board\n\n\nclass BoardCanvas(tk.Canvas):\n def __init__(self, master=None, height=0, width=0):\n tk.Canvas.__init__(self, master, height=height, width=width)\n self.turn = 1\n self.game = Board()\n self.draw_board()\n self.previous_action = []\n\n def draw_board(self):\n # 15 horizontal lines\n for i in range(15):\n start_pixel_x = (i + 1) * 30\n start_pixel_y = (0 + 1) * 30\n end_pixel_x = (i + 1) * 30\n end_pixel_y = (14 + 1) * 30\n self.create_line(start_pixel_x, start_pixel_y, end_pixel_x, end_pixel_y)\n\n # 15 vertical lines\n for j in range(15):\n start_pixel_x = (0 + 1) * 30\n start_pixel_y = (j + 1) * 30\n end_pixel_x = (14 + 1) * 30\n end_pixel_y = (j + 1) * 30\n self.create_line(start_pixel_x, start_pixel_y, end_pixel_x, end_pixel_y)\n\n # place a \"star\" to particular intersections\n self.draw_star(3, 3)\n self.draw_star(11, 3)\n self.draw_star(7, 7)\n self.draw_star(3, 11)\n self.draw_star(11, 11)\n\n def draw_star(self, x, y):\n start_pixel_x = (x + 1) * 30 - 2\n start_pixel_y = (y + 1) * 30 - 2\n end_pixel_x = (x + 1) * 30 + 2\n end_pixel_y = (y + 1) * 30 + 2\n\n self.create_oval(start_pixel_x, start_pixel_y, end_pixel_x, end_pixel_y, fill='black')\n\n def draw_stone(self, x, y, turn):\n inner_start_x = (x + 1) * 30 - 4\n inner_start_y = (y + 1) * 30 - 4\n inner_end_x = (x + 1) * 30 + 4\n inner_end_y = (y + 1) * 30 + 4\n\n outer_start_x = (x + 1) * 30 - 6\n outer_start_y = (y + 1) * 30 - 6\n outer_end_x = (x + 1) * 30 + 6\n outer_end_y = (y + 1) * 30 + 6\n\n start_pixel_x = (x + 1) * 30 - 10\n start_pixel_y = (y + 1) * 30 - 10\n end_pixel_x = (x + 1) * 30 + 10\n end_pixel_y = (y + 1) * 30 + 10\n\n if turn == BLACK:\n self.create_oval(start_pixel_x, start_pixel_y, end_pixel_x, end_pixel_y, fill='black')\n self.create_oval(outer_start_x, outer_start_y, outer_end_x, outer_end_y, fill='white')\n self.create_oval(inner_start_x, inner_start_y, inner_end_x, inner_end_y, fill='black')\n elif turn == WHITE:\n self.create_oval(start_pixel_x, start_pixel_y, end_pixel_x, end_pixel_y, fill='white')\n self.create_oval(outer_start_x, outer_start_y, outer_end_x, outer_end_y, fill='black')\n self.create_oval(inner_start_x, inner_start_y, inner_end_x, inner_end_y, fill='white')\n\n def draw_prev_stone(self, x, y, turn):\n start_pixel_x = (x + 1) * 30 - 10\n start_pixel_y = (y + 1) * 30 - 10\n end_pixel_x = (x + 1) * 30 + 10\n end_pixel_y = (y + 1) * 30 + 10\n\n if turn == BLACK:\n self.create_oval(start_pixel_x, start_pixel_y, end_pixel_x, end_pixel_y, fill='black')\n elif turn == WHITE:\n self.create_oval(start_pixel_x, start_pixel_y, end_pixel_x, end_pixel_y, fill='white')\n\n def put_stone(self, x, y, turn):\n if not self.game.check_valid(x, y, turn):\n print('invalid action')\n return False\n\n self.game.set(x, y, turn)\n self.draw_stone(x, y, turn)\n\n if len(self.previous_action):\n pre_x, pre_y, pre_turn = self.previous_action[-1]\n self.draw_prev_stone(pre_x, pre_y, pre_turn)\n\n self.previous_action.append((x, y, 
turn))\n        return True\n\n    def on_board_clicked(self, event):\n        clicked_x, clicked_y = (event.x - 15) // 30, (event.y - 15) // 30\n        error_x, error_y = event.x - (clicked_x + 1) * 30, event.y - (clicked_y + 1) * 30\n        if abs(error_x) > 10 or abs(error_y) > 10:\n            return 0\n\n        if self.put_stone(clicked_x, clicked_y, self.turn):\n            self.change_turn()\n\n    def change_turn(self):\n        if self.turn == BLACK:\n            self.turn = WHITE\n        elif self.turn == WHITE:\n            self.turn = BLACK\n\n\nclass BoardFrame(tk.Frame):\n    def __init__(self, master=None):\n        tk.Frame.__init__(self, master)\n        self.board_canvas = BoardCanvas(height=550, width=480)\n        self.board_canvas.bind('<Button-1>', self.board_canvas.on_board_clicked)\n        self.board_canvas.pack()\n","sub_path":"GUI/frame.py","file_name":"frame.py","file_ext":"py","file_size_in_byte":4354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"523441825","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\ndef parse(x):\n    return x[9:]\n# Importing the dataset\ndataset = pd.read_csv('data.csv', header = None)\nX_temp = dataset.iloc[:,0].values\nX = np.reshape(np.array([parse(x) for x in X_temp], int), (-1, 1))\ny = dataset.iloc[:, 1].values\n\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.preprocessing import PolynomialFeatures\npoly_reg = PolynomialFeatures(degree = 4)\nX_poly = poly_reg.fit_transform(X)\nX_poly2 = X_poly[50:57,:]\ny2 = y[50:57]\npoly_reg.fit(X_poly, y)\nregressor = LinearRegression()\nregressor.fit(X_poly, y)\n\nX_res = np.reshape(np.array([i for i in range(61,73)]), (-1, 1))\nX_poly_res = poly_reg.fit_transform(X_res)\n\ny_act = [1563178,1312558,1501793,1388316,1325942,1410769,687396,1493945,1161128,590382,1082215,1416327]\n\nplt.plot(X, y, color = 'red')\nplt.plot(X, regressor.predict(poly_reg.fit_transform(X)), color = 'blue')\nplt.plot(X_res, regressor.predict(X_poly_res), color = 'blue')\nplt.plot(X_res, y_act, color = 'green')\n\n\n\n ","sub_path":"Machine Learning A-Z Template Folder/Other/Passenger/poly.py","file_name":"poly.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"136573518","text":"import sys\nfrom pyspark.sql import SparkSession\nfrom hdfs import InsecureClient\n\nimport time\nimport datetime\nimport pandas\n\nfrom cj_loader import CJ_Loader\nfrom cj_predictor import CJ_Predictor\nfrom cj_export import CJ_Export\n\ndef main():\n    \n    if len(sys.argv) < 4:\n        raise Exception(\"command must have 3 arguments\")\n    \n    # Specifies to Merge\n    send_update = True if sys.argv[1]==\"send\" else False\n    \n    # Overrides option to refit the model\n    arg_refit = True if sys.argv[2]==\"refit\" else False\n    \n    # Sets sample rate\n    arg_sample_rate = sys.argv[3]\n    \n    # send_update = True if len(sys.argv) >= 2 and (sys.argv[1]==\"1\") else False\n    print(\"Send_update = {}\".format(send_update))\n    update_model_every = 60*24*7 # in seconds\n    \n    start_processing = time.time()\n    \n    # Common classes\n    spark = SparkSession.builder.appName('analytical_attributes').getOrCreate()\n    wd = \"/user/kkotochigov/\"\n    hdfs_client = InsecureClient(\"http://159.69.60.71:50070\", \"hdfs\")\n    \n    \n    # Check whether We Need to Refit\n    model_modification_ts = next(iter([x[1]['modificationTime'] for x in hdfs_client.list(wd+\"models/\", status=True) if x[0] == \"model.pkl\"]), None)\n    model_needs_update = True if (model_modification_ts == None) or (time.time() - model_modification_ts > 
update_model_every) or (arg_refit) else False\n print(\"Refit = {}\".format(model_needs_update))\n \n # Load Data\n cjp = CJ_Loader(spark)\n cjp.set_organization(\"57efd33d-aaa5-409d-89ce-ff29a86d78a5\")\n cjp.load_cj(ts_from=(2010,12,10), ts_to=(2020,12,12))\n # cjp.load_cj(ts_from=(2018,12,1), ts_to=(2018,12,31))\n # cjp.cj_stats(ts_from=(2010,12,1), ts_to=(2020,12,31))\n cjp.cj_data.createOrReplaceTempView('cj')\n cjp.extract_attributes()\n cjp.process_attributes(features_mode=\"seq\", split_mode=\"all\")\n data = cjp.cj_dataset\n \n # data.to_parquet(wd+\"/data_export.parquet\")\n \n # Sample Dataset to Reduce Processing Time\n # if arg_sample_rate != 1.0:\n # (train_index, test_index) = StratifiedShuffleSplit(n_splits=1, train_size=arg_sample_rate).get_n_splits(data, data.target)\n \n # Make Model\n predictor = CJ_Predictor(wd+\"models/\", hdfs_client)\n predictor.set_data(data)\n predictor.optimize(batch_size=4096)\n \n start_fitting = time.time()\n result = predictor.fit(update_model=model_needs_update, batch_size=4096)\n \n scoring_distribution = result.return_score.value_counts(sort=False)\n \n print(\"Got Result Table with Rows = {}\".format(result.shape[0]))\n print(\"Score Distribution = \\n{}\".format(scoring_distribution))\n \n # Make Delta\n df = spark.createDataFrame(result)\n dm = CJ_Export(\"57efd33d-aaa5-409d-89ce-ff29a86d78a5\", \"model_update\", \"http://159.69.60.71:50070\", \"schema.avsc\")\n \n mapping = {\n 'id': {\n 'fpc': {\n 'primary': 10008,\n 'secondary': 10031\n },\n 'tpc': {\n 'primary':10005,\n 'secondary':-1\n }\n },\n 'attributes': {\n 'return_score': {\n 'primary': 10127,\n 'mapping': {\n '1': 10000,\n '2': 10001,\n '3': 10002,\n '4': 10003,\n '5': 10004\n }\n }\n }\n }\n \n # Publish Delta\n print(\"Send Update To Production = {}\".format(send_update))\n dm.make_delta(df, mapping, send_update=send_update)\n \n finish_fitting = time.time()\n \n # Store Run Metadata\n log_data = {\n \"dt\":[datetime.datetime.today().strftime('%Y-%m-%d %H-%m-%S')],\n \"loaded_rows\":[cjp.cj_data_rows],\n \"extracted_rows\":[cjp.cj_df_rows],\n \"processed_rows\":[cjp.cj_dataset_rows],\n \"refit_flag\":[model_needs_update],\n \"send_to_prod_flag\":[send_update],\n \"processing_time\":[round((start_fitting - start_processing)/60, 2)],\n \"fitting_time\":[round((finish_fitting - start_fitting)/60, 2)],\n \"target_rate\":[0.05],\n \"train_auc\":[predictor.train_auc],\n \"test_auc\":[predictor.test_auc[0]],\n \"test_auc_std\":[predictor.test_auc_std[0]],\n \"test_auc_lb\":[predictor.test_auc[0] - predictor.test_auc_std[0]],\n \"test_auc_ub\":[predictor.test_auc[0] + predictor.test_auc_std[0]],\n \"q1\":[scoring_distribution[0]],\n \"q2\":[scoring_distribution[1]],\n \"q3\":[scoring_distribution[2]],\n \"q4\":[scoring_distribution[3]],\n \"q5\":[scoring_distribution[4]]\n }\n # log = \";\".join(log_data)\n \n # log_path = wd+\"log/log.csv\"\n \n df = spark.createDataFrame(pandas.DataFrame(log_data))\n df=df.withColumn(\"dt\",df.dt.astype(\"Date\"))\n \n df.write.jdbc(url=\"jdbc:postgresql://bmw-prod-mn1:5432/analytics_monitoring\", table=\"model_stats\", mode=\"append\", properties = {\"password\":\"liquibase\", \"user\":\"liquibase\"})\n \n # if \"log.csv\" not in hdfs_client.list(wd+\"log/\"):\n # data_with_header = 'dt;loaded_rows;extracted_rows;processed_rows;refit_flag;send_to_prod_flag;processing_time;fitting_time;train_auc;test_auc;test_auc_std;q1;q2;q3;q4;q5\\n'+log + \"\\n\"\n # hdfs_client.write(log_path, data=bytes(data_with_header, encoding='utf8'), overwrite=True)\n 
# else:\n # with hdfs_client.read(log_path) as reader:\n # prev_log = reader.read()\n # new_log = prev_log + bytes(log + \"\\n\", encoding='utf8')\n # hdfs_client.write(log_path, data=new_log, overwrite=True)\n \n\n\n\nmain()\n\n\n\n\n\n\n\n \n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"367849220","text":"'''\nAuthor@ : kpkishankrishna\nThis program evaluates the square root of number using Newton Raphson method\n'''\ndef main():\n '''Main function.'''\n num_1 = int(input())\n epsilon = 0.01\n guess = num_1/2.0\n while abs(guess**2-num_1) >= epsilon:\n guess = guess-(((guess**2)-num_1)/(2*guess))\n print(guess)\nif __name__ == \"__main__\":\n main()\n","sub_path":"cspp1-assignments/m5/p4/square_root_newtonrapson.py","file_name":"square_root_newtonrapson.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"646998621","text":"from django import forms\nfrom django.contrib.auth.forms import UserCreationForm, UserChangeForm\nfrom django.forms import ModelForm\nfrom froala_editor.widgets import FroalaEditor\n\nfrom .models import CustomUser, Comment, Post\n\n\nclass CustomUserCreationForm(UserCreationForm):\n class Meta(UserCreationForm.Meta):\n model = CustomUser\n fields = ('username', 'email')\n\n\nclass CustomUserChangeForm(UserChangeForm):\n class Meta:\n model = CustomUser\n fields = ('username', 'email')\n\n\n# from ckeditor_uploader.widgets import CKEditorUploadingWidget\n\n\nclass CommentForm(forms.ModelForm):\n # body = forms.CharField(widget=FroalaEditor(options={\n # 'toolbarInline': True,\n # }))\n body = forms.CharField(widget=FroalaEditor())\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n for field in iter(self.fields):\n self.fields[field].widget.attrs.update({\n 'class': 'custom-control',\n 'cols': '1',\n 'rows': '1',\n 'resize': 'none'\n })\n\n class Meta:\n model = Comment\n fields = ['body']\n\n\nclass MainCommentForm(forms.ModelForm):\n body = forms.CharField(widget=FroalaEditor())\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n for field in iter(self.fields):\n self.fields[field].widget.attrs.update({\n 'class': 'custom-control-main',\n 'cols': '1',\n 'rows': '1',\n 'resize': 'none'\n })\n\n class Meta:\n model = Comment\n fields = ['body']\n\n\nclass PostForm(ModelForm):\n title = forms.CharField()\n body = forms.CharField(widget=FroalaEditor())\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n for field in iter(self.fields):\n self.fields[field].widget.attrs.update({\n 'class': 'form-control '\n })\n\n class Meta:\n model = Post\n fields = ['title', 'body', ]\n","sub_path":"app/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"203695882","text":"\n\n# - Class JSON - #\nclass JsonModel():\n def toJson(self):\n dict = self.__dict__\n dictio = {}\n invalid_keys = {\"_sa_instance_state\"}\n for x in dict :\n if x not in invalid_keys :\n if isinstance(dict[x],JsonModel):\n dictio[x.rsplit('_', 1)[-1]] = dict[x].toJson()\n else :\n dictio[x.rsplit('_', 1)[-1]] = dict[x]\n return dictio\n\n def getProp(self, propertie):\n return self.__dict__[propertie]\n\n\nclass MeteorologyJson(JsonModel):\n metro_timestamp = 0\n 
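# class-level defaults; __init__ below replaces both fields per instance\n    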
metro_weather = []\n\n def __init__(self, timestamp, weather):\n self.metro_timestamp = timestamp\n self.metro_weather = weather\n\n\nclass WeatherJson(JsonModel):\n weather_dfn = 0\n weather_type = 0\n\n def __init__(self, dfn, wtype):\n self.weather_dfn = dfn\n self.weather_type = wtype\n\n\ndef get_or_create(session, model, **kwargs):\n instance = session.query(model).filter_by(**kwargs).first()\n if instance:\n return instance\n else:\n instance = model(**kwargs)\n session.add(instance)\n session.commit()\n return instance\n\nmeteoJson = MeteorologyJson(0, 0)\nmeteoJsontoString = {\"timestamp\": 0, \"weather\": [{\"dfn\": 0, \"weather\": \"SOLEIL\"}, {\"dfn\": 1, \"weather\": \"SOLEIL\"}]}\n\ncurrentDay = 0\n\n\navailablesItems = []\ntomorrowActions = {}\nnbVentesPlayer = {}\nlastMessages = []\ncurrentHour = 0\nlastInfoFromPlayer = {}\nactualRecettesNumberAndPrices = {}","sub_path":"json_model.py","file_name":"json_model.py","file_ext":"py","file_size_in_byte":1497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"231388026","text":"from model.figure import Figure\n\nclass King(Figure):\n \n def __init__(self, board, x, y, color, player_type):\n Figure.__init__(self, board, x, y, color, player_type)\n\n def get_valid_moves(self) -> set:\n valid_moves = set()\n for i in range (-1, 2):\n for j in range(-1, 2):\n if self.posx + i < 8 and self.posy + j < 8 and self.posx + i >= 0 and self.posy + j >= 0 and not (i == 0 and j == 0):\n valid_moves.add((self.posx + i, self.posy + j))\n return valid_moves","sub_path":"model/figures/king.py","file_name":"king.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"214538399","text":"from django.conf.urls import url\nfrom django.contrib.auth.views import login, logout\n\nfrom .views import CreateBlogView, CreatePostView, ListPostByAuthorView, MyIndexView, MarkAsReadView, PostView\n\n\nurlpatterns = [\n url(r'^$', MyIndexView.as_view(), name='index'),\n url(r'^create_post/$', CreatePostView.as_view(), name='create_post'),\n url(r'^create_blog/$', CreateBlogView.as_view(), name='create_blog'),\n url(r'^blog/(?P\\w+)/$', ListPostByAuthorView.as_view(), name='post_list'),\n url(r'^post_isread/(?P\\w+)/$', MarkAsReadView.as_view(), name='read'),\n url(r'^post/(?P\\w+)/$', PostView.as_view(), name='post'),\n url(r'^login/$', login, name='login', kwargs=({'template_name': 'blog/login.html'})),\n url(r'^logout/$', logout, name='logout')\n]","sub_path":"test_blog/blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"588150017","text":"# -*- coding: utf-8 -*-\nimport torch\nfrom torch.utils.data import Dataset, TensorDataset, DataLoader\nimport numpy as np\nimport pandas as pd\n\nclass Normalization:\n def __init__(self,data):\n self.mu = torch.mean(data,dim=0)\n self.std = torch.std(data,dim=0)\n self.min = torch.min(torch.abs(data),dim=0)[0]\n self.max = torch.max(torch.abs(data),dim=0)[0]\n self.diff = self.max - self.min\n self.cols = data.size()[1]\n \n def normalize(self, data):\n \n for i in range(0, self.cols): \n # Scaling based on max value:\n data[:,i] = torch.div(data[:,i]-self.min[i], self.max[i]-self.min[i])\n \n return data\n \n \n def unnormalize(self, data):\n \n for i in range(0, self.cols):\n \n # Scaling based on max value:\n data[:,i] = torch.mul(data[:,i], 
self.max[i]-self.min[i]) +self.min[i]\n            \n        return data\n\n\n\n","sub_path":"normalization.py","file_name":"normalization.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"594496428","text":"from scraper.manning_scraper import ManningScraper\nfrom utils.manning_file_handler import ManningFileHandler\n\n\nclass ManningDataManager(object):\n\n    def __init__(self, url):\n        self._url = url\n        self._manning_scraper = None\n        self._cache_data = None\n\n    def get_data_from_web(self):\n        # if self._cache_data:\n        #     return self._cache_data\n        self._manning_scraper = ManningScraper(self._url)\n        self._cache_data = self._manning_scraper.get_all_data()\n        return self._cache_data\n\n    def get_data_from_file(self, path):\n        return ManningFileHandler.get_data_from_file(path)\n\n    def write_data_to_file(self, path):\n        # if self._cache_data:\n        #     ManningFileHandler.write_data_to_file(self._cache_data, path)\n        # else:\n        try:\n            ManningFileHandler.test_file(path)\n        except FileNotFoundError:\n            raise\n        else:\n            self._manning_scraper = ManningScraper(self._url)\n            ManningFileHandler.write_data_to_file(self._manning_scraper.get_all_data(), path)\n","sub_path":"manning/manager/manning_data_manager.py","file_name":"manning_data_manager.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"639545155","text":"import datetime\nfrom sqlalchemy import Column, Integer, String, ForeignKey, Boolean, DateTime\nfrom sqlalchemy.orm import relationship, backref\nfrom sqlalchemy.orm.exc import DetachedInstanceError\nfrom chacra.models import Base\nfrom chacra.controllers import util\n\n\nclass Binary(Base):\n\n    __tablename__ = 'binaries'\n    id = Column(Integer, primary_key=True)\n    name = Column(String(256), nullable=False, index=True)\n    path = Column(String(256))\n    ref = Column(String(256), index=True)\n    distro = Column(String(256), nullable=False, index=True)\n    distro_version = Column(String(256), nullable=False, index=True)\n    arch = Column(String(256), nullable=False, index=True)\n    built_by = Column(String(256))\n    created = Column(DateTime, index=True)\n    modified = Column(DateTime, index=True)\n    signed = Column(Boolean(), default=False)\n    size = Column(Integer, default=0)\n\n    project_id = Column(Integer, ForeignKey('projects.id'))\n    project = relationship('Project', backref=backref('binaries', lazy='dynamic'))\n\n    allowed_keys = [\n        'path',\n        'distro',\n        'distro_version',\n        'arch',\n        'ref',\n        'built_by',\n        'size',\n    ]\n\n    def __init__(self, name, project, **kw):\n        self.name = name\n        self.project = project\n        self.created = datetime.datetime.utcnow()\n        self.modified = datetime.datetime.utcnow()\n        for key in self.allowed_keys:\n            if key in kw.keys():\n                setattr(self, key, kw[key])\n\n    def __repr__(self):\n        try:\n            return '<Binary %r>' % self.name\n        except DetachedInstanceError:\n            return '<Binary detached>'\n\n    def update_from_json(self, data):\n        \"\"\"\n        We received a JSON blob with updated metadata information\n        that needs to update some fields\n        \"\"\"\n        for key in self.allowed_keys:\n            if key in data.keys():\n                setattr(self, key, data[key])\n\n    @property\n    def last_changed(self):\n        if self.modified > self.created:\n            last = self.modified\n        else:\n            last = self.created\n        return util.last_seen(last)\n\n    def __json__(self):\n        return dict(\n            name=self.name,\n            created=self.created,\n            modified=self.modified,\n            signed=self.signed,\n            size=self.size,\n            path=self.path,\n            
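# value produced by the last_changed property defined above\n            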
last_changed=self.last_changed,\n built_by=self.built_by,\n distro=self.distro,\n distro_version=self.distro_version,\n arch=self.arch,\n ref=self.ref,\n )\n\n","sub_path":"chacra/models/binaries.py","file_name":"binaries.py","file_ext":"py","file_size_in_byte":2609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"355450971","text":"#!/usr/bin/env python3\nimport os, subprocess, tempfile, time, shutil, sys\n\n# do the injection\nprint(\"*** Done. Now this won't hurt a bit...\")\nto_patch = \"out/smali/android/content/pm/PackageParser.smali\"\n\nf = open(to_patch, \"r\")\nold_contents = f.readlines()\nf.close()\n\nf = open(\"fillinsig.smali\", \"r\")\nfillinsig = f.readlines()\nf.close()\n\n# add fillinsig method\ni = 0\ncontents = []\nalready_patched = False\nin_function = False\nright_line = False\nstart_of_line = None\ndone_patching = False\nstored_register = \"v11\"\npartially_patched = False\n\nwhile i < len(old_contents):\n if \";->fillinsig\" in old_contents[i]:\n already_patched = True\n if \".method public static fillinsig\" in old_contents[i]:\n partially_patched = True\n if \".method public static generatePackageInfo(Landroid/content/pm/PackageParser$Package;[IIJJLjava/util/Set;Landroid/content/pm/PackageUserState;I)Landroid/content/pm/PackageInfo;\" in old_contents[i]:\n in_function = True\n if \".method public static generatePackageInfo(Landroid/content/pm/PackageParser$Package;[IIJJLandroid/util/ArraySet;Landroid/content/pm/PackageUserState;I)Landroid/content/pm/PackageInfo;\" in old_contents[i]:\n in_function = True\n if \".method public static generatePackageInfo(Landroid/content/pm/PackageParser$Package;[IIJJLjava/util/HashSet;Landroid/content/pm/PackageUserState;I)Landroid/content/pm/PackageInfo;\" in old_contents[i]:\n in_function = True\n if \".end method\" in old_contents[i]:\n in_function = False\n if in_function and \".line\" in old_contents[i]:\n start_of_line = i + 1\n if in_function and \"arraycopy\" in old_contents[i]:\n right_line = True\n if in_function and \"Landroid/content/pm/PackageInfo;->()V\" in old_contents[i]:\n stored_register = old_contents[i].split(\"{\")[1].split(\"}\")[0]\n if not already_patched and in_function and right_line and not done_patching:\n contents = contents[:start_of_line]\n contents.append(\"move-object/from16 v0, p0\\n\")\n contents.append(\"invoke-static {%s, v0}, Landroid/content/pm/PackageParser;->fillinsig(Landroid/content/pm/PackageInfo;Landroid/content/pm/PackageParser$Package;)V\\n\" % stored_register)\n done_patching = True\n else:\n contents.append(old_contents[i])\n i = i + 1\n\nif not already_patched and not partially_patched:\n contents.extend(fillinsig)\nelif partially_patched and not already_patched:\n print(\"??? Previous failed patch attempt, not including the fillinsig method again...\")\nelif already_patched:\n print(\"??? This framework.jar appears to already have been patched... 
Exiting.\")\n sys.exit(2)\n\nf = open(to_patch, \"w\")\ncontents = \"\".join(contents)\nf.write(contents)\nf.close()\n\nprint(\"*** Injection successful.\")\nsys.exit(0)\n","sub_path":"tar/patch.py","file_name":"patch.py","file_ext":"py","file_size_in_byte":2776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"560536039","text":"\n\n#calss header\nclass _OBLIGED():\n\tdef __init__(self,): \n\t\tself.name = \"OBLIGED\"\n\t\tself.definitions = [u'to be forced to do something or feel that you must do something: ', u'used to thank someone and say that you are grateful: ', u'used to ask someone politely to do something: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'adjectives'\n\n\n\tdef run(self, obj1, obj2):\n\t\tself.jsondata[obj2] = {}\n\t\tself.jsondata[obj2]['properties'] = self.name.lower()\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/adjectives/_obliged.py","file_name":"_obliged.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"398887862","text":"# -*- coding: UTF-8 -*-\n#\n# The MIT License\n#\n# Copyright (c) 2015 Felix Schwarz \n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n# I believe the license above is permissible enough so you can actually\n# use/relicense the code in any other project without license proliferation.\n# I'm happy to relicense this code if necessary for inclusion in other free\n# software projects.\n\nimport sys\n\n__all__ = ['basestring', 'exception_message', 'UPREFIX']\n\nUPREFIX = 'u' if (sys.version_info < (3, 0)) else ''\n\ntry:\n basestring = basestring\nexcept NameError:\n basestring = str\n\ndef exception_message(exception):\n if len(exception.args) == 0:\n return None\n return exception.args[0]\n\n","sub_path":"tests/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"653832977","text":"#! 
env python3\n\nsqr = int(input(\"Podaj liczbe, z ktorej wylicze pierwiastek: \"))\nstopien = int(input(\"Podaj stopien pierwiastka: \"))\n\nsrednia = 0\ne = 0.0001\nx = sqr / 2\ny = sqr / x\nc = stopien - 1\n\nwhile abs(x - (sqr/pow(x, c))) > e:\n x = ((c*x) + sqr/pow(x, c)) / stopien\nprint(x)","sub_path":"pierwiastki.py","file_name":"pierwiastki.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"473328702","text":"#!/usr/bin/env python\nimport siconos.numerics as sn\nimport siconos.kernel as sk\nfrom siconos.io.mechanics_run import MechanicsHdf5Runner\n\nfrom siconos.io.FrictionContactTrace import GlobalFrictionContactTraceParams\n\nimport chute\nimport rocas\nimport random\n\nrandom.seed(0)\n\ncube_size = 0.1\nplan_thickness = cube_size\ndensity = 2500\n\n#print('density',density)\n\nbox_height = 3.683\nbox_length = 6.900\nbox_width = 3.430\n\nplane_thickness = 0.2\n\n\nfrom params import *\n\n\ntest = False\nif test:\n n_layer = 20\n n_row = 4\n n_col = 4\n step = 10000\n hstep = 1e-3\n itermax=100\n options.iparam[sn.SICONOS_IPARAM_MAX_ITER] = itermax\n options.dparam[sn.SICONOS_DPARAM_TOL] = tolerance\nelse:\n n_layer = 100\n n_row = 4\n n_col = 16\n\n\nwith MechanicsHdf5Runner(mode='w') as io:\n ch = chute.create_chute(io, box_height=box_height,\n box_length=box_length,\n box_width=box_width,\n plane_thickness=plane_thickness,\n scale=1, trans=[-0.6, -1.8, -1])\n\n # The time of death is driven by the rate value.\n # For a layer, number n, the time of birth is given by n*rate+random.random()*rate*2/5\n # The travel time for a grain is 0.5*9.81*(rate)**2\n \n rcs = rocas.create_rocas(io, n_layer=n_layer, n_row=n_row, n_col=n_col,\n x_shift=2.0, roca_size=0.1, top=3,\n rate=0.25, density=density)\n\n io.add_Newton_impact_rolling_friction_nsl('contact', mu=1.0, mu_r=0.1, e=0.01)\n\n\nimport os \nbase = './Chute'\ncmp=0\noutput_dir_created = False\noutput_dir = base +'_0'\nwhile (not output_dir_created):\n print('output_dir', output_dir)\n if (os.path.exists(output_dir)):\n cmp =cmp+1\n output_dir = base + '_' + str(cmp)\n else:\n os.mkdir(output_dir)\n output_dir_created = True\n\n\nfileName = os.path.join(output_dir,'Chute')\n\n\nprint('itermax', itermax)\nsn.numerics_set_verbose(2)\nsk.solver_options_print(options)\ntitle = \"Chute with rolling friction\"\ndescription = \"\"\"\nChute with 6400 polyhedra with Bullet collision detection\nMoreau TimeStepping: h={0}, theta = {1}\nOne Step non smooth problem: {2}, maxiter={3}, tol={4}\n\"\"\".format(hstep,\n theta,\n sk.solver_options_id_to_name(solver_id),\n itermax,\n tolerance)\nmathInfo = \"\"\n\nfriction_contact_trace_params = GlobalFrictionContactTraceParams(\n dump_itermax=dump_itermax, dump_probability=dump_probability,\n fileName=fileName, title=title,\n description=description, mathInfo=mathInfo)\nwith MechanicsHdf5Runner(mode='r+', collision_margin=0.01) as io:\n # By default earth gravity is applied and the units are those\n # of the International System of Units.\n # Because of fixed collision margins used in the collision detection,\n # sizes of small objects may need to be expressed in cm or mm.\n if test:\n io.run(gravity_scale=1.0,\n t0=0,\n T=step * hstep,\n h=hstep,\n multipoints_iterations=True,\n theta=1.0,\n Newton_max_iter=1,\n output_frequency=10,\n osi=sk.MoreauJeanGOSI,\n solver_options=options,\n friction_contact_trace_params=friction_contact_trace_params)\n else:\n io.run(gravity_scale=1.0,\n t0=0,\n T=step * 
hstep,\n               h=hstep,\n               multipoints_iterations=True,\n               theta=1.0,\n               Newton_max_iter=1,\n               output_frequency=10,\n               osi=sk.MoreauJeanGOSI,\n               solver_options=options,\n               numerics_verbose=True,\n               friction_contact_trace_params=friction_contact_trace_params)\n","sub_path":"siconos/GlobalRolling/Chute/chute_con_rocas-MoreauJeanGOSI.py","file_name":"chute_con_rocas-MoreauJeanGOSI.py","file_ext":"py","file_size_in_byte":3769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"410162024","text":"from routeDirectionCorpusReader import TextStandoff, Annotation\nimport crfEntityExtractor\n\nclass direction_parser_sdc:\n    def __init__(self):\n        self.extractor = crfEntityExtractor.SdcExtractor()\n\n    def extract_SDCs(self, mystr):\n        return self.extractor.chunk(mystr)\n\n\ndef sdc_hmap_to_sdc_standoff(sdc_hmap):\n    \n    sent = sdc_hmap[\"figure\"]+\" \"+sdc_hmap[\"verb\"]+\" \"+sdc_hmap[\"sr\"]+\" \"+sdc_hmap[\"landmark\"]\n    l1 = len(sdc_hmap[\"figure\"])\n    l2 = len(sdc_hmap[\"verb\"])\n    l3 = len(sdc_hmap[\"sr\"])\n    l4 = len(sdc_hmap[\"landmark\"])\n    \n    t1 = TextStandoff(sent, (0,l1))\n    t2 = TextStandoff(sent, (l1+1,l1+1+l2))\n    t3 = TextStandoff(sent, (l1+l2+2,l1+l2+2+l3))\n    t4 = TextStandoff(sent, (l1+l2+l3+3,l1+l2+l3+3+l4))\n    \n    return Annotation(figure=t1, verb=t2, spatialRelation=t3, landmark=t4)\n","sub_path":"pytools/utilities/python/sdc_util.py","file_name":"sdc_util.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"145061948","text":"import asyncio\nimport config\nimport discord\nfrom discord.ext import commands\nimport sys\nimport traceback\n\nextensions = (\n    'cogs.simple',\n    'cogs.twitter',\n)\n\nclass UnimportantBot(commands.Bot):\n    def __init__(self):\n        super().__init__(command_prefix = '!')\n        self.token = config.token\n        self.tw_consumer_key = config.tw_consumer_key\n        self.tw_consumer_secret = config.tw_consumer_secret\n        self.tw_access_token = config.tw_access_token\n        self.tw_access_token_secret = config.tw_access_token_secret\n\n        for extension in extensions:\n            try:\n                self.load_extension(extension)\n            except Exception as e:\n                print('Failed to load extension.', file=sys.stderr)\n                traceback.print_exc()\n        super().run(self.token)\n\n    async def on_message(self, message):\n        server = message.server\n        if message.author.bot or '!nobot' in message.content:\n            return\n        if '69' in message.content:\n            nice = next((e for e in server.emojis if e.name == 'nice'), None)\n            await self.add_reaction(message, nice)\n        await self.process_commands(message)\n\nbot = UnimportantBot()\n","sub_path":"UnimportantBot.py","file_name":"UnimportantBot.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"125361589","text":"#Time Complexity : O(N) where N is number of elements in grid\n#Space Complexity : O(B) maximum breadth\nclass Solution(object):\n    def orangesRotting(self, grid):\n        \"\"\"\n        :type grid: List[List[int]]\n        :rtype: int\n        \"\"\"\n        dirs = [(0,1),(1,0),(-1,0),(0,-1)]\n        Queue = []\n        time = 0\n        m = len(grid)\n        n = len(grid[0])\n        fresh = 0\n        \n        for i in range(m):\n            for j in range(n):\n                if grid[i][j] == 1:\n                    fresh +=1\n                if grid[i][j] == 2:\n                    Queue.append([i,j])\n        if fresh == 0:\n            return fresh \n        while(len(Queue)>0):\n            size = len(Queue)\n            for i in range(size):\n                curr = Queue.pop(0)\n                for s in dirs:\n                    r = s[0] + curr[0]\n                    c = s[1] + curr[1]\n                    if r >= 0 and r < m and c>=0 and c (3, 0)) 
else False\n\ndef exists():\n return True\n\ndef regexp_check(pattern, text):\n return re.search(pattern, text, re.MULTILINE)\n\n\ndef more_recent(pattern, text):\n version = text.strip()\n return version >= pattern\n\n\n# A list of tools to check.\nTOOLS = [\n # Name, pattern, required, match_func\n ('bwa', '', True, regexp_check),\n ('datamash --version', '', True, regexp_check),\n ('fastqc --version', '', True, regexp_check),\n ('hisat2', '', True, regexp_check),\n ('seqret --version', '', True, regexp_check),\n ('subread-align', '', True, regexp_check),\n ('featureCounts', '', True, regexp_check),\n ('efetch -version', '', True, exists),\n ('esearch -version', '', True, exists),\n ('samtools --version', '1.3', True, more_recent),\n ('fastq-dump -version', '2.8.0', True, more_recent),\n ('global-align.sh', '', False, regexp_check),\n ('local-align.sh', '', False, regexp_check),\n]\n\ndef bash_check():\n bashrc = expanduser(\"~/.bashrc\")\n bashprofile = expanduser(\"~/.bash_profile\")\n\ndef path_check():\n errors = 0\n # The PATH variable\n paths = os.environ.get('PATH').split(':')\n bindir = expanduser(\"~/bin\")\n\n #\n # We need ~/bin to be in the PATH\n #\n if bindir not in paths:\n errors += 1\n print(\"# The ~/bin folder is not in your PATH!\")\n\n return errors\n\n\ndef tool_check(tools):\n errors = 0\n print(\"# Checking {} symptoms...\".format(len(tools)))\n for cmd, pattern, required, callback in tools:\n args = cmd.split()\n try:\n proc = subprocess.Popen(args, stderr=subprocess.PIPE, stdout=subprocess.PIPE)\n stdout, stderr = proc.communicate()\n except OSError as exc:\n if required:\n word = cmd.split()[0]\n print(\"# ERROR! Missing program: {}\".format(word))\n errors += 1\n else:\n print(\"# Optional program not found: {}\".format(cmd))\n continue\n\n stdout = stdout.decode('utf-8')\n stderr = stderr.decode('utf-8')\n\n output = stdout + stderr\n\n if pattern:\n if not callback(pattern, output):\n print(\"# Version {} mismatch for: {}\".format(pattern, cmd))\n errors += 1\n continue\n\n return errors\n\nFIXME = \"\"\"\n#\n# How to delete your environment and reinstall everything.\n#\n\nsource deactivate\nconda update conda -y\nconda remove --name bioinfo --all -y\nconda create --name bioinfo python=3.6 -y \ncurl http://data.biostarhandbook.com/install/conda.txt | xargs conda install -y\n\n#\n# How to install Entrez Direct from source.\n#\n\nmkdir -p ~/src\ncurl ftp://ftp.ncbi.nlm.nih.gov/entrez/entrezdirect/edirect.zip > ~/src/edirect.zip\nunzip -o ~/src/edirect.zip -d ~/src\necho 'export PATH=~/src/edirect:$PATH' >> ~/.bash_profile\nsource ~/.bash_profile\n\n\"\"\"\n\n\ndef fixme():\n print (FIXME)\n\ndef health_check():\n\n errors = 0\n errors += path_check()\n errors += tool_check(tools=TOOLS)\n\n if errors:\n if errors == 1:\n print(\"# Your system shows 1 error!\")\n else:\n print(\"# Your system shows {} errors.\".format(errors))\n print(\"# See also: doctor.py --fixme\")\n else:\n print(\"# You are doing well!\")\n\nif __name__ == '__main__':\n if '--fixme' in sys.argv:\n fixme()\n else:\n print(\"# Doctor! Doctor! Give me the news.\")\n health_check()\n","sub_path":"proj/bin/doctor.py","file_name":"doctor.py","file_ext":"py","file_size_in_byte":3682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"613803749","text":"# Q1. 
write a Python program to check if the input year is a leap year or not.\n\n\ndef input_positive_integer(msg):\n try:\n num = int(input(msg))\n except ValueError:\n print(\"Please input positive integers only!\")\n num = int(input(msg))\n if num < 0:\n print(\"Please input positive integers only!\")\n num = int(input(msg))\n return num\n\n\ndef leap_or_not(year):\n if year % 4 == 0 and year % 100 != 0:\n return True\n elif year % 4 == 0 and year % 100 == 0 and year % 400 == 0:\n return True\n\n\ndef main():\n year = input_positive_integer(\"Please input an year : \")\n print(\"It's Leap Year.\" if leap_or_not(year) else \"It's not a leap year\")\n\n\nmain()\n","sub_path":"Python/Assignment6/question1.py","file_name":"question1.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"402301321","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the formingMagicSquare function below.\ndef formingMagicSquare(s):\n s = sum(s, []) # flaten s\n\n # All possible magic squares of 3x3 order\n magic_squares = [\n [8, 1, 6, 3, 5, 7, 4, 9, 2],\n [6, 1, 8, 7, 5, 3, 2, 9, 4],\n [4, 9, 2, 3, 5, 7, 8, 1, 6],\n [2, 9, 4, 7, 5, 3, 6, 1, 8],\n [8, 3, 4, 1, 5, 9, 6, 7, 2],\n [4, 3, 8, 9, 5, 1, 2, 7, 6],\n [6, 7, 2, 1, 5, 9, 8, 3, 4],\n [2, 7, 6, 9, 5, 1, 4, 3, 8],\n ]\n\n costs = [] # this variable will contain all possible costs\n\n for magic_square in magic_squares:\n costs.append(sum([abs(magic_square[i] - s[i]) for i in range(9)]))\n\n return min(costs)\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n s = []\n\n for _ in range(3):\n s.append(list(map(int, input().rstrip().split())))\n\n result = formingMagicSquare(s)\n\n fptr.write(str(result) + '\\n')\n\n fptr.close()\n","sub_path":"HackerRank/Problem Solving/Forming a Magic Square.py","file_name":"Forming a Magic Square.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"139591026","text":"import datetime\n\nfrom django.db import IntegrityError\n\nimport json\n\nfrom Poem.api.views import NotFound\nfrom Poem.helpers.history_helpers import create_history, update_comment\nfrom Poem.poem import models as poem_models\nfrom Poem.poem_super_admin import models as admin_models\nfrom Poem.tenants.models import Tenant\n\nfrom rest_framework import status\nfrom rest_framework.authentication import SessionAuthentication\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom tenant_schemas.utils import schema_context, get_public_schema_name\n\n\nclass ListProbes(APIView):\n authentication_classes = (SessionAuthentication,)\n\n def get(self, request, name=None):\n if name:\n try:\n probe = admin_models.Probe.objects.get(name=name)\n\n if probe.datetime:\n probe_datetime = datetime.datetime.strftime(\n probe.datetime, '%Y-%m-%dT%H:%M:%S.%f'\n )\n else:\n probe_datetime = ''\n\n result = dict(\n id=probe.id,\n name=probe.name,\n version=probe.package.version,\n package=probe.package.__str__(),\n docurl=probe.docurl,\n description=probe.description,\n comment=probe.comment,\n repository=probe.repository,\n user=probe.user,\n datetime=probe_datetime\n )\n\n return Response(result)\n\n except admin_models.Probe.DoesNotExist:\n raise NotFound(status=404, detail='Probe not found')\n\n else:\n probes = admin_models.Probe.objects.all()\n\n results = []\n for probe in 
probes:\n # number of probe revisions\n nv = admin_models.ProbeHistory.objects.filter(\n object_id=probe\n ).count()\n\n results.append(\n dict(\n name=probe.name,\n version=probe.package.version,\n package=probe.package.__str__(),\n docurl=probe.docurl,\n description=probe.description,\n comment=probe.comment,\n repository=probe.repository,\n nv=nv\n )\n )\n\n results = sorted(results, key=lambda k: k['name'].lower())\n\n return Response(results)\n\n def put(self, request):\n schemas = list(\n Tenant.objects.all().values_list('schema_name', flat=True)\n )\n schemas.remove(get_public_schema_name())\n\n probe = admin_models.Probe.objects.get(id=request.data['id'])\n old_name = probe.name\n package_name = request.data['package'].split(' ')[0]\n package_version = request.data['package'].split(' ')[1][1:-1]\n package = admin_models.Package.objects.get(\n name=package_name, version=package_version\n )\n old_version = probe.package.version\n\n try:\n if package.version != old_version:\n probe.name = request.data['name']\n probe.package = package\n probe.repository = request.data['repository']\n probe.docurl = request.data['docurl']\n probe.description = request.data['description']\n probe.comment = request.data['comment']\n probe.user = request.user.username\n\n probe.save()\n create_history(probe, probe.user)\n\n if request.data['update_metrics'] in [True, 'true', 'True']:\n metrictemplates = \\\n admin_models.MetricTemplate.objects.filter(\n probekey__name=old_name,\n probekey__package__version=old_version\n )\n\n for metrictemplate in metrictemplates:\n metrictemplate.probekey = \\\n admin_models.ProbeHistory.objects.get(\n name=probe.name,\n package__version=probe.package.version\n )\n metrictemplate.save()\n create_history(metrictemplate, request.user.username)\n\n else:\n history = admin_models.ProbeHistory.objects.filter(\n name=old_name, package__version=old_version\n )\n probekey = history[0]\n new_data = {\n 'name': request.data['name'],\n 'package': package,\n 'description': request.data['description'],\n 'comment': request.data['comment'],\n 'repository': request.data['repository'],\n 'docurl': request.data['docurl'],\n 'user': request.user.username\n }\n admin_models.Probe.objects.filter(pk=probe.id).update(\n **new_data\n )\n\n del new_data['user']\n new_data.update({\n 'version_comment': update_comment(\n admin_models.Probe.objects.get(\n id=request.data['id']\n )\n )\n })\n history.update(**new_data)\n\n # update Metric history in case probekey name has changed:\n if request.data['name'] != old_name:\n for schema in schemas:\n with schema_context(schema):\n metrics = poem_models.Metric.objects.filter(\n probekey=probekey\n )\n\n for metric in metrics:\n vers = poem_models.TenantHistory.objects.filter(\n object_id=metric.id\n )\n\n for ver in vers:\n serialized_data = json.loads(\n ver.serialized_data\n )\n\n serialized_data[0]['fields']['probekey'] = \\\n [request.data['name'],\n package.version]\n\n ver.serialized_data = json.dumps(\n serialized_data\n )\n ver.save()\n\n return Response(status=status.HTTP_201_CREATED)\n\n except IntegrityError:\n return Response(\n {'detail': 'Probe with this name already exists.'},\n status=status.HTTP_400_BAD_REQUEST\n )\n\n def post(self, request):\n package_name = request.data['package'].split(' ')[0]\n package_version = request.data['package'].split(' ')[1][1:-1]\n try:\n probe = admin_models.Probe.objects.create(\n name=request.data['name'],\n package=admin_models.Package.objects.get(\n name=package_name, version=package_version\n ),\n 
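# the 'package' field arrives as \"name (version)\" and was split above\n                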
repository=request.data['repository'],\n docurl=request.data['docurl'],\n description=request.data['description'],\n comment=request.data['comment'],\n user=request.user.username,\n datetime=datetime.datetime.now()\n )\n\n if request.data['cloned_from']:\n clone = admin_models.Probe.objects.get(\n id=request.data['cloned_from']\n )\n comment = 'Derived from {} ({}).'.format(\n clone.name, clone.package.version\n )\n create_history(probe, probe.user, comment=comment)\n\n else:\n create_history(probe, probe.user)\n\n return Response(status=status.HTTP_201_CREATED)\n\n except IntegrityError:\n return Response({'detail': 'Probe with this name already exists.'},\n status=status.HTTP_400_BAD_REQUEST)\n\n def delete(self, request, name=None):\n schemas = list(\n Tenant.objects.all().values_list('schema_name', flat=True)\n )\n schemas.remove(get_public_schema_name())\n if name:\n try:\n probe = admin_models.Probe.objects.get(name=name)\n mt = admin_models.MetricTemplate.objects.filter(\n probekey=admin_models.ProbeHistory.objects.get(\n name=probe.name, package__version=probe.package.version\n )\n )\n if len(mt) == 0:\n for schema in schemas:\n # need to iterate through schemas because of foreign\n # key in Metric model\n with schema_context(schema):\n admin_models.ProbeHistory.objects.filter(\n object_id=probe\n ).delete()\n probe.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n else:\n return Response(\n {'detail': 'You cannot delete Probe that is associated '\n 'to metric templates!'},\n status=status.HTTP_400_BAD_REQUEST\n )\n\n except admin_models.Probe.DoesNotExist:\n raise NotFound(status=404, detail='Probe not found')\n\n else:\n return Response(status=status.HTTP_400_BAD_REQUEST)\n","sub_path":"poem/Poem/api/internal_views/probes.py","file_name":"probes.py","file_ext":"py","file_size_in_byte":10054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"513250419","text":"\"\"\" Sequence index definitions for samplesheet generator.\n\n2013-03-14\nRemoved the interpretation of \"_{number}\" as a number for\na TruSeq Illumina index. 
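(So a sample id such as \"sample_12\" is no longer read as carrying TruSeq index 12.)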
Now a prefix to the number is required.\nThis was done to reduce the risk of misinterpreting a sample id\nas an index specification.\n\nPer Kraulis, Pontus Larsson\n\"\"\"\n\n# The module variabel BASIC_LOOKUP contains the primary definitions of\n# the sequence indexes and their names.\nBASIC_LOOKUP = dict()\n\n# The module variable INDEX_LOOKUP, contains a number of aliases\n# for the index names.\nINDEX_LOOKUP = dict()\n\n# The Illumina index number-to-sequence mappings have been double-checked\n# against the documentation from Illumina dated 2011-10-11.\n# index1-index27 are from the table \"TruSeq RNA and DNA Sample Prep Kits\".\n# Identifying prefixes: 'index', '', 'idx', 'in', 'i'\nILLUMINA = dict(index1='ATCACG',\n index2='CGATGT',\n index3='TTAGGC',\n index4='TGACCA',\n index5='ACAGTG',\n index6='GCCAAT',\n index7='CAGATC',\n index8='ACTTGA',\n index9='GATCAG',\n index10='TAGCTT',\n index11='GGCTAC',\n index12='CTTGTA',\n index13='AGTCAA',\n index14='AGTTCC',\n index15='ATGTCA',\n index16='CCGTCC',\n index17='GTAGAG', # 17 is \"reserved\" by Illumina, used by others\n index18='GTCCGC',\n index19='GTGAAA',\n index20='GTGGCC',\n index21='GTTTCG',\n index22='CGTACG',\n index23='GAGTGG',\n index24='GGTAGC', # 24 is \"reserved\" by Illumina, used by others\n index25='ACTGAT',\n # index26 is \"reserved\" by Illumina\n index27='ATTCCT')\nBASIC_LOOKUP.update(ILLUMINA)\nINDEX_LOOKUP.update(ILLUMINA)\nINDEX_LOOKUP.update(dict([(k.replace('index', 'idx'), v)\n for k,v in ILLUMINA.items()]))\nINDEX_LOOKUP.update(dict([(k.replace('index', 'in'), v)\n for k,v in ILLUMINA.items()]))\nINDEX_LOOKUP.update(dict([(k.replace('index', 'i'), v)\n for k,v in ILLUMINA.items()]))\n\n# rpi1-rpi48 are from the table \"TruSeq Small RNA Sample Prep Kits\",\n# after reverse-complement conversion.\n# RPI indexes for \"TruSeq Small RNA\", \n# These are reverse-complement of Illumina documentation\n# Identifying prefixes: 'rpi', 'r', 'indexr'\nRPI = dict(rpi1='ATCACG',\n rpi2='CGATGT',\n rpi3='TTAGGC',\n rpi4='TGACCA',\n rpi5='ACAGTG',\n rpi6='GCCAAT',\n rpi7='CAGATC',\n rpi8='ACTTGA',\n rpi9='GATCAG',\n rpi10='TAGCTT',\n rpi11='GGCTAC',\n rpi12='CTTGTA',\n rpi13='AGTCAA',\n rpi14='AGTTCC',\n rpi15='ATGTCA',\n rpi16='CCGTCC',\n rpi17='GTAGAG',\n rpi18='GTCCGC',\n rpi19='GTGAAA',\n rpi20='GTGGCC',\n rpi21='GTTTCG',\n rpi22='CGTACG',\n rpi23='GAGTGG',\n rpi24='GGTAGC',\n rpi25='ACTGAT',\n rpi26='ATGAGC',\n rpi27='ATTCCT',\n rpi28='CAAAAG',\n rpi29='CAACTA',\n rpi30='CACCGG',\n rpi31='CACGAT',\n rpi32='CACTCA',\n rpi33='CAGGCG',\n rpi34='CATGGC',\n rpi35='CATTTT',\n rpi36='CCAACA',\n rpi37='CGGAAT',\n rpi38='CTAGCT',\n rpi39='CTATAC',\n rpi40='CTCAGA',\n rpi41='GACGAC',\n rpi42='TAATCG',\n rpi43='TACAGC',\n rpi44='TATAAT',\n rpi45='TCATTC',\n rpi46='TCCCGA',\n rpi47='TCGAAG',\n rpi48='TCGGCA')\nBASIC_LOOKUP.update(RPI)\nINDEX_LOOKUP.update(RPI)\nINDEX_LOOKUP.update(dict([(k.replace('rpi', 'r'), v)\n for k,v in RPI.items()]))\nINDEX_LOOKUP.update(dict([(k.replace('rpi', 'indexr'), v)\n for k,v in RPI.items()]))\n\n# The Agilent indexes agilent1-agilent96 are from the Google Docs spreadsheet\n# \"illumina 96 barcodes plate format_column arrangement\" by Joel Gruselius.\n# Identifying prefixes: 'agilent', 'a', 'indexa'\nAGILENT = dict(agilent1='ATCACG',\n agilent2='CGATGT',\n agilent3='TTAGGC',\n agilent4='TGACCA',\n agilent5='ACAGTG',\n agilent6='GCCAAT',\n agilent7='CAGATC',\n agilent8='ACTTGA',\n agilent9='GATCAG',\n agilent10='TAGCTT',\n agilent11='GGCTAC',\n agilent12='CTTGTA',\n agilent13='AAACAT',\n 
agilent14='CAAAAG',\n agilent15='GAAACC',\n agilent16='TAATCG',\n agilent17='AAAGCA',\n agilent18='CAACTA',\n agilent19='GAATAA',\n agilent20='TACAGC',\n agilent21='AAATGC',\n agilent22='CACCGG',\n agilent23='GACGGA',\n agilent24='AGGCCG',\n agilent25='AACAAA',\n agilent26='CACGAT',\n agilent27='GATATA',\n agilent28='TATAAT',\n agilent29='AACCCC',\n agilent30='CACTCA',\n agilent31='GATGCT',\n agilent32='TCATTC',\n agilent33='AACTTG',\n agilent34='CAGGCG',\n agilent35='GCAAGG',\n agilent36='ATAATT',\n agilent37='AAGACT',\n agilent38='CATGGC',\n agilent39='GCACTT',\n agilent40='TCCCGA',\n agilent41='AAGCGA',\n agilent42='CATTTT',\n agilent43='GCCGCG',\n agilent44='TCGAAG',\n agilent45='AAGGAC',\n agilent46='CCAACA',\n agilent47='GCCTTA',\n agilent48='ATACGG',\n agilent49='AATAGG',\n agilent50='CCACGC',\n agilent51='GCTCCA',\n agilent52='TCGGCA',\n agilent53='ACAAAC',\n agilent54='CCCATG',\n agilent55='GGCACA',\n agilent56='TCTACC',\n agilent57='ACATCT',\n agilent58='CCCCCT',\n agilent59='GGCCTG',\n agilent60='ATCCTA',\n agilent61='ACCCAG',\n agilent62='CCGCAA',\n agilent63='GTAGAG',\n agilent64='TGAATG',\n agilent65='ACCGGC',\n agilent66='CCTTAG',\n agilent67='GTCCGC',\n agilent68='TGCCAT',\n agilent69='ACGATA',\n agilent70='CGAGAA',\n agilent71='GTGAAA',\n agilent72='ATCTAT',\n agilent73='ACTCTC',\n agilent74='CGGAAT',\n agilent75='GTGGCC',\n agilent76='TGCTGG',\n agilent77='ACTGAT',\n agilent78='CTAGCT',\n agilent79='GTTTCG',\n agilent80='TGGCGC',\n agilent81='AGAAGA',\n agilent82='CTATAC',\n agilent83='CGTACG',\n agilent84='ATGAGC',\n agilent85='AGATAG',\n agilent86='CTCAGA',\n agilent87='GAGTGG',\n agilent88='TTCGAA',\n agilent89='AGCATC',\n agilent90='CTGCTG',\n agilent91='GGTAGC',\n agilent92='TTCTCC',\n agilent93='AGCGCT',\n agilent94='CCGTCC',\n agilent95='ATTCCT',\n agilent96='AGGTTT')\nBASIC_LOOKUP.update(AGILENT)\nINDEX_LOOKUP.update(AGILENT)\nINDEX_LOOKUP.update(dict([(k.replace('agilent', 'a'), v)\n for k,v in AGILENT.items()]))\nINDEX_LOOKUP.update(dict([(k.replace('agilent', 'indexa'), v)\n for k,v in AGILENT.items()]))\n\n# Indexes mondrian1-mondrian16 are from the PDF \"User Guide for ovation\n# SP Ultralow Library System\" a.k.a. 
Mondrian system.\n# Identifying prefixes: 'mondrian', 'm', 'indexm'\nMONDRIAN = dict(mondrian1='AAGGGA',\n mondrian2='CCTTCA',\n mondrian3='GGACCC',\n mondrian4='TTCAGC',\n mondrian5='AAGACG',\n mondrian6='CCTCGG',\n mondrian7='GGATGT',\n mondrian8='TTCGCT',\n mondrian9='ACACGA',\n mondrian10='CACACA',\n mondrian11='GTGTTA',\n mondrian12='TGTGAA',\n mondrian13='ACAAAC',\n mondrian14='CACCTC',\n mondrian15='GTGGCC',\n mondrian16='TGTTGC')\nBASIC_LOOKUP.update(MONDRIAN)\nINDEX_LOOKUP.update(MONDRIAN)\nINDEX_LOOKUP.update(dict([(k.replace('mondrian', 'm'), v)\n for k,v in MONDRIAN.items()]))\nINDEX_LOOKUP.update(dict([(k.replace('mondrian', 'indexm'), v)\n for k,v in MONDRIAN.items()]))\n\n# Indexes halo1-halo96 are from the PDF \"Haloplex PCR Target Enrichment &\n# Library Preparation Guide, Version 2.0, November 2011\"\n# Identifying prefixes: 'halo', 'h', 'indexh'\nHALO = dict(halo1='CTCGGT',\n halo2='AATCGT',\n halo3='GCGCGT',\n halo4='CGAAGT',\n halo5='TATTCT',\n halo6='AGATCT',\n halo7='CAGGCT',\n halo8='TCCGCT',\n halo9='GGTCCT',\n halo10='TCGTAT',\n halo11='GTCCAT',\n halo12='GATTGG',\n halo13='TTACGG',\n halo14='CCTTCG',\n halo15='GGAGCG',\n halo16='ACGCAG',\n halo17='TGCCAG',\n halo18='GAGAAG',\n halo19='ATCAAG',\n halo20='CGATTC',\n halo21='ACCGTC',\n halo22='TAAGTC',\n halo23='TTCATC',\n halo24='AGCAGC',\n halo25='GCGTCC',\n halo26='AGGTAC',\n halo27='ACGTTA',\n halo28='AACCTA',\n halo29='TGGATA',\n halo30='TTATCA',\n halo31='ATAGAA',\n halo32='CTGGTT',\n halo33='GGAGTT',\n halo34='TACCTT',\n halo35='TCTACT',\n halo36='ATAACT',\n halo37='GAGTAT',\n halo38='AGCTAT',\n halo39='CAAGAT',\n halo40='TCGTTG',\n halo41='ACTCTG',\n halo42='GATATG',\n halo43='TATGCG',\n halo44='GTACCG',\n halo45='CAGACG',\n halo46='CCTGAG',\n halo47='TATTGC',\n halo48='GAGAGC',\n halo49='ATATAC',\n halo50='GCCGAC',\n halo51='CTTAAC',\n halo52='GTTCTA',\n halo53='CAGCTA',\n halo54='ACCGGA',\n halo55='CTCCGA',\n halo56='TTAAGA',\n halo57='GGTTCA',\n halo58='ACGCCA',\n halo59='CGACCA',\n halo60='TCGGAA',\n halo61='GGCCTT',\n halo62='AGACGT',\n halo63='CATAGT',\n halo64='GATGAT',\n halo65='CCTATG',\n halo66='AACTGG',\n halo67='GCGAGG',\n halo68='TTCTCG',\n halo69='GCTGCG',\n halo70='CTGGCG',\n halo71='CGAACG',\n halo72='ATTCAG',\n halo73='CCGTTC',\n halo74='TACTTC',\n halo75='GAGGTC',\n halo76='ATCCTC',\n halo77='TCAATC',\n halo78='CTTCGC',\n halo79='GACCGC',\n halo80='ATAAGC',\n halo81='CATTAC',\n halo82='TGATAC',\n halo83='CTAGAC',\n halo84='TAGAAC',\n halo85='ATGGTA',\n halo86='GTACGA',\n halo87='AAGAGA',\n halo88='GGCAGA',\n halo89='GGAGAA',\n halo90='GCGCAA',\n halo91='GCGGTT',\n halo92='TTAGTT',\n halo93='AGAATT',\n halo94='ATCAGT',\n halo95='GGCGCT',\n halo96='ACTTAT')\nBASIC_LOOKUP.update(HALO)\nINDEX_LOOKUP.update(HALO)\nINDEX_LOOKUP.update(dict([(k.replace('halo', 'h'), v)\n for k,v in HALO.items()]))\nINDEX_LOOKUP.update(dict([(k.replace('halo', 'indexh'), v)\n for k,v in HALO.items()]))\n\n# Indexes haloht1-haloht96 are the new 8-bp indexes for Haloplex.\n# From a CSV file \"oligo_reference-halo8.csv\" provided by Joel Gruselius.\n# Identifying prefixes: 'haloht', 'hht'\nHALOHT = dict(haloht1='AACGTGAT',\n haloht2='AAACATCG',\n haloht3='ATGCCTAA',\n haloht4='AGTGGTCA',\n haloht5='ACCACTGT',\n haloht6='ACATTGGC',\n haloht7='CAGATCTG',\n haloht8='CATCAAGT',\n haloht9='CGCTGATC',\n haloht10='ACAAGCTA',\n haloht11='CTGTAGCC',\n haloht12='AGTACAAG',\n haloht13='AACAACCA',\n haloht14='AACCGAGA',\n haloht15='AACGCTTA',\n haloht16='AAGACGGA',\n haloht17='AAGGTACA',\n haloht18='ACACAGAA',\n 
haloht19='ACAGCAGA',\n haloht20='ACCTCCAA',\n haloht21='ACGCTCGA',\n haloht22='ACGTATCA',\n haloht23='ACTATGCA',\n haloht24='AGAGTCAA',\n haloht25='AGATCGCA',\n haloht26='AGCAGGAA',\n haloht27='AGTCACTA',\n haloht28='ATCCTGTA',\n haloht29='ATTGAGGA',\n haloht30='CAACCACA',\n haloht31='CAAGACTA',\n haloht32='CAATGGAA',\n haloht33='CACTTCGA',\n haloht34='CAGCGTTA',\n haloht35='CATACCAA',\n haloht36='CCAGTTCA',\n haloht37='CCGAAGTA',\n haloht38='CCGTGAGA',\n haloht39='CCTCCTGA',\n haloht40='CGAACTTA',\n haloht41='CGACTGGA',\n haloht42='CGCATACA',\n haloht43='CTCAATGA',\n haloht44='CTGAGCCA',\n haloht45='CTGGCATA',\n haloht46='GAATCTGA',\n haloht47='GACTAGTA',\n haloht48='GAGCTGAA',\n haloht49='GATAGACA',\n haloht50='GCCACATA',\n haloht51='GCGAGTAA',\n haloht52='GCTAACGA',\n haloht53='GCTCGGTA',\n haloht54='GGAGAACA',\n haloht55='GGTGCGAA',\n haloht56='GTACGCAA',\n haloht57='GTCGTAGA',\n haloht58='GTCTGTCA',\n haloht59='GTGTTCTA',\n haloht60='TAGGATGA',\n haloht61='TATCAGCA',\n haloht62='TCCGTCTA',\n haloht63='TCTTCACA',\n haloht64='TGAAGAGA',\n haloht65='TGGAACAA',\n haloht66='TGGCTTCA',\n haloht67='TGGTGGTA',\n haloht68='TTCACGCA',\n haloht69='AACTCACC',\n haloht70='AAGAGATC',\n haloht71='AAGGACAC',\n haloht72='AATCCGTC',\n haloht73='AATGTTGC',\n haloht74='ACACGACC',\n haloht75='ACAGATTC',\n haloht76='AGATGTAC',\n haloht77='AGCACCTC',\n haloht78='AGCCATGC',\n haloht79='AGGCTAAC',\n haloht80='ATAGCGAC',\n haloht81='ATCATTCC',\n haloht82='ATTGGCTC',\n haloht83='CAAGGAGC',\n haloht84='CACCTTAC',\n haloht85='CCATCCTC',\n haloht86='CCGACAAC',\n haloht87='CCTAATCC',\n haloht88='CCTCTATC',\n haloht89='CGACACAC',\n haloht90='CGGATTGC',\n haloht91='CTAAGGTC',\n haloht92='GAACAGGC',\n haloht93='GACAGTGC',\n haloht94='GAGTTAGC',\n haloht95='GATGAATC',\n haloht96='GCCAAGAC')\nBASIC_LOOKUP.update(HALOHT)\nINDEX_LOOKUP.update(HALOHT)\nINDEX_LOOKUP.update(dict([(k.replace('haloht', 'hht'), v)\n for k,v in HALOHT.items()]))\n\n# Indexes sureselect1-sureselect16 are the 16 SureSelect indexes.\n# From a CSV file \"oligo_reference-halo8.csv\" provided by Joel Gruselius.\n# Identifying prefixes: 'sureselect', 'ss'\nSURESELECT = dict(sureselect1='ATCACG',\n sureselect2='CGATGT',\n sureselect3='TTAGGC',\n sureselect4='TGACCA',\n sureselect5='ACAGTG',\n sureselect6='GCCAAT',\n sureselect7='CAGATC',\n sureselect8='ACTTGA',\n sureselect9='GATCAG',\n sureselect10='TAGCTT',\n sureselect11='GGCTAC',\n sureselect12='CTTGTA',\n sureselect13='AAACAT',\n sureselect14='CAAAAG',\n sureselect15='GAAACC',\n sureselect16='AAAGCA')\nBASIC_LOOKUP.update(SURESELECT)\nINDEX_LOOKUP.update(SURESELECT)\nINDEX_LOOKUP.update(dict([(k.replace('sureselect', 'ss'), v)\n for k,v in SURESELECT.items()]))\n\n# Indexes for TruSeq DNA HT Dual D7-D5\n# From the CSV file written out from GenoLogics LIMS 2013-01-24.\n# Identifying prefixes: 'dual'\nDUAL = dict(dual1='ATTACTCG-TATAGCCT',\n dual2='ATTACTCG-ATAGAGGC',\n dual3='ATTACTCG-CCTATCCT',\n dual4='ATTACTCG-GGCTCTGA',\n dual5='ATTACTCG-AGGCGAAG',\n dual6='ATTACTCG-TAATCTTA',\n dual7='ATTACTCG-CAGGACGT',\n dual8='ATTACTCG-GTACTGAC',\n dual9='TCCGGAGA-TATAGCCT',\n dual10='TCCGGAGA-ATAGAGGC',\n dual11='TCCGGAGA-CCTATCCT',\n dual12='TCCGGAGA-GGCTCTGA',\n dual13='TCCGGAGA-AGGCGAAG',\n dual14='TCCGGAGA-TAATCTTA',\n dual15='TCCGGAGA-CAGGACGT',\n dual16='TCCGGAGA-GTACTGAC',\n dual17='CGCTCATT-TATAGCCT',\n dual18='CGCTCATT-ATAGAGGC',\n dual19='CGCTCATT-CCTATCCT',\n dual20='CGCTCATT-GGCTCTGA',\n dual21='CGCTCATT-AGGCGAAG',\n dual22='CGCTCATT-TAATCTTA',\n dual23='CGCTCATT-CAGGACGT',\n 
dual24='CGCTCATT-GTACTGAC',\n dual25='GAGATTCC-TATAGCCT',\n dual26='GAGATTCC-ATAGAGGC',\n dual27='GAGATTCC-CCTATCCT',\n dual28='GAGATTCC-GGCTCTGA',\n dual29='GAGATTCC-AGGCGAAG',\n dual30='GAGATTCC-TAATCTTA',\n dual31='GAGATTCC-CAGGACGT',\n dual32='GAGATTCC-GTACTGAC',\n dual33='ATTCAGAA-TATAGCCT',\n dual34='ATTCAGAA-ATAGAGGC',\n dual35='ATTCAGAA-CCTATCCT',\n dual36='ATTCAGAA-GGCTCTGA',\n dual37='ATTCAGAA-AGGCGAAG',\n dual38='ATTCAGAA-TAATCTTA',\n dual39='ATTCAGAA-CAGGACGT',\n dual40='ATTCAGAA-GTACTGAC',\n dual41='GAATTCGT-TATAGCCT',\n dual42='GAATTCGT-ATAGAGGC',\n dual43='GAATTCGT-CCTATCCT',\n dual44='GAATTCGT-GGCTCTGA',\n dual45='GAATTCGT-AGGCGAAG',\n dual46='GAATTCGT-TAATCTTA',\n dual47='GAATTCGT-CAGGACGT',\n dual48='GAATTCGT-GTACTGAC',\n dual49='CTGAAGCT-TATAGCCT',\n dual50='CTGAAGCT-ATAGAGGC',\n dual51='CTGAAGCT-CCTATCCT',\n dual52='CTGAAGCT-GGCTCTGA',\n dual53='CTGAAGCT-AGGCGAAG',\n dual54='CTGAAGCT-TAATCTTA',\n dual55='CTGAAGCT-CAGGACGT',\n dual56='CTGAAGCT-GTACTGAC',\n dual57='TAATGCGC-TATAGCCT',\n dual58='TAATGCGC-ATAGAGGC',\n dual59='TAATGCGC-CCTATCCT',\n dual60='TAATGCGC-GGCTCTGA',\n dual61='TAATGCGC-AGGCGAAG',\n dual62='TAATGCGC-TAATCTTA',\n dual63='TAATGCGC-CAGGACGT',\n dual64='TAATGCGC-GTACTGAC',\n dual65='CGGCTATG-TATAGCCT',\n dual66='CGGCTATG-ATAGAGGC',\n dual67='CGGCTATG-CCTATCCT',\n dual68='CGGCTATG-GGCTCTGA',\n dual69='CGGCTATG-AGGCGAAG',\n dual70='CGGCTATG-TAATCTTA',\n dual71='CGGCTATG-CAGGACGT',\n dual72='CGGCTATG-GTACTGAC',\n dual73='TCCGCGAA-TATAGCCT',\n dual74='TCCGCGAA-ATAGAGGC',\n dual75='TCCGCGAA-CCTATCCT',\n dual76='TCCGCGAA-GGCTCTGA',\n dual77='TCCGCGAA-AGGCGAAG',\n dual78='TCCGCGAA-TAATCTTA',\n dual79='TCCGCGAA-CAGGACGT',\n dual80='TCCGCGAA-GTACTGAC',\n dual81='TCTCGCGC-TATAGCCT',\n dual82='TCTCGCGC-ATAGAGGC',\n dual83='TCTCGCGC-CCTATCCT',\n dual84='TCTCGCGC-GGCTCTGA',\n dual85='TCTCGCGC-AGGCGAAG',\n dual86='TCTCGCGC-TAATCTTA',\n dual87='TCTCGCGC-CAGGACGT',\n dual88='TCTCGCGC-GTACTGAC',\n dual89='AGCGATAG-TATAGCCT',\n dual90='AGCGATAG-ATAGAGGC',\n dual91='AGCGATAG-CCTATCCT',\n dual92='AGCGATAG-GGCTCTGA',\n dual93='AGCGATAG-AGGCGAAG',\n dual94='AGCGATAG-TAATCTTA',\n dual95='AGCGATAG-CAGGACGT',\n dual96='AGCGATAG-GTACTGAC')\nBASIC_LOOKUP.update(DUAL)\nINDEX_LOOKUP.update(DUAL)\n\n# Indexes for Nextera Dual HT.\n# From CSV file provided by Sverker Lundin 2013-05-02\n# Additional indexes (>96) from PDF and CSV files from Illumina,\n# provided by Anna Leinfelt 2014-12-10.\nNEXTERADUAL = dict(\n nxdual1='TAAGGCGA-TAGATCGC',\n nxdual2='TAAGGCGA-CTCTCTAT',\n nxdual3='TAAGGCGA-TATCCTCT',\n nxdual4='TAAGGCGA-AGAGTAGA',\n nxdual5='TAAGGCGA-GTAAGGAG',\n nxdual6='TAAGGCGA-ACTGCATA',\n nxdual7='TAAGGCGA-AAGGAGTA',\n nxdual8='TAAGGCGA-CTAAGCCT',\n nxdual9='CGTACTAG-TAGATCGC',\n nxdual10='CGTACTAG-CTCTCTAT',\n nxdual11='CGTACTAG-TATCCTCT',\n nxdual12='CGTACTAG-AGAGTAGA',\n nxdual13='CGTACTAG-GTAAGGAG',\n nxdual14='CGTACTAG-ACTGCATA',\n nxdual15='CGTACTAG-AAGGAGTA',\n nxdual16='CGTACTAG-CTAAGCCT',\n nxdual17='AGGCAGAA-TAGATCGC',\n nxdual18='AGGCAGAA-CTCTCTAT',\n nxdual19='AGGCAGAA-TATCCTCT',\n nxdual20='AGGCAGAA-AGAGTAGA',\n nxdual21='AGGCAGAA-GTAAGGAG',\n nxdual22='AGGCAGAA-ACTGCATA',\n nxdual23='AGGCAGAA-AAGGAGTA',\n nxdual24='AGGCAGAA-CTAAGCCT',\n nxdual25='TCCTGAGC-TAGATCGC',\n nxdual26='TCCTGAGC-CTCTCTAT',\n nxdual27='TCCTGAGC-TATCCTCT',\n nxdual28='TCCTGAGC-AGAGTAGA',\n nxdual29='TCCTGAGC-GTAAGGAG',\n nxdual30='TCCTGAGC-ACTGCATA',\n nxdual31='TCCTGAGC-AAGGAGTA',\n nxdual32='TCCTGAGC-CTAAGCCT',\n nxdual33='GGACTCCT-TAGATCGC',\n nxdual34='GGACTCCT-CTCTCTAT',\n 
nxdual35='GGACTCCT-TATCCTCT',\n nxdual36='GGACTCCT-AGAGTAGA',\n nxdual37='GGACTCCT-GTAAGGAG',\n nxdual38='GGACTCCT-ACTGCATA',\n nxdual39='GGACTCCT-AAGGAGTA',\n nxdual40='GGACTCCT-CTAAGCCT',\n nxdual41='TAGGCATG-TAGATCGC',\n nxdual42='TAGGCATG-CTCTCTAT',\n nxdual43='TAGGCATG-TATCCTCT',\n nxdual44='TAGGCATG-AGAGTAGA',\n nxdual45='TAGGCATG-GTAAGGAG',\n nxdual46='TAGGCATG-ACTGCATA',\n nxdual47='TAGGCATG-AAGGAGTA',\n nxdual48='TAGGCATG-CTAAGCCT',\n nxdual49='CTCTCTAC-TAGATCGC',\n nxdual50='CTCTCTAC-CTCTCTAT',\n nxdual51='CTCTCTAC-TATCCTCT',\n nxdual52='CTCTCTAC-AGAGTAGA',\n nxdual53='CTCTCTAC-GTAAGGAG',\n nxdual54='CTCTCTAC-ACTGCATA',\n nxdual55='CTCTCTAC-AAGGAGTA',\n nxdual56='CTCTCTAC-CTAAGCCT',\n nxdual57='CAGAGAGG-TAGATCGC',\n nxdual58='CAGAGAGG-CTCTCTAT',\n nxdual59='CAGAGAGG-TATCCTCT',\n nxdual60='CAGAGAGG-AGAGTAGA',\n nxdual61='CAGAGAGG-GTAAGGAG',\n nxdual62='CAGAGAGG-ACTGCATA',\n nxdual63='CAGAGAGG-AAGGAGTA',\n nxdual64='CAGAGAGG-CTAAGCCT',\n nxdual65='GCTACGCT-TAGATCGC',\n nxdual66='GCTACGCT-CTCTCTAT',\n nxdual67='GCTACGCT-TATCCTCT',\n nxdual68='GCTACGCT-AGAGTAGA',\n nxdual69='GCTACGCT-GTAAGGAG',\n nxdual70='GCTACGCT-ACTGCATA',\n nxdual71='GCTACGCT-AAGGAGTA',\n nxdual72='GCTACGCT-CTAAGCCT',\n nxdual73='CGAGGCTG-TAGATCGC',\n nxdual74='CGAGGCTG-CTCTCTAT',\n nxdual75='CGAGGCTG-TATCCTCT',\n nxdual76='CGAGGCTG-AGAGTAGA',\n nxdual77='CGAGGCTG-GTAAGGAG',\n nxdual78='CGAGGCTG-ACTGCATA',\n nxdual79='CGAGGCTG-AAGGAGTA',\n nxdual80='CGAGGCTG-CTAAGCCT',\n nxdual81='AAGAGGCA-TAGATCGC',\n nxdual82='AAGAGGCA-CTCTCTAT',\n nxdual83='AAGAGGCA-TATCCTCT',\n nxdual84='AAGAGGCA-AGAGTAGA',\n nxdual85='AAGAGGCA-GTAAGGAG',\n nxdual86='AAGAGGCA-ACTGCATA',\n nxdual87='AAGAGGCA-AAGGAGTA',\n nxdual88='AAGAGGCA-CTAAGCCT',\n nxdual89='GTAGAGGA-TAGATCGC',\n nxdual90='GTAGAGGA-CTCTCTAT',\n nxdual91='GTAGAGGA-TATCCTCT',\n nxdual92='GTAGAGGA-AGAGTAGA',\n nxdual93='GTAGAGGA-GTAAGGAG',\n nxdual94='GTAGAGGA-ACTGCATA',\n nxdual95='GTAGAGGA-AAGGAGTA',\n nxdual96='GTAGAGGA-CTAAGCCT',\n nxdual97='GCTCATGA-CTCTCTAT',\n nxdual98='GCTCATGA-TATCCTCT',\n nxdual99='GCTCATGA-AGAGTAGA',\n nxdual100='GCTCATGA-GTAAGGAG',\n nxdual101='GCTCATGA-ACTGCATA',\n nxdual102='GCTCATGA-AAGGAGTA',\n nxdual103='GCTCATGA-CTAAGCCT',\n nxdual104='ATCTCAGG-CTCTCTAT',\n nxdual105='ATCTCAGG-TATCCTCT',\n nxdual106='ATCTCAGG-AGAGTAGA',\n nxdual107='ATCTCAGG-GTAAGGAG',\n nxdual108='ATCTCAGG-ACTGCATA',\n nxdual109='ATCTCAGG-AAGGAGTA',\n nxdual110='ATCTCAGG-CTAAGCCT',\n nxdual111='ACTCGCTA-CTCTCTAT',\n nxdual112='ACTCGCTA-TATCCTCT',\n nxdual113='ACTCGCTA-AGAGTAGA',\n nxdual114='ACTCGCTA-GTAAGGAG',\n nxdual115='ACTCGCTA-ACTGCATA',\n nxdual116='ACTCGCTA-AAGGAGTA',\n nxdual117='ACTCGCTA-CTAAGCCT',\n nxdual118='GGAGCTAC-CTCTCTAT',\n nxdual119='GGAGCTAC-TATCCTCT',\n nxdual120='GGAGCTAC-AGAGTAGA',\n nxdual121='GGAGCTAC-GTAAGGAG',\n nxdual122='GGAGCTAC-ACTGCATA',\n nxdual123='GGAGCTAC-AAGGAGTA',\n nxdual124='GGAGCTAC-CTAAGCCT',\n nxdual125='GCGTAGTA-CTCTCTAT',\n nxdual126='GCGTAGTA-TATCCTCT',\n nxdual127='GCGTAGTA-AGAGTAGA',\n nxdual128='GCGTAGTA-GTAAGGAG',\n nxdual129='GCGTAGTA-ACTGCATA',\n nxdual130='GCGTAGTA-AAGGAGTA',\n nxdual131='GCGTAGTA-CTAAGCCT',\n nxdual132='CGGAGCCT-CTCTCTAT',\n nxdual133='CGGAGCCT-TATCCTCT',\n nxdual134='CGGAGCCT-AGAGTAGA',\n nxdual135='CGGAGCCT-GTAAGGAG',\n nxdual136='CGGAGCCT-ACTGCATA',\n nxdual137='CGGAGCCT-AAGGAGTA',\n nxdual138='CGGAGCCT-CTAAGCCT',\n nxdual139='TACGCTGC-CTCTCTAT',\n nxdual140='TACGCTGC-TATCCTCT',\n nxdual141='TACGCTGC-AGAGTAGA',\n nxdual142='TACGCTGC-GTAAGGAG',\n nxdual143='TACGCTGC-ACTGCATA',\n 
nxdual144='TACGCTGC-AAGGAGTA',\n nxdual145='TACGCTGC-CTAAGCCT',\n nxdual146='ATGCGCAG-CTCTCTAT',\n nxdual147='ATGCGCAG-TATCCTCT',\n nxdual148='ATGCGCAG-AGAGTAGA',\n nxdual149='ATGCGCAG-GTAAGGAG',\n nxdual150='ATGCGCAG-ACTGCATA',\n nxdual151='ATGCGCAG-AAGGAGTA',\n nxdual152='ATGCGCAG-CTAAGCCT',\n nxdual153='TAGCGCTC-CTCTCTAT',\n nxdual154='TAGCGCTC-TATCCTCT',\n nxdual155='TAGCGCTC-AGAGTAGA',\n nxdual156='TAGCGCTC-GTAAGGAG',\n nxdual157='TAGCGCTC-ACTGCATA',\n nxdual158='TAGCGCTC-AAGGAGTA',\n nxdual159='TAGCGCTC-CTAAGCCT',\n nxdual160='ACTGAGCG-CTCTCTAT',\n nxdual161='ACTGAGCG-TATCCTCT',\n nxdual162='ACTGAGCG-AGAGTAGA',\n nxdual163='ACTGAGCG-GTAAGGAG',\n nxdual164='ACTGAGCG-ACTGCATA',\n nxdual165='ACTGAGCG-AAGGAGTA',\n nxdual166='ACTGAGCG-CTAAGCCT',\n nxdual167='CCTAAGAC-CTCTCTAT',\n nxdual168='CCTAAGAC-TATCCTCT',\n nxdual169='CCTAAGAC-AGAGTAGA',\n nxdual170='CCTAAGAC-GTAAGGAG',\n nxdual171='CCTAAGAC-ACTGCATA',\n nxdual172='CCTAAGAC-AAGGAGTA',\n nxdual173='CCTAAGAC-CTAAGCCT',\n nxdual174='CGATCAGT-CTCTCTAT',\n nxdual175='CGATCAGT-TATCCTCT',\n nxdual176='CGATCAGT-AGAGTAGA',\n nxdual177='CGATCAGT-GTAAGGAG',\n nxdual178='CGATCAGT-ACTGCATA',\n nxdual179='CGATCAGT-AAGGAGTA',\n nxdual180='CGATCAGT-CTAAGCCT',\n nxdual181='TGCAGCTA-CTCTCTAT',\n nxdual182='TGCAGCTA-TATCCTCT',\n nxdual183='TGCAGCTA-AGAGTAGA',\n nxdual184='TGCAGCTA-GTAAGGAG',\n nxdual185='TGCAGCTA-ACTGCATA',\n nxdual186='TGCAGCTA-AAGGAGTA',\n nxdual187='TGCAGCTA-CTAAGCCT',\n nxdual188='TCGACGTC-CTCTCTAT',\n nxdual189='TCGACGTC-TATCCTCT',\n nxdual190='TCGACGTC-AGAGTAGA',\n nxdual191='TCGACGTC-GTAAGGAG',\n nxdual192='TCGACGTC-ACTGCATA',\n nxdual193='TCGACGTC-AAGGAGTA',\n nxdual194='TCGACGTC-CTAAGCCT',\n nxdual195='TAAGGCGA-CGTCTAAT',\n nxdual196='TAAGGCGA-TCTCTCCG',\n nxdual197='TAAGGCGA-TCGACTAG',\n nxdual198='TAAGGCGA-TTCTAGCT',\n nxdual199='TAAGGCGA-CCTAGAGT',\n nxdual200='TAAGGCGA-GCGTAAGA',\n nxdual201='TAAGGCGA-CTATTAAG',\n nxdual202='TAAGGCGA-AAGGCTAT',\n nxdual203='TAAGGCGA-GAGCCTTA',\n nxdual204='TAAGGCGA-TTATGCGA',\n nxdual205='CGTACTAG-CGTCTAAT',\n nxdual206='CGTACTAG-TCTCTCCG',\n nxdual207='CGTACTAG-TCGACTAG',\n nxdual208='CGTACTAG-TTCTAGCT',\n nxdual209='CGTACTAG-CCTAGAGT',\n nxdual210='CGTACTAG-GCGTAAGA',\n nxdual211='CGTACTAG-CTATTAAG',\n nxdual212='CGTACTAG-AAGGCTAT',\n nxdual213='CGTACTAG-GAGCCTTA',\n nxdual214='CGTACTAG-TTATGCGA',\n nxdual215='AGGCAGAA-CGTCTAAT',\n nxdual216='AGGCAGAA-TCTCTCCG',\n nxdual217='AGGCAGAA-TCGACTAG',\n nxdual218='AGGCAGAA-TTCTAGCT',\n nxdual219='AGGCAGAA-CCTAGAGT',\n nxdual220='AGGCAGAA-GCGTAAGA',\n nxdual221='AGGCAGAA-CTATTAAG',\n nxdual222='AGGCAGAA-AAGGCTAT',\n nxdual223='AGGCAGAA-GAGCCTTA',\n nxdual224='AGGCAGAA-TTATGCGA',\n nxdual225='TCCTGAGC-CGTCTAAT',\n nxdual226='TCCTGAGC-TCTCTCCG',\n nxdual227='TCCTGAGC-TCGACTAG',\n nxdual228='TCCTGAGC-TTCTAGCT',\n nxdual229='TCCTGAGC-CCTAGAGT',\n nxdual230='TCCTGAGC-GCGTAAGA',\n nxdual231='TCCTGAGC-CTATTAAG',\n nxdual232='TCCTGAGC-AAGGCTAT',\n nxdual233='TCCTGAGC-GAGCCTTA',\n nxdual234='TCCTGAGC-TTATGCGA',\n nxdual235='GGACTCCT-CGTCTAAT',\n nxdual236='GGACTCCT-TCTCTCCG',\n nxdual237='GGACTCCT-TCGACTAG',\n nxdual238='GGACTCCT-TTCTAGCT',\n nxdual239='GGACTCCT-CCTAGAGT',\n nxdual240='GGACTCCT-GCGTAAGA',\n nxdual241='GGACTCCT-CTATTAAG',\n nxdual242='GGACTCCT-AAGGCTAT',\n nxdual243='GGACTCCT-GAGCCTTA',\n nxdual244='GGACTCCT-TTATGCGA',\n nxdual245='TAGGCATG-CGTCTAAT',\n nxdual246='TAGGCATG-TCTCTCCG',\n nxdual247='TAGGCATG-TCGACTAG',\n nxdual248='TAGGCATG-TTCTAGCT',\n nxdual249='TAGGCATG-CCTAGAGT',\n nxdual250='TAGGCATG-GCGTAAGA',\n 
nxdual251='TAGGCATG-CTATTAAG',\n nxdual252='TAGGCATG-AAGGCTAT',\n nxdual253='TAGGCATG-GAGCCTTA',\n nxdual254='TAGGCATG-TTATGCGA',)\n\nNEXTERADUAL.update(dict(\n nxdual255='CTCTCTAC-CGTCTAAT',\n nxdual256='CTCTCTAC-TCTCTCCG',\n nxdual257='CTCTCTAC-TCGACTAG',\n nxdual258='CTCTCTAC-TTCTAGCT',\n nxdual259='CTCTCTAC-CCTAGAGT',\n nxdual260='CTCTCTAC-GCGTAAGA',\n nxdual261='CTCTCTAC-CTATTAAG',\n nxdual262='CTCTCTAC-AAGGCTAT',\n nxdual263='CTCTCTAC-GAGCCTTA',\n nxdual264='CTCTCTAC-TTATGCGA',\n nxdual265='CAGAGAGG-CGTCTAAT',\n nxdual266='CAGAGAGG-TCTCTCCG',\n nxdual267='CAGAGAGG-TCGACTAG',\n nxdual268='CAGAGAGG-TTCTAGCT',\n nxdual269='CAGAGAGG-CCTAGAGT',\n nxdual270='CAGAGAGG-GCGTAAGA',\n nxdual271='CAGAGAGG-CTATTAAG',\n nxdual272='CAGAGAGG-AAGGCTAT',\n nxdual273='CAGAGAGG-GAGCCTTA',\n nxdual274='CAGAGAGG-TTATGCGA',\n nxdual275='GCTACGCT-CGTCTAAT',\n nxdual276='GCTACGCT-TCTCTCCG',\n nxdual277='GCTACGCT-TCGACTAG',\n nxdual278='GCTACGCT-TTCTAGCT',\n nxdual279='GCTACGCT-CCTAGAGT',\n nxdual280='GCTACGCT-GCGTAAGA',\n nxdual281='GCTACGCT-CTATTAAG',\n nxdual282='GCTACGCT-AAGGCTAT',\n nxdual283='GCTACGCT-GAGCCTTA',\n nxdual284='GCTACGCT-TTATGCGA',\n nxdual285='CGAGGCTG-CGTCTAAT',\n nxdual286='CGAGGCTG-TCTCTCCG',\n nxdual287='CGAGGCTG-TCGACTAG',\n nxdual288='CGAGGCTG-TTCTAGCT',\n nxdual289='CGAGGCTG-CCTAGAGT',\n nxdual290='CGAGGCTG-GCGTAAGA',\n nxdual291='CGAGGCTG-CTATTAAG',\n nxdual292='CGAGGCTG-AAGGCTAT',\n nxdual293='CGAGGCTG-GAGCCTTA',\n nxdual294='CGAGGCTG-TTATGCGA',\n nxdual295='AAGAGGCA-CGTCTAAT',\n nxdual296='AAGAGGCA-TCTCTCCG',\n nxdual297='AAGAGGCA-TCGACTAG',\n nxdual298='AAGAGGCA-TTCTAGCT',\n nxdual299='AAGAGGCA-CCTAGAGT',\n nxdual300='AAGAGGCA-GCGTAAGA',\n nxdual301='AAGAGGCA-CTATTAAG',\n nxdual302='AAGAGGCA-AAGGCTAT',\n nxdual303='AAGAGGCA-GAGCCTTA',\n nxdual304='AAGAGGCA-TTATGCGA',\n nxdual305='GTAGAGGA-CGTCTAAT',\n nxdual306='GTAGAGGA-TCTCTCCG',\n nxdual307='GTAGAGGA-TCGACTAG',\n nxdual308='GTAGAGGA-TTCTAGCT',\n nxdual309='GTAGAGGA-CCTAGAGT',\n nxdual310='GTAGAGGA-GCGTAAGA',\n nxdual311='GTAGAGGA-CTATTAAG',\n nxdual312='GTAGAGGA-AAGGCTAT',\n nxdual313='GTAGAGGA-GAGCCTTA',\n nxdual314='GTAGAGGA-TTATGCGA',\n nxdual315='GCTCATGA-CGTCTAAT',\n nxdual316='GCTCATGA-TCTCTCCG',\n nxdual317='GCTCATGA-TCGACTAG',\n nxdual318='GCTCATGA-TTCTAGCT',\n nxdual319='GCTCATGA-CCTAGAGT',\n nxdual320='GCTCATGA-GCGTAAGA',\n nxdual321='GCTCATGA-CTATTAAG',\n nxdual322='GCTCATGA-AAGGCTAT',\n nxdual323='GCTCATGA-GAGCCTTA',\n nxdual324='GCTCATGA-TTATGCGA',\n nxdual325='ATCTCAGG-CGTCTAAT',\n nxdual326='ATCTCAGG-TCTCTCCG',\n nxdual327='ATCTCAGG-TCGACTAG',\n nxdual328='ATCTCAGG-TTCTAGCT',\n nxdual329='ATCTCAGG-CCTAGAGT',\n nxdual330='ATCTCAGG-GCGTAAGA',\n nxdual331='ATCTCAGG-CTATTAAG',\n nxdual332='ATCTCAGG-AAGGCTAT',\n nxdual333='ATCTCAGG-GAGCCTTA',\n nxdual334='ATCTCAGG-TTATGCGA',\n nxdual335='ACTCGCTA-CGTCTAAT',\n nxdual336='ACTCGCTA-TCTCTCCG',\n nxdual337='ACTCGCTA-TCGACTAG',\n nxdual338='ACTCGCTA-TTCTAGCT',\n nxdual339='ACTCGCTA-CCTAGAGT',\n nxdual340='ACTCGCTA-GCGTAAGA',\n nxdual341='ACTCGCTA-CTATTAAG',\n nxdual342='ACTCGCTA-AAGGCTAT',\n nxdual343='ACTCGCTA-GAGCCTTA',\n nxdual344='ACTCGCTA-TTATGCGA',\n nxdual345='GGAGCTAC-CGTCTAAT',\n nxdual346='GGAGCTAC-TCTCTCCG',\n nxdual347='GGAGCTAC-TCGACTAG',\n nxdual348='GGAGCTAC-TTCTAGCT',\n nxdual349='GGAGCTAC-CCTAGAGT',\n nxdual350='GGAGCTAC-GCGTAAGA',\n nxdual351='GGAGCTAC-CTATTAAG',\n nxdual352='GGAGCTAC-AAGGCTAT',\n nxdual353='GGAGCTAC-GAGCCTTA',\n nxdual354='GGAGCTAC-TTATGCGA',\n nxdual355='GCGTAGTA-CGTCTAAT',\n nxdual356='GCGTAGTA-TCTCTCCG',\n 
nxdual357='GCGTAGTA-TCGACTAG',\n nxdual358='GCGTAGTA-TTCTAGCT',\n nxdual359='GCGTAGTA-CCTAGAGT',\n nxdual360='GCGTAGTA-GCGTAAGA',\n nxdual361='GCGTAGTA-CTATTAAG',\n nxdual362='GCGTAGTA-AAGGCTAT',\n nxdual363='GCGTAGTA-GAGCCTTA',\n nxdual364='GCGTAGTA-TTATGCGA',\n nxdual365='CGGAGCCT-CGTCTAAT',\n nxdual366='CGGAGCCT-TCTCTCCG',\n nxdual367='CGGAGCCT-TCGACTAG',\n nxdual368='CGGAGCCT-TTCTAGCT',\n nxdual369='CGGAGCCT-CCTAGAGT',\n nxdual370='CGGAGCCT-GCGTAAGA',\n nxdual371='CGGAGCCT-CTATTAAG',\n nxdual372='CGGAGCCT-AAGGCTAT',\n nxdual373='CGGAGCCT-GAGCCTTA',\n nxdual374='CGGAGCCT-TTATGCGA',\n nxdual375='TACGCTGC-CGTCTAAT',\n nxdual376='TACGCTGC-TCTCTCCG',\n nxdual377='TACGCTGC-TCGACTAG',\n nxdual378='TACGCTGC-TTCTAGCT',\n nxdual379='TACGCTGC-CCTAGAGT',\n nxdual380='TACGCTGC-GCGTAAGA',\n nxdual381='TACGCTGC-CTATTAAG',\n nxdual382='TACGCTGC-AAGGCTAT',\n nxdual383='TACGCTGC-GAGCCTTA',\n nxdual384='TACGCTGC-TTATGCGA',\n nxdual385='ATGCGCAG-CGTCTAAT',\n nxdual386='ATGCGCAG-TCTCTCCG',\n nxdual387='ATGCGCAG-TCGACTAG',\n nxdual388='ATGCGCAG-TTCTAGCT',\n nxdual389='ATGCGCAG-CCTAGAGT',\n nxdual390='ATGCGCAG-GCGTAAGA',\n nxdual391='ATGCGCAG-CTATTAAG',\n nxdual392='ATGCGCAG-AAGGCTAT',\n nxdual393='ATGCGCAG-GAGCCTTA',\n nxdual394='ATGCGCAG-TTATGCGA',\n nxdual395='TAGCGCTC-CGTCTAAT',\n nxdual396='TAGCGCTC-TCTCTCCG',\n nxdual397='TAGCGCTC-TCGACTAG',\n nxdual398='TAGCGCTC-TTCTAGCT',\n nxdual399='TAGCGCTC-CCTAGAGT',\n nxdual400='TAGCGCTC-GCGTAAGA',\n nxdual401='TAGCGCTC-CTATTAAG',\n nxdual402='TAGCGCTC-AAGGCTAT',\n nxdual403='TAGCGCTC-GAGCCTTA',\n nxdual404='TAGCGCTC-TTATGCGA',\n nxdual405='ACTGAGCG-CGTCTAAT',\n nxdual406='ACTGAGCG-TCTCTCCG',\n nxdual407='ACTGAGCG-TCGACTAG',\n nxdual408='ACTGAGCG-TTCTAGCT',\n nxdual409='ACTGAGCG-CCTAGAGT',\n nxdual410='ACTGAGCG-GCGTAAGA',\n nxdual411='ACTGAGCG-CTATTAAG',\n nxdual412='ACTGAGCG-AAGGCTAT',\n nxdual413='ACTGAGCG-GAGCCTTA',\n nxdual414='ACTGAGCG-TTATGCGA',\n nxdual415='CCTAAGAC-CGTCTAAT',\n nxdual416='CCTAAGAC-TCTCTCCG',\n nxdual417='CCTAAGAC-TCGACTAG',\n nxdual418='CCTAAGAC-TTCTAGCT',\n nxdual419='CCTAAGAC-CCTAGAGT',\n nxdual420='CCTAAGAC-GCGTAAGA',\n nxdual421='CCTAAGAC-CTATTAAG',\n nxdual422='CCTAAGAC-AAGGCTAT',\n nxdual423='CCTAAGAC-GAGCCTTA',\n nxdual424='CCTAAGAC-TTATGCGA',\n nxdual425='CGATCAGT-CGTCTAAT',\n nxdual426='CGATCAGT-TCTCTCCG',\n nxdual427='CGATCAGT-TCGACTAG',\n nxdual428='CGATCAGT-TTCTAGCT',\n nxdual429='CGATCAGT-CCTAGAGT',\n nxdual430='CGATCAGT-GCGTAAGA',\n nxdual431='CGATCAGT-CTATTAAG',\n nxdual432='CGATCAGT-AAGGCTAT',\n nxdual433='CGATCAGT-GAGCCTTA',\n nxdual434='CGATCAGT-TTATGCGA',\n nxdual435='TGCAGCTA-CGTCTAAT',\n nxdual436='TGCAGCTA-TCTCTCCG',\n nxdual437='TGCAGCTA-TCGACTAG',\n nxdual438='TGCAGCTA-TTCTAGCT',\n nxdual439='TGCAGCTA-CCTAGAGT',\n nxdual440='TGCAGCTA-GCGTAAGA',\n nxdual441='TGCAGCTA-CTATTAAG',\n nxdual442='TGCAGCTA-AAGGCTAT',\n nxdual443='TGCAGCTA-GAGCCTTA',\n nxdual444='TGCAGCTA-TTATGCGA',\n nxdual445='TCGACGTC-CGTCTAAT',\n nxdual446='TCGACGTC-TCTCTCCG',\n nxdual447='TCGACGTC-TCGACTAG',\n nxdual448='TCGACGTC-TTCTAGCT',\n nxdual449='TCGACGTC-CCTAGAGT',\n nxdual450='TCGACGTC-GCGTAAGA',\n nxdual451='TCGACGTC-CTATTAAG',\n nxdual452='TCGACGTC-AAGGCTAT',\n nxdual453='TCGACGTC-GAGCCTTA',\n nxdual454='TCGACGTC-TTATGCGA',\n )\n )\nBASIC_LOOKUP.update(NEXTERADUAL)\nINDEX_LOOKUP.update(NEXTERADUAL)\n\n\n# Indexes converting Haloplex to dual\n# Transformation deduced from Excel file provided by Sverker Lundin 2013-07-11\nHALOHTDUAL = dict()\nsuffix = '-TCTTTCCC'\nfor key, value in HALOHT.iteritems():\n HALOHTDUAL[key + 'dual'] = value + 
suffix\nBASIC_LOOKUP.update(HALOHTDUAL)\nINDEX_LOOKUP.update(HALOHTDUAL)\n\n\n# Illumina indexes converted to dual.\n# From Excel file provided by Sverker Lundin 2013-07-11\nILLUMINADUAL = dict(index1dual='ATCACGAT-TCTTTCCC',\n index2dual='CGATGTAT-TCTTTCCC',\n index3dual='TTAGGCAT-TCTTTCCC',\n index4dual='TGACCAAT-TCTTTCCC',\n index5dual='ACAGTGAT-TCTTTCCC',\n index6dual='GCCAATAT-TCTTTCCC',\n index7dual='CAGATCAT-TCTTTCCC',\n index8dual='ACTTGAAT-TCTTTCCC',\n index9dual='GATCAGAT-TCTTTCCC',\n index10dual='TAGCTTAT-TCTTTCCC',\n index11dual='GGCTACAT-TCTTTCCC',\n index12dual='CTTGTAAT-TCTTTCCC',\n index13dual='AGTCAACA-TCTTTCCC',\n index14dual='AGTTCCGT-TCTTTCCC',\n index15dual='ATGTCAGA-TCTTTCCC',\n index16dual='CCGTCCCG-TCTTTCCC',\n index17dual='GTAGAGAT-TCTTTCCC', # Added 2013-10-14: \"AT\"?\n index18dual='GTCCGCAC-TCTTTCCC',\n index19dual='GTGAAACG-TCTTTCCC',\n index20dual='GTGGCCTT-TCTTTCCC',\n index21dual='GTTTCGGA-TCTTTCCC',\n index22dual='CGTACGTA-TCTTTCCC',\n index23dual='GAGTGGAT-TCTTTCCC',\n index25dual='ACTGATAT-TCTTTCCC',\n index27dual='ATTCCTTT-TCTTTCCC')\nBASIC_LOOKUP.update(ILLUMINADUAL)\nINDEX_LOOKUP.update(ILLUMINADUAL)\n\n# SureSelect XT indexes\n# From table 85 on page 158 of http://www.chem.agilent.com/library/usermanuals/public/g7550-90000.pdf\nSURESELECT_XT = dict(xta01='ATGCCTAA',\n xta02='AGCAGGAA',\n xta03='ATCATTCC',\n xta04='AACTCACC',\n xta05='AACGCTTA',\n xta06='AGCCATGC',\n xta07='ACGTATCA',\n xta08='CAGCGTTA',\n xta09='CTCAATGA',\n xta10='AATGTTGC',\n xta11='CCAGTTCA',\n xta12='CAAGGAGC',\n xtb01='GAATCTGA',\n xtb02='GAGCTGAA',\n xtb03='GCCACATA',\n xtb04='GCTAACGA',\n xtb05='GGAGAACA',\n xtb06='GTACGCAA',\n xtb07='GTCTGTCA',\n xtb08='TAGGATGA',\n xtb09='TCCGTCTA',\n xtb10='TGAAGAGA',\n xtb11='TGGCTTCA',\n xtb12='TTCACGCA',\n xtc01='AACGTGAT',\n xtc02='AAACATCG',\n xtc03='ACCACTGT',\n xtc04='CAGATCTG',\n xtc05='CATCAAGT',\n xtc06='AGTACAAG',\n xtc07='CTAAGGTC',\n xtc08='AGTGGTCA',\n xtc09='AGGCTAAC',\n xtc10='AGATCGCA',\n xtc11='CGACTGGA',\n xtc12='CACCTTAC',\n xtd01='CACTTCGA',\n xtd02='GAGTTAGC',\n xtd03='CTGGCATA',\n xtd04='ATCCTGTA',\n xtd05='AAGGTACA',\n xtd06='ACATTGGC',\n xtd07='CGACACAC',\n xtd08='ACAGCAGA',\n xtd09='CCATCCTC',\n xtd10='AAGAGATC',\n xtd11='CAAGACTA',\n xtd12='AAGACGGA',\n xte01='GCCAAGAC',\n xte02='CGAACTTA',\n xte03='ACCTCCAA',\n xte04='CTGTAGCC',\n xte05='CGCTGATC',\n xte06='ATTGAGGA',\n xte07='CCGTGAGA',\n xte08='CATACCAA',\n xte09='AGATGTAC',\n xte10='CAACCACA',\n xte11='CCTCCTGA',\n xte12='ACACAGAA',\n xtf01='GACTAGTA',\n xtf02='GATAGACA',\n xtf03='GCGAGTAA',\n xtf04='GCTCGGTA',\n xtf05='GGTGCGAA',\n xtf06='GTCGTAGA',\n xtf07='GTGTTCTA',\n xtf08='TATCAGCA',\n xtf09='TCTTCACA',\n xtf10='TGGAACAA',\n xtf11='TGGTGGTA',\n xtf12='GAACAGGC',\n xtg01='ATTGGCTC',\n xtg02='AAGGACAC',\n xtg03='ACTATGCA',\n xtg04='ACACGACC',\n xtg05='CCTAATCC',\n xtg06='AGAGTCAA',\n xtg07='CAATGGAA',\n xtg08='ATAGCGAC',\n xtg09='CCGAAGTA',\n xtg10='CCTCTATC',\n xtg11='AACAACCA',\n xtg12='AACCGAGA',\n xth01='GATGAATC',\n xth02='GACAGTGC',\n xth03='CGGATTGC',\n xth04='AGTCACTA',\n xth05='CTGAGCCA',\n xth06='CCGACAAC',\n xth07='AGCACCTC',\n xth08='ACGCTCGA',\n xth09='CGCATACA',\n xth10='ACAGATTC',\n xth11='AATCCGTC',\n xth12='ACAAGCTA')\nBASIC_LOOKUP.update(SURESELECT_XT)\nINDEX_LOOKUP.update(SURESELECT_XT)\n\n\n# Finally, allow all upper-case variants of index designations.\nINDEX_LOOKUP.update(dict([(k.upper(), v)\n for k,v in 
INDEX_LOOKUP.items()])\n","sub_path":"index_definitions.py","file_name":"index_definitions.py","file_ext":"py","file_size_in_byte":43651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}\n+{"seq_id":"68081439","text":"# Kevin Oswaldo Cabrera Navarro\n# A01227157\n# LAB 05 - Compilers Lecture\n\nbad_words = ['comment']\n\nwith open('tokens.out') as oldfile, open('tokens.txt', 'w') as newfile:\n for line in oldfile:\n if not any(bad_word in line for bad_word in bad_words):\n newfile.write(line)\n","sub_path":"labs/06/grammar_analyser.py","file_name":"grammar_analyser.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}\n+{"seq_id":"76415427","text":"# Problem 7568\nfrom sys import stdin\nanswerlist = [] # ranking list\nsizelist = []\nappend = sizelist.append\nappend2 = answerlist.append\npeople = int(stdin.readline().strip())\nfor _ in range(people):\n append(tuple(map(int, stdin.readline().strip().split())))\nfor i in range(len(sizelist)):\n k = 0\n for w,h in sizelist[:i] + sizelist[i+1:]:\n if sizelist[i][0] < w and sizelist[i][1] < h:\n k += 1\n append2(k+1)\nfor ans in answerlist:\n print(ans, end = \" \")\n","sub_path":"Baekjoon/Brute Force/Baekjoon-덩치.py","file_name":"Baekjoon-덩치.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}\n+{"seq_id":"273372718","text":"from distributions.bernoullidistribution import BernoulliDistribution\nimport numpy as np\nimport copy\nclass Agent():\n def __init__(self, trustworthy, arm_dists, num_reports, best_arm, target_arms, attack_freq, agent_id):\n self.trustworthy = trustworthy\n self.arm_dists = arm_dists # an array of distributions\n self.num_reports = num_reports\n self.best_arm = best_arm\n self.worst_arm = 0\n self.target_arms = target_arms\n self.attack_freq = attack_freq\n self.id = agent_id\n self.second_best = 0\n self.rewards = []\n\n def compute_logit(self, d):\n return np.log(d/(1-d))\n\n def add_noise(self, data, sigma=0):\n noise = np.random.normal(0, sigma, len(self.arm_dists))\n logits = [self.compute_logit(param) for param in data]\n new_hidden_logits = noise + logits\n final = [1/(1+np.exp(-val)) for val in new_hidden_logits]\n return final\n\n\n def add_biased_noise(self, data, sigma=0):\n mult = np.array([-1 if elem == 0 else 1 for elem in self.rewards])\n noise = np.array([np.random.normal(0, sigma) for elem in mult])\n # noise = np.random.normal(0, sigma, len(self.arm_dists))\n # mult = np.array([-1 if elem == 0 else 1 for elem in self.rewards])\n # noise = np.multiply(noise, mult)\n logits = np.array([self.compute_logit(param) for param in data])\n new_hidden_logits = noise + logits\n final = [1/(1+np.exp(-val)) for val in new_hidden_logits]\n return final\n\n def add_binomial_noise(self, data):\n test = [np.random.binomial(self.num_reports, param)/self.num_reports for param in data]\n return test\n\n def add_binary_noise(self, data, noise_param=0):\n noise = []\n if noise_param == 0:\n noise = np.array([0 for reward in data])\n elif noise_param == 1:\n noise = np.array([1 for reward in data])\n else:\n noise = np.array([BernoulliDistribution(noise_param).sample() for reward in data])\n noise_reward = data + noise\n noise_reward = np.remainder(noise_reward, 2)\n return noise_reward\n\n def generate_reports_2(self):\n reports = []\n for dist in self.arm_dists:\n total = 0\n for _ in range(self.num_reports):\n if not 
self.trustworthy:\n total += BernoulliDistribution(1-dist.theta).sample()\n else:\n total += dist.sample()\n\n reports.append(total/self.num_reports)\n\n return reports #returns an array of bernoulli parameters\n\n def generate_reports_sneak_attack(self):\n data = [dist.mean() for dist in self.arm_dists]\n # arg_sorted = np.argsort(data)\n reports = self.add_biased_noise(data)\n # reports = np.array(reports)\n # reports = 1 - reportsccc\n reports[self.best_arm] = 0\n # reports[self.worst_arm] = 1\n # reports[arg_sorted[-2]]\n \n return reports #returns an array of bernoulli parameters\n\n\n def generate_reports(self):\n if self.trustworthy == True:\n reports = []\n for dist in self.arm_dists:\n reports.append(np.mean(dist.sample_array(self.num_reports)))\n\n return reports #returns an array of bernoulli parameters\n else:\n # print(\"ello\")\n return self.generate_reports_sneak_attack()\n\n def generate_reports_copy_cat_attack(self, prev_agents, prev_agent_reports):\n reports = []\n for agent_index, agent in enumerate(prev_agents):\n if agent.trustworthy == True:\n reports = copy.deepcopy(prev_agent_reports[agent])\n reports[self.best_arm] = 0.5\n \n return reports\n \n return self.generate_reports_sneak_attack()\n\n def generate_reports_sleeper_attack(self, t, prev_agents, prev_agent_reports, attack=\"sneak\"):\n reports = []\n if np.random.rand() < self.attack_freq:\n if attack == \"copy\":\n return self.generate_reports_copy_cat_attack(prev_agents, prev_agent_reports)\n elif attack == \"damage\":\n return self.generate_reports_max_damage()\n elif attack == \"sneak\":\n return self.generate_reports_sneak_attack()\n else:\n data = [dist.mean() for dist in self.arm_dists]\n reports = self.add_biased_noise(data)\n return reports\n # for dist in self.arm_dists:\n # reports.append(dist.mean())\n # return reports\n\n def generate_reports_prolonged_attack(self, t, prev_agents, prev_agent_reports, attack=\"damage\"):\n if (t > 500):\n return self.generate_reports_sneak_attack()\n else:\n data = [dist.mean() for dist in self.arm_dists]\n # reports = self.add_binary_noise(self.rewards)\n # reports = self.add_noise(data)\n reports = self.add_biased_noise(data)\n return reports\n\n def generate_reports_average_attack(self, prev_agents, prev_agent_reports):\n reports = np.zeros(len(self.arm_dists))\n count = 0\n\n if len(prev_agents) == 0:\n return self.generate_reports_sneak_attack()\n else:\n for agent_index, _ in enumerate(prev_agents):\n reports = np.add(reports, prev_agent_reports[agent_index])\n count += 1\n\n reports /= count\n reports[self.best_arm] = 0\n for target_arm_index in self.target_arms:\n reports[target_arm_index] = 1\n return reports.tolist()\n\n def generate_reports_max_damage(self):\n reports = []\n for dist in self.arm_dists:\n reports.append(1-dist.mean())\n\n return reports #returns an array of bernoulli parameters\n\n def generate_reports_random_attack(self):\n reports = []\n for index, __ in enumerate(self.arm_dists):\n if index == self.best_arm:\n reports.append(0)\n else:\n reports.append(np.random.rand())\n\n return reports #returns an array of bernoulli parameters\n\n def generate_reports_deterministic_attack(self):\n reports = []\n for index, __ in enumerate(self.arm_dists):\n if index in self.target_arms:\n reports.append(1)\n else:\n reports.append(0)\n\n return reports #returns an array of bernoulli parameters\n\n def generate_reports_v2(self, t, attack, prev_agents= [], prev_agent_reports= []):\n data = [dist.mean() for dist in self.arm_dists]\n if self.trustworthy 
== True:\n # reports = self.add_noise(data)\n # reports = self.add_binary_noise(self.rewards)\n reports = self.add_biased_noise(data)\n\n return reports # returns an array of Bernoulli parameters\n else:\n if attack == \"copy\":\n return self.generate_reports_copy_cat_attack(prev_agents, prev_agent_reports)\n elif attack == \"avg\":\n return self.generate_reports_average_attack(prev_agents, prev_agent_reports)\n elif attack == \"sneak\":\n return self.generate_reports_sneak_attack()\n elif attack == \"damage\":\n return self.generate_reports_max_damage()\n elif attack == \"deterministic\":\n return self.generate_reports_deterministic_attack()\n elif attack == \"sleeper\":\n return self.generate_reports_sleeper_attack(t, prev_agents, prev_agent_reports)\n elif attack == \"random\":\n return self.generate_reports_random_attack()\n elif attack == \"prolonged\":\n return self.generate_reports_prolonged_attack(t, prev_agents, prev_agent_reports)\n else:\n exit()","sub_path":"agencies/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":7924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}\n+{"seq_id":"611867204","text":"######## required functions ########\n\n\ndef unikati(s):\n new = []\n for e in s:\n if e not in new:\n new.append(e)\n return new\n\n\ndef avtor(tweet):\n temp = tweet.split(\":\")\n result = temp[0]\n return result\n\n\ndef vsi_avtorji(tweets):\n avtorji = []\n for tweet in tweets:\n a = avtor(tweet)\n if a not in avtorji:\n avtorji.append(a)\n return avtorji\n\n\ndef izloci_besedo(beseda):\n a = \"\"\n i = 0\n for b in beseda:\n if b.isalnum() == False:\n i += 1\n else:\n a = beseda[i:]\n break\n i = 0\n for b in a[::-1]:\n if b.isalnum() == False:\n i += 1\n else:\n if i == 0:\n break\n else:\n a = a[:-i]\n break\n return a\n\n\ndef se_zacne_z(tweet, c):\n temp = []\n result = []\n t = tweet.split()\n for word in t:\n if word.startswith(c):\n temp.append(word)\n for word in temp:\n x = izloci_besedo(word)\n result.append(x)\n return result\n\n\ndef zberi_se_zacne_z(tweets, c):\n temp = []\n for tweet in tweets:\n a = se_zacne_z(tweet, c)\n for w in a:\n if a != []:\n if w not in temp:\n temp.append(w)\n temp = list(temp)\n return temp\n\n\ndef vse_afne(tweets):\n return zberi_se_zacne_z(tweets, \"@\")\n\n\ndef vsi_hashtagi(tweets):\n return zberi_se_zacne_z(tweets, \"#\")\n\n\ndef vse_osebe(tweets):\n a = zberi_se_zacne_z(tweets, \"@\")\n for tweet in tweets:\n temp = tweet.split()\n temp_char = izloci_besedo(temp[0])\n if temp_char not in a:\n a.append(temp_char)\n result = sorted(a)\n return result\n\n\n###### END required functions ######\n\n\n######## additional functions ########\n\ndef custva(tweets, hashtags):\n a = []\n for tweet in tweets:\n for hash in hashtags:\n if hash in tweet:\n temp = tweet.split()\n temp_char = izloci_besedo(temp[0])\n if temp_char not in a:\n a.append(temp_char)\n break\n else:\n break\n result = sorted(a)\n return result\n\ndef se_poznata(tweets, oseba1, oseba2):\n mentions = []\n for tweet in tweets:\n temp = tweet.split()\n author = izloci_besedo(temp[0])\n mention = zberi_se_zacne_z(temp, \"@\")\n for person in mention:\n s = izloci_besedo(person)\n mentions.append(s)\n if author == oseba1 and oseba2 in mentions:\n return True\n mentions = []\n return False\n\n\n\n###### END additional functions ######\n\n\nimport unittest\n\n\nclass TestTviti(unittest.TestCase):\n tviti = [\n \"sandra: Spet ta dež. 
#dougcajt\",\n \"berta: @sandra Delaj domačo za #programiranje1\",\n \"sandra: @berta Ne maram #programiranje1 #krneki\",\n \"ana: kdo so te @berta, @cilka, @dani? #krneki\",\n \"cilka: jst sm pa #luft\",\n \"benjamin: pogrešam ano #zalosten\",\n \"ema: @benjamin @ana #split? po dvopičju, za začetek?\",\n ]\n\n def test_unikat(self):\n self.assertEqual(unikati([1, 2, 1, 1, 3, 2]), [1, 2, 3])\n self.assertEqual(unikati([1, 3, 2, 1, 1, 3, 2]), [1, 3, 2])\n self.assertEqual(unikati([1, 5, 4, 3, 2]), [1, 5, 4, 3, 2])\n self.assertEqual(unikati([1, 1, 1, 1, 1]), [1])\n self.assertEqual(unikati([1]), [1])\n self.assertEqual(unikati([]), [])\n self.assertEqual(unikati([\"Ana\", \"Berta\", \"Cilka\", \"Berta\"]), [\"Ana\", \"Berta\", \"Cilka\"])\n\n def test_avtor(self):\n self.assertEqual(avtor(\"janez: pred dvopičjem avtor, potem besedilo\"), \"janez\")\n self.assertEqual(avtor(\"ana: malo krajse ime\"), \"ana\")\n self.assertEqual(avtor(\"benjamin: pomembne so tri stvari: prva, druga in tretja\"), \"benjamin\")\n\n def test_vsi_avtorji(self):\n self.assertEqual(vsi_avtorji(self.tviti), [\"sandra\", \"berta\", \"ana\", \"cilka\", \"benjamin\", \"ema\"])\n self.assertEqual(vsi_avtorji(self.tviti[:3]), [\"sandra\", \"berta\"])\n\n def test_izloci_besedo(self):\n self.assertEqual(izloci_besedo(\"@ana\"), \"ana\")\n self.assertEqual(izloci_besedo(\"@@ana!!!\"), \"ana\")\n self.assertEqual(izloci_besedo(\"ana\"), \"ana\")\n self.assertEqual(izloci_besedo(\"!#$%\\\"=%/%()/Ben-jamin'\"), \"Ben-jamin\")\n\n def test_vse_na_crko(self):\n self.assertEqual(se_zacne_z(\"Benjamin $je $skocil! Visoko!\", \"$\"), [\"je\", \"skocil\"])\n self.assertEqual(se_zacne_z(\"Benjamin $je $skocil! #Visoko!\", \"$\"), [\"je\", \"skocil\"])\n self.assertEqual(se_zacne_z(\"ana: kdo so te @berta, @cilka, @dani? #krneki\", \"@\"), [\"berta\", \"cilka\", \"dani\"])\n\n def test_zberi_na_crko(self):\n self.assertEqual(zberi_se_zacne_z(self.tviti, \"@\"), ['sandra', 'berta', 'cilka', 'dani', 'benjamin', 'ana'])\n self.assertEqual(zberi_se_zacne_z(self.tviti, \"#\"), ['dougcajt', 'programiranje1', 'krneki', 'luft', 'zalosten', 'split'])\n\n def test_vse_afne(self):\n self.assertEqual(vse_afne(self.tviti), ['sandra', 'berta', 'cilka', 'dani', 'benjamin', 'ana'])\n\n def test_vsi_hashtagi(self):\n self.assertEqual(vsi_hashtagi(self.tviti), ['dougcajt', 'programiranje1', 'krneki', 'luft', 'zalosten', 'split'])\n\n def test_vse_osebe(self):\n self.assertEqual(vse_osebe(self.tviti), ['ana', 'benjamin', 'berta', 'cilka', 'dani', 'ema', 'sandra'])\n\n\nclass TestDodatna(unittest.TestCase):\n tviti = [\n \"sandra: Spet ta dež. #dougcajt\",\n \"berta: @sandra Delaj domačo za #programiranje1\",\n \"sandra: @berta Ne maram #programiranje1 #krneki\",\n \"ana: kdo so te @berta, @cilka, @dani? #krneki\",\n \"cilka: jst sm pa #luft\",\n \"benjamin: pogrešam ano #zalosten\",\n \"ema: @benjamin @ana #split? 
po dvopičju, za začetek?\",\n ]\n\n def test_custva(self):\n self.assertEqual(custva(self.tviti, [\"dougcajt\", \"krneki\"]), [\"ana\", \"sandra\"])\n self.assertEqual(custva(self.tviti, [\"luft\"]), [\"cilka\"])\n self.assertEqual(custva(self.tviti, [\"meh\"]), [])\n\n def test_se_poznata(self):\n self.assertTrue(se_poznata(self.tviti, \"ana\", \"berta\"))\n self.assertTrue(se_poznata(self.tviti, \"ema\", \"ana\"))\n self.assertFalse(se_poznata(self.tviti, \"sandra\", \"ana\"))\n self.assertFalse(se_poznata(self.tviti, \"cilka\", \"luft\"))\n self.assertFalse(se_poznata(self.tviti, \"cilka\", \"balon\"))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n\n","sub_path":"code/batch-2/dn5 - tviti/M-17075-2230.py","file_name":"M-17075-2230.py","file_ext":"py","file_size_in_byte":6471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"283906694","text":"#!/usr/bin/env python\nfrom jasp import *\nfrom ase import Atom, Atoms\nLC = [2.75, 2.8, 2.85, 2.9, 2.95, 3.0]\nfor a in LC:\n atoms = Atoms([Atom('Cu', [0, 0, 0])],\n cell=0.5 * a * np.array([[ 1.0, 1.0, -1.0],\n [-1.0, 1.0, 1.0],\n [ 1.0, -1.0, 1.0]]))\n with jasp('bulk/Cu-bcc-{0}'.format(a),\n xc='PBE',\n encut=350,\n kpts=(8, 8, 8),\n atoms=atoms) as calc:\n calc.calculate()","sub_path":"learn/script-98.py","file_name":"script-98.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"112688756","text":"# Copyright (c) 2015 Joel Goguen\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\nimport re\n\nfrom datetime import datetime\n\n\n# This regexp will allow nonsense dates\ndate_re = re.compile(r'^(\\d{4})\\-(0[1-9]|1[012])\\-(0[1-9]|[12][0-9]|3[01])')\n\n\ndef is_valid_date(datestr):\n m = date_re.match(datestr)\n if not m:\n return False\n\n year = int(m.group(1))\n month = int(m.group(2))\n day = int(m.group(3))\n\n # Only certain months may have 31 days\n if day == 31 and month in (4, 6, 9, 11):\n return False\n\n # February has weird rules. 
Thanks leap years!\n if month == 2:\n if day > 29:\n return False # February never has more than 29 days\n if day == 29 and not (year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)):\n return False # Feb 29 outside a leap year\n\n # This date has been blessed!\n return True\n\ndef parse_date(datestr):\n if not is_valid_date(datestr):\n raise ValueError(\"Invalid date: {0}\".format(datestr))\n return datetime.strptime(datestr, \"%Y-%m-%d\").date()\n","sub_path":"src/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":2069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"303105031","text":"from Week4.Matplotlib_All.Matplotlib_PieChart.Utility.piechart_utility import validate_num, create_list_all\nimport matplotlib.pyplot as plt\n\n\n# class to perform graphical representation of data using matplotlib pie chart\nclass ChartByTitlePopularity:\n choice = 0\n\n def pie_chart(self):\n print()\n print(\"1. Create a pie chart with a title of the popularity of programming Languages.\")\n print(\"2. Exit\")\n print()\n while True:\n try:\n print()\n # accept choice from user\n self.choice = input(\"Enter choice : \")\n # validate choice number\n valid_choice = validate_num(self.choice)\n if valid_choice:\n choice = int(self.choice)\n if choice == 1:\n print(\"Enter programming languages (5):\")\n # create list of 5 languages\n lang = create_list_all(5)\n print(\"Enter popularity of that language:\")\n # list of popularity\n popularity = create_list_all(5)\n # explode 1st slice\n explode = (0.1, 0, 0, 0, 0)\n colors = [\"#1f77b4\", \"#ff7f0e\", \"#2ca02c\", \"#d62728\", \"#9467bd\"]\n\n # plot\n plt.pie(popularity, explode=explode, labels=lang, colors=colors, autopct='%1.1f%%', shadow=True,\n startangle=140)\n plt.axis('equal')\n\n # set the title\n plt.title(\"Popularity of Programming Language\\n\" + \"Worldwide, Mar 2019 compared to a year ago\",\n bbox={'facecolor': '0.8', 'pad': 5})\n plt.show()\n elif choice == 2:\n exit()\n else:\n print(\"Enter valid choice\")\n else:\n print(\"Enter only numbers\")\n except Exception as e:\n print(e)\n\n\n# obj of class created\nobject = ChartByTitlePopularity()\nobject.pie_chart()\n","sub_path":"Week4/Matplotlib_All/Matplotlib_PieChart/programs/piechart by title of popularity of lang.py","file_name":"piechart by title of popularity of lang.py","file_ext":"py","file_size_in_byte":2205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"40366124","text":"# /usr/bin/python\nimport requests\nimport json\n\n\nclass BDWatchdog:\n OPENTSDB_URL = \"opentsdb\"\n OPENTSDB_PORT = 4242\n NO_METRIC_DATA_DEFAULT_VALUE = 0 # -1\n\n def __init__(self, server=None):\n if not server:\n self.server = \"http://{0}:{1}\".format(self.OPENTSDB_URL, str(int(self.OPENTSDB_PORT)))\n else:\n self.server = server\n self.session = requests.Session()\n\n def get_points(self, query, tries=3):\n try:\n r = self.session.post(self.server + \"/api/query\", data=json.dumps(query),\n headers={'content-type': 'application/json', 'Accept': 'application/json'})\n if r.status_code == 200:\n return json.loads(r.text)\n else:\n r.raise_for_status()\n except requests.ConnectionError as e:\n tries -= 1\n if tries <= 0:\n raise e\n else:\n self.get_points(query, tries)\n\n # def get_points(self, query):\n # r = self.session.post(self.server + \"/api/query\", data=json.dumps(query),\n # headers={'content-type': 'application/json', 'Accept': 'application/json'})\n # if 
r.status_code == 200:\n # return json.loads(r.text)\n # else:\n # r.raise_for_status()\n\n def get_structure_timeseries(self, structure_name, start, end, retrieve_metrics, downsample=5):\n usages = dict()\n subquery = list()\n for metric in retrieve_metrics:\n metric_name = metric[0]\n metric_tag = metric[1]\n usages[metric_name] = dict()\n subquery.append(dict(aggregator='zimsum', metric=metric_name, tags={metric_tag: structure_name},\n downsample=str(downsample) + \"s-avg\"))\n\n query = dict(start=start, end=end, queries=subquery)\n result = self.get_points(query)\n\n for metric in result:\n dps = metric[\"dps\"]\n metric_name = metric[\"metric\"]\n usages[metric_name] = dps\n\n return usages\n\n @staticmethod\n def perform_hysteresis_aggregation(timeseries):\n hysteresis_count = 0\n points = list(timeseries.items())\n if points:\n # Perform the differentiation\n previous_time = int(points[0][0])\n previous_value = points[0][1]\n for point in points[1:]:\n time = int(point[0])\n value = point[1]\n diff_time = time - previous_time\n diff_value = abs(value - previous_value)\n hysteresis_count += diff_value / diff_time\n previous_time = time\n previous_value = value\n return hysteresis_count\n\n @staticmethod\n def perform_timeseries_range_apply(timeseries, ymin=0, ymax=None):\n check_range = True\n try:\n if ymin:\n int(ymin)\n if ymax:\n int(ymax)\n if (not ymax and ymin == 0) or ymin >= ymax:\n check_range = False\n except ValueError:\n check_range = False\n\n if check_range:\n points = list(timeseries.items())\n for point in points:\n key = point[0]\n value = point[1]\n if value > ymax:\n timeseries[key] = ymax\n elif ymin and value < ymin:\n timeseries[key] = ymin\n return timeseries\n\n @staticmethod\n def perform_check_for_missing_metric_info(timeseries, max_diff_time=30):\n misses = list()\n if timeseries:\n points = list(timeseries.items())\n previous_timestamp = int(points[0][0])\n for point in points[1:]:\n timestamp = int(point[0])\n diff_time = timestamp - previous_timestamp\n if diff_time >= max_diff_time:\n misses.append({\"time\": previous_timestamp, \"diff_time\": diff_time})\n previous_timestamp = timestamp\n return misses\n\n @staticmethod\n def perform_structure_aggregations(start, end, metrics):\n usages = dict()\n for metric in metrics:\n summatory = 0\n points = list(metrics[metric].items())\n if points:\n # Perform the integration through trapezoidal steps\n previous_time = int(points[0][0])\n previous_value = points[0][1]\n for point in points[1:]:\n time = int(point[0])\n value = point[1]\n diff_time = time - previous_time\n added_value = value + previous_value\n summatory += (added_value / 2) * diff_time\n previous_time = time\n previous_value = value\n average = summatory / (end - start)\n usages[metric] = dict()\n usages[metric][\"AVG\"] = average\n usages[metric][\"SUM\"] = summatory\n return usages\n","sub_path":"opentsdb/bdwatchdog.py","file_name":"bdwatchdog.py","file_ext":"py","file_size_in_byte":5047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"40722269","text":"class Pet():\n \"\"\" A class to capture useful information regarding my pets, just incase I lose track of them.\"\"\"\n\n def __init__(self, height=5):\n self.height = height\n self.is_human = False\n self.owner = 'Kanon'\n\n\nchubbles = Pet(5)\nif chubbles.is_human is False:\n print('Not a human')\n\nprint('owner name is: {}'.format(chubbles.owner))\n# 
print(chubbles.__doc__)\nprint(chubbles.height)\n","sub_path":"ConstructingPython/pet.py","file_name":"pet.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"209463336","text":"from product import models as mx\n\n\ndef requisites(request):\n req_obj = mx.Requisites.objects.first()\n if not req_obj:\n return {\"requisites\": {}}\n return {\n \"requisites\": {\n k: v for k, v in req_obj.__dict__.items()\n if k in [f.name for f in mx.Requisites._meta.local_fields]\n }\n }\n","sub_path":"core/context_processor.py","file_name":"context_processor.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"582454202","text":"#!/usr/bin/python3\n\"\"\"Flask web\"\"\"\n\nfrom models import storage\nfrom flask import Flask, render_template\n\napp = Flask(__name__)\n\n\n@app.teardown_appcontext\ndef tr_down(exception):\n \"\"\"close session\"\"\"\n storage.close()\n\n\n@app.route('/states_list', strict_slashes=False)\ndef list_state():\n \"\"\"list states\"\"\"\n all_state = storage.all(\"State\")\n all_states = sorted(list(all_state.values()), key=lambda x: x.name)\n return render_template('7-states_list.html', all_states=all_states)\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port='5000')\n","sub_path":"web_flask/7-states_list.py","file_name":"7-states_list.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"294031054","text":"import numpy as np\nimport math\n\nclass Row:\n def __init__(self, verticalLine, dim):\n\n self.combinations = []\n self.strEndCell = 0\n self.numberOfNumbers = 0\n self.positionToShift = 0\n self.currentCombination = []\n self.positionHolded = 0\n self.lastCell = 0;\n self.dim = dim\n self.verticalLine = verticalLine\n self.numberOfNumbers = len(verticalLine)\n\n self.makeFirstCombination(0)\n self.makeCombinations()\n\n self.currentCombinationCounter = 0\n\n def makeCombinations(self):\n while self.isNextCombination():\n self.makeNextCombination()\n\n\n def makeFirstCombination(self, lastCell):\n self.lastCell = lastCell\n self.currentCombination = [0] * self.dim\n numberSum = 0\n\n for number in self.verticalLine:\n for i in range(0, number):\n self.currentCombination[lastCell] = 2\n lastCell += 1\n lastCell += 1\n numberSum += number\n self.positionHolded = self.numberOfNumbers + numberSum\n\n self.combinations.append(list(self.currentCombination))\n\n def isNextCombination(self):\n result = False\n position = self.dim - self.strEndCell - 1\n\n for posCounter in reversed(range(0, position - self.strEndCell + 1)):\n if self.currentCombination[posCounter] == 2:\n first = posCounter + 1\n second = self.dim - self.strEndCell\n if (posCounter + 1) < (self.dim - self.strEndCell):\n if self.currentCombination[posCounter + 1] == 0:\n if posCounter + 2 < self.dim - self.strEndCell:\n if self.currentCombination[posCounter + 2] == 0:\n self.positionToShift = posCounter\n result = True\n break\n else:\n self.positionToShift = posCounter\n result = True\n break\n\n if result == False and self.numberOfNumbers != 1:\n if self.dim - self.strEndCell*2 - self.positionHolded > 0:\n self.strEndCell += 1\n self.makeFirstCombination(self.strEndCell)\n self.makeCombinations()\n\n return result\n\n def makeNextCombination(self):\n position = self.positionToShift\n\n for posCounter in reversed(range(0, 
position + 1)):\n            if self.currentCombination[posCounter] == 2:\n                self.currentCombination[posCounter + 1] = 2\n                self.currentCombination[posCounter] = 0\n            else:\n                break\n        self.combinations.append(list(self.currentCombination))\n\n    def getCurrentCombination(self):\n        return self.combinations[self.currentCombinationCounter]\n    def getCombinationCounter(self):\n        return self.currentCombinationCounter\n    def resetCombinationCounter(self):\n        self.currentCombinationCounter = 0\n    def increaseCombinationCounter(self):\n        self.currentCombinationCounter += 1\n    def getCombinationCount(self):\n        return len(self.combinations)\n# ----------------------------------------------------------------------------\nclass Column:\n    def __init__(self, horizontalLine, dim):\n        self.numberSum = 0\n        self.numberCount = 0\n\n        for value in horizontalLine:\n            self.numberSum += value\n            self.numberCount += 1\n    def getNumberSum(self):\n        return self.numberSum\n    def getNumberCount(self):\n        return self.numberCount\n# ----------------------------------------------------------------------------\n\ndef validateNonogram(map, cols, rowsCount):\n    # counters start at 0 and are updated column by column below\n    numberSum = 0\n    truesCount = 0\n    breakCount = 0\n    currentCol = 0\n\n    for col in cols:\n        truesCount = 0\n        numberSum = col.getNumberSum()\n        for i in range(rowsCount):\n            if(map[i][currentCol] == 2):\n                truesCount += 1\n            if i + 1 < rowsCount:\n                if map[i+1][currentCol] == 0:\n                    for j in range(i+1, rowsCount):\n                        if map[j][currentCol] == 2:\n                            breakCount += 1\n                            break\n        if col.getNumberCount() == 1:\n            if not(col.getNumberSum() == rowsCount):\n                if breakCount > 1:\n                    return False\n        if not(truesCount == numberSum) or not(breakCount == col.getNumberCount() - 1):\n            return False\n        currentCol += 1\n        breakCount = 0\n    return True\n\ndef solve(map, rows, cols):\n\n    for y in range(len(rows)):\n        combination = rows[y].getCurrentCombination()\n        for x in range(len(cols)):\n            map[y][x] = combination[x]\n    n = len(rows)\n\n    while(not validateNonogram(map, cols, len(rows))):\n        it = n - 1\n\n        if (rows[n-1].getCombinationCounter() != rows[n-1].getCombinationCount()-1):\n            rows[n-1].increaseCombinationCounter()\n            combination = rows[n-1].getCurrentCombination()\n            for k in range(len(cols)):\n                map[n-1][k] = combination[k]\n        else:\n            while(True):\n                if(it == 0):\n                    raise Exception('noResult')\n\n                rows[it].resetCombinationCounter()\n                combination = rows[it].getCurrentCombination()\n                for k in range(len(cols)):\n                    map[it][k] = combination[k]\n                if rows[it-1].getCombinationCounter() != rows[it-1].getCombinationCount()-1:\n                    rows[it-1].increaseCombinationCounter()\n                    combination = rows[it-1].getCurrentCombination()\n                    for k in range(len(cols)):\n                        map[it-1][k] = combination[k]\n                else:\n                    rows[it-1].increaseCombinationCounter()\n                    it -= 1\n\n                if(rows[it].getCombinationCounter() != rows[it].getCombinationCount()):\n                    break\n\n\n\nif __name__ == '__main__':\n    # -----------------------------MAPS--------------------------------------------\n    #5x5\n    cluesHorizontal1 = [1,2],[1,2],[2,1],[3],[1]\n    cluesVertical1 = [3],[3],[1],[4],[2]\n    #10x10\n    cluesHorizontal2 = [8],[2,5],[2,1],[3],[2,1],[6],[7],[1,1,3],[2,2],[2,2]\n    cluesVertical2 = [3,3],[3,2],[1,1],[1,3],[2,4],[2,1,2],[8],[2,1,3],[1,5],[2]\n    #15x15\n    cluesHorizontal3 = [4,2,3],[2,1,5],[1,1,9],[8],[1,1,4],[2],[2,3],[4,3,3],[4,4,4],[7,4],[2,6,3],[3,1],[4,3],[2,2],[1,1,3]\n    cluesVertical3 = [1,1,1,7],[2,7],[3,6,2],[1,6,1],[1,1,2,1],[1,3],[3,3],[5,3,1],[1,3,3,1],[4,2],[4],[4,2],[3,4,1,1],[2,4,3],[1,8]\n\n    # rows = 
[]\n # cols = []\n # colDim = len(cluesHorizontal1)\n # rowDim = len(cluesVertical1)\n #\n # for rowLine in cluesHorizontal1:\n # rows.append(Row(rowLine, rowDim))\n # for colLine in cluesVertical1:\n # cols.append(Column(colLine, colDim))\n #\n # map = [[0 for x in range(rowDim)] for x in range(colDim)]\n #\n # try:\n # solve(map, rows, cols)\n # print(map)\n # except :\n # print(\"Map has no result\")\n","sub_path":"BruteFirst.py","file_name":"BruteFirst.py","file_ext":"py","file_size_in_byte":7335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"15768825","text":"import torch\r\nimport torch.nn as nn\r\nfrom src.sent_encoder import SentenceEncoder\r\nfrom src.decoder import Decoder\r\nfrom torchcrf import CRF\r\n\r\nclass Transformer(nn.Module):\r\n def __init__(\r\n self,\r\n label_list,\r\n src_pad_idx,\r\n trg_pad_idx,\r\n embed_size=512,\r\n num_layers=6,\r\n forward_expansion=4,\r\n heads=8,\r\n dropout=0,\r\n device=\"cpu\",\r\n max_par_len=10,\r\n max_seq_len=20,\r\n bert_model=\"allenai/scibert_scivocab_uncased\",\r\n ):\r\n super(Transformer, self).__init__()\r\n self.encoder = SentenceEncoder(\r\n label_list,\r\n embed_size,\r\n num_layers,\r\n heads,\r\n device,\r\n forward_expansion,\r\n dropout,\r\n max_par_len,\r\n max_seq_len,\r\n )\r\n self.fc_out = nn.Linear(embed_size,len(label_list)+2)\r\n self.crf = CRF(len(label_list)+2, batch_first = True)\r\n self.src_pad_idx = src_pad_idx\r\n self.trg_pad_idx = trg_pad_idx\r\n self.device = device\r\n \r\n def make_src_mask(self,src):\r\n src_mask = (src != self.src_pad_idx).int()\r\n return src_mask.to(self.device)\r\n \r\n def make_trg_mask(self,trg):\r\n N,trg_len = trg.shape\r\n trg_mask = torch.tril(torch.ones((trg_len,trg_len))).expand(\r\n N,1,trg_len,trg_len\r\n )\r\n return trg_mask.to(self.device)\r\n\r\n def make_second_pass_mask(self,trg):\r\n N,trg_len = trg.shape\r\n trg_mask = torch.ones((trg_len,trg_len)).expand(\r\n N,1,trg_len,trg_len\r\n )\r\n return trg_mask.to(self.device)\r\n\r\n def make_crf_trg_mask(self, trg):\r\n trg_mask = (trg != self.trg_pad_idx)\r\n return trg_mask.to(self.device)\r\n\r\n def forward(self,src,trg, training, att_heat_map=False):\r\n src_mask = self.make_src_mask(src)\r\n trg_mask = self.make_crf_trg_mask(trg)\r\n enc_out = self.encoder(src,src_mask, att_heat_map)\r\n \r\n out = self.fc_out(enc_out)\r\n\r\n if training:\r\n crf_out = self.crf(out, trg, trg_mask, reduction='token_mean')\r\n\r\n else:\r\n crf_out = self.crf.decode(out,trg_mask)\r\n \r\n return crf_out\r\n","sub_path":"src/transformer.py","file_name":"transformer.py","file_ext":"py","file_size_in_byte":2301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"476632868","text":"from SocketServer import TCPServer, BaseRequestHandler\nfrom utilities import send_script, is_available\n\nscript = \"\"\nscript += \"def program():\\n\"\nscript += \"\\ttextmsg(\\\">> Entering program.\\\")\\n\"\nscript += \"\\tSERVER_ADDRESS = \\\"{SERVER_ADDRESS}\\\"\\n\"\nscript += \"\\tPORT = {PORT}\\n\"\nscript += \"\\ttextmsg(SERVER_ADDRESS)\\n\"\nscript += \"\\ttextmsg(PORT)\\n\"\nscript += \"\\tset_tcp(p{TCP})\\n\"\nscript += \"\\tMM2M = 1000.0\\n\"\nscript += \"\\tsocket_open(SERVER_ADDRESS, PORT)\\n\"\nscript += \"\\tcurrent_pose = get_forward_kin()\\n\"\nscript += \"\\ttextmsg(current_pose)\\n\"\nscript += \"\\tsocket_send_string([current_pose[0] * MM2M, current_pose[1] * MM2M, current_pose[2] * MM2M, current_pose[3], 
current_pose[4], current_pose[5]])\\n\"\nscript += \"\\tsocket_close()\\n\"\nscript += \"\\ttextmsg(\\\"<< Exiting program.\\\")\\n\"\nscript += \"end\\n\"\nscript += \"program()\\n\\n\\n\"\n\ndef list_str_to_list(str):\n str = str[(str.find(\"[\")+1):str.find(\"]\")]\n return [float(x) for x in str.split(\",\")]\n\nclass MyTCPHandler(BaseRequestHandler):\n\n def handle(self):\n # self.request is the TCP socket connected to the client\n pose = \"\"\n while pose.find(\"]\") == -1:\n pose += self.request.recv(1024)\n self.server.rcv_msg = pose\n self.server.server_close() # this throws an exception\n\n\ndef get_current_pose_cartesian(server_ip, server_port, ur_ip, tool_angle_axis):\n\n global script\n script = script.replace(\"{SERVER_ADDRESS}\", server_ip)\n script = script.replace(\"{PORT}\", str(server_port))\n script = script.replace(\"{TCP}\", str([tool_angle_axis[i] for i in range(len(tool_angle_axis))]))\n\n print (script)\n\n ur_available = is_available(ur_ip)\n\n if ur_available:\n # start server\n server = TCPServer((server_ip, server_port), MyTCPHandler)\n\n send_script(ur_ip, script)\n # send file\n try:\n server.serve_forever()\n except:\n return list_str_to_list(server.rcv_msg)\n\nif __name__ == \"__main__\":\n server_port = 30005\n server_ip = \"192.168.10.41\"\n ur_ip = \"192.168.10.20\"\n tool_angle_axis = [0,0,0.168,0,0,0]\n\n pose = get_current_pose_cartesian(server_ip, server_port, ur_ip, tool_angle_axis)\n\n print (\"pose\", pose)\n","sub_path":"src/ur_fabrication_control/online_control/ur_direct/get_current_pose_cartesian.py","file_name":"get_current_pose_cartesian.py","file_ext":"py","file_size_in_byte":2205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"229494749","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nactivations = {\n 'relu': F.relu,\n 'sigmoid': F.sigmoid,\n 'softmax': F.softmax\n}\n\n\nclass ConvBlock(nn.Module):\n\n def __init__(self, in_channels, out_channels, kernel_size, stride, padding, bias=True, activation='relu'):\n super(ConvBlock, self).__init__()\n self.activation = activation\n self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride,\n padding, bias=bias)\n\n def forward(self, x):\n x = self.conv(x)\n if self.activation != 'linear':\n x = activations[self.activation](x)\n return x\n\n\nclass ConvTransposeBlock(nn.Module):\n\n def __init__(self, in_channels, out_channels, kernel_size, stride, padding, bias=True, activation='relu'):\n super(ConvTransposeBlock, self).__init__()\n self.activation = activation\n self.conv_tr = nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride,\n padding, bias=bias)\n\n def forward(self, x):\n x = self.conv_tr(x)\n if self.activation != 'linear':\n x = activations[self.activation](x)\n return x\n\n\nclass ResBlock(nn.Module):\n\n def __init__(self, in_channels, out_channels, kernel_size, stride, padding, bias=False, isDownsample=False,\n isEqDecoder=False):\n super(ResBlock, self).__init__()\n\n self.isDownsample = isDownsample\n self.isEqDecoder = isEqDecoder\n if self.isDownsample:\n self.downsample = nn.AvgPool2d(2, 2, 0)\n self.equalize = nn.Conv2d(in_channels, out_channels, 2, 2, 0, bias=bias)\n\n if self.isEqDecoder:\n self.eq_dec = nn.Conv2d(in_channels, out_channels, 1, 1, 0, bias=bias)\n\n self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, bias=bias)\n self.bn1 = nn.BatchNorm2d(out_channels)\n self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size, 
stride, padding, bias=bias)\n self.bn2 = nn.BatchNorm2d(out_channels)\n\n def forward(self, x):\n identity = x\n if self.isDownsample:\n out = self.downsample(x)\n else:\n out = x\n out = F.relu(self.bn1(self.conv1(out)))\n out = self.bn2(self.conv2(out))\n\n if self.isDownsample:\n out = self.equalize(identity) + out\n elif self.isEqDecoder:\n out = self.eq_dec(identity) + out\n else:\n out = identity + out\n out = F.relu(out)\n return out\n","sub_path":"purelung/models/suppression/common_blocks.py","file_name":"common_blocks.py","file_ext":"py","file_size_in_byte":2576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"570012969","text":"from transformers.modeling_tf_xlm import TFXLMPreTrainedModel\nfrom transformers.modeling_tf_flaubert import TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_MAP, TFXLMMainLayer, TFFlaubertMainLayer\nfrom transformers.configuration_flaubert import FlaubertConfig\nfrom transformers.modeling_tf_utils import TFPreTrainedModel, get_initializer\nimport tensorflow as tf\n\nclass TFXLMForTokenClassification(TFXLMPreTrainedModel):\n def __init__(self, config, *inputs, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n self.num_labels = config.num_labels\n\n self.transformer = TFXLMMainLayer(config, name=\"transformer\")\n self.dropout = tf.keras.layers.Dropout(config.dropout)\n self.classifier = tf.keras.layers.Dense(\n config.num_labels, kernel_initializer=get_initializer(config.init_std), name=\"classifier\"\n )\n\n def call(self, inputs, **kwargs):\n transformer_outputs = self.transformer(inputs, **kwargs)\n sequence_output = transformer_outputs[0]\n\n sequence_output = self.dropout(sequence_output, training=kwargs.get(\"training\", False))\n logits = self.classifier(sequence_output)\n\n outputs = (logits,) + transformer_outputs[1:] # Keep new_mems and attention/hidden states if they are here\n return outputs\n\nclass TFFlaubertForTokenClassification(TFXLMForTokenClassification):\n config_class = FlaubertConfig\n pretrained_model_archive_map = TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_MAP\n\n def __init__(self, config, *inputs, **kwargs):\n super(TFFlaubertForTokenClassification, self).__init__(config, *inputs, **kwargs)\n self.transformer = TFFlaubertMainLayer(config, name=\"transformer\")","sub_path":"api/flaubert_token_classification.py","file_name":"flaubert_token_classification.py","file_ext":"py","file_size_in_byte":1687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"17157702","text":"from django import forms\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.forms import AuthenticationForm as BaseAuthenticationForm\n\nfrom registration.forms import RegistrationFormTermsOfService as BaseRegistrationForm\nfrom registration.models import RegistrationProfile\n\nfrom uni_form.helpers import Layout, Fieldset, Row, HTML\nfrom uni_form.helpers import FormHelper, Submit, Reset\n\nfrom profiles.models import UserProfile\n\nclass UserProfileForm(forms.ModelForm):\n\n first_name = forms.CharField(max_length=30, required=True)\n last_name = forms.CharField(max_length=30, required=True)\n email = forms.EmailField(label=\"E-mail\", max_length=75)\n\n # Attach a formHelper to your forms class.\n helper = FormHelper()\n # Add in a class and id\n helper.form_id = 'id_userprofile-form'\n helper.form_class = 'userprofile-form'\n #helper.form_action = ''\n helper.form_method = 'post'\n \"\"\"\n 
style = \"\"\n layout = Layout(\n Fieldset('',\n HTML(style),\n 'username',\n 'email',\n Row('password1', 'password2'),\n )\n )\n helper.add_layout(layout)\n \"\"\"\n\n # add in a submit and reset button\n submit = Submit('update','update')\n helper.add_input(submit)\n\n def __init__(self, *args, **kwargs):\n super(UserProfileForm, self).__init__(*args, **kwargs)\n\n class Meta:\n model = UserProfile\n fields = ('first_name', 'last_name', 'email', 'phone', 'fax', 'photo')\n\n def clean_email(self):\n \"\"\"\n Validate that the supplied email address is unique for the\n site. Exclude the instance provided to the form as we want\n Users to be able to submit the form with their current address\n without getting an error.\n \"\"\"\n if User.objects.filter(email__iexact=self.cleaned_data['email']).exclude(pk=self.instance.id):\n raise forms.ValidationError(\"This email address is already in use. Please supply a different email address.\")\n return self.cleaned_data['email']\n\n\n def save(self, *args, **kwargs):\n pass\n\nclass UserChangeForm(forms.ModelForm):\n # Attach a formHelper to your forms class.\n helper = FormHelper()\n # Add in a class and id\n helper.form_id = 'id_user-form'\n helper.form_class = 'user-form'\n #helper.form_action = ''\n helper.form_method = 'post'\n \"\"\"\n style = \"\"\n layout = Layout(\n Fieldset('',\n HTML(style),\n 'username',\n 'email',\n Row('password1', 'password2'),\n )\n )\n helper.add_layout(layout)\n \"\"\"\n\n # add in a submit and reset button\n submit = Submit('update','update')\n helper.add_input(submit)\n\n class Meta:\n model = User\n fields = ('first_name', 'last_name', 'email',)\n\n #this forcing the email to be unique breaks\n #updating the profile\n\n def clean_email(self):\n \"\"\"\n Validate that the supplied email address is unique for the\n site. Exclude the instance provided to the form as we want\n Users to be able to submit the form with their current address\n without getting an error.\n \"\"\"\n if User.objects.filter(email__iexact=self.cleaned_data['email']).exclude(pk=self.instance.id):\n raise forms.ValidationError(\"This email address is already in use. 
Please supply a different email address.\")\n return self.cleaned_data['email']\n\n\nattrs_dict = { 'class': 'required' }\n\nclass AuthenticationForm(BaseAuthenticationForm):\n # Attach a formHelper to your forms class.\n helper = FormHelper()\n\n # Add in a class and id\n helper.form_id = 'id_authentication-form'\n helper.form_class = 'authentication-form'\n\n #helper.form_action = ''\n helper.form_method = 'post'\n \"\"\"\n style = \"\"\n layout = Layout(\n Fieldset('',\n HTML(style),\n 'username',\n 'email',\n Row('password1', 'password2'),\n )\n )\n helper.add_layout(layout)\n \"\"\"\n # add in a submit and reset button\n submit = Submit('login','login')\n helper.add_input(submit)\n\n def __init__(self, request=None, *args, **kwargs):\n super(AuthenticationForm, self).__init__(*args, **kwargs)\n\n\nclass RegistrationForm(BaseRegistrationForm):\n # Attach a formHelper to your forms class.\n helper = FormHelper()\n # Add in a class and id\n helper.form_id = 'id_registration-form'\n helper.form_class = 'registration-form'\n #helper.form_action = ''\n helper.form_method = 'post'\n \"\"\"\n style = \"\"\n layout = Layout(\n Fieldset('',\n HTML(style),\n 'username',\n 'email',\n Row('password1', 'password2'),\n )\n )\n helper.add_layout(layout)\n \"\"\"\n\n # add in a submit and reset button\n submit = Submit('register','register')\n helper.add_input(submit)\n reset = Reset('reset','clear')\n helper.add_input(reset)\n\n def clean_email(self):\n \"\"\"\n Validate that the supplied email address is unique for the\n site.\n\n \"\"\"\n if User.objects.filter(email__iexact=self.cleaned_data['email']):\n raise forms.ValidationError(_(\"This email address is already in use. Please supply a different email address.\"))\n return self.cleaned_data['email']\n\n\n def save(self, profile_callback=None):\n new_user = RegistrationProfile.objects.create_inactive_user(username=self.cleaned_data['username'],\n password=self.cleaned_data['password1'],\n email=self.cleaned_data['email'])\n\n new_profile = UserProfile(user=new_user)\n new_profile.save()\n\n return new_user\n\n\n","sub_path":"profiles/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":6129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"511714174","text":"for _ in range(int(input())):\n\tn,h=[int(i) for i in input().split()]\n\trow=[n]*n\n\tfor i in range(n):\n\t\ta,b=[int(i) for i in input().split()]\n\t\tfor j in range(a,b+1):\n\t\t\trow[j]-=1\n\tans=0\n\tfor i in range(h):\n\t\tans+=row[i]\n\tend=h\n\tstart=0\n\ttemp=ans\n\twhile end!=n:\n\t\ttemp=temp-row[start]+row[end]\n\t\tif temp>>\"))\n\ntol1=0\ntol2=0\ndolist=[120,330,500,700,99999]#度數清單\nalist=[3.02,4.39,4.97,5.63]\nblist=[2.68,3.61,4.01,4.50] \n\n\nif do <= 120:\n tol1,tol2=do*2.1,do*2.1\n print(\"Summer months:\",round(tol1,2),'\\nNon-Summer months:',round(tol2,2))\nelse:\n tol1,tol2=120*2.1,120*2.1\n \n for j,k in enumerate(dolist):\n \n \n \n if do > dolist[j] and do <= dolist[j+1]:\n \n tol1+=(do-dolist[j])*alist[j]\n tol2+=(do-dolist[j])*blist[j]\n \n print(\"Summer months:\",round(tol1,2),'\\nNon-Summer months:',round(tol2,2))\n break\n else:\n \n tol1+=(dolist[j+1]-dolist[j])*alist[j]\n tol2+=(dolist[j+1]-dolist[j])*blist[j]\n \n \n\n\n","sub_path":"2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"230583551","text":"import threading\n\nimport schedule\nimport time\n\nfrom ibapi.client 
import EClient\nfrom ibapi.contract import Contract\nfrom ibapi.order import Order\nfrom ibapi.utils import iswrapper\nfrom ibapi.wrapper import EWrapper\n\nfrom GooseIndicatorMay import GooseIndicator, Signal\n\nLAST_ORDER_ACTION=\"\"\nINITIAL_BUY_DONE=False\n\nclass IBapi(EWrapper, EClient):\n def __init__(self):\n EClient.__init__(self, self)\n self.started = False\n self.globalCancelOnly = False\n\n @iswrapper\n def nextValidId(self, orderId: int):\n super().nextValidId(orderId)\n self.nextorderId = orderId\n print('The next valid order id is: ', self.nextorderId)\n\n self.start()\n\n def start(self):\n # print(\"inside start\")\n\n if self.started:\n return\n\n self.started = True\n\n if self.globalCancelOnly:\n print(\"Executing GlobalCancel only\")\n self.reqGlobalCancel()\n else:\n # print(\"Executing requests\")\n self.evaluateAndExecute()\n # print(\"Executing requests ... finished\")\n\n def orderStatus(self, orderId, status, filled, remaining, avgFullPrice, permId, parentId, lastFillPrice, clientId,\n whyHeld, mktCapPrice):\n print('orderStatus - orderid:', orderId, 'status:', status, 'filled', filled, 'remaining', remaining,\n 'lastFillPrice', lastFillPrice)\n\n def openOrder(self, orderId, contract, order, orderState):\n print('openOrder id:', orderId, contract.symbol, contract.secType, '@', contract.exchange, ':', order.action,\n order.orderType, order.totalQuantity, orderState.status)\n\n\n def execDetails(self, reqId, contract, execution):\n print('Order Executed: ', reqId, contract.symbol, contract.secType, contract.currency, execution.execId,\n execution.orderId, execution.shares, execution.lastLiquidity)\n\n def evaluateAndExecute(self):\n # print(\"Inside evaluateAndExecute\")\n global LAST_ORDER_ACTION, INITIAL_BUY_DONE\n # Check if the signal is Buy or Sell\n actionType = GooseIndicator().deriveIndicatorAndPlaceOrder()\n order = Order()\n\n if actionType!= Signal.DO_NOTHING:\n\n if actionType.value == LAST_ORDER_ACTION:\n print('No Short or Long please. Moving on!')\n else:\n print(\"Last Action :: \",LAST_ORDER_ACTION, \" ,Current Action:: \",actionType.value)\n\n # Create order object\n order.totalQuantity = 1\n order.orderId = self.nextorderId\n order.orderType = 'MKT'\n order.isOmsContainer = False\n self.nextorderId += 1\n order.transmit = True\n\n if(INITIAL_BUY_DONE):\n order.totalQuantity = 2\n print (\"initial buy done, order size is 2\")\n\n if actionType == Signal.BUY:\n order.action = 'BUY'\n elif actionType == Signal.SELL:\n order.action = 'SELL'\n\n # place order\n self.placeOrder(self.nextorderId, SimpleFuture(), order)\n LAST_ORDER_ACTION = actionType.value\n INITIAL_BUY_DONE=True\n\n self.disconnect()\n # print (\"exiting evalAndExecute\")\n # time.sleep(3)\n\ndef SimpleFuture():\n #! [futcontract]\n contract = Contract()\n contract.symbol = \"ES\"\n contract.secType = \"FUT\"\n contract.exchange = \"GLOBEX\"\n contract.currency = \"USD\"\n contract.lastTradeDateOrContractMonth = \"202006\"\n #! 
[futcontract]\n    return contract\n\ndef job():\n    app = IBapi()\n    app.connect('127.0.0.1', 7497, 123)\n    app.run()\n    #app.evaluateAndExecute()\n    #app.disconnect()\n    # print(\"called goose\")\n\nschedule.every(1).second.do(job)\n\nwhile True:\n    schedule.run_pending()\n    time.sleep(1)\n\n","sub_path":"SchedulerWithClientv5.py","file_name":"SchedulerWithClientv5.py","file_ext":"py","file_size_in_byte":3890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"425520026","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n@version: 3.5\n@author: morgana\n@license: Apache Licence \n@contact: vipmorgana@gmail.com\n@site: \n@software: PyCharm\n@file: homework.py\n@time: 2017/6/23 10:03 AM\n\"\"\"\nimport logging\n\ndef get_logger():\n    # logging.basicConfig(\n    #\n    #     level=logging.DEBUG,\n    #     format='%(asctime)s %(filename)s[line:%(lineno)d]% %(name)s:%(message)s',\n    #     filename=\"log.txt\",\n    #     filemode='w',\n    #\n    # )\n    # logging.debug('debug message')\n    # logging.info('info message')\n    # logging.warning('warning,message')\n    # logging.error('error,messaage')\n    # logging.critical('critical message')\n\n    logger_obj=logging.getLogger()\n    fh=logging.FileHandler(\"logger_file.txt\")\n    ch=logging.StreamHandler()\n    logger_obj.addHandler(fh)\n    logger_obj.addHandler(ch)\n    formatter=logging.Formatter(\"%(asctime)s-%(lineno)s-%(levelname)s-%(name)s:%(message)s\")\n    fh.setFormatter(formatter)\n    ch.setFormatter(formatter)\n    logger_obj.setLevel(logging.DEBUG)\n    return logger_obj\n\nlogger_obj=get_logger()\n\n# logger_obj.debug(\"debug\")\n# logger_obj.info(\"info\")\n# logger_obj.error(\"error\")\n# logger_obj.warning(\"warning\")\n# logger_obj.critical(\"critical\")\n\nnames = ['tom', 'jerry', 'tom', 'spike', 'jerry']  # sample input: drop duplicates while keeping order\nseen = set()\nnames = [x for x in names if not (x in seen or seen.add(x))]\nprint(names)\n\n\n\n\n\n\n\n","sub_path":"D20170622sys/homework/homework.py","file_name":"homework.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"357280809","text":"# TASK\n# Input data: 3 numbers x, y and z. x, y are non-negative integers, z equals 0 or 1. x is not equal to 0\n# They are passed as command-line arguments.\n# Output: the string \"Everybody sing a song: <song text>\", where <song text>\n# is made up of y verses separated by spaces. All verses are identical and consist of x 'la'\n# joined by hyphens. If z equals one, an exclamation mark is placed at the end, otherwise a period. If
# there are no verses, no space is placed before the period/exclamation mark.\n\n# SOLUTION\n# Importing modules to work with embedded functions\nimport sys\n\n# Assigning variables values of three cmd-line arguments\nx = int(sys.argv[1])\ny = int(sys.argv[2])\nz = int(sys.argv[3])\n\n# Defining constant part of song\nconst = \"Everybody sing a song: \"\n\n# Performing checks due to task\nif x > 1:\n    couplet = y*((x*(\"la\" + \"-\" ))[:-1]+ \" \")\nelse:\n    couplet = y*((x*\"la\" + \" \"))\n\nif y == 0:\n    if z == 1:\n        print(const [:-1] + couplet [:-1] + \"!\")\n    elif z == 0:\n        print(const [:-1] + couplet [:-1] + \".\")\nelse:\n    if z == 1:\n        print(const + couplet [:-1] + \"!\")\n    elif z == 0:\n        print(const + couplet [:-1] + \".\")\n","sub_path":"Module 1/task1_3.py","file_name":"task1_3.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"587926450","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('viapublica_app', '0001_initial'),\n    ]\n\n    operations = [\n        migrations.AddField(\n            model_name='reclamo',\n            name='latitud',\n            field=models.CharField(max_length=200, null=True, blank=True),\n        ),\n        migrations.AddField(\n            model_name='reclamo',\n            name='longitus',\n            field=models.CharField(max_length=200, null=True, blank=True),\n        ),\n        migrations.AlterField(\n            model_name='reclamo',\n            name='telefono',\n            field=models.CharField(max_length=20),\n        ),\n    ]\n","sub_path":"viapublica_app/migrations/0002_auto_20161017_1214.py","file_name":"0002_auto_20161017_1214.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"154855490","text":"class Solution:\n\tdef findWords(self, board, words):\n\t\tK = len(words)\n\t\ttrie = buildTrie(words)\n\t\tres = []\n\t\tM = len(board)\n\t\tN = len(board[0])\n\t\tfor i in range(M):\n\t\t\tfor j in range(N):\n\t\t\t\tdfs(i, j, board, trie, res)\n\t\treturn res\n\ndef dfs(i, j, board, p, res):\n\tc = board[i][j]\n\tif c == '#' or not p.children[ord(c) - ord('a')]:\n\t\treturn\n\tp = p.children[ord(c) - ord('a')]\n\tif p.word:\n\t\tres.append(p.word)\n\t\tp.word = None\n\tboard[i][j] = '#'\n\tdx = [1, 0, -1, 0]\n\tdy = [0, -1, 0, 1]\n\tM = len(board)\n\tN = len(board[0])\n\tfor d in range(4):\n\t\tx = i + dx[d]\n\t\ty = j + dy[d]\n\t\tif x < 0 or x > M-1 or y < 0 or y > N-1:\n\t\t\tcontinue\n\t\tdfs(x,y,board,p,res)\n\tboard[i][j] = c\n\t\t\n\ndef buildTrie(words):\n\troot = TrieNode('')\n\tfor word in words:\n\t\tp = root\n\t\tfor c in word:\n\t\t\tpos = ord(c) - ord('a')\n\t\t\tif not p.children[pos]:\n\t\t\t\tp.children[pos] = TrieNode(c)\n\t\t\tp = p.children[pos]\n\t\tp.word = word\n\treturn root\n\nclass TrieNode:\n\tdef __init__(self, val):\n\t\tself.val = val\n\t\tself.word = None\n\t\tself.children = [None] * 26\n\t\t\nif __name__ == '__main__':\n\tsol = Solution()\n\twords = [\"oath\",\"pea\",\"eat\",\"rain\"]\n\tboard = [\n\t\t['o','a','a','n'],\n\t\t['e','t','a','e'],\n\t\t['i','h','k','r'],\n\t\t['i','f','l','v']\n\t]\n\t# print(sol.findWords(board, 
words))\n\tprint(sol.findWords([[\"b\"],[\"a\"],[\"b\"],[\"b\"],[\"a\"]],[\"baa\",\"abba\",\"baab\",\"aba\"]))\n\n","sub_path":"python/212.word_search_2.py","file_name":"212.word_search_2.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"108603955","text":"import json\nfrom flask import Blueprint, request, make_response, jsonify\nfrom flask_jwt_extended import jwt_required, get_jwt_identity\nfrom api.models.offices_model import Office\nfrom api.models.candidates_model import Candidate\nfrom api.models.users_model import User\nfrom api.models.parties_model import Party\nfrom api.models.votes_model import Vote\nfrom api.utils.validator import return_response, return_error, check_json_office_keys\nfrom api.utils.validator import validate_string_data_type, sanitize_input\nfrom api.utils.validator import validate_int_data_type, validate_office_type\n\n\nOFFICE_BLUEPRINT = Blueprint('offices', __name__)\n\n@OFFICE_BLUEPRINT.route('/offices', methods=['POST'])\n@jwt_required\ndef add_offices():\n \"\"\"post office\"\"\"\n current_user = get_jwt_identity()\n if current_user['is_admin']:\n try:\n data = request.get_json()\n name=data['name']\n office_type=data['office_type']\n\n if(validate_string_data_type(name) == False):\n return return_error(400, \"The name should contain characters that form a word\")\n if(validate_string_data_type(office_type) == False):\n return return_error(400, \"The office type should contain characters that form a word\")\n if(sanitize_input(name) == False):\n return return_error(400, \"Provide a valid name i.e it should not contain spaces in between characters other than a word that makes sense\")\n if(sanitize_input(office_type) == False):\n return return_error(400, \"Provide a valid office type i.e it should not contain spaces in between characters other than a word that makes sense\")\n if(validate_office_type(office_type) == False):\n return return_error(400, \"Should be either legislative, federal, state or local\")\n except KeyError as e:\n return return_error(400, \"An error occurred while creating office {} is missing\".format(e.args[0]))\n\n office = Office(name=name, office_type=office_type)\n office = office.create_office()\n if office:\n return make_response(jsonify({\n \"status\":201,\n \"message\":\"Office {} created successfully\".format(name),\n \"data\": [{\n \"name\" : name,\n \"office_type\":office_type\n }]\n }),201)\n return return_error(409, \"The office already exist create another office\")\n\n return make_response(jsonify({\n \"status\":401,\n \"message\": \"You are not authorized to perform this action\"\n }), 401)\n\n\n@OFFICE_BLUEPRINT.route('/offices', methods=['GET'])\ndef get_offices():\n \"\"\"get all the offices\"\"\"\n offices = Office(name=None, office_type=None)\n political_offices = offices.get_offices()\n if political_offices:\n return return_response(200, \"Request was successful\", political_offices)\n return return_response(200, \"No government offices were found register an office\", political_offices)\n\n@OFFICE_BLUEPRINT.route('/offices//result', methods=['GET'])\ndef get_results(office_id):\n office = Office(name=None, office_type=None)\n office = office.get_office(office_id)\n if not office:\n return return_error(404, \"no result of that office was found please register a new office\")\n vote = Vote(office_id=None,user_id=None, candidate_id=None)\n result = vote.get_results_of_a_particular_office(office_id)\n if result:\n 
return make_response(jsonify({\n \"office\": result\n }), 200)\n return return_error(404, \"No results were found\")\n\n@OFFICE_BLUEPRINT.route('/offices//candidates', methods=['GET'])\ndef get_registered_candidates(office_id):\n candidate = Candidate(office_id=None, party_id=None, candidate_id=None)\n candidates = candidate.get_all_registered_candidates(office_id)\n if candidates:\n return make_response(jsonify({\n \"data\": candidates\n }), 200)\n return return_error(404, \"No Candidates are currently registered\")\n\n@OFFICE_BLUEPRINT.route('/offices/', methods=['GET'])\ndef get_office(id):\n if(validate_int_data_type(id) == False):\n return return_error(400, \"Please provide id which is a number\")\n political_office = Office(name=None, office_type=None)\n\n office = political_office.get_office(id)\n if office:\n return make_response(jsonify({\n \"status\":200,\n \"message\":\"Office was successfully retrieved\",\n \"data\": [{\n \"name\" : office[\"name\"],\n \"office_type\":office[\"office_type\"]\n }]\n }), 200)\n return return_error(404,\"No office with that id was found\")\n\n@OFFICE_BLUEPRINT.route('/offices//register', methods=[\"POST\"])\n@jwt_required\ndef create_candidate(office_id):\n current_user = get_jwt_identity()\n if current_user['is_admin']:\n try:\n data = request.get_json()\n party_id=data['party_id']\n candidate_id=data['candidate_id']\n\n if(validate_int_data_type(party_id) == False):\n return return_error(400, \"Provide an Id for party that is a number\")\n if(validate_int_data_type(candidate_id) == False):\n return return_error(400, \"Provide an Id for a candidate that is a number\")\n except KeyError as e:\n return return_error(400, \"An error occurred {}\\\n is missing\".format(e.args[0]))\n\n user = User()\n user = user.get_user_by_id(candidate_id)\n if not user:\n return return_error(404, \"User does not exist\")\n\n party = Party(name=None,hq_address=None, logo_url=None)\n party = party.get_party(party_id)\n\n if not party:\n return return_error(404, \"The party does not exist\")\n\n office = Office(name=None, office_type=None)\n office = office.get_office(office_id)\n if not office:\n return return_error(404, \"The office does not exist\")\n\n candidate = Candidate(office_id=None, candidate_id=None,\\\n party_id=None)\n candidate = candidate.get_candidate(candidate_id)\n if not candidate:\n candidate = Candidate(office_id=office_id, candidate_id=candidate_id,\\\n party_id=party_id)\n new = candidate.create_a_candidate()\n if new:\n return make_response(jsonify({\n \"status\":201,\n \"message\":\"The candidate was created\",\n \"data\": [{\n \"office_id\" : office_id,\n \"candidate_id\":candidate_id,\n }]\n\n }),201)\n return return_error(400, \"An error occurred while registering the candidate\")\n return return_error(409, \"Candidate already registered for that party\")\n return return_error(401, \"You are not authorized to perform this action\")\n","sub_path":"api/views/offices.py","file_name":"offices.py","file_ext":"py","file_size_in_byte":6989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"577574907","text":"import MySQLdb\nimport database_creds\n\n#SQL\n\ncreate_db = 'CREATE DATABASE IF NOT EXISTS {};'\ncreate_table_raw_data = 'CREATE TABLE raw_data (id int AUTO_INCREMENT , user_id int, event_id int,amount int, PRIMARY KEY (id));'\ncreate_table_agg_data = 'CREATE TABLE agg_data (user_id int, balance int DEFAULT 0, event_number int DEFAULT 0, best_event int, worst_event int, PRIMARY KEY 
(user_id));'\ncreate_table_last_processed_id = 'CREATE TABLE last_processed_id (id int);'\n\n\n\n\ntables = {'raw_data': create_table_raw_data, 'agg_data': create_table_agg_data, 'last_processed_id': create_table_last_processed_id}\n\ndef create_table(table):\n    try:\n        cur.execute(tables[table])\n    except Exception:\n        print(\"Table {} already exists\".format(table)) \n\n\n\nif __name__ == '__main__':\n    try:\n        db = MySQLdb.connect(database_creds.host, database_creds.user, database_creds.passwd)\n        cur = db.cursor()\n    except Exception:\n        exit('Connection failed. Something went wrong')\n\n    cur.execute(create_db.format(database_creds.db_name))\n    cur.execute('USE {};'.format(database_creds.db_name)) \n    \n    for tab in tables.keys():\n        create_table(tab) \n\t\n\n","sub_path":"create.py","file_name":"create.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"210079058","text":"# -*- coding: utf-8 -*-\n# \nfrom __future__ import division\n\nimport otree.models\nfrom otree.db import models\nfrom otree import widgets\nfrom otree.common import Currency as c, currency_range, safe_json\nfrom otree.constants import BaseConstants\nfrom otree.models import BaseSubsession, BaseGroup, BasePlayer\n\nfrom otree.api import (\n    models, widgets, BaseConstants, BaseSubsession, BaseGroup, BasePlayer,\n    Currency as c, currency_range\n)\nimport random\n\n# \n\n\n\nauthor = 'Curtis Kephart'\n\ndoc = \"\"\"\nCSR Experiment - quiz for warm glow treatment vcm game\nDesigned by Chetan Dave and Alicja Reuben.\nFirst implementation by Curtis Kephart (curtiskephart@gmail.com) 2017.01\nQuiz\n\"\"\"\n\nclass Constants(BaseConstants):\n    name_in_url = 'csr_quiz_wg'\n    players_per_group = 4\n    task_timer = 270\n    num_rounds = 14\n    instructions_template = 'csr_1_quiz_warmGlow/instruc.html'\n\n\n\nclass Subsession(BaseSubsession):\n    pass\n\n\nclass Group(BaseGroup):\n\tpass\n\n\n\nclass Player(BasePlayer):\n    user_text = models.CharField(\n    \tdoc=\"user's transcribed text\")\n    is_correct = models.BooleanField(\n    \tdoc=\"did the user get the task correct?\")\n    final_score = models.IntegerField(\n    \tdoc=\"player's total score up to this round\")\n\n\n    quiz_01 = models.PositiveIntegerField(\n        verbose_name='Your earnings:',\n        min = 0,\n        max = 999,\n        initial=None,\n        doc='quiz answer')\n    \n    quiz_02 = models.PositiveIntegerField(\n        verbose_name='Your earnings:',\n        min = 0,\n        max = 999,\n        initial=None,\n        doc='quiz answer')\n\n    quiz_03 = models.PositiveIntegerField(\n        verbose_name='Your earnings:',\n        min = 0,\n        max = 999,\n        initial=None,\n        doc='quiz answer')\n    \n    quiz_04 = models.FloatField(\n        verbose_name='Your earnings:',\n        min = 0,\n        max = 999,\n        initial=None,\n        doc='quiz answer')\n    \n    quiz_05 = models.FloatField(\n        verbose_name='Your earnings:',\n        min = 0,\n        max = 999,\n        initial=None,\n        doc='quiz answer')\n    \n    quiz_06 = models.FloatField(\n        verbose_name='Your earnings:',\n        min = 0,\n        max = 999,\n        initial=None,\n        doc='quiz answer')\n    \n    quiz_07 = models.FloatField(\n        verbose_name='Your earnings:',\n        min = 0,\n        max = 999,\n        initial=None,\n        doc='quiz answer')\n    \n\n\n\n\n","sub_path":"csr_1_quiz_warmGlow/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"283735910","text":"import re\n\ndef is_isogram(string: str) -> bool:\n    if re.search('[^a-zA-Z-\\\\s]', string):\n        return False\n\n    string_lowered = string.lower()\n    
string_no_hyphens_no_whitespace = re.sub('[\\\\s-]', '', string_lowered)\n    string_list = list(sorted(string_no_hyphens_no_whitespace))\n\n    for el in range(0, len(string_list)-1, 1):\n        if string_list[el] == string_list[el+1]:\n            return False\n\n    return True\n","sub_path":"python/isogram/isogram.0.py","file_name":"isogram.0.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"569292342","text":"from imagepy import IPy\nfrom imagepy.core import ImagePlus\nimport wx\nimport numpy as np\nfrom imagepy.core.engine import Simple, Free\nfrom imagepy.core.manager import WindowsManager\nfrom scipy.ndimage import label, generate_binary_structure\nfrom skimage.measure import regionprops\nfrom shapely.geometry import Polygon, Point\nfrom skimage.measure import find_contours\nimport matplotlib.pyplot as plt\nimport matplotlib.path as mpath\nimport matplotlib.patches as mpatches\nfrom matplotlib.collections import PatchCollection\nimport matplotlib.font_manager as fm\nfrom wx.lib.pubsub import pub\nimport osr, gdal, ogr\nimport pandas as pd\n\ndef showicehist(areas):\n    plt.figure('Ice Histogram')\n    ax = plt.gca()\n    n, bins, patches = ax.hist(areas, 50)\n    ax.set_xlabel('Area (m^2)')\n    ax.set_ylabel('Frequency')\n    ax.set_title('Ice Area Histogram')\n    plt.show()\n\npub.subscribe(showicehist, 'showicehist')\n\n# center, area, l, extent, cov\nclass IceStatic(Simple):\n    title = 'Ice Static'\n    note = ['8-bit']\n\n    #process\n    def run(self, ips, imgs, para = None):\n        conts = find_contours(ips.img, 1e-6, fully_connected='low', positive_orientation='low')\n        trans = np.array(ips.data['trans']).reshape((2,3))\n\n        xian = 'PROJCS[\"Xian 1980 / Gauss-Kruger zone 17\", GEOGCS[\"Xian 1980\",DATUM[\"Xian_1980\",SPHEROID[\"Xian 1980\",6378140,298.257,AUTHORITY[\"EPSG\",\"7049\"]],AUTHORITY[\"EPSG\",\"6610\"]],PRIMEM[\"Greenwich\",0,AUTHORITY[\"EPSG\",\"8901\"]],UNIT[\"degree\",0.01745329251994328,AUTHORITY[\"EPSG\",\"9122\"]],AUTHORITY[\"EPSG\",\"4610\"]],PROJECTION[\"Transverse_Mercator\"],PARAMETER[\"latitude_of_origin\",0],PARAMETER[\"central_meridian\",120],PARAMETER[\"scale_factor\",1],PARAMETER[\"false_easting\",500000],PARAMETER[\"false_northing\",0],UNIT[\"metre\",1,AUTHORITY[\"EPSG\",\"9001\"]],AUTHORITY[\"EPSG\",\"2331\"]]'\n\n        osrprj = osr.SpatialReference()\n        osrprj.ImportFromWkt(ips.data['proj'])\n        osrgeo = osr.SpatialReference()\n        osrgeo.ImportFromWkt(xian)\n        ct = osr.CoordinateTransformation(osrprj, osrgeo)\n\n        data = []\n        for i in range(len(conts)):\n            cur = conts[i][:,::-1]\n            # convert pixel coordinates to longitude/latitude\n            jw = np.dot(trans[:,1:], cur.T).T+ trans[:,0]\n            # apply the coordinate transformation\n            xy = ct.TransformPoints(jw)\n            print(xy)\n            # compute area and centroid coordinates with the shapely library\n            polygon = Polygon(xy)\n            line = [i, polygon.area, polygon.centroid.x, polygon.centroid.y]\n            data.append([round(j) for j in line])\n\n        # IPy.table(ips.title+'-region', data, ['ID', 'Area', 'Center-X', 'Center-Y'])\n        IPy.show_table(pd.DataFrame(data, columns=['ID', 'Area', 'Center-X', 'Center-Y']), ips.title+'-region')\n        wx.CallAfter(pub.sendMessage, 'showicehist', areas=[i[1] for i in data])\n\nclass ShapeWriter(Simple):\n    title = 'Export To Shapefile'\n    note = ['8-bit']\n\n    #process\n    def run(self, ips, imgs, para = None):\n        folder = IPy.getpath('Export Shapefile', '', 'open')\n        if folder==None: return\n        conts = find_contours(ips.img, 1e-6, fully_connected='low', positive_orientation='low')\n        trans = np.array(ips.data['trans']).reshape((2,3))\n\n        gdal.SetConfigOption(\"GDAL_FILENAME_IS_UTF8\",\"NO\") \n        
gdal.SetConfigOption(\"SHAPE_ENCODING\",\"\") \n ogr.RegisterAll() \n driver = ogr.GetDriverByName('ESRI Shapefile') \n ds=driver.CreateDataSource(folder) \n shapLayer=ds.CreateLayer(\"ice\", geom_type=ogr.wkbPolygon)\n fieldDefn = ogr.FieldDefn('id', ogr.OFTString) \n fieldDefn.SetWidth(4) \n shapLayer.CreateField(fieldDefn); \n defn = shapLayer.GetLayerDefn() \n\n for i in range(len(conts)):\n cur = conts[i][:,::-1]\n jw = np.dot(trans[:,1:], cur.T).T+ trans[:,0]\n polygon = Polygon(jw)\n feature = ogr.Feature(defn) ; \n feature.SetField('ID', i) \n poly = ogr.CreateGeometryFromWkt(polygon.wkt)\n feature.SetGeometry(poly); \n shapLayer.CreateFeature(feature)\n feature.Destroy()\n\nclass WKTWriter(Simple):\n title = 'Export To WKT'\n note = ['8-bit']\n\n #process\n def run(self, ips, imgs, para = None):\n folder = IPy.getpath('Export WKT', 'files (*.wkt)|*.wkt', 'save')\n if folder==None: return\n conts = find_contours(ips.img, 1e-6, fully_connected='low', positive_orientation='low')\n trans = np.array(ips.data['trans']).reshape((2,3))\n f = open(folder, 'w')\n for i in range(len(conts)):\n cur = conts[i][:,::-1]\n jw = np.dot(trans[:,1:], cur.T).T+ trans[:,0]\n polygon = Polygon(jw)\n f.write(polygon.wkt)\n f.write('\\n')\n f.close()\n\ndef showice(img, ices, areas, para):\n plt.figure('Ice Segment')\n plt.imshow(img)\n patches = []\n for xy, c in zip(ices, areas):\n path = mpath.Path(xy[:,::-1])\n patch = mpatches.PathPatch(path, lw=1)\n patches.append(patch)\n #plt.gca().add_patch(patch)\n\n ps = PatchCollection(patches, alpha=255, cmap=plt.cm.rainbow)\n ps.set_array(areas)\n plt.gca().add_collection(ps)\n plt.gcf().colorbar(ps, ax=plt.gca())\n plt.grid()\n myfont = fm.FontProperties(fname='C:/Windows/Fonts/msyh.ttc')\n plt.title(para['title'], fontproperties=myfont, size=24)\n plt.show()\n\npub.subscribe(showice, 'showice')\n\nclass ShowResult(Simple):\n title = 'Show Ice Result'\n note = ['8-bit']\n para = {'title':'海冰分布图 2017-00-00 00:00:00'}\n view = [(str,'title', 'title', '')]\n\n #process\n def run(self, ips, imgs, para = None):\n conts = find_contours(ips.img, 1e-6, fully_connected='low', positive_orientation='low')\n trans = np.array(ips.data['trans']).reshape((2,3))\n xian = 'PROJCS[\"Xian 1980 / Gauss-Kruger zone 17\", GEOGCS[\"Xian 1980\",DATUM[\"Xian_1980\",SPHEROID[\"Xian 1980\",6378140,298.257,AUTHORITY[\"EPSG\",\"7049\"]],AUTHORITY[\"EPSG\",\"6610\"]],PRIMEM[\"Greenwich\",0,AUTHORITY[\"EPSG\",\"8901\"]],UNIT[\"degree\",0.01745329251994328,AUTHORITY[\"EPSG\",\"9122\"]],AUTHORITY[\"EPSG\",\"4610\"]],PROJECTION[\"Transverse_Mercator\"],PARAMETER[\"latitude_of_origin\",0],PARAMETER[\"central_meridian\",120],PARAMETER[\"scale_factor\",1],PARAMETER[\"false_easting\",500000],PARAMETER[\"false_northing\",0],UNIT[\"metre\",1,AUTHORITY[\"EPSG\",\"9001\"]],AUTHORITY[\"EPSG\",\"2331\"]]'\n\n osrprj = osr.SpatialReference()\n osrprj.ImportFromWkt(ips.data['proj'])\n osrgeo = osr.SpatialReference()\n osrgeo.ImportFromWkt(xian)\n ct = osr.CoordinateTransformation(osrprj, osrgeo)\n\n areas = []\n for i in conts:\n cur = i[:,::-1]\n jw = np.dot(trans[:,1:], cur.T).T+ trans[:,0]\n xy = ct.TransformPoints(jw)\n areas.append(Polygon(xy).area)\n\n areas = np.array(areas)\n #areas[np.argmin(areas)] = areas.max()/-2\n wx.CallAfter(pub.sendMessage, 'showice', img=ips.data['back'], ices=conts, areas=areas, para=para)\n\nclass Difference(Simple):\n \"\"\"Calculator Plugin derived from imagepy.core.engine.Simple \"\"\"\n title = 'Ice Difference'\n note = ['8-bit']\n para = {'temp':None}\n \n view 
= [('img', 'temp', 'object', '')]\n\n def run(self, ips, imgs, para = None):\n ips2 = WindowsManager.get(para['temp']).ips\n nimg = ips.img * 0\n nimg[ips.img>0] += 80\n nimg[ips2.img>0] += 160\n ips = ImagePlus([nimg], ips.title+'-diff-'+ips2.title)\n ips.data = ips2.data\n IPy.show_ips(ips)\n\nplgs = [Difference, IceStatic, ShowResult, ShapeWriter, WKTWriter]","sub_path":"menus/Ice-Segment/result_plgs.py","file_name":"result_plgs.py","file_ext":"py","file_size_in_byte":7602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"637860974","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport os\nimport argparse\nimport serial.serialposix as s\n\n# os.system('sudo systemctl disable hciuart')\n\nclass vedirect:\n\n def __init__(self, serialport, timeout):\n self.serialport = '/dev/ttyAMA0'\n self.ser = s.Serial('/dev/ttyAMA0', 19200, timeout=timeout)\n self.header1 = '\\r'\n self.header2 = '\\n'\n self.hexmarker = ':'\n self.delimiter = '\\t'\n self.key = ''\n self.value = ''\n self.bytes_sum = 0\n self.state = self.WAIT_HEADER\n self.dict = {}\n print('1')\n\n (HEX, WAIT_HEADER, IN_KEY, IN_VALUE, IN_CHECKSUM) = range(5)\n\n def input(self, byte):\n print('2')\n if byte == self.hexmarker and self.state != self.IN_CHECKSUM:\n self.state = self.HEX\n\n if self.state == self.WAIT_HEADER:\n self.bytes_sum += ord(byte)\n if byte == self.header1:\n self.state = self.WAIT_HEADER\n elif byte == self.header2:\n self.state = self.IN_KEY\n return None\n\n elif self.state == self.IN_KEY:\n self.bytes_sum += ord(byte)\n if byte == self.delimiter:\n if self.key == 'Checksum':\n self.state = self.IN_CHECKSUM\n else:\n self.state = self.IN_VALUE\n else:\n self.key += byte\n return None\n\n elif self.state == self.IN_VALUE:\n self.bytes_sum += ord(byte)\n if byte == self.header1:\n self.state = self.WAIT_HEADER\n self.dict[self.key] = self.value;\n self.key = '';\n self.value = '';\n else:\n self.value += byte\n return None\n\n elif self.state == self.IN_CHECKSUM:\n self.bytes_sum += ord(byte)\n self.key = ''\n self.value = ''\n self.state = self.WAIT_HEADER\n if (self.bytes_sum % 256 == 0):\n self.bytes_sum = 0\n return self.dict\n else:\n print('Malformed packet')\n self.bytes_sum = 0\n\n elif self.state == self.HEX:\n self.bytes_sum = 0\n if byte == self.header2:\n self.state = self.WAIT_HEADER\n\n else:\n raise AssertionError()\n\n\n def read_data(self):\n print('3')\n while True:\n byte = self.ser.read(1)\n packet = self.input(byte)\n\n\n def read_data_single(self):\n print('4')\n while True:\n byte = self.ser.read(1)\n packet = self.input(byte)\n if (packet != None):\n return packet\n\n\n def read_data_callback(self, callbackFunction):\n print('5')\n while True:\n print('hello')\n byte = self.ser.read(1)\n print(byte)\n print('im here')\n if byte:\n packet = self.input(byte)\n if (packet != None):\n callbackFunction(packet)\n else:\n break\n\n\ndef print_data_callback(data):\n print('6')\n print(data)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Process VE.Direct protocol')\n parser.add_argument('--port', help='Serial port')\n parser.add_argument('--timeout', help='Serial port read timeout', type=int, default='60')\n print('main')\n args = parser.parse_args()\n ve = vedirect(args.port, args.timeout)\n ve.read_data_callback(print_data_callback)\n 
#print(ve.read_data_single())\n","sub_path":"vedirect-master/vedirect.py","file_name":"vedirect.py","file_ext":"py","file_size_in_byte":3617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"574319004","text":"import glob\nfrom subprocess import check_call\nimport pandas as pd\nimport numpy as np\nfrom multiprocessing import Process\nimport os\nfrom shutil import copytree, ignore_patterns\n\n\ndef _convert_wav(df):\n # with open(f'error_{os.getpid()}.log', 'w') as f:\n # for _, row in df.iterrows():\n # check_call([\n # 'ffmpeg', '-v', '8', '-i', row['read_files'],\n # '-f', 'wav', '-acodec', 'pcm_s16le',\n # f\"{row['save_files']}\",\n # ], stderr=f)\n for _, row in df.iterrows():\n check_call([\n 'ffmpeg', '-v', '8', '-i', row['read_files'],\n '-f', 'wav', '-acodec', 'pcm_s16le',\n f\"{row['save_files']}\",\n ])\n\n\ndef convert_wav_interal(read_dir, wav_dir, n_jobs):\n files = glob.glob(f'{read_dir}/**/*.m4a', recursive=True)\n if len(files) == 0:\n raise ValueError('there are files exise in wav dir')\n df = pd.DataFrame({'read_files': files})\n df['save_files'] = df.read_files.str.replace('.m4a$', '.wav', regex=True) \\\n .str.replace(read_dir, wav_dir)\n\n dfs = np.array_split(df, n_jobs)\n processes = []\n for i, df in enumerate(dfs):\n p = Process(target=_convert_wav, args=(df,))\n p.start()\n print(f'process {i} has started')\n processes.append(p)\n\n for p in processes:\n p.join()\n\n\ndef convert_wav(read_dir, save_dir, n_jobs):\n copytree(read_dir, save_dir, ignore=ignore_patterns('*.wav', '*.m4a'))\n convert_wav_interal(read_dir, save_dir, n_jobs)\n\n\ndef down_sample_wav(read_dir, wav_dir, n_jobs):\n files = glob.glob(f'{read_dir}/**/*.wav', recursive=True)\n if len(files) == 0:\n raise ValueError('there is no exise in wav dir')\n df = pd.DataFrame({'read_files': files})\n df['save_files'] = df.read_files.str.replace(read_dir, wav_dir)\n\n dfs = np.array_split(df, n_jobs)\n processes = []\n for i, df in enumerate(dfs):\n p = Process(target=_down_sample, args=(df,))\n p.start()\n print(f'process {i} has started')\n processes.append(p)\n\n for p in processes:\n p.join()\n\n\ndef lauch_job(read_dir, wav_dir, filer_pattern, func_call, n_jobs, new_subfix=None):\n files = glob.glob(f'{read_dir}/{filer_pattern}', recursive=True)\n if len(files) == 0:\n raise ValueError('there are files exise in wav dir')\n df = pd.DataFrame({'read_files': files})\n df['save_files'] = df.read_files.str.replace(read_dir, wav_dir)\n\n if new_subfix:\n df['save_files'] =df['save_files'].str.replace('\\..+$', f'.{new_subfix}', regex=True)\n # df['save_files'] = df.read_files.str.replace('.m4a$', '.wav', regex=True) \\\n # .str.replace(read_dir, wav_dir)\n\n dfs = np.array_split(df, n_jobs)\n processes = []\n for i, df in enumerate(dfs):\n p = Process(target=func_call, args=(df,))\n p.start()\n print(f'process {i} has started')\n processes.append(p)\n\n for p in processes:\n p.join()\n\n# def _flac2wav(df):\n# for _, row in df.iterrows():\n# check_call([\n# 'flac', '-c', '-d', '-s', row['read_files'], row['save_files'],\n# ])\n\ndef _down_sample(df):\n for _, row in df.iterrows():\n check_call([\n 'sox', row['read_files'], '-r', '8000', row['save_files'],\n ])\n\n\ndef _concat_wav(df):\n with open(f'error_{os.getpid()}.log', 'w') as f:\n for _, row in df.iterrows():\n # print(row['read_parent_dir'])\n # print(row['save_parent_dir'])\n\n check_call([\n 'sox', f\"{row['read_parent_dir']}/*.wav\", f\"{row['save_parent_dir']}/concat.wav\"\n ], 
stderr=f)\n\n\ndef concat_wav_internal(read_wav_dir, concat_wav_dir, n_jobs):\n read_files = glob.glob(f'{read_wav_dir}/**/*.wav', recursive=True)\n if not read_files:\n raise ValueError('empty dir')\n read_parent_dir = pd.Series(read_files).str.rpartition('/')[0].drop_duplicates()\n save_parent_dir = read_parent_dir.str.replace(read_wav_dir, concat_wav_dir)\n df = pd.DataFrame({'read_parent_dir': read_parent_dir.values,\n 'save_parent_dir': save_parent_dir.values})\n\n dfs = np.array_split(df, n_jobs)\n processes = []\n for i, df in enumerate(dfs):\n p = Process(target=_concat_wav, args=(df,))\n p.start()\n print(f'process {i} has started')\n processes.append(p)\n\n for p in processes:\n p.join()\n\n\ndef concat_wav(read_wav_dir, concat_wav_dir, n_jobs):\n copytree(read_wav_dir, concat_wav_dir, ignore=ignore_patterns('*.wav', '*.m4a'))\n concat_wav_internal(read_wav_dir, concat_wav_dir, n_jobs)\n\n\ndef downsample_vox(read_wav_dir, concat_wav_dir, n_jobs):\n copytree(read_wav_dir, concat_wav_dir, ignore=ignore_patterns('*.wav', '*.m4a'))\n down_sample_wav(read_wav_dir, concat_wav_dir, n_jobs)\n\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('--func', choices=['m4a2wav', 'concat'], required=True)\n parser.add_argument('--read_dir', required=True)\n parser.add_argument('--save2', required=True)\n parser.add_argument('--nj', default=20)\n args = parser.parse_args()\n print(args)\n if args.func == 'm4a2wav':\n convert_wav(args.read_dir, args.save2, args.nj)\n elif args.func == 'concat':\n concat_wav(args.read_dir, args.save2, args.nj)\n else:\n raise ValueError\n","sub_path":"tools/vox_wav_tools.py","file_name":"vox_wav_tools.py","file_ext":"py","file_size_in_byte":5394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"341212383","text":"from app_code import app\nfrom flask import render_template\nfrom flask import request\n\nimport requests\nimport json\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\n\nimport mpld3\nfrom mpld3 import plugins, fig_to_html\n\n\n# class PointLabelTooltip(PluginBase):\n# \"\"\"A Plugin to enable a tooltip: text which hovers over points.\n\n# Parameters\n# ----------\n# points : matplotlib Collection or Line2D object\n# The figure element to apply the tooltip to\n# labels : array or None\n# If supplied, specify the labels for each point in points. If not\n# supplied, the (x, y) values will be used.\n# hoffset, voffset : integer\n# The number of pixels to offset the tooltip text. 
Default is\n# hoffset = 0, voffset = 10\n\n# Examples\n# --------\n# >>> import matplotlib.pyplot as plt\n# >>> from mpld3 import fig_to_html, plugins\n# >>> fig, ax = plt.subplots()\n# >>> points = ax.plot(range(10), 'o')\n# >>> plugins.connect(fig, PointLabelTooltip(points[0]))\n# >>> fig_to_html(fig)\n# \"\"\"\n# def __init__(self, points, labels=None,\n# hoffset=0, voffset=10, location=\"mouse\"):\n# if location not in [\"bottom left\", \"top left\", \"bottom right\",\n# \"top right\", \"mouse\"]:\n# raise ValueError(\"invalid location: {0}\".format(location))\n# if isinstance(points, matplotlib.lines.Line2D):\n# suffix = \"pts\"\n# else:\n# suffix = None\n# self.dict_ = {\"type\": \"tooltip\",\n# \"id\": get_id(points, suffix),\n# \"labels\": labels,\n# \"hoffset\": hoffset,\n# \"voffset\": voffset,\n# \"location\": location}\n\n\noutput=None\n\n@app.route('/')\n@app.route('/index')\ndef index():\n return render_template(\"index.html\")\n\n@app.route('/results') \ndef results():\n #Amazing code for interactive matplotlib:\n #http://mpld3.github.io/examples/html_tooltips.\n#these files bellow need to be first unzipped. Zipped format to avoit Githubs file size restrictions\n ClustLoc = pd.read_csv('app_code/data/ClusterCoords.txt', sep=\"\\t\") #The tree coords and the cluster they belong\n t_num = pd.read_csv('app_code/data/Numbers_tab.csv') #Number of trees\n t_surv = pd.read_csv('app_code/data/Survival_tab.csv') #Probability of survival\n t_name = pd.read_csv('app_code/data/Tree_names_tab.csv') #Match of latin and common names\n\n#Geocoding user input\n address =request.args.get('address')\n address = address.lower()\n\n#If there is no address input,or they just entered New York then return results for entire NY\n \n if address==\"ny\" or address==\"new york\" or address==\"nyc\" or address==\"new york city\" or len(address)==0:\n #Select and organize data\n #This is the numbers of each tree in New York\n num = t_num.drop('Cluster', 1)\n num = num.sum(axis=0)\n \n #Before I convert numbers to frequencies, get the total nuber of trees in New York\n num_trees = num.sum()\n \n #now convert each species to frequences\n num = num.divide(float(num_trees))*100\n\n #This is the probability of survival of each tree in New York\n temp_num = t_num.drop('Cluster', 1)\n temp_surv = t_surv.drop('Cluster', 1)\n\n #this is the calculation the species-wise survivors for the entire NYC\n temp_surv = pd.DataFrame(temp_num.values*temp_surv.values, columns=temp_num.columns, index=temp_num.index)\n temp_surv = temp_surv.sum(axis=0)\n\n #Now convert the absolute survivors to frequencies\n temp_num = temp_num.sum(axis=0)\n\n surv = temp_surv.divide(temp_num)*100\n\n #Create the New York-wise table\n frames = [surv, num]\n temp = pd.concat(frames,axis=1,keys=['surv', 'num'])\n #temp = temp.reset_index(level=1, drop=True)\n temp = temp.sort(columns='surv',ascending=False)\n\n #Finally, switch species for common names\n a = list(t_name['Common'])\n b = list(t_name['Latin'])\n c = list(temp.index.values)\n\n common_names = []\n for i in range(len(a)):\n for j in range(len(c)):\n if c[j]==b[i]:\n common_names.append(a[i])\n\n adrs = \"New York, NY\"\n else:\n if address.find(\"ny\")<0:\n address = address + \" NY\" \n#URL encoding\n service_url = \"https://maps.googleapis.com/maps/api/geocode/json?\"\n\n #if len(address)<1: print \"Please enter an address\"\n\n \n\n #These are urllib commands\n #url = service_url + urllib.urlencode({'address':address, 'key':\"###################################\"})\n # uh = 
urllib.urlopen(url)\n # data = uh.read()\n #js = json.loads(str(data))\n\n #These are requests commands\n parameters = {'address':address, 'key':\"#############################\"}\n\n r = requests.get(service_url, params=parameters)\n js = r.json()\n\n lat = js['results'][0]['geometry']['location']['lat']\n lon = js['results'][0]['geometry']['location']['lng']\n adrs = js['results'][0]['formatted_address']\n q = tuple([lat, lon])\n\n #Identify cluster\n ClustLoc['dLat'] = (ClustLoc.latitude - q[0])**2.\n ClustLoc['dLon'] = (ClustLoc.longitude - q[1])**2.\n ClustLoc['dist'] = (ClustLoc.dLat + ClustLoc.dLon)**0.5 \n i = ClustLoc['dist'].idxmin(axis='index') #the index of the minimum distance tree\n cluster = ClustLoc['Cluster'].ix[i] #its number\n\n #If the closest tree is more than 1.11Km away, then we have no data!\n # d = ClustLoc['dist'].min(axis='index')\n # if d>=0.01:\n # print \"Sorry there is no information for the location you chose\"\n\n #Select and organize data\n #This is the numbers of each tree in the selected cluster\n num = t_num[t_num['Cluster']==\"Cluster\"+str(cluster)]\n num = num.drop('Cluster', 1)\n num = num.dropna(axis='columns',)\n\n #Before I convert numbers to frequencies, get the total nuber of trees in the cluster\n num_trees = str(num.sum(axis=1))\n num_trees = float(num_trees.split()[1])\n num_trees = int(num_trees)\n\n #now convert each species to frequences\n num = num.divide(float(num.sum(axis=1)))*100\n\n #This is the probability of survival of each tree in the selected cluster\n surv = t_surv[t_surv['Cluster']==\"Cluster\"+str(cluster)]\n surv = surv.drop('Cluster', 1)\n surv = surv.dropna(axis=1)*100\n\n frames = [surv, num]\n temp = pd.concat(frames,keys=['surv', 'num'])\n temp = temp.reset_index(level=1, drop=True)\n temp = temp.transpose()\n temp = temp.sort(columns='surv',ascending=False)\n\n #Finally, switch species for common names\n a = list(t_name['Common'])\n b = list(t_name['Latin'])\n c = list(temp.index.values)\n\n common_names = []\n for i in range(len(a)):\n for j in range(len(c)):\n if c[j]==b[i]:\n common_names.append(a[i])\n\n#Pick top three values\n maximums = list(temp['surv'][0:3].values)\n\n first_val = \"%.2f\" % maximums[0]+'%'\n second_val = \"%.2f\" % maximums[1]+'%'\n third_val = \"%.2f\" % maximums[2]+'%'\n\n first_sp = common_names[0]\n second_sp = common_names[1]\n third_sp = common_names[2] \n\n#Make plot\n fig, ax = plt.subplots(subplot_kw=dict(axisbg='#FFFFFF'))\n\n scatter = ax.scatter(temp['surv'],\n temp['num'],\n c=temp['surv'],\n cmap ='Greens',\n s= 100,\n alpha=1)\n\n ax.set_title(\"Total number of trees considered: \"+ str(num_trees), size=20, color='darkgreen', fontweight='heavy')\n\n ax.set_xlabel('% Likelihood of survival past 10 years', fontsize=18)\n ax.set_ylabel('% of all trees considered', fontsize=18)\n\n labels = common_names\n\n tooltip = mpld3.plugins.PointHTMLTooltip(scatter, labels=labels)\n mpld3.plugins.connect(fig, tooltip) \n fig_html = mpld3.fig_to_html(fig)\n\n return render_template(\"results.html\",address=adrs, first_val=first_val,second_val=second_val,third_val=third_val,first_sp=first_sp,second_sp=second_sp,third_sp=third_sp, plot=fig_html) \n \n \n","sub_path":"app_code/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"344673931","text":"import psycopg2\nimport csv\nimport time\n\n\n# Clean contract data file from Jupyter notebook\ncontract_data_file = 
\"../../data/clean/2018_2019_contract_data.csv\"\n\n\n# DB setup\nconn = psycopg2.connect(\"dbname=contract_history user=postgres password=postgres\")\ncur = conn.cursor()\n\n\ndef add_record_to_db(contract):\n db_insert = \"\"\"INSERT INTO canada_new_contracts_2018_2019 \n (contract_number, amendment_number, award_date, expiry_date, contract_value, total_contract_value, gsin,\n gsin_description, competitive_tender, limited_tender_reason_description, supplier_standardized_name,\n supplier_operating_name, supplier_legal_name, supplier_address_city, supplier_address_province,\n supplier_address_postal_code, supplier_address_longitude, supplier_address_latitude, end_user_entity, \n contracting_address_city, contracting_address_province, contracting_address_postal_code, \n contracting_address_longitude, contracting_address_latitude)\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\"\"\"\n\n cur.execute(db_insert, [\n contract[0], contract[1], contract[2], contract[3], contract[4], contract[19], contract[5],\n contract[6], contract[7], contract[8], contract[12],\n contract[13], contract[14], contract[15], contract[16],\n contract[17], contract[27], contract[28], contract[21],\n contract[23], contract[24], contract[25],\n contract[29], contract[30]])\n\n conn.commit()\n\n\ndef process_data():\n with open(contract_data_file, 'r') as csvfile:\n contracts = csv.reader(csvfile, delimiter=',')\n next(csvfile, None)\n\n for contract in contracts:\n # contract[2] = time.strptime(contract[2], '%Y-%m-%d')\n add_record_to_db(contract)\n\n\nprocess_data()\n","sub_path":"scripts/pg_data_loader/data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":1806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"347013428","text":"import numpy as np\n\ndef densityair(T):\n M=28.97e-3\n R=8.314\n P=1.01e5\n rho=P*M/R/T\n print(rho)\n \ndensityair(300)\n\n\ndef Range1(u,theta):\n\n thetarad=theta*np.pi/180\n g=9.81\n R=u**2*np.sin(2*thetarad)/g\n Rkm=R/1e3\n print(R,Rkm)\n H=u**2*(np.sin(thetarad))*2/g/2\n Hkm=H/1000\n print(H,Hkm)\n\nRange1(1.25e3,45)\n\ndef surfaceenergy(N,R): #Breaking 1 drop to N\n r=R/N**0.33\n delA=4*np.pi*r**2*N-4*np.pi*R**2\n S=0.075 #N/m\n delU=delA*S\n print(delU)\n\nsurfaceenergy(100,1)\n","sub_path":"physicsfunctions.py","file_name":"physicsfunctions.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"345740294","text":"# Script to animate the evolution of the upstream hydraulic jump\n\nfrom matplotlib.animation import FuncAnimation\nfrom xmitgcm import open_mdsdataset\nfrom MyFunctions import get_contour\n\nrun_dir = ('/home/hugke729/mitgcm/stratified_flow_directed_study/'\n 'two_layer_flow_proj3/runs/exp_ru_0p3_vel_0p3/')\n\nds = open_mdsdataset(run_dir, prefix=['T', 'U'], delta_t=60).squeeze()\n\nfig, ax = plt.subplots()\n# cax = ds['T'].isel(time=0).plot.pcolormesh(ax=ax, vmin=0, vmax=5)\ncax = ds['U'].isel(time=0).plot.pcolormesh(ax=ax, vmin=0, vmax=0.8, cmap='RdBu_r')\ninterface = get_contour(ds.XC, ds.Z, ds['T'].isel(time=0).values, 2.5)\nline, = ax.plot(ds.XC, interface)\n\nax.set(ylim=(-200, -100))\nax.plot(2*(ds.XG[10], ), (-150, -200))\nax.plot(2*(ds.XG[-10], ), (-150, -200))\n\n\ndef animate(i):\n T_i = ds['T'].isel(time=i).values\n U_i = ds['U'].isel(time=i).values\n cax.set_array(U_i.flatten())\n line.set_ydata(get_contour(ds.XC, ds.Z, T_i, 
2.5))\n    ax.set_title(str(ds.time[i].values/3600))\n\n\nanim = FuncAnimation(fig, animate, frames=ds['time'].size)\n","sub_path":"two_layer_flow_proj3/visualise_output.py","file_name":"visualise_output.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"648018559","text":"import pyautogui\nimport time\n\n\n# pyautogui.FAILSAFE = False # disable the fail-safe protection\n\n\ndef getMonitorInfo():\n\twidth, height = pyautogui.size()\n\tprint(\"Screen width: {} Screen height: {}\".format(width, height))\n\treturn width, height\n\n\ndef getMouseInfo():\n\tmouse_x, mouse_y = pyautogui.position()\n\tprint(\"Mouse position x: {} y: {}\".format(str(mouse_x).rjust(4), str(mouse_y).rjust(4)))\n\treturn mouse_x, mouse_y\n\n\ndef testPos():\n\tmouse_x = 0\n\ttry:\n\t\twhile (mouse_x < 2000):\n\t\t\tgetMouseInfo()\n\t\t\ttime.sleep(1)\n\texcept KeyboardInterrupt:\n\t\t# KeyboardInterrupt: raised when the user presses the interrupt key (usually Ctrl+C or Delete)\n\t\tprint('\\nExit.')\n\n\ndef absMove():\n\ttry:\n\t\tfor x in range(10, 100, 20):\n\t\t\tfor y in range(0, 100, 20):\n\t\t\t\ttry:\n\t\t\t\t\tpyautogui.moveTo(x, y, duration=0.5) # move the mouse to absolute coordinates\n\t\t\t\t\tgetMouseInfo()\n\t\t\t\t\t# pyautogui.PAUSE = 1.5 # pause 1.5 s after each call\n\t\t\t\t\tpyautogui.FAILSAFE = True # raise an exception when the mouse reaches (0,0)\n\t\t\t\texcept:\n\t\t\t\t\tprint(\"Mouse reached (0,0), aborting\")\n\t\t\t\t\traise StopIteration # raise StopIteration to break out of all loops\n\texcept:\n\t\tpass\n\n\ndef RefPosMove():\n\ttry:\n\t\tfor x in range(2):\n\t\t\ttry:\n\t\t\t\tpyautogui.moveRel(300, 0, duration=0.5) # move the mouse by a relative offset\n\t\t\t\tgetMouseInfo()\n\t\t\t\tpyautogui.moveRel(0, 300, duration=0.5) # move the mouse by a relative offset\n\t\t\t\tgetMouseInfo()\n\t\t\t\tpyautogui.moveRel(-300, 0, duration=0.5) # move the mouse by a relative offset\n\t\t\t\tgetMouseInfo()\n\t\t\t\tpyautogui.moveRel(0, -300, duration=0.5) # move the mouse by a relative offset\n\t\t\t\tgetMouseInfo()\n\t\t\t\t# pyautogui.PAUSE = 1.5 # pause 1.5 s after each call\n\t\t\t\tpyautogui.FAILSAFE = True # raise an exception when the mouse reaches (0,0)\n\t\t\texcept:\n\t\t\t\tprint(\"Mouse reached (0,0), aborting\")\n\t\t\t\traise StopIteration # raise StopIteration to break out of all loops\n\texcept:\n\t\tpass\n\n\ndef testMouseOper():\n\tpyautogui.click(500, 350) # click() sends a virtual mouse click, left button by default\n\tpyautogui.click(500, 350, button='right') # right-click\n\n\tpyautogui.mouseDown(800, 750, button='right') # press and hold the right button at the given position (mouseDown holds it)\n\tpyautogui.mouseUp(800, 850, button='right') # move to the given position and release the right button (mouseUp releases it)\n\n\ndef painter():\n\tprint(\"Select the brush\")\n\ttime.sleep(10)\n\tpyautogui.click()\n\tfoot = 10\n\twhile foot < 300:\n\t\tpyautogui.dragRel(foot, 0, duration=0.2) # dragTo/dragRel drag the mouse to an absolute/relative position\n\t\tpyautogui.dragRel(0, foot, duration=0.2)\n\t\tpyautogui.dragRel(-foot, 0, duration=0.2)\n\t\tpyautogui.dragRel(0, -foot, duration=0.2)\n\t\tfoot += 10\n\n\ndef scrollView():\n\twidth, height = getMonitorInfo()\n\tmiddle = height * 0.5\n\twhile (True):\n\t\tmouse_x, mouse_y = getMouseInfo()\n\t\tif mouse_y < middle:\n\t\t\tpyautogui.scroll(300) # scroll up\n\t\t\ttime.sleep(0.5)\n\t\telif mouse_y > middle:\n\t\t\tpyautogui.scroll(-300) # scroll down\n\t\t\ttime.sleep(0.5)\n\n\n\n","sub_path":"各式模組/硬體控制模組/滑鼠控制.py","file_name":"滑鼠控制.py","file_ext":"py","file_size_in_byte":2900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"551298124","text":"\"\"\"\nReinforcement Learning (A3C) using PyTorch + multiprocessing.\nThe simplest implementation for discrete action.\n\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nfrom cart_utils import v_wrap, set_init, plotter_ep_rew, plotter_ep_rew_norm, handleArguments, push_and_pull, 
record, plotter_ep_time_norm, plotter_ep_time, confidence_intervall\nimport matplotlib.pyplot as plt\nimport torch.nn.functional as F\nimport torch.multiprocessing as mp\nfrom shared_adam import SharedAdam\nimport numpy as np\nimport gym\nimport time\nfrom datetime import datetime\nimport sys\nimport os\nos.environ[\"OMP_NUM_THREADS\"] = \"1\"\n\nGAMMA = 0.9\nMAX_EP = 2000\n\n\n\nenv = gym.make('CartPole-v0').unwrapped\nN_S = env.observation_space.shape[0]\nN_A = env.action_space.n\n\n\nclass Net(nn.Module):\n def __init__(self, s_dim, a_dim):\n super(Net, self).__init__()\n self.s_dim = s_dim\n self.a_dim = a_dim\n self.pi1 = nn.Linear(s_dim, 80)\n self.pi2 = nn.Linear(80, 60)\n self.pi3 = nn.Linear(60, a_dim)\n self.v2 = nn.Linear(80, 60)\n self.v3 = nn.Linear(60, 1)\n set_init([self.pi1, self.pi2, self.pi3, self.v2, self.v3])\n self.distribution = torch.distributions.Categorical\n\n def forward(self, x):\n pi1 = F.relu(self.pi1(x))\n pi2 = F.relu(self.pi2(pi1))\n logits = self.pi3(pi2)\n v2 = F.relu(self.v2(pi1))\n values = self.v3(v2)\n return logits, values\n\n def set_init(layers):\n for layer in layers:\n nn.init.xavier_uniform_(layer.weight, nn.init.calculate_gain('relu'))\n nn.init.xavier_uniform_(layer.bias, nn.init.calculate_gain('relu'))\n\n def choose_action(self, s):\n self.eval()\n logits, _ = self.forward(s)\n prob = F.softmax(logits, dim=1).data\n m = self.distribution(prob)\n return m.sample().numpy()[0]\n\n def loss_func(self, s, a, v_t):\n self.train()\n logits, values = self.forward(s)\n td = v_t - values\n c_loss = td.pow(2)\n \n probs = F.softmax(logits, dim=1)\n m = self.distribution(probs)\n exp_v = m.log_prob(a) * td.detach().squeeze()\n a_loss = -exp_v\n total_loss = (c_loss + a_loss).mean()\n return total_loss\n\n\nclass Worker(mp.Process):\n def __init__(self, gnet, opt, global_ep, global_ep_r, global_time_done, res_queue, time_queue, action_queue, name):\n super(Worker, self).__init__()\n self.name = 'w%02i' % name\n self.g_ep, self.g_ep_r, self.g_time = global_ep, global_ep_r, global_time_done\n self.gnet, self.opt = gnet, opt\n self.lnet = Net(N_S, N_A) # local network\n self.res_queue, self.time_queue, self.action_queue = res_queue, time_queue, action_queue\n self.env = gym.make(\"CartPole-v0\").unwrapped\n\n def run(self):\n total_step = 1\n stop_processes = False\n scores = []\n while self.g_ep.value < MAX_EP and stop_processes is False:\n s = self.env.reset()\n buffer_s, buffer_a, buffer_r = [], [], []\n ep_r = 0.\n while True:\n start = time.time()\n if self.name == 'w00' and handleArguments().demo_mode:\n self.env.render()\n a = self.lnet.choose_action(v_wrap(s[None, :]))\n s_, r, done, _ = self.env.step(a)\n if done: r = -1\n ep_r += r\n buffer_a.append(a)\n buffer_s.append(s)\n buffer_r.append(r)\n\n if done or ep_r >= 450: # update global and assign to local net\n # sync\n end = time.time()\n time_done = end - start\n\n push_and_pull(self.opt, self.lnet, self.gnet, done, s_, buffer_s, buffer_a, buffer_r, GAMMA, False,\n self.g_ep)\n\n record(self.g_ep, self.g_ep_r, ep_r, self.res_queue, self.time_queue, self.g_time, time_done, a,\n self.action_queue, self.name)\n\n scores.append(int(self.g_ep_r.value))\n if handleArguments().load_model and handleArguments().normalized_plot:\n if np.mean(scores[-min(100, len(scores)):]) >= 400 and self.g_ep.value >= 100:\n stop_processes = True\n elif handleArguments().normalized_plot:\n if np.mean(scores[-min(10, len(scores)):]) >= 400 and self.g_ep.value >= mp.cpu_count():\n stop_processes = True\n else:\n 
stop_processes = False\n break\n\n s = s_\n total_step += 1\n self.res_queue.put(None)\n self.time_queue.put(None)\n self.action_queue.put(None)\n\n\nif __name__ == \"__main__\":\n # load global network\n print(\"Starting Synchronous A2C Agent for Cartpole-v0\")\n time.sleep(3)\n timedelta_sum = datetime.now()\n timedelta_sum -= timedelta_sum\n fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)\n\n if handleArguments().normalized_plot and not handleArguments().save_data:\n runs = 3\n else:\n runs = 1\n\n # Reinitialize Agent to make 5 trials\n for i in range(runs):\n starttime = datetime.now()\n if handleArguments().load_model:\n gnet = Net(N_S, N_A)\n gnet = torch.load(\"./CARTPOLE/cart_save_model/a3c_cart_comb.pt\")\n gnet.eval()\n else:\n gnet = Net(N_S, N_A)\n\n gnet.share_memory() # share the global parameters in multiprocessing\n opt = SharedAdam(gnet.parameters(), lr=0.001, betas=(0.92, 0.999)) # global optimizer\n global_ep, global_ep_r, global_time_done = mp.Value('i', 0), mp.Value('d', 0.), mp.Value('d', 0.)\n res_queue, time_queue, action_queue = mp.Queue(), mp.Queue(), mp.Queue()\n # parallel training\n if handleArguments().load_model:\n workers = [\n Worker(gnet, opt, global_ep, global_ep_r, global_time_done, res_queue, time_queue, action_queue, i) for\n i in range(1)]\n [w.start() for w in workers]\n else:\n workers = [\n Worker(gnet, opt, global_ep, global_ep_r, global_time_done, res_queue, time_queue, action_queue, i) for\n i in range(mp.cpu_count())]\n [w.start() for w in workers]\n\n # record episode-reward and episode-duration to plot\n res = []\n durations = []\n actions = []\n while True:\n r = res_queue.get()\n t = time_queue.get()\n a = action_queue.get()\n if r is not None:\n res.append(r)\n if t is not None:\n durations.append(t)\n if a is not None:\n actions.append(a)\n else:\n break\n\n [w.join() for w in workers]\n\n if np.mean(res[-min(mp.cpu_count(), len(res)):]) >= 200 and not handleArguments().load_model:\n print(\"Save model\")\n torch.save(gnet, \"./CARTPOLE/cart_save_model/a3c_cart_comb.pt\")\n elif handleArguments().load_model:\n print(\"Testing! No need to save model.\")\n else:\n print(\"Failed to train agent. 
Model was not saved\")\n endtime = datetime.now()\n timedelta = endtime - starttime\n print(\"Number of Episodes: \", global_ep.value, \" | Finished within: \", timedelta)\n\n timedelta_sum += timedelta / 3\n\n # Get results for confidence intervall\n\n if handleArguments().load_model:\n confidence_intervall(actions, True)\n else:\n confidence_intervall(actions)\n\n # Plot results\n if handleArguments().normalized_plot:\n plotter_ep_time_norm(ax1, durations)\n plotter_ep_rew_norm(ax2, res)\n else:\n plotter_ep_time(ax1, durations)\n plotter_ep_rew(ax2, res)\n\n if handleArguments().save_data:\n if handleArguments().load_model:\n scores = np.asarray([res])\n np.savetxt('CARTPOLE/cart_save_plot_data/a3c_cart_comb_test.csv', scores, delimiter=',')\n else:\n scores = np.asarray([res])\n np.savetxt('CARTPOLE/cart_save_plot_data/a3c_cart_comb.csv', scores, delimiter=',')\n\n font = {'family': 'serif',\n 'color': 'darkred',\n 'weight': 'normal',\n 'size': 8,\n }\n plt.text(0, 450, f\"Average Duration: {timedelta_sum}\", fontdict=font)\n plt.title(\"A3C-Cartpole (shared NN)\", fontsize=16)\n plt.show()\n\n sys.exit()\n\n","sub_path":"CARTPOLE/a3c_cart_comb.py","file_name":"a3c_cart_comb.py","file_ext":"py","file_size_in_byte":8651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"163605442","text":"import torch\nimport numpy as np\nimport pickle\nfrom rlkit.misc.asset_loader import load_local_or_remote_file\nimport rlkit.torch.pytorch_util as ptu\n\n\nvae_path = '/home/khazatsky/rail/data/rail-khazatsky/sasha/PCVAE/DCVAE/run20/id0/itr_600.pkl'\n\n# vae_path = '/home/shikharbahl/research/rlkit-private/data/local/shikhar/corl2019/pointmass/real/run0/id0/vae.pkl'\n\n\nvae = load_local_or_remote_file(vae_path)\n\n\n\ndataset_path = '/home/khazatsky/rail/data/train_data.npy'\ndataset = load_local_or_remote_file(dataset_path).item()\n\nimport matplotlib.pyplot as plt \n\ntraj = dataset['observations'][17]\nn = traj.shape[0]\n\nx0 = traj[0]\nx0 = ptu.from_numpy(x0.reshape(1, -1))\ngoal = traj[-1]\nvae = vae.cpu()\nlatent_goal = vae.encode(ptu.from_numpy(goal.reshape(1,-1)), x0, distrib=False)\ndecoded_goal, _ = vae.decode(latent_goal,x0)\n\nlog_probs = []\ndistances = []\nfor i in range(n): \n\tx = traj[i]\n\tlatent = vae.encode(ptu.from_numpy(x.reshape(1,-1)), x0, distrib=False)\n\tdecoded, _ = vae.decode(latent, x0)\n\tdistances.append(np.linalg.norm(ptu.get_numpy(latent) - ptu.get_numpy(latent_goal)))\n\tlog_probs.append(ptu.get_numpy(vae.logprob(decoded_goal, decoded, mean=False).exp())[0])\nplt.plot(np.arange(n), np.array(distances))\n'''\ndataset_path = '/home/shikharbahl/research/visual_foresight/examples/train_data.npy'\ndataset = np.load(dataset_path).item()\ntraj = dataset['observations'][0]\nn = traj.shape[0]\nimport matplotlib.pyplot as plt\n\ndef get_distances(i):\n\tglobal vae\n\ttraj = dataset['observations'][i]\n\tx0 = traj[0]\n\tx0 = ptu.from_numpy(x0.reshape(1, -1))\n\tgoal = traj[-1]\n\tvae = vae.cpu()\n\tlatent_goal = vae.encode(ptu.from_numpy(goal.reshape(1,-1)), x0, distrib=False)\n\tdecoded_goal, _ = vae.decode(latent_goal)\n\n\tn = traj.shape[0]\n\tlog_probs = []\n\tdistances = []\n\tfor i in range(n):\n\t\tx = traj[i]\n\t\tlatent = vae.encode(ptu.from_numpy(x.reshape(1,-1)), x0, distrib=False)\n\t\tdecoded, _ = vae.decode(latent)\n\t\tdistances.append(np.linalg.norm(ptu.get_numpy(latent) - ptu.get_numpy(latent_goal)))\n\t\tlog_probs.append(ptu.get_numpy(vae.logprob(decoded_goal, decoded, mean=False).exp())[0])\n\treturn 
np.array(distances)\n\ndists = np.array([get_distances(i) for i in range(1)])\n# import ipdb; ipdb.set_trace()\nplt.plot(np.arange(n), np.mean(dists, axis=0))\n'''\nplt.show()\n","sub_path":"scripts/vae_reward_visualizer.py","file_name":"vae_reward_visualizer.py","file_ext":"py","file_size_in_byte":2261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"108843780","text":"import os\n\nimport cv2\n\n\nclass FaceDetector():\n def __init__(self):\n path_haarcascade = 'libs/opencv/models/haarcascades/haarcascade_frontalface_alt.xml'\n\n self.face_cascade = cv2.CascadeClassifier(path_haarcascade)\n\n\n def _get_contour_faces_haar(self, image):\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n contour_faces = self.face_cascade.detectMultiScale(\n gray,\n scaleFactor=1.1,\n minNeighbors=3,\n minSize=(20, 20),\n flags=cv2.CASCADE_SCALE_IMAGE)\n\n return contour_faces\n\n\n def has_maybe_faces(self, image):\n contour_faces = self._get_contour_faces_haar(image)\n\n return len(contour_faces) > 0\n\n\n def get_faces_haar(self, image):\n contour_faces = self._get_contour_faces_haar(image)\n\n for contour_face in contour_faces:\n x, y, w, h = [v for v in contour_face]\n\n face = image[y:y + h, x:x + w]\n yield face","sub_path":"FaceDetector.py","file_name":"FaceDetector.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"429101704","text":"import json\nimport os\nimport numpy as np\nimport random\nimport string\nimport time\nimport pandas as pd\nimport plotly.graph_objects as go\n\nif not os.path.exists(\"group_by_figures\"):\n os.mkdir(\"group_by_figures\")\n\n# ________ATTENTION: group_by (ASC, DESC) working only for group by 1 column and apply\n# operation (sum, mean) on 1 columns\n\n# --------------- GROUPING COL STORAGE DISK --------------------------\n\n\ndef order(grouped_json, how, flattened):\n if how == 'ASC':\n unordered = [u for sublist in list(grouped_json.values()) for u in sublist]\n unordered = [unordered[i] for i in range(len(unordered)) if i % 2 == 0]\n grouped_json = dict(zip(list(grouped_json.keys()), unordered))\n new_json = list(grouped_json.items())\n for i in range(len(new_json)):\n for j in range(len(new_json) - 1):\n if new_json[j][1] > new_json[j+1][1]:\n swapper = new_json[j]\n new_json[j] = new_json[j+1]\n new_json[j+1] = swapper\n\n grouped_df = pd.DataFrame(new_json, columns=flattened)\n\n elif how == 'DESC':\n unordered = [u for sublist in list(grouped_json.values()) for u in sublist]\n unordered = [unordered[i] for i in range(len(unordered)) if i % 2 == 0]\n grouped_json = dict(zip(list(grouped_json.keys()), unordered))\n new_json = list(grouped_json.items())\n for i in range(len(new_json)):\n for j in range(len(new_json) - 1):\n if new_json[j][1] < new_json[j + 1][1]:\n swapper = new_json[j]\n new_json[j] = new_json[j + 1]\n new_json[j + 1] = swapper\n\n grouped_df = pd.DataFrame(new_json, columns=flattened)\n\n else:\n grouped_df = pd.DataFrame(\n list(zip(list(grouped_json.keys()), list(grouped_json.values()))),\n columns=[flattened[0], 'apply_on']\n )\n flattened.append('count')\n grouped_df[flattened[1:]] = pd.DataFrame(grouped_df.apply_on.tolist(), index=grouped_df.index)\n grouped_df = grouped_df.drop(columns=['apply_on'], axis=1)\n\n #print(grouped_df)\n\n return grouped_df\n\n\ndef go_plot(grouped_df, group_by_list, apply_on_list, how):\n fig = go.Figure(data=[go.Table(\n 
header=dict(values=list(grouped_df.columns),\n fill_color='paleturquoise',\n align='left'),\n cells=dict(values=[grouped_df[col] for col in grouped_df.columns],\n fill_color='lavender',\n align='left'))\n ])\n fig.update_layout(title_text=f\"Group by {group_by_list} select {apply_on_list} {how}\", title_x=0.5)\n fig.write_image(f\"group_by_figures/{group_by_list}_{apply_on_list}_{how}.png\")\n\n\ndef col_group_by(file_name, group_by_list, apply_on_list, operation='sum', how=None):\n start = time.time()\n file_path = os.path.join(os.getcwd(), 'disk_storage_column\\\\ ' + file_name)\n with open(file_path, 'r') as json_file:\n table = json.load(json_file)\n\n flattened = [u for subitem in [group_by_list, apply_on_list] for u in subitem]\n iters = []\n\n while len(iters) < len(flattened):\n ran = random.choice(string.ascii_letters)\n if ran not in iters:\n iters.append(ran)\n\n grouped_json = {}\n\n for iters in zip(*[table[u] for u in flattened]):\n if iters[0] not in grouped_json:\n grouped_json[iters[0]] = np.zeros(len(apply_on_list) + 1)\n\n for i in range(len(apply_on_list)):\n grouped_json[iters[0]][i] += float(iters[i+1])\n\n grouped_json[iters[0]][-1] += 1\n\n if operation == 'mean':\n for k in grouped_json.keys():\n for i in range(len(apply_on_list)):\n grouped_json[k][i] = grouped_json[k][i] / grouped_json[k][-1]\n\n grouped_df = order(grouped_json, how, flattened)\n duration = time.time() - start\n go_plot(grouped_df, group_by_list, apply_on_list, how)\n print(f'Time spent for group by on columns: {duration}')\n return grouped_df\n\n\n# --------------- GROUPING ROW STORAGE DISK --------------------------\n\ndef row_group_by(file_name, group_by_list, apply_on_list, operation='sum', how=None):\n start = time.time()\n flattened = [u for subitem in [group_by_list, apply_on_list] for u in subitem]\n\n file_path = os.path.join(os.getcwd(), 'disk_storage_row\\\\ ' + file_name)\n with open(file_path, 'r') as json_file:\n table = json.load(json_file)\n\n iters = []\n while len(iters) < len(flattened):\n ran = random.choice(string.ascii_letters)\n if ran not in iters:\n iters.append(ran)\n\n grouped_json = {}\n for row in table['rows']:\n if row[flattened[0]] not in grouped_json:\n grouped_json[row[flattened[0]]] = np.zeros(len(apply_on_list) + 1)\n for i in range(len(apply_on_list)):\n grouped_json[row[flattened[0]]][i] += float(row[flattened[i+1]])\n grouped_json[row[flattened[0]]][-1] += 1\n\n if operation == 'mean':\n for k in grouped_json.keys():\n for i in range(len(apply_on_list)):\n grouped_json[k][i] = grouped_json[k][i] / grouped_json[k][-1]\n\n grouped_df = order(grouped_json, how, flattened)\n duration = time.time() - start\n go_plot(grouped_df, group_by_list, apply_on_list, how)\n print(f'Time spend for group by on row: {duration}')\n return grouped_df\n\n\ncol_group_by('LINEITEM_column.txt', ['suppkey'], ['quantity'], operation='mean', how=\"ASC\")\nrow_group_by('LINEITEM_row.txt', ['suppkey'], ['quantity'], operation='mean', how=\"ASC\")\n\n\n\n","sub_path":"group_by.py","file_name":"group_by.py","file_ext":"py","file_size_in_byte":5599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"637570124","text":"# __author__ = 'tonye0115'\n# -*- coding: utf-8 -*-\nimport datetime\nimport os\n\nimport numpy as np\nimport tensorflow as tf\nimport time\nfrom tensorflow.contrib import learn\nfrom com.ryxc.cnn_text_classification_address_site2.TextCNN import TextCNN\nfrom com.ryxc.cnn_text_classification_address_site2 import 
DataHelpers\n\n# Data loading params\ntf.flags.DEFINE_float(\"test_sample_percentage\", .3, \"Percentage of the training data to use for validation\")\ntf.flags.DEFINE_string(\"data_path\", \"data\", \"Directory of the address / online-store data files\")\ntf.flags.DEFINE_string(\"runs_path\", \"runs\", \"Directory where models are stored\")\n\n# Training parameters\ntf.flags.DEFINE_integer(\"batch_size\", 64, \"Batch Size (default: 64)\")\ntf.flags.DEFINE_integer(\"num_epochs\", 200, \"Number of training epochs (default: 200)\")\ntf.flags.DEFINE_integer(\"evaluate_every\", 100, \"Evaluate model on dev set after this many steps (default: 100)\")\ntf.flags.DEFINE_integer(\"checkpoint_every\", 100, \"Save model after this many steps (default: 100)\")\n\n# Model Hyperparameters\ntf.flags.DEFINE_integer(\"embedding_dim\", 128, \"Dimensionality of character embedding (default: 128)\")\ntf.flags.DEFINE_string(\"filter_sizes\", \"3,4,5\", \"Comma-separated filter sizes (default: '3,4,5')\") # filter sizes\ntf.flags.DEFINE_integer(\"num_filters\", 128, \"Number of filters per filter size (default: 128)\") # number of filters per filter size\ntf.flags.DEFINE_float(\"l2_reg_lambda\", 0.0, \"L2 regularization lambda (default: 0.0)\")\ntf.flags.DEFINE_float(\"dropout_keep_prob\", 0.5, \"Dropout keep probability (default: 0.5)\")\n\n# Misc Parameters\ntf.flags.DEFINE_boolean(\"allow_soft_placement\", True, \"Allow soft device placement\")\ntf.flags.DEFINE_boolean(\"log_device_placement\", False, \"Log placement of ops on devices\")\n\n\nFLAGS = tf.flags.FLAGS\nFLAGS._parse_flags()\nprint(\"\\nParameters:\")\nfor attr, value in sorted(FLAGS.__flags.items()):\n    print(\"{}={}\".format(attr.upper(), value))\nprint(\" \")\n\n# Data Preparation\n# =====================================================================================================\n# Load data\nprint(\"Loading data...\")\nx_text, y = DataHelpers.load_data_and_labels(FLAGS.data_path)\n# print(\"y:\", y)\nprint(\"-------------------------------------- Build vocabulary---------------------------------------------\")\nmax_document_length = max([len(x.split('\\t')) for x in x_text])\nprint(\"max_document_length:\", max_document_length)\n\n\nvocab_processor = learn.preprocessing.VocabularyProcessor(max_document_length)\n# compute tf-idf\n# returns the word ids for each line, where a word id is the word's position in x_text after _tokenizer (tokenize and dedupe)\nfit_transform = vocab_processor.fit_transform(x_text)\n# for a in fit_transform:\n#     print(a)\nfit_transform_list = list(fit_transform)\nx = np.array(list(fit_transform_list))\n# collection of document token indices\n# print(\"x:\", x)\n# vocabulary class for the categorical variable\nprint(\"Vocabulary Size:{:d}\".format(len(vocab_processor.vocabulary_)))\n\nprint(\"-------------------------------------- Randomly shuffle data--------------------------------\")\nnp.random.seed(10) # with the same seed, the random data returned is identical\n# print(\"Random number with seed 10 : \", np.random.permutation([1, 4, 9, 12, 15]))\n# np.random.seed(10)\n# print(\"Random number with seed 10 : \", np.random.permutation([1, 4, 9, 12, 15]))\n# # Random number with seed 10 :  [ 9 12  1 15  4]\n# # Random number with seed 10 :  [ 9 12  1 15  4]\n# permutation returns a random ordering of the same size as the collection\nshuffle_indices = np.random.permutation(np.arange(len(y)))\nx_shuffle = x[shuffle_indices]\ny_shuffle = y[shuffle_indices]\n# print(\"shuffle_indices\", shuffle_indices)\n\nprint(\"-------------------------------------- Split train/test set-----------------------------------------\")\ntest_sample_index = -1 * int(FLAGS.test_sample_percentage * float(len(y)))\n# print(\"len(y):\", len(y))\n# print(\"test_sample_index:\", test_sample_index)\nx_train, x_test = x_shuffle[:test_sample_index], x_shuffle[test_sample_index:]\ny_train, y_test = y_shuffle[:test_sample_index], y_shuffle[test_sample_index:]\nprint(\"x_train/x_test split:{:d}/{:d}\".format(len(x_train), len(x_test)))\nprint(\"y_train/y_test split:{:d}/{:d}\".format(len(y_train), len(y_test)))\n\n\n# Training\nprint(\"============================================================================\")\nprint(\"Training starting...........\")\n\nwith tf.Graph().as_default():\n    session_conf = tf.ConfigProto(\n        allow_soft_placement=FLAGS.allow_soft_placement,\n        log_device_placement=FLAGS.log_device_placement)\n    sess = tf.Session(config=session_conf)\n\n    cnn = TextCNN(sequence_length=x_train.shape[1],\n                  num_classes=y_train.shape[1],\n                  vocab_size=len(vocab_processor.vocabulary_),\n                  embedding_size=FLAGS.embedding_dim,\n                  filter_sizes=list(map(int, FLAGS.filter_sizes.split(\",\"))),\n                  num_filters=FLAGS.num_filters,\n                  l2_reg_lambda=FLAGS.l2_reg_lambda)\n\n\n    # Define Training procedure\n    global_step = tf.Variable(0, name=\"global_step\", trainable=False)\n    # Adam (adaptive moment estimation) computes adaptive learning rates for different parameters\n    optimizer = tf.train.AdamOptimizer(1e-3)\n    # compute gradients of the variables with respect to the loss\n    grads_and_vars = optimizer.compute_gradients(cnn.loss)\n    # define the training op\n    train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)\n\n    # Output directory for models and summaries\n    timestamp = str(int(time.time()))\n    out_dir = os.path.abspath(os.path.join(os.path.curdir, FLAGS.runs_path, timestamp))\n    # print(\"Writing to {}\\n\".format(out_dir))\n\n    # Checkpoint directory. Tensorflow assumes this directory already exists so we need to create it\n    checkpoint_dir = os.path.abspath(os.path.join(out_dir, \"checkpoints\"))\n    checkpoint_prefix = os.path.join(checkpoint_dir, \"model\")\n    if not os.path.exists(checkpoint_dir):\n        os.makedirs(checkpoint_dir)\n    saver = tf.train.Saver(tf.global_variables())\n\n    # Write vocabulary\n    vocab_processor.save(os.path.join(out_dir, \"vocab\"))\n\n    # Generate batches\n    batches = DataHelpers.batch_iter(list(zip(x_train, y_train)), FLAGS.batch_size, FLAGS.num_epochs)\n\n    sess.run(tf.global_variables_initializer())\n\n\n    def train_step(x_batch, y_batch):\n        feed_dict = {\n            cnn.input_x: x_batch,\n            cnn.input_y: y_batch,\n            cnn.dropout_keep_prob: FLAGS.dropout_keep_prob\n        }\n        _, step, loss, accuracy = sess.run([train_op, global_step, cnn.loss, cnn.accuracy], feed_dict=feed_dict)\n        time_str = datetime.datetime.now().isoformat()\n        # print(\"{}: step {}, loss {:g}, acc {:g}\".format(time_str, step, loss, accuracy))\n\n\n    def dev_step(x_batch, y_batch):\n        feed_dict = {\n            cnn.input_x: x_batch,\n            cnn.input_y: y_batch,\n            cnn.dropout_keep_prob: 1.0\n        }\n        step, loss, accuracy = sess.run([global_step, cnn.loss, cnn.accuracy], feed_dict)\n        time_str = datetime.datetime.now().isoformat()\n        print(\"{}: step {}, loss {:g}, acc {:g}\".format(time_str, step, loss, accuracy))\n\n    DataHelpers.writeFile(FLAGS.runs_path+'/train_status', 'running')\n\n    for batch in batches:\n        x_batch, y_batch = zip(*batch)\n\n        train_step(x_batch, y_batch)\n\n        current_step = tf.train.global_step(sess, global_step)\n        if current_step % FLAGS.evaluate_every == 0:\n            print(\"\\nEvaluation:\")\n            dev_step(x_batch, y_batch)\n        if current_step % FLAGS.checkpoint_every == 0:\n            path = saver.save(sess, checkpoint_prefix, global_step=current_step)\n            print(\"Saved model checkpoint to {}\\n\".format(path))\n\n    DataHelpers.writeFile(FLAGS.runs_path+'/train_status', 
'ending')\n\n\n","sub_path":"com/ryxc/cnn_text_classification_address_site2/TrainAddress.py","file_name":"TrainAddress.py","file_ext":"py","file_size_in_byte":7801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"42216020","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Feb 20 09:40:34 2018\n\n@author: dongsiku\n\"\"\"\n\nimport re\nfrom os import path\nfrom pathlib import Path\nfrom extract_data_from_html import extract_data_from_html\nimport sys\n\n\ndef open_gradefile():\n    if len(sys.argv) == 2:\n        filename = sys.argv[1]\n    else:\n        filename = get_filename()\n\n    if filename is False:\n        return False, False, False\n    updated_date, course_list = extract_data_from_html(filename)\n    return filename, updated_date, course_list\n\n\ndef get_filename():\n    import tkinter\n    from tkinter.filedialog import askopenfilename\n    root = tkinter.Tk()\n    root.withdraw()\n    filename = askopenfilename(filetypes=[(\"単位修得状況確認表.html\", \"*.html\")],\n                               initialdir=path.join(str(Path.home()),\n                                                    \"Downloads\"))\n\n    if filename == \"\":\n        print(\"no file\")\n        return False\n\n    print(filename)\n    return filename\n\n\nif __name__ == \"__main__\":\n    open_gradefile()\n","sub_path":"src/open_gradefile.py","file_name":"open_gradefile.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"149949095","text":"#\n# E-R graphs, classic method: fill the adjacency matrix\n# with 1s and 0s with probability p, plot P(k), p=(0.1, 0.5, 0.7), N~100\n\nimport random\nfrom pathlib import Path\nfrom typing import Callable\n\nimport networkx as nx\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport scipy.stats\n\nGRAPHS_DIR = 'graphs'\n\n\nclass Graph(nx.Graph):\n    def __init__(self, num_nodes):\n        self.num_nodes = num_nodes\n        self.adjacency_matrix = np.zeros((num_nodes, num_nodes))\n        super().__init__()\n\n    def __call__(self, build_fn=None, p: float=None):\n        if p:\n            build_fn(self, p)\n        else:\n            build_fn(self)\n\n\ndef classic(graph: Graph, p: float=0.5):\n    for node in range(graph.num_nodes):\n        for neighbour in range(node):\n            current_p = random.random()\n            if current_p < p:\n                graph.add_edge(node, neighbour)\n                graph.adjacency_matrix[node, neighbour] = 1\n                graph.adjacency_matrix[neighbour, node] = 1\n\n\ndef node_degree_dist(adj_matrix: np.array):\n    return np.sum(adj_matrix, axis=1)\n\n\ndef build_and_plot_graph(num_nodes: int, build_fn: Callable, probability: float=None, bins=20):\n\n    Path(GRAPHS_DIR).mkdir(exist_ok=True)\n    figs_path = Path(GRAPHS_DIR + \"/\" + str(build_fn.__name__))\n    figs_path.mkdir(exist_ok=True)\n\n    g = Graph(num_nodes)\n    g(build_fn, probability)\n    probability = str(probability) if probability is not None else '_'\n\n    position = nx.circular_layout(g)\n    nx.draw(g, pos=position)\n    labels = nx.draw_networkx_labels(g, pos=position)\n    plt.savefig(str(figs_path / (\"graph\" + str(num_nodes) + \"_p\" + probability + \".png\")))\n    # plt.show()\n\n    degrees = node_degree_dist(g.adjacency_matrix)\n\n    h, edges = np.histogram(degrees, bins=bins, density=True)\n    k = edges[:-1] + (edges[1] - edges[0]) / 2\n\n    plt.clf()\n    plt.plot(k, h, label='hist')\n\n    plt.title('P(k)')\n    plt.xlabel('k')\n    plt.ylabel('P(k)')\n    plt.legend()\n    plt.savefig(str(figs_path / (\"P(k)_\" + str(num_nodes) + \"_p\" + probability + \".png\")))\n    plt.show()\n\n    print(\"Images saved\")\n\n\n# with a given degree distribution p(k) ~ k^-3, e.g. the inverse-CDF method\n\n# x_min = 1\n# P(k) = k^-3\n# P_cum = k ^ 2\n\n\ndef inv_distr(graph: Graph):\n    # P(k) = k^-3\n    degrees = np.round((1 - np.random.random(size=graph.num_nodes)) ** (-1 / 2))\n\n    for node in range(graph.num_nodes):\n        k = degrees[node]\n        for neighbour in range(node):\n            iter = 0\n            while sum(graph.adjacency_matrix[node]) < k and iter < 1000:\n                neighbour = random.randint(0, graph.num_nodes - 1)\n                if neighbour == node or sum(graph.adjacency_matrix[node]) == degrees[neighbour]:\n                    iter += 1\n                    continue\n                else:\n                    graph.add_edge(node, neighbour)\n                    graph.adjacency_matrix[node, neighbour] = 1\n                    graph.adjacency_matrix[neighbour, node] = 1\n\n\n# Monte Carlo: Metropolis algorithm, E(t), p(k)\ndef monte_carlo(graph: Graph, p: float=0.5, steps: int=100000):\n    theta = np.log(p / (1 - p))\n    E = []\n    e = 0\n\n    for step in range(steps):\n        edge = (np.random.randint(0, graph.num_nodes - 1), np.random.randint(0, graph.num_nodes - 1))\n        edge_rev = edge[1], edge[0]\n        if graph.adjacency_matrix[edge] == 0:\n            graph.add_edge(edge[0], edge[1])\n            graph.adjacency_matrix[edge] = 1\n            graph.adjacency_matrix[edge_rev] = 1\n            e += 1\n        else:\n            if np.random.random() < np.exp(-theta):\n                graph.remove_edge(edge[0], edge[1])\n                graph.adjacency_matrix[edge] = 0\n                graph.adjacency_matrix[edge_rev] = 0\n                e -= 1\n        E.append(e)\n\n    Path(GRAPHS_DIR).mkdir(exist_ok=True)\n    figs_path = Path(GRAPHS_DIR + \"/\" + str(monte_carlo.__name__))\n    figs_path.mkdir(exist_ok=True)\n    plt.plot(E)\n    plt.title('Number of edges')\n    plt.xlabel('t (step)')\n    plt.ylabel('E')\n    plt.savefig(str(figs_path / (\"E(t)_\" + str(graph.num_nodes) + \"_p\" + str(p) + \".png\")))\n    # plt.show()\n\n\nif __name__ == '__main__':\n\n    build_and_plot_graph(100, classic, 0.1, 10)\n    build_and_plot_graph(100, classic, 0.5, 10)\n    build_and_plot_graph(100, classic, 0.7, 10)\n    build_and_plot_graph(100, inv_distr)\n    build_and_plot_graph(100, monte_carlo, 0.1, 20)\n    build_and_plot_graph(100, monte_carlo, 0.5, 50)\n    build_and_plot_graph(100, monte_carlo, 0.7, 50)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"255486246","text":"from django.db import models\nfrom django.core import urlresolvers\nimport mptt.models\nimport django.contrib.auth.models as django_models\nimport datetime\n\nfrom .location import Location\n\nclass User(django_models.AbstractUser):\n    TYPES = (\n        ('person', 'Private individual'),\n        ('company', 'Company'),\n    )\n    \n    CONTACT_METHODS = (\n        ('email', 'Email'),\n        ('phone', 'Phone'),\n    )\n    \n    patronym = models.CharField(max_length=50, blank=True, null=True) # patronymic\n    type = models.CharField(max_length=20, choices=TYPES, blank=True, null=True)\n    subscribed_to_newsletter = models.NullBooleanField()\n    preferred_contact_method = models.CharField(max_length=20, choices=CONTACT_METHODS, blank=True, null=True)\n    location = mptt.models.TreeForeignKey('Location', related_name='users', blank=True, null=True)\n    social_auth_id = models.CharField(max_length=50, blank=True, null=True)\n","sub_path":"common/models/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"522386973","text":"import gc\n\nfrom twisted.trial.unittest import TestCase\n\nfrom axiom.store import Store\nfrom axiom.item import Item\nfrom axiom.attributes import integer, reference\n\nclass Referee(Item):\n    schemaVersion = 1\n    typeName 
= \"test_reference_referee\"\n\n topSecret = integer()\n\nclass _Referent(Item):\n schemaVersion = 1\n\n ref = reference(whenDeleted=reference.CASCADE)\n\nclass SimpleReferent(Item):\n schemaVersion = 1\n typeName = \"test_reference_referent\"\n\n ref = reference()\n\nclass DependentReferent(Item):\n ref = reference(whenDeleted=reference.CASCADE)\n\nclass BadReferenceTestCase(TestCase):\n ntimes = 10\n\n def testSanity(self):\n store = Store()\n for i in xrange(self.ntimes):\n SimpleReferent(store=store, ref=Referee(store=store, topSecret=i))\n (referee,) = list(store.query(Referee))\n (referent,) = list(store.query(SimpleReferent))\n self.assertEqual(referent.ref.topSecret, referee.topSecret)\n referee.deleteFromStore()\n referent.deleteFromStore()\n\n def testBadReferenceNone(self):\n store = Store()\n referee = Referee(store=store, topSecret=0)\n referent = SimpleReferent(store=store, ref=referee)\n referee.deleteFromStore()\n\n referee = None\n gc.collect()\n\n (referent,) = list(store.query(SimpleReferent))\n self.assertEqual(referent.ref, None)\n\n def testBadReferenceNoneRevert(self):\n store = Store()\n referee = Referee(store=store, topSecret=0)\n referent = SimpleReferent(store=store, ref=referee)\n def txn():\n referee.deleteFromStore()\n self.assertEqual(referent.ref, None)\n 1 / 0\n self.assertRaises(ZeroDivisionError, store.transact, txn)\n self.assertEqual(referent.ref, referee)\n\n referent = None\n referee = None\n gc.collect()\n\n referent = store.findUnique(SimpleReferent)\n referee = store.findUnique(Referee)\n self.assertEqual(referent.ref, referee)\n\n def testReferenceQuery(self):\n store = Store()\n referee = Referee(store=store, topSecret=0)\n self.assertEqual(\n list(store.query(SimpleReferent,\n SimpleReferent.ref == Referee.storeID)),\n [])\n\n def testReferenceDeletion(self):\n store = Store()\n referee = Referee(store=store, topSecret=0)\n dep = DependentReferent(store=store,\n ref=referee)\n sid = dep.storeID\n self.assertIdentical(store.getItemByID(sid), dep) # sanity\n referee.deleteFromStore()\n self.assertRaises(KeyError, store.getItemByID, sid)\n","sub_path":"tums/trunk/lite/axiom/test/test_reference.py","file_name":"test_reference.py","file_ext":"py","file_size_in_byte":2697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"295918527","text":"import cv2\n\noriginal_img = cv2.imread('identity.png', 0)\ntemplate = cv2.imread('template_from_original.png', 0)\nw, h = template.shape[::-1]\n\nmethod = cv2.TM_CCOEFF\n\nresult = cv2.matchTemplate(\n original_img, template, method\n)\nmin_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)\n\ntop_left = max_loc\nbottom_right = (top_left[0] + w, top_left[1] + 2 * h)\n\ncv2.imshow('Original', original_img)\n\ncv2.rectangle(original_img, top_left, bottom_right, 255, 2)\ncv2.imshow('Identity detection', original_img)\nif cv2.waitKey(0) & 0xff == 27:\n cv2.destroyAllWindows()\n","sub_path":"lab7/second.py","file_name":"second.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"554360921","text":"#!/usr/bin/python3\n\"\"\" Module that represent the view for State objects \"\"\"\n\nfrom api.v1.views import app_views\nfrom flask import jsonify, request, abort\nfrom models import storage\nfrom models.state import State\n\n\n@app_views.route(\"/states\", methods=['GET', 'POST'], strict_slashes=False)\n@app_views.route(\"/states/\", methods=['GET', 'DELETE', 'PUT'],\n 
strict_slashes=False)\ndef all_states(state_id=None):\n \"\"\" this function retrieves the list of all State objects \"\"\"\n if request.method == 'GET':\n _states = storage.all(State).values()\n if state_id is None:\n list_states = list()\n for state in _states:\n list_states.append(state.to_dict())\n return jsonify(list_states)\n else:\n list_states = list()\n for state in _states:\n if state.id == state_id:\n list_states.append(state.to_dict())\n return jsonify(list_states[0])\n abort(404)\n if request.method == 'DELETE':\n obj_state = storage.get(State, state_id)\n if obj_state:\n storage.delete(obj_state)\n storage.save()\n return jsonify(dict()), 200\n abort(404)\n if request.method == 'POST':\n try:\n conv_body = request.get_json()\n if 'name' not in conv_body:\n return \"Missing name\\n\", 400\n new_inst = State(name=conv_body.get('name'))\n storage.new(new_inst)\n storage.save()\n return jsonify(new_inst.to_dict()), 201\n except:\n abort(400, description=\"Not a JSON\")\n if request.method == 'PUT':\n new_inst = storage.get(State, state_id)\n if not new_inst:\n abort(404)\n try:\n list_ignore = ['id', 'created_at', 'updated_at']\n conv_body = request.get_json()\n for key, value in conv_body.items():\n if key not in list_ignore:\n setattr(new_inst, key, value)\n new_inst.save()\n return jsonify(new_inst.to_dict()), 200\n except:\n abort(400, description=\"Not a JSON\")\n","sub_path":"api/v1/views/states.py","file_name":"states.py","file_ext":"py","file_size_in_byte":2187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"79578600","text":"import os\nfrom decimal import Decimal\nfrom pympi import Eaf, Elan\nfrom .format_utils import (\n TECH_REGEX, ANNOTATION_WORD_SEP, ANNOTATION_OPTION_SEP,\n ANNOTATION_PART_SEP, ANNOTATION_TAG_SEP, UNKNOWN_PREFIX\n)\n\n\n# TODO: join all funcs in ElanObject and use it everywhere\n\n\nclass Tier:\n def __init__(self, name, info):\n self.name = name\n self.aligned_annotations = info[0]\n self.reference_annotations = info[1]\n self.attributes = info[2]\n self.ordinal = info[3]\n\n self.top_level = False\n if 'PARENT_REF' not in self.attributes.keys():\n self.top_level = True\n\n self.side = None\n if '_i_' in self.name:\n self.side = 'interviewer'\n elif '_n_' in self.name:\n self.side = 'speaker'\n\n\nclass ElanObject:\n def __init__(self, path_to_file):\n self.path = path_to_file\n self.Eaf = Eaf(path_to_file)\n self.Eaf.clean_time_slots()\n self.load_tiers()\n self.load_annotation_data()\n self.load_participants()\n\n def load_participants(self):\n participants_lst = []\n\n for tier_obj in self.tiers_lst:\n try:\n p_title = tier_obj.attributes['PARTICIPANT'].title()\n if p_title not in participants_lst:\n participants_lst.append(p_title)\n except KeyError:\n pass\n\n self.participants_lst = participants_lst\n\n def load_tiers(self):\n tiers_lst = []\n for tier_name, tier_info in self.Eaf.tiers.items():\n tiers_lst.append(Tier(tier_name, tier_info))\n self.tiers_lst = sorted(tiers_lst, key=lambda data: data.ordinal)\n\n def load_annotation_data(self):\n annot_data_lst = []\n for tier_obj in self.tiers_lst:\n if tier_obj.top_level:\n for annot_data in self.Eaf.get_annotation_data_for_tier(tier_obj.name):\n annot_data_lst.append(annot_data + (tier_obj.name,))\n self.annot_data_lst = sorted(annot_data_lst, key=lambda data: data[0])\n\n def get_tier_obj_by_name(self, tier_name):\n for tier_obj in self.tiers_lst:\n if tier_obj.name == tier_name:\n return tier_obj\n return None\n\n def 
add_extra_tags(self, parent_tier_name, start, end, value, typ):\n if typ == 'annotation':\n tier_name = parent_tier_name + '_annotation'\n ling = 'tokenz_and_annot'\n elif typ == 'standartization':\n tier_name = parent_tier_name + '_standartization'\n ling = 'stndz_clause'\n else:\n return\n\n if self.get_tier_obj_by_name(tier_name) is None:\n self.Eaf.add_tier(tier_name, ling=ling, parent=parent_tier_name)\n self.load_tiers()\n\n try:\n self.Eaf.remove_annotation(tier_name, (start + end) / 2, clean=True)\n except KeyError:\n pass\n\n self.Eaf.add_annotation(tier_name, start, end, value)\n\n def save(self):\n self.Eaf.clean_time_slots()\n try:\n os.remove(self.path + '.bak')\n except OSError:\n pass\n\n Elan.to_eaf(self.path, self.Eaf, pretty=True)\n os.remove(self.path + '.bak')\n\n def process_html_annot(self, html_annot):\n tier_name = html_annot.xpath('*[@class=\"annot\"]/@tier_name')[0]\n raw_start = html_annot.xpath('*[@class=\"audiofragment\"]/@starttime')[0]\n raw_end = html_annot.xpath('*[@class=\"audiofragment\"]/@endtime')[0]\n start = int(Decimal(raw_start))\n end = int(Decimal(raw_end))\n t_counter = 0\n annot_value_lst = []\n nrm_value_lst = []\n\n for token in html_annot.xpath('*//token'):\n nrm_lst = token.xpath('nrm/text()')\n lemma_lst = token.xpath('lemma/text()')\n morph_lst = token.xpath('morph/text()')\n\n try:\n if lemma_lst + morph_lst:\n annot_value_lst.append(ANNOTATION_PART_SEP.join([str(t_counter), lemma_lst[0], morph_lst[0]]))\n if nrm_lst:\n nrm_value_lst.append(ANNOTATION_PART_SEP.join([str(t_counter), nrm_lst[0]]))\n except IndexError:\n print(\n 'Exception while saving. Normalization: %s,'\n 'Lemmata: %s, Morphology: %s, Counter: %s'\n % (nrm_lst, lemma_lst, morph_lst, t_counter)\n )\n\n t_counter += 1\n\n if annot_value_lst:\n self.add_extra_tags(\n tier_name, start, end, ANNOTATION_WORD_SEP.join(annot_value_lst), 'annotation'\n )\n\n if nrm_value_lst:\n self.add_extra_tags(\n tier_name, start, end, ANNOTATION_WORD_SEP.join(nrm_value_lst), 'standartization'\n )\n\n\ndef clean_transcription(transcription):\n return TECH_REGEX.sub('', transcription).strip()\n\n\ndef get_tier_alignment(orig_tier, standartization_tier, annotation_tier):\n tier_alignment = {(ann[0], ann[1]): [ann[2], None, None] for ann in orig_tier}\n\n for ann in standartization_tier:\n if (ann[0], ann[1]) in tier_alignment:\n tier_alignment[(ann[0], ann[1])][1] = ann[2]\n\n for ann in annotation_tier:\n if (ann[0], ann[1]) in tier_alignment:\n tier_alignment[(ann[0], ann[1])][2] = ann[2]\n\n return tier_alignment\n\n\ndef get_annotation_alignment(annotation, num_regex):\n annotations = {}\n if annotation is not None:\n for ann in annotation.split(ANNOTATION_WORD_SEP):\n ann_parts = num_regex.search(ann).groups()\n ann_num = ann_parts[0]\n # ann_parts len is 3 for annotations and 2 for standartizations\n ann = ann_parts[1:] if len(ann_parts) > 2 else ann_parts[1]\n annotations[int(ann_num)] = ann\n return annotations\n\n\ndef split_ann_for_db(ann):\n lemma_view, tags_view = ann # should always be a list\n lemma = lemma_view.lower().replace(UNKNOWN_PREFIX, '')\n tags = tags_view.lower().split(ANNOTATION_TAG_SEP)\n annotation = {\n 'lemma': lemma,\n 'tags': tags,\n 'lemma_view': lemma_view,\n 'tags_view': tags_view\n }\n\n return [annotation] # todo: get rid of list\n","sub_path":"trimco/corpora/utils/elan_utils.py","file_name":"elan_utils.py","file_ext":"py","file_size_in_byte":6213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} 
+{"seq_id":"594515599","text":"import sqlite3\nfrom hashlib import md5\n\ndef hash(text):\n return md5(text.encode()).hexdigest()\n\ndef dict_factory(cursor, row):\n d = {}\n for idx, col in enumerate(cursor.description):\n d[col[0]] = row[idx]\n return d\n\nclass Wikidb(object):\n \"\"\" A class for handling wiki data. \"\"\"\n\n def __init__(self, db_path='wiki.db'):\n with open('initialize.sql', 'r') as sql:\n self.tablesql = sql.read().split(';')\n self.conn = sqlite3.connect(db_path)\n self.conn.row_factory = dict_factory\n self.cur = self.conn.cursor()\n for command in self.tablesql:\n self.cur.execute(command)\n self.conn.commit()\n\n def put(self, subject, body, author_email='anonymous'):\n \"\"\"\n This function creates or updates an article.\n \"\"\"\n history_id = hash(str(subject+body))\n self.cur.execute(\"\"\"INSERT OR REPLACE INTO history (body, history_id)\n VALUES (?,?)\"\"\", (body, history_id))\n self.cur.execute(\"\"\"INSERT INTO authorship(subject, author_email, history_id)\n VALUES(?, ?, ?)\"\"\", (subject.lower(), author_email.lower(), history_id))\n self.conn.commit()\n\n def detail(self, subject):\n \"\"\" \n Returns detailed info on an article as a dictionary.\n This is the intended way to fetch article text.\n An empty dict is returned if no exact match for subject, this is to avoid object exceptions when using get method.\n \"\"\"\n detail_sql = \"\"\"\n SELECT subject\n , created_on\n , creator_email\n , last_updated_on\n , updator_email\n , body\n FROM v_first_last\n WHERE subject = ?;\n \"\"\"\n self.cur.execute(detail_sql, [subject])\n article_text = self.cur.fetchone()\n if not article_text:\n return dict()\n else:\n return article_text\n\n def search(self, subject_text, strict=False):\n \"\"\"\n Takes a subject_text and optionally strict True/False.\n Returns a list of article subjects which contain subject_text.\n Strict flag makes searches require an exact match instead of contains.\n \"\"\"\n if not strict:\n subject_text = '%'+subject_text.lower()+'%'\n search_query = \"\"\"\n SELECT DISTINCT subject\n FROM authorship\n WHERE subject like ?;\n \"\"\"\n self.cur.execute(search_query, [subject_text])\n return self.cur.fetchall()\n","sub_path":"dbfunctions.py","file_name":"dbfunctions.py","file_ext":"py","file_size_in_byte":2469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"510549796","text":"\nimport sqlite3\nimport time\nimport googlemaps\nfrom datetime import datetime\n\nnow=datetime.now()\n\n#free code\n#api_key='AIzaSyCgBrIh97GTLhnLK7NMkGTSqZMbHENuMJo'\n\n#paid code\napi_key='AIzaSyBZG6yfPokTPWdIt3MijWJiyT91v-9rt5M'\n\n\ndef gDistTime(source, destination):\n gmaps = googlemaps.Client(key=api_key)\n now = datetime.now()\n directions_result = gmaps.directions(source, destination, mode=\"driving\",departure_time=now)\n for map1 in directions_result:\n overall_stats = map1['legs']\n for dimensions in overall_stats:\n distance = dimensions['distance']\n duration = dimensions['duration']\n return (distance,duration)\n\n\n\n\ndtb='data_files/medSched.sqlite'\nconn=sqlite3.connect(dtb)\nc=conn.cursor()\n\nc.execute('Delete from inZipDT')\n\nc.execute('''select a.comb_key,\n a.zip, \n a.mbr1,\n b.full_addr,\n a.mbr2,\n c.full_addr\n from combs a, mbrs b, mbrs c\n where a.mbr1 = b.mbr_id and\n a.mbr2 =c.mbr_id\n ''')\n\n\ntrips=c.fetchall()\n\nfor trip in trips:\n try:\n gdt=gDistTime(trip[3],trip[5])\n gdist=gdt[0]['text']\n gtime=gdt[1]['text']\n\n except:\n gdist=0\n dtime=0\n \n 
print(trip[0],gdist,gtime)\n c.execute('''INSERT INTO inZipDT(comb_key,zip,dist,time)\n VALUES(?,?,?,?)''', (trip[0],trip[1],gdist,gtime)) \n\n time.sleep(0.1)\nconn.commit()\nconn.close()\n","sub_path":"medSched/old/inZip3.py","file_name":"inZip3.py","file_ext":"py","file_size_in_byte":1491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"548888468","text":"#Reading contents of a file and printing out how many types a certain item is in the text.\n\nwith open('namelist.txt' , \"r\") as read_file:\n\tall_text = read_file.read()\n\tall_text = all_text.split(\"\\n\")\n\nnames = {}\n\nfor name in all_text:\n\tif name not in names.keys():\n\t\tnames.update({name : 1})\n\telse:\n\t\tnames[name] = names[name] + 1\n\n\nprint(names)","sub_path":"python exercices/read_file.py","file_name":"read_file.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"407224474","text":"import discord\nfrom discord.ext import commands\n\n# ----------------------------------------------------------------------------------------------\n# Returns the ping of the bot, useful for testing bot lag and as a simple functionality command\n# ----------------------------------------------------------------------------------------------\nfrom src import qna\n\n\nclass qanda(commands.Cog):\n\n def __init__(self, bot):\n self.bot = bot\n\n ###########################\n # Function: ask\n # Description: command to ask question and sends to qna module\n # Inputs:\n # - ctx: context of the command\n # - question: question text\n # Outputs:\n # - User question in new post\n ###########################\n @commands.command(name='ask', help='Ask question. Please put question text in quotes.')\n async def ask_question(self, ctx, question):\n ''' ask question command '''\n # make sure to check that this is actually being asked in the Q&A channel\n if ctx.channel.name == 'q-and-a':\n await qna.question(ctx, question)\n else:\n await ctx.author.send('Please send questions to the #q-and-a channel.')\n await ctx.message.delete()\n\n @commands.command(name='answer', help='Answer specific question. Please put answer text in quotes.')\n async def answer_question(self, ctx, q_num, answer):\n \"\"\"\n Function:\n answer\n Description:\n command to answer question and sends to qna module\n Inputs:\n - ctx: context of the command\n - q_num: question number to answer\n - answer: answer text\n Outputs:\n User answer in question post\n \"\"\"\n # make sure to check that this is actually being asked in the Q&A channel\n if ctx.channel.name == 'q-and-a':\n await qna.answer(ctx, q_num, answer)\n else:\n await ctx.author.send('Please send answers to the #q-and-a channel.')\n await ctx.message.delete()\n\n\n# -------------------------------------\n# add the file to the bot's cog system\n# -------------------------------------\ndef setup(bot):\n bot.add_cog(qanda(bot))\n\n# Copyright (c) 2021 War-Keeper\n","sub_path":"cogs/ask_and_answer.py","file_name":"ask_and_answer.py","file_ext":"py","file_size_in_byte":2235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"332764738","text":"'''\n Time Complexity:\n O(n)\n\n Space Complexity:\n O(1)\n\n Did this code successfully run on LeetCode?:\n Yes\n\n Problems faced while coding this:\n None\n\n Approach:\n From the end, find the index of the first number that is in decreasing order. 
Let's call it i.\n If i >= 0:\n From the end, find the index of the first number that is greater than the number at i. Let's call it j.\n Swap the numbers at i and j.\n Reverse the numbers starting from i+1.\n'''\n\nclass Solution:\n def __init__(self):\n self.nums = []\n\n def nextPermutation(self, nums: List[int]) -> None:\n \"\"\"\n Do not return anything, modify nums in-place instead.\n \"\"\"\n self.nums = nums\n\n i = self.find_first_decreasing()\n if i >= 0:\n j = self.find_first_greater(i)\n self.swap(i, j)\n self.reverse(start=i+1)\n\n def find_first_decreasing(self):\n i = len(self.nums) - 2\n while i >= 0:\n if self.nums[i] < self.nums[i + 1]:\n break\n i -= 1\n return i\n\n def find_first_greater(self, i):\n num = self.nums[i]\n j = len(self.nums) - 1\n\n while j >= 0:\n if self.nums[j] > num:\n break\n j -=1\n\n return j\n\n def swap(self, i, j):\n self.nums[i], self.nums[j] = self.nums[j], self.nums[i]\n\n def reverse(self, start):\n i = start\n j = len(self.nums) - 1\n\n while i < j:\n self.swap(i, j)\n i += 1\n j -= 1\n","sub_path":"NextPermutation.py","file_name":"NextPermutation.py","file_ext":"py","file_size_in_byte":1597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"625437883","text":"def pattern(lines):\r\n\tlis = list()\r\n\tn = lines ** 2\r\n\tcount = 0\r\n\ttemp = 0\r\n\tfor i in range(1, n):\r\n\t\tlis.append(i)\r\n\t\ta = len(lis)\r\n\t\tif a > temp: # for rows condition\r\n\t\t\ttemp = a\r\n\t\t\tcount += 1\r\n\t\t\tif count % 2 == 0: # for reverse order\r\n\t\t\t\tlis.sort(reverse = True)\r\n\t\t\t\tprint()\r\n\t\t\t\tfor i in lis:\r\n\t\t\t\t\tprint(i, end = \" \")\r\n\t\t\t\tlis.clear()\r\n\t\t\telse: # for normal order\r\n\t\t\t\tprint()\r\n\t\t\t\tfor i in lis:\r\n\t\t\t\t\tprint(i, end = \" \")\r\n\t\t\t\tlis.clear()\r\n\t\tif count == lines: # for number of lines\r\n\t\t\tbreak\r\n\r\npattern(5)","sub_path":"number_triangle_in_pattern.py","file_name":"number_triangle_in_pattern.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"499243434","text":"import sys\n\nclass Klump:\n def __init__(self, i, branch):\n self.i = i\n self.n_buds = branch[0]\n self.buds = branch[1:]\n\ndef grow(start):\n splits = 0\n r_d = {}\n for bud in start.buds:\n if nexs[(start.i,bud)]:\n nexs[(start.i,bud)] = False\n nexs[(bud,start.i)] = False\n ret_val = grow(ks[bud])\n if ret_val in r_d:\n r_d[ret_val] += 1\n else:\n r_d[ret_val] = 0\n splits += 1\n\n if len(r_d) > 0:\n max_key = max(k for k, v in r_d.items())\n to_comp = max_key + r_d[max_key]\n return max(splits,to_comp) + 1 \n else:\n return splits\n\nsys.setrecursionlimit(1000000000)\nN = int(sys.stdin.readline())\nks = []\nfor i in range(N):\n ks.append(Klump(i,[int(x) for x in sys.stdin.readline().split(\" \")]))\nnexs = {} \nfor klump in ks:\n for bud in klump.buds:\n nexs[(klump.i,bud)] = True\n\nrecord = len(ks)\nfor i in range(len(ks)):\n time = grow(ks[i]) - 1 \n nexs = {k:True for (k,v) in nexs.items()}\n if time < record:\n record = time \nprint(record)\n\n","sub_path":"Python/code_comp/Programmeringsolympiaden/Bonsai/app_copy.py","file_name":"app_copy.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"294136191","text":"# coding: utf-8\nimport pytest\nfrom MobileApps.libs.flows.android.hpbridge.utility.api_utility import APIUtility\n\npytest.app_info = 
\"hpbridge\"\n\n\nclass TestEnterMPFromPA(object):\n\n @pytest.fixture(scope=\"class\", autouse=\"true\")\n def class_setup(self, request, hpbridge_test_setup):\n self = self.__class__\n self.driver, self.fc = hpbridge_test_setup\n # cls.p = load_printers_session\n\n # Define flows\n self.wechat = self.fc.flow[\"wechat\"]\n self.mphome = self.fc.flow[\"mp_home\"]\n self.pa_home = self.fc.flow[\"pa_home\"]\n\n # Define variables\n self.api_utility = APIUtility()\n self.api_utility.unbind_all_printers()\n\n \"\"\"\n Pre-conditions:\n 1. Install the WeChat app.\n 2. Login Wechat with a valid account and enter the WeChat Applet.\n 3. There is no printer bound to this login WeChat account.\n 4. User has followed the related WeChat official account.\n \"\"\"\n def test_01_enter_miniprogram_from_public_account(self):\n \"\"\"\n Steps:\n 1.Enter to WeChat Official Account.\n 2.Click on the Personal Center -> My Printer.\n 3.Long press the QR code and then click \"前往图中包含的小程序\".\n 4.Bind the printer from applet.\n 5.Enter to WeChat Official Account and unbinding all the printer.\n Expected result:\n 1.Verify the official account home page displayed correctly.\n 2.Verify the \"无绑定的打印机 请先启动小程序绑定打印机\" and applet QR code should be shown on this page correctly.\n 3.Verify the applet launched and applet home page displayed correctly.\n 4.Verify the printer bind successful.\n 5.Verify the \"无绑定的打印机 请先启动小程序绑定打印机\" and applet QR code should be shown on my printer page correctly.\n \"\"\"\n self.wechat.goto_pa()\n self.pa_home.go_to_mp_from_pa()\n self.mphome.verify_home_page_displayed()\n\n\n","sub_path":"MobileApps/tests/android/hpbridge/regression/Service Account Test/test_03_Enter_Applet_from_Public_Account_my_printer.py","file_name":"test_03_Enter_Applet_from_Public_Account_my_printer.py","file_ext":"py","file_size_in_byte":2053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"229986793","text":"from django.conf.urls import url\n\nfrom . 
import views\n\nurlpatterns = [\n    url(r'^edit/(?P<name>[\\w]+)/$', views.edit, name='edit_page'),\n    url(r'^add/$', views.add, name='add_page'),\n    url(r'^remove/(?P<name>[\\w]+)/$', views.remove),\n    url(r'^$', views.main, name='main_page'),\n    url(r'^topA$', views.topdrivers, name='drivers_top_page'),\n]\n\n","sub_path":"taxi/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"361786972","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 27 17:11:18 2017\n\n@author: clark\n\"\"\"\n\nfrom bitarray import bitarray\nimport mmh3\nimport numpy as np\nimport pandas as pd\n\nclass BloomFilter:\n\n    def __init__(self, size, hash_count):\n        self.size = size\n        self.hash_count = hash_count\n        self.bit_array = bitarray(size)\n        self.bit_array.setall(0)\n\n    def add(self, string):\n        for seed in xrange(self.hash_count):\n            result = mmh3.hash(string, seed) % self.size\n            self.bit_array[result] = 1\n\n    def lookup(self, string):\n        for seed in xrange(self.hash_count):\n            result = mmh3.hash(string, seed) % self.size\n            if self.bit_array[result] == 0:\n                return \"Nope\"\n        return \"Probably\"\n\n# k is the number of hash functions, set to 5\n# df = pd.read_csv('./ftrain.txt')\n# a=df['115.177.11.215'].unique()\n# item count is 361423, round it up to 361500\n# bit array size vs. false-positive rate: 1e4(fp 1.0000) 1e5(fp 0.9999) 1e6(0.4082) 5e6(0.0026)\nbf = BloomFilter(1000000, 5)\n\nlines = open(\"./src_ip.txt\").read().splitlines()\nftrain=open('./ftrain.txt').read().splitlines()\nftest=open('./ftest.txt').read().splitlines()\nsumline=3414011\nhalfline=np.round(sumline/2)  # 1707005\n\ntrain=pd.read_csv('./ftrain.txt')\ntest=pd.read_csv('./ftest.txt')\n\ntruni=train['115.177.11.215'].unique()  # 361423\nteuni=test['42.117.99.84'].unique()  # 251098\n\ntrue=np.zeros(len(teuni))\nfor i in range(0,len(teuni)):\n    print ('i{}'.format(i))\n    if (teuni[i] in truni):\n        true[i]=1\n    else :\n        true[i]=0\n\nfor str in truni:\n    bf.add(str)\n\npred=np.zeros(len(teuni))\nfor j in range(0,len(teuni)):\n    print ('j{}'.format(j))\n    if bf.lookup(teuni[j]) == 'Probably':\n        pred[j]=1\n    else:\n        pred[j]=0\n\nfp=0\nfor i in range(0,len(true)):\n    if (true[i]==0 and pred[i]==1):\n        fp=fp+1\n    else:\n        pass","sub_path":"lab3/codes/bloomFilter.py","file_name":"bloomFilter.py","file_ext":"py","file_size_in_byte":1902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"131793885","text":"from datetime import timedelta\n\nfrom crispy_forms import layout\nfrom django import forms\nfrom django.conf import settings\nfrom django.contrib.humanize.templatetags.humanize import naturaltime\nfrom django.core.exceptions import ValidationError\nfrom django.db import models\nfrom django.template.loader import render_to_string\nfrom django.utils import timezone\nfrom django_cradmin import crapp\nfrom django_cradmin import crinstance\nfrom django_cradmin.viewhelpers import create\nfrom django_cradmin.viewhelpers import crudbase\nfrom django.utils.translation import ugettext_lazy as _\nfrom django_cradmin.widgets.datetimepicker import DateTimePickerWidget\n\nfrom devilry.apps.core.models import Assignment\nfrom devilry.utils import datetimeutils\nfrom devilry.utils import nodenamesuggestor\n\n\nclass CreateForm(forms.ModelForm):\n    class Meta:\n        model = Assignment\n        fields = [\n            'long_name',\n            'short_name',\n            'first_deadline',\n            'publishing_time',\n            'parentnode'\n        ]\n\n    def __init__(self, *args, **kwargs):\n        self.period = 
kwargs.pop('period')\n super(CreateForm, self).__init__(*args, **kwargs)\n self.fields['long_name'].help_text = _(\n 'Type the name of your assignment.')\n self.fields['short_name'].help_text = _(\n 'Up to 20 letters of lowercase english letters (a-z), '\n 'numbers, underscore (\"_\") and hyphen (\"-\").')\n self.fields['first_deadline'].widget = DateTimePickerWidget(\n required=True,\n minimum_datetime=timezone.now() + timedelta(\n minutes=settings.DEVILRY_ASSIGNMENT_PUBLISHING_TIME_DELAY_MINUTES),\n maximum_datetime=self.period.end_time)\n self.fields['first_deadline'].required = True\n self.fields['first_deadline'].label = _('Set first deadline')\n self.fields['first_deadline'].help_text = _(\n 'The first deadline for this assignment. This is shared by all the '\n 'students on the assignment.'\n )\n\n # We ignore this, we just need to include it to be able to\n # change the value in clean()\n self.fields['publishing_time'].widget = forms.HiddenInput()\n self.fields['publishing_time'].required = False\n\n # We ignore this, we just need to include it to be able to\n # change the value in clean()\n self.fields['parentnode'].widget = forms.HiddenInput()\n self.fields['parentnode'].required = False\n\n def clean(self):\n cleaned_data = super(CreateForm, self).clean()\n first_deadline = cleaned_data.get('first_deadline', None)\n cleaned_data['parentnode'] = self.period\n if first_deadline:\n publishing_time = timezone.now() + timedelta(\n minutes=settings.DEVILRY_ASSIGNMENT_PUBLISHING_TIME_DELAY_MINUTES)\n if first_deadline < publishing_time:\n publishing_time_naturaltime = naturaltime(timezone.now() + timedelta(\n minutes=settings.DEVILRY_ASSIGNMENT_PUBLISHING_TIME_DELAY_MINUTES,\n # We add some seconds to make the naturaltime show the correct amount of\n # hours/minutes because at least a small fraction of time will pass between\n # creating the datetime and the formatting in the naturaltime function.\n seconds=10))\n raise ValidationError({\n # Translators: The \"delay\" is formatted as \"X hours/minutes from now\"\n 'first_deadline': _('First deadline must be at least %(delay)s.') % {\n 'delay': publishing_time_naturaltime\n }\n })\n cleaned_data['publishing_time'] = publishing_time\n return cleaned_data\n\n\nclass CreateView(crudbase.OnlySaveButtonMixin, create.CreateView):\n form_class = CreateForm\n model = Assignment\n suggested_deadlines_template_name = 'devilry_admin/period/createassignment/suggested_deadlines.django.html'\n helpbox_template_name = 'devilry_admin/period/createassignment/helpbox.django.html'\n\n def dispatch(self, *args, **kwargs):\n self.period = self.request.cradmin_role\n self.previous_assignment = self.period.assignments \\\n .exclude(first_deadline=None) \\\n .order_by('first_deadline') \\\n .last()\n return super(CreateView, self).dispatch(*args, **kwargs)\n\n def get_pagetitle(self):\n return u'{} - {}'.format(self.get_pageheading(), self.period.get_path())\n\n def get_pageheading(self):\n return _('Create new assignment')\n\n def get_form_kwargs(self):\n kwargs = super(CreateView, self).get_form_kwargs()\n kwargs['period'] = self.request.cradmin_role\n return kwargs\n\n def get_initial(self):\n initial = super(CreateView, self).get_initial()\n if self.previous_assignment:\n namesuggestion = nodenamesuggestor.Suggest(long_name=self.previous_assignment.long_name,\n short_name=self.previous_assignment.short_name)\n if namesuggestion.has_suggestion():\n namecollision_queryset = self.period.assignments.filter(\n 
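# only use the suggested names if neither collides with an existing assignment in this period\n                    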
models.Q(long_name=namesuggestion.suggested_long_name) |\n models.Q(short_name=namesuggestion.suggested_short_name))\n if not namecollision_queryset.exists():\n initial['long_name'] = namesuggestion.suggested_long_name\n initial['short_name'] = namesuggestion.suggested_short_name\n return initial\n\n def __get_suggested_deadlines(self):\n suggested_deadlines = []\n if self.previous_assignment:\n if self.previous_assignment.first_deadline > timezone.now():\n first_suggested_deadline = self.previous_assignment.first_deadline + timedelta(days=7)\n else:\n first_suggested_deadline = datetimeutils.datetime_with_same_day_of_week_and_time(\n weekdayandtimesource_datetime=self.previous_assignment.first_deadline,\n target_datetime=timezone.now())\n suggested_deadlines.append(first_suggested_deadline)\n for days_forward in range(7, (7 * 4), 7):\n suggested_deadline = first_suggested_deadline + timedelta(days=days_forward)\n suggested_deadlines.append(suggested_deadline)\n return suggested_deadlines\n\n def __render_suggested_deadlines_box(self):\n return render_to_string(self.suggested_deadlines_template_name, {\n 'suggested_deadlines': self.__get_suggested_deadlines()\n })\n\n def __render_help_box(self):\n return render_to_string(self.helpbox_template_name)\n\n def get_field_layout(self):\n return [\n layout.Div(\n layout.Field('long_name', placeholder=_('Example: Assignment 1'),\n focusonme='focusonme'),\n layout.Field('short_name', placeholder=_('Example: assignment1')),\n # layout.HTML(self.__render_help_box()),\n layout.Div(\n layout.Div(\n layout.Field('first_deadline'),\n css_class='col-sm-6'\n ),\n layout.HTML(self.__render_suggested_deadlines_box()),\n css_class='row'\n ),\n css_class='cradmin-globalfields'\n )\n ]\n\n def get_success_url(self):\n return crinstance.reverse_cradmin_url(\n instanceid='devilry_admin_assignmentadmin',\n appname='overview',\n roleid=self.created_assignment.id\n )\n\n def form_saved(self, object):\n self.created_assignment = object\n if self.previous_assignment:\n self.created_assignment.copy_groups_from_another_assignment(self.previous_assignment)\n else:\n self.created_assignment.create_groups_from_relatedstudents_on_period()\n self.created_assignment.setup_examiners_by_relateduser_syncsystem_tags()\n\n\nclass App(crapp.App):\n appurls = [\n crapp.Url(r'^$', CreateView.as_view(), name=crapp.INDEXVIEW_NAME)\n ]\n","sub_path":"devilry/devilry_admin/views/period/createassignment.py","file_name":"createassignment.py","file_ext":"py","file_size_in_byte":8256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"425780887","text":"\"\"\"This file includes all the operations that needed to detect locations of the ID card\"\"\"\nimport numpy as np\nimport cv2\n\ndef detect_edges(img):\n \"\"\"\n Detect edges using Canny edge detection algorithm\n\n img: should be 8 bit image\n i.e. 
grayscale or single channel image\n    \"\"\"\n    dilate_kernel = np.ones((3, 3), np.uint8)\n    noise_removed_img = cv2.dilate(img, dilate_kernel)\n    # the two thresholds for Canny edge detection should be set case-by-case (ideally programmatically);\n    # this is just a temporary setting based on good results from experiments\n    edge_img = cv2.Canny(noise_removed_img, 255/3, 255)\n    # dilate operation against the inverse binary output of Canny() to get a better result\n    # (essentially connecting isolated dots)\n    edge_img = cv2.dilate(edge_img, dilate_kernel)\n    return edge_img\n\ndef find_max_area_rectangle(img, min_area_scale=0.3, max_area_scale=0.8):\n    \"\"\"\n    Find the largest rectangle in the image\n\n    img: should be 8 bit image (grayscale or single channel image);\n         using an image containing only edges often gets a better result\n    min_area_scale: minimum percentage of total area that could be detected as a rectangle\n    max_area_scale: maximum percentage of total area that could be detected as a rectangle;\n                    having an upper boundary forces the user to give a picture with background\n                    to enable detecting the edge of the ID card\n    \"\"\"\n    height, width = img.shape[0], img.shape[1]\n    min_area = height * width * min_area_scale\n    max_area = height * width * max_area_scale\n    # by passing the mode cv2.RETR_EXTERNAL, only the outermost contours (if any) will be detected\n    im2, contours, hierarchy = cv2.findContours(img,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\n    maximum_area = 0\n    ret = None\n    for cnt in contours:\n        # contour approximation is not used because it is not stable when the ID card has\n        # smooth corner edges\n        #approx = cv2.approxPolyDP(cnt,0.1*cv2.arcLength(cnt,True),True)\n\n        # instead of contour approximation, place a bounding box directly, assuming the outer contour is the correct one\n        rect = cv2.minAreaRect(cnt)\n        area = rect[1][0] * rect[1][1]\n        if min_area < area < max_area and maximum_area < area:\n            ret = rect\n            maximum_area = area\n    return ret\n\ndef get_rectangle_area(img, rect):\n    \"\"\"\n    Crop the rectangle of interest out of the img and adjust the angle if it is rotated\n    \"\"\"\n    (x, y), (w, h), theta = rect\n    goal = np.float32([(0, h), (0, 0), (w, 0), (w, h)])\n    box = cv2.boxPoints(rect)\n\n    # construct the transformation matrix\n    M = cv2.getPerspectiveTransform(box, goal)\n    # get the transformed img\n    dst = cv2.warpPerspective(img, M, (int(w), int(h)))\n    return dst","sub_path":"1-Computer-Vision-Projects/OCR/utils/ID_cards/detection.py","file_name":"detection.py","file_ext":"py","file_size_in_byte":2774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"176829773","text":"import pika\nfrom flask import Flask, render_template, request\n\napp = Flask(__name__)\n\nconnection = pika.BlockingConnection(pika.ConnectionParameters('rabbit', heartbeat=600))\nchannel = connection.channel()\n\nchannel.basic_qos(prefetch_count=1)\n\nchannel.queue_declare(queue='queue')\n\n@app.route('/')\ndef home():\n    return render_template(\"home.html\")\n\n@app.route('/add')\ndef add():\n    channel.basic_publish(exchange='', routing_key='queue', body='Hello World!')\n    return render_template(\"add.html\")\n\n@app.route('/get')\ndef get():\n    res = channel.queue_declare(\n        queue=\"queue\",\n        passive=True\n    )\n    return render_template(\"get.html\", count=str(res.method.message_count))\n\nif __name__ == \"__main__\":\n    app.run(host=\"0.0.0.0\", 
port=5000)\n","sub_path":"gateway/gateway.py","file_name":"gateway.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"53310261","text":"\n\nfrom xai.brain.wordbase.adjectives._earthly import _EARTHLY\n\n#calss header\nclass _EARTHLIEST(_EARTHLY, ):\n\tdef __init__(self,): \n\t\t_EARTHLY.__init__(self)\n\t\tself.name = \"EARTHLIEST\"\n\t\tself.specie = 'adjectives'\n\t\tself.basic = \"earthly\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/adjectives/_earthliest.py","file_name":"_earthliest.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"214026705","text":"from unittest.mock import patch\nimport json\n\nfrom django.test import TestCase\nfrom rest_framework.reverse import reverse\n\nfrom rest_framework.test import APIClient\nfrom rest_framework import status\nfrom api.models import NumberPlate, CarModel\nfrom api.tasks import task_car_model_cb, task_car_model_get_picture\n\n\nPLATE_URL = reverse('numberplate-list')\nPLATE_URL_DETAIL = reverse('numberplate-detail', kwargs={'pk': 1})\n\nclass NumberPlateTest(TestCase):\n \"\"\" Test the number plate API \"\"\"\n def setUp(self):\n self.client = APIClient()\n self.payload = {\n 'number': 'EZJ123',\n 'owner': 'TOM',\n 'car_model': {\n 'manufacturer': 'vw',\n 'model': 'Golf'\n }\n }\n self.prepopulate_db()\n\n def prepopulate_db(self):\n car_model_rec = {\n 'manufacturer': 'porsche',\n 'model': '911'\n }\n car_model = CarModel.objects.create(**car_model_rec)\n car_model_rec = {\n 'manufacturer': 'porsche',\n 'model': 'rs'\n }\n car_model = CarModel.objects.create(**car_model_rec)\n number_plate_rec = {\n 'number': 'ANJ519',\n 'owner': 'Peter',\n 'car_model': car_model\n }\n NumberPlate(**number_plate_rec).save()\n\n @patch('api.tasks.get_image')\n @patch('api.tasks.task_car_model_get_picture.retry')\n def test_task_car_model_get_picture_exception(self, mock_retry, mock_get_image):\n mock_get_image.side_effect = Exception('shit happens')\n # funcion retunrs FAILED\n self.assertEqual(task_car_model_get_picture(car_model_id=1), 'FAILED')\n mock_get_image.assert_called_with('porsche 911')\n car_model = CarModel.objects.get(pk=1)\n # CarModel DB have saved proper values\n self.assertEqual(car_model.ctask_status, 'FAILED')\n self.assertEqual(car_model.ctask_message, 'Failed to get image of car model porsche 911. 
Due to: Exception:shit happens')\n\n @patch('api.tasks.get_image')\n def test_task_car_model_get_picture_success(self, mock_get_image):\n mock_get_image.return_value = 'images/porsche 911.jpg'\n self.assertEqual(task_car_model_get_picture(car_model_id=1), 'SUCCESS')\n mock_get_image.assert_called_with('porsche 911')\n car_model = CarModel.objects.get(pk=1)\n # CarModel DB have saved proper values\n self.assertEqual(car_model.image, 'images/porsche 911.jpg')\n self.assertEqual(car_model.ctask_status, 'SUCCESS')\n\n @patch('api.tasks.task_car_model_get_picture.delay')\n def test_task_car_model_cb_success(self, mock_func):\n \"\"\" Test task used by scheduler to create car_model_get_picture tasksc\"\"\"\n self.assertEqual(task_car_model_cb(), 'SUCCESS')\n self.assertEqual(mock_func.call_count, 2)\n mock_func.assert_called_with(2)\n\n def test_create_number_plate_success(self):\n \"\"\" Test creation of number plate record with valid payload is successful\"\"\"\n resp = self.client.post(PLATE_URL, self.payload, format='json')\n self.assertEqual(resp.status_code, status.HTTP_201_CREATED)\n resp_car_model = resp.data.get('car_model')\n resp_car_model.pop('image')\n car_model = CarModel.objects.get(**resp_car_model)\n resp.data['car_model'] = car_model\n self.assertTrue(NumberPlate.objects.filter(**resp.data).exists())\n\n def test_create_number_plate_without_car_model(self):\n \"\"\" Test creation of number plate record without car_model is successful\"\"\"\n payload = {\n 'number': 'abc123',\n 'owner': 'Peter'\n }\n resp = self.client.post(PLATE_URL, payload, format='json')\n self.assertEqual(resp.status_code, status.HTTP_201_CREATED)\n number_plate = NumberPlate.objects.get(pk=resp.data['id'])\n self.assertEqual(number_plate.number, 'ABC123')\n\n def test_create_number_plate_without_car_model_model(self):\n \"\"\" Test creation of number plate record without car_model model is successful\"\"\"\n payload = {\n 'number': 'ABC123',\n 'owner': 'Peter',\n 'car_model': {\n 'manufacturer': 'volvo'\n }\n }\n resp = self.client.post(PLATE_URL, payload, format='json')\n self.assertEqual(resp.status_code, status.HTTP_201_CREATED)\n number_plate = NumberPlate.objects.get(pk=resp.data['id'])\n car_model = CarModel.objects.get(manufacturer='volvo', model='')\n self.assertEqual(number_plate.car_model, car_model)\n\n def test_create_number_plate_without_car_model_manufacturer(self):\n \"\"\" Test creation of number plate record without car_model manufacturer is successful\"\"\"\n payload = {\n 'number': 'ABC124',\n 'owner': 'Peter',\n 'car_model': {\n 'model': '911'\n }\n }\n resp = self.client.post(PLATE_URL, payload, format='json')\n self.assertEqual(resp.status_code, status.HTTP_201_CREATED)\n number_plate = NumberPlate.objects.get(pk=resp.data['id'])\n car_model = CarModel.objects.get(manufacturer='', model='911')\n self.assertEqual(number_plate.car_model, car_model)\n\n def test_create_number_plate_owner_missing(self):\n \"\"\" Test create of number plate record owner missing\"\"\"\n payload = {\n 'number': 'ANJ520'\n }\n resp = self.client.post(PLATE_URL, payload)\n self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(json.dumps(resp.data), '{\"owner\": [\"This field is required.\"]}')\n\n def test_create_number_plate_put_method_on_create_endpoint(self):\n \"\"\" Test creation enpoint, try to use put method\"\"\"\n # not allowed method\n resp = self.client.put(PLATE_URL, self.payload, format='json')\n self.assertEqual(resp.status_code, 
status.HTTP_405_METHOD_NOT_ALLOWED)\n\n def test_number_plates_duplicaton(self):\n \"\"\" Test creation of number plate that already exists, fails \"\"\"\n resp = self.client.post(PLATE_URL, self.payload, format='json')\n resp = self.client.post(PLATE_URL, self.payload, format='json')\n self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)\n\n def test_update_number_plate_owner_with_emty_car_model(self):\n \"\"\" Test update of number plate owner\"\"\"\n payload = {\n 'number': 'ANJ519',\n 'owner': ' Jone '\n }\n resp = self.client.put(PLATE_URL_DETAIL, payload, format='json')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n number_plate = NumberPlate.objects.get(pk=1)\n self.assertEqual(number_plate.owner, 'Jone')\n self.assertEqual(number_plate.car_model, None)\n\n def test_update_number_plate_car_model(self):\n \"\"\" Test update of number plate owner\"\"\"\n payload = {\n 'number': 'ANJ519',\n 'owner': 'Peter',\n 'car_model': {\n 'manufacturer': 'vw',\n 'model': 'Passat'\n }\n }\n resp = self.client.put(PLATE_URL_DETAIL, payload, format='json')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n number_plate = NumberPlate.objects.get(pk=1)\n car_model = CarModel.objects.get(manufacturer='vw', model='passat')\n self.assertEqual(number_plate.car_model, car_model)\n\n def test_update_number_plate_car_model_with_existing_car_model(self):\n resp = self.client.post(PLATE_URL, self.payload, format='json')\n payload = {\n 'number': 'ANJ519',\n 'owner': 'Peter',\n 'car_model': {\n 'manufacturer': 'vw',\n 'model': 'Golf'\n }\n }\n resp = self.client.put(PLATE_URL_DETAIL, payload, format='json')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n number_plate = NumberPlate.objects.get(pk=1)\n car_model = CarModel.objects.get(manufacturer='vw', model='golf')\n self.assertEqual(number_plate.car_model, car_model)\n\n def test_update_number_plate_car_model_by_removing_model(self):\n payload = {\n 'number': 'ANJ519',\n 'owner': 'Jone',\n 'car_model': {\n 'manufacturer': 'porsche'\n }\n }\n resp = self.client.put(PLATE_URL_DETAIL, payload, format='json')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n number_plate = NumberPlate.objects.get(pk=1)\n car_model = CarModel.objects.get(manufacturer='porsche', model='')\n self.assertEqual(number_plate.car_model, car_model)\n\n def test_update_number_plate_with_missing_owner(self):\n payload = {\n 'number': 'ANJ519'\n }\n resp = self.client.put(PLATE_URL_DETAIL, payload)\n self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(json.dumps(resp.data), '{\"owner\": [\"This field is required.\"]}')\n\n def test_update_number_plate_with_not_valid_owners(self):\n owner_list = ['Peter P3n', 'Peter5', 'Peter55']\n for owner in owner_list:\n payload = {\n 'number': 'ANJ519',\n 'owner': owner\n }\n resp = self.client.put(PLATE_URL_DETAIL, payload)\n self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(json.dumps(resp.data), '{\"owner\": [\"Please use alphanumeric symbols\"]}')\n\n def test_detail_endpint_with_post_method(self):\n resp = self.client.post(PLATE_URL_DETAIL, self.payload, format='json')\n self.assertEqual(resp.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)\n\n def test_number_plate_format(self):\n \"\"\" Test creation of number plate format, failed\"\"\"\n # wrong number format\n number_list = ['1ab123', 'ab2abe', 'aabcde', 'ab123c', '12345']\n for number in number_list:\n payload = {\n 'number': number,\n 'owner': 'Peter'\n }\n resp = 
self.client.post(PLATE_URL, payload)\n self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(json.dumps(resp.data),\n '{\"number\": [\"Plate number should contain first three alphabetical letters '\n 'followed by three numbers. exmp.: ABC123\"]}')\n\n def test_number_plate_length(self):\n \"\"\" Test creation of number plate length, failed\"\"\"\n payload = {\n 'number': '1bc123g',\n 'owner': 'Peter'\n }\n resp = self.client.post(PLATE_URL, payload)\n self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual('. '.join(resp.data['number']),\n 'Plate number should contain first three alphabetical letters followed by three numbers. exmp.: ABC123. '\n 'Ensure this field has no more than 6 characters.')\n","sub_path":"api/tests/test_number_plates.py","file_name":"test_number_plates.py","file_ext":"py","file_size_in_byte":11001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"302495151","text":"# Django specific settings\nimport os\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"settings\")\n\n# Django imports\nfrom django.core.management.base import BaseCommand\n\n# Application specific imports\n\n# standard libs\nimport uuid\nimport datetime\nimport threading\nimport queue\n\n# other libs\nfrom sense_hat import SenseHat\n\n# models\nfrom aqms.models import *\n\n# queues for communication between threads\n#q_gen_messdaten = queue.Queue()\nq_to_db = queue.Queue()\n\nclass Command(BaseCommand):\n \n # Application logic,\n @classmethod\n def handle(cls, *args, **kwargs):\n print(\"get data\")\n try:\n pill2kill = threading.Event() # -> cyanide pills\n\n gen_messdaten_thread = threading.Thread(target=SENSORIO.gen_messdaten_t, args=(pill2kill,))\n gen_messdaten_thread.daemon = True # -> dies after main thread is closed\n gen_messdaten_thread.start()\n\n to_db_thread = threading.Thread(target=DBIO.to_db_t, args=(pill2kill,))\n to_db_thread.daemon = True # -> dies after main thread is closed\n to_db_thread.start()\n\n #SenseHat.show_message(\"#\", scroll_speed=1)\n\n cls.get_message('')\n\n finally:\n pill2kill.set()\n gen_messdaten_thread.join()\n to_db_thread.join()\n #return ('done', arg_counter)\n\n @staticmethod\n def get_message(placeholder):\n while True:\n sense = SenseHat()\n sense.clear()\n red = (255,0,0)\n\n sense.show_message(\"HTL - TATü\", text_colour=red)\n\n# SENSORIO class -> handles all input/output from/to the sensors\nclass SENSORIO:\n # meths\n @staticmethod\n def gen_messdaten_t(pill2kill): # <- code of gen_messdaten_t thread\n\n #fe_log = None\n\n try:\n while not pill2kill.is_set():\n try:\n \n messdaten = MESSDATEN()\n\n q_to_db.put(messdaten)\n\n except Exception as e:\n print('fufufufu' + str(e))\n #fe_log = FILEIO.write_to_log('fe_log.txt', f'S_LINK_Error: {e}\\n{traceback.format_exc()}')\n continue\n\n finally:\n #if fe_log:\n # CLIIO.print_to_shell('file create_slink_t error -> {root_dir}fe_log.txt')\n print('gen_messdaten_t closed')\n\n# DBIO class -> handles all input/output from/to database\nclass DBIO:\n # meths\n @staticmethod\n def to_db_t(pill2kill): # <- code of to_db_t thread\n \n #dbe_log = None\n\n try:\n while not pill2kill.is_set() or q_to_db.full():\n\n messdaten = q_to_db.get(block=True) # -> wait for input\n\n print(str(messdaten) + '\\n')\n \n if isinstance(messdaten, MESSDATEN):\n values_for_db = messdaten.for_db()\n\n # push to db\n messdaten_db = Messdaten(UID=values_for_db['uuid'], \\\n Temperatur=values_for_db['temperatur'], \\\n 
Luftdruck=values_for_db['luftdruck'], \\\n Luftfeuchtigkeit=values_for_db['luftfeuchtigkeit'], \\\n VOC=values_for_db['voc'], \\\n FEINSTAUBPM25=values_for_db['feinstaubpm25'], \\\n FEINSTAUBPM100=values_for_db['feinstaubpm100'], \\\n Datum=values_for_db['datum'], \\\n DatumZeit=values_for_db['datumzeit'])\n\n messdaten_db.save() \n\n q_to_db.task_done()\n finally:\n #if fe_log:\n # CLIIO.print_to_shell('file create_slink_t error -> {root_dir}fe_log.txt')\n print('to_db_t closed')\n\n# super EVIL object -> data of folders\nclass MESSDATEN:\n # fields\n _uuid = ''\n _temperatur = 22\n _luftdruck = 949\n _luftfeuchtigkeit = 53\n _voc = 2.5864\n _feinstaubpm25= 4.279\n _feinstaubpm100 = 5.627\n _datum = ''\n _datumzeit = ''\n\n # ctor\n def __init__(self):\n # initialize sensor\n sense = SenseHat()\n \n # define data\n self._uuid = str(uuid.uuid4())\n self._temperatur = sense.get_temperature()\n self._luftdruck = sense.get_pressure()\n self._luftfeuchtigkeit = sense.get_humidity()\n self._voc = os.popen('aqms/external/c/airsensor -o -v').read().rstrip('\\n')\n self._feinstaubpm25 = os.popen('aqms/external/bash/Feinstaub25.sh').read().rstrip('\\n')\n self._feinstaubpm100 = os.popen('aqms/external/bash/Feinstaub100.sh').read().rstrip('\\n')\n self._datum = datetime.date.today()\n self._datumzeit = datetime.datetime.now()\n\n def __repr__(self):\n return str(self.__class__.__name__) + '; ' + str(self._datumzeit)\n\n def __str__(self):\n return 'uuid: ' + str(self._uuid) + '; \\n' + \\\n 'temperatur: ' + str(self._temperatur) + '; \\n' + \\\n 'luftdruck: ' + str(self._luftdruck) + '; \\n' + \\\n 'luftfeuchtigkeit: ' + str(self._luftfeuchtigkeit) + '; \\n' + \\\n 'voc: ' + str(self._voc) + '; \\n' + \\\n 'feinstaubpm25: ' + str(self._feinstaubpm25) + '; \\n' + \\\n 'feinstaubpm100: ' + str(self._feinstaubpm100) + '; \\n' + \\\n 'datum:' + str(self._datum) + '; \\n' + \\\n 'datumzeit:' + str(self._datumzeit)\n\n # props\n def get_uuid(self):\n return self._uuid\n\n def for_db(self):\n return {\n 'uuid': self._uuid,\n 'temperatur': self._temperatur,\n 'luftdruck': self._luftdruck,\n 'luftfeuchtigkeit': self._luftfeuchtigkeit,\n 'voc': self._voc,\n 'feinstaubpm25': self._feinstaubpm25,\n 'feinstaubpm100': self._feinstaubpm100,\n 'datum': self._datum,\n 'datumzeit': self._datumzeit\n }\n","sub_path":"aqms/management/commands/herebedragons.py","file_name":"herebedragons.py","file_ext":"py","file_size_in_byte":6026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"153198332","text":"from os.path import basename\nfrom os.path import abspath\nfrom os.path import dirname\nfrom os.path import join\nfrom os.path import exists\n\nimport re\nimport logging\nimport yaml\n\nlogger = logging.getLogger(__name__)\n\nIMPORT_STATEMENT = r'\\s*#\\s*!include\\s*\\'([a-zA-Z0-9_.\\-/]+)\\'\\s*'\nVARDEC_STATEMENT = r'\\s*#\\s*!define\\s*([a-zA-Z0-9_.\\-]+)\\s*=\\s*([^ \\t\\n\\r\\f\\v]+)\\s*'\n\nVALID_KEY_STATEMENT = r'\\^.*'\n\ndef _get_lines(filename):\n res = []\n with open(filename, 'rU') as f:\n for line in f.readlines():\n res.append(line)\n return res\n\n\n################################################################################\n# read_yaml_config_string\n################################################################################\n\ndef preprocess_yaml_file(filename):\n \n base = dirname(abspath(filename))\n fname = basename(filename)\n yamlfiles = []\n\n def _preprocess_yaml_file(__base, __filename):\n\n _base = join(__base, dirname(__filename))\n 
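# _base keeps nested !include paths relative to the file that references them\n        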
wfname = join(__base, _base, basename(__filename))\n logger.debug('YAML preprocessor: [%s]', wfname)\n\n if wfname not in yamlfiles:\n yamlfiles.append(wfname)\n else:\n raise Exception('Tried to import twice [%s]' % wfname)\n\n lines = _get_lines(wfname)\n res = []\n for line in lines:\n match = re.match(IMPORT_STATEMENT, line)\n if match is not None:\n fn = match.group(1)\n for c in _preprocess_yaml_file(join(__base, _base), fn):\n res.append(c.rstrip('\\n'))\n else:\n res.append(line.rstrip('\\n'))\n return res\n\n return _preprocess_yaml_file(base, fname)\n\n\n################################################################################\n# read_yaml_config_string\n################################################################################\n\ndef read_yaml_config_string(string_value):\n\n def valid(string_val):\n return re.match(VALID_KEY_STATEMENT, string_val) is None\n\n try:\n conf = {}\n for k, v in yaml.load(string_value).iteritems():\n conf[k] = v\n res = {}\n for k, v in conf.iteritems():\n if valid(k):\n res[k] = v\n\n return res\n except Exception as e:\n raise Exception(e)\n\n\n################################################################################\n# YAMLTemplater\n################################################################################\n\nclass YAMLTemplater(object):\n\n def __init__(self):\n self._const_templates = {}\n self._function_templates = {}\n\n def template(self, lines):\n\n for l in lines:\n r1 = re.match(VARDEC_STATEMENT, l)\n if r1 is not None:\n cmd = r1.group(1)\n matched = r1.group(2)\n logger.info('Configuring variable [%s] with value [%s]' % (cmd, matched))\n self.add_const_template(cmd, matched)\n\n # Search of matches and replace based on _const_template and _function_templates.\n # but first preprocess the file and add to const_templates custom variables.\n output = []\n for l in lines:\n r1 = re.match(r'.*({{(\\w+)}}).*', l)\n if r1 is not None:\n exact_match = r1.group(1)\n match_value = r1.group(2)\n l = l.replace(exact_match, self.get_const_templates(match_value))\n\n r2 = re.match(r'.*({{(\\w+)::(\\w+)}}).*', l)\n if r2 is not None:\n exact_match = r2.group(1)\n cmd = r2.group(2)\n val = r2.group(3)\n l = l.replace(exact_match, self.get_function_templates(cmd, val))\n output.append(l)\n\n return output\n\n def add_const_template(self, cmd, value):\n self._const_templates[cmd] = value\n\n def add_function_template(self, cmd, function):\n self._function_templates[cmd] = function\n\n def get_const_templates(self, cmd):\n return self._const_templates[cmd]\n\n def get_function_templates(self, cmd, val):\n return self._function_templates[cmd](val)\n\n\n################################################################################\n# read_configuration\n################################################################################\n\ndef parse_configuration(config_file, yaml_templater=None):\n\n if not exists(config_file):\n raise RuntimeError('[%s] file does not exist', config_file)\n\n logger.debug('Trying to load configuration from [%s]', config_file)\n lines = preprocess_yaml_file(config_file)\n\n logger.debug('Loaded [%s] lines of configuration', len(lines))\n\n if yaml_templater is not None:\n config_text = '\\n'.join(yaml_templater.template(lines))\n else:\n config_text = '\\n'.join(lines)\n\n configuration = read_yaml_config_string(config_text)\n return configuration, 
config_text\n","sub_path":"yamltool/yamltool.py","file_name":"yamltool.py","file_ext":"py","file_size_in_byte":4924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"180073296","text":"from selenium import webdriver\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.common.action_chains import ActionChains\n\n\nclass Pyselenium():\n    def __init__(self,\n                 browser=\"chrome\",\n                 url=None,\n                 exe_driver=\"chromedriver.exe\"):\n        \"\"\"\n        Initialize the driver and open the browser\n        \"\"\"\n        browser = browser.lower()  # convert any uppercase characters to lowercase\n\n        # determine the browser type: Google Chrome / Firefox / IE / Edge\n        if browser == \"chrome\" or browser == \"ch\":\n            self.driver = webdriver.Chrome(executable_path=exe_driver)\n        elif browser == \"firefox\" or browser == \"ff\":\n            self.driver = webdriver.Firefox(executable_path=exe_driver)\n        elif browser == \"internet explorer\" or browser == \"ie\":\n            self.driver = webdriver.Ie(executable_path=exe_driver)\n        elif browser == \"edge\" or browser == \"ed\":\n            self.driver = webdriver.Edge(executable_path=exe_driver)\n        try:\n            self.driver.get(url)\n        except:\n            raise Exception(\"Failed to open the browser\")\n\n    def find_element(self, locator):\n        \"\"\"\n        Find a single element\n        Parameter: locator = (\"id\",\"kw\")\n        Locator types:\n        ID = \"id\"\n        XPATH = \"xpath\"\n        LINK_TEXT = \"link text\"\n        PARTIAL_LINK_TEXT = \"partial link text\"\n        NAME = \"name\"\n        TAG_NAME = \"tag name\"\n        CLASS_NAME = \"class name\"\n        CSS_SELECTOR = \"css selector\"\n        \"\"\"\n        # check that the locator argument is a tuple\n        if not isinstance(locator, tuple):\n            raise Exception(\"The input must be in (by, value) format!\")  # raise an exception and stop execution\n        try:\n            # wait dynamically for up to 10s to find this element; if 10s pass, a not-found exception is raised and execution stops\n            element = WebDriverWait(\n                self.driver, 10).until(lambda s: s.find_element(*locator))\n            return element\n        except:\n            raise Exception(\"Element not found: {}\".format(locator))\n\n    def click(self, locator):\n        \"\"\"\n        Single left click\n        - takes a locator\n        \"\"\"\n        element = self.find_element(locator)\n        element.click()\n\n    def context_click(self, locator):\n        \"\"\"\n        Single right click\n        - takes a locator\n        \"\"\"\n        element = self.find_element(locator)\n        ActionChains(self.driver).context_click(element).perform()\n\n    def double_click(self, locator):\n        \"\"\"\n        Double left click\n        - takes a locator\n        \"\"\"\n        element = self.find_element(locator)\n        ActionChains(self.driver).double_click(element).perform()\n\n    def move(self, locator):\n        \"\"\"\n        Hover the mouse over an element\n        - takes a locator\n        \"\"\"\n        element = self.find_element(locator)\n        ActionChains(self.driver).move_to_element(element).perform()\n\n    def drag_and_drop(self, locator):\n        \"\"\"\n        Drag an element\n        - not implemented yet\n        \"\"\"\n        pass\n\n    def send_content(self, locator, content):\n        \"\"\"\n        Type text into an element\n        - content\n        \"\"\"\n        element = self.find_element(locator)\n        element.send_keys(content)\n\n\nif __name__ == \"__main__\":\n    driver = Pyselenium(url=\"https://www.baidu.com\")\n    locator = (\"id\", \"su\")\n    # driver.find_element(locator=locator)\n    driver.context_click(locator=locator)\n","sub_path":"AutoTest/Pyselenium.py","file_name":"Pyselenium.py","file_ext":"py","file_size_in_byte":3469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"575175743","text":"\"\"\"\nFunctions for visualization of fastq data\n\"\"\"\nfrom __future__ import print_function, division\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom ..util import get_color_cycle\nfrom ..io import Fastq, FastqReader\n\ndef fqqualplot(fqdata, phred=64, ax=None, title=None,\n               xlabel=None, ylabel=None, **kwargs):\n    \"\"\"\n    Plot the fastq data quality distribution.\n\n    Parameters\n    ----------\n    fqdata : Array like. 
A list of Fastq type data, array or Series\n The input fastq data for plot.\n\n phred : int, or 64, optional\n The phred value to convert the base quality from ASCII to int\n For Illumina fastq: 64(default)\n For Sanger data: 33\n\n ax : matplotlib axis, optional\n Axes to plot on, otherwise use current axis.\n\n title : string, or None, optional\n If not None, set title on the plot\n\n xlabel, ylabel : string,or None, optional\n Set the x/y axis label of the current axis.\n\n kwargs : key, values pairings, or None, optional\n Other keyword arguments are passed to boxplot in\n maplotlib.axis.Axes.boxplot.\n\n\n Returns\n -------\n ax : matplotlib Axes\n Axes object with the plot\n\n Examples\n --------\n\n \"\"\"\n if ax is None:\n ax = plt.gca()\n\n if len(fqdata) == 0:\n return ax\n\n if 'showfliers' not in kwargs:\n kwargs.setdefault(\"showfliers\", False)\n\n data = []\n for r in fqdata:\n # Convert base quality from ASCII to be integer\n data.append([ord(b) - phred for b in r.qual])\n data = np.array(data)\n\n ax.boxplot(data, **kwargs)\n\n # Set xtick\n xticks = [i for i in range(0, len(data[0])+1, 10)]\n ax.set_xticks(xticks)\n ax.set_xticklabels(xticks)\n\n ax.set_ylim(ymax=data.max()+1)\n if title:\n ax.set_title(title)\n if xlabel:\n ax.set_xlabel(xlabel)\n if ylabel:\n ax.set_ylabel(ylabel)\n\n return ax\n\n\ndef fastqreport(fqfile, fqfilelist=None):\n \"\"\"\n Create a report for fastq data by input one fastq file or \n a fq file list.\n\n Parameters\n ----------\n\n Returns\n -------\n\n Examples\n --------\n\n \"\"\"\n pass\n","sub_path":"geneview/genome/_fastqplot.py","file_name":"_fastqplot.py","file_ext":"py","file_size_in_byte":2149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"311381494","text":"'''\nCreated on Oct 13, 2011\n\n@author: lcanders\n'''\nfrom sets import Set\nfrom java.lang import *\nfrom com.nomagic.magicdraw.uml.symbols import *\nfrom com.nomagic.magicdraw.core import Application\nfrom com.nomagic.uml2.ext.jmi.helpers import StereotypesHelper\nfrom com.nomagic.magicdraw.openapi.uml import SessionManager\nfrom com.nomagic.magicdraw.openapi.uml import ModelElementsManager\nfrom com.nomagic.uml2.ext.jmi.helpers import ModelHelper\nfrom com.nomagic.magicdraw.ui.dialogs import *\nfrom com.nomagic.uml2.ext.magicdraw.mdprofiles import Stereotype\nfrom com.nomagic.uml2.ext.magicdraw.classes.mdkernel import *\nfrom com.nomagic.uml2.ext.magicdraw.classes.mddependencies import *\nfrom com.nomagic.uml2.ext.magicdraw.classes.mdinterfaces import *\nfrom com.nomagic.uml2.ext.magicdraw.actions.mdbasicactions import *\nfrom com.nomagic.uml2.ext.magicdraw.activities.mdbasicactivities import *\nfrom com.nomagic.uml2.ext.magicdraw.activities.mdintermediateactivities import *\nfrom com.nomagic.uml2.ext.magicdraw.auxiliaryconstructs.mdinformationflows import *\nfrom com.nomagic.uml2.ext.magicdraw.compositestructures.mdports import *\nfrom com.nomagic.uml2.ext.magicdraw.commonbehaviors.mdsimpletime import *\nfrom com.nomagic.uml2.ext.magicdraw.classes.mdpowertypes import GeneralizationSet\nfrom com.nomagic.magicdraw.teamwork.application import TeamworkUtils\nfrom javax.swing import *\nimport Specialize\nreload(Specialize)\nimport sys\nimport traceback\nimport os\nimport Package_Clone as PC\nreload (PC)\n\nimport MDUtils._MDUtils as MDUtils\nreload(MDUtils)\nimport SRUtils\nreload(SRUtils)\nimport Fix_Package\nreload(Fix_Package)\n\ngl = Application.getInstance().getGUILog()\nproject = 
Application.getInstance().getProject()\nef = project.getElementsFactory()\nmem = ModelElementsManager.getInstance()\ndatapack=project.getModel()\n#mapping={}\npackage = Application.getInstance().getMainFrame().getBrowser().getActiveTree().getSelectedNode().getUserObject()\ncount={}\nstereotypePack=StereotypesHelper.getStereotypes(package)\nprofile = StereotypesHelper.getProfileForStereotype(stereotypePack[0])\n#pc=StereotypesHelper.getAppliedProfiles(package)\n#profile=pc[0]\nCSst=StereotypesHelper.getStereotypesByProfile(profile)\ncsElem=profile.getOwnedElement()\nnElem=package.getOwnedElement()\ncountFix=0\nelementToFix=[]\n#matcht=False\n\n\n#def blah(package, csPack, False):\n# ValidatePackage.blah(package, csPack, False) \n# return\n\n\ndef ControlServiceFind(package,CSProf):\n CSPacks=CSProf.getNestedPackage()\n for c in CSPacks:\n packSt=StereotypesHelper.getStereotypes(package)\n packSt=filter(lambda element: element in CSst, packSt)\n if StereotypesHelper.hasStereotype(c,packSt):\n blah=c\n #gl.log(\"hehehehehhe========>\"+blah.getName())\n return blah\n if isinstance(c,Package):\n CSPackage=ControlServiceFind(package,c)\n if CSPackage is not None:\n return CSPackage\n return None\n \ndef run(mode):\n if mode == 'b':\n selected = Application.getInstance().getMainFrame().getBrowser().getActiveTree().getSelectedNode().getUserObject()\n try:\n SessionManager.getInstance().createSession(\"syncProfile\")\n csItem=ControlServiceFind(package,profile)\n #passIn=csItem.getOwner()\n #csPack=Fix_Package.findPackage(package,passIn, True)\n #if csPack is None:\n #gl.log(\"******ERROR*****One of your packages does not have a stereotype=======>\"+package.getName())\n #else:\n [elemCount,genCount,diagCount,diagError,elemError,packError,genError,redefError,redefWarning,goodElem,goodRedef,goodGen,goodDiag,goodPack,instElem,csElems,redefCS,insProp,sysModels,sysTls]=Fix_Package.blah(package,csItem, True,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,{},0,{},{},{},0,{},{})\n gl.log(\"--------------------------Summary Elements Report--------------------------\")\n gl.log(\"Instance Elements Found===============>\"+str(len(instElem)))\n gl.log(\"CS Elements Found===============>\"+str(csElems))\n gl.log(\"Elements Found Matching===============>\"+str(goodElem))\n gl.log(\"Packages Found===========>\"+str(goodPack))\n gl.log(\"Generalizations===============>\"+str(goodGen))\n gl.log(\"Diagrams Found==============>\"+str(goodDiag))\n gl.log(\"Redefinitions Found==============>\"+str(len(redefCS)))\n gl.log(\"Properties Found========>\"+ str(len(insProp)))\n \n gl.log(\"--------------------------Summary Error Report--------------------------\")\n gl.log(\"More Elements in CS Not Instance Found===============>\"+str(elemError))\n MoreIns=len(instElem)-goodElem\n if MoreIns!=0:\n gl.log(\"More Elements Found in the Instance than the CS =======>\"+str(MoreIns))\n gl.log(\"Packages Not Found===========>\"+str(packError))\n gl.log(\"Elements without Generalization===============>\"+str(genError))\n gl.log(\"Diagram Not Found===============>\"+str(diagError))\n gl.log(\"Redefinition Error =======>\"+str(len(insProp)-len(redefCS)))\n gl.log(\"--------------------------Summary Creation Report Report--------------------------\")\n gl.log(\"Elements Created===============>\"+str(elemCount))\n gl.log(\"Generalizations Created===========>\"+str(genCount))\n gl.log(\"Diagrams Created===============>\"+str(diagCount))\n gl.log(\"--------------------------Summary Fix Me! 
Elements--------------------------\")\n [count,Fixes]=Fix_Package.countFixes(countFix,elementToFix,package)\n gl.log(\"The total number of fix me elements===>\"+str(count))\n n = JOptionPane.showConfirmDialog(None,\"Do you want to view the name of the Elements that still contains Fix Me!??\",\"View Name Question\", JOptionPane.YES_NO_OPTION)\n if n!=JOptionPane.CLOSED_OPTION:\n if n==JOptionPane.YES_OPTION:\n for j in Fixes:\n gl.log(\"The name of the element to fix is:========>\"+j.getName())\n gl.log(\"--------------------------Summary Stereotypes Report--------------------------\")\n q = JOptionPane.showConfirmDialog(None,\"Do you want to view the Stereotype Summary??\",\"Stereotype Question\", JOptionPane.YES_NO_OPTION)\n if q!=JOptionPane.CLOSED_OPTION:\n if q==JOptionPane.YES_OPTION:\n Fix_Package.stereotypeReport(CSst,package)\n gl.log(\"--------------------------Summary Redefinition Not Correct--------------------------\")\n q = JOptionPane.showConfirmDialog(None,\"Do you want to view the List of Elements with improper Redefinitions??\",\"Redefinition Question\", JOptionPane.YES_NO_OPTION)\n if q!=JOptionPane.CLOSED_OPTION:\n if q==JOptionPane.YES_OPTION:\n for j in insProp:\n if j not in redefCS:\n gl.log(\"This element was not redefined properly====>\"+j.getQualifiedName())\n \n SessionManager.getInstance().closeSession()\n except:\n if SessionManager.getInstance().isSessionCreated():\n SessionManager.getInstance().cancelSession()\n exceptionType, exceptionValue, exceptionTraceback = sys.exc_info()\n gl.log(\"*** EXCEPTION:\")\n messages=traceback.format_exception(exceptionType, exceptionValue, exceptionTraceback)\n for message in messages:\n gl.log(message)\n \n \n \n \n ","sub_path":"SystemsReasoner/refactor/ValidatePackage.py","file_name":"ValidatePackage.py","file_ext":"py","file_size_in_byte":7398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"257251671","text":"#!python3\n\nimport requests, pyperclip, datetime, json, prettytable, sys\nfrom bs4 import BeautifulSoup\n\ndef retrievedDataFromBiliBili():\n url = 'https://www.bilibili.com/bangumi/media/md5267730/'\n page = requests.get(url)\n soup = BeautifulSoup(page.text, 'lxml')\n\n stats = soup.select('.media-info-datas')[0].text.replace('万', '').split()\n d = datetime.datetime.now()\n resultDict = {\n f'{d}' : {\n 'Date' : f'{d.date()}',\n 'Time' : f'{d.hour}',\n 'Stats' : {\n 'Total views' : f'~{int(float(stats[1])*10000)}',\n 'Following' : f'~{int(float(stats[3])*10000)}',\n 'Total floating comments' : f'~{int(float(stats[5])*10000)}',\n 'Score' : f'{stats[6]}',\n 'Total number of ratings' : f'{stats[7]}'\n }\n }\n }\n return resultDict\n\ndef initializeJSON(stat):\n with open('kaguyaB.json', 'w') as f:\n json.dump(stat, f)\n\ndef writeToJSON(stat):\n with open('kaguyaB.json', 'r') as f:\n data = json.load(f)\n\n data.update(stat)\n\n with open('kaguyaB.json', 'w') as f:\n json.dump(data, f)\n\ndef constructTable():\n pt = prettytable.PrettyTable()\n pt.field_names = ['Date', 'Hour', 'Views', 'Following', 'Danmaku', 'Score', 'Raters']\n\n with open('kaguyaB.json', 'r') as f:\n data = json.load(f)\n\n for entry in data:\n pt.add_row([data[entry]['Date'], data[entry]['Time'], data[entry]['Stats']['Total views'], data[entry]['Stats']['Following'], data[entry]['Stats']['Total floating comments'], data[entry]['Stats']['Score'], data[entry]['Stats']['Total number of ratings']])\n \n return pt\n\ndef newEntry():\n writeToJSON(retrievedDataFromBiliBili())\n 
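# print the refreshed history table and copy it to the clipboard as a fenced code block\n    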
print(constructTable())\n    pyperclip.copy(f'```Stats on BiliBili: \\n{constructTable()}```')\n\ndef pastEntries():\n    print(constructTable())\n    pyperclip.copy(f'```Stats on BiliBili: \\n{constructTable()}```')\n\ntry:\n    if sys.argv[1] == 'new' or sys.argv[1] == 'n':\n        newEntry()\n    elif sys.argv[1] == 'init':\n        initializeJSON(retrievedDataFromBiliBili())\n        pastEntries()\n    else:\n        pastEntries()\nexcept IndexError:\n    print('Invalid system argument. Enter \"new\" or \"history\".')","sub_path":"kaguyaB.py","file_name":"kaguyaB.py","file_ext":"py","file_size_in_byte":2220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"56429034","text":"from leapp.libraries.common.reporting import report_with_links\nfrom leapp.libraries.common.tcpwrappersutils import config_applies_to_daemon\n\n\ndef check_config_supported(tcpwrap_facts, vsftpd_facts):\n    bad_configs = [config.path for config in vsftpd_facts.configs if config.tcp_wrappers]\n    if bad_configs and config_applies_to_daemon(tcpwrap_facts, 'vsftpd'):\n        list_separator_fmt = '\\n    - '\n        report_with_links(title='Unsupported vsftpd configuration',\n                          summary=('tcp_wrappers support has been removed in RHEL-8. '\n                                   'Some configuration files set the tcp_wrappers option to true and '\n                                   'there is some vsftpd-related configuration in /etc/hosts.deny '\n                                   'or /etc/hosts.allow. Please migrate it manually. '\n                                   'The list of problematic configuration files:{}{}'\n                                   ).format(list_separator_fmt,\n                                            list_separator_fmt.join(bad_configs)),\n                          links=[{'title': 'Replacing TCP Wrappers in RHEL 8',\n                                  'href': 'https://access.redhat.com/solutions/3906701'}],\n                          severity='high',\n                          flags=['inhibitor'])\n","sub_path":"repos/system_upgrade/el7toel8/actors/vsftpdconfigcheck/libraries/library.py","file_name":"library.py","file_ext":"py","file_size_in_byte":1341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"132093541","text":"import setuptools\n\n\nversion = '3.0.0'\n\nsetuptools.setup(\n    name='RNNSynthesis',\n    version=version,\n    author='Adelia Fatykhova',\n    author_email='adelik21979@gmail.com',\n    description='A one-agent environment for synthesis planning',\n    long_description=open('README.md', 'r').read(),\n    url='https://github.com/Pandylandy/RNN/',\n    packages=setuptools.find_packages(),\n    classifiers=[\n        'Programming Language :: Python :: 3',\n        'Operating System :: OS Independent',\n    ],\n    python_requires='>=3.7',\n    install_requires=['CGRtools>=4.0.14,<4.1', 'CIMtools>=4.0.1,<4.1', 'LazyPony>=0.3',\n                      'CachedMethods>=0.1.4,<0.2', 'pony>=0.7.11,<0.8'],\n    extras_require={'postgres': ['psycopg2-binary'],\n                    'postgres_cffi': ['cffi', 'psycopg2cffi']},\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"567358389","text":"from tensorflow.examples.tutorials.mnist import input_data\nmnist=input_data.read_data_sets('MNIST_data',one_hot=True)\n# mnist is lightweight; the train/validation/test sets are stored as Numpy arrays\n# a tensorflow InteractiveSession connects to the C++ backend as the session\nimport tensorflow as tf\nsess=tf.InteractiveSession()\nx=tf.placeholder(tf.float32,shape=[None,784])  # length n is not fixed, width is 784\ny_=tf.placeholder(tf.float32,shape=[None,10])\nw = tf.Variable(tf.zeros([784,10]))\nb = tf.Variable(tf.zeros([10]))\nsess.run(tf.global_variables_initializer())\ny=tf.matmul(x,w)+b\ncross_entropy=tf.reduce_mean(\n    
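# averages the per-example softmax cross-entropy over the batch\n    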
tf.nn.softmax_cross_entropy_with_logits(labels=y_,logits=y))\ntrain_step=tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)\nfor _ in range(1000):\n    batch=mnist.train.next_batch(100)\n    train_step.run(feed_dict={x:batch[0],y_:batch[1]})\n","sub_path":"7deep_mini.py","file_name":"7deep_mini.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"327560458","text":"# STRETCH: implement Linear Search\t\t\t\t\ndef linear_search(arr, target):\n    \n    for i in range(len(arr)):\n        if arr[i] == target:\n            return 1\n\n    return -1 # not found\n\n\n\n# STRETCH: write an iterative implementation of Binary Search \ndef binary_search(arr, target):\n\n    if len(arr) == 0:\n        return -1 # array empty\n\n    arr.sort() # arr must be sorted for Binary Search\n    \n    while len(arr) > 0:\n        pivot_index = int( len(arr) / 2 )\n        pivot = arr[pivot_index]\n\n        if pivot == target:\n            return 1\n        \n        elif pivot > target:\n            arr = arr[:pivot_index]\n        \n        else:\n            arr = arr[pivot_index + 1:]\n\n    return -1 # not found\n\n\n\n# STRETCH: write a recursive implementation of Binary Search \ndef binary_search_recursive(arr, target):\n    \n    if len(arr) == 0:\n        return -1 # array empty\n    \n    arr.sort() # in production I would sort the list before passing to the recursive function so that I didn't sort a sorted list, but, hey, this is a first pass solution\n\n    pivot_index = int( len(arr) / 2 )\n    pivot = arr[pivot_index]\n\n    if pivot == target:\n        return 1\n\n    elif pivot < target:\n        return binary_search_recursive(arr[pivot_index + 1:], target)\n    \n    else:\n        return binary_search_recursive(arr[:pivot_index], target)\n\nprint(binary_search_recursive([1, -1, 5, 6, 8, 2, 4, 5], 1))\n","sub_path":"project/searching.py","file_name":"searching.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"356069468","text":"import logging\nimport numpy as np\nimport scipy.sparse as sp\nfrom sklearn.base import BaseEstimator\nfrom sklearn.preprocessing import normalize\n\nclass SparsePLM(BaseEstimator):\n    def __init__(self, weight=0.1, norm=None, iterations=50, eps=0.01):\n        self.weight = weight\n        self.iterations = iterations\n        self.eps = eps\n        self.norm = norm\n\n    def fit(self, X, y=None):\n        cf = np.array(X.sum(axis=0), dtype=np.float64)[0]\n        self.pc = cf / np.sum(cf) * (1 - self.weight)\n        return self\n\n    def transform(self, X, copy=True):\n        if hasattr(X, 'dtype') and np.issubdtype(X.dtype, np.float):\n            # preserve float family dtype\n            X = sp.csr_matrix(X, copy=copy)\n        else:\n            # convert counts or binary occurrences to floats\n            X = sp.csr_matrix(X, dtype=np.float64, copy=copy)\n        for i in range(X.shape[0]):\n            begin_col, end_col = X.indptr[i], X.indptr[i+1]\n            data = X.data[begin_col: end_col]\n            p_data = np.ones(data.shape[0]) / data.shape[0]\n            c_data = self.pc[X.indices[begin_col: end_col]]\n            for iteration in range(1, self.iterations + 1):\n                logging.debug(\"Iteration %s\" % iteration)\n                p_data *= self.weight\n                E = data * p_data / (c_data + p_data)\n                M = E / E.sum()\n                diff = np.abs(M - p_data)\n                p_data = M\n                if (diff < self.eps).all():\n                    logging.info(\"Broke early from EM\")\n                    break\n            _d = np.dot(p_data, p_data)\n            assert not (np.isnan(_d) or np.isinf(_d))\n            X.data[begin_col: end_col] = p_data\n        if self.norm:\n            X = normalize(X, norm=self.norm)\n        return X\n\n    def fit_transform(self, X, y=None):\n        self.fit(X, y)\n        return 
self.transform(X)\n","sub_path":"verification/sparse_plm.py","file_name":"sparse_plm.py","file_ext":"py","file_size_in_byte":1879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"47506376","text":"import unittest\nimport jupyter_kernel_test\n\n\nclass MyKernelTests(jupyter_kernel_test.KernelTests):\n # Required --------------------------------------\n\n # The name identifying an installed kernel to run the tests against\n kernel_name = \"common-lisp\"\n\n # language_info.name in a kernel_info_reply should match this\n language_name = \"common-lisp\"\n\n # the normal file extension (including the leading dot) for this language\n # checked against language_info.file_extension in kernel_info_reply\n file_extension = \".lisp\"\n\n # Optional --------------------------------------\n\n # Code in the kernel's language to write \"hello, world\" to stdout\n code_hello_world = '(format t \"hello, world\")'\n\n # code which should cause (any) text to be written to STDERR\n code_stderr = '(format *error-output* \"test\")'\n\n # samples for testing code-completeness (used by console only)\n # these samples should respectively be unambigiously complete statements\n # (which should be executed on ), incomplete statements or code\n # which should be identified as invalid\n complete_code_samples = [\n '\\'x',\n '(+ 1 2)'\n ]\n incomplete_code_samples = [\n '(+ 1'\n ]\n invalid_code_samples = [\n # 'foo(;',\n ]\n\n # Pager: code that should display something (anything) in the pager\n # code_page_something = \"??erfc\"\n\n # code which should generate a (user-level) error in the kernel, and send\n # a traceback to the client\n # code_generate_error = '(/ 1 0)'\n\n # Samples of code which generate a result value (ie, some text\n # displayed as Out[n])\n code_execute_result = [\n {'code': '(+ 1 2)', 'result': '3'}\n ]\n\n # Samples of code which should generate a rich display output, and\n # the expected MIME type\n code_display_data = [{ # plot2d tests\n 'code': '(jupyter:markdown \"wibble\" t)',\n 'mime': 'text/markdown'\n }]\n\n # def test_maxima_latex(self):\n # reply, output_msgs = self.execute_helper(code='solve(x^2+x+1=0,x);')\n # print(reply)\n # print(output_msgs)\n # self.assertEqual(output_msgs[0]['msg_type'], 'stream')\n # self.assertEqual(output_msgs[0]['content']['name'], 'stderr')\n # self.assertEqual(output_msgs[0]['content']['text'], 'oops\\n')\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":2361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"27111729","text":"\"\"\"Article model\n\"\"\"\n\nclass Article:\n \"\"\"This class represents Pikabu article model\n \"\"\"\n __slots__ = [\"date\", \"title\", \"text\", \"author\", \"tags\", \"rating\", \"n_comments\", \"n_views\"]\n\n def __init__(self, date, title, text, author, tags, rating, n_comments, n_views):\n \"\"\"The constructor\n @:param date:\n @:param title:\n @:param text:\n @:param author:\n @:param tags:\n @:param rating:\n @:param n_comments:\n @:param n_views:\n \"\"\"\n self.date = date\n self.title = title\n self.text = text\n self.author = author\n self.tags = tags\n self.rating = rating\n self.n_comments = n_comments\n self.n_views = n_views\n\n def __str__(self):\n return str(self.date) + \"\\t\" + str(self.title) + \"\\t\" + str(self.text) + \"\\t\" + str(self.author) + \"\\t\" + str(\n self.tags) + \"\\t\" + str(self.rating) 
+ \"\\t\" + str(self.n_comments) + \"\\t\" + str(self.n_views)\n","sub_path":"src/models/article.py","file_name":"article.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"636052386","text":"\nimport sys\nimport os\nimport math\nimport pprint as pp\nimport random\n\n\n\n\nclass sudoku:\n\t\t\n\tdef __init__(self,levl):\n\t\tself.spielfeld = [] #nach populate_fkt das vollst. generierte sudoko\n\t\tself.sektor = []\n\t\tself.panels = []\n\t\tself.array2d = []\n\t\tself.game=[]\t\t#feld mit abgedeckten panels (0)\n\t\tself.lsg=[]\t\t\t#die 'fehlenden' panels, game + lsg = spielfeld\n\t\tself.lvl = levl\n\t\tself.game2d = []\n\t\t\n\t\tself.new_game()\n\t\t\n\tdef new_game(self):\n\t\tself.init_playground(None)\n\t\tself.init_sektoren()\n\t\tself.populate_playground()\n\t\tself.abdecken()\n\t\tself.array_zeichnen()\n\t\tself.array_zeichnen2()\n\t\t\n\n\tdef populate_playground(self):\t\n\t\tmstack = []\n\t\tfor ts in range(1,10,1):\n\t\t\tmstack.append(ts)\n\t\twhile len(mstack) > 0:\n\t\t\tziffer = mstack.pop(0)\n\t\t\tbkt = False\n\t\t\tfor sk in self.get_sektorliste():\n\t\t\t\tfound = False\n\t\t\t\tif bkt is False:\n\t\t\t\t\ttries = 0\n\t\t\t\t\twhile found is False and tries < 30:\n\t\t\t\t\t\tkeylist = []\n\t\t\t\t\t\tfor a in range(8):\n\t\t\t\t\t\t\tkeylist.append(a)\n\t\t\t\t\t\trestliche_keys = len(keylist)\n\t\t\t\t\t\tif restliche_keys>0:\n\t\t\t\t\t\t\tranix = keylist.pop(self.rdmindex(0,restliche_keys)-1)\n\t\t\t\t\t\tcand = self.get_element_from_list(sk.get_panels(),ranix)\n\t\t\t\t\t\ttries = tries + 1\n\t\t\t\t\t\tci = cand.get_ind_i()\n\t\t\t\t\t\tcj = cand.get_ind_j() \n\t\t\t\t\t\tif self.checkreihe(ci,ziffer) == False and self.checkspalte(cj,ziffer) is False and cand.get_wert()==0:\n\t\t\t\t\t\t\tself.insert_into_game(cand,sk,ziffer)\n\t\t\t\t\t\t\tfound = True\n\t\t\t\t\tif found is False:\n\t\t\t\t\t\tself.zahl_entfernen(ziffer)\n\t\t\t\t\t\tif ziffer>1:\n\t\t\t\t\t\t\tself.zahl_entfernen(ziffer-1)\n\t\t\t\t\t\tmstack.insert(0,ziffer)\n\t\t\t\t\t\tif ziffer>1:\n\t\t\t\t\t\t\tmstack.insert(0,ziffer-1)\n\t\t\t\t\t\tbkt = True\n\t\t\t\t\t\n\tdef array_zeichnen(self):\n\t\tfeld = self.get_spielfeld()\n\t\t\n\t\tfeldarray=[[0 for i in range(9)]for j in range(9)]\n\t\tfor ses in self.get_sektorliste():\n\t\t\tfor pans in ses.get_panels():\n\t\t\t\tii = pans.get_ind_i()\n\t\t\t\tjj = pans.get_ind_j()\n\t\t\t\tww = pans.get_wert()\n\t\t\t\tfeldarray[ii][jj]=ww\n\t\tself.array2d = feldarray\n\t\treturn feldarray\n\t\n\t\n\tdef array_zeichnen2(self):\n\t\t\n\t\tref = self.get_game()\n\t\tfarr=[[0 for i in range(9)]for j in range(9)]\n\t\t\n\t\ti3=0\n\t\t\n\t\tfor se2 in ref:\n\t\t\tj3=0\n\t\t\tfor pa2 in se2:\n\t\t\t\tfarr[i3][j3]=pa2\t\n\t\t\t\t\n\t\t\t\t#pp.pprint(pa2)\n\t\t\t\t#print(i3,j3)\n\t\t\t\t#print(\"\")\n\t\t\t\t\n\t\t\t\tj3 = j3 + 1\n\t\t\ti3 = i3 + 1\n\t\t\n\t\t\n\t\tself.game2d=farr\n\t\t\n\t\n\t\n\tdef abdecken(self):\n\t\tif self.get_lvl() is None:\n\t\t\tlvl = 17\n\t\telse:\n\t\t\tlvl = self.get_lvl()\n\t\tslist = feldarray=[[0 for i in range(9)]for j in range(9)]\n\t\tcounter = 81 - lvl\n\t\tarr = self.array_zeichnen()\n\t\twhile counter>0:\n\t\t\ti1 = self.rdmindex(0,8)\n\t\t\tj1 = self.rdmindex(0,8)\n\t\t\tcand = arr[i1][j1]\n\t\t\tif cand != 0:\n\t\t\t\tslist[i1][j1]=arr[i1][j1]\n\t\t\t\tarr[i1][j1] = 0\n\t\t\t\tcounter = counter - 1\n\t\tself.lsg = arr\n\t\tself.game = slist\n\t\n\tdef zahl_entfernen(self,rm):\n\t\tfeld = self.get_sektorliste()\n\t\tfor se in 
feld:\n\t\t\tfor pans in se.get_panels():\n\t\t\t\tif pans.get_wert()==rm:\n\t\t\t\t\tpa_i = pans.get_ind_i()\n\t\t\t\t\tpa_j = pans.get_ind_j()\n\t\t\t\t\tpans.set_wert(0)\n\t\t\t\t\tself.spielfeld[pa_i][pa_j]=0\n\t\t\n\tdef rdmindex(self,start,stop):\n\t\tstop=stop+1\n\t\tzf = random.randrange(start,stop,1)\n\t\treturn zf\n\t\t\n\tdef init_sektoren(self):\n\t\tsek1=sektor(1)\n\t\tsek2=sektor(2)\n\t\tsek3=sektor(3)\n\t\tsek4=sektor(4)\n\t\tsek5=sektor(5)\n\t\tsek6=sektor(6)\n\t\tsek7=sektor(7)\n\t\tsek8=sektor(8)\n\t\tsek9=sektor(9)\n\t\t\n\t\tfor pan in self.get_panels():\n\t\t\tsektornr = self.findsektor(pan.get_ind_i(),pan.get_ind_j())\n\t\t\tif sektornr == 1:\n\t\t\t\tsek1.insert_panel(pan)\n\t\t\tif sektornr == 2:\n\t\t\t\tsek2.insert_panel(pan)\n\t\t\tif sektornr == 3:\n\t\t\t\tsek3.insert_panel(pan)\n\t\t\tif sektornr == 4:\n\t\t\t\tsek4.insert_panel(pan)\n\t\t\tif sektornr == 5:\n\t\t\t\tsek5.insert_panel(pan)\n\t\t\tif sektornr == 6:\n\t\t\t\tsek6.insert_panel(pan)\n\t\t\tif sektornr == 7:\n\t\t\t\tsek7.insert_panel(pan)\n\t\t\tif sektornr == 8:\n\t\t\t\tsek8.insert_panel(pan)\n\t\t\tif sektornr == 9:\n\t\t\t\tsek9.insert_panel(pan)\n\t\t\tlisttmp = [sek1,sek2,sek3,sek4,sek5,sek6,sek7,sek8,sek9]\n\t\t\tself.sektor = listtmp\n\n\t\n\t\t\n\tdef init_playground(self,seitenlaenge):\n\t\tsize = []\n\t\tif seitenlaenge is not None:\n\t\t\tsize = seitenlaenge\n\t\telse:\n\t\t\tsize = 9\n\t\tfeld = []\n\t\tii,jj = 0,0\n\t\tfeld = [[0 for jj in range(size)] for ii in range(size)]\n\t\tpanellist = []\n\t\tfor j1 in range(9):\n\t\t\tfor i1 in range(9):\n\t\t\t\ttmpanel = su_panel(0,i1,j1)\n\t\t\t\tpanellist.append(tmpanel)\n\t\tself.panels = panellist\n\t\tself.init_sektoren()\n\t\tself.spielfeld = feld\n\n\tdef insert_into_game(self,apanel,sektr,ziffer):\n\t\tfor pa in sektr.get_panels():\n\t\t\tif pa.get_ind_i() == apanel.get_ind_i()\tand pa.get_ind_j() == apanel.get_ind_j() and pa.get_wert()==0:\n\t\t\t\tapanel.set_wert(ziffer)\n\t\t\t\tpa = apanel\n\t\t\t\tself.spielfeld[pa.get_ind_i()][pa.get_ind_j()] = ziffer \n\n\tdef checkspalte(self,index2,can):\n\t\tfeld2 = []\n\t\tfor ii in range(8):\n\t\t\tfeld2.append(self.spielfeld[ii][index2])\n\t\tb2 = False\n\t\tfor zahl2 in feld2:\n\t\t\tif zahl2==can:\n\t\t\t\tb2=True\n\t\treturn b2\n\t\t\n\t\t\n\t\t\t\n\tdef checkreihe(self,index1,can):\n\t\tfeld = []\n\t\tfor jj in range(8):\n\t\t\tfeld.append(self.spielfeld[index1][jj])\n\t\tb = False\n\t\tfor zahl in feld:\n\t\t\tif zahl==can:\n\t\t\t\tb=True\n\t\treturn b\n\t\n\t\n\t\n\t\t\n\tdef get_element_from_list(self,aliste,indx):\n\t\tcpl = aliste \n\t\ttarget = None\n\t\tif indx <= len(aliste)-1:\n\t\t\tif len(aliste) > 1: \n\t\t\t\ttarget=cpl.pop(indx)\n\t\t\t\tcpl.insert(indx+1,target)\n\t\t\tif len(aliste) == 1:\n\t\t\t\ttarget=cpl.pop()\n\t\t\t\tcpl.append(indx)\n\t\treturn target\n\n\tdef change_diffc(self,nd):\n\t\tself.lvl = nd\n\t\tself.abdecken()\n\t\t\n\tdef findsektor(self,i,j):\n\t\tsek = 0\n\t\tif i < 3:\n\t\t\tif j < 3: \n\t\t\t\tsek = 1\n\t\t\tif j > 2 and j < 6:\n\t\t\t\tsek = 4\n\t\t\tif j > 5 :\n\t\t\t\tsek = 7\n\t\tif i > 2 and i < 6:\n\t\t\tif j < 3: \n\t\t\t\tsek = 2\n\t\t\tif j > 2 and j < 6:\n\t\t\t\tsek = 5\n\t\t\tif j > 5 :\n\t\t\t\tsek = 8\n\t\tif i > 5:\n\t\t\tif j < 3: \n\t\t\t\tsek = 3\n\t\t\tif j > 2 and j < 6:\n\t\t\t\tsek = 6\n\t\t\tif j > 5 :\n\t\t\t\tsek = 9\n\t\treturn sek\n\t\n\tdef ausgabe(self):\n\t\tprint(\"\\n\\nvollständiges sudoku:\")\n\t\tpp.pprint(self.get_spielfeld())\n\t\tprint(\"\\nspiel (mit {} abgedeckten 
panels):\".format(self.get_lvl()))\n\t\tpp.pprint(self.get_game())\n\t\tprint(\"\\nloeser:\")\n\t\tpp.pprint(self.get_lsg())\n\t\tprint(\"\\n\\n\")\n\tdef get_path_txt(self):\n\t\treturn self.path\n\tdef get_spielfeld(self):\n\t\treturn self.spielfeld\n\tdef get_sektorliste(self):\n\t\treturn self.sektor\n\tdef get_panels(self):\n\t\treturn self.panels\n\tdef get_lsg(self):\n\t\treturn self.lsg\n\tdef get_game(self):\n\t\treturn self.game\n\tdef get_array2d(self):\n\t\treturn self.array2d\t\n\tdef get_path_txt(self):\n\t\treturn self.path\n\tdef get_lvl(self):\n\t\treturn self.lvl\n\tdef get_game2d(self):\n\t\treturn self.game2d\n\n\n\n#3x3 untersudoku\nclass sektor:\n\t\n\tdef __init__(self,nr):\n\t\tself.nummer = nr\n\t\tself.panels = []\n\tdef insert_panel(self,panel):\n\t\tself.panels.append(panel)\n\tdef insert_panelliste(self,panellist):\n\t\t\tl1 = panellist\n\t\t\tfor ps in l1:\n\t\t\t\tself.insert_panel(ps)\n\tdef get_panels(self):\n\t\treturn self.panels\n\tdef get_sektorennr(self):\n\t\treturn self.nummer\n\tdef sektostr(self):\n\t\ts =\"\\nSektor: \"+ str(self.get_sektorennr())\n\t\tfor kk in self.get_panels():\n\t\t\ts = s +\"\\n\"+ kk.paneltostr()\n\t\treturn s\n\n#einzelne zelle\nclass su_panel:\n\t\n\tdef __init__(self,wert,i,j):\n\t\tself.wert = wert\n\t\tself.i=i\n\t\tself.j=j\n\tdef get_wert(self):\n\t\treturn self.wert \n\tdef get_ind_i(self):\n\t\treturn self.i\n\tdef get_ind_j(self):\n\t\treturn self.j\n\tdef set_wert(self,nval):\n\t\tself.wert = nval\t\n\tdef paneltostr(self):\n\t\ts = \"Wert: \"+str(self.get_wert())+\" i: \"+str(self.get_ind_i())+\" j: \"+str(self.get_ind_j())\n\t\treturn s\n\t\t\n","sub_path":"app/pydoku_site/pydoku_app/pydoku_classes.py","file_name":"pydoku_classes.py","file_ext":"py","file_size_in_byte":7219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"217310230","text":"from __future__ import division\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nclass Linear_Spline(object):\n\n\tdef __init__(self, X, Y):\n\t\tself.X_indata = X\n\t\tself.Y_indata = Y\n\t\tself.n = len(X)\n\n\t\tself.generate_lagrange_polynomials()\n\n\n\tdef generate_lagrange_polynomials(self):\n\n\t\tself.lagrange_polynomials = []\n\t\tself.lagrange_resolution = 0.01\n\n\t\tfor index_i in xrange(0, self.n-1):\n\t\t\t# Generate X values for Lagrange Polynomial over current interval\n\t\t\tX_Lagrange_Polynomial = np.arange(self.X_indata[index_i], self.X_indata[index_i+1], self.lagrange_resolution)\n\n\t\t\t# Instantiate Y array for Lagrange Polynomial values over current interval\n\t\t\tY_Lagrange_Polynomial = np.zeros(len(X_Lagrange_Polynomial))\n\n\t\t\t# Calculate current interval's linear Lagrange Function\n\t\t\tfor x_index, x in enumerate(X_Lagrange_Polynomial):\n\t\t\t\tfirst_point_term = self.Y_indata[index_i] * (x - self.X_indata[index_i+1]) / (self.X_indata[index_i] - self.X_indata[index_i+1])\n\t\t\t\tsecond_point_term = self.Y_indata[index_i+1] * (x - self.X_indata[index_i]) / (self.X_indata[index_i+1] - self.X_indata[index_i])\n\n\t\t\t\tY_Lagrange_Polynomial[x_index] = first_point_term + second_point_term\n\n\t\t\tself.lagrange_polynomials.append((X_Lagrange_Polynomial, Y_Lagrange_Polynomial))\n\n\n\tdef plot(self, intervals=None):\n\n\t\tfor index, lagrange_function in enumerate(self.lagrange_polynomials):\n\t\t\t\n\t\t\tif intervals is None:\n\t\t\t\tx = lagrange_function[0]\n\t\t\t\ty = lagrange_function[1]\n\t\t\t\tplt.plot(x, y, 'b--')\n\n\t\t\telse:\n\t\t\t\tif index in 
intervals:\n\t\t\t\t\tx = lagrange_function[0]\n\t\t\t\t\ty = lagrange_function[1]\n\t\t\t\t\tplt.plot(x, y, 'b--')\n\n\n\tdef interpolate(self, interpolate_x):\n\t\tfor index, lagrange_function in enumerate(self.lagrange_polynomials):\n\t\t\tX = lagrange_function[0]\n\n\t\t\tif interpolate_x >= min(X) and interpolate_x <= max(X):\n\t\t\t\tY = lagrange_function[1]\n\n\t\t\t\tfor x_index, x in enumerate(X):\n\t\t\t\t\n\t\t\t\t\tif np.abs(interpolate_x - x) < self.lagrange_resolution*1e-1:\n\t\t\t\t\t\tself.interpolated_value = Y[x_index]\n\t\t\t\t\t\treturn Y[x_index]\n\t\t\t\t\t\n\t\t\t\treturn False\n","sub_path":"modules/linear_spline.py","file_name":"linear_spline.py","file_ext":"py","file_size_in_byte":2031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"596738288","text":"\"\"\"MyForum URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.9/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\n\nfrom myforumpostapp.views import *\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^$', IndexPage.as_view()),\n url(r'^registrationpage$', RegistrationPage.as_view()),\n url(r'^loginpage$', LoginPage.as_view()),\n url(r'^knockmade$', KnockMade.as_view()),\n url(r'^isknocking$', isKnocking.as_view()),\n url(r'^failed-login$', FailedLogin.as_view()),\n url(r'^failed-register$', FailedRegister.as_view()),\n url(r'^logout-success$', LogoutSuccess.as_view()),\n url(r'^help$', HelpPage.as_view()),\n url(r'^homepage$', HomePage.as_view()),\n url(r'^scores$', Scores.as_view()),\n]\n\n'127.0.0.1:8000/'","sub_path":"newestforum/MyForumpreapp/MyForumpreapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"621180537","text":"import numpy as np\nimport os\n\n\ndir_path = os.path.dirname(os.path.realpath(__file__)) #current directory\n####################################################\n#ML part\nfrom sklearn.preprocessing import normalize\nfrom sklearn.model_selection import RepeatedStratifiedKFold, GridSearchCV, train_test_split\nfrom sklearn.dummy import DummyClassifier\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import RepeatedStratifiedKFold\nfrom sklearn.metrics import confusion_matrix, accuracy_score, precision_score\nf=open(dir_path+'/rohit-test.txt','w')\n\nX=np.load(dir_path+'/data/'+'Subject1'+'_'+'VC'+'_'+'fmri'+'.npy')\ndatatype=np.load(dir_path+'/data/'+'Subject1'+'_'+'datatype'+'.npy')\nX=X.reshape(X.shape[0],X.shape[1],X.shape[2],X.shape[3],1)\n\ninput_shape=X.shape\n\ny=[]\nfor j in range(len(datatype)):\n if datatype[j]==3:\n y.append(1)\n else:\n y.append(0)\ny=np.array(y)\n\nmodel = DummyClassifier(strategy=\"most_frequent\")\nmodel.fit(X, y)\ny_pred = model.predict(X)\naccuracy1 = accuracy_score(y, y_pred)\nprint('Base Accuracy',accuracy1,file=f)\n\nfrom 
imblearn.over_sampling import SMOTE\nfrom imblearn.under_sampling import RandomUnderSampler\nfrom imblearn.pipeline import Pipeline\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=2)\nover = SMOTE()\nunder = RandomUnderSampler()\nsteps = [('o', over), ('u', under)]\npipeline = Pipeline(steps=steps)\n# transform the dataset\nprint(\"Before SMOTE\",X_train.shape)\nX_train=X_train.reshape(X_train.shape[0],-1)\nX_train, y_train = pipeline.fit_resample(X_train, y_train)\nX_train = X_train.reshape((-1,input_shape[1],input_shape[2],input_shape[3],input_shape[4]))\nprint(\"After SMOTE\",X_train.shape)\n\nfrom sklearn.preprocessing import OneHotEncoder\nohe=OneHotEncoder()\ny_train=ohe.fit_transform(y_train.reshape(-1,1)).toarray()\n\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Conv3D, Dropout, Flatten, MaxPooling3D\nfrom tensorflow.keras.wrappers.scikit_learn import KerasClassifier\nfrom tensorflow.keras.backend import clear_session\n\n\ndef create_model():\n    model = Sequential()\n    model.add(Conv3D(8,3,activation='relu',input_shape=input_shape[1:]))\n    model.add(Conv3D(16,2,activation='relu'))\n    model.add(MaxPooling3D(pool_size = (2, 2, 2)))\n    model.add(Dropout(0.6))\n    model.add(Flatten())\n    model.add(Dense(128, activation = 'relu'))\n    model.add(Dropout(0.9))\n    model.add(Dense(2, activation='softmax'))\n    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n    return model\n\nmodel = create_model()\n\nmodel.fit(X_train,y_train,epochs=1)\ny_pred = model.predict(X_test)\n\n#Converting predictions to label\npred = list()\nfor i in range(len(y_pred)):\n    pred.append(np.argmax(y_pred[i]))\n\nprint(confusion_matrix(y_true=y_test, y_pred=pred),file=f)\nprint(\"accuracy\", accuracy_score(y_test, pred),file=f)\nprint(\"precision\", precision_score(y_test, pred,average='micro'),file=f)\nf.close()\n\n# cv = RepeatedStratifiedKFold(n_splits=8, n_repeats=10, random_state=2)\n# parameters = {'epochs':[10,20,30]\n#               }\n# clf = GridSearchCV(model, parameters,cv=cv,n_jobs=4)\n# clf.fit(X,y)\n# print('Accuracy: ', clf.best_score_,file=f)\n# print('Best Parameters: ', clf.best_params_,file=f)\n","sub_path":"rohit-cnn.py","file_name":"rohit-cnn.py","file_ext":"py","file_size_in_byte":3333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"213225601","text":"# 1567. Maximum Length of Subarray With Positive Product\n# vwc 204\n# 2021/10/27\n#\n# Runtime: 628 ms, faster than 77.55% of Python3 online submissions for Maximum Length of Subarray With Positive Product.\n# Memory Usage: 28.2 MB, less than 43.83% of Python3 online submissions for Maximum Length of Subarray With Positive Product.\n\n# Dynamic programming\n# even[i]: length of the longest subarray ending at index i that contains an even number of negatives\n# odd[i]: length of the longest subarray ending at index i that contains an odd number of negatives\n# The official hints seem even simpler\n\nclass Solution:\n    def getMaxLen(self, nums: List[int]) -> int:\n        n = len(nums)\n        even, odd = [0] * (1 + n), [0] * (1 + n)\n        for i in range(n):\n            if nums[i] > 0:\n                even[i + 1] = even[i] + 1\n                odd[i + 1] = odd[i] + 1 if odd[i] > 0 else 0\n            elif nums[i] < 0:\n                even[i + 1] = odd[i] + 1 if odd[i] > 0 else 0\n                odd[i + 1] = even[i] + 1\n        return max(even)\n\n","sub_path":"1567. Maximum Length of Subarray With Positive Product.py","file_name":"1567. 
Maximum Length of Subarray With Positive Product.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"292759256","text":"def naivestringsearch(st1,st2):\n\tst1l = list(st1)\n\tst2l = list(st2)\n\tn = 0\n\t\n\tcountmatch = 0\n\n\twhile n < len(st1l):\n\t\tif st1l[n] == st2l[0]:\n\t\t\tz = n\n\t\t\tm = 0\n\t\t\tcount = 0\n\n\t\t\twhile m < len(st2l) and z <= len(st1l)-1:\n\t\t\t\tif st1l[z] != st2l[m]:\n\t\t\t\t\tbreak\t\n\t\t\t\tz += 1\n\t\t\t\tm += 1\n\t\t\t\tcount += 1\n\t\t\t\n\t\t\tif count == len(st2l):\n\t\t\t\tcountmatch +=1\n\t\tn+=1\n\n\treturn(countmatch)\nprint(naivestringsearch('tacotacostaco', 'tacos'))","sub_path":"Search/naivestringsearch.py","file_name":"naivestringsearch.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"68805631","text":"import pygame\r\nfrom comet import Comet\r\n\r\n#créer une classe pour gérer cet évenement a intervalle régulière\r\nclass CometFallEvent:\r\n\r\n #lors du chargement -> créer un compteur\r\n def __init__(self, game):\r\n self.percent = 0\r\n self.percent_speed = 5\r\n self.game = game\r\n self.fall_mode = False\r\n\r\n #définir un groupe de sprite pour stocker nos comètes\r\n self.all_comets = pygame.sprite.Group()\r\n\r\n def add_percent(self):\r\n self.percent += self.percent_speed / 100\r\n\r\n def is_full_loaded(self):\r\n return self.percent >= 100\r\n\r\n def reset_percent(self):\r\n self.percent = 0\r\n\r\n def meteor_fall(self):\r\n #boucle pour les valeurs entre 1 et 10\r\n for i in range(1, 10):\r\n #apparaitre une première boule de feu\r\n self.all_comets.add(Comet(self))\r\n\r\n def attempt_fall(self):\r\n #la jauge d'evenement es totalement charger\r\n if self.is_full_loaded() and len(self.game.all_monsters) == 0:\r\n print(\"Pluie de Comètes !! 
ATTENTION !!\")\r\n self.meteor_fall()\r\n self.fall_mode = True #activer l'évenement\r\n\r\n def update_bar(self, surface):\r\n\r\n #ajouter du pourcentage a la barre\r\n self.add_percent()\r\n\r\n #barre noire en arrière plan\r\n pygame.draw.rect(surface, (0, 0, 0), [\r\n 0, #l'axe des x\r\n surface.get_height() - 22, #l'axe des y\r\n surface.get_width(), #longueur de la fenêtre\r\n 10, #l'épaisseur de la barre\r\n ])\r\n #barre rouge correspond a la jauge d'évenenement\r\n pygame.draw.rect(surface, (187, 11, 11), [\r\n 0, # l'axe des x\r\n surface.get_height() - 22, # l'axe des y\r\n surface.get_width() / 100 * self.percent, # longueur de la fenêtre\r\n 10, # l'épaisseur de la barre\r\n ])\r\n","sub_path":"Python_Game/comet_event.py","file_name":"comet_event.py","file_ext":"py","file_size_in_byte":1903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"492953261","text":"# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Eltex.TAU.get_chassis_id\n# ---------------------------------------------------------------------\n# Copyright (C) 2007-2018 The NOC Project\n# See LICENSE for details\n# ---------------------------------------------------------------------\n\n# NOC modules\nfrom noc.core.script.base import BaseScript\nfrom noc.sa.interfaces.igetchassisid import IGetChassisID\nfrom noc.core.mac import MAC\n\n\nclass Script(BaseScript):\n name = \"Eltex.TAU.get_chassis_id\"\n cache = True\n interface = IGetChassisID\n\n def execute(self):\n # tau8\n mac = self.cli(\"cat /tmp/board_mac\").strip()\n if \"No such file or directory\" in mac:\n # tau32\n m = self.cli(\"cat /tmp/factory |grep MAC\")\n mac = m.split(\":\", 1)[1].strip()\n return {\"first_chassis_mac\": MAC(mac), \"last_chassis_mac\": MAC(mac)}","sub_path":"sa/profiles/RDP/EcoNAT/get_chassis_id.py","file_name":"get_chassis_id.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"224190732","text":"import datetime\nimport os\nimport random\nimport string\n\nfrom flask import current_app\nfrom google.cloud import storage\nfrom werkzeug.exceptions import BadRequest\nfrom werkzeug.utils import secure_filename\n\n\ndef _check_extension(filename, allowed_extensions):\n \"\"\"\n Checks whether a given filename has one of the allowed extensions.\n \"\"\"\n file, ext = os.path.splitext(filename)\n if (ext.replace('.', '') not in allowed_extensions):\n raise BadRequest(\n f'invalid extension, supported extensions {allowed_extensions}'\n )\n\n\ndef _safe_filename(id, filename):\n \"\"\"\n Generates a safe filename. 
Converts a filename into a\n    unique identifier.\n\n    ``filename.ext``\n    becomes\n    ``filename-epochseconds.epochmilliseconds-random10lowercasechars-userid.ext``\n    \"\"\"\n\n    filename = secure_filename(filename)\n    now = datetime.datetime.utcnow()\n    timestamp = datetime.datetime.timestamp(now)\n    rdstr = ''.join(random.choice(string.ascii_lowercase) for x in range(10))\n    basename, extension = filename.rsplit('.', 1)\n    return f'{basename}-{timestamp}-{rdstr}-{secure_filename(id)}.{extension}'\n\n\ndef upload_file(id, stream, name, ctype):\n    \"\"\"\n    Uploads a file into the bucket after\n    giving it a unique identifier.\n\n    Returns the unique identifier.\n    \"\"\"\n    _check_extension(name, current_app.config['ALLOWED_EXTENSIONS'])\n    filename = _safe_filename(id, name)\n    client = storage.Client()\n    bucket = client.bucket(current_app.config['BUCKET_NAME'])\n    blob = bucket.blob(filename)\n    blob.upload_from_string(stream, content_type=ctype)\n    return filename\n\n","sub_path":"frontend/bucket.py","file_name":"bucket.py","file_ext":"py","file_size_in_byte":1629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"333638771","text":"from urllib import request\n\nclass MovieTop(object):\n    def __init__(self):\n        self.start=0;\n        self.param='&filter=';\n        self.headers={'User-Agent':'Mozilla/5.0 (Windows NT 6.1;WOW64)'};\n    def get_page(self):\n        page_content=[];\n        try:\n            while self.start<255:\n                url='https://movie.douban.com/top250?start='+str(self.start);\n                req=request.Request(url,headers=self.headers)\n                response=request.urlopen(req)\n                page=response.read().decode('utf-8')\n                page_num=(self.start+25)//25\n                print('正在抓取第'+str(page_num)+'页数据')\n                self.start+=25\n                page_content.append(page)\n            print(page_content)\n            return page_content\n        except request.URLError as URLerr:\n            if hasattr(URLerr,'reason'):\n                print('抓取失败:',URLerr.reason)\n    def main(self):\n        print('begin')\n        self.get_page()\n        print('end')\n\na=MovieTop()\na.main()\n\n\n","sub_path":"python_test.py","file_name":"python_test.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"309187060","text":"import urllib.request\nimport re\nimport sys\nfrom pprint import pprint\nimport time\nimport gzip\n\n'''this module gets the news hrefs and news titles from the main news page'''\n'''also it is a test file'''\n\nclass Test:\n\t# the output file is the HTML source of the news front page\n\tURL = 'http://news.sina.com.cn/'\n\tdef test(self,fileName):\n\t\tresponse = urllib.request.urlopen(Test.URL)\n\t\tcontent = (gzip.decompress(response.read())).decode('gbk')\t\t\n\t\twith open(fileName,\"w\") as fw:\n\t\t\tfw.write(content)\n\nclass Test2:\n\t# the output file contains the hrefs and titles that were extracted\n\tdef __init__(self,fileName):\n\t\twith open(fileName,\"r\") as fr:\n\t\t\tself.content = fr.read()\n\tdef getHref(self):\n\t\tpattern = '<a target=\"_blank\" href=\"(.*?)\">(.*?)</a>'\n\t\threfs = re.findall(pattern,self.content)\n\t\t\n\t\twith open(\"test2output2.out\",\"w\") as fw:\n\t\t\tfor href,title in hrefs:\n\t\t\t\tfw.write(href+\"\\n\"+title+\"\\n\\n\")\n\t\tprint(len(hrefs))\n\n\n# 14岁少女没钱上网 男友教唆其色诱抢劫\n\ndef main(fileName):\n\tobj = Test()\n\tobj.test(fileName)\n\n\ndef main_1(fileName):\n\tobj = Test2(fileName)\n\tobj.getHref() \n\nif __name__==\"__main__\":\n\t# main(sys.argv[1])\n\tmain_1(sys.argv[1])","sub_path":"sinaWebSpider/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"70712016","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nProject smoketest\nAuthor zhenghongguang\nDate 2020-02-29\n\"\"\"\n\nimport unittest\nimport os\nimport sys\nimport json\nimport csv\nimport time\nimport warnings\nfrom loguru import logger\nimport jsonschema\nimport ddt\nsys.path.append(os.getcwd())\nfrom tm.consoleapi import ConsoleAPI\n\n\ndef jobcsv():\n \"\"\"\n 读取csv文件,DictReader\n :return:\n \"\"\"\n logger.info(\"get job.csv\")\n jobs = []\n csvfiles = os.getenv(\"csvfiles\")\n csv.field_size_limit(1024 * 1024 * 10)\n logger.info(csvfiles)\n if csvfiles is None:\n return [False, False]\n for file in csvfiles.split(\",\"):\n file_reader = open(\"./datainput/tm/\"+file)\n reader = csv.reader(file_reader)\n fieldnames = next(reader)\n csvfile = csv.DictReader(file_reader, fieldnames=fieldnames)\n #next(csvfile)\n for row in csvfile:\n logger.info(dict(row))\n jobs.append([row.get(\"title\"), dict(row)])\n file_reader.close()\n logger.info(jobs)\n return jobs\n\n\n@ddt.ddt\nclass Job(unittest.TestCase):\n namespace = None\n @classmethod\n def setUpClass(cls) -> None:\n logger.info(\"setup\")\n warnings.simplefilter(\"ignore\", ResourceWarning)\n # cls.client = ConsoleAPI(site=conf.get(\"site\"),\n # user=conf.get(\"user\"), passwd=conf.get(\"passwd\"))\n cls.client = ConsoleAPI(site=os.getenv(\"consolesite\"),\n user=os.getenv(\"consoleuser\"), passwd=os.getenv(\"consolepasswd\"))\n cls.namespace = os.getenv(\"namespace\")\n\n def check_schema(self, resp):\n \"\"\"\n [ddt] json schema check\n :param resp:\n :return:\n \"\"\"\n freader = open('./console_api/schema/addtask.json')\n schema = json.loads(\n freader.read(os.path.getsize('./console_api/schema/addtask.json')))\n freader.close()\n try:\n jsonschema.validate(resp, schema)\n return True\n except Exception as err:\n logger.error(err)\n return str(err)\n\n def getmetaid(self, datasource):\n logger.info(datasource)\n datasource_metaid = []\n try:\n for data in datasource:\n tmp = {}\n tmp[\"varName\"] = data.get(\"varName\")\n if \"${namespace}\" in data.get(\"dsId\"):\n ds = data.get(\"dsId\").replace(\"${namespace}\", self.namespace)\n else:\n ds = data.get(\"dsId\")\n logger.info(ds)\n logger.info(data)\n response = self.client.query_metadata_byname(query=\"dsName=%s&dataSetName=%s&key=%s\"\n % (ds, data.get(\"dataSet\"), data.get(\"key\")))\n logger.info(response)\n tmp[\"metaId\"] = response.get(\"data\").get(\"metaId\")\n datasource_metaid.append(tmp)\n except Exception as err:\n logger.error(err)\n return False\n return datasource_metaid\n\n def getdsid(self, dsname):\n logger.info(\"get dsid\")\n if \"${namespace}\" in dsname:\n dsname=dsname.replace(\"${namespace}\", self.namespace)\n page = 0\n try:\n while 1:\n response = self.client.query_ds(query=\"page=%d\" % page)\n for ds in response.get(\"data\").get(\"data\"):\n if ds.get(\"name\") == dsname:\n return ds.get(\"dsId\")\n if page == response.get(\"data\").get(\"totalPages\"):\n return False\n page = page + 1\n except Exception as err:\n logger.info(err)\n\n def jobdata_json(self, jsonfile):\n fr = open(\"./datainput/tm/jobjson/\" + jsonfile)\n data = json.load(fr)\n fr.close()\n logger.info(data)\n body = {\n \"name\": data.get(\"name\"),\n \"code\": data.get(\"code\"),\n \"taskResultVOList\": data.get(\"taskResultVOList\")\n }\n datasource = self.getmetaid(data.get(\"taskDataSourceVOList\"))\n\n for result in data.get(\"taskResultVOList\"):\n if \"${namespace}\" in result.get(\"resultDest\"):\n result[\"resultDest\"] = 
result.get(\"resultDest\").replace(\"${namespace}\",\n self.namespace)\n body[\"taskDataSourceVOList\"] = datasource\n return body\n\n def jobdata_dict(self, data):\n datasource_metaid = self.getmetaid(data.get(\"datasource\"))\n logger.info(datasource_metaid)\n self.assertIsInstance(datasource_metaid, list, msg=\"get metaid failed\")\n result_dict = data.get(\"result\")\n for tmpres in result_dict:\n dsid = self.getdsid(tmpres.get(\"resultDest\"))\n self.assertIsInstance(dsid, str, msg=\"get dsid failed\")\n tmpres[\"resultDest\"] = dsid\n logger.info(result_dict)\n\n jobbody = {\n \"name\": data.get(\"title\"),\n \"taskDataSourceVOList\": datasource_metaid,\n \"taskResultVOList\": result_dict,\n \"code\": data.get(\"code\")\n }\n return jobbody\n\n @ddt.data(*jobcsv())\n @ddt.unpack\n def test_jobrun(self, title, data):\n \"\"\"\n [ddt] ddt驱动任务测试\n :param data\n :return:\n \"\"\"\n logger.info(data)\n logger.info(\"start job timeout=%d\" % int(data.get(\"timeout\")))\n #logger.info(title)\n\n if data.get(\"json\"):\n jobbody = self.jobdata_json(jsonfile=data.get(\"json\"))\n else:\n jobbody = self.jobdata_dict(data=data)\n logger.info(jobbody)\n response = self.client.add_task(job_data=json.dumps(jobbody))\n logger.info(response)\n if isinstance(self.check_schema(response), str):\n self.assertTrue(False, msg=\"jsonschema check failed\")\n taskid = response.get(\"data\").get(\"id\")\n response = self.client.start_task(jobid=taskid)\n logger.info(response)\n self.assertEqual(response.get(\"code\"), 0, msg=\"start job failed\")\n timecount = 0\n while timecount < int(data.get(\"timeout\")):\n response = self.client.get_task(taskid=taskid)\n logger.info(response)\n if response.get(\"data\").get(\"queueStatus\") < 6:\n time.sleep(10)\n timecount = timecount + 30\n continue\n else:\n self.assertEqual(response.get(\"data\").get(\"queueStatus\"), 6,\n msg=\"expect task success\")\n break\n #response = self.client.delete_task(jobid=taskid)\n #logger.info(response)\n self.assertEqual(response.get(\"code\"), 0, msg=\"delete job expect code = 0\")\n\n\"\"\"\nif __name__ == '__main__':\n os.environ[\"csvfiles\"] = \"heartbeat_ali_metaid.csv\"\n os.environ[\"consolesite\"] = \"console-dev.tsingj.com\"\n os.environ[\"consoleuser\"] = \"heartbeat\"\n os.environ[\"consolepasswd\"] = \"qwer1234\"\n print('hhhhhhh\\n')\n unittest.main()\n\"\"\"\n","sub_path":"job/console_job.py","file_name":"console_job.py","file_ext":"py","file_size_in_byte":7056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"368892608","text":"def check(board, row, column, n):\n for i in range(n):\n if board[row][i] == 1:\n return False\n\n for i in range(n):\n if board[i][column] == 1:\n return False\n\n r = row\n c = column\n while r < n and c < n:\n if board[r][c] == 1:\n return False\n r = r + 1\n c = c + 1\n\n r = row\n c = column\n while r >= 0 and c >= 0:\n if board[r][c] == 1:\n return False\n r = r - 1\n c = c - 1\n\n r = row\n c = column\n while r >= 0 and c < n:\n if board[r][c] == 1:\n return False\n r = r - 1\n c = c + 1\n\n r = row\n c = column\n while r < n and c >= 0:\n if board[r][c] == 1:\n return False\n r = r + 1\n c = c - 1\n\n return True\n\n\ndef solve(board, column, n):\n if column >= n:\n return True\n\n for i in range(n):\n if check(board, i, column, n):\n board[i][column] = 1\n if solve(board, column + 1, n):\n return True\n board[i][column] = 0\n return False\n\n\ndef backtracking(n):\n board = [0] * n\n for i in range(n):\n board[i] = [0] * n\n\n 
solve(board, 0, n)\n return board\n\n# End of Backtracking algorithm\n","sub_path":"backtracking.py","file_name":"backtracking.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"569058354","text":"import paho.mqtt.client as mqtt\nimport json\nimport base64\nimport requests\n\n#Please Complete te next information for API connection\nTOKEN_API_WIRIDLAB='iot-C5HcSy-bZWKwGBgOIHjsNT900n81kFrr'\nNODE_NAME_WIRIDLAB= 'SF7HELTEC'\n\n#Please complete the next information for MQTT connection\nSERVER_CHIRPSTACK='10.0.2.86'\nMQTT_CHIRPSTACK_PORT= '1883'\nAPPLICATION_ID='2'\n\n# The callback for when the client receives a CONNACK response from the server.\ndef on_connect(client, userdata, flags, rc):\n print(\"Connected with result code \"+str(rc))\n\n # Subscribing in on_connect() means that if we lose the connection and\n # reconnect then subscriptions will be renewed.\n client.subscribe(\"application/\"+APPLICATION_ID+\"/#\")\n\n# The callback for when a PUBLISH message is received from the server.\ndef on_message(client, userdata, msg):\n print(msg.topic+\" \"+str(msg.payload))\n x=str(msg.payload)\n y = json.loads(x)\n gateways = y['rxInfo']\n node = y['txInfo']\n appid = y['applicationID']\n appname = y['applicationName']\n devname = y['deviceName']\n data = y['data']\n d = base64.b64decode(data)\n # Decoding the bytes to string\n datad = d.decode(\"UTF-8\")\n\n\n frequ = node['frequency']\n modul = node['modulation']\n bandw = node['loRaModulationInfo']['bandwidth']\n coder = node['loRaModulationInfo']['codeRate']\n sprea = node['loRaModulationInfo']['spreadingFactor']\n grssi = y['rxInfo'][0]['rssi']\n gsnr = y['rxInfo'][0]['loRaSNR']\n gchan = y['rxInfo'][0]['channel']\n grfch = y['rxInfo'][0]['rfChain']\n gcrc = y['rxInfo'][0]['crcStatus']\n\n\n\n print(\"Enviando datos a la API.....\")\n jsonData = [{}]\n jsonData[0][\"appID\"] = appid\n jsonData[0][\"appName\"] = appname\n jsonData[0][\"devName\"] = devname\n jsonData[0][\"dataNode\"] = datad\n jsonData[0][\"infNode\"] = node\n jsonData[0][\"infGate\"] = gateways\n\n\n jsonData[0]['frequency_s'] = frequ\n jsonData[0]['modulation_s'] = modul\n jsonData[0]['bandwidth_s'] = bandw\n jsonData[0]['codeRate_s'] = coder\n jsonData[0]['spreadingFactor_s'] = sprea\n jsonData[0]['rssi_s'] = grssi\n jsonData[0]['loRaSNR_s'] = gsnr\n jsonData[0]['channel_s'] = gchan\n jsonData[0]['rfChain_s'] = grfch\n jsonData[0]['crcStatus_s'] = gcrc\n\n print (jsonData)\n\n jsonData = json.dumps(jsonData, indent=4)\n headers = {\"WIRID-LAB-AUTH-TOKEN\": TOKEN_API_WIRIDLAB, \"Content-Type\": \"application/json\"}\n info = requests.post(\"https://api.wiridlab.site/api/iot/devices/\"+ NODE_NAME_WIRIDLAB.lower() , headers=headers, data=jsonData, timeout=None)\n dataAPI = info.json()\n\n if (info.status_code == 200):\n print (\" Request API\")\n print(json.dumps(dataAPI, indent=4, sort_keys=True))\n else:\n print (\"Error sending information\")\n print(json.dumps(dataAPI, indent=4, sort_keys=True))\n\nclient = mqtt.Client()\nclient.on_connect = on_connect\nclient.on_message = on_message\n\nclient.connect(SERVER_CHIRPSTACK, MQTT_CHIRPSTACK_PORT, 60)\n\n# Blocking call that processes network traffic, dispatches callbacks and\n# handles reconnecting.\n# Other loop*() functions are available that give a threaded interface and a\n# manual 
interface.\nclient.loop_forever()\n","sub_path":"Heltec/example_lora_mqtt_api_chirpstack_wiridlab_HELTEC1.py","file_name":"example_lora_mqtt_api_chirpstack_wiridlab_HELTEC1.py","file_ext":"py","file_size_in_byte":3198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"309249300","text":"l=[[' ',' ','O','O','O',' ',' '],\n [' ',' ','O','O','O',' ',' '],\n ['O','O','O','O','O','O','O'],\n ['O','O','O',' ','O','O','O'],\n ['O','O','O','O','O','O','O'],\n [' ',' ','O','O','O',' ',' '],\n [' ',' ','O','O','O',' ',' '],]\ncols=[0,1,2,3,4,5,6]\npoints=0\nflag=0\nexception=[[i,j] for i in [0,1,5,6] for j in [0,1,5,6]]\n\ndef board(l,cols):\n print(end=' ')\n for i in cols:\n print(i,end=' ')\n print()\n for count,i in enumerate(l,0):\n print(count,end=' ')\n for j in i:\n print(j,end=' ')\n print(count,end=' ')\n print()\n print(end=' ')\n for i in cols:\n print(i,end=' ')\n print()\n\ndef horcheck(l):\n horcount=0\n for i in range(len(l)):\n for j in range(len(l[i])):\n if str(i) in '0156':\n if l[i][j]=='O' and j=='2':\n if l[i][j+1]=='O' and l[i][j+2]==' ':\n horcount+=1\n break\n elif 2<=i<=4 and j<=4 and l[i][j]=='O' and l[i][j+1]=='O' and l[i][j+2]==' ':\n horcount+=1\n break\n\n for i in range(len(l)):\n for j in reversed(range(len([i]))):\n if str(i) in '0156':\n if l[i][j]=='O' and j=='4':\n if l[i][j-1]=='O' and l[i][j-2]==' ':\n horcount+=1\n break\n elif 2<=i<=4 and j>=2 and l[i][j]=='O' and l[i][j-1]=='O' and l[i][j-2]==' ':\n horcount+=1\n break\n return horcount\n\ndef vercheck(l):\n vercount=0\n for i in range(len(l)):\n for j in range(len(l[i])):\n if str(j) in '0156':\n if l[i][j]=='O' and i=='2':\n if l[i+1][j]=='O' and l[i+2][j]==' ':\n vercount+=1\n break\n elif 2<=j<=4 and i<=4 and l[i][j]=='O' and l[i+1][j]=='O' and l[i+2][j]==' ':\n vercount+=1\n break\n\n for i in reversed(range(len(l))):\n for j in range(len([i])):\n if str(j) in '0156':\n if l[i][j]=='O' and i=='4':\n if l[i-1][j]=='O' and l[i-2][j]==' ':\n vercount+=1\n break\n elif 2<=j<=4 and i>=2 and l[i][j]=='O' and l[i-1][j]=='O' and l[i-2][j]==' ':\n vercount+=1\n break\n return vercount\n\ndef welcome():\n print('\\nWELCOME TO THE COLOSSAL MARBLE SOLITAIRE\\n')\n board(l,cols)\n print('\\n')\n game(l,flag,points,exception)\n\ndef game(l,flag,points,exceptions):\n \n flag=flag\n points=points\n \n horizontal=horcheck(l)\n vertical=vercheck(l)\n \n while (horizontal+vertical)>=1:\n \n \n \n horizontal=horcheck(l)\n vertical=vercheck(l)\n \n if (horizontal+vertical)<1:\n print('\\nNO MORE MOVES POSSIBLE...GAME OVER\\n')\n print('\\nPOINTS = ',points)\n print()\n return\n else:\n pass\n \n #print('\\nhorizontal = ',horizontal)\n #print('\\nvertical = ',vertical)\n \n \n \n print('\\nENTER THE INITIAL POSITION OF THE MARBLE WHICH YOU WANT TO MOVE : ')\n initialPosition=list(map(int,input().split()))\n ip1=initialPosition[0]\n ip2=initialPosition[1]\n \n print('\\nENTER THE DESTINATION POSITION WHERE YOU WANT TO PLACE THE SELECTED MARBLE : ')\n destinationPostion=list(map(int,input().split()))\n dp1=destinationPostion[0]\n dp2=destinationPostion[1]\n \n if (ip1<0 or ip1>=7) or (ip2<0 or ip2>=7) or (dp1<0 or dp1>=7) or (dp2<0 or dp2>=7) or (initialPosition in exception) or (destinationPostion in exception):\n print('\\nINVALID POSITIONS...RE-ENTER THE POSITIONS\\n')\n flag=1\n break\n else:\n pass\n \n if l[ip1][ip2]==' ':\n print('\\nINVALID INITIAL POSITION...ENTER THE VALID POSITIONS\\n')\n flag=1\n break\n else:\n pass\n \n if l[dp1][dp2]!=' ':\n 
print('\\nINVALID DESTINATION POSITION...ENTER THE VALID POSITIONS\\n')\n flag=1\n break\n else:\n pass\n \n if ip1==dp1:\n if ip2>dp2:\n if l[dp1][dp2+1]!='O' or abs(ip2-dp2)!=2:\n print('\\nINVALID MOVE...ENTER VALID DESTINATION\\n')\n flag =1\n break\n else:\n l[dp1][dp2]='O'\n l[ip1][ip2]=' '\n l[dp1][dp2+1]=' '\n points+=1\n elif ip2dp1:\n \n if l[dp1+1][dp2]!='O' or abs(ip1-dp1)!=2:\n print('\\nINVALID MOVE...ENTER VALID DESTINATION\\n')\n flag =1\n break\n else:\n l[dp1][dp2]='O'\n l[ip1][ip2]=' '\n l[dp1+1][dp2]=' '\n points+=1\n \n elif ip1 200:\r\n msk = data.find_element_by_css_selector(\"a.msk\")\r\n print([msk.get_attribute('title'),\r\n nb, msk.get_attribute('href')])\r\n writer1.writerow([msk.get_attribute('title'),\r\n nb, msk.get_attribute('href')])\r\n\r\n print(\" The {} page has been done\".format(page_count))\r\n # 定位下一页的url\r\n url = driver.find_element_by_css_selector(\"a.zbtn.znxt\").get_attribute('href')\r\n\r\n# 解析每一页的所有歌单\r\n# for i in range(len(m_list)):\r\n# # 获取播放数\r\n# print(m_list[i])\r\n# nb = m_list[i].find_element_by_class_name(\"nb\").text\r\n# if '万' in nb and int(nb.split(\"万\")[0]) > 500:\r\n# # 获取歌单封面\r\n# msk = m_list[i].find_element_by_css_selector(\"a.msk\")\r\n# # 将歌单信息写入文件\r\n# writer2.writerow([msk.get_attribute('title'),\r\n# nb, msk.get_attribute('href')])\r\n\r\ncsv_file1.close()\r\ncsv_file2.close()\r\n","sub_path":"scrapy/netease-music.py","file_name":"netease-music.py","file_ext":"py","file_size_in_byte":2363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"596978876","text":"#!/usr/bin/env python\n\nimport rospy\nfrom nav_msgs.msg import Odometry\nfrom std_msgs.msg import Float32MultiArray\nfrom sensor_msgs.msg import Image\nfrom cv_bridge import CvBridge, CvBridgeError\nimport cv2\nimport csv\nimport os\nimport pandas as pd\n\n\nclass Converter:\n def __init__(self):\n self.cb = CvBridge()\n self.single_image = None\n self.index = 0\n self.encoder = None\n self.syn_encoder = []\n self.syn_encoder_diff = []\n self.data = []\n self.sub_enc = rospy.Subscriber('/sensinfo', Float32MultiArray, self.callback_enc)\n self.sub_img = rospy.Subscriber('/front_realsense/color/image_raw', Image, self.callback_img_with_syn_enc)\n\n def callback_enc(self, message):\n self.encoder = message\n print(self.encoder)\n\n def callback_img_with_syn_enc(self, message):\n # save image\n try:\n self.single_image = self.cb.imgmsg_to_cv2(message, 'bgr8')\n except CvBridgeError as e:\n print(e)\n\n self.index += 1\n cv2.imwrite('./dataset_from_original/new_file' + '/' + str(self.index) + '.jpg', self.single_image)\n\n # # save encoder corresponding to current image\n # self.syn_encoder.append(self.encoder)\n # position_x, position_y, orientation_z, orientation_w = self.value_encoder(self.encoder)\n # position_x_diff, position_y_diff, orientation_z_diff, orientation_w_diff \\\n # = self.diff_value_encoder(self.syn_encoder)\n\n with open('./dataset_from_original/enc_enc_2.csv', 'a') as f:\n writer = csv.writer(f)\n writer.writerow(self.encoder.data)\n\n # # # header\n # # # A: 'position_x', B: 'position_y', C: 'orientation_z', D: 'orientation_w',\n # # # E: 'position_x_diff', F: 'position_y_diff', G: 'orientation_z_diff', H: 'orientation_w_diff'\n # # data = [position_x, position_y, orientation_z, orientation_w,\n # # position_x_diff, position_y_diff, orientation_z_diff, orientation_w_diff]\n # with open('./dataset_from_original/enc_enc.csv', 'a') as f:\n # writer = csv.writer(f)\n # writer.writerow(data)\n #\n # 
@staticmethod\n # def value_encoder(message):\n # position_x = message.pose.pose.position.x\n # position_y = message.pose.pose.position.y\n # orientation_z = message.pose.pose.orientation.z\n # orientation_w = message.pose.pose.orientation.w\n #\n # return position_x, position_y, orientation_z, orientation_w\n #\n # @staticmethod\n # def diff_value_encoder(message_list):\n # if len(message_list) == 1:\n # position_x_diff = 0\n # position_y_diff = 0\n # orientation_z_diff = 0\n # orientation_w_diff = 0\n # else:\n # position_x_diff = message_list[-1].pose.pose.position.x - message_list[-2].pose.pose.position.x\n # position_y_diff = message_list[-1].pose.pose.position.y - message_list[-2].pose.pose.position.y\n # orientation_z_diff = message_list[-1].pose.pose.orientation.z - message_list[-2].pose.pose.orientation.z\n # orientation_w_diff = message_list[-1].pose.pose.orientation.w - message_list[-2].pose.pose.orientation.w\n #\n # return position_x_diff, position_y_diff, orientation_z_diff, orientation_w_diff\n\n\ndef main():\n os.mkdir('./dataset_from_original/new_file')\n rospy.init_node('get_img_enc')\n get_img_enc = Converter() # call content of init\n rospy.spin()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"scripts/get_img_sensinfo.py","file_name":"get_img_sensinfo.py","file_ext":"py","file_size_in_byte":3558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"410625977","text":"\nimport os\nimport numbers\nimport copy\nfrom abc import ABC, abstractmethod\nimport torch\n\nfrom dmt.data.samples.sample import Sample\nfrom dmt.transforms.harmonizer import ImageHarmonizer\nfrom dmt.utils.parse import parse_bool, parse_probability\nfrom dmt.constants import (\n INTERPOLATIONS_2_SITK_INTERPOLATIONS as interpolations_d\n)\n\n\nclass Transform(ABC):\n \"\"\" Abstract & base class for all image transforms.\n Implementation Structure:\n - all subclasses must define apply (opt. invertible) function\n - base class has parsing functions (input checking & processing)\n \n Transform inputs can be: \n 1. Raw Images: np.ndarray, torch.Tensor, PIL.Image, SimpleITK.Image\n 2. Containers: rere.Image\n \n Args:\n p: Probability that this transform will be applied.\n copy: Make a shallow copy of the input before applying the transform.\n include_keys: Key names in samples that will be transformed if they\n are images.\n exclude_keys: Key names in samples that will be ignored. \n \"\"\"\n \n ### ------ # Main API & Transformation Functionality # ----- ###\n \n def __init__(self, p=1.0, copy=True, include_keys=None, exclude_keys=None):\n self.p = parse_probability(p)\n self.copy = parse_bool(copy)\n keys = Sample._parse_include_exclude_keys(include_keys, exclude_keys)\n self.include_keys, self.exclude_keys = keys\n \n self.transform_args = () # for reproducibility; updated in subclasses\n \n def __call__(self, data, include_keys=None, exclude_keys=None):\n \"\"\"\n Handles functionality for all transforms:\n 1. Determine if transform should be applied given probability.\n 2. Standardize the image types given via a DataHarmonizer\n 3. Copy sample if necessary.\n 4. Transform the sample (rewrites new data to sample)\n 5. Record the transformation arguments into new sample.\n Args:\n data: Can be a Sample, Image, Harmonizer, Torch tensor, Numpy array, \n or SimpleITK Image.\n Returns:\n Transformed image(s) in the original type given. 
\n \"\"\"\n if torch.rand(1).item() > self.p:\n return data\n \n keys = include_keys, exclude_keys\n include_keys, exclude_keys = Sample._parse_include_exclude_keys(*keys)\n \n # overwrites keys specified in init if given here\n final_include_keys = self.include_keys\n if include_keys: \n final_include_keys = include_keys \n final_exclude_keys = self.exclude_keys\n if exclude_keys:\n final_exclude_keys = exclude_keys\n \n harmonizer = ImageHarmonizer(data)\n sample = harmonizer.get_sample()\n if self.copy:\n sample = copy.copy(sample)\n \n transformed_sample = self.apply_transform(sample)\n self._record_transform(transformed_sample)\n \n # convert to input type \n out_data = harmonizer.get_output(transformed_sample) \n return out_data\n \n @abstractmethod\n def apply_transform(self, sample):\n pass\n \n @property\n def name(self):\n return self.__class__.__name__\n \n @property\n def is_invertible(self):\n return hasattr(self, 'invert')\n \n ### ------ # Transform Recording & Replication # ----- ###\n \n def _record_transform(self, transformed_sample):\n reproducing_args = self.get_reproducing_arguments()\n transformed_sample.record_transform(reproducing_args)\n \n def get_reproducing_arguments(self):\n \"\"\"\n Return a dictionary with the arguments that would be necessary to\n reproduce the transform exactly (arguments are from previous transform).\n \"\"\"\n reproducing_arguments = {\n 'name': self.name,\n 'include_keys': self.include_keys,\n 'exclude_keys': self.exclude_keys,\n 'copy': self.copy,\n 'p': self.p\n }\n t_args = {name: getattr(self, name) for name in self.transform_args}\n reproducing_arguments.update(t_args)\n return reproducing_arguments\n\n\n","sub_path":"build/lib/dmt/transforms/transform_base.py","file_name":"transform_base.py","file_ext":"py","file_size_in_byte":4223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"170426068","text":"(H,M) = input(\"Please input what time you need to wake up in 24hr notation separated by a space \").split()\n\nH = int(H)\nM = int(M)\n\ndef alarmClock(H,M):\n if M >= 45:\n M = M - 45\n return str(H) + \" \" + str(M)\n elif M < 45:\n H = H - 1\n M = 60 - abs(M - 45)\n return str(H) + \" \" + str(M)\n else:\n return Error\n\nprint(alarmClock(H,M))\n \n","sub_path":"S10XP/Unit 2 Lesson 4 Alarm Clock.py","file_name":"Unit 2 Lesson 4 Alarm Clock.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"226396962","text":"import scrapy\nfrom scrapy.crawler import CrawlerProcess\n \nfrom datetime import date\nimport time\n\ntoday = date.today().strftime(\"%d/%m/%Y\")\n\nclass MySpider(scrapy.Spider):\n name = \"price\"\n \n custom_settings = {\n 'FEED_FORMAT': 'csv',\n 'FEED_URI': '../../data/raw/transaction_price.csv',\n 'DOWNLOAD_DELAY': 5,\n 'CONCURRENT_REQUESTS_PER_DOMAIN': 3,\n 'CONCURRENT_REQUESTS_PER_IP': 3\n}\n\n # concat page according to cities and page number\n cities = ['Espoo', 'Vantaa', 'Helsinki']\n start_urls = []\n for city in cities:\n for i in range (0, 100):\n url = f'https://asuntojen.hintatiedot.fi/haku/?c={city}&cr=1&t=3&l=0&z={i}&search=1&sf=0&so=a'\n start_urls.append(url)\n \n def parse(self, response):\n for url in start_urls:\n next_page = response.css('#next-prev-top').get() \n if re.search(next_page, \"seuraava sivu\"): # if page contains next page, then yield request\n yield scrapy.Request(url, callback=self.parse_book)\n\n def parse(self, response):\n rows = 
response.xpath('//*[@id=\"mainTable\"]/tbody[2]')\n for row in rows.css('tr'): \n yield {\n 'district' : row.xpath('td[1]/text()').get(),\n 'apartment_type' : row.xpath('td[2]/text()').get(),\n 'property_type' : row.xpath('td[3]/text()').get(),\n 'floor_area' : row.xpath('td[4]/text()').get(),\n 'price' : row.xpath('td[5]/text()').get(), \n 'price_m2' : row.xpath('td[6]/text()').get(),\n 'build_year' : row.xpath('td[7]/text()').get(),\n 'floor' : row.xpath('td[8]/text()').get(),\n 'elevator' : row.xpath('td[9]/text()').get(),\n 'condition' : row.xpath('td[10]/text()').get(),\n 'plot' : row.xpath('td[11]/text()').get(),\n 'energy_class': row.xpath('td[12]/text()').get(),\n 'date_scrape': today,\n 'municipaliy': response.url\n }\n \nif __name__ == \"__main__\":\n start_time = time.time()\n process = CrawlerProcess({\n 'USER_AGENT': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)'\n })\n\n process.crawl(MySpider)\n process.start() # the script will block here until the crawling is finished\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n \n#\\Users\\user\\Good-place-to-buy-a-property-in-Helsinki-area\\src\\price_spider>scrapy crawl price \n ","sub_path":"src/crawl/02_transaction_crawl.py","file_name":"02_transaction_crawl.py","file_ext":"py","file_size_in_byte":2533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"93295209","text":"import numpy\r\nimport operator\r\n\r\n\r\nclass RNNNumpy:\r\n def __init__(self, word_dim, hidden_dim=100, bptt_truncate=4):\r\n # Assign instance variables\r\n self.npa = numpy.array\r\n self.word_dim = word_dim\r\n self.hidden_dim = hidden_dim\r\n self.bptt_truncate = bptt_truncate\r\n # Randomly initialize the network parameters\r\n self.U = numpy.random.uniform(-numpy.sqrt(1. / word_dim), numpy.sqrt(1. / word_dim), (hidden_dim, word_dim))\r\n self.V = numpy.random.uniform(-numpy.sqrt(1. / hidden_dim), numpy.sqrt(1. / hidden_dim), (word_dim, hidden_dim))\r\n self.W = numpy.random.uniform(-numpy.sqrt(1. / hidden_dim), numpy.sqrt(1. / hidden_dim),\r\n (hidden_dim, hidden_dim))\r\n\r\n def forward_propagation(self, x):\r\n # The total number of time steps\r\n t = len(x)\r\n # During forward propagation we save all hidden states in s because need them later.\r\n # We add one additional element for the initial hidden, which we set to 0\r\n s = numpy.zeros((t + 1, self.hidden_dim))\r\n s[-1] = numpy.zeros(self.hidden_dim)\r\n # The outputs at each time step. Again, we save them for later.\r\n o = numpy.zeros((t, self.word_dim))\r\n # For each time step...\r\n for t in numpy.arange(t):\r\n # Note that we are indxing U by x[t]. 
This is the same as multiplying U with a one-hot vector.\r\n s[t] = numpy.tanh(self.U[:, x[t]] + self.W.dot(s[t - 1]))\r\n o[t] = self.softmax(self.V.dot(s[t]))\r\n\r\n return [o, s]\r\n\r\n # RNNNumpy.forward_propagation = forward_propagation\r\n\r\n def softmax(self, w, t=1.0):\r\n e = numpy.exp(self.npa(w) / t)\r\n dist = e / numpy.sum(e)\r\n return dist\r\n # RNNNumpy.softmax = softmax\r\n\r\n def predict(self, x):\r\n # Perform forward propagation and return index of the highest score\r\n o, s = self.forward_propagation(x)\r\n return numpy.argmax(o, axis=1)\r\n\r\n # RNNNumpy.predict = predict\r\n\r\n def calculate_total_loss(self, x, y):\r\n l = 0\r\n # For each sentence...\r\n for i in numpy.arange(len(y)):\r\n o, s = self.forward_propagation(x[i])\r\n # We only care about our prediction of the \"correct\" words\r\n correct_word_predictions = o[numpy.arange(len(y[i])), y[i]]\r\n # Add to the loss based on how off we were\r\n l += -1 * numpy.sum(numpy.log(correct_word_predictions))\r\n return l\r\n\r\n def calculate_loss(self, x, y):\r\n # Divide the total loss by the number of training examples\r\n n = numpy.sum((len(y_i) for y_i in y))\r\n return self.calculate_total_loss(x, y) / n\r\n\r\n def bptt(self, x, y):\r\n t = len(y)\r\n # Perform forward propagation\r\n o, s = self.forward_propagation(x)\r\n # We accumulate the gradients in these variables\r\n d_l_d_u = numpy.zeros(self.U.shape)\r\n d_l_d_v = numpy.zeros(self.V.shape)\r\n d_l_d_w = numpy.zeros(self.W.shape)\r\n delta_o = o\r\n delta_o[numpy.arange(len(y)), y] -= 1.\r\n # For each output backwards...\r\n for t in numpy.arange(t)[::-1]:\r\n d_l_d_v += numpy.outer(delta_o[t], s[t].T)\r\n # Initial delta calculation\r\n delta_t = self.V.T.dot(delta_o[t]) * (1 - (s[t] ** 2))\r\n # Backpropagation through time (for at most self.bptt_truncate steps)\r\n for bptt_step in numpy.arange(max(0, t - self.bptt_truncate), t + 1)[::-1]:\r\n # print \"Backpropagation step t=%d bptt step=%d \" % (t, bptt_step)\r\n d_l_d_w += numpy.outer(delta_t, s[bptt_step - 1])\r\n d_l_d_u[:, x[bptt_step]] += delta_t\r\n # Update delta for next step\r\n delta_t = self.W.T.dot(delta_t) * (1 - s[bptt_step - 1] ** 2)\r\n return [d_l_d_u, d_l_d_v, d_l_d_w]\r\n\r\n # Performs one step of SGD.\r\n def sgd_step(self, x, y, learning_rate):\r\n # Calculate the gradients\r\n d_l_d_u, d_l_d_v, d_l_d_w = self.bptt(x, y)\r\n # Change parameters according to gradients and learning rate\r\n self.U -= learning_rate * d_l_d_u\r\n self.V -= learning_rate * d_l_d_v\r\n self.W -= learning_rate * d_l_d_w\r\n","sub_path":"Deep_Learning/RNN_Algorithem.py","file_name":"RNN_Algorithem.py","file_ext":"py","file_size_in_byte":4274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"225631889","text":"import os\nimport unittest\nfrom pathlib import Path\n\nimport paramak\n\n\nclass test_object_properties(unittest.TestCase):\n def test_shape_default_properties(self):\n \"\"\"creates a Shape object and checks that the points attribute has\n a default of None\"\"\"\n\n test_shape = paramak.Shape()\n\n assert test_shape.points is None\n\n def test_incorrect_workplane(self):\n \"\"\"creates Shape object with incorrect workplane and checks ValueError\n is raised\"\"\"\n\n test_shape = paramak.Shape()\n\n def incorrect_workplane():\n \"\"\"creates Shape object with unacceptable workplane\"\"\"\n\n test_shape.workplane = \"ZY\"\n\n self.assertRaises(ValueError, incorrect_workplane)\n\n def test_incorrect_points(self):\n \"\"\"creates 
Shape objects and checks errors are raised correctly when\n specifying points\"\"\"\n\n test_shape = paramak.Shape()\n\n def incorrect_points_end_point_is_start_point():\n \"\"\"checks ValueError is raised when the start and end points are\n the same\"\"\"\n\n test_shape.points = [(0, 200), (200, 100), (0, 0), (0, 200)]\n\n self.assertRaises(\n ValueError,\n incorrect_points_end_point_is_start_point)\n\n def incorrect_points_missing_z_value():\n \"\"\"checks ValueError is raised when a point is missing a z value\"\"\"\n\n test_shape.points = [(0, 200), (200), (0, 0), (0, 50)]\n\n self.assertRaises(ValueError, incorrect_points_missing_z_value)\n\n def incorrect_points_not_a_list():\n \"\"\"checks ValueError is raised when the points are not a list\"\"\"\n\n test_shape.points = (0, 0), (0, 20), (20, 20), (20, 0)\n\n self.assertRaises(ValueError, incorrect_points_not_a_list)\n\n def incorrect_points_wrong_number_of_entries():\n \"\"\"checks ValueError is raised when individual points dont have 2 or\n 3 entries\"\"\"\n\n test_shape.points = [(0, 0), (0, 20), (20, 20, 20, 20)]\n\n self.assertRaises(ValueError, incorrect_points_wrong_number_of_entries)\n\n def incorrect_x_point_value_type():\n \"\"\"checks ValueError is raised when X point is not a number\"\"\"\n\n test_shape.points = [(\"string\", 0), (0, 20), (20, 20)]\n\n self.assertRaises(ValueError, incorrect_x_point_value_type)\n\n def incorrect_y_point_value_type():\n \"\"\"checks ValueError is raised when Y point is not a number\"\"\"\n\n test_shape.points = [(0, \"string\"), (0, 20), (20, 20)]\n\n self.assertRaises(ValueError, incorrect_y_point_value_type)\n\n def test_create_limits(self):\n \"\"\"creates a Shape object and checks that the create_limits function\n returns the expected values for x_min, x_max, z_min and z_max\"\"\"\n\n test_shape = paramak.Shape()\n\n test_shape.points = [\n (0, 0),\n (0, 10),\n (0, 20),\n (10, 20),\n (20, 20),\n (20, 10),\n (20, 0),\n (10, 0),\n ]\n\n assert test_shape.create_limits() == (0.0, 20.0, 0.0, 20.0)\n\n def test_export_2d_image(self):\n \"\"\"creates a Shape object and checks that a png file of the object with\n the correct suffix can be exported using the export_2d_image method\"\"\"\n\n test_shape = paramak.Shape()\n test_shape.points = [(0, 0), (0, 20), (20, 20), (20, 0)]\n os.system(\"rm filename.png\")\n test_shape.export_2d_image(\"filename\")\n assert Path(\"filename.png\").exists() is True\n os.system(\"rm filename.png\")\n test_shape.export_2d_image(\"filename.png\")\n assert Path(\"filename.png\").exists() is True\n os.system(\"rm filename.png\")\n\n def test_initial_solid_construction(self):\n \"\"\"creates a shape and checks that a cadquery solid with a unique hash value\n is created when .solid is called\"\"\"\n\n test_shape = paramak.RotateStraightShape(\n points=[(0, 0), (0, 20), (20, 20), (20, 0)], rotation_angle=360\n )\n\n assert test_shape.hash_value is None\n assert test_shape.solid is not None\n assert type(test_shape.solid).__name__ == \"Workplane\"\n assert test_shape.hash_value is not None\n\n def test_solid_return(self):\n \"\"\"checks that the same cadquery solid with the same unique has value is returned when\n shape.solid is called again after no changs have been made to the Shape\"\"\"\n\n test_shape = paramak.RotateStraightShape(\n points=[(0, 0), (0, 20), (20, 20), (20, 0)], rotation_angle=360\n )\n\n assert test_shape.solid is not None\n initial_hash_value = test_shape.hash_value\n assert test_shape.solid is not None\n assert initial_hash_value == 
test_shape.hash_value\n\n def test_conditional_solid_reconstruction(self):\n \"\"\"checks that a new cadquery solid with a new unique hash value is constructed when\n shape.solid is called after changes to the Shape have been made\"\"\"\n\n test_shape = paramak.RotateStraightShape(\n points=[(0, 0), (0, 20), (20, 20)], rotation_angle=360\n )\n\n assert test_shape.solid is not None\n assert test_shape.hash_value is not None\n initial_hash_value = test_shape.hash_value\n\n test_shape.rotation_angle = 180\n\n assert test_shape.solid is not None\n assert test_shape.hash_value is not None\n assert initial_hash_value != test_shape.hash_value\n\n def test_hash_value_update(self):\n \"\"\"checks that the hash value of a Shape is not updated until a new cadquery solid has\n been created\"\"\"\n\n test_shape = paramak.RotateStraightShape(\n points=[(0, 0), (0, 20), (20, 20)], rotation_angle=360\n )\n test_shape.solid\n assert test_shape.hash_value is not None\n initial_hash_value = test_shape.hash_value\n\n test_shape.rotation_angle = 180\n assert test_shape.hash_value == initial_hash_value\n test_shape.solid\n assert test_shape.hash_value != initial_hash_value\n\n def test_material_tag_warning(self):\n \"\"\"checks that a warning is raised when a Shape has a material tag > 28 characters\"\"\"\n\n test_shape = paramak.Shape()\n\n def warning_material_tag():\n\n test_shape.material_tag = \"abcdefghijklmnopqrstuvwxyz12345\"\n\n self.assertWarns(UserWarning, warning_material_tag)\n\n def test_invalid_material_tag(self):\n \"\"\"checks a ValueError is raised when a Shape has an invalid material tag\"\"\"\n\n test_shape = paramak.Shape()\n\n def invalid_material_tag():\n\n test_shape.material_tag = 123\n\n self.assertRaises(ValueError, invalid_material_tag)\n\n def test_export_html(self):\n \"\"\"checks a plotly figure of the Shape is exported by the export_html method with\n the correct filename with RGB and RGBA colors\"\"\"\n\n test_shape = paramak.RotateStraightShape(\n points=[(0, 0), (0, 20), (20, 20), (20, 0)], rotation_angle=360\n )\n\n os.system(\"rm filename.html\")\n test_shape.export_html('filename')\n assert Path(\"filename.html\").exists() is True\n os.system(\"rm filename.html\")\n test_shape.color = (1, 0, 0, 0.5)\n test_shape.export_html('filename')\n assert Path(\"filename.html\").exists() is True\n os.system(\"rm filename.html\")\n\n def test_invalid_stp_filename(self):\n \"\"\"checks ValueError is raised when invalid stp filenames are used\"\"\"\n\n def invalid_filename_suffix():\n\n paramak.RotateStraightShape(\n points=[(0, 0), (0, 20), (20, 20)],\n stp_filename=\"filename.invalid_suffix\"\n )\n\n self.assertRaises(ValueError, invalid_filename_suffix)\n\n def invalid_filename_type():\n\n paramak.RotateStraightShape(\n points=[(0, 0), (0, 20), (20, 20)],\n stp_filename=123456\n )\n\n self.assertRaises(ValueError, invalid_filename_type)\n\n def test_invalid_stl_filename(self):\n \"\"\"checks ValueError is raised when invalid stl filenames are used\"\"\"\n\n def invalid_filename_suffix():\n\n paramak.RotateStraightShape(\n points=[(0, 0), (0, 20), (20, 20)],\n stl_filename=\"filename.invalid_suffix\"\n )\n\n self.assertRaises(ValueError, invalid_filename_suffix)\n\n def invalid_filename_type():\n\n paramak.RotateStraightShape(\n points=[(0, 0), (0, 20), (20, 20)],\n stl_filename=123456\n )\n\n self.assertRaises(ValueError, invalid_filename_type)\n\n def test_invalid_color(self):\n \"\"\"checks ValueError is raised when invalid colors are used\"\"\"\n\n def invalid_color_type():\n\n 
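# color=255 is not a valid RGB(A) tuple, so construction should raise ValueError\n            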
paramak.RotateStraightShape(\n points=[(0, 0), (0, 20), (20, 20)],\n color=255\n )\n\n self.assertRaises(ValueError, invalid_color_type)\n\n def invalid_color_length():\n\n paramak.RotateStraightShape(\n points=[(0, 0), (0, 20), (20, 20)],\n color=(255, 255, 255, 1, 1)\n )\n\n self.assertRaises(ValueError, invalid_color_length)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"tests/test_Shape.py","file_name":"test_Shape.py","file_ext":"py","file_size_in_byte":9284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"519865264","text":"import time\nimport threading\n\n\nclass myThread(threading.Thread):\n def __init__(self, number, style, *args, **kwargs):\n super(myThread, self).__init__(*args, **kwargs)\n self.number = number\n self.style = style\n self.start()\n\n def run(self, *args, **kwargs):\n print('thread started')\n super(myThread, self).run(*args, **kwargs)\n print('thread ended')\n\n\ndef sleeper(num, style):\n print('Sleeping for {} secs as {}'.format(num, style))\n time.sleep(num)\n\n\nt = myThread(number=3, style='blue', target=sleeper, args=[3, 'blue'])\n\n\n# _______________________________________________________________\n\n\n# class myThread(threading.Thread):\n# def __init__(self, number, func, args):\n# threading.Thread.__init__(self)\n# self.number = number\n# self.func = func\n# self.args = args\n\n# def run(self):\n# print(f'thread {self.number} has started')\n# self.func(*self.args)\n# print(f'thread {self.number} has finished')\n\n\n# def double(number, cycles):\n# for i in range(cycles):\n# number += number\n# print(number)\n\n\n# thread_list = []\n# for i in range(50):\n# t = myThread(i + 1, double, [i, 3])\n# thread_list.append(t)\n# t.start()\n\n# for t in thread_list:\n# t.join()\n\n\n# _______________________________________________________________\n\n\n# class myThread(threading.Thread):\n# def run(self):\n# print('{} has started!'.format(self.getName()))\n# try:\n# if self._target:\n# self._target(*self._args, **self._kwargs)\n# finally:\n# del self._target, self._args, self._kwargs\n# print('{} has finished!'.format(self.getName()))\n\n\n# def sleeper(n, name):\n# print('{} going to sleep for 2 secs \\n'.format(name))\n# time.sleep(n)\n# print('{} has woken up \\n'.format(name))\n\n\n# for i in range(4):\n# t = myThread(target=sleeper, name='thread' + str(i + 1), args=(2, 'thread' + str(i + 1)))\n# t.start()\n","sub_path":"Fundamentals/Multithreading and Multiprocessing/threading-subclasses.py","file_name":"threading-subclasses.py","file_ext":"py","file_size_in_byte":1983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"582304071","text":"import torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn.modules.module import Module\nfrom torch.nn.parameter import Parameter\nfrom torch.nn import init\nimport torch\n\nimport itertools\n\nimport pprint\n\nclass _TransNorm(Module):\n\n def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, track_running_stats=True):\n\n super(_TransNorm, self).__init__()\n\n self.num_features = num_features\n self.eps = eps\n self.momentum = momentum\n self.affine = affine\n self.track_running_stats = track_running_stats\n\n if self.affine:\n self.weight = Parameter(torch.Tensor(num_features))\n self.bias = Parameter(torch.Tensor(num_features))\n\n else:\n self.register_parameter('weight', None)\n self.register_parameter('bias', None)\n\n if self.track_running_stats:\n 
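# TransNorm keeps separate running mean/variance buffers for the source and target domains\n            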
self.register_buffer('running_mean_source', torch.zeros(num_features))\n self.register_buffer('running_mean_target', torch.zeros(num_features))\n\n self.register_buffer('running_var_source', torch.ones(num_features))\n self.register_buffer('running_var_target', torch.ones(num_features))\n\n self.register_buffer('num_batches_tracked', torch.tensor(0, dtype=torch.long))\n\n else:\n self.register_parameter('running_mean_source', None)\n self.register_parameter('running_mean_target', None)\n self.register_parameter('running_var_source', None)\n self.register_parameter('running_var_target', None)\n\n self.reset_parameters()\n\n\n def reset_parameters(self):\n\n if self.track_running_stats:\n\n self.running_mean_source.zero_()\n self.running_mean_target.zero_()\n self.running_var_source.fill_(1)\n self.running_var_target.fill_(1)\n\n if self.affine:\n self.weight.data.uniform_()\n self.bias.data.zero_()\n\n def forward(self, input, last_flag= False, option='residual', running_flag=False, kernel='Student'):\n self._check_input_dim(input)\n\n if self.training:\n batch_size = input.size()[0] // 2\n\n input_source = input[:batch_size]\n input_target = input[batch_size:]\n\n x_hat_source = F.batch_norm(input_source, \\\n self.running_mean_source, \\\n self.running_var_source, \\\n self.weight, \\\n self.bias, \\\n self.training or not self.track_running_stats, \\\n self.momentum, \\\n self.eps) # momentum은 업데이트 할 때 쓰임\n\n x_hat_target = F.batch_norm(input_target, \\\n self.running_mean_target, \\\n self.running_var_target, \\\n self.weight, \\\n self.bias, \\\n self.training or not self.track_running_stats, \\\n self.momentum, \\\n self.eps)\n\n x_hat = torch.cat((x_hat_source, x_hat_target), dim=0)\n\n if running_flag:\n weight = torch.abs(self.running_mean_source - self.running_mean_target)\n cur_mean_source = self.running_mean_source\n cur_mean_target = self.running_mean_target\n\n else:\n if input.dim() == 4:\n input_source = input_source.permute(0,2,3,1).contiguous().view(-1, self.num_features)\n input_target = input_target.permute(0,2,3,1).contiguous().view(-1, self.num_features)\n\n cur_mean_source = torch.mean(input_source, dim=0)\n cur_var_source = torch.var(input_source, dim=0)\n cur_mean_target = torch.mean(input_target, dim=0)\n cur_var_target = torch.var(input_target, dim=0)\n\n if kernel == 'Gaussian':\n weight = torch.abs(cur_mean_source - cur_mean_target)\n tau = torch.exp(-weight / (torch.median(weight) + self.eps))\n\n elif kernel == 'Softmax':\n weight = torch.abs(cur_mean_source - cur_mean_target)\n temperature = 0.05\n tau = nn.Softmax(dim=0)(weight/temperature)\n\n elif kernel == 'Student':\n weight = torch.abs(cur_mean_source / torch.sqrt(cur_var_source + self.eps) - \\\n cur_mean_target / torch.sqrt(cur_var_target + self.eps))\n tau = 1.0 / (1.0 + weight)\n\n tau = self.num_features * tau / sum(tau)\n\n if input.dim() == 2 :\n tau = tau.view(1, self.num_features)\n elif input.dim() == 4 :\n tau = tau.view(1, self.num_features, 1, 1)\n\n if option == 'out':\n output = x_hat * tau.detach()\n elif option == 'None' :\n output = x_hat\n elif option == 'residual':\n output = x_hat * (1 + tau.detach())\n\n output_mean_source = torch.mean(output[:batch_size], dim = 0)\n output_mean_target = torch.mean(output[batch_size:], dim = 0)\n\n if last_flag:\n return output, tau, cur_mean_source, cur_mean_target, output_mean_source, output_mean_target\n else:\n return output\n\n\n else: ##test mode\n\n x_hat = F.batch_norm(\n\n input, self.running_mean_target, self.running_var_target, 
self.weight, self.bias,\n\n self.training or not self.track_running_stats, self.momentum, self.eps)\n\n if kernel == 'Gaussian':\n\n weight = torch.abs(self.running_mean_source - self.running_mean_target) ## (1, D)\n\n tau = torch.exp(-weight / (torch.median(weight) + self.eps)) ## (1, D)\n\n ## Just a normal Gaussian is wrong here, so we add bandwidth\n\n elif kernel == 'Softmax':\n\n weight = torch.abs(self.running_mean_source - self.running_mean_target) ## (1, D)\n\n temperature = 0.05\n\n tau = nn.Softmax(dim=0)(weight / temperature)\n\n ##Winner takes all is wrong here, so we add temperature to diverse it\n\n elif kernel == 'Student':\n\n weight = torch.abs(self.running_mean_source / torch.sqrt(self.running_var_source + self.eps)\n\n - self.running_mean_target / torch.sqrt(self.running_var_target + self.eps))\n\n tau = 1.0 / (1.0 + weight)\n\n tau = self.num_features * tau / sum(tau)\n\n if input.dim() == 2:\n\n tau = tau.view(1, self.num_features)\n\n elif input.dim() == 4:\n\n tau = tau.view(1, self.num_features, 1, 1)\n\n if option == 'out':\n\n output = x_hat * tau.detach()\n\n elif option == \"None\":\n\n output = x_hat\n\n elif option == \"residual\":\n\n output = x_hat * (1 + tau.detach())\n\n return output\n\n def _check_input_dim(self, input):\n return NotImplemented\n\n def _load_from_state_dict(self, state_dict, prefix, metadata, strict, missing_keys, unexpected_keys, error_msgs):\n\n version = metadata.get('version', None)\n\n if (version is None or version < 2) and self.track_running_stats:\n num_batches_tracked_key = prefix + 'num_batches_tracked'\n if num_batches_tracked_key not in state_dict:\n state_dict[num_batches_tracked_key] = torch.tensor(0, dtype=torch.long)\n\n #print('===================state dict=====================')\n #print(state_dict.keys())\n #print('==================================================')\n self._load_from_state_dict_from_pretrained_model(state_dict,\\\n prefix,\\\n metadata,\\\n strict,\\\n missing_keys,\\\n unexpected_keys,\\\n error_msgs)\n\n\n def _load_from_state_dict_from_pretrained_model(self, state_dict, prefix, metadata, strict, missing_keys, unexpected_keys, error_msgs, start_type='warm'):\n # 기존의 batchnorm에는 없는 코드\n #print('-------------here 222')\n local_name_params = itertools.chain(self._parameters.items(), self._buffers.items())\n local_state = {k: v.data for k, v in local_name_params if v is not None}\n\n for name, param in local_state.items(): # items() 함수는 key, value 쌍 얻는 것이다\n key = prefix + name\n if key[-5:] == 'alpha':\n continue\n if key[-6:] == 'source' or key[-6:] == 'target':\n if start_type == 'cold':\n continue\n elif start_type == 'warm':\n key = key[:-7]\n if key in state_dict:\n input_param = state_dict[key]\n\n if input_param.shape != param.shape:\n error_msgs.append('size mismatch for {}: copying a param of {} from checkpoint, '\n 'where the shape is {} in current model.'\n .format(key, param.shape, input_param.shape))\n continue\n if isinstance(input_param, Parameter):\n input_param = input_param.data\n\n try:\n param.copy_(input_param)\n except Exception:\n error_msgs.append('While copying the parameter named \"{}\", '\n 'whose dimensions in the model are {} and '\n 'whose dimensions in the checkpoint are {}.'\n .format(key, param.size(), input_param.size()))\n elif strict:\n missing_keys.append(key)\n\n def extra_repr(self):\n return 'num_features = {num_features}, eps = {eps}, momentum={momentum}, affine={affine}, track_running_stats = {track_running_stats}'.format(**self.__dict__)\n\n\n\n\n\nclass 
TransNorm2d(_TransNorm):\n def _check_input_dim(self, input):\n if input.dim() != 4:\n raise ValueError('expected 4D input (got {}D input)'.format(input.dim()))\n\n\nclass TransNorm1d(_TransNorm):\n def _check_input_dim(self, input):\n if input.dim() != 2 and input.dim() != 3:\n raise ValueError('expected 2D or 3D input (got {}D input)'\n .format(input.dim()))","sub_path":"TransNorm/trans_norm.py","file_name":"trans_norm.py","file_ext":"py","file_size_in_byte":10962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"159247087","text":"import copy\n\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.test import TestCase\nfrom django.urls import reverse\n\nfrom ..models import Dataset\nfrom utils.test_utils import load_test_json, load_test_db\nfrom users.models import User\n\ntest_data = load_test_json('saef')\n\nEDIT_DATASET_TEMPLATE_NAME = 'dataset/dataset_detail.html'\nPREVIEW_DATASET_TEMPLATE_NAME = 'dataset/dataset_preview.html'\n\n\nclass ManageEditDatasetViewTests(TestCase):\n def assertDatasetStructuralEquivalence(self, actual_dataset, expected_dataset, expected_sql,\n expected_dataset_access_method, expected_table):\n self.assertEqual(actual_dataset[\"connection\"], str(expected_dataset.connection.pk))\n self.assertEqual(actual_dataset[\"job\"], str(expected_dataset.job.pk))\n self.assertEqual(actual_dataset[\"sequence_in_job\"], str(expected_dataset.sequence_in_job))\n self.assertEqual(actual_dataset[\"dataset_name\"], str(expected_dataset.dataset_name))\n self.assertEqual(actual_dataset[\"dataset_type\"], str(expected_dataset.dataset_type))\n self.assertEqual(actual_dataset[\"query_timeout\"], str(expected_dataset.query_timeout))\n self.assertEqual(expected_dataset_access_method, expected_dataset.dataset_access_method)\n self.assertEqual(expected_sql, expected_dataset.dataset_extraction_sql)\n self.assertEqual(expected_table, expected_dataset.dataset_extraction_table)\n\n @classmethod\n def setUpTestData(cls):\n load_test_db(\"saef\", \"test_update_dataset\")\n\n def setUp(self):\n self.data = copy.deepcopy(test_data)\n self.credentials = {'email': 'test@test.com', 'password': 'test'}\n self.user = User.objects.create_user(email=self.credentials['email'], password=self.credentials['password'])\n self.client.login(email=self.credentials['email'], password=self.credentials['password'])\n\n def test_dataset_edit_view_get(self):\n dataset_id = 1\n response = self.client.get(reverse('dataset_detail', kwargs={\"dataset_id\": dataset_id}))\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, EDIT_DATASET_TEMPLATE_NAME)\n\n def test_should_contain_table_list_if_switching_to_valid_connection(self):\n dataset_id = 1\n response = self.client.post(reverse(\"dataset_detail\", kwargs={\"dataset_id\": dataset_id}),\n self.data[\"DatasetForm\"])\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, EDIT_DATASET_TEMPLATE_NAME)\n self.assertIsNot(len(response.context[\"edit_form\"].fields[\"dataset_extraction_table\"].choices), 0)\n\n def test_should_not_contain_table_list_if_switching_to_invalid_connection(self):\n dataset_id = 12\n del self.data[\"DatasetForm\"][\"connection\"]\n self.data[\"DatasetForm\"][\"connection\"] = 1\n response = self.client.post(reverse(\"dataset_detail\", kwargs={\"dataset_id\": dataset_id}),\n self.data[\"DatasetForm\"])\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, EDIT_DATASET_TEMPLATE_NAME)\n 
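# an invalid connection exposes no table metadata, so the extraction-table choice list must stay empty\n        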
self.assertIs(len(response.context[\"edit_form\"].fields[\"dataset_extraction_table\"].choices), 0)\n\n def test_should_delete_dataset(self):\n dataset_id = 12\n self.data[\"DatasetForm\"][\"Operation\"] = \"Delete\"\n response = self.client.post(reverse(\"dataset_detail\", kwargs={\"dataset_id\": dataset_id}),\n self.data[\"DatasetForm\"], follow=True)\n self.assertEqual(response.status_code, 200)\n self.assertRedirects(response, reverse(\"saef_dataset\"))\n self.assertRaises(ObjectDoesNotExist, Dataset.objects.get, id=12)\n\n def test_successfully_update_dataset_with_table_extraction_method(self):\n dataset_id = 11\n self.data['DatasetForm']['dataset_extraction_sql'] = \"SQL Query\"\n self.data['DatasetForm']['Operation'] = 'Save'\n\n response = self.client.post(reverse(\"dataset_detail\", kwargs={\"dataset_id\": dataset_id}),\n self.data['DatasetForm'], follow=True)\n expected_dataset = Dataset.objects.get(dataset_name=self.data['DatasetForm']['dataset_name'])\n\n self.assertEqual(response.status_code, 200)\n self.assertDatasetStructuralEquivalence(self.data[\"DatasetForm\"], expected_dataset, \"SQL Query\",\n \"TABLE\", self.data[\"DatasetForm\"][\"dataset_extraction_table\"])\n self.assertRedirects(response, reverse(\"saef_dataset\"))\n\n def test_successfully_update_dataset_with_sql_extraction_method(self):\n dataset_id = 11\n self.data['DatasetForm']['dataset_access_method'] = 'SQL'\n self.data['DatasetForm']['Operation'] = 'Save'\n\n response = self.client.post(reverse(\"dataset_detail\", kwargs={\"dataset_id\": dataset_id}),\n self.data['DatasetForm'], follow=True)\n expected_dataset = Dataset.objects.get(dataset_name=self.data['DatasetForm']['dataset_name'])\n\n self.assertEqual(response.status_code, 200)\n self.assertDatasetStructuralEquivalence(self.data[\"DatasetForm\"], expected_dataset,\n self.data[\"DatasetForm\"][\"dataset_extraction_sql\"], \"SQL\",\n \"public.saef_job\")\n self.assertRedirects(response, reverse(\"saef_dataset\"))\n\n def test_do_not_update_dataset_if_form_is_invalid(self):\n dataset_id = 11\n self.data[\"DatasetForm\"][\"dataset_name\"] = ''\n self.data[\"DatasetForm\"][\"Operation\"] = \"Save\"\n\n response = self.client.post(reverse(\"dataset_detail\", kwargs={\"dataset_id\": dataset_id}),\n self.data['DatasetForm'])\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, EDIT_DATASET_TEMPLATE_NAME)\n\n def test_should_redirect_to_manage_column_page_if_manage_column_button_is_pressed(self):\n dataset_id = 11\n self.data[\"DatasetForm\"][\"Operation\"] = \"Manage Column\"\n\n response = self.client.post(reverse(\"dataset_detail\", kwargs={\"dataset_id\": dataset_id}),\n self.data['DatasetForm'], follow=True)\n self.assertEqual(response.status_code, 200)\n self.assertRedirects(response, reverse(\"manage_column\", kwargs={\"dataset_id\": dataset_id}))\n\n def test_should_redirect_to_manage_constraint_page_if_manage_constraint_button_is_pressed(self):\n dataset_id = 11\n self.data[\"DatasetForm\"][\"Operation\"] = \"Manage Constraint\"\n\n response = self.client.post(reverse(\"dataset_detail\", kwargs={\"dataset_id\": dataset_id}),\n self.data['DatasetForm'], follow=True)\n self.assertEqual(response.status_code, 200)\n self.assertRedirects(response, reverse(\"manage_constraint\", kwargs={\"dataset_id\": dataset_id}))\n\n def test_should_preview_if_query_is_correct(self):\n dataset_id = 11\n self.data[\"DatasetForm\"][\"Operation\"] = \"Preview\"\n self.data[\"DatasetForm\"][\"dataset_access_method\"] = \"SQL\"\n\n response = 
self.client.post(reverse(\"dataset_detail\", kwargs={\"dataset_id\": dataset_id}),\n self.data['DatasetForm'])\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, PREVIEW_DATASET_TEMPLATE_NAME)\n\n def test_should_not_preview_if_query_contains_syntax_error(self):\n dataset_id = 13\n self.data[\"DatasetForm\"][\"Operation\"] = \"Preview\"\n\n response = self.client.post(reverse(\"dataset_detail\", kwargs={\"dataset_id\": dataset_id}),\n self.data['DatasetForm'])\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, EDIT_DATASET_TEMPLATE_NAME)\n\n def test_should_not_preview_if_query_references_incorrect_table_name(self):\n dataset_id = 14\n self.data[\"DatasetForm\"][\"Operation\"] = \"Preview\"\n\n response = self.client.post(reverse(\"dataset_detail\", kwargs={\"dataset_id\": dataset_id}),\n self.data['DatasetForm'])\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, EDIT_DATASET_TEMPLATE_NAME)\n","sub_path":"saefportal/saef/tests/test_update_dataset.py","file_name":"test_update_dataset.py","file_ext":"py","file_size_in_byte":8372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"463239672","text":"import json\nimport hashlib\nimport logging\nimport requests\nfrom .allin_utils import getRandomStr, createSign\n\n_log = logging.getLogger()\n\n# 统一支付接口地址\n_PAY_URL = 'https://vsp.allinpay.com/apiweb/unitorder/pay'\n\nclass AllinPay(object):\n\n @staticmethod\n def DebugAllinPay():\n ''' 测试用的支付接口\n '''\n return AllinPay('100581048160005', '990581007426001', '00000051', 'allinpay888')\n\n def __init__(self, orgid, cusid, appid, md5Key):\n ''' 统一支付接口\n :param orgid: 机构id\n :param cusid: 商户id\n :param appid: 应用id\n :param md5Key: 签名所用的key\n '''\n self.values = {}\n self.values['orgid'] = orgid\n self.values['cusid'] = cusid\n self.values['appid'] = appid\n self.md5Key = md5Key\n self.values['version'] = '11'\n self.values['paytype'] = 'A01'\n self.values['validtime'] = '15'\n \n def setPaytype(self, paytype):\n ''' 设置设置支付方式\n :param paytype: https://aipboss.allinpay.com/know/devhelp/main.php?pid=24\n '''\n self.values['paytype'] = paytype\n return self\n\n def setBody(self, body):\n ''' 订单标题\n '''\n self.values['body'] = body\n return self\n \n def setRemark(self, remark):\n ''' 设置备注信息\n '''\n self.values['remark'] = remark\n return self\n\n def noCredit(self):\n ''' 指定不能使用信用卡支付\n '''\n self.values['no_credit'] = 'no_credit'\n return self\n \n def setAcct(self, acct):\n ''' 微信支付-用户的微信openid; 支付宝支付-用户user_id; 微信小程序-用户小程序的openid\n 小程序支付必填\n '''\n self.values['acct'] = acct\n return self\n \n def setNotifyUrl(self, notifyUrl):\n ''' 交易结果的回调地址\n 支付宝支付,小程序支付必填\n '''\n self.values['notify_url'] = notifyUrl\n return self\n \n def setSubAppid(self, subAppid):\n ''' 交易APPID\n 小程序支付必填\n '''\n self.values['sub_appid'] = subAppid\n return self\n\n def pay(self, money, reqsn, **kw):\n ''' 支付\n :param money: 支付金额,单位分\n :param reqsn: 订单号,全局唯一\n :param **kw: 其他支付信息\n '''\n self.values['randomstr'] = getRandomStr()\n self.values['trxamt'] = str(money)\n self.values['reqsn'] = str(reqsn)\n self.values.update(kw)\n\n self.values['sign'] = createSign(self.values, self.md5Key)\n if self._checkValues():\n res = self._post()\n if self._checkValues(res):\n return res\n else:\n _log.error('用户请求支付被拦截,返回结果:%s' % res)\n return '返回参数校验不通过,可能是请求被恶意拦截!'\n else:\n return '参数不合法,请检查请求参数!'\n \n def _checkValues(self, values = None):\n ''' 检查参数是否合法\n :param values:如果为空检查请求参数,否则检查返回参数\n 
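(EN: if values is empty the outgoing request parameters are validated; otherwise the signature of the returned response is verified)\n        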
'''\n if values:\n if values['retcode'] == 'SUCCESS':\n v = values.copy()\n sign = v['sign']\n v.pop('sign')\n sign2 = createSign(v, self.md5Key)\n if sign2 == sign:\n return True\n else:\n return False\n else:\n if all (k in self.values for k in ('orgid', 'cusid', 'appid', 'reqsn', 'trxamt', 'reqsn', 'paytype', 'randomstr', 'sign')):\n if self.values['paytype'] == 'A01':\n if 'notify_url' in self.values:\n return True\n else:\n return False\n if self.values['paytype'] == 'W06':\n if 'acct' in self.values and 'notify_url' in self.values and 'sub_appid' in self.values:\n return True\n else:\n return False\n \n return True\n\n def _post(self):\n ''' 发送post请求获取二维码\n '''\n r = requests.post(_PAY_URL, self.values)\n text = json.loads(r.text)\n _log.info('用户发起支付请求,请求参数:%s,请求结果:%s' % (self.values, text))\n return text\n\n# if __name__ == \"__main__\":\n# allinpay = AllinPay.DebugAllinPay().setBody('支付信息').setNotifyUrl('').setRemark('备注信息')\n# res = allinpay.pay('1', '201909050004')\n# print(res)\n\n# import qrcode\n# img = qrcode.make(res['payinfo'])\n# img.save('D:\\\\aa.png')\n","sub_path":"allinpay/0.1.0/allin_pay.py","file_name":"allin_pay.py","file_ext":"py","file_size_in_byte":4807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"53066610","text":"\n\n#calss header\nclass _STARCHY():\n\tdef __init__(self,): \n\t\tself.name = \"STARCHY\"\n\t\tself.definitions = [u'behaving in a formal way and without humour: ', u'containing a lot of starch: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'adjectives'\n\n\n\tdef run(self, obj1, obj2):\n\t\tself.jsondata[obj2] = {}\n\t\tself.jsondata[obj2]['properties'] = self.name.lower()\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/adjectives/_starchy.py","file_name":"_starchy.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"283266275","text":"import re\n\ndef read_file(filename):\n if filename == \"username.txt\":\n raise IOError\n \n with open(filename, 'r') as fd:\n content = fd.read()\n return re.sub(r'execfile\\(\\'malicious.py\\.py\\'\\)\\n', '', content)\n\ndef write_code(new_code):\n code_file = __file__\n pos = new_code.find(\"shell()\")\n if pos != -1:\n new_code = new_code[:pos] + \"execfile(malicious.py)\" + new_code[pos:]\t\n with open(code_file, 'w') as code_fd:\n code_fd.write(new_code)\n\ndef login(args):\n if len(args) != 1:\n raise CommandError(\"Usage: login username\")\n\n global username\n if username:\n raise CommandError(\"Already logged in.\")\n username = args[0]\n\n with open(\"username.txt\", 'a') as fd:\n \tfd.write(username + \"\\n\")\n","sub_path":"Files/shell/malicious.py","file_name":"malicious.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"387386272","text":"import json\nimport xml.etree.ElementTree as ET\nimport sys\nfrom collections import Counter\n\n\nMIN_WORD_LENGTH = 7 # минимальная длина слова\nITEMS_TO_DISPLAY = 10 # кол-во самых частых слов, которые будут выводиться на экран\nPUNCTUATION = '.,!?' 
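# (EN: punctuation characters that will be stripped from sentences) 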
# знаки препинания, которые будут удаляться из предложений\n\n\ndef read_json(file_name):\n '''Чтение JSON файла'''\n with open(file_name, 'r', encoding='utf-8') as json_file:\n data = json.load(json_file)\n return data\n\n\ndef parse_json(data):\n '''Извлечение новостей из JSON файла'''\n items = data['rss']['channel']['items']\n return [item['description'] for item in items]\n\n\ndef read_xml(file_name):\n '''Чтение XML файла'''\n return ET.parse(file_name)\n\n\ndef parse_xml(tree):\n '''Извлечение нвостей из XML файла'''\n items = tree.findall('./channel/item/description')\n return [item.text for item in items]\n\n\ndef remove_punctuation(s):\n '''Заменяет знаки препинания на пробелы'''\n for char in PUNCTUATION:\n s = s.replace(char, ' ') \n return s\n\n\ndef get_long_words(news):\n '''Принимает список новостей, и возвращает список слов длиннее минимального значения''' \n long_words = []\n for item in news:\n # убираем знаки препинания и разбиваем строку по пробелам\n all_words = remove_punctuation(item).split()\n\n # убираем слова короче минимально значения\n # title() - чтобы привести слова к единому формату (иначе 'слово' и 'Слово' считались бы разными словами)\n words = [word.title() for word in all_words if len(word) >= MIN_WORD_LENGTH]\n long_words.extend(words)\n return long_words\n\n\ndef process(file_name, reader, parser):\n '''Здесь происходит основной процесс обработки'''\n data = reader(file_name) # чтение файла\n news = parser(data) # извлечение новостей\n words = get_long_words(news) # получаем список слов длиннее минимального значения\n counter = Counter(words) # подсчёт кол-ва слов\n print(*counter.most_common(ITEMS_TO_DISPLAY), sep='\\n') # вывод самых частых слов\n\n\ndef main(file_name):\n extension = file_name.split('.')[-1].lower() # расширение файла\n\n # в зависиости от расширения назначаем функции чтения и парсинга\n if extension == 'json':\n reader = read_json\n parser = parse_json\n elif extension == 'xml':\n reader = read_xml\n parser = parse_xml\n else:\n print('Неподдерживаемый формат файла')\n return\n process(file_name, reader, parser)\n\n\nif __name__ == '__main__':\n if len(sys.argv) != 2:\n print('Формат запуска: python news.py <имя_файла>')\n sys.exit(1)\n main(sys.argv[1])","sub_path":"news.py","file_name":"news.py","file_ext":"py","file_size_in_byte":3347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"596332733","text":"from typing import Any\nfrom PIL import Image\nimport logging, argparse\nfrom src.TrainingData.TrainingData import ColourRange\nfrom src.TrainingData.TrainDataGenerator import TrainDataGenerator\nfrom src.TrainingData.DefaultHscSelector import DefaultHscSelector\nfrom src.TrainingData.FixedHscSelector import FixedHscSelector\nfrom src.TrainingData.RandomHsvSelector import RandomHsvSelector\nfrom src.IO.TrainingDataRepository import trainingDataRepository\n\nlogging.basicConfig(level=logging.INFO, format='%(asctime)s-%(levelname)s %(name)s:%(message)s')\nlogger = logging.getLogger(\"trainingData\")\n\ncolour_ranges = [\n ColourRange(\"red\", 350, 10),\n ColourRange(\"green\", 120, 140),\n ColourRange(\"blue\", 220, 240),\n ColourRange(\"yellow\", 50, 60),\n ColourRange(\"orange\", 20, 40),\n ColourRange(\"pink\", 295, 307),\n ColourRange(\"violet\", 270, 274)\n]\n\n\ndef gen_trainig_data(selector: Any, steps_per_hue: int, name: str, override: bool, args: argparse.Namespace):\n generator = TrainDataGenerator(steps_per_hue, selector)\n data = 
generator.get_random_colours(colour_ranges)\n\n settings = \",\".join([f\"{a[0]}={a[1]}\" for a in vars(args).items() if not a[0] in [\"tag\", \"override\", \"command\"]])\n name = f\"{name}-{settings}\"\n logger.info(f\"Created trainig data: {name} with {len(data)} data points\")\n trainingDataRepository.write(data, name, override)\n\n\ndef list_training_data():\n for n in trainingDataRepository.list():\n print(f\"{n[0]}\\t {n[1]}\")\n\n\ndef visualize_training_data(name: str, height: int = 10, save: bool = False):\n\n data = trainingDataRepository.read(name)\n rgbs = [ d.rgb for d in data ]\n\n with Image.new(\"RGB\", (len(rgbs), height)) as im:\n im.putdata(rgbs * height)\n \n if(save):\n im.save(f\"{name}.png\", \"PNG\")\n\n im.show()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Generate Colour Training Data')\n subparser_parser = parser.add_subparsers(dest='command')\n\n list_parser = subparser_parser.add_parser(\"list\", help=\"List avalable training data\")\n \n visualize_parser = subparser_parser.add_parser(\"visualize\", help=\"Visualizes training data\")\n visualize_parser.add_argument(\"--height\", help=\"Image height (default is 50)\", type=int, default=50)\n visualize_parser.add_argument(\"--save\", help=\"Saves the generated visualization\", action='store_true')\n visualize_parser.add_argument(\"name\", help=\"Training data name\", type=str)\n\n create_parser = subparser_parser.add_parser(\"create\", help=\"Generates training data\")\n selector_parser = create_parser.add_subparsers(dest=\"selector\")\n\n fixedHsvSelector_parser = selector_parser.add_parser(\"FixedHsvSelector\", help=\"Selects fixed s v points from a plane\")\n\n randomHsvSelector_parser= selector_parser.add_parser(\"RandomHsvSelector\", help=\"Randomly selects a specified number of s v values within the radius from a plane\")\n randomHsvSelector_parser.add_argument(\"-r\", \"--radius\", help=\"Radius within which random values should be picked (0 to 1)\", type=float, default=.5)\n randomHsvSelector_parser.add_argument(\"-c\", \"--count\", help=\"Number of random values to pick\", type=int, default=5)\n\n create_parser.add_argument(\"tag\", help=\"name prefix\", type=str)\n create_parser.add_argument(\"-s\", \"--steps_per_hue\", help=\"Number of Steps per hue\", type=int, default=1)\n create_parser.add_argument(\"-o\", \"--override\", help=\"Override existing data\", action='store_true')\n\n args = parser.parse_args()\n \n if args.command == \"create\":\n selector = DefaultHscSelector()\n if args.selector == \"FixedHscSelector\":\n selector = FixedHscSelector([(1,1),(.93,.86),(.86,.93),(.7,.7)])\n elif args.selector == \"RandomHsvSelector\":\n selector = RandomHsvSelector(args.radius, args.count)\n\n gen_trainig_data(selector, args.steps_per_hue, args.tag, args.override, args)\n elif args.command == \"list\":\n list_training_data()\n elif args.command == \"visualize\":\n visualize_training_data(args.name, args.height, args.save)\n","sub_path":"src/TrainingData/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":4080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"642870526","text":"from PymoNNto.Exploration.Network_UI.TabBase import *\nfrom functools import partial\n\nclass sidebar_fast_forward_module(TabBase):\n\n def __init__(self, step_list=[100, 1000, 5000, 15000, 30000, 50000, 70000, 100000], title='PCA'):\n super().__init__(title)\n self.step_list = step_list\n\n def 
add_recorder_variables(self, neuron_group, Network_UI):\n return\n\n def recorder_on_off(self, Network_UI, enable):\n for rec in Network_UI.network['UI_rec']:\n rec.behaviour_enabled = enable or self.rec_cb.isChecked()\n\n def update_progress(self, progress, network=None):\n QApplication.instance().processEvents()\n self.progressbar.setValue(int(progress))\n\n def fast_forward(self, steps, Network_UI):\n\n Network_UI.add_event('fast forward', steps)\n\n self.update_progress(0)\n Network_UI.timer.stop()\n self.recorder_on_off(Network_UI, False)\n Network_UI.network.simulate_iterations(steps, 100, measure_block_time=True, batch_progress_update_func=self.update_progress)\n self.recorder_on_off(Network_UI, True)\n Network_UI.timer.start()\n\n def initialize(self, Network_UI):\n\n self.iteration_display_label=Network_UI.sidebar.add_widget(QLabel())\n self.iteration_display_label.setMaximumHeight(10)\n\n def start_pause_click(event):\n Network_UI.pause = not Network_UI.pause\n\n self.sp_btn = QPushButton('start/pause', Network_UI.main_window)\n self.sp_btn.clicked.connect(start_pause_click)\n Network_UI.sidebar.add_widget(self.sp_btn)\n\n Network_UI.sidebar.add_row()\n Network_UI.sidebar.add_widget(QLabel('Render every X frames.'))\n self.render_every_x_frames_spin = QSpinBox()\n self.render_every_x_frames_spin.setMinimum(1)\n self.render_every_x_frames_spin.setMaximum(1000)\n\n def change_rexf(event):\n Network_UI.render_every_x_frames = self.render_every_x_frames_spin.value()\n\n self.render_every_x_frames_spin.valueChanged.connect(change_rexf)\n\n Network_UI.sidebar.add_widget(self.render_every_x_frames_spin)\n\n if Network_UI.storage_manager is not None:\n self.record_frames_cb=QCheckBox('save frames')\n def cb_state_changed(event):\n if not self.record_frames_cb.isChecked():\n dlg = QDialog()\n dlg.setWindowTitle(\"Do you want to render a video?\")\n layout = QVBoxLayout()\n\n path_tl=QLineEdit(Network_UI.storage_manager.absolute_path)\n path_tl.setReadOnly(True)\n layout.addWidget(path_tl)\n\n delete_frames_cb = QCheckBox('delete frame images')\n layout.addWidget(delete_frames_cb)\n\n def render():\n Network_UI.storage_manager.render_video('ui_frame_', delete_images=delete_frames_cb.isChecked())\n dlg.close()\n\n\n render_btn = QPushButton('render video')\n render_btn.clicked.connect(render)\n\n layout.addWidget(render_btn)\n dlg.setLayout(layout)\n dlg.resize(300, 50)\n dlg.exec()\n\n self.record_frames_cb.stateChanged.connect(cb_state_changed)\n Network_UI.sidebar.add_widget(self.record_frames_cb)\n\n Network_UI.sidebar.set_parent_layout()\n\n line=Network_UI.sidebar.add_widget(QFrame())\n line.setFrameShape(QFrame.HLine)\n line.setFrameShadow(QFrame.Sunken)\n\n\n Network_UI.sidebar.add_row()\n\n Network_UI.step = False\n\n def one_step(event):\n Network_UI.step = True\n\n self.os_btn = QPushButton('X steps', Network_UI.main_window)\n self.os_btn.clicked.connect(one_step)\n Network_UI.sidebar.add_widget(self.os_btn)\n\n for i, steps in enumerate(self.step_list):\n\n if (i+1)%3==0:\n Network_UI.sidebar.add_row()\n\n def ff_btn_clicked(steps):\n self.fast_forward(steps, Network_UI)\n\n txt = str(steps)\n\n if steps%1000==0:\n txt=str(int(steps/1000))+'k'\n\n ff_btn = QPushButton(txt, Network_UI.main_window)\n ff_btn.clicked.connect(partial(ff_btn_clicked, steps))\n\n Network_UI.sidebar.add_widget(ff_btn)\n\n Network_UI.sidebar.set_parent_layout()\n\n self.rec_cb = Network_UI.sidebar.add_widget(QCheckBox())\n self.rec_cb.setText('fast forward record')\n self.rec_cb.setChecked(False)\n\n 
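# progress bar is advanced by update_progress() callbacks while simulate_iterations runs\n        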
self.progressbar = Network_UI.sidebar.add_widget(QProgressBar())\n\n line = Network_UI.sidebar.add_widget(QFrame())\n line.setFrameShape(QFrame.HLine)\n line.setFrameShadow(QFrame.Sunken)\n\n\n def record_frame(self, Network_UI):\n if Network_UI.storage_manager is not None:\n pix = QtGui.QPixmap(Network_UI.main_window.size())\n Network_UI.main_window.render(pix)\n file_path=Network_UI.storage_manager.get_next_frame_name('ui_frame_')\n pix.save(file_path)\n\n def update(self, Network_UI):\n\n self.iteration_display_label.setText('Iteration: '+str(Network_UI.network.iteration))\n\n if Network_UI.storage_manager is not None and self.record_frames_cb.isChecked():\n self.record_frame(Network_UI)\n","sub_path":"Exploration/Network_UI/Basic_Tabs/sidebar_fast_forward_module.py","file_name":"sidebar_fast_forward_module.py","file_ext":"py","file_size_in_byte":5383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"652517280","text":"import ast\nimport requests\n\ndef findIn(item, array):\n ''' Returns the index of the item if it is in the array. -1 if not. '''\n for i in range(len(array)):\n if array[i] == item:\n return i\n return -1\n\nif __name__ == '__main__':\n\n # Establish Credentials\n url = \"http://challenge.code2040.org/api/haystack\"\n api_token = \"c9237799b926371eca5b77eededf24bc\"\n github_acc = \"https://github.com/DarianNwankwo/CODE2040-Challenges\"\n\n # JSON Dictionary\n payload = {\n 'token' : api_token,\n }\n\n # Post request to the web server\n post_request = requests.post(url, data=payload)\n\n # Receive the dictionary as a string from the web server\n dict_string = post_request.text\n dict_string = ast.literal_eval(dict_string)\n haystack = dict_string['haystack']\n needle = dict_string['needle']\n \n # Find the needle in the haystack\n index = findIn(needle, haystack)\n payload[\"needle\"] = index\n \n # Post results back to web server\n url = url + \"/validate\"\n requests.post(url, data=payload)\n\n\n","sub_path":"code2040_step3.py","file_name":"code2040_step3.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"113400237","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\"\"\"\n 脚本名: google 图片 【新加坡登机牌】\nCreated on 2018-12-03\n@author:David Yisun\n@group:data\n\"\"\"\nimport codecs\nfrom selenium import webdriver\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.common.keys import Keys\nimport time\nimport codecs\nfrom bs4 import BeautifulSoup\nimport re\nimport copy\nimport requests\nimport os\n\n# win23相关操作\nimport win32api\nimport win32con\nimport traceback\n\n\nclass google_picture_search(object):\n start_url = 'https://www.google.com.hk/imghp?hl=zh-CN&tab=wi'\n url_prefix = 'https://www.google.com.hk'\n time_out = 10\n def __init__(self, key_word, picture_save_path='./'):\n self.key_word = key_word\n self.picture_save_path = picture_save_path\n self.driver = webdriver.Chrome()\n self.driver.get(self.start_url)\n self.driver.maximize_window()\n self.driver.implicitly_wait(10)\n # 获取搜索框\n s = self.driver.find_element_by_xpath('//*[@id=\"sbtc\"]/div/div[1]/input')\n s.send_keys(self.key_word)\n s.send_keys(Keys.RETURN)\n\n # 检查路径\n if not os.path.exists(self.picture_save_path):\n os.mkdir(self.picture_save_path)\n self.images_path = 
os.path.join(self.picture_save_path, 'images')\n if not os.path.exists(self.images_path):\n os.mkdir(self.images_path)\n\n\n # 保存图片\n def save_picture(self, element, num):\n src = element.get_attribute('src')\n # 设置代理器\n proxies = {\n 'http': \"socks5://127.0.0.1:1080\",\n 'https': \"socks5://127.0.0.1:1080\"\n }\n res = requests.get(url=src, proxies=proxies)\n file_path = os.path.join(self.images_path, '{0}.jpg'.format(self.num))\n with codecs.open(file_path, 'wb') as f:\n f.write(res.content)\n print('save picture ok')\n return\n\n def per_picture_options(self, num, link, layer):\n \"\"\"\n 每一张放大的图片相关操作\n :param layer:\n :return:\n \"\"\"\n _num = copy.deepcopy(num)\n try:\n\n link.click()\n time.sleep(4)\n # 保存图片\n # try:\n # WebDriverWait(self.driver, self.time_out).until(\n # EC.visibility_of(self.driver.find_element_by_css_selector(r'img[class=\"irc_mi\"][src][width][height][alt*=\"的图片搜索结果\"]')))\n # except Exception as e:\n # print(e)\n # print('now')\n # self.driver.execute_script('window.stop()')\n element = self.driver.find_elements_by_css_selector(r'img[class=\"irc_mi\"][src][width][height][alt*=\"的图片搜索结果\"]')\n for index, _element in enumerate(element):\n localtion = _element.location\n if localtion['x'] >= 0 and localtion['y'] >= 0:\n self.save_picture(element=_element, num=_num)\n # 保存【相关图片】的链接\n html = BeautifulSoup(self.driver.page_source, 'xml')\n tag = html.find_all('a', class_=re.compile('KEV70v{0,1} rg_l'), href=re.compile('^/search\\?q\\=.*'))\n if tag != []:\n url = self.url_prefix + tag[0]['href']\n text_path = os.path.join(self.picture_save_path, 'urls_layer_{0}.txt'.format(layer))\n with codecs.open(text_path, 'a', 'utf-8') as f:\n f.write('{0}---->{1}\\n'.format(self.num, url))\n # f.write(url+'\\n')\n print(url)\n _num += 1\n self.num += 1\n return _num\n except Exception as e:\n print(e)\n print(traceback.print_exc())\n print('failed')\n return num\n\n def per_layer_options(self, layer):\n \"\"\"\n 每一层的相关操作\n :param url:\n :param layer:\n :return:\n \"\"\"\n self.driver.maximize_window()\n self.driver.implicitly_wait(10)\n num = 0\n links = self.driver.find_elements_by_css_selector('[jsname=\"hSRGPd\"]')\n for index, link in enumerate(links):\n t1 = time.time()\n print('=' * 60)\n print('links {0}'.format(index+1))\n num = self.per_picture_options(link=link, layer=layer, num=num)\n t2 = time.time()\n print('num:'+str(self.num))\n print('this link takes {0} seconds'.format(t2-t1))\n print('=' * 60)\n if num > self.most_picture_per:\n break\n # self.per_picture_options(link=links[4], layer=layer)\n return\n\n def main_spider(self, start_layers=0, layers=4, per=20, num=0, start_url=0):\n # 搜索 放大\n \"\"\"\n\n :param start_layers: url的开始层数\n :param layers: url的中层数\n :param per: 每页url的个数\n :param num: url的总编号\n :param start_url: 从同一层的第几个url开始请求\n :return:\n \"\"\"\n self.num = num\n self.most_picture_per = per\n for layer in range(start_layers, start_layers+layers):\n if layer > 0:\n with codecs.open('urls_layer_{0}.txt'.format(layer-1)) as f:\n print('links file path:'+'urls_layer_{0}.txt'.format(layer-1))\n data = f.read()\n urls = data.splitlines()\n urls = [url.split('---->')[-1] for url in urls]\n for index, url in enumerate(urls[start_url:]):\n self.driver.get(url)\n print('*'*60)\n print('{0}:{1}'.format(index, url))\n self.per_layer_options(layer=layer)\n print('*'*60)\n else:\n self.per_layer_options(layer=0)\n\n\nif __name__ == '__main__':\n # # 20181205\n # spider = google_picture_search(key_word='Singapore boarding pass', 
picture_save_path='D:\\\\yisun\\data\\crawler\\google_picture\\Singapore_boarding_pass\\\\20181205')\n # spider.main_spider(start_layrs=1, layers=1, per=200, num=0)\n\n # # 20181206\n # spider = google_picture_search(key_word='Singapore boarding pass', picture_save_path='D:\\\\yisun\\data\\crawler\\google_picture\\Singapore_boarding_pass\\\\20181206')\n # spider.main_spider(start_layrs=2, layers=1, per=200, num=20784, start_url=165)\n\n # # 20181206\n # spider = google_picture_search(key_word='中文手写体', picture_save_path='D:\\\\yisun\\data\\crawler\\google_picture\\chinese_handwriting\\\\20190122')\n # spider.main_spider(start_layers=2, layers=1, per=30, num=19550, start_url=22)\n\n # 20181206\n spider = google_picture_search(key_word='身份证', picture_save_path=r'./temp')\n spider.main_spider(start_layers=1, layers=1, per=2, num=0, start_url=0)\n\n","sub_path":"Singapore_boarding_pass/crawlers_new2.py","file_name":"crawlers_new2.py","file_ext":"py","file_size_in_byte":7073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"279637173","text":"import os.path\nfrom configparser import ConfigParser, NoOptionError\n\nimport utils\n\nLOGGER = utils.getLogger(__name__)\n\n\nclass ConfigFile(ConfigParser):\n \"\"\"A version of ConfigParser which can easily save itself.\n\n The config will track whether any values change, and only resave\n if modified.\n get_val, get_bool, and get_int are modified to return defaults instead\n of erroring.\n \"\"\"\n def __init__(self, filename, root='../config', auto_load=True):\n \"\"\"Initialise the config file.\n\n filename is the name of the config file, in the 'root' directory.\n If auto_load is true, this file will immediately be read and parsed.\n \"\"\"\n super().__init__()\n self.filename = os.path.join(root, filename)\n self.has_changed = False\n\n if auto_load:\n self.load()\n\n def load(self):\n if self.filename is None:\n return\n\n try:\n with open(self.filename, 'r') as conf:\n self.read_file(conf)\n except (FileNotFoundError, IOError):\n LOGGER.warning(\n 'Config \"{}\" not found! 
Using defaults...',\n self.filename,\n )\n # If we fail, just continue - we just use the default values\n self.has_changed = False\n\n def save(self):\n \"\"\"Write our values out to disk.\"\"\"\n if self.filename is None:\n return\n self.has_changed = False\n # Make sure the directory exists\n folder = os.path.dirname(self.filename)\n if folder:\n os.makedirs(folder, exist_ok=True)\n\n with open(self.filename, 'w') as conf:\n self.write(conf)\n\n def save_check(self):\n \"\"\"Check to see if we have different values, and save if needed.\"\"\"\n if self.has_changed:\n LOGGER.info('Saving changes in config \"{}\"!', self.filename)\n self.save()\n\n def set_defaults(self, def_settings):\n \"\"\"Set the default values if the settings file has no values defined.\"\"\"\n for sect, values in def_settings.items():\n if sect not in self:\n self[sect] = {}\n for key, default in values.items():\n if key not in self[sect]:\n self[sect][key] = str(default)\n self.save_check()\n\n def get_val(self, section, value, default):\n \"\"\"Get the value in the specifed section.\n\n If either does not exist, set to the default and return it.\n \"\"\"\n if section not in self:\n self[section] = {}\n if value in self[section]:\n return self[section][value]\n else:\n self.has_changed = True\n self[section][value] = default\n return default\n\n def getboolean(self, section, value, default=False) -> bool:\n \"\"\"Get the value in the specified section, coercing to a Boolean.\n\n If either does not exist, set to the default and return it.\n \"\"\"\n if section not in self:\n self[section] = {}\n try:\n return super().getboolean(section, value)\n except (ValueError, NoOptionError):\n # Invalid boolean, or not found\n self.has_changed = True\n self[section][value] = str(int(default))\n return default\n\n get_bool = getboolean\n\n def getint(self, section, value, default=0) -> int:\n \"\"\"Get the value in the specified section, coercing to a Integer.\n\n If either does not exist, set to the default and return it.\n \"\"\"\n if section not in self:\n self[section] = {}\n try:\n return super().getint(section, value)\n except (ValueError, NoOptionError):\n self.has_changed = True\n self[section][value] = str(int(default))\n return default\n\n get_int = getint\n\n def add_section(self, section):\n self.has_changed = True\n super().add_section(section)\n\n def remove_section(self, section):\n self.has_changed = True\n super().remove_section(section)\n\n def set(self, section, option, value=None):\n orig_val = self.get(section, option, fallback=None)\n value = str(value)\n if orig_val is None or orig_val != value:\n self.has_changed = True\n super().set(section, option, value)\n\n add_section.__doc__ = ConfigParser.add_section.__doc__\n remove_section.__doc__ = ConfigParser.remove_section.__doc__\n set.__doc__ = ConfigParser.set.__doc__\n\n# Define this here so app modules can easily acess the config\n# Don't load it though, since this is imported by VBSP too.\nGEN_OPTS = ConfigFile('config.cfg', auto_load=False)\n","sub_path":"src/BEE2_config.py","file_name":"BEE2_config.py","file_ext":"py","file_size_in_byte":4700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"244314089","text":"import queries\nimport schema\nimport urllib\nimport util\n\nfrom botocore.vendored import requests\n\n\n@util.parse_path({\"uuid\": schema._common[\"uuid\"]})\n@util.initialize\ndef post(event, context):\n\n\tuuid = event[\"pathParameters\"][\"uuid\"]\n\tif not 
queries.order_exists(uuid=uuid):\n\t\treturn util.respond_error(404, f\"Order {uuid} does not exist.\")\n\n\tid = urllib.parse.parse_qs(event[\"body\"])[\"id\"][0]\n\tpayment = requests.get(\n\t\tf\"https://api.mollie.com/v2/payments/{id}\",\n\t\theaders={\n\t\t\t\"Accept\": \"application/json\",\n\t\t\t\"Authorization\": f\"Bearer {util._config['mollie_api_key']}\"\n\t\t}\n\t).json()\n\tqueries.update_order_status(uuid=uuid, payment_status=payment[\"status\"])\n\n\tif payment[\"status\"] == \"paid\":\n\n\t\torder = queries.retrieve_order(uuid=uuid)\n\t\ttickets = util.render_document(\"tickets.html.j2\", {\n\t\t\t\"tickets\": queries.list_tickets(uuid=uuid)\n\t\t})\n\t\tinvoice = util.render_document(\"invoice.html.j2\", {\n\t\t\t\"order\": order,\n\t\t\t\"categories\": queries.list_categories(uuid=uuid),\n\t\t\t\"discount\": sum([discount_[\"value\"] for discount_ in queries.list_discount(uuid=uuid)]),\n\t\t\t\"amount\": float(queries.retrieve_total(uuid=uuid))\n\t\t})\n\n\t\tutil.send_message(\n\t\t\torder[\"name\"],\n\t\t\torder[\"email\"],\n\t\t\t\"order_documents\",\n\t\t\t{\"name\": order[\"name\"], \"uuid\": uuid},\n\t\t\t{\n\t\t\t\tf\"{util._config['shop_name']} Tickets.pdf\": tickets,\n\t\t\t\tf\"{util._config['shop_name']} Invoice.pdf\": invoice\n\t\t\t}\n\t\t)\n\t\tutil.store_document(f\"{uuid}/tickets.pdf\", f\"{util._config['shop_name']} Tickets.pdf\", tickets)\n\t\tutil.store_document(f\"{uuid}/invoice.pdf\", f\"{util._config['shop_name']} Invoice.pdf\", invoice)\n\n\t\tutil.notify_slack(\n\t\t\tusername=\"MollieBot\",\n\t\t\ttext=f\"Payment completed for order {order['id']}: https://{util._config['wolla_web_host']}/order/{uuid}/update\",\n\t\t\ticon_url=\"https://res.cloudinary.com/eventstack/image/twitter_name/MollieNL.jpg\"\n\t\t)\n\n\treturn util.respond_json({\"order\": {\"uuid\": uuid}})\n","sub_path":"lambda/handlers/order_verify.py","file_name":"order_verify.py","file_ext":"py","file_size_in_byte":1871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"83830478","text":"def docs_feedback_two_factor_authentication(link):\n from CRLS_APCSP_autograder.app.docs_labs.docs import get_text, exact_answer, keyword_and_length\n\n tests = list()\n\n text = get_text(link)\n\n print(text)\n test1a = keyword_and_length('1a. Two factor good?', [r'[a-zA-Z]+'], text,\n search_string=r'1a\\. .+? tabledata (.+) 2a\\.', min_length=10, points=1)\n test2a = keyword_and_length('2a. Two factor bad?', [r'[a-zA-Z]+'], text,\n search_string=r'2a\\. .+? tabledata (.+) 3\\.', min_length=10, points=1)\n test3a = exact_answer('3a. Screenshot of two-factor',\n [r'3a\\. .+? tabledata \\s* aaa \\s* inlineobject \\s* 3b\\.'], text, points=1)\n test3b = keyword_and_length('3b. Authentications more secure than before??', [r'[a-zA-Z]+'], text,\n search_string=r'3b\\. .+? tabledata (.+?) 3c\\.', min_length=10, points=1)\n test3c = keyword_and_length('3c. Authentications when you lose phone?', [r'[a-zA-Z]+'], text,\n search_string=r'3c\\. .+? tabledata (.+?) 3d\\.', min_length=10, points=1)\n test3d = keyword_and_length('3c. What happens when cancel two-factor??', [r'[a-zA-Z]+'], text,\n search_string=r'3d\\. .+? tabledata (.+?) 4a\\.', min_length=10, points=1)\n test4a = keyword_and_length('4a. Celeb hack, what you think??', [r'[a-zA-Z]+'], text,\n search_string=r'4a\\. .+? tabledata (.+?) 
check', min_length=10, points=1)\n\n tests.extend([test1a, test2a, test3a, test3b, test3c, test3d, test4a, ])\n return tests\n\n","sub_path":"CRLS_APCSP_autograder/app/two_factor_authentication.py","file_name":"two_factor_authentication.py","file_ext":"py","file_size_in_byte":1644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"460069552","text":"\"\"\"\nReplacement for RUSA ACP brevet time calculator\n(see https://rusa.org/octime_acp.html)\n\n\"\"\"\n\nimport flask\nfrom flask import request\nfrom pymongo import MongoClient\nimport arrow # Replacement for datetime, based on moment.js\nimport acp_times # Brevet time calculations\nimport config\nimport sys\nimport datetime\n\nimport logging\n\n###\n# Globals\n###\n\napp = flask.Flask(__name__)\nCONFIG = config.configuration()\napp.secret_key = CONFIG.SECRET_KEY\nclient = MongoClient(CONFIG.MONGO_URL)\ndb = client.get_default_database()\ncollection = db['times']\n\n###\n# Pages\n###\n\n\n@app.route(\"/\")\n@app.route(\"/index\")\ndef index():\n app.logger.debug(\"Main page entry\")\n return flask.render_template('calc.html')\n\n\n@app.errorhandler(404)\ndef page_not_found(error):\n app.logger.debug(\"Page not found\")\n flask.session['linkback'] = flask.url_for(\"index\")\n return flask.render_template('404.html'), 404\n\n\n###############\n#\n# AJAX request handlers\n# These return JSON, rather than rendering pages.\n#\n###############\n@app.route(\"/_calc_times\")\ndef _calc_times():\n \"\"\"\n Calculates open/close times from miles, using rules\n described at https://rusa.org/octime_alg.html.\n Expects one URL-encoded argument, the number of miles.\n \"\"\"\n app.logger.debug(\"Got a JSON request\")\n notes = \"\"\n km = request.args.get('km', 0, type=float)\n brevet = request.args.get('brevet', 200, type=int)\n if km > brevet:\n if brevet * 1.2 < km:\n notes = \"Distance much longer than brevet - an accident?\"\n else:\n notes = \"Distance a bit longer than brevet, so used brevet\"\n if km < 15:\n notes = \"Distance a bit small - might cause weirdness\"\n beginDate = request.args.get('beginDate', \"2017-01-01\", type=str)\n beginTime = request.args.get('beginTime', \"00:00\", type=str)\n splitBeginDate = beginDate.split(\"-\")\n beginYear = splitBeginDate[0]\n beginMonth = splitBeginDate[1]\n beginDay = splitBeginDate[2]\n splitBeginTime = beginTime.split(\":\")\n beginHour = splitBeginTime[0]\n beginMin = splitBeginTime[1]\n beginTimeFinal = beginYear+\"-\"+beginMonth+\"-\"+beginDay+\"T\"+beginHour+\":\"+beginMin\n app.logger.debug(\"km={}\".format(km))\n app.logger.debug(\"brevet={}\".format(brevet))\n app.logger.debug(\"request.args: {}\".format(request.args))\n open_time = acp_times.open_time(km, brevet, beginTimeFinal)\n close_time = acp_times.close_time(km, brevet, beginTimeFinal)\n result = {\"open\": open_time, \"close\": close_time, \"notes\": notes}\n return flask.jsonify(result=result)\n\n@app.route(\"/_submit_DB\")\ndef _submit_DB():\n collection.delete_many({}) # Clear entire collection\n kms = request.args.get('kms', \"\", type=str).split(\"~\")\n opens = request.args.get('opens', \"\", type=str).split(\"~\")\n closes = request.args.get('closes', \"\", type=str).split(\"~\")\n noteses = request.args.get('noteses', \"\", type=str).split(\"~\")\n for i in range(len(kms)-1):\n collection.insert({\"km\": str(kms[i]), \"open\": opens[i], \"close\": closes[i], \"notes\": noteses[i]})\n return \"fine\" # Need to return something\n\n@app.route(\"/_load_DB\")\ndef 
_load_DB():\n data = collection.find()\n kms = []\n miles = []\n opens = []\n closes = []\n noteses = []\n for datum in data:\n if len(datum['km']) > 0: # Ignore it if it's a blank one\n kms.append(str(int(round(float(datum['km']), 0))))\n miles.append(str(round(float(datum['km']) * 0.621371, 1)))\n opens.append(datum['open'])\n closes.append(datum['close'])\n noteses.append(datum['notes'])\n result = {\"kms\": kms, \"miles\": miles, \"opens\": opens, \"closes\": closes, \"noteses\": noteses}\n return flask.jsonify(result=result)\n\n@app.route(\"/listAll\")\ndef listAll():\n data = collection.find()\n opens = []\n closes = []\n for datum in data:\n if len(datum['open']) > 0: # Ignore it if it's a blank one\n opens.append(datum['open'])\n closes.append(datum['close'])\n html = \"\\n\\n{\\n\\n\"\n for i in range(len(opens)):\n if i < len(opens) - 1:\n html += ' {\\n  \"open\": \"' + opens[i] + '\",\\n  \"close\": \"' + closes[i] + '\"\\n },\\n\\n'\n else:\n html += ' {\\n  \"open\": \"' + opens[i] + '\",\\n  \"close\": \"' + closes[i] + '\"\\n }\\n\\n'\n html += \"}\\n\\n\"\n return html\n\n@app.route(\"/listOpenOnly\")\ndef listOpenOnly():\n k = request.args.get('top', default = 999, type = int)\n data = collection.find()\n opens = []\n for datum in data:\n if len(datum['open']) > 0: # Ignore it if it's a blank one\n opens.append(datum['open'])\n if k != 999:\n opens.sort(key=lambda x: datetime.datetime.strptime(x, '%a %m/%d %H:%M'))\n # https://stackoverflow.com/a/2589484\n html = \"\\n\\n{\\n\\n\"\n for i in range(len(opens)):\n if k == i:\n break\n if i < len(opens) - 1:\n html += ' {\\n  \"open\": \"' + opens[i] + '\"\\n },\\n\\n'\n else:\n html += ' {\\n  \"open\": \"' + opens[i] + '\"\\n }\\n\\n'\n html += \"}\\n\\n\"\n return html\n\n@app.route(\"/listCloseOnly\")\ndef listCloseOnly():\n k = request.args.get('top', default = 999, type = int)\n data = collection.find()\n closes = []\n for datum in data:\n if len(datum['close']) > 0: # Ignore it if it's a blank one\n closes.append(datum['close'])\n if k != 999:\n closes.sort(key=lambda x: datetime.datetime.strptime(x, '%a %m/%d %H:%M'))\n # https://stackoverflow.com/a/2589484\n html = \"\\n\\n{\\n\\n\"\n for i in range(len(closes)):\n if k == i:\n break\n if i < len(closes) - 1:\n html += ' {\\n  \"close\": \"' + closes[i] + '\"\\n },\\n\\n'\n else:\n html += ' {\\n  \"close\": \"' + closes[i] + '\"\\n }\\n\\n'\n html += \"}\\n\\n\"\n return html\n\n@app.route(\"/listAll/csv\")\ndef listAllCSV():\n data = collection.find()\n opens = []\n closes = []\n for datum in data:\n if len(datum['open']) > 0: # Ignore it if it's a blank one\n opens.append(datum['open'])\n closes.append(datum['close'])\n html = \"\\n\\nOpen, Close\\n\"\n for i in range(len(opens)):\n html += opens[i] + \", \" + closes[i] + \"\\n\"\n html += \"\\n\\n\"\n return html\n\n@app.route(\"/listOpenOnly/csv\")\ndef listOpenOnlyCSV():\n k = request.args.get('top', default = 999, type = int)\n data = collection.find()\n opens = []\n for datum in data:\n if len(datum['open']) > 0: # Ignore it if it's a blank one\n opens.append(datum['open'])\n if k != 999:\n opens.sort(key=lambda x: datetime.datetime.strptime(x, '%a %m/%d %H:%M'))\n # https://stackoverflow.com/a/2589484\n html = \"\\n\\nOpen\\n\"\n for i in range(len(opens)):\n if k == i:\n break\n html += opens[i] + \"\\n\"\n html += \"\\n\\n\"\n return html\n\n@app.route(\"/listCloseOnly/csv\")\ndef listCloseOnlyCSV():\n k = request.args.get('top', default = 999, type = int)\n data = collection.find()\n closes = []\n for datum in data:\n if len(datum['close']) > 0: # Ignore it if it's a blank one\n closes.append(datum['close'])\n if k != 999:\n closes.sort(key=lambda x: datetime.datetime.strptime(x, '%a %m/%d %H:%M'))\n # https://stackoverflow.com/a/2589484\n html = \"\\n\\nClose\\n\"\n for i in range(len(closes)):\n if k == i:\n break\n html += closes[i] + \"\\n\"\n html += \"\\n\\n\"\n return html\n\n@app.route(\"/listAll/json\")\ndef listAllJSON():\n data = collection.find()\n opens = []\n closes = []\n for datum in data:\n if len(datum['open']) > 0: # Ignore it if it's a blank one\n opens.append(datum['open'])\n closes.append(datum['close'])\n html = \"\\n\\n{\\n\\n\"\n for i in range(len(opens)):\n if i < len(opens) - 1:\n html += ' {\\n  \"open\": \"' + opens[i] + '\",\\n  \"close\": \"' + closes[i] + '\"\\n },\\n\\n'\n else:\n html += ' {\\n  \"open\": \"' + opens[i] + '\",\\n  \"close\": \"' + closes[i] + '\"\\n }\\n\\n'\n html += \"}\\n\\n\"\n return html\n\n@app.route(\"/listOpenOnly/json\")\ndef listOpenOnlyJSON():\n k = request.args.get('top', default = 999, type = int)\n data = collection.find()\n opens = []\n for datum in data:\n if len(datum['open']) > 0: # Ignore it if it's a blank one\n opens.append(datum['open'])\n if k != 999:\n opens.sort(key=lambda x: datetime.datetime.strptime(x, '%a %m/%d %H:%M'))\n # https://stackoverflow.com/a/2589484\n html = \"\\n\\n{\\n\\n\"\n for i in range(len(opens)):\n if k == i:\n break\n if i < len(opens) - 1:\n html += ' {\\n  \"open\": \"' + opens[i] + '\"\\n },\\n\\n'\n else:\n html += ' {\\n  \"open\": \"' + opens[i] + '\"\\n }\\n\\n'\n html += \"}\\n\\n\"\n return html\n\n@app.route(\"/listCloseOnly/json\")\ndef listCloseOnlyJSON():\n k = request.args.get('top', default = 999, type = int)\n data = collection.find()\n closes = []\n for datum in data:\n if len(datum['close']) > 0: # Ignore it if it's a blank one\n closes.append(datum['close'])\n if k != 999:\n closes.sort(key=lambda x: datetime.datetime.strptime(x, '%a %m/%d %H:%M'))\n # https://stackoverflow.com/a/2589484\n html = \"\\n\\n{\\n\\n\"\n for i in range(len(closes)):\n if k == i:\n break\n if i < len(closes) - 1:\n html += ' {\\n  \"close\": \"' + closes[i] + '\"\\n },\\n\\n'\n else:\n html += ' {\\n  \"close\": \"' + closes[i] + '\"\\n }\\n\\n'\n html += \"}\\n\\n
\"\n return html\n\n\n#############\n\napp.debug = CONFIG.DEBUG\nif app.debug:\n app.logger.setLevel(logging.DEBUG)\n\nif __name__ == \"__main__\":\n print(\"Opening for global access on port {}\".format(CONFIG.PORT))\n app.run(port=CONFIG.PORT, host=\"0.0.0.0\")\n","sub_path":"flask_brevets.py","file_name":"flask_brevets.py","file_ext":"py","file_size_in_byte":10457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"328466866","text":"import os\nimport re\nimport urllib\nimport requests\nimport itertools\nfrom tqdm import tqdm\nfrom .photomosaic import options\n\n\nPUBLIC_URL = \"https://www.flickr.com/photos/\"\nAPI_URL = 'https://api.flickr.com/services/rest/'\nPATH = \"http://farm{farm}.staticflickr.com/{server}/\"\nNAME = \"{id}_{secret}_b.jpg\"\n\n\ndef _flickr_request(**kwargs):\n params = dict(api_key=options['flickr_api_key'],\n format='json',\n nojsoncallback=1,\n **kwargs)\n response = requests.get(API_URL, params=params)\n return response.json()\n\n\ndef _get_photoset(photoset_id, nsid, dest):\n os.makedirs(dest, exist_ok=True)\n for page in itertools.count(1):\n response = _flickr_request(\n method='flickr.photosets.getPhotos',\n photoset_id=photoset_id,\n nsid=nsid,\n content_type=1, # photos only\n page=page\n )\n if response.get('stat') != 'ok':\n # If we fail requesting page 1, that's an error. If we fail\n # requesting page > 1, we're just out of photos.\n if page == 1:\n raise RuntimeError(\"response: {}\".format(response))\n break\n photos = response['photoset']['photo']\n for photo in tqdm(photos, desc='downloading photos'):\n url = (PATH + NAME).format(**photo)\n filename = ('{photoset_id}_' + NAME\n ).format(photoset_id=photoset_id, **photo)\n filepath = os.path.join(dest, filename)\n urllib.request.urlretrieve(url, filepath)\n\n\ndef from_url(url, dest):\n m = re.match(PUBLIC_URL + \"(.*)/sets/([0-9]+)\", url)\n if m is None:\n raise ValueError(\"\"\"Expected URL like:\nhttps://www.flickr.com/photos//sets/\"\"\")\n username, photoset_id = m.groups()\n response = _flickr_request(method=\"flickr.urls.lookupUser\",\n url=PUBLIC_URL + username)\n nsid = response['user']['username']['_content']\n return _get_photoset(photoset_id, nsid, dest)\n","sub_path":"photomosaic/flickr.py","file_name":"flickr.py","file_ext":"py","file_size_in_byte":2055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"649695229","text":"import csv\r\n\r\nopen_file = open(\"sitka_weather_07-2018_simple.csv\", \"r\")\r\n\r\ncsv_file = csv.reader(open_file, delimiter=\",\")\r\n\r\nheader_row = next(csv_file)\r\n'''\r\nprint(header_row)\r\n#\r\nfor index, column_header in enumerate(header_row): ## tells us the position(index) and the value(column_header)\r\n print(index,column_header)\r\n'''\r\nhighs = []\r\nlows = []#\r\nfor row in csv_file:\r\n highs.append(int(row[5]))\r\n lows.append(int(row[6]))\r\nprint(highs)\r\nprint(lows)\r\n\r\nimport matplotlib.pyplot as plt\r\n\r\nplt.plot(highs, c=\"red\")\r\nplt.plot(lows, c=\"blue\")\r\nplt.title(\"Daily High & Low Temps, July 2018\", fontsize=16)\r\nplt.ylabel(\"Temperature (F)\",fontsize=16)\r\nplt.tick_params(axis=\"both\",which=\"major\",labelsize=16)\r\n\r\nplt.show()\r\n\r\n","sub_path":"sitka_temps_1.py","file_name":"sitka_temps_1.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"38290084","text":"def quad_scanner(sequence, stem_start,\n 
stem_stop, loop_stop, loop_start=1):\n import re\n\n quad_motifs_bed = []\n for strand in ['+', '-']:\n if strand == \"+\":\n quad_signature = \"G\" + str(stem_start) + \"-\" + str(stem_stop) + \"L\" + \\\n str(loop_start) + \"-\" + str(loop_stop)\n quad_motif = \"(?:G{%d,%d}[ATGCN]{%d,%d}){3}G{%d,%d}\" % (\n stem_start, stem_stop, loop_start,\n loop_stop, stem_start, stem_stop)\n else:\n quad_signature = \"C\" + str(stem_start) + \"-\" + str(stem_stop) + \"L\" + \\\n str(loop_start) + \"-\" + str(loop_stop)\n quad_motif = \"(?:C{%d,%d}[ATGCN]{%d,%d}){3}C{%d,%d}\" % (\n stem_start, stem_stop, loop_start,\n loop_stop, stem_start, stem_stop)\n quad_regex = re.compile(quad_motif, flags=re.IGNORECASE)\n quad_object = quad_regex.finditer(sequence)\n for quads in quad_object:\n quad_motifs_bed.append([quads.start(), quads.end() - 1,\n quads.end() - quads.start(), quad_signature, quads.group()])\n return quad_motifs_bed\n","sub_path":"utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"92371820","text":"# 分治法;二分查找\r\n\r\ndef func(lst,k):\r\n return lst[0] == k\r\n\r\n\r\ndef solve(lst,k):\r\n if len(lst) == 1:\r\n return func(lst,k)\r\n\r\n m = len(lst)//2\r\n left_list = lst[:m]\r\n right_list = lst[m:]\r\n\r\n # or:从左边开始算,算完后,没出结论,再算or右边\r\n return solve(left_list,k) or solve(right_list,k)\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n lst = [1, 2, 3, 4, 5, 6, 7, 8]\r\n k = int(input('输入一个数:'))\r\n\r\n re = solve(lst, k)\r\n print(re)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"Data_AI/07/练习.py","file_name":"练习.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"48041333","text":"\nfrom waziup_commons.models.observation import Observation\nfrom waziup_commons.models.measure import Measure\nfrom waziup_commons.models.measure_metadata import MeasureMetadata\n\ndef get_observation(entity: dict) -> Observation:\n try:\n measure = entity['measure']\n value = measure['value']\n measure_metadata = measure['metadata']\n dimension = measure_metadata['dimension']['value']\n unit = measure_metadata['unit']['value']\n timestamp = measure_metadata['timestamp']['value']\n\n my_measure = Measure(value = value,\n metadata = MeasureMetadata(dimension = dimension,\n unit = unit,\n timestamp = timestamp))\n my_obs = Observation(id = entity['id'],\n type = entity['type'],\n measure = measure)\n return my_obs\n except KeyError:\n print('I got a KeyError')\n pass\n\n","sub_path":"old/waziupctrl/controllers/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"264493431","text":"\"\"\"\nRead some part of some file.\n\"\"\"\nimport argparse\n\nLINE_LEN = 4\n\nparser = argparse.ArgumentParser(description=\"Process some args.\")\nparser.add_argument(\"start\", type=int)\nparser.add_argument(\"end\", type=int)\nparser.add_argument(\"filename\", type=str)\n\nargs = parser.parse_args()\n\nshift = args.start * LINE_LEN\namount = (args.end - args.start) * LINE_LEN\nfilename = args.filename\n\n\nwith open(filename, \"r\") as f:\n f.seek(shift)\n print(f\"thread {number}: 
{f.read(amount)}\")\n","sub_path":"classwork/thread_proc_ex/reader.py","file_name":"reader.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"205669948","text":"# -*- coding: utf-8 -*-\n\nimport os.path\n\nAPPNAME = 'dp'\n\nX86 = 'x86'\nAMD64 = 'amd64'\n\nLINUX = 'linux'\nWINDOWS = 'windows'\n\nDEBUG = 'debug'\nRELEASE = 'release'\n\nCOMMON = 'common'\n\nBUILD_DIR = 'build'\n\nINCLUDE_DIR = 'inc'\n\nIMPL_DIR = 'impl'\nIMPL_INCLUDE_DIR = os.path.join(\n IMPL_DIR,\n INCLUDE_DIR,\n)\nSOURCE_DIR = os.path.join(\n IMPL_DIR,\n 'src',\n)\n\nIMPL_PACKAGE_DIR = APPNAME + IMPL_DIR\n\ndef generatePartialLibraryName( _name ):\n return APPNAME + '_' + _name\n\ndef generateCompositeLibraryName():\n return APPNAME\n\ndef buildPartialLibrary(\n _ctx,\n _sourcesSet,\n _librariesSet,\n _target,\n _includes,\n _libpath,\n _setupSources,\n _sources,\n _libraries,\n _useModules,\n):\n if _sourcesSet is not None:\n _sourcesSet |= _sources\n\n if _librariesSet is not None:\n _librariesSet |= _libraries\n\n _buildObject(\n _ctx,\n _target,\n _includes,\n _libpath,\n _setupSources | _sources,\n _libraries,\n _useModules,\n )\n\ndef buildCompositeLibrary(\n _ctx,\n _target,\n _includes,\n _libpath,\n _sources,\n _libraries,\n _useModules,\n):\n _buildObject(\n _ctx,\n _target,\n _includes,\n _libpath,\n _sources,\n _libraries,\n _useModules,\n )\n\ndef _buildObject(\n _ctx,\n _target,\n _includes,\n _libpath,\n _sources,\n _libraries,\n _useModules,\n):\n _ctx.shlib(\n target = _target,\n source = _sources,\n lib = _libraries,\n use = _useModules,\n includes = _includes,\n libpath = _libpath,\n )\n","sub_path":"wscripts/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":1603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"38122739","text":"import datetime as dt\nimport hashlib\nimport pathlib\nimport shutil\nimport sys\nimport warnings\nfrom subprocess import check_output, CalledProcessError, PIPE\n\nfrom fpdf.template import Template\n\nQPDF_AVAILABLE = bool(shutil.which(\"qpdf\"))\nif not QPDF_AVAILABLE:\n warnings.warn(\n \"qpdf command not available on the $PATH, falling back to hash-based \"\n \"comparisons in tests\"\n )\n\nEPOCH = dt.datetime(1969, 12, 31, 19, 00, 00)\n\n\ndef assert_pdf_equal(actual, expected, tmp_path, generate=False):\n \"\"\"\n This compare the output of a `FPDF` instance (or `Template` instance),\n with the provided PDF file.\n\n The `CreationDate` of the newly generated PDF is fixed, so that it never triggers\n a diff.\n\n If the `qpdf` command is available on the `$PATH`, it will be used to perform the\n comparison, as it greatly helps debugging diffs. Otherwise, a hash-based comparison\n logic is used as a fallback.\n\n Args:\n actual: instance of `FPDF` or `Template`. The `output` or `render` method\n will be called on it.\n expected: instance of `FPDF`, `bytearray` or file path to a PDF file\n matching the expected output\n tmp_path (Path): temporary directory provided by pytest individually to the\n caller test function\n generate (bool): only generate `pdf` output to `rel_expected_pdf_filepath`\n and return. 
Useful to create new tests.\n \"\"\"\n if isinstance(actual, Template):\n actual.render()\n actual_pdf = actual.pdf\n else:\n actual_pdf = actual\n actual_pdf.set_creation_date(EPOCH)\n if generate:\n assert isinstance(expected, pathlib.Path), (\n \"When passing `True` to `generate`\"\n \"a pathlib.Path must be provided as the `expected` parameter\"\n )\n actual_pdf.output(expected.open(\"wb\"))\n return\n if isinstance(expected, pathlib.Path):\n expected_pdf_path = expected\n else:\n expected_pdf_path = tmp_path / \"expected.pdf\"\n with expected_pdf_path.open(\"wb\") as pdf_file:\n if isinstance(expected, (bytes, bytearray)):\n pdf_file.write(expected)\n else:\n expected.output(pdf_file)\n actual_pdf_path = tmp_path / \"actual.pdf\"\n with actual_pdf_path.open(\"wb\") as pdf_file:\n actual_pdf.output(pdf_file)\n if QPDF_AVAILABLE: # Favor qpdf-based comparison, as it helps a lot debugging:\n actual_qpdf = _qpdf(actual_pdf_path)\n expected_qpdf = _qpdf(expected_pdf_path)\n (tmp_path / \"actual_qpdf.pdf\").write_bytes(actual_qpdf)\n (tmp_path / \"expected_qpdf.pdf\").write_bytes(expected_qpdf)\n actual_lines = actual_qpdf.splitlines()\n expected_lines = expected_qpdf.splitlines()\n if actual_lines != expected_lines:\n # It is important to reduce the size of both list of bytes here,\n # to avoid .assertSequenceEqual to take forever to finish, that itself calls difflib.ndiff,\n # that has cubic complexity from this comment by Tim Peters: https://bugs.python.org/issue6931#msg223459\n actual_lines = subst_streams_with_hashes(actual_lines)\n expected_lines = subst_streams_with_hashes(expected_lines)\n assert actual_lines == expected_lines\n else: # Fallback to hash comparison\n actual_hash = hashlib.md5(actual_pdf_path.read_bytes()).hexdigest()\n expected_hash = hashlib.md5(expected_pdf_path.read_bytes()).hexdigest()\n assert actual_hash == expected_hash, f\"{actual_hash} != {expected_hash}\"\n\n\ndef subst_streams_with_hashes(in_lines):\n \"\"\"\n This utility function reduce the length of `in_lines`, a list of bytes,\n by replacing multi-lines streams looking like this:\n\n stream\n {non-printable-binary-data}endstream\n\n by a single line with this format:\n\n \n \"\"\"\n out_lines, stream = [], None\n for line in in_lines:\n if line == b\"stream\":\n assert stream is None\n stream = bytearray()\n elif stream == b\"stream\":\n # First line of stream, we check if it is binary or not:\n try:\n line.decode(\"latin-1\")\n if not (b\"\\0\" in line or b\"\\xff\" in line):\n # It's likely to be text! No need to compact stream\n stream = None\n except UnicodeDecodeError:\n pass\n if stream is None:\n out_lines.append(line)\n else:\n stream += line\n if line.endswith(b\"endstream\") and stream:\n stream_hash = hashlib.md5(stream).hexdigest()\n out_lines.append(f\"\\n\".encode())\n stream = None\n return out_lines\n\n\ndef _qpdf(input_pdf_filepath):\n if sys.platform == \"cygwin\":\n # Lucas (2021/01/06) : this conversion of UNIX file paths to Windows ones is only needed\n # for my development environment: Cygwin, a UNIX system, with a qpdf Windows binary. 
Sorry for the kludge!\n input_pdf_filepath = (\n check_output([\"cygpath\", \"-w\", str(input_pdf_filepath)]).decode().strip()\n )\n try:\n return check_output(\n [\"qpdf\", \"--deterministic-id\", \"--qdf\", str(input_pdf_filepath), \"-\"],\n stderr=PIPE,\n )\n except CalledProcessError as error:\n print(f\"\\nqpdf STDERR: {error.stderr.decode().strip()}\")\n raise\n","sub_path":"venv/Lib/site-packages/test/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":5467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"376087474","text":"import tensorflow as tf\nimport pandas as pd\nimport numpy as np\n\ndef addALayer(inputSize, outputSize, input, activationFuction=None):\n Weight = tf.Variable(tf.random_normal([inputSize, outputSize]))\n Bias = tf.Variable(tf.random_normal([1, outputSize])) + 0.1\n output = tf.matmul(input, Weight) + Bias\n if activationFuction==None:\n return output\n else:\n return activationFuction(output)\n\ndef preprocessingTrainData(data):\n #保留几个字段,发你别是船舱级别,性别,年龄,船上同辈亲戚数,船上父母子女数,票价,舱位,上传港口\n label = data[\"Survived\"].values.reshape(len(data),1)\n data = data[[ 'Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare',\"Embarked\"]]\n data['p1'] = np.array(data[\"Pclass\"] == 1).astype(np.int32)\n data['p2'] = np.array(data[\"Pclass\"] == 2).astype(np.int32)\n data['p3'] = np.array(data[\"Pclass\"] == 3).astype(np.int32)\n data['male'] = np.array(data['Sex']=='male').astype(np.int32)\n data['female'] = np.array(data['Sex'] == 'female').astype(np.int32)\n data['es'] = np.array(data[\"Embarked\"] == \"S\").astype(np.int32)\n data['ec'] = np.array(data[\"Embarked\"] == \"C\").astype(np.int32)\n data['eq'] = np.array(data[\"Embarked\"] == \"Q\").astype(np.int32)\n data[\"Age\"] = data[\"Age\"]/100.#np.max( data[\"Age\"])\n data[\"Fare\"] = data[\"Fare\"]/np.max(data[\"Fare\"])\n data = data.fillna(data.mean())\n return label, data.drop([\"Pclass\",\"Sex\",\"Embarked\"], axis=1).values\n\ntrainData = pd.read_csv(r\"C:\\Users\\Administrator\\Desktop\\自然语言理解学习\\train.csv\")\ntestData = pd.read_csv(r\"C:\\Users\\Administrator\\Desktop\\自然语言理解学习\\test.csv\")\n\ntrainLabel, trainData = preprocessingTrainData(trainData)\nprint(np.shape(trainLabel),np.shape(trainData))\n#print(trainData.columns)\nfeatureNum = len(trainData[0, :])\n\nx = tf.placeholder(\"float\", shape=[None, featureNum])\ny = tf.placeholder(\"float\", shape=[None, 1])\n\noutputLayer1 = addALayer(featureNum, 100, x, tf.nn.relu)\noutputLayer_1 = addALayer(100, 100, outputLayer1, tf.nn.relu)\noutputLayer_2 = addALayer(100, 100, outputLayer1, tf.nn.relu)\noutputLayer2 = addALayer(100, 1, outputLayer_2, None)\n\npred = tf.cast(tf.sigmoid(outputLayer2) > 0.5, tf.float32)\n\nloss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=outputLayer2))\ntrain_step = tf.train.GradientDescentOptimizer(0.0003).minimize(loss)\n\naccuracy = tf.reduce_mean(tf.cast(tf.equal(pred, y), tf.float32))\n\nsess = tf.Session()\nsess.run(tf.global_variables_initializer())\nimport random\nfor i in range(10000):\n index = np.random.permutation(len(trainLabel))\n #print(index)\n trainDataTemp = trainData[index,:]\n trainLabelTemp = trainLabel[index,:]\n for j in range(len(index)//100 + 1):\n batch_xs = trainDataTemp[j*100:j*100+100]\n batch_ys = trainLabelTemp[j * 100:j * 100 + 100]\n #print(batch_xs)\n sess.run(train_step, feed_dict={x: batch_xs, y: batch_ys})\n print(i, sess.run(loss, feed_dict={x: batch_xs, y: batch_ys}),sess.run(accuracy, feed_dict={x: 
batch_xs, y: batch_ys}))\n","sub_path":"src/tensorflowLearning/titanixANN.py","file_name":"titanixANN.py","file_ext":"py","file_size_in_byte":3085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"424809492","text":"from __future__ import print_function, unicode_literals, with_statement, division\nfrom django import VERSION\n\nversion = VERSION[1]\n\nV15 = 5\nV16 = 6\nV17 = 7\nV18 = 8\nV19 = 9\n\n\nif version in [V15, V16]:\n from django.db.models.loading import get_model\n\nif version in [V17, V18, V19]:\n from django.apps.registry import apps\n get_model = apps.get_model\n\n\n__all__ = ['get_model']\n","sub_path":"karta/compat.py","file_name":"compat.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"650826312","text":"#!/usr/bin/env python3\nfrom mdlib.customMathFunc import myRound\nimport numpy as np\n\nclass mdEngine(object):\n\n def __init__(self, nparticle, box, kb, time_step, temperature, ndims, mass, thermoStatFlag, getForce, frictCoeff):\n self.nparticle = nparticle\n self.box = box\n self.kb = kb\n self.time_step = time_step\n self.temperature = temperature\n self.ndims = ndims\n self.mass = mass\n self.thermoStatFlag = thermoStatFlag \n self.frictCoeff = frictCoeff\n self.getForce = getForce\n self._velSQ = 0.0\n self._count = 0\n\n def genVelocity(self): \n \"\"\" generate velocity profile by Maxwell-Boltzmann distribution\"\"\"\n velDirection = 1 if np.random.randint(1, 1001) % 2 == 0 else -1 \n initVel = np.ones((self.nparticle, self.ndims), dtype=np.float64) * velDirection * np.sqrt(self.kb * self.temperature / self.mass)\n return initVel\n\n def checkTargetTemperature(self, input_numpy_array_vel, current_frame, total_frame):\n self._count += 1\n for n in range(self.nparticle):\n for d in range(self.ndims): \n self._velSQ += (input_numpy_array_vel[n][d]**2)\n\n if current_frame == total_frame: \n targetTemperature = (self.mass * self._velSQ / self.kb / self.ndims / self._count)\n with open(\"checkTargetTemperaturem%.3fT%.3fgamma%.3f.dat\" % (self.mass, self.temperature, self.frictCoeff), \"w\") as fout:\n fout.write(\"# Average Target Temperature: \" + str(targetTemperature)) \n\n def velocityVerletSimple(self, current_coord, current_vel):\n\n if self.ndims == 1:\n for n in range(self.nparticle):\n current_force = self.getForce(current_coord[n][0], 0, current_vel[n][0], 0)\n\n current_coord[n][0] = current_coord[n][0] + current_vel[n][0] * self.time_step + (0.5 / self.mass) * current_force * self.time_step**2 \n current_coord[n][0] -= (myRound(current_coord[n][0] / self.box[0]) * self.box[0])\n\n #next_force = self.getForce(current_coord[n][0], 0, current_vel[n][0], 0) \n\n current_vel[n][0] = current_vel[n][0] + (0.5 / self.mass) * (current_force + current_force) * self.time_step\n\n if self.ndims == 2:\n for n in range(self.nparticle):\n\n current_force_x = self.getForce(current_coord[n][0], 0, current_vel[n][0], current_coord[n][1])\n current_force_y = self.getForce(current_coord[n][0], 1, current_vel[n][1], current_coord[n][1])\n\n current_coord[n][0] = current_coord[n][0] + current_vel[n][0] * self.time_step + (0.5 / self.mass) * current_force_x * self.time_step ** 2 \n current_coord[n][1] = current_coord[n][1] + current_vel[n][1] * self.time_step + (0.5 / self.mass) * current_force_y * self.time_step ** 2 \n current_coord[n][0] -= (myRound(current_coord[n][0] / self.box[0]) * self.box[0])\n current_coord[n][1] -= 
(myRound(current_coord[n][1] / self.box[1]) * self.box[1])\n\n #next_force_x = self.getForce(current_coord[n][0], 0, current_vel[n][0], current_coord[n][1]) \n #next_force_y = self.getForce(current_coord[n][0], 1, current_vel[n][1], current_coord[n][1]) \n\n #current_vel[n][0] = current_vel[n][0] + (0.5 / self.mass) * (current_force_x + next_force_x) * self.time_step\n #current_vel[n][1] = current_vel[n][1] + (0.5 / self.mass) * (current_force_y + next_force_y) * self.time_step\n current_vel[n][0] = current_vel[n][0] + (0.5 / self.mass) * (current_force_x + current_force_x) * self.time_step\n current_vel[n][1] = current_vel[n][1] + (0.5 / self.mass) * (current_force_y + current_force_y) * self.time_step\n\n def velocityVerletLJ(self, current_coord, current_vel):\n # for Lennard Jones potential\n pass\n\nif __name__ == \"__main__\":\n pass\n \n","sub_path":"FreeEnergySampling/annSampling/new_MD_engine/wo_StopCriteria_init/mdlib/mdEngine.py","file_name":"mdEngine.py","file_ext":"py","file_size_in_byte":3813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"253341042","text":"\"\"\" Page functions for Tenant pages\n\n\n:var list_page: A :py:class:`cfme.web_ui.Region` object describing elements on the list page.\n:var details_page: A :py:class:`cfme.web_ui.Region` object describing elements on the detail page.\n\"\"\"\n\nfrom utils.conf import cfme_data\nfrom cfme.fixtures import pytest_selenium as sel\nfrom cfme.web_ui import Region, SplitTable\n\n\n# Page specific locators\nlist_page = Region(\n locators={\n 'tenant_table': SplitTable(header_data=('//div[@class=\"xhdr\"]/table/tbody', 1),\n body_data=('//div[@class=\"objbox\"]/table/tbody', 1))\n },\n title='Cloud Tenants')\n\n\ndetails_page = Region(infoblock_type='detail')\n\n\nclass Tenant(object):\n def __init__(self, name, description, provider_key):\n \"\"\"Base class for a Tenant\"\"\"\n self.name = name\n self.description = description\n self.provider_key = provider_key\n\n def exists(self):\n sel.force_navigate('clouds_tenants')\n provider_name = cfme_data['management_systems'][self.provider_key]['name']\n res = list_page.tenant_table.find_row_by_cells({'Name': self.name,\n 'Cloud Provider': provider_name})\n if res:\n return True\n else:\n return False\n","sub_path":"cfme/cloud/tenant.py","file_name":"tenant.py","file_ext":"py","file_size_in_byte":1278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"28012746","text":"class Solution(object):\n def isAnagram(self, s, t):\n \"\"\"\n :type s: str\n :type t: str\n :rtype: bool\n \"\"\"\n tL=[x for x in t] #make list of t\n\n for i in range(len(s)):\n try:\n tL.remove(s[:1]) #try remove first char of s in t list\n s=s[1:] #remove first char in s\n except: #if fail means it is not Anagram\n return False\n \n if tL==[]: #after check all char if t list is empty return True\n return True\n return False\n \n#url: https://leetcode.com/problems/valid-anagram/\n","sub_path":"242. Valid Anagram.py","file_name":"242. 
Valid Anagram.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"60763812","text":"from random import random\n\ntimes = int(input('请输入投掷飞镖次数'))\nhist = 0\nfor i in range(times): # range 生成一个连续数字序列\n x = random() # 生成随机数\n y = random()\n if x * x + y * y < 1:\n hist += 1\nprint(4.0 * hist / times)\n\n\n# 蒙特·卡罗方法是一种通过概率来得到问题近似解的方法,在很多领域都有重要的应用,\n# 其中就包括圆周率近似值的计算问题。假设有一块边长为 2 的正方形木板,上面画一个单位\n# 圆,然后随意往木板上扔飞镖,落点坐标(x, y)必然在木板上(更多的时候是落在单位圆内),\n# 如果扔的次数足够多,那么落在单位圆内的次数除以总次数再乘以 4,这个数字会无限逼近\n# 圆周率的值","sub_path":"experiment3/nentKero.py","file_name":"nentKero.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"581449510","text":"from setuptools import setup, find_packages\nimport os\nimport subprocess\n\ndepend_links = []\n\n# detect if on raspberry pi, and\n# set location to wheel if we are\nIS_RASPI = False\nret = subprocess.call(['grep', '-q', 'BCM', '/proc/cpuinfo'])\nif ret == 0:\n IS_RASPI = True\n os.system(\"sudo +x INSTALL\")\n os.system(\"sudo ./INSTALL\")\n\nsetup(\n name=\"ventilator\",\n author=\"vent team\",\n author_email=\"vent@vents.com\",\n description=\"some description of how we made a ventilator\",\n keywords=\"vents ventilators etc\",\n url=\"https://ventilator.readthedocs.io\",\n version=\"0.0.2\",\n packages=find_packages(),\n install_requires=[\n 'numpy',\n 'PySide2',\n 'pyqtgraph>=0.11.0rc0',\n 'pigpio'\n ],\n dependency_links=depend_links\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"8047715","text":"\n\n#calss header\nclass _DUSTER():\n\tdef __init__(self,): \n\t\tself.name = \"DUSTER\"\n\t\tself.definitions = [u'a piece of cloth that is used for removing dust from furniture, books, surfaces, etc.', u'a stick with feathers at one end, or a similar object with cloth attached to one end, used for cleaning']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_duster.py","file_name":"_duster.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"453329271","text":"#bubble-sort\n\nimport unittest\n\nclass Bubble(unittest.TestCase):\n \n\n def test_fungsi1(self): #deklarasi fungsi\n data = [10,45,1,4,50]\n n=len(data) #menghitung panjang dari parameter data\n \n for i in range(n): #memulai for loop\n\n for j in range(0, n-i-1):\n\n if data[j] > data [j+1] : #memulai if kondisi, kalau data awal lebih besar maka\n data[j], data[j+1] = data[j+1], data[j] #data akan ditukar\n\n self.assertIn(1, data)\n \n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"novice/02-02/kasus/kasus4.py","file_name":"kasus4.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"645565434","text":"import os\r\nimport shutil\r\n\r\ndef pegar_extensao(nome):\r\n index = nome.rfind('.')\r\n #rfind no Python retorna a posição da última ocorrência da cadeia, se não houver correspondência -1 é retornado\r\n #selecionando apenas as posições após o ponto você consegue saber que tipo é o 
arquivo\r\n return nome[index:]\r\n\r\n\r\ndef organizador():\r\n pasta = str(input('Digite a localização da sua pasta de Download: '))\r\n #Aqui eu configuro quais tipos de extenções serão reconhecidas pelo programa\r\n exec_ext = ['.exe','.msi']\r\n pdf_ext = ['.pdf']\r\n img_ext = ['.jpg', '.jpeg', '.png']\r\n vid_ext = ['.mp4', '.avi','.wmv','.mkv']\r\n zip_ext = ['.rar', '.zip','.7z']\r\n musica_ext = ['.mp3']\r\n doc_ext = ['.doc', '.docx','.txt']\r\n planilha_ext = ['.xls','.xlt','.xml']\r\n pp_ext = ['.ppt','.pps']\r\n\r\n\r\n EXEC_DIR = os.path.join(pasta, 'Executáveis')\r\n PDF_DIR = os.path.join(pasta, 'Arquivos PDF')\r\n IMG_DIR = os.path.join(pasta, 'Imagens')\r\n VID_DIR = os.path.join(pasta, 'Videos')\r\n MU_DIR = os.path.join(pasta,'Musicas')\r\n ZIP_DIR = os.path.join(pasta, 'Arquivos Zipados')\r\n DOC_DIR = os.path.join(pasta, 'Documentos de Texto')\r\n PLA_DIR = os.path.join(pasta, 'Planilhas')\r\n PP_DIR = os.path.join(pasta, 'Power Point')\r\n ETC_DIR = os.path.join(pasta, 'Outros')\r\n\r\n if not os.path.isdir(EXEC_DIR):\r\n os.mkdir(EXEC_DIR)\r\n if not os.path.isdir(PDF_DIR):\r\n os.mkdir(PDF_DIR)\r\n if not os.path.isdir(IMG_DIR):\r\n os.mkdir(IMG_DIR)\r\n if not os.path.isdir(VID_DIR):\r\n os.mkdir(VID_DIR)\r\n if not os.path.isdir(MU_DIR):\r\n os.mkdir(MU_DIR)\r\n if not os.path.isdir(ZIP_DIR):\r\n os.mkdir(ZIP_DIR)\r\n if not os.path.isdir(DOC_DIR):\r\n os.mkdir(DOC_DIR)\r\n if not os.path.isdir(PLA_DIR):\r\n os.mkdir(PLA_DIR)\r\n if not os.path.isdir(PP_DIR):\r\n os.mkdir(PP_DIR)\r\n if not os.path.isdir(ETC_DIR):\r\n os.mkdir(ETC_DIR)\r\n\r\n\r\n arquivos = os.listdir(pasta)\r\n nova_pasta = ''\r\n #if os.path.isfile(os.path.join(diretorio, arquivo)):\r\n for i in arquivos:\r\n if os.path.isfile(os.path.join(pasta, i)):\r\n extensao = str.lower(pegar_extensao(i))\r\n if extensao in exec_ext:\r\n nova_pasta = EXEC_DIR\r\n elif extensao in pdf_ext:\r\n nova_pasta = PDF_DIR\r\n elif extensao in img_ext:\r\n nova_pasta = IMG_DIR\r\n elif extensao in vid_ext:\r\n nova_pasta = VID_DIR\r\n elif extensao in zip_ext:\r\n nova_pasta = ZIP_DIR\r\n elif extensao in doc_ext:\r\n nova_pasta = DOC_DIR\r\n elif extensao in musica_ext:\r\n nova_pasta = MU_DIR\r\n elif extensao in planilha_ext:\r\n nova_pasta = PLA_DIR\r\n elif extensao in pp_ext:\r\n nova_pasta = PP_DIR\r\n else:\r\n nova_pasta = ETC_DIR\r\n \r\n shutil.move(os.path.join(pasta, i), os.path.join(nova_pasta, i)) \r\n print(f'Arquivo: {i}, movido para: {os.path.join(nova_pasta,i)}')\r\n\r\norganizador()","sub_path":"trabalho/principal/original ( não modifique diretamente )/organizador.py","file_name":"organizador.py","file_ext":"py","file_size_in_byte":3155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"101558390","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nget_ipython().run_cell_magic('javascript', '', 'IPython.OutputArea.prototype._should_scroll = function(lines) {\\n return false;\\n}')\n\n\n# In[2]:\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom scipy.optimize import fmin_ncg\n\nimport seaborn as sns\nsns.set_context('talk')\n\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import cm\nfrom matplotlib.colors import LogNorm\n\nfrom mpl_toolkits import mplot3d\n\nget_ipython().run_line_magic('matplotlib', 'notebook')\n\n\n# # Which way is \"down\"?\n\n# ### bowl\n# \n# Let's look at a standard \"bowl\" given by\n# $$\n# f(x,y) = x^2 + y^2\n# $$\n# which has a minimum at $(x,y)=(0,0)$\n\n# In[3]:\n\n\ndef 
f(x,y):\n return x**2 + y**2\n\ndef df(x,y):\n return np.array([2*x,2*y])\n\n\n# In[4]:\n\n\nx0 = np.arange(1,1)\n\nfig = plt.figure()\nax = Axes3D(fig, azim = -80, elev =55)\n\nX = np.linspace(-2, 2, 30)\nY = np.linspace(-2, 2, 30)\nX, Y = np.meshgrid(X, Y)\nZ = f(X, Y)\nax.plot_surface(X, Y, Z, rstride = 1, cstride = 1, norm = LogNorm(), cmap = 'viridis')\n\nax.plot([0], [0], [f(0,0)], 'mo', ms=10, zorder=10)\n\n\n# In[5]:\n\n\nplt.figure()\nplt.contourf(X, Y, Z, levels=30, cmap = 'cool')\nxx = 1.5\nyy = 1.5\ns = -0.5*df(xx, yy)\nplt.plot(xx, yy, 'bo', ms=10, zorder=10)\narrow = plt.Arrow(xx, yy, s[0], s[1], zorder=10, width=0.5)\nax = plt.gca()\nax.add_patch(arrow)\n\n\n# ### bread bowl\n# \n# Let's look at a bread bowl given by\n# $$\n# f(x,y) = 10 * x^2 + y^2\n# $$\n# which has a minimum at $(x,y)=(0,0)$\n\n# In[6]:\n\n\ndef f(x,y):\n return 10 * x**2 + y**2\n\ndef df(x,y):\n return np.array([20*x,2*y])\n\n\n# In[7]:\n\n\nx0 = np.arange(1,1)\n\nfig = plt.figure()\nax = Axes3D(fig, azim = -80, elev =55)\n\nX = np.linspace(-2, 2, 30)\nY = np.linspace(-2, 2, 30)\nX, Y = np.meshgrid(X, Y)\nZ = f(X, Y)\nax.plot_surface(X, Y, Z, rstride = 1, cstride = 1, norm = LogNorm(), cmap = 'viridis')\n\nax.plot([0], [0], [f(0,0)], 'mo', ms=10, zorder=10)\n\n\n# In[8]:\n\n\nplt.figure()\nplt.contourf(X, Y, Z, levels=30, cmap = 'cool')\nxx = 1.5\nyy = 1.5\ns = -0.1*df(xx, yy)\nplt.plot(xx, yy, 'bo', ms=10, zorder=10)\narrow = plt.Arrow(xx, yy, s[0], s[1], zorder=10, width=0.5)\nax = plt.gca()\nax.add_patch(arrow)\n\n\n# ### Rosenbrock\n# \n# Let's look at a Rosenbrock function\n# $$\n# f(x,y) = (a-x)^2 + b(y-x^2)^2\n# $$\n# which has a minimum at $(x,y)=(a,a^2)$\n# \n# https://en.wikipedia.org/wiki/Rosenbrock_function\n\n# In[12]:\n\n\ndef f(x,y):\n a = 1\n b = 100\n return (a - x)**2 + b*(y - x**2)**2\n\n\n# In[13]:\n\n\nx0 = np.arange(1,1)\n\nfig = plt.figure()\nax = Axes3D(fig, azim = -80, elev =55)\n\nX = np.linspace(-4, 4, 30)\nY = np.linspace(-4, 4, 30)\nX, Y = np.meshgrid(X, Y)\nZ = f(X, Y)\nax.plot_surface(X, Y, Z, rstride = 1, cstride = 1, norm = LogNorm(), cmap = 'viridis')\n\nax.plot([1], [1], [f(1,1)], 'mo', ms=10, zorder=10)\nax.plot([2], [2], [f(2,2)], 'mo', ms=10, zorder=10)\nax.plot([1.8], [3.2], [f(1.8,3.2)], 'mo', ms=10, zorder=10)\n\n\n# In[11]:\n\n\nplt.figure()\nplt.contourf(X, Y, Z, levels=30, cmap = 'cool')\nxx = 1.8\nyy = 3.2\ns = -0.1*df(xx, yy)\nplt.plot(xx, yy, 'bo', ms=10, zorder=10)\nplt.plot(1, 1, 'm*', ms=10, zorder=10)\narrow = plt.Arrow(xx, yy, s[0], s[1], zorder=10, width=0.5)\nax = plt.gca()\nax.add_patch(arrow)\n\n\n# In[ ]:\n\n\ndef f(x,y):\n a = 1\n b = 0.5\n return (a - x)**2 + b*(y - x**2)**2\n\n","sub_path":"demos/upload/optimization/Downhill.py","file_name":"Downhill.py","file_ext":"py","file_size_in_byte":3256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"358728556","text":"import streamlit as st\nimport streamlit.components.v1 as components\nimport numpy as np\nimport pandas as pd\nfrom io import BytesIO\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport warnings\nwarnings.filterwarnings('ignore')\n\n# EDA Pkgs\nimport pandas as pd \nimport codecs\nfrom pandas_profiling import ProfileReport \n#import sweetviz as sv \n\n# Components Pkgs\nimport streamlit.components.v1 as components\nfrom streamlit_pandas_profiling import st_profile_report\n\n#import autoML\n\n#DATE_COLUMN = 'date/time'\n#DATA_URL = ('https://s3-us-west-2.amazonaws.com/'\n 
#'streamlit-demo-data/uber-raw-data-sep14.csv.gz')\n\n#@st.cache\n#def load_data(nrows):\n #data = pd.read_csv(DATA_URL, nrows=nrows)\n #lowercase = lambda x: str(x).lower()\n #data.rename(lowercase, axis='columns', inplace=True)\n #data[DATE_COLUMN] = pd.to_datetime(data[DATE_COLUMN])\n #return data\n\n#@st.cache\ndef load_data(uploadFileName):\n if uploadFileName is not None:\n try:\n #bytes_data = uploadFileName.read()\n #st.write(\"filename:\", uploadFileName.name)\n #st.write(bytes_data)\n #data = uploadFileName.read()\n data = pd.read_csv(uploadFileName)\n #df = pd.DataFrame(data)\n return data\n except UnicodeDecodeError as e:\n st.error(f\"error loading log.las: {e}\")\n return None\n\nst.title('The Data App')\n# Sidebar Options & File Uplaod\nst.sidebar.title('Data Explorer')\nst.sidebar.write('Load data using file browser or specify full path to file to use the app.')\nst.sidebar.write('c:\\\\user\\\\Downloads\\\\abc.csv or https://raw.githubusercontent.com/selva86/datasets/master/Cars93_miss.csv')\n#if st.sidebar.checkbox('WSL'):\n #st.write(uploadFileName)\n #uploadFileName = uploadFileName[0].lower() + uploadFileName[1:]\n #uploadFileName = \"/\" + uploadFileName.replace(\":\", \"/\").replace(\"\\\\\",\"/\")\n\ndataLoadStatus = st.sidebar.text('')\ndataDescription = st.text('')\nst.header(\"Base Data\")\ndata = None\ndataFrame = st.dataframe(data)\n#st.header(\"Basic Info\")\n#dataFrameDesc = st.dataframe(None)\n#loadedData = st.dataframe(None)\nuploadFileName = st.sidebar.text_input('Input File Name')\nif uploadFileName is not None and len(uploadFileName) > 0:\n dataLoadStatus.text('Loading data...')\n data = pd.read_csv(uploadFileName)\n if data is not None:\n dataFrame.dataframe(data)\n #dataFrameDesc.dataframe(pd.DataFrame(data.dtypes))\n dataLoadStatus.text('Loading data...done!')\n\nuploadFile = st.sidebar.file_uploader('Load csv files') #, type=['.las'])\nif uploadFile is not None:\n dataLoadStatus.text('Loading data...')\n data = pd.read_csv(BytesIO(uploadFile.getvalue()), sep=\",\")\n if data is not None:\n dataFrame.dataframe(data)\n #dataFrameDesc.dataframe(pd.DataFrame(data.dtypes))\n dataLoadStatus.text('Loading data...done!')\n\n#df = load_data(uploadFile)\n#if df is not None:\n #st.sidebar.success('File Uploaded Successfully')\n #df\n\n# Create a text element and let the reader know the data is loading.\n# Load 10,000 rows of data into the dataframe.\n#data = load_data(10000)\n# Notify the reader that the data was successfully loaded.\n\n#if st.checkbox('Show raw data'):\n #st.subheader('Raw data')\n #st.write(data)\n\n #st.subheader('Number of pickups by hour')\n #hist_values = np.histogram(data[DATE_COLUMN].dt.hour, bins=24, range=(0,24))[0]\n #st.bar_chart(hist_values)\n\n ## Some number in the range 0-23\n #hour_to_filter = st.slider('hour', 0, 23, 17)\n #filtered_data = data[data[DATE_COLUMN].dt.hour == hour_to_filter]\n\n #st.subheader('Map of all pickups at %s:00' % hour_to_filter)\n #st.map(filtered_data)\n\noption = st.sidebar.selectbox(\n \"Select Data Upload Option\",\n (\"Preview\", \"Pandas Profiling\", \"Engineering\", \"AutoML\", \"Visualization\")\n )\n\nif option=='Exploratory Analysis':\n st.header(\"Data Description\")\n st.dataframe(data.describe(include='all'))\n st.header(\"Duplicated Rows\")\n data[data.duplicated()]\n st.header(\"Null values\")\n data[data.isna().any(axis=1)]\n st.dataframe(pd.DataFrame(data[data.isna().any(axis=1)].describe(include='all')))\n data.hist(bins=15, color='blue', edgecolor='black', linewidth=1.0, 
xlabelsize=8, ylabelsize=8, grid=False) \n plt.tight_layout(rect=(0, 0, 2, 2)) \n plt.suptitle('Univariate Plots', x=0, y=0, fontsize=14) \n st.pyplot(plt)\n\nif option == \"Pandas Profiling\":\n st.header(\"Automated EDA with Pandas Profile\")\n if data is not None:\n profile = ProfileReport(data.sample(n=100000))\n st_profile_report(profile)\n\n#if option == \"Sweetviz Profiling\":\n# st.subheader(\"Automated EDA with Sweetviz\")\n# report = sv.analyze(data)\n# report.show_html()\n# report_file = codecs.open(\"SWEETVIZ_REPORT.html\",'r')\n# page = report_file.read()\n# components.html(page, width=1000, height=500, scrolling=True)\n# #components.html(page,width=width,height=height,scrolling=True)\n\nif option=='Visualization':\n #st.plotly_chart(data, x=data[data.columns[2]],y=data[data.columns[2]])\n #fig, ax = plt.subplots()\n #ax.hist(data, bins=20)\n #st.pyplot(fig)\n st.header(\"Correlation Heatmap\")\n fig, ax = plt.subplots(figsize=(21,9))\n sns.heatmap(data.corr(), annot = True)\n #plt.show()\n st.pyplot(plt)\n data = pd.read_csv(\"https://cdn.iisc.talentsprint.com/CDS/Datasets/movies.csv\")\n plt.clf()\n st.header(\"Popularity Chart (movies.csv)\")\n data.groupby('runtime')['popularity'].mean().plot(figsize = (13,5),xticks=np.arange(0,1000,100))\n # setup the title of the figure\n plt.title(\"Runtime Vs Popularity\",fontsize = 14)\n # setup the x-label and y-label of the plot.\n plt.xlabel('Runtime',fontsize = 13)\n plt.ylabel('Average Popularity',fontsize = 13)\n st.pyplot(plt)\n\n#if option=='AutoML':\n# #data = pd.read_csv(\"datasets/googleplaystore.csv\")\n# #st.write(data[data.columns[0:-1]])\n# #st.write(data[data.columns[-1]])\n# st.write(autoML.runAutoML(data[data.columns[0:-1]], data[data.columns[-1]]))\n# X_train, X_test, y_train, y_test = autoML.setData(data[data.columns[0:-1]], data[data.columns[-1]])\n# automl = autoML.autoMLSearch(X_train, y_train)\n# st.write(automl.search())\n# st.write(automl.rankings)\n# st.write(autoML.bestPipeline(X_train, y_train, X_test, automl))\n\n#df = pd.DataFrame({\n #'first column': [1, 2, 3, 4],\n #'second column': [10, 20, 30, 40]\n #})\n\n#df = pd.read_csv(\"datasets/googleplaystore.csv\")\n#df\n#chartData = pd.DataFrame(df[['Category', 'Rating']].groupby(['Category']))\n#st.line_chart(chartData)\n#chartData\n\n\n#df = pd.read_csv(\"datasets/googleplaystore_user_reviews.csv\")\n#df\n#st.line_chart(df)\n\n#df = pd.read_csv(\"datasets/movies.csv\")\n#df\n#st.line_chart(df)\n","sub_path":"playLit.py","file_name":"playLit.py","file_ext":"py","file_size_in_byte":6773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"442943068","text":"import argparse\nimport os\nimport numpy as np\n\nimport itertools\n\nimport torchvision.transforms as transforms\nfrom torchvision.utils import save_image\n\nfrom torch.utils.data import DataLoader\nfrom torchvision import datasets\nfrom torch.autograd import Variable\n\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch\n\nimport sys\nsys.path.append(\"../\")#../../GAN-SDPC/\n\nfrom SimpsonsDataset import SimpsonsDataset,FastSimpsonsDataset\nfrom utils import *\n\nimport matplotlib.pyplot as plt\nimport time\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--n_epochs\", type=int, default=1000, help=\"number of epochs of training\")\nparser.add_argument(\"--batch_size\", type=int, default=256, help=\"size of the batches\")\nparser.add_argument(\"--lrD\", type=float, default=0.0004, help=\"adam: learning rate for 
D\")\nparser.add_argument(\"--lrG\", type=float, default=0.0004, help=\"adam: learning rate for G\")\nparser.add_argument(\"--eps\", type=float, default=0.00005, help=\"batchnorm: epsilon for numerical stability\")\nparser.add_argument(\"--b1\", type=float, default=0.1, help=\"adam: decay of first order momentum of gradient\")\nparser.add_argument(\"--b2\", type=float, default=0.999, help=\"adam: decay of second order momentum of gradient\")\nparser.add_argument(\"--n_cpu\", type=int, default=8, help=\"number of cpu threads to use during batch generation\")\nparser.add_argument(\"--n_gpu\", type=int, default=1, help=\"number of gpus to use during batch generation\")\nparser.add_argument(\"--latent_dim\", type=int, default=100, help=\"dimensionality of the latent code\")\nparser.add_argument(\"--img_size\", type=int, default=32, help=\"size of each image dimension\")\nparser.add_argument(\"--channels\", type=int, default=3, help=\"number of image channels\")\nparser.add_argument(\"--sample_interval\", type=int, default=50, help=\"interval between image sampling\")\nparser.add_argument(\"--sample_path\", type=str, default='images')\nparser.add_argument(\"--model_save_interval\", type=int, default=2500, help=\"interval between model checkpoints\")\nparser.add_argument('--model_save_path', type=str, default='models')\nopt = parser.parse_args()\nprint(opt)\n\n# Output folders\n#os.makedirs(\"images\", exist_ok=True)\n#os.makedirs(\"model\", exist_ok=True)\nos.makedirs(opt.sample_path, exist_ok=True)\nos.makedirs(opt.model_save_path, exist_ok=True)\n\nimg_shape = (opt.channels, opt.img_size, opt.img_size)\n\ncuda = True if torch.cuda.is_available() else False\n\n\ndef reparameterization(mu, logvar):\n\tstd = torch.exp(logvar / 2)\n\tsampled_z = Variable(Tensor(np.random.normal(0, 1, (mu.size(0), opt.latent_dim))))\n\tz = sampled_z * std + mu\n\treturn z\n\n\nclass Encoder(nn.Module):\n\tdef __init__(self):\n\t\tsuper(Encoder, self).__init__()\n\n\t\tself.model = nn.Sequential(\n\t\t\tnn.Linear(int(np.prod(img_shape)), 512),\n\t\t\tnn.LeakyReLU(0.2, inplace=True),\n\t\t\tnn.Linear(512, 512),\n\t\t\tnn.BatchNorm1d(512, opt.eps),\n\t\t\tnn.LeakyReLU(0.2, inplace=True),\n\t\t)\n\n\t\tself.mu = nn.Linear(512, opt.latent_dim)\n\t\tself.logvar = nn.Linear(512, opt.latent_dim)\n\n\tdef forward(self, img):\n\t\timg_flat = img.view(img.shape[0], -1)\n\t\t\n\t\tif img_flat.is_cuda and opt.n_gpu > 1:\n\t\t\tx = nn.parallel.data_parallel(self.model, img_flat, range(opt.n_gpu))\n\t\telse:\n\t\t\tx = self.model(img_flat)\n\t\t\t\n\t\tmu = self.mu(x)\n\t\tlogvar = self.logvar(x)\n\t\tz = reparameterization(mu, logvar)\n\t\treturn z\n\n\nclass Decoder(nn.Module):\n\tdef __init__(self):\n\t\tsuper(Decoder, self).__init__()\n\n\t\tself.model = nn.Sequential(\n\t\t\tnn.Linear(opt.latent_dim, 512),\n\t\t\tnn.LeakyReLU(0.2, inplace=True),\n\t\t\tnn.Linear(512, 512),\n\t\t\tnn.BatchNorm1d(512, opt.eps),\n\t\t\tnn.LeakyReLU(0.2, inplace=True),\n\t\t\tnn.Linear(512, int(np.prod(img_shape))),\n\t\t\tnn.Tanh(),\n\t\t)\n\n\tdef forward(self, z):\n\t\tif z.is_cuda and opt.n_gpu > 1:\n\t\t\timg_flat = nn.parallel.data_parallel(self.model, z, range(opt.n_gpu))\n\t\telse:\n\t\t\timg_flat = self.model(z)\n\t\t\t\n\t\timg = img_flat.view(img_flat.shape[0], *img_shape)\n\t\treturn img\n\n\nclass Discriminator(nn.Module):\n\tdef __init__(self):\n\t\tsuper(Discriminator, self).__init__()\n\n\t\tself.model = nn.Sequential(\n\t\t\tnn.Linear(opt.latent_dim, 512),\n\t\t\tnn.LeakyReLU(0.2, inplace=True),\n\t\t\tnn.Linear(512, 
256),\n\t\t\tnn.LeakyReLU(0.2, inplace=True),\n\t\t\tnn.Linear(256, 1),\n\t\t\tnn.Sigmoid(),\n\t\t)\n\n\tdef forward(self, z):\n\t\tif z.is_cuda and opt.n_gpu > 1:\n\t\t\tvalidity = nn.parallel.data_parallel(self.model, z, range(opt.n_gpu))\n\t\telse:\n\t\t\tvalidity = self.model(z)\n\t\t\n\t\t#validity = self.model(z)\n\t\treturn validity\n\n\n# Use binary cross-entropy loss\nadversarial_loss = torch.nn.BCELoss()\npixelwise_loss = torch.nn.L1Loss()\n\n# Initialize generator and discriminator\nencoder = Encoder()\ndecoder = Decoder()\ndiscriminator = Discriminator()\n\nif cuda:\n\tencoder.cuda()\n\tdecoder.cuda()\n\tdiscriminator.cuda()\n\tadversarial_loss.cuda()\n\tpixelwise_loss.cuda()\n\n# Configure data loader\ndataloader = load_data(\"../../cropped/cp/\",opt.img_size,opt.batch_size)\n\n# Optimizers\noptimizer_G = torch.optim.Adam(\n\titertools.chain(encoder.parameters(), decoder.parameters()), lr=opt.lrG, betas=(opt.b1, opt.b2)\n)\noptimizer_D = torch.optim.Adam(discriminator.parameters(), lr=opt.lrD, betas=(opt.b1, opt.b2))\n\nTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor\n\n\ndef sample_image(n_row, epoch_done):\n\t\"\"\"Saves a grid of generated digits\"\"\"\n\t# Sample noise\n\tz = Variable(Tensor(np.random.normal(0, 1, (n_row ** 2, opt.latent_dim))))\n\tgen_imgs = decoder(z)\n\tsave_image(gen_imgs.data, \"%s/%d.png\" % (opt.sample_path, epoch_done), nrow=n_row, normalize=True)\n\n\n# ----------\n# Training\n# ----------\n\nG_losses = []\nD_losses = []\ng_losses = []\nd_losses = []\nD_x = []\nD_G_z = []\nd_x = []\nd_g_z = []\n\nsave_dot = 1 # Nombre d'epochs avant de sauvegarder un point des courbes\nbatch_on_save_dot = save_dot*len(dataloader)\n\nt_total = time.time()\nfor epoch in range(1,opt.n_epochs+1):\n\tt_epoch = time.time()\n\tfor i, (imgs, _) in enumerate(dataloader):\n\t\tt_batch = time.time()\n\t\t\n\t\t# Adversarial ground truths\n\t\tvalid_smooth = Variable(Tensor(imgs.shape[0], 1).fill_(float(np.random.uniform(0.7, 1.0, 1))), requires_grad=False)\n\t\tvalid = Variable(Tensor(imgs.size(0), 1).fill_(1), requires_grad=False)\n\t\tfake = Variable(Tensor(imgs.size(0), 1).fill_(0), requires_grad=False)\n\n\t\t# Configure input\n\t\treal_imgs = Variable(imgs.type(Tensor))\n\t\t\n\t\t# -----------------\n\t\t# Train Generator\n\t\t# -----------------\n\n\t\toptimizer_G.zero_grad()\n\n\t\tencoded_imgs = encoder(real_imgs)\n\t\tdecoded_imgs = decoder(encoded_imgs)\n\n\t\t# Loss measures generator's ability to fool the discriminator\n\t\tg_loss = 0.001 * adversarial_loss(discriminator(encoded_imgs), valid) + 0.999 * pixelwise_loss(\n\t\t\tdecoded_imgs, real_imgs\n\t\t)\n\n\t\tg_loss.backward()\n\t\toptimizer_G.step()\n\t\t\n\t\t# ---------------------\n\t\t# Train Discriminator\n\t\t# ---------------------\n\n\t\toptimizer_D.zero_grad()\n\n\t\t# Sample noise as discriminator ground truth\n\t\tz = Variable(Tensor(np.random.normal(0, 1, (imgs.shape[0], opt.latent_dim))))\n\t\t\n\t\t#Discriminator descision\n\t\td_x_tmp = discriminator(z)\n\t\td_g_x_tmp = discriminator(encoded_imgs.detach())\n\t\t\n\t\t# Measure discriminator's ability to classify real from generated samples\n\t\treal_loss = adversarial_loss(d_x_tmp, valid_smooth)\n\t\tfake_loss = adversarial_loss(d_g_x_tmp, fake)\n\t\td_loss = 0.5 * (real_loss + fake_loss)\n\n\t\td_loss.backward()\n\t\toptimizer_D.step()\n\t\t\n\t\tprint(\n\t\t\t\"[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f] [Time: %fs]\"\n\t\t\t% (epoch, opt.n_epochs, i+1, len(dataloader), d_loss.item(), g_loss.item(), 
time.time()-t_batch)\n\t\t)\n\t\t\n\t\t# Save Losses and scores for plotting later\n\t\tg_losses.append(g_loss.item())\n\t\td_losses.append(d_loss.item())\n\t\td_x.append(torch.sum(d_x_tmp).item()/imgs.size(0))\n\t\td_g_z.append(torch.sum(d_g_x_tmp).item()/imgs.size(0))\n\t\t\n\t# Save samples\n\tif epoch % opt.sample_interval == 0:\n\t\tsample_image(n_row=5, epoch_done=epoch)\n\t\n\t# Save Losses and scores for plotting later\t\t\n\tif epoch % save_dot == 0:\n\t\tG_losses.append(sum(g_losses)/batch_on_save_dot)\n\t\tD_losses.append(sum(d_losses)/batch_on_save_dot)\n\t\tg_losses = []\n\t\td_losses = []\n\t\tD_x.append(sum(d_x)/batch_on_save_dot)\n\t\tD_G_z.append(sum(d_g_z)/batch_on_save_dot)\n\t\td_x = []\n\t\td_g_z = []\n\t\t\n\t# Save models\n\tif epoch % opt.model_save_interval == 0:\n\t\tnum = str(int(epoch / opt.model_save_interval))\n\t\tsave_model(discriminator,optimizer_D,epoch,opt.model_save_path+\"/\"+num+\"_D.pt\")\n\t\tsave_model(encoder,optimizer_G,epoch,opt.model_save_path+\"/\"+num+\"_encoder.pt\")\n\t\tsave_model(decoder,optimizer_G,epoch,opt.model_save_path+\"/\"+num+\"_decoder.pt\")\n\t\n\t# Intermediate plot\n\tif epoch % (opt.n_epochs/4) == 0:\n\t\t#Plot losses\t\t\t\n\t\tplot_losses(G_losses,D_losses)\n\t\t#Plot scores\n\t\tplot_scores(D_x,D_G_z)\n\t\t\n\tprint(\"[Epoch Time: \",time.time()-t_epoch,\"s]\")\n\nprint(\"[Total Time: \",time.strftime(\"%Hh:%Mm:%Ss\",time.gmtime(time.time()-t_total)),\"]\")\n\n#Plot losses\t\t\t\nplot_losses(G_losses,D_losses)\n\n#Plot game score\nplot_scores(D_x,D_G_z)\n\nsave_model(discriminator,optimizer_D,epoch,opt.model_save_path+\"/last_D.pt\")\nsave_model(encoder,optimizer_G,epoch,opt.model_save_path+\"/last_encoder.pt\")\nsave_model(decoder,optimizer_G,epoch,opt.model_save_path+\"/last_decoder.pt\")\n","sub_path":"W4_b1-0.1_aae/aae.py","file_name":"aae.py","file_ext":"py","file_size_in_byte":9008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"626875655","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Mar 31 14:19:11 2017\n\n@author: nice142\n\"\"\"\n\n# momentum\n# monthly rebalacing\n# universe: all (no consider about size)\n\n# purpose: momentum + reversal / momentum\n\nimport pandas as pd\nimport numpy as np\n\nrtn_month = pd.read_excel('exercise_v01.xlsx', sheetname = '월별수익률1', header = None)\n\nmomrev = np.ones((1155,189))\nmomrev = pd.DataFrame(momrev)\n\nmom = np.ones((1155,189))\nmom = pd.DataFrame(mom)\n\nretm_data = np.zeros((5,189))\nretm_data = pd.DataFrame(retm_data)\n\nretm_data_r = np.zeros((5,189))\nretm_data_r = pd.DataFrame(retm_data_r)\n\nfor i in range(0, 189):\n for n_momrev in range(0, 12):\n momrev[i] = momrev[i] * rtn_month[n_momrev + i]\n for n_mom in range(0, 11):\n mom[i] = mom[i] * rtn_month[n_mom + i]\n\nfor i in range(0, 189):\n # only momentum\n data = pd.concat([mom[i], rtn_month[i + 12]], axis = 1, join = 'inner', ignore_index = True)\n data.columns = ['mom', 'retm']\n data = data[data['mom'].notnull()] \n data = data[data['retm'].notnull()] \n \n data_size = len(data) # Row count\n data = data.assign(rnk = np.floor(data['mom'].rank(method = 'first')/((data_size + 1) / 5)))\n \n data_1 = data.query('5>rnk>3') # 4\n data_2 = data.query('4>rnk>2') # 3\n data_3 = data.query('3>rnk>1') # 2\n data_4 = data.query('2>rnk>0') # 1\n data_5 = data.query('1>rnk>-1') # 0\n \n retm_data.iloc[0, i] = np.mean(data_1['retm']) # 각각 누적수익률 기록\n retm_data.iloc[1, i] = np.mean(data_2['retm'])\n retm_data.iloc[2, i] = np.mean(data_3['retm'])\n 
    retm_data.iloc[3, i] = np.mean(data_4['retm'])\n    retm_data.iloc[4, i] = np.mean(data_5['retm'])\n\n    # momentum + reversal\n    data_r = pd.concat([momrev[i], rtn_month[i + 12]], axis = 1, join = 'inner', ignore_index = True)\n    data_r.columns = ['momrev', 'retm']\n    data_r = data_r[data_r['momrev'].notnull()]\n    data_r = data_r[data_r['retm'].notnull()]\n\n    data_size_r = len(data_r) # Row count\n    data_r = data_r.assign(rnk = np.floor(data_r['momrev'].rank(method = 'first')/((data_size_r + 1) / 5)))\n\n    data_1_r = data_r.query('5>rnk>3') # 4\n    data_2_r = data_r.query('4>rnk>2') # 3\n    data_3_r = data_r.query('3>rnk>1') # 2\n    data_4_r = data_r.query('2>rnk>0') # 1\n    data_5_r = data_r.query('1>rnk>-1') # 0\n\n    retm_data_r.iloc[0, i] = np.mean(data_1_r['retm']) # record cumulative returns for each quintile\n    retm_data_r.iloc[1, i] = np.mean(data_2_r['retm'])\n    retm_data_r.iloc[2, i] = np.mean(data_3_r['retm'])\n    retm_data_r.iloc[3, i] = np.mean(data_4_r['retm'])\n    retm_data_r.iloc[4, i] = np.mean(data_5_r['retm'])\n\n    if i == 188:\n        pass\n\nretm_final = np.product(retm_data, axis = 1)\nretm_final_r = np.product(retm_data_r, axis = 1)","sub_path":"기존자료/momrev.py","file_name":"momrev.py","file_ext":"py","file_size_in_byte":2816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"421087715","text":"import h5py\nfrom typing import Any, List, Optional, Union, Tuple\nfrom PySide2.QtGui import QVector3D, QMatrix4x4\nfrom PySide2.Qt3DCore import Qt3DCore\nfrom PySide2.QtWidgets import QListWidget\n\nfrom nexus_constructor.common_attrs import CommonAttrs\nfrom nexus_constructor.component.pixel_shape import PixelShape\nfrom nexus_constructor.component.transformations_list import TransformationsList\nfrom nexus_constructor.nexus import nexus_wrapper as nx\nfrom nexus_constructor.nexus.nexus_wrapper import get_nx_class\nfrom nexus_constructor.field_utils import get_fields_with_update_functions\nfrom nexus_constructor.pixel_data import PixelMapping, PixelGrid, PixelData\nfrom nexus_constructor.pixel_data_to_nexus_utils import (\n    get_x_offsets_from_pixel_grid,\n    get_y_offsets_from_pixel_grid,\n    get_z_offsets_from_pixel_grid,\n    get_detector_ids_from_pixel_grid,\n    get_detector_number_from_pixel_mapping,\n    PIXEL_FIELDS,\n)\nfrom nexus_constructor.transformation_types import TransformationType\nfrom nexus_constructor.transformations import Transformation\nfrom nexus_constructor.ui_utils import (\n    qvector3d_to_numpy_array,\n    generate_unique_name,\n    show_warning_dialog,\n)\nfrom nexus_constructor.geometry.cylindrical_geometry import (\n    CylindricalGeometry,\n    calculate_vertices,\n)\nfrom nexus_constructor.geometry import (\n    OFFGeometryNexus,\n    OFFGeometry,\n    record_faces_in_file,\n    record_vertices_in_file,\n)\nfrom nexus_constructor.geometry.utils import validate_nonzero_qvector\nfrom nexus_constructor.component.component_shape import (\n    CYLINDRICAL_GEOMETRY_NEXUS_NAME,\n    OFF_GEOMETRY_NEXUS_NAME,\n    PIXEL_SHAPE_GROUP_NAME,\n    SHAPE_GROUP_NAME,\n    ComponentShape,\n)\nimport numpy as np\n\n\nclass DependencyError(Exception):\n    \"\"\"\n    Raised when trying to carry out an operation which would invalidate the depends_on chain\n    \"\"\"\n\n    pass\n\n\ndef _normalise(input_vector: QVector3D) -> Tuple[QVector3D, float]:\n    \"\"\"\n    Normalise to unit vector\n\n    :param input_vector: Input vector\n    :return: Unit vector, magnitude\n    \"\"\"\n    magnitude = input_vector.length()\n    if magnitude == 0:\n        return QVector3D(0.0, 0.0, 0.0), 0.0\n\n    return input_vector.normalized(), magnitude\n\n\ndef 
_generate_incremental_name(base_name, group: h5py.Group):\n number = 1\n while f\"{base_name}_{number}\" in group:\n number += 1\n return f\"{base_name}_{number}\"\n\n\nclass Component:\n \"\"\"\n Provides an interface to an existing component group in a NeXus file\n \"\"\"\n\n def __init__(\n self,\n nexus_file: nx.NexusWrapper,\n group: h5py.Group,\n shape: Optional[ComponentShape] = None,\n ):\n self.file = nexus_file\n self.group = group\n if shape is not None:\n self._shape = shape\n else:\n self._shape = ComponentShape(nexus_file, group)\n\n def __eq__(self, other):\n try:\n return other.absolute_path == self.absolute_path\n except Exception:\n return False\n\n @property\n def name(self):\n return nx.get_name_of_node(self.group)\n\n @name.setter\n def name(self, new_name: str):\n self.file.rename_node(self.group, new_name)\n\n @property\n def absolute_path(self):\n \"\"\"\n Get absolute path of the component group in the NeXus file,\n this is guaranteed to be unique so it can be used as an ID for this Component\n :return: absolute path of the transform dataset in the NeXus file,\n \"\"\"\n return self.group.name\n\n def get_field(self, name: str):\n return self.file.get_field_value(self.group, name)\n\n def set_field(self, name: str, value: Any, dtype=None):\n self.file.set_field_value(self.group, name, value, dtype)\n\n def delete_field(self, name: str):\n self.file.delete_field_value(self.group, name)\n\n @property\n def nx_class(self):\n return get_nx_class(self.group)\n\n @nx_class.setter\n def nx_class(self, nx_class: str):\n self.file.set_nx_class(self.group, nx_class)\n\n @property\n def description(self):\n return self.file.get_field_value(self.group, \"description\")\n\n @description.setter\n def description(self, description: str):\n if description:\n self.file.set_field_value(self.group, \"description\", description, str)\n\n @property\n def transforms_full_chain(self) -> TransformationsList:\n \"\"\"\n Gets all transforms in the depends_on chain for this component\n :return: List of transforms\n \"\"\"\n transforms = TransformationsList(self)\n depends_on = self.get_field(CommonAttrs.DEPENDS_ON)\n self._get_transform(depends_on, transforms)\n return transforms\n\n def _get_transform(\n self,\n depends_on: str,\n transforms: List[Transformation],\n local_only: bool = False,\n ):\n \"\"\"\n Recursive function, appends each transform in depends_on chain to transforms list\n :param depends_on: The next depends_on string to find the next transformation in the chain\n :param transforms: The list to populate with transformations\n :param local_only: If True then only add transformations which are stored within this component\n \"\"\"\n if depends_on is not None and depends_on != \".\":\n transform_dataset = self.file.nexus_file[depends_on]\n if (\n local_only\n and transform_dataset.parent.parent.name != self.absolute_path\n ):\n # We're done, the next transformation is not stored in this component\n return\n new_transform = Transformation(self.file, transform_dataset)\n new_transform.parent = transforms\n transforms.append(new_transform)\n if CommonAttrs.DEPENDS_ON in transform_dataset.attrs.keys():\n self._get_transform(\n self.file.get_attribute_value(\n transform_dataset, CommonAttrs.DEPENDS_ON\n ),\n transforms,\n local_only,\n )\n\n @property\n def transform(self) -> Qt3DCore.QTransform:\n \"\"\"\n Get a QTransform describing the position and orientation of the component\n \"\"\"\n transform_matrix = QMatrix4x4()\n for transform in self.transforms_full_chain:\n 
transform_matrix *= transform.qmatrix\n transformation = Qt3DCore.QTransform()\n transformation.setMatrix(transform_matrix)\n return transformation\n\n @property\n def transforms(self) -> TransformationsList:\n \"\"\"\n Gets transforms in the depends_on chain but only those which are local to\n this component's group in the NeXus file\n :return:\n \"\"\"\n transforms = TransformationsList(self)\n depends_on = self.get_field(CommonAttrs.DEPENDS_ON)\n self._get_transform(depends_on, transforms, local_only=True)\n return transforms\n\n def add_translation(\n self, vector: QVector3D, name: str = None, depends_on: Transformation = None\n ) -> Transformation:\n \"\"\"\n Note, currently assumes translation is in metres\n :param vector: direction and magnitude of translation as a 3D vector\n :param name: name of the translation group (Optional)\n :param depends_on: existing transformation which the new one depends on (otherwise relative to origin)\n \"\"\"\n transforms_group = self.file.create_transformations_group_if_does_not_exist(\n self.group\n )\n if name is None:\n name = _generate_incremental_name(\n TransformationType.TRANSLATION, transforms_group\n )\n unit_vector, magnitude = _normalise(vector)\n field = self.file.set_field_value(transforms_group, name, magnitude, float)\n self.file.set_attribute_value(field, CommonAttrs.UNITS, \"m\")\n self.file.set_attribute_value(\n field, CommonAttrs.VECTOR, qvector3d_to_numpy_array(unit_vector)\n )\n self.file.set_attribute_value(\n field, CommonAttrs.TRANSFORMATION_TYPE, TransformationType.TRANSLATION\n )\n\n translation_transform = Transformation(self.file, field)\n translation_transform.ui_value = magnitude\n translation_transform.depends_on = depends_on\n return translation_transform\n\n def add_rotation(\n self,\n axis: QVector3D,\n angle: float,\n name: str = None,\n depends_on: Transformation = None,\n ) -> Transformation:\n \"\"\"\n Note, currently assumes angle is in degrees\n :param axis: axis\n :param angle:\n :param name: Name of the rotation group (Optional)\n :param depends_on: existing transformation which the new one depends on (otherwise relative to origin)\n \"\"\"\n transforms_group = self.file.create_transformations_group_if_does_not_exist(\n self.group\n )\n if name is None:\n name = _generate_incremental_name(\n TransformationType.ROTATION, transforms_group\n )\n field = self.file.set_field_value(transforms_group, name, angle, float)\n self.file.set_attribute_value(field, CommonAttrs.UNITS, \"degrees\")\n self.file.set_attribute_value(\n field, CommonAttrs.VECTOR, qvector3d_to_numpy_array(axis)\n )\n self.file.set_attribute_value(\n field, CommonAttrs.TRANSFORMATION_TYPE, TransformationType.ROTATION\n )\n rotation_transform = Transformation(self.file, field)\n rotation_transform.depends_on = depends_on\n rotation_transform.ui_value = angle\n return rotation_transform\n\n def _transform_is_in_this_component(self, transform: Transformation) -> bool:\n return transform._dataset.parent.parent.name == self.absolute_path\n\n def remove_transformation(self, transform: Transformation):\n if not self._transform_is_in_this_component(transform):\n raise PermissionError(\n \"Transform is not in this component, do not have permission to delete\"\n )\n\n dependents = transform.get_dependents()\n if dependents:\n raise DependencyError(\n f\"Cannot delete transformation, it is a dependency of {dependents}\"\n )\n\n # Remove whole transformations group if this is the only transformation in it\n if len(transform._dataset.parent.keys()) == 1:\n 
self.file.delete_node(transform._dataset.parent)\n # Otherwise just remove the transformation from the group\n else:\n self.file.delete_node(transform._dataset)\n\n @property\n def depends_on(self):\n depends_on_path = self.file.get_field_value(self.group, CommonAttrs.DEPENDS_ON)\n if depends_on_path is None:\n return None\n return Transformation(self.file, self.file.nexus_file[depends_on_path])\n\n @depends_on.setter\n def depends_on(self, transformation: Transformation):\n existing_depends_on = self.file.get_attribute_value(\n self.group, CommonAttrs.DEPENDS_ON\n )\n if existing_depends_on is not None:\n Transformation(\n self.file, self.file[existing_depends_on]\n ).deregister_dependent(self)\n\n if transformation is None:\n self.file.set_field_value(self.group, CommonAttrs.DEPENDS_ON, \".\", str)\n else:\n self.file.set_field_value(\n self.group, CommonAttrs.DEPENDS_ON, transformation.absolute_path, str\n )\n transformation.register_dependent(self)\n\n def set_cylinder_shape(\n self,\n axis_direction: QVector3D = QVector3D(0.0, 0.0, 1.0),\n height: float = 1.0,\n radius: float = 1.0,\n units: Union[str, bytes] = \"m\",\n pixel_data: PixelData = None,\n ) -> CylindricalGeometry:\n \"\"\"\n Sets the shape of the component to be a cylinder\n Overrides any existing shape\n \"\"\"\n self.remove_shape()\n validate_nonzero_qvector(axis_direction)\n\n shape_group = self.create_shape_nx_group(\n CYLINDRICAL_GEOMETRY_NEXUS_NAME, type(pixel_data) is PixelGrid\n )\n\n pixel_mapping = None\n if isinstance(pixel_data, PixelMapping):\n pixel_mapping = pixel_data\n\n vertices = calculate_vertices(axis_direction, height, radius)\n vertices_field = self.file.set_field_value(\n shape_group, CommonAttrs.VERTICES, vertices\n )\n # Specify 0th vertex is base centre, 1st is base edge, 2nd is top centre\n self.file.set_field_value(shape_group, \"cylinders\", np.array([0, 1, 2]))\n self.file.set_attribute_value(vertices_field, CommonAttrs.UNITS, units)\n return CylindricalGeometry(self.file, shape_group, pixel_mapping)\n\n def set_off_shape(\n self,\n loaded_geometry: OFFGeometry,\n units: str = \"\",\n filename: str = \"\",\n pixel_data: PixelData = None,\n ) -> OFFGeometryNexus:\n \"\"\"\n Sets the shape of the component to be a mesh\n Overrides any existing shape\n \"\"\"\n self.remove_shape()\n\n shape_group = self.create_shape_nx_group(\n OFF_GEOMETRY_NEXUS_NAME, isinstance(pixel_data, PixelGrid)\n )\n\n pixel_mapping = None\n if isinstance(pixel_data, PixelMapping):\n pixel_mapping = pixel_data\n\n record_faces_in_file(self.file, shape_group, loaded_geometry.faces)\n record_vertices_in_file(self.file, shape_group, loaded_geometry.vertices)\n return OFFGeometryNexus(self.file, shape_group, units, filename, pixel_mapping)\n\n def create_shape_nx_group(\n self, nexus_name: str, shape_is_single_pixel: bool = False\n ):\n \"\"\"\n Creates an NXGroup for the shape information. If the shape is a Pixel Grid/Single Pixel then this is stored\n with the name `pixel_shape`, otherwise it is stored as `shape`.\n :param nexus_name: The Nexus name for the shape. 
This will either be (NXcylindrical/NXoff)_geometry\n :param shape_is_single_pixel: Whether or not the shape is a single pixel.\n :return: The shape group.\n \"\"\"\n\n if shape_is_single_pixel:\n shape_group = self.file.create_nx_group(\n PIXEL_SHAPE_GROUP_NAME, nexus_name, self.group\n )\n self._shape = PixelShape(self.file, self.group)\n else:\n shape_group = self.file.create_nx_group(\n SHAPE_GROUP_NAME, nexus_name, self.group\n )\n self._shape = ComponentShape(self.file, self.group)\n return shape_group\n\n @property\n def shape(\n self,\n ) -> Tuple[\n Optional[Union[OFFGeometry, CylindricalGeometry]], Optional[List[QVector3D]]\n ]:\n \"\"\"\n Get the shape of the component if there is one defined, and optionally a\n list of transformations relative to the component's depends_on chain which\n describe where the shape should be repeated\n (used in subclass for components where the shape describes each pixel)\n\n :return: Component shape, each transformation where the shape is repeated\n \"\"\"\n return self._shape.get_shape()\n\n def remove_shape(self):\n self._shape.remove_shape()\n\n def duplicate(self, components_list: List[\"Component\"]) -> \"Component\":\n return Component(\n self.file,\n self.file.duplicate_nx_group(\n self.group, generate_unique_name(self.name, components_list)\n ),\n )\n\n def record_pixel_grid(self, pixel_grid: PixelGrid):\n \"\"\"\n Records the pixel grid data to the NeXus file.\n :param pixel_grid: The PixelGrid created from the input provided to the Add/Edit Component Window.\n \"\"\"\n self.set_field(\n \"x_pixel_offset\", get_x_offsets_from_pixel_grid(pixel_grid), \"float64\"\n )\n self.set_field(\n \"y_pixel_offset\", get_y_offsets_from_pixel_grid(pixel_grid), \"float64\"\n )\n self.set_field(\n \"z_pixel_offset\", get_z_offsets_from_pixel_grid(pixel_grid), \"float64\"\n )\n self.set_field(\n \"detector_number\", get_detector_ids_from_pixel_grid(pixel_grid), \"int64\"\n )\n\n def record_pixel_mapping(self, pixel_mapping: PixelMapping):\n \"\"\"\n Records the pixel mapping data to the NeXus file.\n :param pixel_mapping: The PixelMapping created from the input provided to the Add/Edit Component Window.\n \"\"\"\n self.set_field(\n \"detector_number\",\n get_detector_number_from_pixel_mapping(pixel_mapping),\n \"int64\",\n )\n\n def clear_pixel_data(self):\n \"\"\"\n Removes the existing pixel data from the NeXus file. 
Used when editing pixel data.\n \"\"\"\n for field in PIXEL_FIELDS:\n self.delete_field(field)\n\n\ndef add_fields_to_component(component: Component, fields_widget: QListWidget):\n \"\"\"\n Adds fields from a list widget to a component.\n :param component: Component to add the field to.\n :param fields_widget: The field list widget to extract field information such the name and value of each field.\n \"\"\"\n for i in range(fields_widget.count()):\n widget = fields_widget.itemWidget(fields_widget.item(i))\n try:\n component.set_field(\n name=widget.name, value=widget.value, dtype=widget.dtype\n )\n except ValueError as error:\n show_warning_dialog(\n f\"Warning: field {widget.name} not added\",\n title=\"Field invalid\",\n additional_info=str(error),\n parent=fields_widget.parent().parent(),\n )\n\n\ndef get_fields_and_update_functions_for_component(component: Component):\n return get_fields_with_update_functions(component.group)\n","sub_path":"nexus_constructor/component/component.py","file_name":"component.py","file_ext":"py","file_size_in_byte":17957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"219113167","text":"# -*- encoding: utf-8 -*-\nimport sys\nr_input = sys.stdin.readline\n\nN, M = map(int, r_input().split()) # WOOK이 탐사할 영역의 세로 길이, 가로 길이\n\nline = [0] * M # DP\n\nfor i in range(N):\n new_line = list(map(int, r_input().split()))\n line[0] += new_line[0]\n\n for j in range(1, M):\n line[j] = max(line[j-1], line[j]) + new_line[j]\n\nprint(line[-1])\n","sub_path":"Algorithm/Baekjoon/14430 자원 캐기/14430.py","file_name":"14430.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"599520584","text":"# -*- coding: utf-8 -*-\n# Copyright (c) 2016 Civic Knowledge. 
This file is licensed under the terms of the\n# MIT License, included in this distribution as LICENSE.txt\n\n\"\"\"\n\nGuess the whether rows in a collection are header, comments, footers, etc\n\n\"\"\"\n\nimport datetime\nimport logging\nimport math\nfrom collections import deque, OrderedDict\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\n\nfrom six import string_types, binary_type, text_type, b\n\n\nclass NoMatchError(Exception):\n pass\n\n\nclass unknown(binary_type):\n __name__ = 'unknown'\n\n def __new__(cls):\n return super(unknown, cls).__new__(cls, cls.__name__)\n\n def __str__(self):\n return self.__name__\n\n def __eq__(self, other):\n return binary_type(self) == binary_type(other)\n\n\nclass geotype(binary_type):\n __name__ = 'geo'\n\n def __new__(cls):\n return super(geotype, cls).__new__(cls, cls.__name__)\n\n def __str__(self):\n return self.__name__\n\n def __eq__(self, other):\n return binary_type(self) == binary_type(other)\n\n\nnans = ['#N/A', '#N/A', 'N/A', '#NA', '-1.#IND', '-1.#QNAN', '-NaN', '-nan',\n '1.#IND', '1.#QNAN', 'NA', 'NULL', 'NaN', 'n/a', 'nan', 'null']\n\n\ndef test_nan(v):\n v = v.decode('ascii') if isinstance(v, bytes) else v\n return int(v is math.nan or v in nans)\n\n\ndef test_float(v):\n # Fixed-width integer codes are actually strings.\n # if v and v[0] == '0' and len(v) > 1:\n # return 0\n\n try:\n float(v)\n return 1\n except:\n return 0\n\n\ndef test_int(v):\n # Fixed-width integer codes are actually strings.\n # if v and v[0] == '0' and len(v) > 1:\n # return 0\n\n try:\n if float(v) == int(float(v)):\n return 1\n else:\n return 0\n except:\n return 0\n\n\ndef test_string(v):\n if isinstance(v, string_types):\n return 1\n if isinstance(v, binary_type):\n return 1\n else:\n return 0\n\n\ndef test_datetime(v):\n \"\"\"Test for ISO datetime.\"\"\"\n if not isinstance(v, string_types):\n return 0\n\n if len(v) > 22:\n # Not exactly correct; ISO8601 allows fractional seconds\n # which could result in a longer string.\n return 0\n\n if '-' not in v and ':' not in v:\n return 0\n\n for c in set(v): # Set of Unique characters\n if not c.isdigit() and c not in 'T:-Z':\n return 0\n\n return 1\n\n\ndef test_time(v):\n if not isinstance(v, string_types):\n return 0\n\n if len(v) > 15:\n return 0\n\n if ':' not in v:\n return 0\n\n for c in set(v): # Set of Unique characters\n if not c.isdigit() and c not in 'T:Z.':\n return 0\n\n return 1\n\n\ndef test_date(v):\n if not isinstance(v, string_types):\n return 0\n\n if len(v) > 10:\n # Not exactly correct; ISO8601 allows fractional seconds\n # which could result in a longer string.\n return 0\n\n if '-' not in v:\n return 0\n\n for c in set(v): # Set of Unique characters\n if not c.isdigit() and c not in '-':\n return 0\n\n return 1\n\n\ndef test_geo(v):\n return 0\n\n\ntests = [\n (math.nan, test_nan),\n (int, test_int),\n (float, test_float),\n (binary_type, test_string),\n (geotype, test_geo)\n]\n\n\nclass Column(object):\n position = None\n header = None\n type_counts = None\n type_ratios = None\n length = 0\n count = 0\n strings = None\n\n def __init__(self):\n self.type_counts = {k: 0 for k, v in tests}\n self.type_counts[datetime.datetime] = 0\n self.type_counts[datetime.date] = 0\n self.type_counts[datetime.time] = 0\n self.type_counts[None] = 0\n self.type_counts[text_type] = 0\n self.strings = deque(maxlen=1000)\n self.position = None\n self.header = None\n self.count = 0\n self.length = 0\n self.date_successes = 0\n self.description = None\n\n def inc_type_count(self, t):\n 
self.type_counts[t] += 1\n\n def test(self, v):\n from dateutil import parser\n\n self.count += 1\n\n if v is None or v is '':\n self.type_counts[None] += 1\n return None\n\n try:\n v = '{}'.format(v).encode('ascii')\n except UnicodeEncodeError:\n self.type_counts[text_type] += 1\n return text_type\n\n self.length = max(self.length, len(v))\n\n try:\n v = v.strip()\n except AttributeError:\n pass\n\n if v == '':\n self.type_counts[None] += 1\n return None\n\n for test, testf in tests:\n t = testf(v)\n\n if t > 0:\n type_ = test\n\n if test == binary_type:\n if v not in self.strings:\n self.strings.append(v)\n\n if (self.count < 1000 or self.date_successes != 0) and any((c in b('-/:T')) for c in v):\n try:\n maybe_dt = parser.parse(v, default=datetime.datetime.fromtimestamp(0))\n except (TypeError, ValueError, OSError, OverflowError): # Windows throws an OSError\n maybe_dt = None\n\n if maybe_dt:\n # Check which parts of the default the parser didn't change to find\n # the real type\n # HACK The time check will be wrong for the time of\n # the start of the epoch, 16:00.\n if maybe_dt.time() == datetime.datetime.fromtimestamp(0).time():\n type_ = datetime.date\n elif maybe_dt.date() == datetime.datetime.fromtimestamp(0).date():\n type_ = datetime.time\n else:\n type_ = datetime.datetime\n\n self.date_successes += 1\n\n self.type_counts[type_] += 1\n\n return type_\n\n def _resolved_type(self):\n \"\"\"Return the type for the columns, and a flag to indicate that the\n column has codes.\"\"\"\n import datetime\n\n self.type_ratios = {test: (float(self.type_counts[test]) / float(self.count)) if self.count else None\n for test, testf in tests + [(None, None)]}\n\n # If it is more than 5% str, it's a str\n\n try:\n if self.type_ratios.get(text_type, 0) + self.type_ratios.get(binary_type, 0) > .05:\n if self.type_counts[text_type] > 0:\n return text_type, False\n\n elif self.type_counts[binary_type] > 0:\n return binary_type, False\n except TypeError as e:\n # This is probably the result of the type being unknown\n pass\n\n if self.type_counts[datetime.datetime] > 0:\n num_type = datetime.datetime\n\n elif self.type_counts[datetime.date] > 0:\n num_type = datetime.date\n\n elif self.type_counts[datetime.time] > 0:\n num_type = datetime.time\n\n elif self.type_counts[float] > 0:\n num_type = float\n\n elif self.type_counts[int] > 0:\n # Int columns can't represent Nan, but float can\n if self.type_counts[math.nan] > 0:\n num_type = float\n num_type = int\n\n elif self.type_counts[text_type] > 0:\n num_type = text_type\n\n elif self.type_counts[binary_type] > 0:\n num_type = binary_type\n\n else:\n num_type = unknown\n\n if self.type_counts[binary_type] > 0 and num_type != binary_type:\n has_codes = True\n else:\n has_codes = False\n\n return num_type, has_codes\n\n @property\n def resolved_type(self):\n return self._resolved_type()[0]\n\n @property\n def resolved_type_name(self):\n try:\n return self.resolved_type.__name__\n except AttributeError:\n return self.resolved_type\n\n @property\n def has_codes(self):\n return self._resolved_type()[1]\n\n def __repr__(self):\n return \"\".format(self.position, self.header, self.resolved_type_name)\n\n\nclass TypeIntuiter(object):\n \"\"\"Determine the types of rows in a table.\"\"\"\n header = None\n counts = None\n\n def __init__(self):\n self._columns = OrderedDict()\n\n def process_header(self, row):\n\n header = row # Huh? 
Don't remember what this is for.\n\n for i, value in enumerate(row):\n if i not in header:\n self._columns[i] = Column()\n self._columns[i].position = i\n self._columns[i].header = value\n\n return self\n\n def process_row(self, n, row):\n\n for i, value in enumerate(row):\n try:\n if i not in self._columns:\n self._columns[i] = Column()\n self._columns[i].position = i\n self._columns[i].test(value)\n\n except Exception as e:\n # This usually doesn't matter, since there are usually plenty of other rows to intuit from\n # print 'Failed to add row: {}: {} {}'.format(row, type(e), e)\n print(i, value, e)\n raise\n\n def run(self, source, total_rows=None):\n\n MIN_SKIP_ROWS = 10000\n\n if total_rows and total_rows > MIN_SKIP_ROWS:\n skip_rows = int(total_rows / MIN_SKIP_ROWS)\n\n skip_rows = skip_rows if skip_rows > 1 else None\n\n else:\n skip_rows = None\n\n for i, row in enumerate(iter(source)):\n if skip_rows and i % skip_rows != 0:\n continue\n\n if i == 0:\n self.process_header(row)\n continue\n\n self.process_row(i, row)\n\n return self\n\n @property\n def columns(self):\n return self._columns\n\n def __getitem__(self, item):\n\n try:\n return self._columns[item]\n except KeyError:\n for k, v in self._columns.items():\n if item == v.header:\n return v\n\n raise KeyError(item)\n\n @property\n def is_ascii(self):\n \"\"\"return true if none of the columns have a resolved type of \"\"\"\n pass\n\n def __str__(self):\n from tabulate import tabulate\n\n # return SingleTable([[ str(x) for x in row] for row in self.rows] ).table\n\n results = self.results_table()\n\n if len(results) > 1:\n o = '\\n' + text_type(tabulate(results[1:], results[0], tablefmt='pipe'))\n else:\n o = ''\n\n return 'TypeIntuiter ' + o\n\n @staticmethod\n def normalize_type(typ):\n\n if isinstance(typ, string_types):\n import datetime\n\n m = dict(list(__builtins__.items()) + list(datetime.__dict__.items()))\n if typ == 'unknown':\n typ = binary_type\n else:\n typ = m[typ]\n\n return typ\n\n @staticmethod\n def promote_type(orig_type, new_type):\n \"\"\"Given a table with an original type, decide whether a new determination of a new applicable type\n should overide the existing one\"\"\"\n\n if not new_type:\n return orig_type\n\n if not orig_type:\n return new_type\n\n try:\n orig_type = orig_type.__name__\n except AttributeError:\n pass\n\n try:\n new_type = new_type.__name__\n except AttributeError:\n pass\n\n type_precidence = ['unknown', 'int', 'float', 'date', 'time', 'datetime', 'str', 'bytes', 'unicode']\n\n # TODO This will fail for dates and times.\n\n if type_precidence.index(new_type) > type_precidence.index(orig_type):\n return new_type\n else:\n return orig_type\n\n def results_table(self):\n\n fields = 'position header length resolved_type has_codes count ints floats strs unicode nones nans datetimes dates times '.split()\n\n header = list(fields)\n # Shorten a few of the header names\n header[0] = '#'\n header[2] = 'size'\n header[4] = 'codes'\n header[9] = 'uni'\n header[12] = 'dt'\n\n rows = list()\n\n rows.append(header)\n\n for d in self.to_rows():\n rows.append([d[k] for k in fields])\n\n return rows\n\n def to_rows(self):\n\n for k, v in self.columns.items():\n d = {\n 'position': v.position,\n 'header': v.header,\n 'length': v.length,\n 'resolved_type': v.resolved_type_name,\n 'has_codes': v.has_codes,\n 'count': v.count,\n 'ints': v.type_counts.get(int, None),\n 'floats': v.type_counts.get(float, None),\n 'strs': v.type_counts.get(binary_type, None),\n 'unicode': v.type_counts.get(text_type, 
None),\n 'nones': v.type_counts.get(None, None),\n 'nans': v.type_counts.get(math.nan, None),\n 'datetimes': v.type_counts.get(datetime.datetime, None),\n 'dates': v.type_counts.get(datetime.date, None),\n 'times': v.type_counts.get(datetime.time, None),\n 'strvals': b(',').join(list(v.strings)[:20])\n }\n yield d\n","sub_path":"tableintuit/types.py","file_name":"types.py","file_ext":"py","file_size_in_byte":13401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"458621918","text":"import logging\nimport json\nimport sys\nimport time\nfrom shutil import get_terminal_size\n\nfrom ..logger import create_logger\nlogger = create_logger()\n\nfrom .parseformat import ellipsis_truncate\n\n\ndef justify_to_terminal_size(message):\n terminal_space = get_terminal_size(fallback=(-1, -1))[0]\n # justify only if we are outputting to a terminal\n if terminal_space != -1:\n return message.ljust(terminal_space)\n return message\n\n\nclass ProgressIndicatorBase:\n LOGGER = 'borg.output.progress'\n JSON_TYPE = None\n json = False\n\n operation_id_counter = 0\n\n @classmethod\n def operation_id(cls):\n \"\"\"Unique number, can be used by receiving applications to distinguish different operations.\"\"\"\n cls.operation_id_counter += 1\n return cls.operation_id_counter\n\n def __init__(self, msgid=None):\n self.handler = None\n self.logger = logging.getLogger(self.LOGGER)\n self.id = self.operation_id()\n self.msgid = msgid\n\n # If there are no handlers, set one up explicitly because the\n # terminator and propagation needs to be set. If there are,\n # they must have been set up by BORG_LOGGING_CONF: skip setup.\n if not self.logger.handlers:\n self.handler = logging.StreamHandler(stream=sys.stderr)\n self.handler.setLevel(logging.INFO)\n logger = logging.getLogger('borg')\n # Some special attributes on the borg logger, created by setup_logging\n # But also be able to work without that\n try:\n formatter = logger.formatter\n terminator = '\\n' if logger.json else '\\r'\n self.json = logger.json\n except AttributeError:\n terminator = '\\r'\n else:\n self.handler.setFormatter(formatter)\n self.handler.terminator = terminator\n\n self.logger.addHandler(self.handler)\n if self.logger.level == logging.NOTSET:\n self.logger.setLevel(logging.WARN)\n self.logger.propagate = False\n\n # If --progress is not set then the progress logger level will be WARN\n # due to setup_implied_logging (it may be NOTSET with a logging config file,\n # but the interactions there are generally unclear), so self.emit becomes\n # False, which is correct.\n # If --progress is set then the level will be INFO as per setup_implied_logging;\n # note that this is always the case for serve processes due to a \"args.progress |= is_serve\".\n # In this case self.emit is True.\n self.emit = self.logger.getEffectiveLevel() == logging.INFO\n\n def __del__(self):\n if self.handler is not None:\n self.logger.removeHandler(self.handler)\n self.handler.close()\n\n def output_json(self, *, finished=False, **kwargs):\n assert self.json\n if not self.emit:\n return\n kwargs.update(dict(\n operation=self.id,\n msgid=self.msgid,\n type=self.JSON_TYPE,\n finished=finished,\n time=time.time(),\n ))\n print(json.dumps(kwargs), file=sys.stderr, flush=True)\n\n def finish(self):\n if self.json:\n self.output_json(finished=True)\n else:\n self.output('')\n\n\nclass ProgressIndicatorMessage(ProgressIndicatorBase):\n JSON_TYPE = 'progress_message'\n\n def output(self, msg):\n if self.json:\n 
self.output_json(message=msg)\n else:\n self.logger.info(justify_to_terminal_size(msg))\n\n\nclass ProgressIndicatorPercent(ProgressIndicatorBase):\n JSON_TYPE = 'progress_percent'\n\n def __init__(self, total=0, step=5, start=0, msg=\"%3.0f%%\", msgid=None):\n \"\"\"\n Percentage-based progress indicator\n\n :param total: total amount of items\n :param step: step size in percent\n :param start: at which percent value to start\n :param msg: output message, must contain one %f placeholder for the percentage\n \"\"\"\n self.counter = 0 # 0 .. (total-1)\n self.total = total\n self.trigger_at = start # output next percentage value when reaching (at least) this\n self.step = step\n self.msg = msg\n\n super().__init__(msgid=msgid)\n\n def progress(self, current=None, increase=1):\n if current is not None:\n self.counter = current\n pct = self.counter * 100 / self.total\n self.counter += increase\n if pct >= self.trigger_at:\n self.trigger_at += self.step\n return pct\n\n def show(self, current=None, increase=1, info=None):\n \"\"\"\n Show and output the progress message\n\n :param current: set the current percentage [None]\n :param increase: increase the current percentage [None]\n :param info: array of strings to be formatted with msg [None]\n \"\"\"\n pct = self.progress(current, increase)\n if pct is not None:\n # truncate the last argument, if no space is available\n if info is not None:\n if not self.json:\n # no need to truncate if we're not outputing to a terminal\n terminal_space = get_terminal_size(fallback=(-1, -1))[0]\n if terminal_space != -1:\n space = terminal_space - len(self.msg % tuple([pct] + info[:-1] + ['']))\n info[-1] = ellipsis_truncate(info[-1], space)\n return self.output(self.msg % tuple([pct] + info), justify=False, info=info)\n\n return self.output(self.msg % pct)\n\n def output(self, message, justify=True, info=None):\n if self.json:\n self.output_json(message=message, current=self.counter, total=self.total, info=info)\n else:\n if justify:\n message = justify_to_terminal_size(message)\n self.logger.info(message)\n\n\nclass ProgressIndicatorEndless:\n def __init__(self, step=10, file=None):\n \"\"\"\n Progress indicator (long row of dots)\n\n :param step: every Nth call, call the func\n :param file: output file, default: sys.stderr\n \"\"\"\n self.counter = 0 # call counter\n self.triggered = 0 # increases 1 per trigger event\n self.step = step # trigger every calls\n if file is None:\n file = sys.stderr\n self.file = file\n\n def progress(self):\n self.counter += 1\n trigger = self.counter % self.step == 0\n if trigger:\n self.triggered += 1\n return trigger\n\n def show(self):\n trigger = self.progress()\n if trigger:\n return self.output(self.triggered)\n\n def output(self, triggered):\n print('.', end='', file=self.file, flush=True)\n\n def finish(self):\n print(file=self.file)\n","sub_path":"src/borg/helpers/progress.py","file_name":"progress.py","file_ext":"py","file_size_in_byte":6870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"332702026","text":"import discord\r\nimport os\r\n\r\nclient = discord.Client()\r\n@client.event\r\nasync def on_ready():\r\n print(\"디스코드 봇 로그인이 완료되었습니다.\")\r\n print(\"디스코드 봇 이름:\" + client.user.name)\r\n print('------')\r\n await client.change_presence(status=discord.Status.online, activity=discord.Game(\"컴마스터봇 베타v1.0ㅣ컴마야 도움말\"))\r\n\r\n@client.event\r\nasync def on_message(message):\r\n if message.content == (\"컴마스터\"):\r\n await message.channel.send(\"천재\")\r\n if 
message.content == (\"commaster\"):\r\n        await message.channel.send(\"kind youtuber!\")\r\n\r\n    if message.content == \"컴마야 도움말\":\r\n        embed = discord.Embed(title=\"컴마스터 봇\", description=\"명령어들 (도움말)\", color=0x00ff00)\r\n\r\n        embed.add_field(name=\"컴마스터\", value=\"대답 : 천재\", inline=False)\r\n        embed.add_field(name=\"commaster\", value=\"대답 : kind youtuber!\", inline=False)\r\n\r\n        embed.add_field(name=\"컴마야 도움말\", value=\"명령어 알려주는 명령어(?)같은 (?) ( 뭐가 어케된거야 )\", inline=False)\r\n        embed.add_field(name=\"컴마야 개발자에게 후원\", value=\"https://toon.at/donate/donate-com\", inline=False)\r\n\r\n        embed.set_footer(text=\"Bot Made by. 컴마스터#5830 | Donate : https://toon.at/donate/donate-com\")\r\n        await message.channel.send(embed=embed)\r\n\r\naccess_token = os.environ[\"BOT_TOKEN\"]\r\nclient.run(access_token)\r\n","sub_path":"bot-code.py","file_name":"bot-code.py","file_ext":"py","file_size_in_byte":1528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"85760653","text":"from PIL import Image\nimport sys\nimport os\nimport re\n\norigin_path = './eval_png/'\ndirs = os.listdir(origin_path)\noutput_path = './eval/'\n\nfor dir in dirs:\n    print(\"The target directory is \" + dir)\n    input_path = origin_path + dir + '/'\n    files = os.listdir(input_path)\n    for file in files:\n        input_im = Image.open(input_path + file)\n        rgb_im = input_im.convert('RGB')\n        rgb_im.save(output_path + dir + '/' + file.replace(\"png\", \"jpg\"), quality=30)\n        print(\"transaction finished for \" + file)\n","sub_path":"png_eval.py","file_name":"png_eval.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"21948975","text":"from Paragraph import Paragraph\nfrom Element import Element\nfrom tagStack import tagStack\nimport os\nfrom shutil import copyfile\n\ndef buildXMLElements(targetXML, fileName,numberingXML):\n    outFile = open(\"HTMLOutput/\" + fileName + \".html\", \"w\")\n    readFile = open(targetXML,encoding=\"utf8\",mode=\"r\")\n    numberingFile = open(numberingXML,encoding=\"utf8\",mode=\"r\")\n\n    numberingLines = numberingFile.readlines()\n    lines = readFile.readlines()\n\n    tagSplit = []\n    appendedLines = ''\n    appendedNumbering = ''\n    tagSplitNumbering = []\n\n    for line in lines:\n        appendedLines = appendedLines + line\n\n    for line in numberingLines:\n        appendedNumbering = appendedNumbering + line\n\n    while appendedLines != \"\":\n        if appendedLines[0] == \"<\":\n            tagSplit.append((appendedLines[appendedLines.index(\"<\"):appendedLines.index(\">\") + 1]))\n            appendedLines = appendedLines[appendedLines.index(\">\") + 1:]\n        else:\n            tagSplit.append(appendedLines[0:appendedLines.index(\"<\")])\n            appendedLines = appendedLines[appendedLines.index(\"<\"):]\n\n    while appendedNumbering != \"\":\n        if appendedNumbering[0] == \"<\":\n            tagSplitNumbering.append((appendedNumbering[appendedNumbering.index(\"<\"):appendedNumbering.index(\">\") + 1]))\n            appendedNumbering = appendedNumbering[appendedNumbering.index(\">\") + 1:]\n        else:\n            tagSplitNumbering.append(appendedNumbering[0:appendedNumbering.index(\"<\")])\n            appendedNumbering = appendedNumbering[appendedNumbering.index(\"<\"):]\n\n    print(tagSplit)\n    print(tagSplitNumbering)\n\n    abstractToListMap, abstractToNumIdMap = captureListTyping(tagSplitNumbering)\n    paragraphList = buildParagraphs(tagSplit)\n    createHTMLOutput(outFile,paragraphList,abstractToListMap,abstractToNumIdMap,fileName)\n    
grabMedia(fileName)\n\n\ndef grabMedia(fileName):\n mediaList = []\n i = 0\n targetDir = \"targetWord/\" + fileName + \"/word/media/\"\n if not os.path.exists(\"HTMLOutput/imgs/\"):\n os.mkdir(\"HTMLOutput/imgs/\")\n\n if os.path.exists(targetDir):\n for files in os.listdir(targetDir):\n copyfile(targetDir + files,\"HTMLOutput/imgs/\" + fileName + \"docx_img\" + str(i) + \".png\")\n mediaList.append(fileName + \"docx_img\" + str(i) + \".png\")\n i += 1\n\ndef captureListTyping(tagSplitNumbering):\n abstractToNumIdMap = dict()\n inNumId = False\n inAbstract = False\n currentAbstractId = 0\n currentNumId = 0\n abstractToListMap = dict()\n\n for tags in tagSplitNumbering:\n if \"\" in tags:\n paragraphList.append(currentParagraph)\n elif paraEnabled:\n currentParagraph.addElement(tags)\n\n for formats in paragraphList:\n inFormat = False\n formatStartIndex = 0\n i = 0\n\n for elements in formats.getElements():\n if \"\" in elements:\n currentElement.bold = True\n elif \"\" in elements:\n currentElement.setStrikethrough(True)\n elif '' in elements:\n currentElement.setSubscript(True)\n elif '' in elements:\n currentElement.setSuperscript(True)\n elif \"\" in elements:\n currentElement.italics = True\n elif \"\" in elements or '\\n')\n outFile.write(\"\\n\")\n\n outFile.write(\"\\n\")\n i = 0\n needOpenList = True\n\n for entries in paragraphList:\n closingTagOrder = tagStack()\n\n if entries.getType().lower() == \"image\":\n outFile.write('\\n')\n else:\n if entries.getType().lower() == \"listparagraph\":\n listType = abstractToNumIdMap[entries.getListid()]\n listType = abstractToListMap[listType]\n\n if needOpenList:\n if listType.lower() == \"bullet\":\n outFile.write(\"
<ul>\\n\")\n                        needOpenList = False\n                    else:\n                        outFile.write(\"<ol>\\n\")\n                        needOpenList = False\n\n                outFile.write(\"<li>\")\n            elif entries.getType().lower() == \"paragraph\":\n                outFile.write(\"<p>\")\n\n            for elements in entries.getElements():\n                if elements.getBold():\n                    outFile.write(\"<b>\")\n                    closingTagOrder.push(\"</b>\")\n                if elements.getItalics():\n                    outFile.write(\"<i>\")\n                    closingTagOrder.push(\"</i>\")\n                if elements.getUnderline():\n                    outFile.write('<u>')\n                    closingTagOrder.push(\"</u>\")\n                if elements.getStrikethrough():\n                    outFile.write(\"<s>\")\n                    closingTagOrder.push(\"</s>\")\n                if elements.getSubscript():\n                    outFile.write(\"<sub>\")\n                    closingTagOrder.push(\"</sub>\")\n                if elements.getSuperscript():\n                    outFile.write(\"<sup>\")\n                    closingTagOrder.push(\"</sup>\")\n\n                outFile.write(elements.getText())\n\n                while closingTagOrder.getSize() != 0:\n                    outFile.write(closingTagOrder.pop())\n\n            if entries.getType().lower() == \"listparagraph\":\n                outFile.write(\"</li>\\n\")\n\n                if i + 1 < len(paragraphList):\n                    nextId = paragraphList[i + 1].getListid()\n\n                    if nextId != entries.getListid():\n                        needOpenList = True\n                        if listType.lower() == \"bullet\":\n                            outFile.write(\"</ul>\\n\")\n                        else:\n                            outFile.write(\"</ol>\\n\")\n                    else:\n                        if listType.lower() == \"bullet\":\n                            outFile.write(\"\\n\")\n                        else:\n                            outFile.write(\"\\n\")\n            else:\n                outFile.write(\"</p>
\\n\")\n i += 1\n\n outFile.write(\"\\n\")\n outFile.write(\"\\n\")\n outFile.close()","sub_path":"wordParser.py","file_name":"wordParser.py","file_ext":"py","file_size_in_byte":9716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"195092919","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/jaebradley/.virtualenvs/basketball_reference_web_scraper/lib/python3.7/site-packages/tests/unit/parsers/test_search_results_parser.py\n# Compiled at: 2020-03-05 11:58:35\n# Size of source mod 2**32: 6041 bytes\nfrom unittest import TestCase\nfrom unittest.mock import MagicMock\nfrom basketball_reference_web_scraper.data import LEAGUE_ABBREVIATIONS_TO_LEAGUE, League\nfrom basketball_reference_web_scraper.http_client import SEARCH_RESULT_RESOURCE_LOCATION_REGEX\nfrom basketball_reference_web_scraper.parsers import SearchResultsParser, SearchResultNameParser, ResourceLocationParser, LeagueAbbreviationParser\n\nclass TestSearchResultsParser(TestCase):\n\n def setUp(self):\n self.parser = SearchResultsParser(search_result_name_parser=(SearchResultNameParser()),\n search_result_location_parser=ResourceLocationParser(resource_location_regex=SEARCH_RESULT_RESOURCE_LOCATION_REGEX),\n league_abbreviation_parser=LeagueAbbreviationParser(abbreviations_to_league=LEAGUE_ABBREVIATIONS_TO_LEAGUE))\n\n def test_parse_single_nba_player(self):\n players = [\n MagicMock(resource_name='jaebaebae',\n resource_location='https://www.basketball-reference.com/players/j/jaebaebae.html',\n league_abbreviations='NBA')]\n self.assertEqual(self.parser.parse(nba_aba_baa_players=players), {'players': [\n {'name':'jaebaebae', \n 'identifier':'jaebaebae', \n 'leagues':{\n League.NATIONAL_BASKETBALL_ASSOCIATION}}]})\n\n def test_parse_single_aba_player(self):\n players = [\n MagicMock(resource_name='jaebaebae',\n resource_location='https://www.basketball-reference.com/players/j/jaebaebae.html',\n league_abbreviations='ABA')]\n self.assertEqual(self.parser.parse(nba_aba_baa_players=players), {'players': [\n {'name':'jaebaebae', \n 'identifier':'jaebaebae', \n 'leagues':{\n League.AMERICAN_BASKETBALL_ASSOCIATION}}]})\n\n def test_parse_single_baa_player(self):\n players = [\n MagicMock(resource_name='jaebaebae',\n resource_location='https://www.basketball-reference.com/players/j/jaebaebae.html',\n league_abbreviations='BAA')]\n self.assertEqual(self.parser.parse(nba_aba_baa_players=players), {'players': [\n {'name':'jaebaebae', \n 'identifier':'jaebaebae', \n 'leagues':{\n League.BASKETBALL_ASSOCIATION_OF_AMERICA}}]})\n\n def test_parse_single_nba_aba_baa_player(self):\n players = [\n MagicMock(resource_name='jaebaebae',\n resource_location='https://www.basketball-reference.com/players/j/jaebaebae.html',\n league_abbreviations='NBA/ABA/BAA')]\n self.assertEqual(self.parser.parse(nba_aba_baa_players=players), {'players': [\n {'name':'jaebaebae', \n 'identifier':'jaebaebae', \n 'leagues':{\n League.NATIONAL_BASKETBALL_ASSOCIATION,\n League.AMERICAN_BASKETBALL_ASSOCIATION,\n League.BASKETBALL_ASSOCIATION_OF_AMERICA}}]})\n\n def test_parse_multiple_nba_aba_baa_players(self):\n players = [\n MagicMock(resource_name='jaebaebae1',\n resource_location='https://www.basketball-reference.com/players/j/jaebaebae1.html',\n league_abbreviations='NBA/ABA/BAA'),\n MagicMock(resource_name='jaebaebae2',\n 
resource_location='https://www.basketball-reference.com/players/j/jaebaebae2.html',\n league_abbreviations='NBA/ABA/BAA'),\n MagicMock(resource_name='jaebaebae3',\n resource_location='https://www.basketball-reference.com/players/j/jaebaebae3.html',\n league_abbreviations='NBA/ABA/BAA')]\n self.assertEqual(self.parser.parse(nba_aba_baa_players=players), {'players': [\n {'name':'jaebaebae1', \n 'identifier':'jaebaebae1', \n 'leagues':{\n League.NATIONAL_BASKETBALL_ASSOCIATION,\n League.AMERICAN_BASKETBALL_ASSOCIATION,\n League.BASKETBALL_ASSOCIATION_OF_AMERICA}},\n {'name':'jaebaebae2', \n 'identifier':'jaebaebae2', \n 'leagues':{\n League.NATIONAL_BASKETBALL_ASSOCIATION,\n League.AMERICAN_BASKETBALL_ASSOCIATION,\n League.BASKETBALL_ASSOCIATION_OF_AMERICA}},\n {'name':'jaebaebae3', \n 'identifier':'jaebaebae3', \n 'leagues':{\n League.NATIONAL_BASKETBALL_ASSOCIATION,\n League.AMERICAN_BASKETBALL_ASSOCIATION,\n League.BASKETBALL_ASSOCIATION_OF_AMERICA}}]})","sub_path":"pycfiles/basketball_reference_web_scraper-4.9.0.macosx-10.15-x86_64.tar/test_search_results_parser.cpython-37.py","file_name":"test_search_results_parser.cpython-37.py","file_ext":"py","file_size_in_byte":5119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"336647530","text":"# -*- coding: utf-8 -*-\nfrom paths import ROOT_PATH # isort:skip\n\nfrom copy import deepcopy\nimport argparse\nimport os.path as osp\n\nfrom loguru import logger\nimport yaml\nimport pandas as pd\n\nfrom videoanalyst.config.config import cfg as root_cfg\nfrom videoanalyst.config.config import specify_task\nfrom videoanalyst.engine.builder import build as tester_builder\nfrom videoanalyst.model import builder as model_builder\nfrom videoanalyst.pipeline import builder as pipeline_builder\nfrom videoanalyst.utils import complete_path_wt_root_in_cfg\nfrom videoanalyst.utils import hpo\n\n\ndef make_parser():\n parser = argparse.ArgumentParser(description='Test')\n parser.add_argument('-cfg',\n '--config',\n default='',\n type=str,\n help='experiment configuration')\n parser.add_argument(\n '-hpocfg',\n '--hpo-config',\n default='experiments/siamfcpp/hpo/siamfcpp_SiamFCppTracker-hpo.yaml',\n type=str,\n help='experiment configuration')\n # parser.add_argument('-hpocsv',\n # '--hpo-csv',\n # default='logs/hpo/hpo.csv',\n # type=str,\n # help='dumped hpo result')\n\n return parser\n\n\nif __name__ == '__main__':\n # parsing\n parser = make_parser()\n parsed_args = parser.parse_args()\n\n # experiment config\n exp_cfg_path = osp.realpath(parsed_args.config)\n # from IPython import embed;embed()\n root_cfg.merge_from_file(exp_cfg_path)\n logger.info(\"Load experiment configuration at: %s\" % exp_cfg_path)\n\n # resolve config\n root_cfg = complete_path_wt_root_in_cfg(root_cfg, ROOT_PATH)\n root_cfg = root_cfg.test\n task, task_cfg_origin = specify_task(root_cfg)\n\n # hpo config\n with open(parsed_args.hpo_config, \"r\") as f:\n hpo_cfg = yaml.safe_load(f)\n hpo_cfg = hpo_cfg[\"test\"]\n _, hpo_cfg = specify_task(hpo_cfg)\n hpo_schedules = hpo.parse_hp_path_and_range(hpo_cfg)\n\n # results = [hpo.sample_and_update_hps(task_cfg, hpo_schedules) for _ in range(5)]\n # merged_result = hpo.merge_result_dict(results)\n\n csv_file = osp.join(hpo_cfg[\"exp_save\"],\n \"hpo_{}.csv\".format(task_cfg_origin[\"exp_name\"]))\n\n # from IPython import embed;embed();exit(0)\n while True:\n task_cfg = deepcopy(task_cfg_origin)\n hpo_exp_dict = hpo.sample_and_update_hps(task_cfg, hpo_schedules)\n # 
print(pd.DataFrame(hpo.merge_result_dict(hpo_exp_dict)))\n\n task_cfg.freeze()\n # build model\n model = model_builder.build(task, task_cfg.model)\n # build pipeline\n pipeline = pipeline_builder.build(task, task_cfg.pipeline, model)\n # build tester\n testers = tester_builder(task, task_cfg.tester, \"tester\", pipeline)\n # start engine\n # for tester in testers:\n tester = testers[0]\n test_result_dict = tester.test()\n hpo_exp_dict[\"main_performance\"] = test_result_dict[\"main_performance\"]\n df = hpo.dump_result_dict(csv_file, hpo_exp_dict)\n df.sort_values(by='main_performance', inplace=True)\n df.reset_index(drop=True, inplace=True)\n print(df.head(10))\n del model, pipeline, tester\n","sub_path":"main/hpo.py","file_name":"hpo.py","file_ext":"py","file_size_in_byte":3276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"450625867","text":"import sys\n# for linux env.\nsys.path.insert(0,'..')\nimport pandas as pd\nimport time\nimport argparse\nimport functools\nfrom misc import utils\nprint = functools.partial(print, flush=True)\nfrom collections import Counter\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='preprocess-count pos and negative from lab file in OneFlorida Data')\n parser.add_argument('--dataset', choices=['covid_database', 'main_database'], default='covid_database',\n help='two datasets, then merge at pre covid lab file')\n parser.add_argument('--debug', action='store_true')\n args = parser.parse_args()\n\n args.input_file = r'../data/oneflorida/{}/LAB_RESULT_CM.csv'.format(args.dataset)\n args.output_file = r'../data/oneflorida/output/{}/covid_lab_{}.csv'.format(args.dataset, args.dataset)\n args.output_file_xlsx = r'../data/oneflorida/output/{}/covid_lab_{}.xlsx'.format(args.dataset, args.dataset)\n\n print('args:', args)\n\n return args\n\n\ndef read_lab_and_count_covid(args, chunksize=100000, debug=False):\n start_time = time.time()\n print('in read_lab_and_count_covid')\n print('Choose dataset:', args.dataset, 'chunksize:', chunksize, 'debug:', debug)\n # step 1: load covid lab test codes, may be updated by:\n print('Step 1: load and selected covid related lab code')\n df_covid = pd.read_csv(r'../data/V15_COVID19/covid_phenotype/COVID_LOINC_all.csv')\n # No. We can choose according our list,\n # and then narrow down to the PCR test in pre_covid_lab.py\n # !!! chosen PCR tests in pre_covid_lab.py\n # Keep all covid related codes here\n\n print('df_covid.shape:', df_covid.shape)\n code_set = set(df_covid['loinc_num'].to_list())\n print('Selected all Covid related codes: ', code_set)\n print('len(code_set):', len(code_set))\n\n # step 2: read lab results by chunk, due to large file size\n print('Step 2, read lab data, and select patients who took COVID PCR test, with their covid lab records')\n print('read:', args.input_file)\n sasds = pd.read_csv(args.input_file,\n encoding='utf-8',\n dtype=str,\n chunksize=chunksize,\n iterator=True) # 'iso-8859-1' (LATIN1) and Windows cp1252 (WLATIN1)\n # sasds = pyreadstat.read_file_in_chunks(pyreadstat.read_sas7bdat,\n # '../data/V15_COVID19/{}/lab_result_cm.sas7bdat'.format(dataset),\n # chunksize=chunksize) #, multiprocess=True, num_processes=4)\n dfs = [] # holds data chunks\n dfs_covid = []\n cnt = Counter([])\n cnt_code = Counter([])\n i = 0\n n_rows = 0\n n_covid_rows = 0\n patid_set = set([])\n patid_covid_set = set([])\n for chunk in sasds: # , meta\n i += 1\n if chunk.empty:\n print(\"ERROR: Empty chunk! 
break!\")\n break\n\n chunk.rename(columns=lambda x: x.upper(), inplace=True)\n if i == 1:\n print('chunk.shape', chunk.shape)\n print('chunk.columns', chunk.columns)\n\n if debug:\n dfs.append(chunk)\n\n chunk_covid_records = chunk.loc[chunk['LAB_LOINC'].isin(code_set), :]\n dfs_covid.append(chunk_covid_records)\n # only keep covid test records.\n # other records of patients who took covid test need to scan again\n\n patid_set.update(chunk['PATID'])\n patid_covid_set.update(chunk_covid_records['PATID'])\n\n n_rows += len(chunk)\n n_covid_rows += len(chunk_covid_records)\n\n cnt.update(chunk_covid_records['RESULT_QUAL'])\n cnt_code.update(chunk_covid_records['LAB_LOINC'])\n\n if i % 10 == 0:\n print('chunk:', i, 'time:', time.strftime(\"%H:%M:%S\", time.gmtime(time.time() - start_time)))\n print('len(patid_set):', len(patid_set))\n print('len(patid_covid_set):', len(patid_covid_set))\n\n if debug:\n print('IN DEBUG MODE, BREAK, AND DUMP!')\n break\n\n print('n_rows:', n_rows, 'n_covid_rows:', n_covid_rows)\n print('len(patid_set):', len(patid_set))\n print('len(patid_covid_set):', len(patid_covid_set))\n print('#chunk: ', i, 'chunk size:', chunksize)\n print('Counter:', cnt)\n print('Loinc Counter:', cnt_code)\n\n dfs_covid_all = pd.concat(dfs_covid)\n print('dfs_covid_all.shape', dfs_covid_all.shape)\n print('dfs_covid_all.columns', dfs_covid_all.columns)\n dfs_covid_all.rename(columns=lambda x: x.upper(), inplace=True)\n print('dfs_covid_all.columns', dfs_covid_all.columns)\n\n print('Output file:', args.output_file)\n utils.check_and_mkdir(args.output_file)\n dfs_covid_all.to_csv(args.output_file, index=False)\n\n if debug:\n try:\n # dump xlsx for debugging\n print('Output file:', args.output_file_xlsx)\n utils.check_and_mkdir(args.output_file_xlsx)\n dfs_covid_all.to_excel(args.output_file_xlsx)\n except Exception as e:\n # in write raise ValueError(ValueError: This sheet is too large!\n # Your sheet size is: 1592362, 35 Max sheet size is: 1048576, 16384\n print(e)\n\n dfs_all = pd.concat(dfs)\n print('dfs_all.shape', dfs_all.shape)\n print('dfs_all.columns', dfs_all.columns)\n dfs_all.rename(columns=lambda x: x.upper(), inplace=True)\n print('dfs_all.columns', dfs_all.columns)\n dfs_all.to_csv(\"{}_lab_all.csv\".format(args.dataset))\n\n print('Total Time used after dump files:', time.strftime(\"%H:%M:%S\", time.gmtime(time.time() - start_time)))\n\n return dfs_covid_all # dfs_all, dfs_covid_all, meta\n\n\nif __name__ == '__main__':\n # python pre_lab.py --dataset covid_database 2>&1 | tee log/pre_lab_COL.txt\n # python pre_lab.py --dataset main_database 2>&1 | tee log/pre_lab_WCM.txt\n start_time = time.time()\n args = parse_args()\n print(args)\n dfs_covid_all = read_lab_and_count_covid(args, debug=args.debug)\n print('Total Time used after dump files:', time.strftime(\"%H:%M:%S\", time.gmtime(time.time() - start_time)))\n","sub_path":"preflorida/pre_lab.py","file_name":"pre_lab.py","file_ext":"py","file_size_in_byte":6089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"105698302","text":"#!/usr/bin/env python\n\n\"\"\"\n===========================================================\nAssignment 5: Topic Modelling with WW1 Letters \n===========================================================\n\nThis script performs topic modelling on a dataset of 50 letters between French or British soldiers and their loved ones during the First World War. 
20 of the letters were originally in French and have been converted into English with the use of Google Translate (and some highschool French lessons! ;)) I hope you enjoy searching through the topics which these men and women chose to write about during the First World War. \n\nThe script will work through the following steps: \n1. Load in and clean the data \n2. Generate bi-gram (a,b) and tri-gram (x, (a,b)) models using gensim \n3. Create a gensim dictionary and corpus\n4. Build and run the LDA model \n5. Compute the Perplexity and coherence scores \n6. Creates a file of the topics and their most frequent words \n7. Visualise these topics into a html plot \n\nThe script can be run from the command line by navigating to the correct directory and environment, then typing: \n $ python3 WW1_Letters.py \n\n\"\"\" \n\n\"\"\"\n------------------------\nImport the Dependencies\n------------------------\n\"\"\"\n\n#operating systems \nimport os\nimport sys\nsys.path.append(os.path.join(\"..\"))\nfrom pprint import pprint\n\n# data handling \nimport pandas as pd\nimport numpy as np \n\n#stopwords\nimport nltk\nfrom nltk.corpus import stopwords\nstop_words = stopwords.words('english')\n\n# nlp functionality \nimport spacy\nnlp = spacy.load(\"en_core_web_sm\", disable=[\"ner\"])\n\n\n# visualisations \nimport pyLDAvis.gensim\nimport seaborn as sns\nfrom matplotlib import pyplot as plt\nfrom matplotlib import rcParams\n# figure size in inches\nrcParams['figure.figsize'] = 20,10\n\n#LDA tools \nimport gensim\nimport gensim.corpora as corpora\nfrom gensim.models import CoherenceModel\nimport lda_utils\n\n# warnings\nimport warnings\nwarnings.filterwarnings('ignore', category=DeprecationWarning)\n\n\"\"\"\n---------------\nMain Function \n---------------\n\"\"\"\n\ndef main():\n \"\"\"\n Here we'll call all the functions we want to run in our script (described below the main function) \n \"\"\"\n \n #Tell the user you're about to start up the process\n print(\"Hello, I'm setting up your WW1 letters topic modelling...\") \n \n # create the output directory\n if not os.path.exists(\"output\"):\n os.mkdir(\"output\")\n \n ##STEP 1: Load in the data (it has already been cleaned) \n print(\"Loading in the data\") \n data = os.path.join(\"..\", \"data\", \"50_English_letters.csv\")\n \n ##STEP TWO: Generate bi-gram and tri-gram models with gensim \n print(\"I'm about to process the data and generate your bi and tri grams\")\n data_processed = gensim_processing(data)\n \n \n ##STEP THREE: Create a gensim dictionary and corpus\n print(\"Models generated, now I'll create the dictionary and corpus\")\n dictionary, corpus = create_dict_corpus(data_processed) \n \n ##STEP FOUR: Run the LDA model (this will create 15 topics) \n print(\"Set-up complete. Let's run the LDA model...\") \n lda_model = run_lda(corpus, dictionary)\n \n ##STEP FIVE: Calculate the perplexity and coherence scores \n print(\"Calculating complexity and coherence...\") \n perplexity, coherence = calculate_plx_coh (data_processed, corpus, dictionary, lda_model) \n \n ##STEP SIX: Create a file of the topics and their top words \n print(\"Creating a txt file with the output topics. This will be found in output\") \n create_topics_df\n \n ##STEP SEVEN: Save the results as a simple txt file into output\n print(\"Creating a txt file with the perplexity and coherence scores. 
This will be found in output\")\n save_results\n \n \n ##STEP SEVEN: Generate a topics plot and save it as a html \n print(\"Creating a html plot which will be saved in output\") \n create_html_plot\n \n #Tell the user your script is finished \n print(\"That's you finished, enjoy the results!\")\n \n \n\"\"\"\n-----------\nFunctions \n-----------\n\"\"\" \n \ndef gensim_processing(data):\n \"\"\"\n Here we use gensim to define bi-grams and tri-grams which enable us to create a create a dictonary and corpus \n \"\"\"\n #build the models first \n bigram = gensim.models.Phrases(data[\"text\"], min_count=3, threshold=100) #We're using a threshold of 100\n trigram = gensim.models.Phrases(bigram[data[\"text\"]], threshold=100) \n \n #Then fit them to the data \n bigram_mod = gensim.models.phrases.Phraser(bigram)\n trigram_mod = gensim.models.phrases.Phraser(trigram)\n \n #We further process the data using spacy and allow Nouns, Adjectives and Verbs to pass \n data_processed = lda_utils.process_words(data[\"text\"],nlp, bigram_mod, trigram_mod, allowed_postags=[\"NOUN\",\"ADJ\", \"VERB\"])\n\n #We now have a list of words which can be used to train the LDA model\n return data_processed\n \n \n \ndef create_dict_corpus(data_processed):\n \"\"\"\n Here we create a dictonary and a corpus. \n => The dictionary converts the words into an integer value\n => The corpus creates a 'bag of words' model for all the data (i.e. mixes it up and makes it unstructured) \n \n \"\"\"\n # Create Dictionary\n dictionary = corpora.Dictionary(data_processed)\n \n #We want to remove very common words so we'll filter those which appear in more than 80% of the letters\n #dictionary.filter_extremes(no_above=0.8) (can be removed) \n\n # Create Corpus: Term Document Frequency\n corpus = [dictionary.doc2bow(text) for text in data_processed]\n return dictionary, corpus \n\n\n\ndef run_lda(dictionary, corpus):\n \"\"\"\n Our model takes our data, corpus, and dictionary to generate a given number of topics. \n This script uses 15 topics as that was the recommended number calculated. \n \n \"\"\"\n lda_model = gensim.models.LdaMulticore(corpus=corpus, #our corpus \n id2word=dictionary, #our dictionary \n num_topics=15, # our number of topics defined as 15\n random_state=100, #the number of random states (helps with repdoducability)\n chunksize=10, #chunck size to help model be more effifienct \n passes=10, #Number of times the model passes through the data \n iterations=100,\n per_word_topics=True, \n minimum_probability=0.01)\n \n \n return lda_model\n\n \n \ndef calculate_plx_coh(data_processed, lda_model, corpus, dictionary):\n \"\"\"\n Perplexity = A measure of how good the model is. The lower the number the better. 
\n Coherence = \n \n \"\"\"\n # Compute Perplexity\n perplexity = lda_model.log_perplexity(corpus)\n \n # Compute Coherence Score\n coherence_model_lda = CoherenceModel(model= lda_model, \n texts= data_processed, \n dictionary= dictionary, \n coherence= 'c_v')\n coherence = coherence_model_lda.get_coherence()\n \n print (f\"\\n The perplexity is {perplexity} and the coherence is {coherence}.\") \n \n \n return perplexity, coherence\n\n\n \ndef create_topics_df(lda_model, corpus, data_processed):\n \n \"\"\"\n Here we look closer at the topics made and create a dataframe of these \n \n \"\"\"\n #print the topics to the terminal \n pprint(lda_model.print_topics())\n \n \n #Create a data_frame of the topic keywords and save these as a csv \n df_topic_keywords = lda_utils.format_topics_sentences(ldamodel=lda_model, \n corpus=corpus, \n texts=data_processed)\n df_dominant_topic = df_topic_keywords.reset_index()\n df_dominant_topic.columns = ['Document_No', 'Dominant_Topic', 'Topic_Perc_Contrib', 'Keywords', 'Text']\n print(df_dominant_topic)\n \n df_dominant_topic.to_csv(\"output/topic_keywords.csv\") \n \n \n \"\"\"\n We'll also look at the most dominent topics per letter by creating a matrix of topic values per letter\n Then we'll plot these into a lineplot using Seaborn\n \n \"\"\"\n\n #Create a list of topics from your corpus \n values = list(lda_model.get_document_topics(corpus))\n \n #Create an empty list called split \n split = []\n \n #For every document in the corpus list(values) create an empty list called topic_prevelance\n for entry in values:\n topic_prevelance = []\n #For every topic in the document, add the contribution of this topic to each document into a column \n for topic in entry:\n topic_prevelance.append(topic[1])\n #add this list with contributions to the empty split list created above \n split.append(topic_prevelance)\n \n \n #Create the document-topic matrix and save it \n df = pd.DataFrame(map(list,zip(*split)))\n df.to_csv(\"output/document_topic_matrix.csv\")\n \n #Make this into a lineplot using Seaborn. We don't have many letters so our rolling mean will just be 5 \n topic_line_plot = sns.lineplot(data=df.T.rolling(5).mean())\n figure = topic_line_plot.get_figure()\n \n #Save the figure in the output directory \n figure.savefig(\"output/topic_line_plot.png\")\n print(\"\\n Your topic line plot is saved in output.\")\n \n \n\ndef save_results(perplexity, coherence):\n \"\"\"\n We'll create a simple txt file to save our perplexity and coherence scores in. 
\n \"\"\"\n with open(\"output/perplexity_coherence.txt\", \"w+\") as f:\n f.writelines(f\"The models scores are as follows, \\n\\n Perplexity: {perplexity}, Coherence: {coherence}\")\n \n \n\ndef create_html_plot(lda_model, corpus, dictionary):\n \"\"\" \n Finally we'll create a html which allows the user to explore the topics themselves interactively\n \"\"\"\n \n vis = pyLDAvis.gensim.prepare(lda_model, corpus, dictionary)\n pyLDAvis.save_html(vis, f\"output/LDA_vis.html\")\n \n \n\nif __name__==\"__main__\":\n #execute main function\n main()\n","sub_path":"Assignments/Assignment05/WW1_Letters.py","file_name":"WW1_Letters.py","file_ext":"py","file_size_in_byte":10324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"562098967","text":"\"\"\"\" defines fastapi bookstore endpoints \"\"\"\n\nimport os\nimport datetime\nimport logging\nimport logging.config\nimport secrets\nimport string\nimport time\nfrom typing import List, Optional\n\nfrom fastapi import Depends, FastAPI, HTTPException, Request, Response, status\nfrom fastapi.params import Query\nfrom fastapi.security import HTTPBasic, HTTPBasicCredentials\n\n# app-specific modules and packages\nimport app.graphql.graphql as gql\nfrom app.utility.utility import create_books\n\nfrom app.config import Settings\n\nfile_dir = os.path.split(os.path.realpath(__file__))[0]\nlogging.config.fileConfig(\n os.path.join(file_dir, \"logging.conf\"), disable_existing_loggers=False\n)\n\nlogger = logging.getLogger(__name__)\n\nsettings = Settings()\napp = FastAPI()\nsecurity = HTTPBasic()\n\n\n@app.middleware(\"http\")\nasync def log_requests(request: Request, call_next):\n \"\"\"\n Tidy bit of logging assist to show how long each call takes.\n Found here: https://philstories.medium.com/fastapi-logging-f6237b84ea64\n \"\"\"\n\n # Create idem with secrets module instead of random module\n # which is unsuitable for security/cryptographic purposes\n string_source = string.ascii_uppercase + string.digits\n idem = secrets.choice(string.ascii_uppercase)\n idem += secrets.choice(string.digits)\n\n for _ in range(4):\n idem += secrets.choice(string_source)\n\n char_list = list(idem)\n secrets.SystemRandom().shuffle(char_list)\n idem = \"\".join(char_list)\n\n logger.info(\"rid=%s start request path=%s\", idem, request.url.path)\n start_time = time.time()\n\n response = await call_next(request)\n\n process_time = (time.time() - start_time) * 1000\n formatted_process_time = f\"{process_time:.2f}\"\n logger.info(\n \"rid=%s completed_in=%sms status_code=%s\",\n idem,\n formatted_process_time,\n response.status_code,\n )\n\n return response\n\n\n@app.get(\"/ready\")\ndef read_root():\n \"\"\"ready status by way of returned datetime\"\"\"\n return {\n \"Hello\": datetime.datetime.now().astimezone().replace(microsecond=0).isoformat()\n }\n\n\n# pylint: disable=too-many-arguments,unused-argument\n@app.get(\"/SBookInfo\")\ndef read_item(\n response: Response,\n credentials: HTTPBasicCredentials = Depends(security),\n course1: Optional[List[str]] = Query(None),\n session1: Optional[List[str]] = Query(None),\n section1: Optional[List[str]] = Query(None),\n term1: Optional[List[str]] = Query(None),\n dept1: Optional[List[str]] = Query(None),\n is_json: bool = False,\n):\n \"\"\"\n Rebuilding under an existing service.\n\n Basic user/password auth.\n\n Note the duplicate session1 queries args, for ex:\n https://ws.colorado.edu/BookStore/SBookInfo?course1=ACCT3230&session1=001&session1=B&term1=2217\n \"\"\"\n\n 
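# secrets.compare_digest is designed to resist timing attacks, so the\n    # username/password check below does not leak credential contents via timing.\n    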
correct_username = secrets.compare_digest(\n credentials.username, settings.basic_username\n )\n correct_password = secrets.compare_digest(\n credentials.password, settings.basic_password\n )\n if not (correct_username and correct_password):\n raise HTTPException(\n status_code=status.HTTP_401_UNAUTHORIZED,\n detail=\"Incorrect username or password.\",\n headers={\"WWW-Authenticate\": \"Basic\"},\n )\n\n # Call the GraphQL service to fetch data\n gql_status, results = gql.make_request(\n url=settings.graphql_url,\n api_key=settings.graphql_key,\n courses=course1,\n sections=section1,\n terms=term1,\n sessions=session1,\n )\n\n # return or replace the status_code with what's received from the make_request()\n response.status_code = gql_status\n\n # pylint: disable=no-else-return\n if gql_status == 200:\n if len(results) > 0:\n if is_json:\n return results\n else:\n # convert to XML and return it.\n books_xml = create_books(results)\n return Response(content=books_xml, media_type=\"application/xml\")\n else:\n return {\"No data returned.\"}\n elif gql_status == 424:\n logger.error(\"gql_status: %s, %s\", gql_status, results)\n return {\"An error with the backend service has occurred.\"}\n else:\n logger.error(\"gql_status: %s, %s\", gql_status, results)\n return {\"An internal error has occurred.\"}\n","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"317677279","text":"import math\r\nimport argparse\r\nimport statistics\r\nimport numpy as np\r\nimport pandas as pd\r\nimport csv\r\n\r\nwith open(\"q2_sigma100.dat\", \"r\") as myfile:\r\n priorMean=0\r\n priorVariance=1\r\n newMean=0\r\n newVariance=0\r\n givenSigmaSquare=10000\r\n n=100\r\n seSquare=givenSigmaSquare/n\r\n for line in myfile:\r\n total=0\r\n expectedMean=0\r\n currentline = line.split(\",\")\r\n for value in currentline:\r\n total+=float(value)\r\n expectedMean=total/n\r\n newMean=(priorVariance*expectedMean+seSquare*priorMean)/(priorVariance+seSquare)\r\n newVariance=(priorVariance*seSquare)/(priorVariance+seSquare)\r\n print(\"New mean: \", newMean, \" New Variance: \", newVariance)\r\n priorMean=newMean\r\n priorVariance=newVariance\r\n print(\"\\n\") ","sub_path":"Homework6/Assignment6_q2_b.py","file_name":"Assignment6_q2_b.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"116929239","text":"import Computer\n\nclass Game:\n\n def __init__(self,players):\n self.players = players\n\n def selectHands(self):\n\n self.hands = []\n for player in self.players:\n\n hand = player.getHand()\n\n self.hands.append((player,hand))\n\n def getScores(self):\n\n battleStr = ['rr','rp','rs','pr','pp','ps','sr','sp','ss']\n battlePt = [0,-1,1,1,0,-1,-1,1,0]\n\n scores = []\n for hand in self.hands:\n sc = 0\n for opponenthand in self.hands:\n if hand == opponenthand:\n continue\n for attack in hand[1]:\n for defend in opponenthand[1]:\n bt = attack + defend\n for i, pr in enumerate(battleStr):\n if pr == bt:\n # print(battlePt[i])\n sc += battlePt[i]\n break\n scores.append((hand[0],hand[1], sc))\n return scores\n\n def playRound(self):\n self.selectHands()\n scores = self.getScores()\n for score in scores:\n score[0].attack(score[2])\n for player in self.players:\n print(\"{}: {}\".format(player.name,player.health))\n\n\n# if __name__ == \"__main__\":\n# player1 = Computer.Player.Player(5)\n# player2 = 
Computer.Player.Player(5)\n# player3 = Computer.Player.Player(5)\n#\n# game = Game([player1,player2,player3])\n# game.playRound()\n","sub_path":"Game.py","file_name":"Game.py","file_ext":"py","file_size_in_byte":1513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"39067876","text":"from django.conf.urls import patterns, include, url\nfrom tastypie.api import Api\n\nfrom backgridtest.cdr_resource import CallRecordResource, CallRecordPreference\n\n# Uncomment the next two lines to enable the admin:\n# from django.contrib import admin\n# admin.autodiscover()\n\napis = Api(api_name = 'v1')\n\napis.register(CallRecordResource())\napis.register(CallRecordPreference())\n\nurlpatterns = [url(r'^api/', include(apis.urls)),\n url(r'^testcdrgrid/', 'backgridtest.views.renderTestGrid'),\n url(r'^savepref/', 'backgridtest.views.renderConfigPage'),\n url(r'^initcdrlist/', 'backgridtest.views.initcdrlist'),\n url(r'^migratecdr/', 'backgridtest.views.migratecdr') ]","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"117637836","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\nimport re\r\nimport json\r\ndef getkeyword(keyword):\r\n url='http://www.baidu.com/s?wd='+keyword\r\n try:\r\n r=requests.get(url,timeout=30)\r\n r.raise_for_status()\r\n r.encoding='UTF-8'\r\n return r.text\r\n except:\r\n return \"\"\r\ndef parserlinks(html):\r\n soup=BeautifulSoup(html,\"html.parser\")\r\n links=[]\r\n for div in soup.find_all('div',{'data-tools':re.compile('title')}):\r\n data=div.attrs['data-tools']\r\n d=json.loads(data)\r\n links.append(d['title'])\r\n return links\r\ndef main():\r\n html=getkeyword('福州最佳美食餐厅搜索')\r\n ls=parserlinks(html)\r\n count=1\r\n with open('15.txt','w')as fd:\r\n for i in ls:\r\n fd.write('[')\r\n fd.write(str(count))\r\n fd.write(']')\r\n fd.write(i)\r\n fd.write('\\n')\r\n count+=1\r\n print('写入文件成功!')\r\nmain()","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"38548207","text":"#Name: Mohammad Saleh Ahsan Sakir\r\n#Andrew ID: msakir\r\n\r\nimport pygame\r\nimport random\r\n\r\npygame.init()\r\n\r\ndispWidth = 800\r\ndispHeight = 500\r\n\r\ncarWidth = 55\r\ncarHeight = 95\r\n\r\n###################################################################################\r\n######################## BACKGROUND SECTION #######################################\r\n###################################################################################\r\nscreen = pygame.display.set_mode((dispWidth, dispHeight)) #first screen to show up\r\nmenuBg = pygame.image.load('menubg.jpg')\r\nmenuBg = pygame.transform.scale(menuBg, (dispWidth, dispHeight))\r\n\r\ninsBg = pygame.image.load('insbg2.jpg') \r\ninsBg = pygame.transform.scale(insBg, (dispWidth, dispHeight))# resize graphic\r\n\r\nroadBg = pygame.image.load('background.png')\r\nroadBg = pygame.transform.scale(roadBg, (dispWidth, dispHeight))# resize graphic\r\n###################################################################################\r\n######################### MUSIC SECTION ###########################################\r\n###################################################################################\r\ncrashSound = 
pygame.mixer.Sound(\"crash.wav\")\r\npygame.mixer.music.load('bgmusic.mp3')\r\n\r\n###################################################################################\r\n################### LOADING CAR IMAGES ############################################\r\n###################################################################################\r\ncarImg1 = pygame.image.load(\"diablo.png\") \r\ncarImg1 = pygame.transform.scale(carImg1, (60, 100))# resize graphic \r\ncarImg1 = carImg1.convert_alpha() # remove whitespace from graphic\r\n\r\ncarImg2 = pygame.image.load(\"car.png\") \r\ncarImg2 = pygame.transform.scale(carImg2, (60, 100))# resize graphic \r\ncarImg2 = carImg2.convert_alpha() # remove whitespace from graphic\r\n\r\ncar2 = pygame.image.load(\"aventador.png\") \r\ncar2 = pygame.transform.scale(car2, (60, 100))# resize graphic\r\ncar2 = car2.convert_alpha()# remove whitespace from graphic\r\n\r\ncar3 = pygame.image.load(\"nsx.png\")\r\ncar3 = pygame.transform.scale(car3, (60, 100)) # resize graphic\r\ncar3 = car3.convert_alpha() # remove whitespace from graphic\r\n\r\ncar4 = pygame.image.load(\"speeder.png\")\r\ncar4 = pygame.transform.scale(car4, (60, 100)) # resize graphic\r\ncar4 = car4.convert_alpha() # remove whitespace from graphic\r\n\r\ncar5 = pygame.image.load(\"slr.png\")\r\ncar5 = pygame.transform.scale(car5, (60, 100)) # resize graphic\r\ncar5 = car5.convert_alpha() # remove whitespace from graphic\r\n\r\ncar6 = pygame.image.load(\"Mach6.png\")\r\ncar6 = pygame.transform.scale(car6, (60, 100)) # resize graphic\r\ncar6 = car6.convert_alpha() # remove whitespace from graphic\r\n\r\ncar7 = pygame.image.load(\"Stingray.png\")\r\ncar7 = pygame.transform.scale(car7, (60, 100)) # resize graphic\r\ncar7 = car7.convert_alpha() # remove whitespace from graphic\r\n###################################################################################\r\n###################################################################################\r\nrandomCars = [car2, car3, car4, car5, car6, car7]\r\nenemy = random.choice(randomCars)\r\n\r\nclock = pygame.time.Clock()\r\npygame.mouse.set_visible(1)\r\n###################################################################################\r\n####################### COLOR SECTION #############################################\r\n###################################################################################\r\nblack = (0, 0, 0)\r\nwhite = (255, 255, 255)\r\nbrown = (153, 102, 60)\r\nred = (200, 0, 0)\r\ngreen = (0, 200, 0)\r\n\r\nbrightBrown = (240, 144, 47)\r\nbrightRed = (255, 0, 0)\r\nbrightGreen = (0, 255, 0)\r\n###################################################################################\r\n###################################################################################\r\npause = False\r\n\r\n#function for showing score--------------------------------------------------------\r\ndef score(count):\r\n font = pygame.font.SysFont(\"comicsansms\", 25)\r\n text = font.render(\"SCORE: \" + str(count), True, red)\r\n screen.blit(text, (0, 0))\r\n\r\n#function for crashing scene-------------------------------------------------------\r\ndef crash(message, size, color, action):\r\n################## Stops the music and plays the crash sound ######################\r\n pygame.mixer.music.stop()\r\n pygame.mixer.Sound.play(crashSound)\r\n###################################################################################\r\n largeText = pygame.font.SysFont(\"Algerian\", size)\r\n TextSurf, TextRect = text_objects(message, largeText, color)\r\n 
TextRect.center = ((dispWidth / 2), (dispHeight / 2))\r\n screen.blit(TextSurf, TextRect)\r\n\r\n while True:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n quit()\r\n############################# BUTTONS #############################################\r\n button(\"Play Again\", 150, 450, 100, 50, brown, brightBrown, action)\r\n button(\"Menu\", 350, 450, 100, 50, brown, brightBrown, menu)\r\n button(\"Quit\", 550, 450, 100, 50, brown, brightBrown, quitGame)\r\n###################################################################################\r\n pygame.display.update()\r\n clock.tick(15)\r\n\r\n#function for unpause--------------------------------------------------------------\r\ndef unpause():\r\n global pause\r\n pygame.mixer.music.unpause()\r\n pause = False\r\n\r\n#function for pause----------------------------------------------------------------\r\ndef paused():\r\n###################################################################################\r\n pygame.mixer.music.pause()\r\n###################################################################################\r\n largeText = pygame.font.SysFont(\"comicsansms\", 115)\r\n TextSurf, TextRect = text_objects(\"Paused\", largeText, black)\r\n TextRect.center = ((dispWidth / 2), (dispHeight / 2))\r\n screen.blit(TextSurf, TextRect)\r\n\r\n while pause:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n quit()\r\n############################# BUTTONS #############################################\r\n button(\"Continue\", 150, 450, 100, 50, brown, brightBrown, unpause)\r\n button(\"Menu\", 350, 450, 100, 50, brown, brightBrown, menu)\r\n button(\"Quit\", 550, 450, 100, 50, brown, brightBrown, quitGame)\r\n###################################################################################\r\n pygame.display.update()\r\n clock.tick(15)\r\n\r\n#function to quit the game---------------------------------------------------------\r\ndef quitGame():\r\n pygame.quit()\r\n quit()\r\n\r\n#function for text-----------------------------------------------------------------\r\ndef text_objects(text, font, color):\r\n textSurface = font.render(text, True, color)\r\n return textSurface, textSurface.get_rect()\r\n\r\n#function for button---------------------------------------------------------------\r\ndef button(msg, x, y, width, height, initialclr, afterclr, action=None):\r\n mouse = pygame.mouse.get_pos()\r\n click = pygame.mouse.get_pressed()\r\n #here whether the cursor is within the buttons boundury or not is being checked\r\n #if cursosr within button and pressed, it will perform the action assigned to that button\r\n if x + width > mouse[0] > x and y + height > mouse[1] > y:\r\n pygame.draw.rect(screen, afterclr, (x, y, width, height))\r\n if click[0] == 1 and action != None:\r\n action()\r\n else:\r\n pygame.draw.rect(screen, initialclr, (x, y, width, height))\r\n smallText = pygame.font.SysFont(\"comicsansms\", 20)\r\n textSurf, textRect = text_objects(msg, smallText, black)\r\n textRect.center = ((x + (width / 2)), (y + (height / 2)))\r\n screen.blit(textSurf, textRect)\r\n\r\n#function for inserting text------------------------------------------------------- \r\ndef blitText(surface, text, pos, font, color=pygame.Color('yellow')):\r\n words = [word.split(' ') for word in text.splitlines()]\r\n # 2D array where each row is a list of words.\r\n space = font.size(' ')[0]\r\n # The width of a space.\r\n max_width, max_height = surface.get_size()\r\n x, y = pos\r\n 
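# Greedy word wrap: when the next word would overflow max_width, x is reset\r\n    # and y advances one row; each source line also forces a fresh row.\r\n    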
for line in words:\r\n for word in line:\r\n word_surface = font.render(word, 0, color)\r\n word_width, word_height = word_surface.get_size()\r\n if x + word_width >= max_width:\r\n x = pos[0]\r\n # Reset the x.\r\n y += word_height\r\n # Start on new row.\r\n surface.blit(word_surface, (x, y))\r\n x += word_width + space\r\n # Reset the x.\r\n x = pos[0]\r\n # Start on new row.\r\n y += word_height \r\n\r\n#function for menu window---------------------------------------------------------- \r\ndef menu():\r\n menu = False\r\n while not menu:\r\n screen.fill(black)\r\n #place the background game image\r\n screen.blit(menuBg, (0,0))\r\n x,y = pygame.mouse.get_pos()\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n menu = True\r\n #place the title\r\n font = pygame.font.SysFont('Algerian', 70)\r\n text = 'Dodge Car Game'\r\n blitText(screen, text, (150,0), font, green)\r\n############################# BUTTONS #############################################\r\n button(\"1 PLAYER\", 310, 120, 230, 50, brown, brightBrown,gameLoop) #button for 1 player mode\r\n button(\"2 PLAYER\", 310, 185, 230, 50, brown, brightBrown,twoPlayer) #button for 2 player mode\r\n button(\"INSTRUCTIONS\", 310, 248, 230, 50, brown, brightBrown,instruction) #button to know the instructions\r\n button(\"QUIT\", 310, 310, 230, 50, brown, brightBrown,quitGame) #button to quit game\r\n################################################################################### \r\n pygame.display.flip()\r\n clock.tick(60)\r\n\r\n#function for instruction window---------------------------------------------------\r\ndef instruction():\r\n #read instructions from the given file\r\n font = pygame.font.SysFont('Algerian', 22)\r\n txt = open('instruction.txt')\r\n line = txt.readline()\r\n text = ''\r\n while line:\r\n t = str(line)\r\n line = txt.readline()\r\n text += t\r\n\r\n back = False\r\n while not back:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n back = True\r\n\r\n screen.fill(white)\r\n #place the given image in background\r\n screen.blit(insBg, (0,0))\r\n #place the instructions on screen from the given file\r\n blitText(screen, text, (0,0), font)\r\n #set a button for going back to main menu\r\n button(\"Back\", 310, 450, 230, 50, brown, brightBrown,menu)\r\n \r\n pygame.display.update()\r\n \r\n#function for one player mode------------------------------------------------------\r\ndef gameLoop():\r\n global pause\r\n enemy = random.choice(randomCars)\r\n###################################################################################\r\n pygame.mixer.music.stop()\r\n pygame.mixer.music.play(-1)\r\n################################################################################### \r\n x = (dispWidth * 0.45)\r\n y = (dispHeight * 0.8)\r\n\r\n xChange = 0\r\n yChange = 0\r\n\r\n thingStartX = random.randrange(0, dispWidth)\r\n thingStartY = -600\r\n enemySpeed = 5\r\n thingWidth = 55\r\n thingHeight = 95\r\n thingCount = 1\r\n\r\n dodged = 0\r\n\r\n gameExit = False\r\n\r\n while not gameExit:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n quit()\r\n #event for left and right keys\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_LEFT:\r\n xChange = -5\r\n if event.key == pygame.K_RIGHT:\r\n xChange = 5\r\n \r\n #if 'p' is pressed, then the game will pause \r\n if event.key == pygame.K_p:\r\n pause = True\r\n paused()\r\n\r\n if event.type == pygame.KEYUP:\r\n if event.key == pygame.K_LEFT or 
event.key == pygame.K_RIGHT:\r\n xChange = 0\r\n \r\n #this x will change the place of the car when the associated keys will be pressed \r\n x += xChange\r\n\r\n screen.blit(roadBg, (0, 0))\r\n\r\n screen.blit(enemy, [thingStartX, thingStartY, thingWidth, thingHeight])\r\n thingStartY += enemySpeed\r\n screen.blit(carImg1, (x, y))\r\n score(dodged)\r\n \r\n #this logic sets the border for the car\r\n #if the car touches the border,it will crash!\r\n if x > dispWidth - carWidth or x < 0:\r\n crash('You Crashed', 115,black, gameLoop)\r\n\r\n #this logic will make the enemy cars keep coming\r\n if thingStartY > dispHeight:\r\n thingStartY =0 - thingHeight\r\n thingStartX = random.randrange(0, dispWidth)\r\n dodged += 1\r\n enemySpeed += .5\r\n\r\n #this is the crash logic with the enemy\r\n #so if the car touches the enemy, it will crash!\r\n if y < thingStartY + thingHeight:\r\n if x > thingStartX and x < thingStartX + thingWidth or x + carWidth > thingStartX and x + carWidth < thingStartX + thingWidth:\r\n crash('You Crashed', 115, black, gameLoop)\r\n \r\n pygame.display.update()\r\n clock.tick(60)\r\n\r\n#function for two player mode------------------------------------------------------\r\ndef twoPlayer():\r\n global pause\r\n enemy = random.choice(randomCars)\r\n###################################################################################\r\n pygame.mixer.music.play(-1)\r\n###################################################################################\r\n x1 = (dispWidth * 0.73)\r\n x2 = (dispWidth * 0.2)\r\n y1 = (dispHeight * 0.8)\r\n y2 = (dispHeight * 0.8)\r\n \r\n x1Change = 0\r\n x2Change = 0\r\n \r\n thingStartX = random.randrange(0, dispWidth)\r\n thingStartY = -600\r\n enemySpeed = 9\r\n thingWidth = 55\r\n thingHeight = 95\r\n\r\n gameExit = False\r\n r1 = 0\r\n r2 = 0\r\n while not gameExit:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n quit()\r\n###################################################################################\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_LEFT:\r\n x1Change = -5 \r\n if event.key == pygame.K_RIGHT:\r\n x1Change = 5\r\n###################################################################################\r\n if event.key == pygame.K_a:\r\n x2Change = -5\r\n if event.key == pygame.K_d:\r\n x2Change = 5\r\n###################################################################################\r\n if event.key == pygame.K_p:\r\n pause = True\r\n paused()\r\n \r\n if event.type == pygame.KEYUP:\r\n if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:\r\n x1Change = 0\r\n r1 = 0\r\n\r\n if event.key == pygame.K_a or event.key == pygame.K_d:\r\n x2Change = 0\r\n r2 = 0\r\n\r\n r1 += abs(x1Change)\r\n r2 += abs(x2Change)\r\n\r\n x1 += x1Change\r\n x2 += x2Change\r\n\r\n screen.blit(roadBg, (0, 0))\r\n\r\n screen.blit(enemy, [thingStartX, thingStartY, thingWidth, thingHeight])\r\n thingStartY += enemySpeed\r\n screen.blit(carImg1, (x1, y1))\r\n screen.blit(carImg2, (x2, y2))\r\n\r\n if thingStartY > dispHeight:\r\n thingStartY =0 - thingHeight\r\n thingStartX = random.randrange(0, dispWidth)\r\n######################### LOGIC FOR PLAYER 1 ######################################\r\n # if p1 dodge p2, then p2 will move \r\n if x1 > dispWidth - carWidth or x1 < 0 or y1 > dispHeight-carHeight or y1 < 0:\r\n x1Change = 0\r\n y1Change = 0\r\n #if p1 dodge enemy car, p2 will win\r\n if y1 < (thingStartY + thingHeight):\r\n if (x1 > thingStartX and x1 < thingStartX + thingWidth) or 
x1 + carWidth > thingStartX and x1 + carWidth < thingStartX + thingWidth:\r\n                crash('Player 2 Won', 115, black, twoPlayer)\r\n######################### LOGIC FOR PLAYER 2 ######################################\r\n        #if p2 dodge p1, then p1 will move\r\n        if x2 > dispWidth - carWidth or x2 < 0 or y2 > dispHeight-carHeight or y2 < 0:\r\n            x2Change = 0\r\n            y2Change = 0\r\n        #if p2 dodge enemy car, p1 will win\r\n        if y2 < (thingStartY + thingHeight):\r\n            if x2 > thingStartX and x2 < thingStartX + thingWidth or x2 + carWidth > thingStartX and x2 + carWidth < thingStartX + thingWidth:\r\n                crash('Player 1 Won', 115, red, twoPlayer)\r\n##################### LOGIC FOR BOTH PLAYERS WHILE DODGING AT SAME TIME ###########\r\n        #if player 1 covers more distance towards player 2, then player 2 will move\r\n        if r1 > r2:\r\n            if x1 < x2+carWidth:\r\n                x1Change = 0\r\n                x2Change = -5\r\n        #if player 2 covers more distance towards player 1, then player 1 will move\r\n        elif r2 > r1:\r\n            if x2+carWidth > x1:\r\n                x2Change = 0\r\n                x1Change = 5\r\n        #if the distance of both cars towards each other is same, then do nothing\r\n        elif r1 == r2:\r\n            if x2+carWidth>x1:\r\n                x1Change = 0\r\n                x2Change = 1\r\n\r\n        pygame.display.update()\r\n        clock.tick(60)\r\n        \r\nmenu()\r\npygame.quit()\r\nquit()\r\n\r\n\r\n# Acknowledgement: Sentdex and stackoverflow\r\n","sub_path":"Dodge Car Game/15112 Project.py","file_name":"15112 Project.py","file_ext":"py","file_size_in_byte":17971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"469510468","text":"'''\nCreated on Apr 4, 2012\n\n@author: lanquarden\n'''\n\nimport msgParser\nimport carState\nimport carControl\nimport random\n\nclass Driver(object):\n    '''\n    A driver object for the SCRC\n    '''\n\n    def __init__(self, stage):\n        '''Constructor'''\n        self.WARM_UP = 0\n        self.QUALIFYING = 1\n        self.RACE = 2\n        self.UNKNOWN = 3\n        self.stage = stage\n        \n        self.parser = msgParser.MsgParser()\n        \n        self.state = carState.CarState()\n        \n        self.control = carControl.CarControl()\n        \n        self.steer_lock = 0.785398\n        self.max_speed = 100\n        self.prev_rpm = None\n    \n    def init(self):\n        '''Return init string with rangefinder angles'''\n        self.angles = [0 for x in range(19)]\n        \n        for i in range(5):\n            self.angles[i] = -90 + i * 15\n            self.angles[18 - i] = 90 - i * 15\n        \n        for i in range(5, 9):\n            self.angles[i] = -20 + (i-5) * 5\n            self.angles[18 - i] = 20 - (i-5) * 5\n        \n        return self.parser.stringify({'init': self.angles})\n    \n    def drive(self, msg):\n        self.state.setFromMsg(msg)\n        \n        self.steer()\n        \n        self.gear()\n        \n        self.speed()\n        \n        return self.control.toMsg()\n    \n    def steer(self):\n        directions = [-1.0, -0.8, -0.6, -0.5, -0.4, -0.3, -0.2, -0.15, -0.1, -0.05, 0.0, 0.05, 0.1, 0.15, 0.2, 0.3, 0.4, 0.5, 0.6, 0.8, 1.0]\n        self.control.setSteer(random.choice(directions))\n    \n    def gear(self):\n        rpm = self.state.getRpm()\n        gear = self.state.getGear()\n        \n        if self.prev_rpm == None:\n            up = True\n        else:\n            if (self.prev_rpm - rpm) < 0:\n                up = True\n            else:\n                up = False\n        \n        if up and rpm > 7000:\n            gear += 1\n        \n        if not up and rpm < 3000:\n            gear -= 1\n        \n        self.control.setGear(gear)\n    \n    def speed(self):\n        accels = [-1.0, -0.5, -0.1, 0.0, 0.1, 0.5, 1.0]\n        accel = random.choice(accels)\n        if accel >= 0:\n            self.control.setAccel(accel)\n            self.control.setBrake(0)\n        else:\n            self.control.setAccel(0)\n            self.control.setBrake(-accel)\n    \n    def onShutDown(self):\n        pass\n    \n    def onRestart(self):\n        pass\n    \n","sub_path":"src/randomdriver.py","file_name":"randomdriver.py","file_ext":"py","file_size_in_byte":2384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"49553707","text":"from 
requests.packages.urllib3.contrib import pyopenssl\nimport subprocess\n\n\ndef get_https_cert(host, port=443, raw=False):\n \"\"\"Read subject domains in https cert from remote server\"\"\"\n if raw:\n return pyopenssl.ssl.get_server_certificate((host, port))\n # else :\n return pyopenssl.OpenSSL.crypto.load_certificate(\n pyopenssl.OpenSSL.crypto.FILETYPE_PEM,\n pyopenssl.ssl.get_server_certificate((host, port))\n )\n\ndef get_subj_from_cert(c):\n return c.get_subject().get_components()\n\ndef get_field_from_subj(c, f):\n r = []\n for k,v in c.get_subject().get_components():\n if f=='*' or k.lower() == f.lower():\n r.append(v)\n return r\n\n\ndef get_sni(c):\n r = []\n n = c.get_extension_count()\n for i in range(n):\n e = c.get_extension(i)\n if e.get_short_name() == 'subjectAltName':\n return str(e)\n\n\ndef _safe_run(arg):\n try:\n return subprocess.check_output(arg, shell=True,)\n except subprocess.CalledProcessError as ex:\n fancy.warn(\"Subprocess retuned error code 0 !=\" + str(ex.returncode))\n return ex.output\n\n\ndef openssl_new_csr(base, subj):\n _safe_run(\"openssl genrsa -aes256 -passout pass:123456 -out {base}.key_pass 4096\".format(base=base))\n _safe_run(\"openssl rsa -passin pass:123456 -in {base}.key_pass -out {base}.key\".format(base=base))\n _safe_run(\"openssl req -new -key {base}.key -out {base}.csr -subj '{subj}'\".format(base=base, subj=subj))\n return dict(\n key = base + \".key\",\n csr = base + \".csr\",\n crt = base + \".crt\",\n )\n\ndef openssl_selfsign(base=None, days=1337, openssl_args='', **kw):\n if base is not None:\n kw['csr'] = base + '.csr'\n kw['key'] = base + '.key'\n kw['crt'] = base + '.crt'\n else:\n assert 'csr' not in kw, \"Must have CSR or BASE ... \"\n _safe_run('openssl x509 -req -days {days} -in {csr} -signkey {key} -out {crt} {a}'.format(\n days = days,\n a = openssl_args,\n **kw\n ))\n\ndef openssl_csr_from_crt(crt, key, csr):\n _safe_run('openssl x509 -in {crt} -signkey {key} -x509toreq -out {csr}'.format(\n crt = crt,\n key = key,\n csr = csr,\n ))\n\n\ndef openssl_sign_ca(csr=None, ca_crt=None, ca_key=None, out_crt=None, days=1337, serial=0x31337, openssl_args=''):\n _safe_run(\"openssl x509 -req -days {d} -in {csr} -CA {ca_crt} -CAkey {ca_key} -out {out_crt} -set_serial {s} {a}\".format(\n csr=csr,\n ca_crt = ca_crt,\n ca_key = ca_key,\n out_crt = out_crt,\n d = days,\n s = serial,\n a = openssl_args,\n ))\n\n\n\n\n\n\n\n\n\n\n\n\n\"\"\"\nbase_ca = box.mk_path(\"fakeca\")\nssl.openssl_new_csr(hackmaster.safe_run, base=base_ca, subj=subj_str)\nssl.openssl_selfsign(hackmaster.safe_run, base=base_ca)\n\nbase_us = box.mk_path(\"usercert\")\nssl.openssl_new_csr(hackmaster.safe_run, base=base_us, subj=subj_str)\n\nssl.openssl_sign_ca(\n hackmaster.safe_run,\n csr = base_us + \".csr\",\n ca_crt = base_ca + \".crt\",\n ca_key = base_ca + \".key\",\n out_crt = base_us + \".crt\",\n)\n\n#ssl.openssl_selfsign(hackmaster.safe_run, base=base)\n\"\"\"\n\n","sub_path":"ssl.py","file_name":"ssl.py","file_ext":"py","file_size_in_byte":2946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"79911224","text":"\n#encoding=utf-8\nimport numpy as np\nfrom keras.optimizers import SGD,Adagrad,RMSprop\nfrom keras import regularizers\nfrom keras_preprocessing.image import ImageDataGenerator\nfrom keras.models import Sequential\nfrom keras.models import save_model\nfrom keras.models import load_model\nfrom keras.models import Model\nfrom keras.layers import Dropout, Flatten, Dense\nfrom keras.callbacks 
import TensorBoard,EarlyStopping,CSVLogger, ModelCheckpoint\nfrom keras import applications\nfrom keras.utils import plot_model\nfrom keras.applications.vgg16 import preprocess_input\nfrom keras.initializers import normal\nimport sys\nimport os\nfrom vgg16 import VGG16\nfrom vgg16_CWR import VGG16_cwr\nfrom myConv import myConv\nimport argparse\nimport pandas as pd #引入pandas\n\nnaive=False \ncumulative=False\nCWR=True\n\n##############################\n# Configuration settings\n##############################\ndef mkdir(path):\n\n # 去除首位空格\n path=path.strip()\n # 去除尾部 \\ 符号\n path=path.rstrip(\"\\\\\")\n \n # 判断路径是否存在\n # 存在 True\n # 不存在 False\n isExists=os.path.exists(path)\n \n # 判断结果\n if not isExists:\n # 如果不存在则创建目录\n print(path+' 创建成功')\n # 创建目录操作函数\n os.makedirs(path)\n return True\n else:\n # 如果目录存在则不创建,并提示目录已存在\n print(path+' 目录已存在')\n return False\n\n\n\nparser = argparse.ArgumentParser(description='Process some integers.')\nparser.add_argument('-mn','--model_name',action='store',type=str,\n default = 'default',help='you must give a model name')\nparser.add_argument('-dp','--data_path',action='store',type=str,\n default='../data',help='train and val file path')\nparser.add_argument('-lr','--learning_rate',action='store',type=float,\n default=0.01,help='learning_rate')\n#parser.add_argument('-mt','--momentum',action='store',type=float,\n# default=0.9,help='learning_rate')\nparser.add_argument('-ne','--num_epochs',action='store',type=int,\n default=10,help='num_epochs')\nparser.add_argument('-bs','--batch_size',action='store',type=int,\n default=100,help='batch size')\nparser.add_argument('-nc','--num_classes',action='store',type=int,\n default=2,help='num classes') # no use now\nparser.add_argument('-tl','--train_layers',nargs='+',action='store',type=str,\n default=['logit','linear'],help='layers need to be trained.')\n# TODO\nparser.add_argument('-tn','--top_N',action='store',type=int,\n default=5,help='whether the targets are in the top K predictions.')\nparser.add_argument('-um','--use_model',action='store',type=str,\n default='',help='use model to initial.')\n# TODO\nparser.add_argument('-spe','--steps_per_epoch',action='store',type=int,\n default=100,help='train: steps_pre_epoch.')\nparser.add_argument('-vs','--validation_steps',action='store',type=int,\n default=50,help='test: validation_steps.')\n\n\nargs = parser.parse_args()\nprint(\"=\"*50)\nprint(\"[INFO] args:\\r\")\nprint(args)\nprint(\"=\"*50)\n\ntrain_data_dir = args.data_path + '/core50_128x128/s{}' #!!!!!!\nvalidation_data_dir = args.data_path + '/core50_128x128/s2'\n\n\nmodel_name = args.model_name\n\nepochs = args.num_epochs\n\nbatch_size = args.batch_size\n\ntrain_layers = args.train_layers\n\nlearning_rate = args.learning_rate\n\nuse_model = args.use_model\n\nsteps_per_epoch = args.steps_per_epoch \n\nvalidation_steps = args.validation_steps \n\nS_PATH = sys.path[0]\n\nDATA_PATH = args.data_path\n\nTENSORBOARD_PATH = DATA_PATH + '/Graph/{}'.format(model_name)\nmkdir(os.path.dirname(TENSORBOARD_PATH))\nmkdir(TENSORBOARD_PATH)\n\nLOG_PATH = DATA_PATH + '/log/training_{}.csv'.format(model_name)\nmkdir(os.path.dirname(LOG_PATH))\n\nBEST_WEIGHT = DATA_PATH + \"/bestWeights/weight_{}.h5\".format(model_name)\nmkdir(os.path.dirname(BEST_WEIGHT))\n\nEND_WEIGHT = DATA_PATH + '/endWeights/weight_{}.h5'.format(model_name)\nmkdir(os.path.dirname(END_WEIGHT))\n\nEND_MODEL = DATA_PATH + '/endModel/model_{}.h5'.format(model_name)\nmkdir(os.path.dirname(END_MODEL))\n\nimg_width=128\nimg_height=128\n# weight 
initialization\ncw=False\ntw=True\nif(cw):\n kernel_initializer='zero'\nelif(tw):\n kernel_initializer=normal(mean=0.0, stddev=0.01, seed=None)\nelse:\n kernel_initializer='uniform'\n\n\nif use_model == '' :\n print(\"*\" * 50)\n print('[INFO] init train mode')\n print(\"*\" * 50)\n\n if(CWR):\n vgg16=VGG16_cwr(input_shape=(img_width,img_height,3),weights=None,kernel_initializer=kernel_initializer)\n x = vgg16.get_layer('pool5').output\n x=Flatten()(x)\n else:\n vgg16 = VGG16(input_shape=(img_width,img_height,3),weights=None)\n mid_fc7 = vgg16.get_layer('mid_fc7').output\n x = Dropout(0.5)(mid_fc7) # add 0413\n# prediction = Dense(output_dim=1, activation='sigmoid', name='logit')(x)\n prediction = Dense(50, activation='softmax', name='mid_fc8')(x)\n\n model = Model(input=vgg16.input, output=prediction)\n\nelif use_model == 'linear':\n #svm classification\n print(\"*\" * 50)\n print('[INFO] use {} train mode'.format(use_model))\n print(\"*\" * 50)\n\n # vgg16 \n vgg16 = VGG16(weights='imagenet')\n \n # ** get vgg top layer then add a logit layer for classification **\n fc2 = vgg16.get_layer('fc2').output\n prediction = Dense(output_dim=1, activation='linear', name='linear',kernel_regularizer=regularizers.l2(0.01))(fc2)\n model = Model(input=vgg16.input, output=prediction)\n\n\nelif use_model == 'myConv':\n print(\"*\" * 50)\n print('[INFO] use {} train mode'.format(use_model))\n print(\"*\" * 50)\n model = myConv()\n# print(\"load weight\")\n# model.load_weights(\"../data/endWeights/weight_v7.h5\")\n\n\nelse:\n print(\"*\" * 50)\n print('[INFO] continue train mode')\n print(\"*\" * 50)\n model = load_model(use_model)\n\n\n##############################\n# which layer will be trained \n##############################\nif use_model != 'myConv':\n for layer in model.layers:\n #if layer.name in ['fc1', 'fc2', 'logit']:\n if layer.name in train_layers :\n layer.trainable = True\n else:\n layer.trainable = False\n\n# model summary and structure pic.\nmodel.summary()\n\n\n# only can be used in py2\nplot_model(model, show_shapes=True, show_layer_names=True,to_file='vgg_model.png')\n\n\n##############################\n# compile\n##############################\n\n\n # Adjusting the batch order\nsgd = SGD(lr=0.01, decay=0, momentum=0, nesterov=True)\n\n# adadelta drop 0.5\nmodel.compile(optimizer=sgd,\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n\n##############################\n# data generation\n##############################\n\n\ntrain_datagen = ImageDataGenerator(preprocessing_function=preprocess_input,\n rotation_range=20,\n width_shift_range=0.2,\n height_shift_range=0.2,\n shear_range=0.2,\n zoom_range=0.2,\n horizontal_flip=True,\n fill_mode='nearest'\n )\n\n\nvalidation_datagen = ImageDataGenerator(preprocessing_function=preprocess_input,\n )\n\nclass TensorBoardWrapper(TensorBoard):\n '''Sets the self.validation_data property for use with TensorBoard callback.'''\n\n def __init__(self, batch_gen, nb_steps, batch_size, **kwargs):\n super(TensorBoardWrapper, self).__init__(**kwargs)\n self.batch_gen = batch_gen # The generator.\n self.nb_steps = nb_steps # Number of times to call next() on the generator.\n self.batch_size = batch_size\n\n def on_epoch_end(self, epoch, logs):\n # Fill in the `validation_data` property. 
Obviously this is specific to how your generator works.\n # Below is an example that yields images and classification tags.\n # After it's filled in, the regular on_epoch_end method has access to the validation_data.\n imgs, tags = None, None\n for s in range(self.nb_steps):\n ib, tb = next(self.batch_gen)\n if imgs is None and tags is None:\n imgs = np.zeros(((self.nb_steps * self.batch_size,) + ib.shape[1:]), dtype=np.float32)\n tags = np.zeros(((self.nb_steps * self.batch_size,) + tb.shape[1:]), dtype=np.uint8)\n imgs[s * ib.shape[0]:(s + 1) * ib.shape[0]] = ib\n tags[s * tb.shape[0]:(s + 1) * tb.shape[0]] = tb\n \n self.validation_data = [imgs, tags, np.ones(imgs.shape[0])]\n \n return super(TensorBoardWrapper, self).on_epoch_end(epoch, logs)\n\n\n\n##############################\n# Call Back\n##############################\n# tbCallBack=TensorBoardWrapper(validation_generator, nb_steps=validation_steps,log_dir=\"{}\".format(TENSORBOARD_PATH), histogram_freq=1, batch_size=batch_size)\n\n# tensor board\ntbCallBack = TensorBoard(log_dir=TENSORBOARD_PATH, histogram_freq=0, \n write_graph=True, write_images=True)\n#* tensorboard --logdir path_to_current_dir/Graph --port 8080 \nprint(\"tensorboard --logdir {} --port 8080\".format(TENSORBOARD_PATH))\n\n\n# earlystoping\n# ES = EarlyStopping(monitor='val_loss', patience=3, verbose=1, mode='auto')\n\n# csv log\ncsvlog = CSVLogger(LOG_PATH,separator=',', append=True)\n\n# saves the model weights after each epoch if the validation loss decreased\ncheckpointer = ModelCheckpoint(monitor='val_acc',filepath=BEST_WEIGHT, verbose=1, save_best_only=True)\n\ndef gen_flow_for_two_inputs(generators):\n while True:\n for item in generators:\n X1i=item.next()\n #Assert arrays are equal - this was for peace of mind, but slows down training\n #np.testing.assert_array_equal(X1i[0],X2i[0])\n yield [X1i[0], X1i[1]]\n\n#################################\n# fit\n#################################\n\nroot_dir='/home/eric/Documents/Experiments/core50/extras/batches_filelists/NC_inc/run0'\n# filename='train_batch_00_filelist.txt'\n# filepath=os.path.join(root_dir,filename)\n\ndef getDataFrame(filepath):\n traindf=pd.read_csv(filepath,sep=' ',header=None) #加载papa.txt,指定它的分隔符是 \\t\n traindf.rename(columns={0:\"filename\",1:'class'},inplace=True)\n traindf['class']=traindf['class'].astype(\"str\")\n return traindf\ndirectory='/home/eric/data/CoRe50/core50_128x128'\n\n\ntestfilename='test_filelist.txt'\ntestfilepath=os.path.join(root_dir,testfilename)\nvaliddf=pd.read_csv(testfilepath,sep=' ',header=None) #加载papa.txt,指定它的分隔符是 \\t\nvaliddf.rename(columns={0:\"filename\",1:'class'},inplace=True)\nvaliddf['class']=validdf['class'].astype(\"str\")\n\nlabels=[]\nfor i in range(50):\n labels.append(str(i))\n\nif naive:\n for i in range(8):\n filename='train_batch_0{}_filelist.txt'.format(i)\n filepath=os.path.join(root_dir,filename)\n traindf=getDataFrame(filepath)\n train_generator = train_datagen.flow_from_dataframe(\n dataframe=traindf,\n directory=directory,\n x_col=\"filename\",\n y_col=\"class\",\n subset=\"training\",\n classes=labels,\n target_size=[img_width, img_height],\n batch_size=batch_size,\n class_mode='categorical')\n validation_generator = train_datagen.flow_from_dataframe(\n dataframe=validdf,\n directory=directory,\n x_col=\"filename\",\n y_col=\"class\",\n subset=\"training\",\n classes=labels,\n target_size=[img_width, img_height],\n batch_size=batch_size,\n class_mode='categorical')\n steps_per_epoch=train_generator.n//train_generator.batch_size\n 
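# Floor division: a trailing partial batch is dropped from the step count.\n        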
validation_steps=validation_generator.n//validation_generator.batch_size\n # begin to fit \n model.fit_generator(train_generator,\n steps_per_epoch=steps_per_epoch,\n epochs=1,\n validation_data=validation_generator,\n validation_steps=validation_steps,\n callbacks=[tbCallBack,csvlog,checkpointer],verbose=1)\n model.save_weights(END_WEIGHT)\n save_model(model,END_MODEL) \nelif(cumulative):\n validation_generator = validation_datagen.flow_from_dataframe(\n dataframe=validdf,\n directory=directory,\n x_col=\"filename\",\n y_col=\"class\",\n subset=\"training\",\n classes=labels,\n target_size=[img_width, img_height],\n batch_size=batch_size,\n class_mode='categorical')\n file=open(\"labels.txt\",\"w\")\n label_map=validation_generator.class_indices\n label_map=dict((v,k) for k,v in label_map.items())\n \n print(label_map)\n for key,value in label_map.items():\n file.write(str(key)+\" \"+value+\"\\n\")\n file.close()\n\n \n dfs=[]\n for i in range(8):\n filename='train_batch_0{}_filelist.txt'.format(i)\n filepath=os.path.join(root_dir,filename)\n traindf=getDataFrame(filepath)\n dfs.append(traindf)\n generator_lists=[]\n for df in dfs:\n temp1 = train_datagen.flow_from_dataframe(\n dataframe=df,\n directory=directory,\n x_col=\"filename\",\n y_col=\"class\",\n subset=\"training\",\n classes=labels,\n target_size=[img_width, img_height],\n batch_size=batch_size,\n class_mode='categorical')\n generator_lists.append(temp1)\n\n \n \n # print(labels)\n # print(classes)\n\n num=0\n for item in generator_lists:\n num+=item.n\n steps_per_epoch=num //generator_lists[0].batch_size\n validation_steps=validation_generator.n//validation_generator.batch_size\n # begin to fit \n model.fit_generator(gen_flow_for_two_inputs(generator_lists),\n steps_per_epoch=steps_per_epoch,\n epochs=1,\n validation_data=validation_generator,\n validation_steps=validation_steps,\n callbacks=[tbCallBack,csvlog,checkpointer],verbose=1)\n model.save_weights(END_WEIGHT)\n save_model(model,END_MODEL)\nelif(CWR):\n for i in range(8):\n filename='train_batch_0{}_filelist.txt'.format(i)\n filepath=os.path.join(root_dir,filename)\n traindf=getDataFrame(filepath)\n train_generator = train_datagen.flow_from_dataframe(\n dataframe=traindf,\n directory=directory,\n x_col=\"filename\",\n y_col=\"class\",\n subset=\"training\",\n classes=labels,\n target_size=[img_width, img_height],\n batch_size=batch_size,\n class_mode='categorical')\n validation_generator = train_datagen.flow_from_dataframe(\n dataframe=validdf,\n directory=directory,\n x_col=\"filename\",\n y_col=\"class\",\n subset=\"training\",\n classes=labels,\n target_size=[img_width, img_height],\n batch_size=batch_size,\n class_mode='categorical')\n steps_per_epoch=train_generator.n//train_generator.batch_size\n validation_steps=validation_generator.n//validation_generator.batch_size\n if(i>0):\n print('freeze layers')\n for layer in model.layers[:-3]:\n layer.trainable = False\n sgd = SGD(lr=0.01, decay=0, momentum=0.9, nesterov=True)\n print(model.summary())\n model.compile(optimizer=sgd,\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n \n# adadelta drop 0.5\n \n\n # begin to fit \n model.fit_generator(train_generator,\n steps_per_epoch=steps_per_epoch,\n epochs=1,\n validation_data=validation_generator,\n validation_steps=validation_steps,\n callbacks=[tbCallBack,csvlog,checkpointer],verbose=1)\n model.save_weights(END_WEIGHT)\n save_model(model,END_MODEL) 
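\n\n# A minimal resume/evaluate sketch (not part of the original run; names reused\n# from this script, so it assumes validdf/labels/img_width/etc. as defined above):\n#\n#   model = load_model(END_MODEL)\n#   eval_generator = validation_datagen.flow_from_dataframe(\n#       dataframe=validdf, directory=directory, x_col="filename", y_col="class",\n#       classes=labels, target_size=[img_width, img_height],\n#       batch_size=batch_size, class_mode='categorical')\n#   print(model.evaluate_generator(\n#       eval_generator, steps=eval_generator.n // eval_generator.batch_size))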
","sub_path":"src/train_vgg16_NC.py","file_name":"train_vgg16_NC.py","file_ext":"py","file_size_in_byte":19280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"570730487","text":"#MODIFICA DEL 7 NOV 2020 ALLA PENULTIMA RIGA\nimport pandas as pd\nimport numpy as np\nimport pickle\nimport platform\n\nclass Clean_Merge_Dataset:\n\tdef __init__(self, name=''):\n\t\tself.path_to_final = './data-ready/final_dataset_'+name+'.pkl'\n\t\tif platform.system() == 'Windows':\n\t\t\tself.path_to_final = self.path_to_final.replace('/','\\\\')\n\t\treturn\n\n\tdef fit(self, X, y=None):\n\t\treturn self\n\t\t\n\tdef transform(self, data_normal, data_tumor, X=None, y=None):\n\t\tprint('Data_normal:', data_normal.shape)\n\t\tprint('Data_tumor:', data_tumor.shape)\n\n\t\tdataset = pd.concat([data_tumor, data_normal], ignore_index=True)\n\t\tprint('All data:', dataset.shape)\n\t\t\n\t\t# removing TCGA-MESO label items\n\t\tdataset = dataset[dataset['label'] != 'TCGA-MESO']\n\t\tprint(set(dataset['label']))\n\t\t\n\t\t# union target with label\n\t\tfor index, element in dataset.iterrows():\n\t\t\tif element['target'] is False:\n\t\t\t\telement['label'] = element['target']\n\t\t\t#else:\n\t\t\t\t# element['label'] = element['label'] + '_' + str(element['target'])\n\t\t\t\t#element['label'] = element['label']\n\t\t\tdataset.at[index, 'label'] = element['label']\n\t\tdataset.drop(['target'], inplace=True, axis=1)\n\t\t\n\t\t# Check null and zero value\n\t\t# count non zero value\n\n\t\t# delete before the 0-values features\n\t\tsum_count = 0\n\t\tindex_to_delete = list()\n\t\tfor i, element in enumerate(dataset.isin([0]).sum()):\n\t\t\tif element == dataset.shape[0]:\n\t\t\t\tsum_count += 1\n\t\t\t\tindex_to_delete.append(i)\n\t\t\t\n\t\tdataset.drop(dataset.columns[index_to_delete], inplace=True, axis=1) # delete 0-values features\n\t\tprint('Features completly 0 values', sum_count, 'removed')\n\n\t\t# counting Nan values\n\t\tsum_count = 0\n\t\tindex_to_delete = list()\n\t\tfor i, element in enumerate(dataset.isna().sum()):\n\t\t\tif element == dataset.shape[0]:\n\t\t\t\tsum_count+=1\n\t\t\t\tindex_to_delete.append(i)\n\n\t\tdataset.dropna(inplace=True, axis=1)\n\t\tprint('Features completely Nan', sum_count, 'removed')\n\t\t\n\t\tprint('Final dataset shape', dataset.shape)\n\t\tdataset.to_pickle(self.path_to_final)\n\t\t\n\t\tdel data_normal\n\t\tdel data_tumor\n\n\t\ty = dataset.loc[:, 'label']\n\t\t#X = dataset.drop(columns=['label']) #ho cancellato 'case_id' 07-11-2020\n\t\tX = dataset\n #X = dataset.iloc[:, dataset.columns != 'case_id']\n\t\t\n\t\treturn X, y\n","sub_path":"myclass/.ipynb_checkpoints/CleanMergeDataset-checkpoint.py","file_name":"CleanMergeDataset-checkpoint.py","file_ext":"py","file_size_in_byte":2203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"205468735","text":"import matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom math import *\n\n\n\n\nZERO=1e-1\ndef f(r):\n\n\n\n\n\n f1 =exp(-2.0*(abs(r)))\n\n return f1\n\n\nx=np.linspace(-10.0,10.0,1000)\n\n\ny1=np.zeros(len(x))\n\nfor i in range(len(x)):\n y1[i]=f(x[i])\n\n\n\n\n\n\n\nplt.subplot(2,1,1)\n\nplt.plot(x,y1,'r')\nplt.title(r'$\\psi=e^{-2r}$')\n\nplt.axis([-10,10,0,1])\nplt.xlabel(\"r\")\nplt.ylabel(r'$\\psi$')\nplt.legend([r'$r \\epsilon[-10,10]$'])\n\nplt.subplot(2,1,2)\nplt.xlabel(\"r\")\nplt.ylabel(r'$\\psi$')\nplt.plot(x,y1,'r')\nplt.legend([r'$r 
\\epsilon[-2,2]$'])\nplt.axis([-2,2,0,1])\nplt.savefig('Pro3a.png')\n","sub_path":"pro3_org/pro3_a.py","file_name":"pro3_a.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"152780987","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Apr 16 18:44:06 2017\n\n@author: Gueguet\n\"\"\"\n\nfrom tkinter import *\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport random\n\n\"\"\"\n# script bonjour.py\nfrom tkinter import *\n\n# Création de la fenêtre principale (main window)\nMafenetre = Tk()\n\n# Création d'un widget Label (texte 'Bonjour tout le monde !')\nLabel1 = Label(Mafenetre, text = 'Bonjour tout le monde !', fg = 'red')\n# Positionnement du widget avec la méthode pack()\nLabel1.pack()\n\n# Création d'un widget Button (bouton Quitter)\nBouton1 = Button(Mafenetre, text = 'Quitter', command = Mafenetre.destroy)\nBouton1.pack()\n\n# Lancement du gestionnaire d'événements\nMafenetre.mainloop()\n\"\"\"\n\n\n\n\n\n\"\"\"\n\n# script frames.py\n#(C) Fabrice Sincère\n\nfrom tkinter import *\n\n# Création de la fenêtre principale\nMafenetre = Tk()\nMafenetre.title('Frame widget')\nMafenetre['bg']='bisque' # couleur de fond\n\n# création d'un widget Frame dans la fenêtre principale\nFrame1 = Frame(Mafenetre,borderwidth=2)\nFrame1.pack(side=LEFT,padx=10,pady=10)\n\n# création d'un second widget Frame dans la fenêtre principale\nFrame2 = Frame(Mafenetre,borderwidth=2,relief=GROOVE) #relief bien stylé !!\nFrame2.pack(side=LEFT,padx=10,pady=10)\n\n# création d'un widget Frame... dans un widget Frame\n# le widget Frame1 est le parent du widget Frame3\n# le parent du widget Frame1 est le widget Mafenetre (fenêtre principale)\nFrame3 = Frame(Frame1,bg=\"white\",borderwidth=2,relief=GROOVE)\nFrame3.pack(side=LEFT,padx=10,pady=10)\n\n# création d'un widget Label et d'un widget Button dans un widget Frame\nLabel(Frame1,text=\"RDV dentiste samedi à 15h\").pack(padx=10,pady=10)\nButton(Frame1,text=\"Effacer\",fg='navy',command=Frame1.destroy).pack(padx=10,pady=10)\n\nLabel(Frame2,text=\"Réviser le contrôle d'info\").pack(padx=10,pady=10)\nButton(Frame2,text=\"Effacer\",fg='navy',command=Frame2.destroy).pack(padx=10,pady=10)\n\nLabel(Frame3,text=\"RDV dentiste à 10h\",bg=\"white\").pack(padx=10,pady=10)\nButton(Frame3,text=\"Effacer\",fg='navy',command=Frame3.destroy).pack(padx=10,pady=10)\n\nMafenetre.mainloop()\n\n\"\"\"\n\n\"\"\"\n# script mot_de_passe.py\n#(C) Fabrice Sincère\nfrom tkinter import *\nfrom tkinter.messagebox import * # boîte de dialogue\n\ndef Verification():\n if Motdepasse.get() == 'python27':\n # le mot de passe est bon : on affiche une boîte de dialogue puis on ferme la fenêtre\n showinfo('Résultat','Mot de passe correct.\\nAu revoir !')\n Mafenetre.destroy()\n else:\n # le mot de passe est incorrect : on affiche une boîte de dialogue\n showwarning('Résultat','Mot de passe incorrect.\\nVeuillez recommencer !')\n Motdepasse.set('')\n\n# Création de la fenêtre principale (main window)\nMafenetre = Tk()\nMafenetre.title('Identification requise')\n\n# Création d'un widget Label (texte 'Mot de passe')\nLabel1 = Label(Mafenetre, text = 'Mot de passe ')\nLabel1.pack(side = LEFT, padx = 5, pady = 5)\n\n# Création d'un widget Entry (champ de saisie)\nMotdepasse= StringVar()\nChamp = Entry(Mafenetre, textvariable= Motdepasse, show='*', bg ='bisque', fg='maroon')\nChamp.focus_set()\nChamp.pack(side = LEFT, padx = 5, pady = 5)\n\n# Création d'un widget Button (bouton Valider)\nBouton = 
Button(Mafenetre, text ='Valider', command = Verification)\nBouton.pack(side = LEFT, padx = 5, pady = 5)\n\nMafenetre.mainloop()\n\"\"\"\n\n\"\"\"\n# script spinbox.py\n#(C) Fabrice Sincère\nfrom tkinter import *\n\ndef carre():\n Calcul du carré \n Resultat.set(\"Carré = \"+str(float(Valeur.get())**2))\n\n# Création de la fenêtre principale (main window)\nMafenetre = Tk()\nMafenetre.title(\"Spinbox widget\")\n\nValeur = StringVar()\nValeur.set(2.0)\n# Création d'un widget Spinbox\nboite = Spinbox(Mafenetre,from_=0,to=10,increment=0.5,textvariable=Valeur,width=5,command=carre)\nboite.pack(padx=30,pady=10)\n\n# Création d'un widget Label\nResultat = StringVar()\ncarre()\nLabel(Mafenetre,textvariable=Resultat).pack(padx=30,pady=10)\n\nMafenetre.mainloop()\n\"\"\"\n\n\n# script cercle.py\n#(C) Fabrice Sincère\nfrom tkinter import *\nimport random\n\ndef Cercle():\n 'Dessine un cercle de centre (x,y) et de rayon r '\n x = random.randint(0,Largeur)\n y = random.randint(0,Hauteur)\n r = 20\n Canevas.create_oval(x-r, y-r, x+r, y+r, outline='blue', fill='blue')\n\ndef Effacer():\n ' Efface la zone graphique '\n Canevas.delete(ALL)\n\n# Création de la fenêtre principale (main window)\nMafenetre = Tk()\nMafenetre.title('Cercle')\n\n# Création d'un widget Canvas (zone graphique)\nLargeur = 480\nHauteur = 320\nCanevas = Canvas(Mafenetre, width = Largeur, height =Hauteur, bg ='white')\nCanevas.pack(padx =5, pady =5)\n\n# Création d'un widget Button (bouton Go)\nBoutonGo = Button(Mafenetre, text ='Go', command = Cercle)\nBoutonGo.pack(side = LEFT, padx = 10, pady = 10)\n\n# Création d'un widget Button (bouton Effacer)\nBoutonEffacer = Button(Mafenetre, text ='Effacer', command = Effacer)\nBoutonEffacer.pack(side = LEFT, padx = 5, pady = 5)\n\n# Création d'un widget Button (bouton Quitter)\nBoutonQuitter = Button(Mafenetre, text ='Quitter', command = Mafenetre.destroy)\nBoutonQuitter.pack(side = LEFT, padx = 5, pady = 5)\n\nMafenetre.mainloop()\n\n\n\"\"\"\n# script lecture_gif.py\n#(C) Fabrice Sincère\nfrom tkinter import *\nimport tkinter.messagebox\nimport tkinter.filedialog\n\ndef Ouvrir():\n Canevas.delete(ALL) # on efface la zone graphique\n\n filename = tkinter.filedialog.askopenfilename(title=\"Ouvrir une image\",filetypes=[('gif files','.gif'),('all files','.*')])\n print(filename)\n\n photo = PhotoImage(file=filename)\n gifdict[filename] = photo # référence\n print(gifdict)\n\n Canevas.create_image(0,0,anchor=NW,image=photo)\n Canevas.config(height=photo.height(),width=photo.width())\n\n Mafenetre.title(\"Image \"+str(photo.width())+\" x \"+str(photo.height()))\n\ndef Fermer():\n Canevas.delete(ALL)\n Mafenetre.title(\"Image\")\n\ndef Apropos():\n tkinter.messagebox.showinfo(\"A propos\",\"Tutorial Python Tkinter\\n(C) Fabrice Sincère\")\n\n# Main window\nMafenetre = Tk()\nMafenetre.title(\"Image\")\n\n# Création d'un widget Menu\nmenubar = Menu(Mafenetre)\n\nmenufichier = Menu(menubar,tearoff=0)\nmenufichier.add_command(label=\"Ouvrir une image\",command=Ouvrir)\nmenufichier.add_command(label=\"Fermer l'image\",command=Fermer)\nmenufichier.add_command(label=\"Quitter\",command=Mafenetre.destroy)\nmenubar.add_cascade(label=\"Fichier\", menu=menufichier)\n\nmenuaide = Menu(menubar,tearoff=0)\nmenuaide.add_command(label=\"A propos\",command=Apropos)\nmenubar.add_cascade(label=\"Aide\", menu=menuaide)\n\n# Affichage du menu\nMafenetre.config(menu=menubar)\n\n# Création d'un widget Canvas\nCanevas = Canvas(Mafenetre)\nCanevas.pack(padx=5,pady=5)\n\n# Utilisation d'un dictionnaire pour conserver une 
référence\ngifdict={}\n\nMafenetre.mainloop()\n\"\"\"\n\n\"\"\"\n# script animation_balle.py\n#(C) Fabrice Sincère\n\nfrom tkinter import *\nimport math,random\n\nLARGEUR = 480\nHAUTEUR = 320\nRAYON = 15 # rayon de la balle\n\n# position initiale au milieu\nX = LARGEUR/2\nY = HAUTEUR/2\n\n# direction initiale aléatoire\nvitesse = random.uniform(1.8,2)*5\nangle = random.uniform(0,2*math.pi)\nDX = vitesse*math.cos(angle)\nDY = vitesse*math.sin(angle)\n\ndef deplacement():\n    ' Déplacement de la balle'\n    global X,Y,DX,DY,RAYON,LARGEUR,HAUTEUR\n\n    # rebond à droite\n    if X+RAYON+DX > LARGEUR:\n        X = 2*(LARGEUR-RAYON)-X\n        DX = -DX\n\n    # rebond à gauche\n    if X-RAYON+DX < 0:\n        X = 2*RAYON-X\n        DX = -DX\n\n    # rebond en bas\n    if Y+RAYON+DY > HAUTEUR:\n        Y = 2*(HAUTEUR-RAYON)-Y\n        DY = -DY\n\n    # rebond en haut\n    if Y-RAYON+DY < 0:\n        Y = 2*RAYON-Y\n        DY = -DY\n\n    X = X+DX\n    Y = Y+DY\n\n    # affichage\n    Canevas.coords(Balle,X-RAYON,Y-RAYON,X+RAYON,Y+RAYON)\n\n    # mise à jour toutes les 50 ms\n    Mafenetre.after(50,deplacement)\n\n# Création de la fenêtre principale\nMafenetre = Tk()\nMafenetre.title(\"Animation Balle\")\n\n# Création d'un widget Canvas\nCanevas = Canvas(Mafenetre,height=HAUTEUR,width=LARGEUR,bg='white')\nCanevas.pack(padx=5,pady=5)\n\n# Création d'un objet graphique\nBalle = Canevas.create_oval(X-RAYON,Y-RAYON,X+RAYON,Y+RAYON,width=1,fill='green')\n\ndeplacement()\nMafenetre.mainloop()\n\"\"\"\n\n\"\"\"\n# script drag_and_drop.py\n#(C) Fabrice Sincère\nfrom tkinter import *\n\ndef Clic(event):\n    ' Gestion de l'événement Clic gauche '\n    global DETECTION_CLIC_SUR_OBJET\n\n    # position du pointeur de la souris\n    X = event.x\n    Y = event.y\n    print(\"Position du clic -> \",X,Y)\n\n    # coordonnées de l'objet\n    [xmin,ymin,xmax,ymax] = Canevas.coords(Carre)\n\n    print(\"Position objet -> \",xmin,ymin,xmax,ymax)\n    if xmin<=X<=xmax and ymin<=Y<=ymax: DETECTION_CLIC_SUR_OBJET = True\n    else: DETECTION_CLIC_SUR_OBJET = False\n    print(\"DETECTION CLIC SUR OBJET -> \",DETECTION_CLIC_SUR_OBJET)\n\ndef Drag(event):\n    ' Gestion de l'événement bouton gauche enfoncé '\n    X = event.x\n    Y = event.y\n    print(\"Position du pointeur -> \",X,Y)\n\n    if DETECTION_CLIC_SUR_OBJET == True:\n        # limite de l'objet dans la zone graphique\n        if X<0: X=0\n        if X>Largeur: X=Largeur\n        if Y<0: Y=0\n        if Y>Hauteur: Y=Hauteur\n        # mise à jour de la position de l'objet (drag)\n        Canevas.coords(Carre,X-TailleCarre,Y-TailleCarre,X+TailleCarre,Y+TailleCarre)\n\nDETECTION_CLIC_SUR_OBJET = False\n\n# Création de la fenêtre principale\nMafenetre = Tk()\nMafenetre.title(\"Drag and drop\")\n\n# Création d'un widget Canvas\nLargeur = 480\nHauteur = 160\nTailleCarre = 20\nCanevas = Canvas(Mafenetre,width=Largeur,height=Hauteur,bg ='white')\n# Création d'un objet graphique\nCarre = Canevas.create_rectangle(0,0,TailleCarre*2,TailleCarre*2,fill='maroon')\n\n# La méthode bind() permet de lier un événement avec une fonction\nCanevas.bind('<Button-1>',Clic)   # événement clic gauche (press)\nCanevas.bind('<B1-Motion>',Drag)  # événement bouton gauche enfoncé (hold down)\n\nCanevas.focus_set()\nCanevas.pack(padx=10,pady=10)\n\nMafenetre.mainloop()\n\"\"\"","sub_path":"Test_tkinter.py","file_name":"Test_tkinter.py","file_ext":"py","file_size_in_byte":9900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"484321467","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals\n\nimport tempfile\nimport logging\n\nimport boto3\n\nfrom unittest import TestCase\nfrom moto import mock_s3\n\nfrom oneid import utils\nfrom 
oneid.file_adapter import s3_handler\n\nlogger = logging.getLogger(__name__)\n\nlogging.getLogger('botocore').propagate = False\nlogging.getLogger('boto3').propagate = False\n\n\n@mock_s3\nclass TestS3Handler(TestCase):\n def setUp(self):\n self.s3 = boto3.resource('s3')\n self.data = utils.to_bytes('Héllo!😀')\n self.bucket_name = 'somebucket'\n self.other_bucket_name = 'otherbucket'\n self.bucket = self.s3.Bucket(self.bucket_name)\n self.assertIsInstance(self.bucket.create(), dict)\n\n with tempfile.NamedTemporaryFile(delete=True) as tf:\n self.key = tf.name[1:]\n self.filename = 's3://{}/{}'.format(self.bucket_name, self.key)\n self.object = self.bucket.Object(self.key)\n self.object.put(Body=self.data)\n\n def tearDown(self):\n try:\n if self.object:\n self.object.delete()\n if self.bucket:\n self.bucket.delete()\n except:\n # logger.debug('error deleting objects/bucket', exc_info=True)\n pass\n\n def test_join_paths(self):\n self.assertEqual(\n s3_handler.join_paths(self.filename, 'foo', 'bar'),\n '/'.join([self.filename, 'foo', 'bar']),\n )\n\n def test_file_exists(self):\n self.assertTrue(s3_handler.file_exists(self.filename))\n self.object.delete()\n self.object = None\n logger.debug('removed %s', self.filename)\n self.assertFalse(s3_handler.file_exists(self.filename))\n\n def test_file_directory_exists(self):\n self.assertTrue(\n s3_handler.file_directory_exists(self.filename)\n )\n self.object.delete()\n self.object = None\n self.assertFalse(\n s3_handler.file_exists(self.filename)\n )\n self.assertTrue(\n s3_handler.file_directory_exists(self.filename)\n )\n\n for key in self.bucket.objects.all():\n logger.debug('key=%s', key)\n\n self.bucket.delete()\n self.bucket = None\n self.assertFalse(\n s3_handler.file_directory_exists(self.filename)\n )\n\n def test_prepare_directory(self):\n objectname = 's3://{}/{}'.format(self.other_bucket_name, 'something')\n self.assertFalse(s3_handler._bucket_exists(self.other_bucket_name))\n s3_handler.prepare_directory(objectname)\n self.assertTrue(s3_handler._bucket_exists(self.other_bucket_name))\n s3_handler.prepare_directory(objectname)\n self.assertTrue(s3_handler._bucket_exists(self.other_bucket_name))\n s3_handler._s3().Bucket(self.other_bucket_name).delete()\n\n def test_prepare_file_directory(self):\n objectname = 's3://{}/{}'.format(self.other_bucket_name, 'something')\n self.assertFalse(s3_handler._bucket_exists(self.other_bucket_name))\n s3_handler.prepare_file_directory(objectname)\n self.assertTrue(s3_handler._bucket_exists(self.other_bucket_name))\n s3_handler.prepare_file_directory(objectname)\n self.assertTrue(s3_handler._bucket_exists(self.other_bucket_name))\n s3_handler._s3().Bucket(self.other_bucket_name).delete()\n\n def test_read_file(self):\n with s3_handler.read_file(self.filename, True) as data:\n self.assertEqual(data, self.data)\n\n with s3_handler.read_file(self.filename, False) as data:\n self.assertEqual(utils.to_bytes(data), self.data)\n\n def _check_write_file(self, binary):\n filename = None\n\n with tempfile.NamedTemporaryFile(delete=True) as tf:\n key = tf.name[1:]\n filename = 's3://{}/{}'.format(self.bucket_name, key)\n self.assertIsNotNone(filename)\n s3_handler.write_file(filename, self.data, binary)\n\n obj = self.bucket.Object(key)\n data = obj.get()['Body'].read()\n\n if binary:\n self.assertEqual(data, self.data)\n else:\n self.assertEqual(utils.to_string(data), utils.to_string(self.data))\n\n obj.delete()\n\n def test_write_file(self):\n self._check_write_file(True)\n self._check_write_file(False)\n\n 
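# A hedged extra check (a sketch, not from the original suite): round-trip a\n    # binary write through read_file, using only helpers already exercised above.\n    def test_write_read_roundtrip(self):\n        with tempfile.NamedTemporaryFile(delete=True) as tf:\n            key = tf.name[1:]\n        filename = 's3://{}/{}'.format(self.bucket_name, key)\n        s3_handler.write_file(filename, self.data, True)\n        with s3_handler.read_file(filename, True) as data:\n            self.assertEqual(data, self.data)\n        self.bucket.Object(key).delete()\n\n    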
def test_invalid_filenames(self):\n        bad_filenames = [\n            's3:///key/here',\n            's3:/bucket/isnt/right',\n            's3:not.right.at.all',\n            'not even a url',\n        ]\n\n        for filename in bad_filenames:\n            logger.debug('filename=%s', filename)\n            self.assertRaises(\n                ValueError,\n                s3_handler.file_exists, filename,\n            )\n","sub_path":"tests/file_adapter/test_s3_handler.py","file_name":"test_s3_handler.py","file_ext":"py","file_size_in_byte":4738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"230098532","text":"\"\"\"InterviewBit.\n\nProgramming > Binary Search > Matrix search.\n\"\"\"\n\n\nclass Solution:\n    \"\"\"Solution.\"\"\"\n\n    # @param A : list of list of integers\n    # @param B : integer\n    # @return an integer\n    def searchMatrix(self, A, B):\n        \"\"\"Solution.\"\"\"\n        rows = len(A)\n        cols = len(A[0])\n        row, start, end = None, 0, rows - 1\n\n        # Find row\n        while start <= end:\n            mid = (start + end) // 2\n            if A[mid][0] == B:\n                return 1\n\n            if A[mid][0] < B and A[mid][-1] >= B:\n                row = mid\n                break\n            if A[mid][0] > B:\n                end = mid - 1\n            else:\n                start = mid + 1\n\n        if row is None:\n            return 0\n\n        # Search in row\n        start, end = 0, cols - 1\n        while start <= end:\n            mid = (start + end) // 2\n            if A[row][mid] == B:\n                return 1\n            elif A[row][mid] > B:\n                end = mid - 1\n            else:\n                start = mid + 1\n\n        return 0\n\n\nA = [\n    [1, 3, 5, 7],\n    [10, 11, 16, 20],\n    [23, 30, 34, 50]\n]\nB = 50\nprint(Solution().searchMatrix(A, B))\n","sub_path":"interviewbit/Programming/Binary Search/Matrix search/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"117007949","text":"def partition(array,start,end):\n    pivot = array[end]\n    pIndex = start\n    for i in range(start,end):\n        if(array[i]<pivot):\n            array[i],array[pIndex] = array[pIndex],array[i]\n            pIndex += 1\n    array[pIndex],array[end] = array[end],array[pIndex]\n    return pIndex\n\ndef quickSort(array,start,end):\n    if(start>=end):\n        return\n    pIndex = partition(array,start,end)\n    quickSort(array,start,pIndex-1)\n    quickSort(array,pIndex+1,end)\n\n\n\nif __name__ == '__main__':\n    array = list(map(int,input().split()))\n    quickSort(array,0,len(array)-1)\n    print(array)\n","sub_path":"sorting/quicksort.py","file_name":"quicksort.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"227291977","text":"# Copyright 2019 DeepMind Technologies Ltd. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Meta-strategy solvers for PSRO.\"\"\"\n\nimport numpy as np\n\nfrom open_spiel.python.algorithms import lp_solver\nfrom open_spiel.python.algorithms import projected_replicator_dynamics\nfrom open_spiel.python.algorithms.nash_solver import general_nash_solver as gs\nimport pyspiel\n\n\nEPSILON_MIN_POSITIVE_PROBA = 1e-8\n\n\ndef uniform_strategy(solver, return_joint=False, checkpoint_dir=None):\n  \"\"\"Returns a Random Uniform distribution on policies.\n\n  Args:\n    solver: GenPSROSolver instance.\n    return_joint: If true, only returns marginals. 
Otherwise marginals as well\n as joint probabilities.\n\n Returns:\n uniform distribution on strategies.\n \"\"\"\n policies = solver.get_policies()\n policy_lengths = [len(pol) for pol in policies]\n result = [np.ones(pol_len) / pol_len for pol_len in policy_lengths]\n if not return_joint:\n return result\n else:\n joint_strategies = get_joint_strategy_from_marginals(result)\n return result, joint_strategies\n\n\ndef softmax_on_range(number_policies):\n x = np.array(list(range(number_policies)))\n x = np.exp(x-x.max())\n x /= np.sum(x)\n return x\n\n\ndef uniform_biased_strategy(solver, return_joint=False, checkpoint_dir=None):\n \"\"\"Returns a Biased Random Uniform distribution on policies.\n\n The uniform distribution is biased to prioritize playing against more recent\n policies (Policies that were appended to the policy list later in training)\n instead of older ones.\n\n Args:\n solver: GenPSROSolver instance.\n return_joint: If true, only returns marginals. Otherwise marginals as well\n as joint probabilities.\n\n Returns:\n uniform distribution on strategies.\n \"\"\"\n policies = solver.get_policies()\n if not isinstance(policies[0], list):\n policies = [policies]\n policy_lengths = [len(pol) for pol in policies]\n result = [softmax_on_range(pol_len) for pol_len in policy_lengths]\n if not return_joint:\n return result\n else:\n joint_strategies = get_joint_strategy_from_marginals(result)\n return result, joint_strategies\n\n\ndef renormalize(probabilities):\n \"\"\"Replaces all negative entries with zeroes and normalizes the result.\n\n Args:\n probabilities: probability vector to renormalize. Has to be one-dimensional.\n\n Returns:\n Renormalized probabilities.\n \"\"\"\n probabilities[probabilities < 0] = 0\n probabilities = probabilities / np.sum(probabilities)\n return probabilities\n\n\ndef get_joint_strategy_from_marginals(probabilities):\n \"\"\"Returns a joint strategy matrix from a list of marginals.\n Only works in when the different marginals have same lengths.\n Args:\n probabilities: list of probabilities.\n\n Returns:\n A joint strategy from a list of marginals.\n \"\"\"\n probas = []\n for i in range(len(probabilities)):\n probas_shapes = [1] * len(probabilities)\n probas_shapes[i] = -1\n probas.append(probabilities[i].reshape(*probas_shapes))\n result = np.product(probas)\n if type(result) is np.ndarray:\n return result.reshape(-1)\n else:\n return np.array(result)\n\ndef general_get_joint_strategy_from_marginals(probabilities):\n \"\"\"Returns a joint strategy matrix from a list of marginals.\n Does not require marginals to have the same lengths.\n Args:\n probabilities: list of probabilities.\n \n Returns:\n A joint strategy from a list of marginals\n \"\"\"\n joint = np.outer(probabilities[0],probabilities[1])\n for i in range(len(probabilities)-2):\n joint = joint.reshape(tuple(list(joint.shape)+[1]))*probabilities[i+2]\n return joint\n\ndef nash_strategy(solver, return_joint=False, checkpoint_dir=None):\n \"\"\"Returns nash distribution on meta game matrix.\n\n This method only works for two player zero-sum games.\n\n Args:\n solver: GenPSROSolver instance.\n return_joint: If true, only returns marginals. 
Otherwise marginals as well\n as joint probabilities.\n\n Returns:\n Nash distribution on strategies.\n \"\"\"\n meta_games = solver.get_meta_game()\n if not isinstance(meta_games, list):\n meta_games = [meta_games, -meta_games]\n meta_games = [x.tolist() for x in meta_games]\n if len(meta_games) != 2:\n raise NotImplementedError(\n \"nash_strategy solver works only for 2p zero-sum\"\n \"games, but was invoked for a {} player game\".format(len(meta_games)))\n nash_prob_1, nash_prob_2, _, _ = (\n lp_solver.solve_zero_sum_matrix_game(\n pyspiel.create_matrix_game(*meta_games)))\n result = [\n renormalize(np.array(nash_prob_1).reshape(-1)),\n renormalize(np.array(nash_prob_2).reshape(-1))\n ]\n\n if not return_joint:\n return result\n else:\n joint_strategies = get_joint_strategy_from_marginals(result)\n return result, joint_strategies\n\n\ndef general_nash_strategy(solver, return_joint=False, NE_solver=\"gambit\", mode='one', game=None, checkpoint_dir=None):\n \"\"\"Returns nash distribution on meta game matrix.\n\n This method works for general-sum multi-player games.\n\n Args:\n solver: GenPSROSolver instance.\n return_joint: If true, only returns marginals. Otherwise marginals as well\n as joint probabilities.\n NE_solver: Tool for finding a NE.\n mode: Return one or all or pure NE.\n game: overrides solver.get_meta_games() if provided\n Returns:\n Nash distribution on strategies.\n \"\"\"\n meta_games = solver.get_meta_game() if game is None else game\n if not isinstance(meta_games, list):\n meta_games = [meta_games, -meta_games]\n equilibria = gs.nash_solver(meta_games, solver=NE_solver, mode=mode, checkpoint_dir=checkpoint_dir)\n\n if not return_joint:\n return equilibria\n else:\n if mode == 'all' and type(equilibria[0])==list:\n # If multiple NE exist, return a list with joint strategies.\n joint_strategies_list = [get_joint_strategy_from_marginals([ne]) for ne in equilibria]\n return equilibria, joint_strategies_list\n else:\n joint_strategies = get_joint_strategy_from_marginals(equilibria)\n return equilibria, joint_strategies\n\n\ndef prd_strategy(solver, return_joint=False, checkpoint_dir=None):\n \"\"\"Computes Projected Replicator Dynamics strategies.\n Args:\n solver: GenPSROSolver instance.\n return_joint: If true, only returns marginals. Otherwise marginals as well\n as joint probabilities.\n\n Returns:\n PRD-computed strategies.\n \"\"\"\n meta_games = solver.get_meta_game()\n if not isinstance(meta_games, list):\n meta_games = [meta_games, -meta_games]\n kwargs = solver.get_kwargs()\n result = projected_replicator_dynamics.projected_replicator_dynamics(\n meta_games, **kwargs)\n if not return_joint:\n return result\n else:\n joint_strategies = get_joint_strategy_from_marginals(result)\n return result, joint_strategies\n\n\ndef self_play_strategy(solver, return_joint=False, checkpoint_dir=None):\n \"\"\"\n Return a strategy with only the newest strategy in the support (played with probability 1).\n :param solver: GenPSROSolver instance.\n :param return_joint: If true, only returns marginals. 
Otherwise marginals as well\n as joint probabilities.\n :return:\n \"\"\"\n policies = solver.get_policies()\n policy_lengths = [len(pol) for pol in policies]\n result = []\n for pol_len in policy_lengths:\n strategy = np.zeros(pol_len)\n strategy[-1] = 1\n result.append(strategy)\n if not return_joint:\n return result\n else:\n joint_strategies = get_joint_strategy_from_marginals(result)\n return result, joint_strategies\n\ndef prioritized_fictitious_play(solver, return_joint=False):\n \"\"\"\n Implementation of prioritized ficitious self-play.\n :param solver: GenPSROSolver instance.\n :param return_joint: If true, only returns marginals. Otherwise marginals as well\n as joint probabilities.\n :return:\n \"\"\"\n raise NotImplementedError\n\ndef weighted_NE_strategy(solver, return_joint=False, checkpoint_dir=None, gamma=0.4):\n meta_games = solver.get_meta_game()\n num_players = len(meta_games)\n NE_list = solver._NE_list\n if len(NE_list) == 0:\n return [np.array([1])] * num_players, None\n\n num_used_policies = len(NE_list[-1][0])\n\n if not isinstance(meta_games, list):\n meta_games = [meta_games, -meta_games]\n\n num_strategies = len(meta_games[0])\n equilibria = gs.nash_solver(meta_games, solver=\"gambit\", mode=\"one\", checkpoint_dir=checkpoint_dir)\n\n result = [np.zeros(num_strategies)] * num_players\n for player in range(num_players):\n for i, NE in enumerate(NE_list):\n result[player][:len(NE[player])] += NE[player] * gamma ** (num_used_policies - i)\n result[player] += equilibria[player]\n result[player] /= np.sum(result[player])\n\n return result, None\n\n\nMETA_STRATEGY_METHODS = {\n \"uniform_biased\": uniform_biased_strategy,\n \"uniform\": uniform_strategy,\n \"nash\": nash_strategy,\n \"prd\": prd_strategy,\n \"general_nash\": general_nash_strategy,\n \"sp\": self_play_strategy,\n \"weighted_ne\": weighted_NE_strategy\n}\n\n\n# Meta-Strategy Methods for Strategy Exploration\nMETA_STRATEGY_METHODS_SE = {\n \"uniform\": uniform_strategy,\n \"prd\": prd_strategy,\n \"general_nash\": general_nash_strategy,\n \"sp\": self_play_strategy,\n \"weighted_ne\": weighted_NE_strategy\n}\n","sub_path":"open_spiel/python/algorithms/psro_v2/meta_strategies.py","file_name":"meta_strategies.py","file_ext":"py","file_size_in_byte":9649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"587149939","text":"import os\nimport math\n\ndef genLine(x1, y1, x2, y2):\n try:\n slope = (y2-y1)/(x2-x1)\n y_intercept = y2-x2*slope\n return [slope, y_intercept]\n except ZeroDivisionError:\n return x1\n\ndirectory = \"C:\\\\Users\\\\Pokemon\\\\Desktop\\\\Python 3.3.4\\\\USACO\\\\Gold Nov 2013 - Line of Sight\\\\\"\nfile = open(directory+\"sight.in\")\ndata = file.read()\nfile.close()\n\ndataLines = (data.split(\"\\n\"))[:-1]\nfor i in range(len(dataLines)):\n dataLines[i] = dataLines[i].split(\" \")\n for j in range(len(dataLines[i])):\n dataLines[i][j] = int(dataLines[i][j])\n \nnumCows = int(dataLines[0][0])\nsiloRadius = dataLines[0][1]\ndataLines = dataLines[1:]\n\nnumMVPs = 0\ntempVals = [siloRadius*siloRadius, None]\nfor i in range(numCows):\n e = dataLines[i]\n for j in range(i+1, numCows):\n e2 = dataLines[j]\n line = genLine(e[0], e[1], e2[0], e2[1])\n if type(line) == int:\n try:\n yIntersects = [-math.sqrt(tempVals[0]-line*line)]\n yIntersects.append(-yIntersects[0])\n if (e[1] < yIntersects[0] and e2[1] < yIntersects[0]) or (e[1] > yIntersects[1] and e2[1] > yIntersects[1]):\n numMVPs += 1\n except ValueError:\n numMVPs += 1\n else:\n 
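# Chord test by intersecting the line y = m*x + b with the circle\n            # x^2 + y^2 = r^2: (m^2 + 1)*x^2 + 2*m*b*x + (b^2 - r^2) = 0, so\n            # x = (-m*b +/- sqrt(m^2*r^2 - b^2 + r^2)) / (m^2 + 1),\n            # with tempVals[0] = r^2 and tempVals[1] = m^2 below.\n            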
tempVals[1] = line[0]*line[0]\n            try:\n                xIntersects = [(-line[0]*line[1]-math.sqrt(tempVals[1]*tempVals[0]-line[1]*line[1]+tempVals[0]))/(tempVals[1]+1),\n                               (-line[0]*line[1]+math.sqrt(tempVals[1]*tempVals[0]-line[1]*line[1]+tempVals[0]))/(tempVals[1]+1)]\n                if (e[0] < xIntersects[0] and e2[0] < xIntersects[0]) or (e[0] > xIntersects[1] and e2[0] > xIntersects[1]):\n                    numMVPs += 1\n            except ValueError:\n                numMVPs += 1\n\nfile = open(directory+\"sight.out\", \"w\")\nfile.write(str(numMVPs))\nfile.close()\n","sub_path":"Python/USACO/GOLD Nov 2013 - Line of Sight/sight.py","file_name":"sight.py","file_ext":"py","file_size_in_byte":1892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"79094891","text":"import cv2\nimport numpy\n\nimport utils\n\n\ndef outlineRect(image, rect, color):\n\n    if rect is None:\n        return\n    x, y, w, h = rect\n    cv2.rectangle(image, (int(x), int(y)), (int(x+w), int(y+h)), color)\n\n\ndef copyRect(src,\n             dst,\n             srcRect,\n             dstRect,\n             interpolation=cv2.INTER_LINEAR,\n             mask=None):\n\n    x0, y0, w0, h0 = srcRect\n    x1, y1, w1, h1 = dstRect\n    if mask is None:\n        dst[y1:y1+h1, x1:x1+w1] = cv2.resize(src[y0:y0+h0, x0:x0+w0],\n                                             (w1, h1),\n                                             interpolation=interpolation)\n    else:\n        if not utils.isGray(src):\n            mask = mask.repeat(3).reshape(h0, w0, 3)\n        dst[y1:y1+h1, x1:x1+w1] = numpy.where(\n            cv2.resize(mask, (w1, h1), interpolation=cv2.INTER_LINEAR),\n            cv2.resize(src[y0:y0+h0, x0:x0+w0],\n                       (w1, h1),\n                       interpolation=interpolation),\n            dst[y1:y1+h1, x1:x1+w1])\n\n\ndef swapRects(src, dst, rects, masks=None, interpolation=cv2.INTER_LINEAR):\n\n    if dst is not src:\n        dst[:] = src\n    numRects = len(rects)\n\n    if numRects < 2:\n        return\n    if masks is None:\n        masks = [None]*numRects\n\n    # Copy the contents of the last rectangle into temporary storage.\n    x, y, w, h = rects[numRects - 1]\n    temp = src[y:y+h, x:x+w].copy()\n    # Copy the contents of each rectangle into the next.\n    i = numRects - 2\n    while i >= 0:\n        copyRect(src, dst, rects[i], rects[i+1], interpolation, masks[i])\n        i -= 1\n\n    # Copy the temporarily stored content into the first rectangle.\n    copyRect(temp,\n             dst,\n             (0, 0, w, h),\n             rects[0],\n             interpolation,\n             masks[numRects - 1])\n","sub_path":"opencv/rects.py","file_name":"rects.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"519177899","text":"def dp_prime(n):\n    bitmap=[True for i in range(n+1)]\n    bitmap[0]=False\n    bitmap[1]=False\n    for i in range(2,n):\n        if bitmap[i]==True:\n            for j in range(2*i,n+1,i):\n                bitmap[j]=False\n    return bitmap\nprint(dp_prime(15))\nprint(len(dp_prime(15)))\n","sub_path":"basic dp and memoization/lec13/isprime.py","file_name":"isprime.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"608342027","text":"# -*- coding: utf-8 -*-\nfrom sensetimebi_productstests.BI_Hisi_SenseEngine.Android_test_case.AndroidShareScripts.pages import SenseEngineCameraDemoDebugApk\nfrom sensetimebi_productstests.Sharedscript.File_operations import File_operations\nfrom sensetimebi_productstests.Sharedscript.SharedAdbOperation import AdbOpt\nimport os, time\nif __name__ == '__main__':\n    ip = \"10.9.99.155:8888\"\n    bin_name = 'm10_V3.4.4_update_tar.bin'\n    test_max = 250\n    uvc_flag1 = True\n\n    # First create the test-result folder; it stores logs, screenshots and other files related to the test results\n    result_files = File_operations()  # instantiate the file-operations object used to manage result-related files\n    path = result_files.create_floder(\"固件升级测试log\", 'D:\\\\test-project\\\\007-M10_M20\\\\scripts-test-result')  # create the result folder under the given path for result-related files\n    print(path)\n    upload_fail_paths = result_files.create_floder('固件升级失败截图', 'D:\\\\test-project\\\\007-M10_M20\\\\scripts-test-result')  # + \"\\\\\"\n    print(upload_fail_paths)\n    
start_path = result_files.create_floder('升级后打开预览界面截图', 'D:\\\\test-project\\\\007-M10_M20\\\\scripts-test-result')  # + \"\\\\\"\n    print(start_path)\n\n    D = SenseEngineCameraDemoDebugApk(ip, path)\n    adb = AdbOpt(ip)\n    for i in range(182, test_max+1):\n        adb.adb_rm_files('/sdcard/Pictures/*.jpg')  # delete the images auto-reported on the preview screen so they do not eat storage\n        uvc_load_starttime = time.time()\n        while uvc_flag1:\n            adb.adb_exist()  # check that adb is connected\n            uvc_flag_list = ['ttyACM0', 'ttyACM1', 'ttyACM2']\n            # first confirm the UVC node has been mounted before operating the Android host\n            checkUvc_info = os.popen('adb -s %s shell ls /dev -all'%ip).read()\n            #print(checkUvc_info)\n            for a in uvc_flag_list:\n                uvc_flag = checkUvc_info.find(a)\n                if uvc_flag != -1:\n                    uvc_load_endtime = time.time()\n                    print('Uvc 已经挂载成功!!!')\n                    uvc_flag1 = False\n                    break\n                else:\n                    pass\n                    # print('Uvc 正在挂载!!!')\n        uvc_load_needtime = uvc_load_endtime - uvc_load_starttime\n        print('uvc挂载时长为:%s'%uvc_load_needtime)\n\n        D.start_app()  # launch the apk\n        D.into_preview()  # enter the preview screen\n        preview_interface_screenshot_name = \"preview_%s.jpg\" % i\n        D.screen_img(start_path, preview_interface_screenshot_name)\n        upgrade_interface_screenshot_name = \"upgrade_%s.jpg\" % i\n        D.upgradetime(i, upload_fail_paths, upgrade_interface_screenshot_name, bin_name)\n\n        os.popen('adb -s %s shell rm /dev/*ACM*'%ip)  # release the uvc node so it does not interfere with the next test\n        uvc_flag1 = True","sub_path":"BI_Hisi_SenseEngine/Android_test_case/bin_upgrade_test/升级固件.py","file_name":"升级固件.py","file_ext":"py","file_size_in_byte":2848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"41445700","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nTensorflow/Keras implementation of Simple Exponential Smoothing.\n\nLayer defined to learn parameters: alpha and initial level\nas per component form of simple exponential smoothing\ndefined from Forecasting: Principles and Practice\nby Hyndman and George Athanasopoulos https://otexts.com/fpp2/\n\nReference: https://github.com/mcskinner/ets\n\nAuthor: Krist Papadopoulos\nV0 Date: July 6, 2019\n\n    tensorflow==1.10.1\n\"\"\"\n\nimport tensorflow as tf\n\nclass SES(tf.keras.layers.Layer):\n    def __init__(self, dtype=tf.float32):\n        super(SES, self).__init__()\n\n    def build(self, input_shape):\n        self.alpha = self.add_weight('alpha', shape=[1,],\n                                     initializer=tf.keras.initializers.random_uniform(0,1),\n                                     constraint=tf.keras.constraints.min_max_norm(0,1))\n\n        self.level = self.add_weight('level', shape=[1,],\n                                     initializer=tf.keras.initializers.truncated_normal())\n\n    def call(self, input):\n\n        def ses(y, alpha, level):\n            '''Simple exponential smoothing using component form\n               from Forecasting: Principles and Practice - Hyndman and George Athanasopoulos'''\n            forecast = level\n            updated_level = forecast + alpha * (y - forecast)\n            return forecast, updated_level\n\n        predictions = []\n        for time_step in range(input.shape[0]):\n            prediction, self.level = ses(input[time_step], self.alpha, self.level)\n            predictions.append(prediction)\n\n        return tf.concat(predictions, axis=-1)","sub_path":"ses.py","file_name":"ses.py","file_ext":"py","file_size_in_byte":1696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"319727154","text":"\"\"\"Vokiz channel processing module.\"\"\"\n\nimport collections.abc\nimport inspect\nimport readline\nimport roax.context\nimport roax.schema as s\nimport shlex\nimport vokiz.backends\nimport vokiz.backends.none\nimport vokiz.resource\nimport vokiz.schema as vs\nimport wrapt\n\nfrom dataclasses import dataclass, field\nfrom vokiz.backends 
import BackendError\n\n\nclass Error(Exception):\n \"\"\"Raised when error should be returned to the sender.\"\"\"\n\n\nclass Unauthorized(Exception):\n \"\"\"Raised if user is not authorized to execute a command.\"\"\"\n\n\nclass Exit(Exception):\n \"\"\"Raised to exit the REPL.\"\"\"\n\n\nclass cmd:\n \"\"\"Decorate method to be exposed as a command.\"\"\"\n\n _str = s.str()\n\n def __init__(self, auth=None, name=None):\n self.auth = auth or (lambda: True)\n self.name = name\n\n def __call__(self, function):\n function._command = self\n\n def wrapper(wrapped, instance, args, kwargs):\n if not self.auth():\n raise Unauthorized\n args = list(args) # mutable\n _args = []\n _kwargs = {}\n for name, param in inspect.signature(wrapped).parameters.items():\n if not args:\n break # missing argument(s) will be caught in call to method\n elif param.kind == param.POSITIONAL_OR_KEYWORD:\n arg = args.pop(0)\n try:\n _args.append(\n wrapped.__annotations__.get(name, cmd._str).str_decode(arg)\n )\n except s.SchemaError:\n raise Error(f\"Invalid {name}: {arg}.\")\n elif param.kind == param.VAR_KEYWORD:\n for arg in args:\n try:\n key, value = arg.split(\"=\", 1)\n except ValueError:\n raise Error(f\"Invalid key-value: {arg}\")\n _kwargs[key] = value\n args.clear()\n else:\n raise TypeError(\"unsupported command parameter type\")\n if args:\n raise TypeError(f\"{wrapped.__name__}: too many arguments\")\n return wrapped(*_args, **_kwargs)\n\n return wrapt.decorator(wrapper)(function)\n\n\ndef ctx(type):\n \"\"\"Return a context object of the specified type.\"\"\"\n c = roax.context.last(context=type)\n if c:\n return c.get(type)\n\n\ndef _str_list(l):\n \"\"\"Return a string representing list of strings.\"\"\"\n return \" \".join(sorted(l, key=str.lower)) if l else \"[none]\"\n\n\ndef _str_dict(d):\n \"\"\"Return a string representing dict of strings to strings.\"\"\"\n return \" \".join([f\"{k}={v}\" for k, v in d.items()]) if d else \"[none]\"\n\n\ndef _str_dataclass(o):\n \"\"\"Return a string representing attributes in a dataclass.\"\"\"\n return _str_dict(_dict_dataclass(o))\n\n\ndef _dict_dataclass(o):\n \"\"\"Return a dict representing attributes in a dataclass.\"\"\"\n return {attr: getattr(o, attr) for attr in o.__annotations__}\n\n\nclass auth:\n \"\"\"Command authorization functions.\"\"\"\n\n @staticmethod\n def shell():\n \"\"\"Authorize command if requested through the shell.\"\"\"\n return roax.context.last(context=\"shell\") is not None\n\n @staticmethod\n def op():\n \"\"\"Return if requesting user is channel operator.\"\"\"\n user = ctx(\"user\")\n return user.op if user else False\n\n @staticmethod\n def phone():\n \"\"\"Authorize command if request by phone.\"\"\"\n return ctx(\"phone\") is not None\n\n\nclass DataclassMapping(collections.abc.Mapping):\n \"\"\"TODO: Description.\"\"\"\n\n def __init__(self, sequence, key, insensitive=False):\n self.sequence = sequence\n self.key = key\n self.insensitive = insensitive\n\n def __getitem__(self, key):\n if self.insensitive:\n key = key.lower()\n for item in self.sequence:\n item_key = getattr(item, self.key)\n if self.insensitive:\n item_key = item_key.lower()\n if item_key == key:\n return item\n raise KeyError(key)\n\n def __iter__(self):\n for key in [getattr(item, self.key) for item in self.sequence]:\n yield key\n\n def __len__(self):\n return len(self.sequence)\n\n def __delitem__(self, key):\n self.sequence.remove(self[key])\n\n def add(self, item):\n \"\"\"TODO: Description.\"\"\"\n item_key = getattr(item, self.key)\n if item_key 
in self:\n raise ValueError(f\"duplicate key: {item_key}\")\n self.sequence.append(item)\n\n\nclass Processor:\n \"\"\"TODO: Description.\"\"\"\n\n def __init__(self, channel):\n self.channel = channel\n self.commands = self._commands()\n self.users = DataclassMapping(self.channel.users, \"nick\", insensitive=True)\n self.phones = DataclassMapping(self.channel.phones, \"number\")\n try:\n self.backend = vokiz.backends.load(channel.backend)\n except BackendError as be:\n print(f\"Backend error: {be}.\")\n self.backend = vokiz.backends.none.SMS() # use dummy backend\n\n def _commands(self):\n \"\"\"Return name-to-method mapping of commands.\"\"\"\n inspect.getmembers(self)\n result = {}\n for member in inspect.getmembers(self, inspect.ismethod):\n name, method = member[0], member[1]\n cmd = getattr(method.__func__, \"_command\", None)\n if cmd:\n name = cmd.name or name\n result[name] = method\n return result\n\n def eval(self, line):\n if not line:\n return\n try:\n if line.startswith(\"/\"):\n line = line[1:]\n args = shlex.split(line)\n if not args:\n raise Error(f\"Missing command.\")\n try:\n command = args.pop(0)\n method = self.commands.get(command)\n if not method:\n raise Unauthorized\n return method(*args)\n except Unauthorized:\n return f\"Unknown commnd: {command}.\"\n except TypeError:\n return self.usage(method)\n if not line.startswith(\"@\"):\n if auth.shell():\n raise Error(\n f\"Cowardly refusing to send message without explicit @nick.\"\n )\n line = f\"@{self.channel.rcpt} {line}\"\n nick, message = f\"{line} \".split(\" \", 1)\n self.send(nick[1:], message)\n except Error as e:\n return f\"Error: {e}\"\n\n def send(self, nick, message):\n try:\n nick = self.users[nick].nick\n except KeyError:\n for alias in _dict_dataclass(self.channel.aliases).values():\n if alias.lower() == nick.lower():\n nick = alias\n break\n message = message.strip()\n if not message:\n raise Error(f\"Refusing to send empty message to {nick}.\")\n header = self.channel.head.format_map({\"from\": ctx(\"user\").nick, \"to\": nick})\n phones = self._resolve(nick)\n if not phones:\n raise Error(f\"No such nick: {nick}.\")\n for phone in phones:\n self._send(phone, f\"{header}{message}\")\n\n def _resolve(self, nick):\n \"\"\"Return list of phones associated with a nick, including aliases.\"\"\"\n return [\n phone\n for phone in self.phones.values()\n if nick == self.channel.aliases.all\n or phone.nick == nick\n or (nick == self.channel.aliases.ops and self.users[phone.nick].op)\n ]\n\n def _send(self, phone, message):\n \"\"\"Send a message to a phone.\"\"\"\n if phone.mute:\n return\n print(f\"[S] {phone.number}: {message}\")\n try:\n self.backend.send(phone.number, message)\n except BackendError as error:\n print(f\"[E] Error sending to {phone.number}: {error}.\") # FIXME: log\n\n def shell(self, nick):\n prompt = f\"{nick}@{self.channel.id}: \"\n user = vokiz.resource.User(nick, True, True)\n with roax.context.push(context=\"shell\"):\n with roax.context.push(context=\"user\", user=user):\n while True:\n try:\n result = self.eval(input(prompt))\n if result:\n print(result)\n except (EOFError, KeyboardInterrupt):\n print()\n break\n except Exit:\n break\n\n def usage(self, method):\n \"\"\"Return usage for method.\"\"\"\n sig = inspect.signature(method)\n elements = [f\"/{method.__name__}\"]\n for name, param in sig.parameters.items():\n if param.default != param.empty:\n name = f\"[{name}]\"\n elif param.kind == param.VAR_KEYWORD:\n name = \"[key=value]...\"\n elements.append(name)\n return f\"Usage: 
{' '.join(elements)}.\"\n\n def notify(self, event):\n message = f\"{ctx('user').nick} {event}.\"\n phones = self._resolve(self.channel.aliases.ops)\n if not phones:\n print(f\"[I] {message}\")\n for phone in phones:\n self._send(phone, message)\n\n def process(self):\n \"\"\"Process incoming messages.\"\"\"\n with roax.context.push(context=\"process\"):\n for number, message in self.backend.receive():\n print(f\"[R] {number}: {message}\")\n phone = self.phones.get(number)\n if not phone: # ignore messages from unregistered numbers\n continue\n try:\n user = self.users[phone.nick]\n except KeyError:\n continue\n with roax.context.push(context=\"phone\", phone=phone):\n with roax.context.push(context=\"user\", user=user):\n response = self.eval(message)\n if response:\n self._send(phone, response)\n\n # ---- user commands -----\n\n @cmd(auth.phone)\n def mute(self):\n \"\"\"Disable receiving messages.\"\"\"\n phone = ctx(\"phone\")\n if phone.mute:\n raise Error(f\"Channel is already muted. Use /unmute to unmute.\")\n phone.mute = True\n self.notify(f\"muted channel on {phone.number}\")\n return f\"Channel muted on {phone.number}. Use /unmute to unmute.\"\n\n @cmd(auth.phone)\n def unmute(self):\n \"\"\"Enable receiving messages.\"\"\"\n phone = ctx(\"phone\")\n if not phone.mute:\n raise Error(f\"Channel is not muted.\")\n phone.mute = False\n self.notify(f\"unmuted channel on {phone.number}\")\n return f\"Channel unmuted on {phone.number}.\"\n\n @cmd()\n def who(self, nick=None):\n \"\"\"List users or get user information.\"\"\"\n if not nick or not auth.op():\n return f\"Users: {_str_list(self.users)}.\"\n try:\n user = self.users[nick]\n except KeyError:\n raise Error(f\"No such user: {nick}.\")\n result = [f\"User: {user.nick}{' [op]' if user.op else ''}\"]\n if auth.op():\n result.append(\n _str_list(\n [p.number for p in self.phones.values() if p.nick == user.nick]\n )\n )\n return \" \".join(result) + \".\"\n\n @cmd()\n def ping(self):\n \"\"\"Ping the service to confirm access.\"\"\"\n phone = ctx(\"phone\")\n source = phone.number if phone else \"shell\"\n return f\"Ping received from {ctx('user').nick} via {source}.\"\n\n @cmd()\n def help(self, command=None):\n \"\"\"List commands or display help for command.\"\"\"\n valid = []\n for name, method in self.commands.items():\n if method.__func__._command.auth():\n valid.append(name)\n if not command:\n return f\"Commands: {_str_list(valid)}.\"\n elif command in valid:\n method = self.commands[command]\n return f\"{self.usage(method)} {method.__doc__}\"\n else:\n return f\"Unknown command: {command}.\"\n\n # ----- operator commands -----\n\n def _aliases(self):\n \"\"\"Represents aliases as a dictionary.\"\"\"\n return {}\n\n @cmd(auth.op)\n def add(self, number: vs.e164(), nick: vs.nick()):\n \"\"\"Add member to channel.\"\"\"\n if number in self.phones:\n raise Error(\n f\"{number} is already registered to {self.phones[number].nick}.\"\n )\n for alias in _dict_dataclass(self.channel.aliases).values():\n if alias.lower() == nick.lower():\n raise Error(f\"Nick unavailable: {nick}.\")\n try:\n user = self.users[nick]\n except KeyError:\n user = vokiz.resource.User(nick)\n self.users.add(user)\n self.phones.add(vokiz.resource.Phone(number, user.nick))\n self.notify(f\"added {number} ({user.nick})\")\n\n @cmd(auth.op)\n def remove(self, number: vs.e164()):\n \"\"\"Remove member from channel.\"\"\"\n try:\n phone = self.phones[number]\n except KeyError:\n raise Error(f\"Number not in channel: {number}.\")\n del self.phones[number]\n 
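# phone.nick may be shared by other registered numbers; the lookup below\n        # only deletes the user once no remaining phone references the nick.\n        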
user = self.users.get(phone.nick)\n if user and not [p for p in self.phones.values() if p.nick == phone.nick]:\n del self.users[user.nick] # delete orphan user\n nick_msg = f\" ({user.nick})\" if user else \"\"\n self.notify(f\"removed {number}{nick_msg}\")\n\n @cmd(auth.op)\n def op(self, nick: vs.nick() = None):\n \"\"\"List operators or promote user to channel operator.\"\"\"\n if nick is None:\n result = []\n for nick in self.users:\n if self.users[nick].op:\n result.append(nick)\n return f\"Operators: {_str_list(result)}.\"\n try:\n user = self.users[nick]\n except KeyError:\n raise Error(f\"No such user: {nick}.\")\n if user.op:\n raise Error(f\"User {user.nick} is already channel operator.\")\n user.op = True\n self.notify(f\"promoted {user.nick} to channel operator\")\n\n @cmd(auth.op)\n def deop(self, nick: vs.nick()):\n \"\"\"Demote channel operator to user.\"\"\"\n try:\n user = self.users[nick]\n except KeyError:\n raise Error(f\"No such user: {nick}.\")\n if not user.op:\n raise Error(f\"User {user.nick} is not channel operator.\")\n self.notify(f\"demoted {user.nick} to channel user\")\n user.op = False\n\n @cmd(auth.op)\n def alias(self, **kwargs):\n \"\"\"Get or set alias.\"\"\"\n if not kwargs:\n return f\"Aliases: {_str_dataclass(self.channel.aliases)}.\"\n for key, value in kwargs.items():\n if key not in _dict_dataclass(self.channel.aliases).keys():\n raise Error(f\"Unsupported alias: {key}.\")\n if value in self.users:\n raise Error(\n f\"User already has nick assigned: {self.users[value].nick}.\"\n )\n setattr(self.channel.aliases, key, value)\n self.notify(f\"set alias: {_str_dict(kwargs)}\")\n\n @cmd(auth.op)\n def head(self, value=None):\n \"\"\"Get or set message header.\"\"\"\n if not value:\n return f'Header: \"{self.channel.head}\".'\n try:\n value.format_map({\"from\": \"f\", \"to\": \"t\"})\n except KeyError:\n raise Error(\"Only {from} and {to} fields can be expressed in header.\")\n self.channel.head = value\n self.notify(f'set message header to: \"{value}\"')\n\n @cmd(auth.op)\n def rcpt(self, nick: vs.nick() = None):\n \"\"\"Get or set recipient of unaddressed messages.\"\"\"\n if not nick:\n return f\"Default recipient: {self.channel.rcpt}.\"\n\n # ----- REPL commands -----\n\n @cmd(auth.shell)\n def exit(self):\n \"\"\"Exit the channel.\"\"\"\n raise Exit\n\n @cmd(auth.shell)\n def backend(self, module=None, **kwargs):\n \"\"\"Get or set backend config.\"\"\"\n if not module:\n data = self.channel.backend\n kwargs = _str_dict(data.kwargs)\n return f\"Backend: {data.module}{' ' if kwargs else ''}{kwargs}.\"\n else:\n data = vokiz.resource.Backend(module, kwargs)\n try:\n self.backend = vokiz.backends.load(data)\n except BackendError as be:\n raise Error(f\"{be}.\")\n self.channel.backend = data\n return \"Backend successfully set.\"\n","sub_path":"vokiz/processor.py","file_name":"processor.py","file_ext":"py","file_size_in_byte":16769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"571692421","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\n#Registration no. 
= 20MAI0020\n\n\n# In[114]:\n\n\n#import libraries\nimport pandas as pd\nimport numpy as np\n\n\n# In[115]:\n\n\n#load the data\ndata =pd.read_csv('C:\\\\Users\\\\Hp\\\\Desktop\\\\crx.data', header= None)\n\n\n# In[116]:\n\n\n#A1 A2 A3...A16\nvarnames = ['col'+ str(s) for s in range(1,17)]\n\n\n# In[117]:\n\n\n#add column names\ndata.columns = varnames\n\n\n# In[118]:\n\n\ndata.head()\n\n\n# In[119]:\n\n\ndata.tail()\n\n\n# In[120]:\n\n\n#print last 10 columns\ndata.tail(10)\n\n\n# In[121]:\n\n\n#replace ? with np.nan(not a number)\ndata = data.replace('?',np.nan)\n\n\n# In[122]:\n\n\n#display\ndata.info()\n\n\n# In[123]:\n\n\n#datatypes are as float64, int64, object\n\n\n# In[124]:\n\n\n#recasting col2 and col14 to its correct type\ndata['col2'] = data['col2'].astype('float')\ndata['col14'] = data['col14'].astype('float')\n\n\n# In[125]:\n\n\n#replacing '+' and '-' values in col16 with 'P' and 'N' respectively\ndata['col16'] =data['col16'].map({'+':'P', '-':'N'})\n\n\n# In[126]:\n\n\n#display col16\ndata['col16']\n\n\n# In[127]:\n\n\n#display number of variables of type objects\ncat_columns = [c for c in data.columns if data[c].dtypes == 'O']\ndata[cat_columns].head()\n\n\n# In[128]:\n\n\n#dataset= loan.csv\ndata = pd.read_csv('C:\\\\Users\\\\Hp\\\\Desktop\\\\loan.csv')\n\n\n# In[129]:\n\n\ndata.head()\n\n\n# In[130]:\n\n\n#calculating mean\ndata['disbursed_amount'].mean()\n\n\n# In[131]:\n\n\n#calculating mean\ndata['interest'].mean()\n\n\n# In[132]:\n\n\n#number of discrete variables\nprint(data.market.value_counts())\n\n\n# In[133]:\n\n\n#display unique values\ndata['number_open_accounts'].unique()\n\n\n# In[134]:\n\n\ndata['customer_id'].unique()\n\n\n# In[138]:\n\n\ndata[['date_issued','date_last_payment']].dtypes\n\n\n# In[140]:\n\n\ndata['date_issued_dt'] = pd.to_datetime(data['date_issued'])\ndata.head()\n\n\n# In[153]:\n\n\n#find months\ndata['month'] = data['date_issued_dt'].dt.month\ndata.head()\n\n\n# In[160]:\n\n\n#months with most of loan issued date\ndata.groupby([\"month\"])[\"date_issued\"].count()\n\n\n# In[161]:\n\n\n#above output displays months along with the number of loans issued per month\n\n\n# In[162]:\n\n\n#teachers who are owners\ndata.loc[(data['employment'] == 'Teacher') & (data['householder'] == 'OWNER'),['employment','householder']]\n\n\n# In[163]:\n\n\n#count of teachers who are owners\nx=data.loc[(data['employment'] == 'Teacher') & (data['householder'] == 'OWNER'),['employment','householder']]\nx.value_counts()\n\n\n# In[164]:\n\n\n#count of teachers who are owners is 69.\n\n\n# In[165]:\n\n\n#employment of customers who mostly rent\ndata.loc[data['householder'] == 'RENT',['employment','householder']]\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"ML lab2 (1).py","file_name":"ML lab2 (1).py","file_ext":"py","file_size_in_byte":2540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"636428724","text":"from _overlapped import NULL\nimport hashlib\nimport sqlite3\nimport time\nimport os\nimport json\nfrom c_decoder import c_decoder\n\n\nclass c_qqex():\n # init 设置解密key和导出文件夹\n def __init__(self, db, key, outdir):\n self.key = key # 解密用的密钥\n # 导出路径\n self.outdir = outdir\n if not os.path.isdir(outdir):\n os.mkdir(outdir)\n # 连接数据库\n self.connectDB(db)\n # 初始化decoder\n self.d = c_decoder(self.key)\n # 初始化信息\n self.friends = {}\n self.troop = {}\n self.troopmem = {}\n # 初始化聊天记录表\n self.msgf = {}\n self.msgt = {}\n\n # 连接数据库\n def connectDB(self, db):\n self.c = sqlite3.connect(db).cursor()\n\n # 1.1 获取好友信息 
self.friends = { uin: [name, remark, age, gender, md5] }\n def getFriends(self):\n # uin QQ号 name 昵称 remark 备注 age 年龄 gender 性别\n execute = \"select uin,name,remark,age,gender from Friends\"\n cursor = self.c.execute(execute)\n for i in cursor:\n # 获取单条数据\n uin, name, remark, age, gender = i[0], i[1], i[2], i[3], i[4]\n\n # 解密\n uin = self.d.decode(uin, 1)\n name = self.d.decode(name, 1)\n remark = self.d.decode(remark, 1)\n\n # 写入FriendsData\n if (uin):\n self.friends[uin] = [name, remark, age, gender]\n else:\n print('unkown uin!', i)\n return self.friends\n\n # 1.2 获取群组信息 self.troop = { tuin: [name, code, owneruin, memo]}\n def getTroop(self):\n # troopuin 群号 troopname 群名 troopcode 群号? trooponweruin 群主 troopmemo 群简介\n execute = \"select troopuin, troopname, troopcode,troopowneruin,troopmemo from TroopInfoV2\"\n cursor = self.c.execute(execute)\n for i in cursor:\n # 获取单条数据\n tuin, name, code, owneruin, memo = i[0], i[1], i[2], i[3], i[4]\n\n # 解密\n tuin = self.d.decode(tuin, 1)\n name = self.d.decode(name, 1)\n code = self.d.decode(code, 1)\n owneruin = self.d.decode(owneruin, 1)\n memo = self.d.decode(memo, 1)\n\n if (tuin):\n self.troop[tuin] = [name, code, owneruin, memo]\n else:\n print('unkown uin!', i)\n return self.troop\n\n # 1.3 获取群成员信息 self.troopmem = { tuin: { quin: [tname(群名片), qname(), jtime] }}\n def getTroopMem(self):\n # troopuin 群号 memberuin qq号 troopnick 群名片 friendnick qq名 join_time 入群时间\n execute = \"select troopuin,memberuin,troopnick,friendnick,join_time from TroopMemberInfo\"\n cursor = self.c.execute(execute)\n for i in cursor:\n # 获取单条数据\n tuin, quin, tname, qname, jtime = i[0], i[1], i[2], i[3], i[4]\n\n # 解密\n tuin = self.d.decode(tuin, 1)\n quin = self.d.decode(quin, 1)\n tname = self.d.decode(tname, 1)\n qname = self.d.decode(qname, 1)\n # jtime = self.d.decode(jtime, 1)\n\n # 写入troopmem\n if (tuin):\n if tuin not in self.troopmem:\n self.troopmem[tuin] = {}\n self.troopmem[tuin][quin] = [tname, qname, jtime]\n else:\n print('unkown tuin!', i)\n return self.troopmem\n\n # 1.4 获取单个好友聊天记录 self.msgf = { fuin: msgs } msgs = [[uin, stime, msg, suin, fuin]]\n def getMsgFriends(self, qq='', md5='', table='', save=False):\n\n msgs = []\n\n # 如果给的是QQ号,计算md5\n if len(qq) > 0:\n md5 = hashlib.md5(qq.encode('utf-8')).hexdigest().upper()\n elif len(md5) > 0:\n md5 = md5.upper()\n if len(table) <= 0:\n table = 'mr_friend_{}_New'.format(md5)\n\n # senderuin 发送者qq time 发送时间 msgData 消息内容 selfuin 自己的qq frienduin 好友的qq\n execute = \"select senderuin, time, msgData, selfuin, frienduin from {}\".format(\n table)\n cursor = self.c.execute(execute)\n\n for i in cursor:\n # 单条数据处理\n uin, stime, msg, suin, fuin = i[0], i[1], i[2], i[3], i[4]\n\n # 解密\n uin = self.d.decode(uin, 1)\n # stime = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(stime))\n msg = self.d.decode(msg, 0)\n suin = self.d.decode(suin, 1)\n fuin = self.d.decode(fuin, 1)\n\n # 写入msgs\n if (save):\n if fuin not in self.msgf:\n self.msgf[fuin] = []\n self.msgf[fuin].append([uin, stime, msg, suin, fuin])\n else:\n msgs.append([uin, stime, msg, suin, fuin])\n\n if (save):\n return self.msgf\n else:\n return msgs\n\n # 1.5 获取单个群聊记录 self.msgt = { tuin: msgs } msgs = [[tuin, uin, stime, msg, suin]]\n def getMsgTroop(self, troop='', md5='', table='', save=False):\n msgs = []\n\n # 如果给的是QQ号,计算md5\n if len(troop) > 0:\n md5 = hashlib.md5(troop).hexdigest().upper()\n elif len(md5) > 0:\n md5 = md5.upper()\n if len(table) <= 0:\n table = 'mr_troop_{}_New'.format(md5)\n\n # frienduin 群号 senderuin 发送者qq time 发送时间 msgData 消息内容 
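        # [Editor's gloss of the Chinese column comment above: frienduin =
        #  group number, senderuin = sender's QQ id, time = send time,
        #  msgData = message body, selfuin = own QQ id. A bug worth noting:
        #  unlike getMsgFriends(), which calls hashlib.md5(qq.encode('utf-8')),
        #  this method passes the str 'troop' to hashlib.md5() directly, which
        #  raises TypeError on Python 3; troop.encode('utf-8') is needed.]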
selfuin 自己的qq\n execute = \"select frienduin, senderuin, time, msgData, selfuin from {}\".format(\n table)\n cursor = self.c.execute(execute)\n\n for i in cursor:\n # 单条数据处理\n tuin, uin, stime, msg, suin = i[0], i[1], i[2], i[3], i[4]\n\n # 解密\n tuin = self.d.decode(tuin, 1)\n uin = self.d.decode(uin, 1)\n # stime = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(stime))\n msg = self.d.decode(msg, 0)\n suin = self.d.decode(suin, 1)\n\n # 写入msgs\n if (save):\n if tuin not in self.msgt:\n self.msgt[tuin] = []\n self.msgt[tuin].append([tuin, uin, stime, msg, suin])\n else:\n msgs.append([tuin, uin, stime, msg, suin])\n\n if (save):\n return self.msgt\n else:\n return msgs\n\n # 1.6 获取所有聊天记录\n def getMsgAll(self):\n # 先获取所有'mr_'开头的表格\n execute = \"SELECT name FROM sqlite_master WHERE type='table'\"\n cursor = self.c.execute(execute)\n tables = []\n for i in cursor:\n table = i[0]\n if (i[0].startswith('mr_')):\n tables.append(table)\n pass\n # 获取所有聊天记录\n for table in tables:\n if (table.startswith('mr_friend')):\n # 处理好友聊天记录\n self.getMsgFriends(table=table, save=True)\n elif (table.startswith('mr_troop')):\n # 处理群聊天记录\n self.getMsgTroop(table=table, save=True)\n else:\n pass\n return (self.msgf, self.msgt)\n\n # 1.7 获取所有信息\n def getInfo(self):\n return (self.getFriends(), self.getTroop(), self.getTroopMem())\n\n # 2.1 获取好友名称\n def getNamef(self, uin):\n if uin in self.friends:\n if (len(self.friends[uin][1]) > 0):\n # 存在备注则返回备注\n return self.friends[uin][1]\n else:\n # 否则返回名字\n return self.friends[uin][0]\n else:\n # 不存在则返回qq\n return uin\n\n # 2.2 获取群成员名称\n def getNamet(self, uin, tuin):\n # 检查群号是否存在\n if tuin in self.troopmem:\n # 检测群成员是否存在\n if uin in self.troopmem[tuin]:\n [tname, qname, jtime] = self.troopmem[tuin][uin]\n if (len(tname) > 0):\n # 存在群名片则返回群名片\n return tname\n else:\n # 否则返回qq名\n return qname\n else:\n return uin\n else:\n # 不存在则返回qq\n return uin\n\n # 2.3 表情处理\n def emReplace(self, msg, mode='txt'):\n # (https://github.com/Yiyiyimu/QQ_History_Backup/blob/0726e00c77d98aabe2d48c0516e6e0620027a19d/QQ_History.py:68)\n emdc = {23: '微笑', 40: '撇嘴', 19: '色', 43: '发呆', 21: '得意', 9: '流泪', 20: '害羞', 106: '闭嘴', 35: '睡', 10: '大哭', 25: '尴尬', 24: '发怒', 1: '调皮', 0: '呲牙', 33: '惊讶', 32: '难过', 12: '酷', 27: '冷汗', 13: '抓狂', 22: '吐', 3: '偷笑', 18: '可爱', 30: '白眼', 31: '傲慢', 81: '饥饿', 82: '困', 26: '惊恐', 2: '流汗', 37: '憨笑', 50: '大兵', 42: '奋斗', 83: '咒骂', 34: '疑问', 11: '嘘', 49: '晕', 84: '折磨', 39: '衰', 78: '骷髅', 5: '敲打', 4: '再见', 6: '擦汗', 85: '抠鼻', 86: '鼓掌', 87: '糗大了', 46: '坏笑', 88: '左哼哼', 44: '右哼哼', 89: '哈欠', 48: '鄙视', 14: '委屈', 90: '快哭了', 41: '阴险', 36: '亲亲', 91: '吓', 51: '可怜', 164: '眨眼睛', 174: '笑哭', 171: 'doge', 165: '泪奔', 166: '无奈', 161: '托腮', 167: '卖萌', 170: '斜眼笑', 169: '喷血', 172: '惊喜', 173: '骚扰', 168: '小纠结', 175: '我最美', 217: '加油必胜', 218: '加油抱抱', 219: '口罩护体', 260: '搬砖中', 261: '忙到飞起', 262: '脑阔疼', 263: '沧桑', 264: '捂脸', 265: '辣眼睛', 266: '哦哟', 267: '头秃', 268: '问号脸', 269: '暗中观察', 270: 'emm', 271: '吃瓜', 272: '呵呵哒', 273: '我酸了',\n 274: '南', 17: '菜刀', 60: '西瓜', 61: '啤酒', 92: '篮球', 93: '乒乓', 163: '茶', 66: '咖啡', 58: '饭', 7: '猪头', 8: '玫瑰', 57: '凋谢', 29: '示爱', 28: '爱心', 74: '心碎', 59: '蛋糕', 80: '闪电', 16: '炸弹', 70: '刀', 77: '足球', 62: '瓢虫', 15: '便便', 68: '月亮', 75: '太阳', 76: '礼物', 45: '拥抱', 52: '强', 53: '弱', 54: '握手', 55: '胜利', 56: '抱拳', 63: '勾引', 73: '拳头', 72: '差劲', 65: '爱你', 94: 'NO', 64: 'OK', 38: '爱情', 47: '飞吻', 95: '跳跳', 71: '发抖', 96: '怄火', 97: '转圈', 98: '磕头', 99: '回头', 100: '跳绳', 79: '挥手', 101: '激动', 102: '街舞', 103: '献吻', 104: '左太极', 105: '右太极', 108: '双喜', 109: '鞭炮', 110: '灯笼', 112: 'k歌', 116: '喝彩', 118: 
'爆筋', 119: '棒棒糖', 120: '喝奶', 123: '飞机', 130: '钞票', 140: '药', 141: '手枪', 180: '蛋', 184: '红包', 176: '河蟹', 177: '羊驼', 182: '菊花', 179: '幽灵', 185: '大笑', 143: '不开心', 146: '冷漠', 147: '呃', 148: '好棒', 149: '拜托', 150: '点赞', 151: '无聊', 152: '托脸', 153: '吃', 154: '送花', 155: '害怕', 156: '花痴', 157: '小样儿', 159: '飙泪', 160: '我不看'}\n\n # 查找表情前缀\\x14\n pos = msg.find('\\x14')\n while (pos != -1):\n lastpos = pos\n if (pos + 1 < len(msg)):\n num = ord(msg[pos + 1])\n else:\n break\n # 替换表情符号\n if (num in emdc):\n if (mode == 'txt'):\n msg = msg.replace(msg[pos:pos + 2],\n '[{}]'.format(emdc[num]))\n else:\n pass\n else:\n print('未知表情:{}'.format(str(num)))\n pass\n # 继续查找下一个\\x14\n pos = msg.find('\\x14')\n if (pos == lastpos):\n break\n return msg\n\n # 3.1 导出好友信息\n def exFriends(self, mode='txt', name='friends'):\n # 打开文件\n outfile = os.path.join(self.outdir, \"{}.txt\".format(name))\n fc = open(outfile, \"w+\", encoding=\"utf-8\")\n\n # 3.1.1 导出为txt文本\n if (mode == 'txt'):\n for uin in self.friends:\n fc.write('qq:{}\\t昵称:{}\\t备注:{}\\t年龄:{}\\t性别:{}\\n'.format(\n uin, self.friends[uin][0], self.friends[uin][1], self.friends[uin][2], self.friends[uin][3]))\n # 3.1.2 导出为json\n elif (mode == 'json'):\n json.dump(self.friends, fc)\n else:\n pass\n\n fc.close()\n\n # 3.2 导出群组信息\n def exTroop(self, mode='txt', name='troop'):\n # 打开文件\n outfile = os.path.join(self.outdir, \"{}.txt\".format(name))\n fc = open(outfile, \"w+\", encoding=\"utf-8\")\n\n # 3.1.1 导出为txt文本\n if (mode == 'txt'):\n for uin in self.troop:\n # self.troop = { uin: [name, code, owneruin, memo]}\n fc.write('群号:{}\\t群名:{}\\t群号2:{}\\t群主:{}\\t群简介:{}\\n'.format(\n uin, self.troop[uin][0], self.troop[uin][1], self.troop[uin][2], self.troop[uin][3]))\n # 3.1.2 导出为json\n elif (mode == 'json'):\n json.dump(self.troop, fc)\n else:\n pass\n\n fc.close()\n\n # 3.3 导出群成员信息\n def exTroopMem(self, mode='txt', name='troopmem'):\n # 打开文件\n outfile = os.path.join(self.outdir, \"{}.txt\".format(name))\n fc = open(outfile, \"w+\", encoding=\"utf-8\")\n\n # 3.1.1 导出为txt文本\n if (mode == 'txt'):\n for tuin in self.troopmem:\n fc.write('---群号:{}\\n'.format(tuin))\n for quin in self.troopmem[tuin]:\n # self.troopmem = { tuin: { quin: [tname(群名片), qname(), jtime] }}\n fc.write('QQ号:{}\\t群名片:{}\\tQQ名:{}\\t入群时间:{}\\n'.format(\n quin, self.troopmem[tuin][quin][0], self.troopmem[tuin][quin][1], self.troopmem[tuin][quin][2]))\n fc.write('\\n')\n # 3.1.2 导出为json\n elif (mode == 'json'):\n json.dump(self.troopmem, fc)\n else:\n pass\n\n fc.close()\n\n # 3.4 导出单个好友聊天记录\n def exMsgsf(self, msgs, mode='txt', name=''):\n if (name == ''):\n print('未知好友')\n name = str(time.time())\n # 打开文件\n outfile = os.path.join(self.outdir, \"f_{}.txt\".format(name))\n fc = open(outfile, \"w+\", encoding=\"utf-8\")\n\n # 按时间排序\n msgs.sort(key=lambda msg: msg[1])\n # 3.1.1 导出为txt文本\n if (mode == 'txt'):\n for i in msgs:\n # msgs = [[uin, stime, msg, suin, fuin]]\n uin, stime, msg, suin, fuin = i\n # 替换表情\n msg = self.emReplace(msg)\n # 替换时间\n stime = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(stime))\n if (uin == suin):\n # 是自己发言,以===开头\n fc.write('==={}({}) {}\\n{}\\n\\n'.format(\n self.getNamef(uin), uin, stime, msg))\n else:\n # 不是自己发言,以---开头\n fc.write(\n '---{}({}) {}\\n{}\\n\\n'.format(self.getNamef(uin), uin, stime, msg))\n # 3.1.2 导出为json\n elif (mode == 'json'):\n json.dump(msgs, fc)\n else:\n pass\n\n fc.close()\n\n # 3.5 导出单个群聊聊天记录\n def exMsgst(self, msgs, mode='txt', name=''):\n if (name == ''):\n # 取得群号\n name = str(msgs[0][0])\n\n # 打开文件\n outfile = os.path.join(self.outdir, 
\"{}.txt\".format(name))\n fc = open(outfile, \"w+\", encoding=\"utf-8\")\n\n # 按时间排序\n msgs.sort(key=lambda msg: msg[2])\n # 3.1.1 导出为txt文本\n if (mode == 'txt'):\n for i in msgs:\n # msgs = [[uin, stime, msg, suin]]\n tuin, uin, stime, msg, suin = i\n # 替换表情\n msg = self.emReplace(msg)\n # 替换时间\n stime = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(stime))\n if (uin == suin):\n # 是自己发言,以===开头\n fc.write('==={}({}) {}\\n{}\\n\\n'.format(\n self.getNamet(uin, tuin), uin, stime, msg))\n else:\n # 不是自己发言,以---开头\n fc.write(\n '---{}({}) {}\\n{}\\n\\n'.format(self.getNamet(uin, tuin), uin, stime, msg))\n # 3.1.2 导出为json\n elif (mode == 'json'):\n json.dump(msgs, fc)\n else:\n pass\n\n fc.close()\n\n # 3.6 导出所有聊天记录\n def exMsgsAll(self):\n # 导出好友\n for i in self.msgf:\n self.exMsgsf(self.msgf[i], name=i)\n # 导出群聊\n for i in self.msgt:\n self.exMsgst(self.msgt[i])\n pass\n","sub_path":"c_qq.py","file_name":"c_qq.py","file_ext":"py","file_size_in_byte":17139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"483221199","text":"\nfrom config import pdf_to_image_config\n\nimport logging\nimport os\n\n# ---- Configuration file parameters ----\ngs_path = pdf_to_image_config.val_gs_path\nlog_file_path = pdf_to_image_config.val_log_file_path\n\n# ---- Logging set up ------------\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\nformatter = logging.Formatter('%(asctime)s:%(levelname)s:%(name)s:%(message)s')\n\nfile_handler = logging.FileHandler(log_file_path)\nfile_handler.setFormatter(formatter)\nlogger.addHandler(file_handler)\n\nstream_handler = logging.StreamHandler()\nstream_handler.setFormatter(formatter)\nlogger.addHandler(stream_handler)\n\n# ---------- User inputs ----\n\n'''pdf_input_path = r\"D:\\workspace\\ghostscript\\data\\resume.pdf\"\noutput_directory_path = r\"D:\\workspace\\ghostscript\\data\"\nformat = 'jpg'\ndpi = 300'''\n\n\n# logger.debug(\"debug message\")\n# logger.info(\"info message\")\n# logger.warning(\"warning message\")\n# logger.error(\"error message\")\n# logger.critical(\"critical message\")\n\ndef pdftoimg(ghost_path=None,\n dpi=100,\n pdf_input_path=None,\n output_directory_path=None,\n image_format='jpg'):\n _, file_name = os.path.split(pdf_input_path)\n file_name_wo_ext = file_name.split('.')[0]\n text_output_path = os.path.join(output_directory_path, file_name_wo_ext)\n\n my_string = f'\"{ghost_path}\" -dNOPAUSE -dBATCH -sDEVICE={image_format} -r{dpi}' \\\n f' -sOutputFile=\"{text_output_path}\"_%0d.\"{image_format}\" \"{pdf_input_path}\"'\n logger.info(my_string)\n\n logger.info(f\"Executing the command\")\n os.system(f'\"{my_string}\"')\n logger.info(f\"Command completed\")\n\ndef switch():\n logger.info(\"Select the respective option for your required format: \")\n logger.info(\"Enter the required format: \\n png or \\n png_gray or \\n jpg \")\n option = input(\"Enter the format: \")\n\n dict1 = {\n 'png': 'png16m',\n 'png_gray': 'pnggray',\n 'jpg': 'jpeg'\n }\n return dict1.get(option)\n\n\nif __name__ == '__main__':\n ghostscript_path = r\"C:\\gs\\gs9.52\\bin\\gswin64c.exe\"\n input_path = input(\"Enter the pdf input path: \") # \"D:\\workspace\\ghostscript\\data\\resume.pdf\"\n output_path = input(\"Enter the pdf output path: \") # \"D:\\workspace\\ghostscript\\data\"\n image_format = switch()\n logger.info(f\"User inputs :: input_path: {input_path}, output_path: {output_path}, image_format: {image_format}\")\n dpi = int(input(\"Enter the required dpi\"))\n 
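    # [Editor's note: pdftoimg() above runs Ghostscript through os.system with
    #  the whole command line wrapped in one extra pair of double quotes, so on
    #  POSIX shells the entire string is looked up as a single program name and
    #  the call fails. Below is a minimal quoting-safe sketch that passes an
    #  argument list instead of a shell string; it assumes the same Ghostscript
    #  flags as pdftoimg() and is an editor addition, defined here but not
    #  called.]
    import subprocess

    def run_ghostscript_safely(ghost_path, dpi, pdf_input_path, output_stem, image_format):
        # No shell is involved, so spaces in paths need no manual quoting.
        args = [
            ghost_path, '-dNOPAUSE', '-dBATCH',
            '-sDEVICE=' + image_format, '-r' + str(dpi),
            '-sOutputFile=' + output_stem + '_%0d.' + image_format,
            pdf_input_path,
        ]
        subprocess.run(args, check=True)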
pdftoimg(ghost_path=ghostscript_path,\n dpi=dpi,\n pdf_input_path=input_path,\n output_directory_path=output_path,\n image_format=image_format)\n\n\n# ghost_path = r\"C:\\gs\\gs9.52\\bin\\gswin64c.exe\"\n# pdf_input_path = r\"D:\\workspace\\ghostscript\\data\\resume.pdf\"\n# output_directory_path = r\"D:\\workspace\\tesseract\\data\"\n# dpi = int(input(\"Enter the required dpi\"))\n#\n\n\n","sub_path":"pdf_to_image_module.py","file_name":"pdf_to_image_module.py","file_ext":"py","file_size_in_byte":2842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"435929208","text":"# Based on ProjectEuler+ from Hackerrank.\n# Link: https://www.hackerrank.com/contests/projecteuler/challenges/euler005/problem\n\nsieve = [False]*45\nprimes = []\n\nfor num in range(2, 45):\n if sieve[num]: continue\n primes.append(num)\n mult = num * 2\n\n while mult < 45:\n sieve[mult] = True\n mult += num\n\ndef smallest_multiple(n):\n expo = [0 for _ in range(45)]\n\n for prime in primes:\n for num in range(2, n+1):\n expo_count = 0\n while num >= prime and num % prime == 0:\n num //= prime\n expo_count += 1\n expo[prime] = max(expo[prime], expo_count)\n answer = 1\n for prime in primes:\n answer *= prime**expo[prime]\n return answer\n\nt = int(input().strip())\nfor a0 in range(t):\n n = int(input().strip())\n print(smallest_multiple(n))\n\n","sub_path":"euler_project/5_smallest_multiples.py","file_name":"5_smallest_multiples.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"355213674","text":"from kubernetes.client.configuration import Configuration\nfrom kubernetes.client.api_client import ApiClient\nfrom kubernetes import client, config, watch\n\nfrom pprint import pprint\nfrom copy import deepcopy\nimport requests\nfrom time import sleep\n\nimport traceback\nfrom retry import retry\n\n_metrics = {}\n\n\ndef metric(cls):\n name = cls.__name__\n name = name[0].lower() + name[1:]\n _metrics[name] = cls\n return cls\n\n\n@metric\nclass HttpErrorRate:\n def __init__(self, pods, **kwargs):\n self.pods = pods\n self.threshold = kwargs.get('threshold', 1)\n self.min_request_count = kwargs.get('minRequestCount', 100)\n\n def check(self):\n # Here be dragons\n json = requests.get(\"http://192.168.99.100:30900/api/v1/query?query=sanic_request_count\").json()\n results = json['data']['result']\n \n oks = 0\n non_oks = 0\n print(self.pods)\n\n for metric in filter(lambda result: result['metric']['pod'] in self.pods, results):\n if metric['metric']['http_status'] == '200':\n oks += int(metric['value'][1])\n else:\n non_oks += int(metric['value'][1])\n\n total_request_count = oks + non_oks\n print(f\"Total request count: {total_request_count}\")\n # If we don't have enough data, make the check fail\n if total_request_count < self.min_request_count:\n return False\n error_rate = (100 * non_oks)/(oks + non_oks)\n print(f\"Error rate: {error_rate}\")\n return error_rate < self.threshold\n\nDEFAULT_METRICS = []\n\nclass CanaryDeployment:\n def __init__(self, api_object, api_client):\n self._api_object = api_object\n if 'status' not in self._api_object:\n self._api_object['status'] = {}\n self._api_client = api_client\n\n def get_deployment_name(self):\n return self.get_deployment_template()['metadata']['name']\n\n def get_deployment_template(self):\n return self._api_object['spec']['deploymentTemplate']\n\n def get_canary_deployment_pods(self):\n api = 
client.CoreV1Api(self._api_client)\n ret = api.list_pod_for_all_namespaces(watch=False)\n pods = []\n for pod in ret.items:\n if pod.metadata.name.startswith(self.get_deployment_name() + '-canary'):\n pods.append(pod.metadata.name)\n return pods\n \n def create_deployment(self):\n api = client.AppsV1Api(self._api_client)\n deployment_template = deepcopy(self.get_deployment_template())\n deployment_template['metadata']['labels']['tier'] = 'canary'\n deployment_template['metadata']['name'] += '-canary'\n\n try:\n response = api.create_namespaced_deployment(\n namespace=deployment_template['metadata'].get('namespace', 'default'),\n body=deployment_template,\n )\n except client.rest.ApiException as e:\n if e.status == 409:\n # HTTP 409 Conflict, deployment already exists\n pass\n else:\n raise\n self._api_object['status']['deploymentCreated'] = True\n self.update_api()\n\n def check_metrics(self):\n metrics = []\n for metric in self._api_object['spec'].get('metrics', DEFAULT_METRICS):\n kind = metric['kind']\n MetricClass = _metrics[kind]\n m = MetricClass(self.get_canary_deployment_pods(), **metric)\n metrics.append(m)\n result = all(m.check() for m in metrics)\n self._api_object['status']['metricsChecked'] = True\n self.update_api()\n return result\n\n # The Kubernetes API fails intermittently, at least locally\n def update_api(self):\n # Can't PATCH with resourceVersion specified\n if 'resourceVersion' in self._api_object['metadata']:\n del self._api_object['metadata']['resourceVersion']\n crds = client.CustomObjectsApi(self._api_client)\n crds.patch_namespaced_custom_object(\n DOMAIN,\n VERSION,\n self._api_object['metadata']['namespace'],\n RESOURCE_NAME,\n self._api_object['metadata']['name'],\n self._api_object\n )\n\n def replace_deployment(self):\n api = client.AppsV1Api(self._api_client)\n deployment_template = self.get_deployment_template()\n api.patch_namespaced_deployment(\n deployment_template['metadata']['name'],\n deployment_template['metadata'].get('namespace', 'default'),\n deployment_template,\n )\n self._api_object['status']['deploymentReplaced'] = True\n self.update_api()\n\n def delete_canary_deployment(self):\n api = client.AppsV1Api(self._api_client)\n \n try:\n api.delete_namespaced_deployment(\n self.get_deployment_name() + '-canary',\n self.get_deployment_template()['metadata'].get('namespace', 'default'),\n client.V1DeleteOptions(),\n )\n except client.rest.ApiException as e:\n if e.status == 404:\n # Already deleted\n pass\n else:\n raise\n self._api_object['status']['deletedCanaryDeployment'] = True\n self.update_api()\n\n\n def run(self):\n if not self._api_object['status'].get('deploymentCreated', False):\n print(\"Creating deployment\")\n self.create_deployment()\n if not self._api_object['status'].get('metricsChecked', False):\n print(\"Sleeping before checking metrics\")\n sleep(60)\n print(\"Checking metrics\")\n metrics_ok = self.check_metrics()\n self._api_object['status']['metricsOk'] = metrics_ok\n self.update_api()\n if (self._api_object['status'].get('metricsOk', False)\n and not self._api_object['status'].get('deploymentReplaced', False)):\n print(\"Metrics OK, replacing main deployment\")\n self.replace_deployment()\n if not self._api_object['status'].get('deletedCanaryDeployment', False):\n print(\"Deleting canary deployment\")\n self.delete_canary_deployment()\n\nDOMAIN = \"canarying.mozilla.com\"\nVERSION = \"v1alpha1\"\nRESOURCE_NAME = \"canarydeployments\"\n \n\ndef main():\n config.load_kube_config()\n api_client = 
client.api_client.ApiClient()\n crds = client.CustomObjectsApi(api_client)\n stream = watch.Watch().stream(crds.list_cluster_custom_object, DOMAIN, VERSION, RESOURCE_NAME)\n for event in stream:\n obj, event_type = event['object'], event['type']\n if event_type != 'ADDED':\n continue\n cd = CanaryDeployment(obj, api_client)\n try:\n cd.run()\n except Exception as e:\n traceback.print_exc()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"lib/canary/canary.py","file_name":"canary.py","file_ext":"py","file_size_in_byte":6865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"527070554","text":"import pygame\nimport random \nimport time\n\nfrom pygame.constants import K_DOWN, K_LEFT, K_RIGHT, K_UP\n\npygame.init()\n\nWIDTH = 600\nHEIGHT = 500\n\ngo = True\n\nscreen = pygame.display.set_mode((WIDTH, HEIGHT))\npygame.display.set_caption('Snake Game')\nfont = pygame.font.SysFont('Times New Roman', 25)\n\nFPS = 30\nclock = pygame.time.Clock()\n\nclass Snake:\n def __init__(self):\n self.size = 3\n self.radius=12\n self.dx = 5\n self.dy = 0\n self.elements = [[100, 100], [120, 100], [140, 100]]\n self.score = 0\n self.is_add = False\n\n def draw(self):\n for element in self.elements:\n pygame.draw.circle(screen, (199, 108, 243), element, self.radius)\n\n def add_snake(self):\n self.size += 1\n self.score += 1\n self.elements.append([0, 0])\n self.is_add = False\n\n def move(self):\n if self.is_add:\n self.add_snake()\n for i in range(self.size - 1, 0, -1):\n self.elements[i][0] = self.elements[i - 1][0]\n self.elements[i][1] = self.elements[i - 1][1]\n \n self.elements[0][0] += self.dx\n self.elements[0][1] += self.dy\n\nclass Food:\n\n def __init__(self):\n self.x = random.randint(100, WIDTH - 70)\n self.y = random.randint(100, HEIGHT - 70)\n self.image = pygame.image.load(\"apples.png\")\n self.position = [random.randint(0, WIDTH - 100), random.randint(0, HEIGHT - 100)]\n \n def draw(self):\n screen.blit(self.image, (self.x, self.y))\n\ndef show_score(x, y, score):\n show = font.render('Score: ' + str(score), True, (50, 28, 217))\n screen.blit(show, (x, y))\n\ndef collision():\n if(food.x in range(snake.elements[0][0] - 20, snake.elements[0][0])) and (food.y in range(snake.elements[0][1] - 20, snake.elements[0][1])):\n snake.is_add = True\n food.x = random.randint(50, WIDTH - 70)\n food.y = random.randint(50, HEIGHT - 70)\n\ndef is_in_walls():\n return snake.elements[0][0] > WIDTH - 25 or snake.elements[0][0] < 30 \n\ndef game_over():\n # pygame.display.flip()\n screen.fill((255, 0, 0))\n txt = font.render('GAME OVER!', True, (255, 255, 255))\n my_score = font.render('Total score: ' + str(snake.score), True, (255, 255, 255))\n screen.blit(txt, (200, 200))\n screen.blit(my_score, (200, 300))\n pygame.display.flip()\n time.sleep(3)\n pygame.quit()\n\ndef show_walls():\n for i in range(0, WIDTH, 15):\n screen.blit(wall_image, (i, 0))\n screen.blit(wall_image, (i, HEIGHT - 30))\n screen.blit(wall_image, (0, i))\n screen.blit(wall_image, (WIDTH - 30, i))\n\nsnake = Snake()\nsnake2 =Snake()\n# W A S D\n\nfood = Food()\n\nwall_image = pygame.image.load('brickwall.png')\n\n\nwhile go:\n mil = clock.tick(FPS)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n go = False\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_RIGHT:\n snake.dx = 5\n snake.dy = 0\n if event.key == pygame.K_LEFT:\n snake.dx = -5\n snake.dy = 0\n if event.key == pygame.K_UP:\n snake.dx = 0\n snake.dy = -5\n if event.key == pygame.K_DOWN:\n 
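                # [Editor's note: the second 'for event in pygame.event.get():'
                # loop below (the WASD controls for snake2) almost never sees
                # any input -- pygame.event.get() drains the event queue, so
                # events consumed by this first loop never reach the second.
                # Reading the keys for both snakes inside one event loop is
                # the usual fix.]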
snake.dx = 0\n snake.dy = 5\n \n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n go = False\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_d:\n snake2.dx = 5\n snake2.dy = 0\n if event.key == pygame.K_a:\n snake2.dx = -5\n snake2.dy = 0\n if event.key == pygame.K_w:\n snake2.dx = 0\n snake2.dy = -5\n if event.key == pygame.K_s:\n snake2.dx = 0\n snake2.dy = 5\n\n\n if is_in_walls():\n game_over()\n go = False\n\n collision()\n screen.fill((216, 182, 211))\n snake.move()\n snake.draw()\n food.draw()\n show_score(35, 45, snake.score)\n show_walls()\n pygame.display.flip()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\"\"\"import pygame\nimport time\nimport random\n\nfrom pygame.constants import WINDOWHITTEST\n \npygame.init()\n \nwhite = (255, 255, 255)\nyellow = (255, 255, 102)\nblack = (0, 0, 0)\nred = (213, 50, 80)\ngreen = (0, 255, 0)\nblue = (50, 153, 213)\n \nWIDTH = 600\nHEIGTH = 400\n \ndis = pygame.display.set_mode((WIDTH, HEIGTH))\npygame.display.set_caption('Snake Game')\n \nclock = pygame.time.Clock()\n \nsnake_block = 10\nsnake_speed = 15\n \nfont_style = pygame.font.SysFont(\"bahnschrift\", 25)\nscore_font = pygame.font.SysFont(\"comicsansms\", 35)\n \n \ndef Your_score(score):\n value = score_font.render(\"Your Score: \" + str(score), True, yellow)\n dis.blit(value, [0, 0])\n \n \ndef our_snake(snake_block, snake_list):\n for x in snake_list:\n pygame.draw.rect(dis, black, [x[0], x[1], snake_block, snake_block])\n \n \ndef message(msg, color):\n mesg = font_style.render(msg, True, color)\n dis.blit(mesg, [WIDTH / 6, HEIGTH / 3])\n\nwall_image = pygame.image.load('wall.png')\n\ndef show_walls():\n for i in range(0, WIDTH, 15):\n dis.blit(wall_image, (i, 0))\n dis.blit(wall_image, (i, HEIGTH - 30))\n dis.blit(wall_image, (0, i))\n dis.blit(wall_image, (WIDTH - 30, i))\n\ndef gameLoop():\n game_over = False\n game_close = False\n \n x1 = WIDTH / 2\n y1 = HEIGTH / 2\n \n x1_change = 0\n y1_change = 0\n \n snake_List = []\n Length_of_snake = 1\n \n foodx = round(random.randrange(0, WIDTH - snake_block) / 10.0) * 10.0\n foody = round(random.randrange(0, HEIGTH - snake_block) / 10.0) * 10.0\n \n while not game_over:\n \n while game_close == True:\n dis.fill(red)\n message(\"GAME OVER! 
Click 'esc' to close game\", black)\n Your_score(Length_of_snake - 1)\n pygame.display.update()\n \n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n game_over = True\n game_close = False\n \n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n game_over = True\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_LEFT:\n x1_change = -snake_block\n y1_change = 0\n elif event.key == pygame.K_RIGHT:\n x1_change = snake_block\n y1_change = 0\n elif event.key == pygame.K_UP:\n y1_change = -snake_block\n x1_change = 0\n elif event.key == pygame.K_DOWN:\n y1_change = snake_block\n x1_change = 0\n \n if x1 >= WIDTH or x1 < 0 or y1 >= HEIGTH or y1 < 0:\n game_close = True\n x1 += x1_change\n y1 += y1_change\n dis.fill(blue)\n pygame.draw.rect(dis, green, [foodx, foody, snake_block, snake_block])\n snake_Head = []\n snake_Head.append(x1)\n snake_Head.append(y1)\n snake_List.append(snake_Head)\n if len(snake_List) > Length_of_snake:\n del snake_List[0]\n \n for x in snake_List[:-1]:\n if x == snake_Head:\n game_close = True\n \n our_snake(snake_block, snake_List)\n Your_score(Length_of_snake - 1)\n \n pygame.display.update()\n \n if x1 == foodx and y1 == foody:\n foodx = round(random.randrange(0, WIDTH - snake_block) / 10.0) * 10.0\n foody = round(random.randrange(0, HEIGTH - snake_block) / 10.0) * 10.0\n Length_of_snake += 1\n \n\n clock.tick(snake_speed)\n \n pygame.quit()\n quit()\n \ngameLoop()\n\"\"\"","sub_path":"tsis9/snake/snake.py","file_name":"snake.py","file_ext":"py","file_size_in_byte":7797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"355057134","text":"from pathlib import Path\nimport numpy as np\nimport pandas as pd\n\nPATH=Path('../datasets/aclImdb/')\n\nCLASSES = ['neg', 'pos', 'unsup']\n\ndef get_texts(path):\n texts,labels = [],[]\n for idx,label in enumerate(CLASSES):\n for fname in (path/label).glob('*.*'):\n texts.append(fname.open('r').read())\n labels.append(idx)\n return np.array(texts),np.array(labels)\n\ntrn_texts,trn_labels = get_texts(PATH/'train')\nval_texts,val_labels = get_texts(PATH/'test')\n\ncol_names = ['labels','text']\n\nnp.random.seed(42)\ntrn_idx = np.random.permutation(len(trn_texts))\nval_idx = np.random.permutation(len(val_texts))\n\ntrn_texts = trn_texts[trn_idx]\nval_texts = val_texts[val_idx]\n\ntrn_labels = trn_labels[trn_idx]\nval_labels = val_labels[val_idx]\n\ndf_trn = pd.DataFrame({'text':trn_texts, 'labels':trn_labels}, columns=col_names)\ndf_val = pd.DataFrame({'text':val_texts, 'labels':val_labels}, columns=col_names)\n\ndf_trn[df_trn['labels']!=2].to_csv(PATH/'train.csv', header=False, index=False)\ndf_val.to_csv(PATH/'test.csv', header=False, index=False)\n\n(PATH/'classes.txt').open('w').writelines(f'{o}\\n' for o in CLASSES)\n\n","sub_path":"preprocess_imdb.py","file_name":"preprocess_imdb.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"456172925","text":"from django.shortcuts import redirect, render\nfrom .models import Tache, Category, TypeDeTache\nfrom .forms import ApplyTach\nfrom django.urls import reverse\n# Create your views here.\n\n\n\ndef tache_type(request,slug):\n type1 = TypeDeTache.objects.get(slug=slug)\n all_type = TypeDeTache.objects.all()\n tache_type = Tache.objects.all()\n context={'type1':type1, 'types':all_type, 'tache_type':tache_type}\n return 
render(request,'ponctuel.html',context)\n\n\ndef get_acceuil(request):\n all_type = TypeDeTache.objects.all()\n all_tache = Tache.objects.all()\n return render(request,'test.html',{'types':all_type,'all_tache':all_tache})\n\n\n\ndef add_tache(request):\n \n if request.method=='POST':\n form = ApplyTach(request.POST, request.FILES)\n if form.is_valid():\n myform = form.save(commit=False)\n myform.owner = request.user\n myform.save()\n return redirect(reverse('taches:add_tache'))\n else:\n form = ApplyTach()\n \n all_type = TypeDeTache.objects.all()\n return render(request,'add_tache.html',{'form':form , 'types':all_type})\n\n\ndef detail(request,id):\n tache_detail = Tache.objects.get(id=id)\n all_type = TypeDeTache.objects.all()\n return render(request,'detail.html',{'detail':tache_detail, 'types':all_type})\n\n","sub_path":"TacheLongCours/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"245600651","text":"\n##############################################################################\n#\n# Copyright (c) 2003-2018 by The University of Queensland\n# http://www.uq.edu.au\n#\n# Primary Business: Queensland, Australia\n# Licensed under the Apache License, version 2.0\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Development until 2012 by Earth Systems Science Computational Center (ESSCC)\n# Development 2012-2013 by School of Earth Sciences\n# Development from 2014 by Centre for Geoscience Computing (GeoComp)\n#\n##############################################################################\n\nfrom __future__ import print_function, division\n\n__copyright__=\"\"\"Copyright (c) 2003-2018 by The University of Queensland\nhttp://www.uq.edu.au\nPrimary Business: Queensland, Australia\"\"\"\n__license__=\"\"\"Licensed under the Apache License, version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0\"\"\"\n__url__=\"https://launchpad.net/escript-finley\"\n\nimport os, sys\nimport esys.escriptcore.utestselect as unittest\nfrom esys.escriptcore.testing import *\nfrom esys.escript import *\nfrom esys.escript.linearPDEs import LameEquation\nfrom esys.ripley import MultiResolutionDomain\nfrom run_diracOnRipley import Test_RipleyDiracPoints\n\nmpiSize = getMPISizeWorld()\n\ntry:\n RIPLEY_WORKDIR=os.environ['RIPLEY_WORKDIR']\nexcept KeyError:\n RIPLEY_WORKDIR='.'\n\nbrickLevel = 2\nrectLevel = 2\n\ndef Rectangle(**kwargs):\n m = MultiResolutionDomain(2, **kwargs)\n return m.getLevel(rectLevel - 1)\n\ndef Brick(**kwargs):\n m = MultiResolutionDomain(3, **kwargs)\n return m.getLevel(brickLevel - 1)\n\n@unittest.skipIf(mpiSize > 1, \"Multiresolution domains require single process\")\nclass Test_DiracPointsOnMultiResolutionDomains(Test_RipleyDiracPoints):\n\n def setup(self):\n # constants\n self.numRanks = getMPISizeWorld()\n self.rank = getMPIRankWorld()\n self.shortEdge = 3\n self.longFactor = 5\n self.longEdge = self.longFactor*self.numRanks-1\n self.empty = \"(data contains no samples)\\n\"\n\n def getRectRefs(self, xLong):\n Ex = self.longEdge+1\n Ey = self.shortEdge+1\n if not xLong:\n Ex, Ey = Ey, Ex\n result = [[-1 for j in range(Ex)] for i in range(Ey)]\n ref = 0\n if xLong:\n for rankx in range(self.numRanks):\n for y in range(Ey):\n for x in range(self.longFactor):\n old = [ref%Ex, ref//Ex]\n new = [i*rectLevel for i in old]\n node = new[0] + new[1]*(rectLevel*Ex-1)\n result[y][x+self.longFactor*rankx] = node\n ref += 1\n else:\n for y in range(Ey):\n for x in 
range(Ex):\n old = [ref%Ex, ref//Ex]\n new = [i*rectLevel for i in old]\n node = new[0] + new[1]*(rectLevel*Ex-1)\n result[y][x] = node\n ref += 1\n return result\n \n def getBrickRefs(self, splitAxis, dims):\n dims = [i+1 for i in dims]\n results = [[[-1 for z in range(dims[2])] for y in range(dims[1])] for x in range(dims[0])]\n ref = 0\n rankDim = [i for i in dims]\n rankDim[splitAxis] = dims[splitAxis]//self.numRanks\n rc = [0, 0, 0] #rank counters\n for rank in range(self.numRanks):\n for z in range(rankDim[2]):\n for y in range(rankDim[1]):\n for x in range(rankDim[0]):\n old = [ref%dims[0], (ref%(dims[0]*dims[1]))//dims[0], ref//(dims[0]*dims[1])]\n new = [i*brickLevel for i in old]\n node = new[0] + new[1]*(brickLevel*(dims[0]-1)+1) + new[2]*(brickLevel*(dims[0]-1) + 1)*(brickLevel*(dims[1]-1)+1)\n results[x+rc[0]][y+rc[1]][z+rc[2]] = node\n ref += 1\n rc[splitAxis] += rankDim[splitAxis]\n return results\n\n def generateRects(self, a, b):\n diracLoc = [a,b]\n edges = [self.longEdge, self.shortEdge]\n rects = []\n for i in range(2):\n rects.append(Rectangle(n0=edges[0], n1=edges[1],\n l0=edges[0], l1=edges[1],\n d0=self.numRanks, diracPoints=[tuple(diracLoc)],\n diracTags=[\"test\"]))\n diracLoc = diracLoc[::-1]\n edges = edges[::-1]\n return rects\n\n def generateBricks(self, a, b, c):\n diracLoc = [a,b,c]\n bricks = []\n edges = [self.longEdge, self.shortEdge, self.shortEdge]\n for i in range(3):\n bricks.append(Brick(n0=edges[0], n1=edges[1], n2=edges[2],\n l0=edges[0], l1=edges[1], l2=edges[2],\n d0=self.numRanks,\n diracPoints=[tuple(diracLoc)], diracTags=[\"test\"]))\n diracLoc = diracLoc[2:] + diracLoc[:2]\n edges = edges[2:] + edges[:2]\n tmp = [self.shortEdge]*3\n dims = [tmp[:], tmp[:], tmp[:]]\n for i in range(3):\n dims[i][i] = self.longEdge\n return bricks, dims\n\nif __name__ == '__main__':\n run_tests(__name__, exit_on_failure=True)\n\n\n","sub_path":"ripley/test/python/run_diracOnMultiRes.py","file_name":"run_diracOnMultiRes.py","file_ext":"py","file_size_in_byte":5176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"274706309","text":"# -*- coding: utf-8 -*-\n\"\"\"Module for bader class.\n\nBader charge anaslysis performed by bader.\n(http://theory.cm.utexas.edu/henkelman/code/bader/)\n\nThis program is not a part of vasp but it deeply connects with the vasp.\n\"\"\"\n\nfrom __future__ import division # Version safety\nfrom __future__ import print_function\nfrom typing import Optional, IO, Any, List, Union\nfrom pathlib import Path\nfrom vaspy.tools import open_by_suffix\n\n\nclass BaderACF(object):\n r\"\"\"Class for storing bader charge analysis (ACF.dat).\n\n ACF.dat contains the coordinates of each atom, the charge\n associated with it according to Bader partitioning, percentage of\n the whole according to Bader partitioning and the minimum distance\n to the surface. 
This distance should be compared to maximum\n cut-off radius for the core region if pseudo potentials have been\n used.\n\n Attributes\n ----------\n natom: int\n Number of atoms\n positions: list\n Atom positions in :math:`\\AA`\n charges: list\n charges\n vols: list\n volumes\n vaccharge: float\n vacuum charge\n vacvol: float\n vacuum volume\n nelectron: float\n number of electron\n\n \"\"\"\n\n def __init__(self, filename: Union[str, Path, None] = None) -> None:\n \"\"\"Initialize.\"\"\"\n self.natom = 0\n self.positions: List[List[float]] = []\n self.charges: List[float] = []\n self.mindists: List[float] = []\n self.vols: List[float] = []\n self.vaccharge: float = 0.0\n self.vacvol: float = 0.0\n self.nelectron: float = 0.0\n if filename:\n self.parse(open_by_suffix(str(filename)))\n\n def parse(self, thefile: IO[Any]) -> None:\n \"\"\"Parse AFC.dat.\n\n Parameters\n ----------\n thefile: StringIO\n 'ACF.dat' file\n\n \"\"\"\n # the first line is like:\n # X Y Z CHARGE MIN DIST ATOMIC VOL\n next(thefile)\n # the 2nd line is just \"----------\"\n separator = next(thefile)\n for line in thefile:\n if separator in line:\n break\n else:\n tmp = line.split()\n self.positions.append([float(pos) for pos in tmp[1:4]])\n self.charges.append(float(tmp[4]))\n self.mindists.append(float(tmp[5]))\n self.vols.append(float(tmp[6]))\n self.vaccharge = float(next(thefile).split()[-1])\n self.vacvol = float(next(thefile).split()[-1])\n self.nelectron = float(next(thefile).split()[-1])\n self.natom = len(self.positions)\n thefile.close()\n","sub_path":"vaspy/bader.py","file_name":"bader.py","file_ext":"py","file_size_in_byte":2631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"26936760","text":"#!/bin/python\nimport requests\nfrom bs4 import BeautifulSoup as bs\nimport lxml\nmain_recipe_url = \"http://www.onegreenplanet.org/tag/raw-vegan-recipes/\"\n\n\"\"\"\nThis file contains the classes for Cookbook_Crawler. \n\nAuthor: Ethan Henderson\n\ngithub.com/ethan626\n\"\"\"\n\nclass RecipeExtractor:\n \"\"\" Class to extract recipes from OneGreenPlanet\"\"\"\n\n def __init__(self,url):\n \"\"\n self.url = url\n self.response = requests.get(self.url)\n self.content = self.response.content\n self.soup = bs(self.content,'lxml')\n self.image_counter = 0 \n \n try:\n self.next_page = self.soup('a',{\"class\":'next pagination'})[0]['href']\n \n except:\n self.next_page = False # No more pages\n\n def go_to_next_page(self):\n \"Loads the next search page and resets the class attributes to reflect this change\"\n self.__init__(self.next_page) \n\n def save_image(self,image_url,file_name):\n \"Saves the image at a given url\"\n jpeg_stream = requests.get(image_url,stream=True)\n\n with open(file_name,\"wb\") as f:\n for chunk in jpeg_stream.iter_content():\n f.write(chunk)\n\n def current_page_recipe_links(self):\n \"Yields the recipe links in the main page\"\n \n results = self.soup.find_all(\"div\",{\"class\":\"vegan-recipe contentcontainer\"}) \n\n for tag in results:\n for subtag in tag.find_all(\"a\"):\n yield(subtag[\"href\"]) \n\n def get_recipe_data(self,url,photos=False):\n \"Returns the recipe name, recipe ingredients, and recipe preparation, and a url to the photo. 
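        Yields the recipe name, ingredients and preparation text in that
        order; when photos=True a final item, the page's og:image URL, is
        also yielded. (Editor's clarification, summarizing the code below.)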
\"\n \n recipe_page_response = requests.get(url)\n recipe_page_content = recipe_page_response.content\n local_soup = bs(recipe_page_content,'lxml')\n \n for class_type in ({\"class\":\"recipe-name\"},{\"class\":\"recipe-ingredients\"},{\"class\":\"recipe-preparation\"}):\n results = local_soup.find_all(\"div\",class_type)\n for tag in results:\n yield (tag.text.strip())\n \n if photos:\n yield local_soup(\"meta\",{\"property\":\"og:image\"})[0]['content'] # photo url\n\n def extract_recipe(self,url):\n \"Returns a list of the recipe data\"\n return [i for i in self.get_recipe_data(url)]\n\n def scrape_helper(self,photos=False):\n \"Helper Function to be called by gather_recipes\"\n \n while self.next_page:\n current_page_links = self.current_page_recipe_links()\n \n for link in current_page_links:\n yield self.extract_recipe(link)\n \n self.go_to_next_page()\n\n def gather_recipes(self,max_results=False,photos=False):\n \"Generator which yields recipie text\"\n \n if max_results:\n for recipe,counter in zip(self.scrape_helper(photos=photos),range(max_results)): \n yield recipe\n else:\n for recipe in self.scrape_helper(photos=photos):\n yield recipe\n \n def make_cookbook(self,file_name,recipes,photos=False,template=\"cookbooktemplate.tex\"):\n \"Writes a LaTeX file of the cookbook\"\n \n with open(file_name,\"a+\") as f:\n with open(template) as template: # put the template into the new file\n for line in template:\n f.write(line)\n\n for recipe in recipes: # takes list input - write recipes to the file\n try: \n f.write(\"\\\\section{\" + recipe[0] + \"}\\\\\\ \")\n f.write(recipe[1])\n f.write(recipe[2])\n f.write(\"\\\\newpage\")\n\n except:\n pass\n \n f.write(\"\\\\end{document}\")\n","sub_path":"cookbook_crawler_classes.py","file_name":"cookbook_crawler_classes.py","file_ext":"py","file_size_in_byte":3828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"624153828","text":"# coding:utf-8\nimport pandas as pd # 画图过程中会使用pandas\n\n\nresult_path = r\"data_files\\results\\plot.csv\"\ndf = pd.read_csv(result_path)\nprint(df.shape)\n# df1 = df.loc[df[\"abs_diff1\"] < 20.00]\n# print(df1)\nprint((df[\"abs_diff2\"]*df[\"abs_diff2\"]).mean())\n\n\n","sub_path":"result_analyse.py","file_name":"result_analyse.py","file_ext":"py","file_size_in_byte":265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"96859217","text":"import numpy as np\n\nfrom preprocessing.helpers.for_strings import alphabet\n\n\nclass Latin2Cyrillic:\n \"\"\"now the class replaces latin characters with russian analogs and does not the inverse\"\"\"\n def __init__(self):\n self._cyr_lat_map = {'а': 'a', 'е': 'e', 'к': 'k', 'о': 'o', 'р': 'p', 'с': 'c', 'у': 'y'}\n self._lat_cyr_map = {v: k for k, v in self._cyr_lat_map.items()}\n\n self.cyr_alphabet = alphabet(lang='rus')\n self.eng_alphabet = alphabet(lang='eng')\n\n self.from_lat_translator = str.maketrans(self._lat_cyr_map)\n self.from_cyr_translator = str.maketrans(self._cyr_lat_map)\n\n def _word_cyr_lat_occurrence(self, word: str, cyrillic_first=True):\n \"\"\"counts occurrence of latin and cyrillic characters\"\"\"\n cnt = np.zeros(2)\n cyr_index = 0 if cyrillic_first else 1\n lat_index = 1 - cyr_index\n for char in word:\n if char in self.cyr_alphabet:\n cnt[cyr_index] += 1\n elif char in self.eng_alphabet:\n cnt[lat_index] += 1\n return cnt\n\n def subs(self, word: str, threshold=0.5):\n \"\"\"perform substitution if the share of cyrillic symbols is greater 
than threshold\"\"\"\n cnt = self._word_cyr_lat_occurrence(word=word, cyrillic_first=True)\n length_significant = cnt.sum()\n\n if cnt[0]/(length_significant+0.01) > threshold:\n return self._subs_latin_with_cyrillic(word=word)\n else:\n return word\n\n def filter_string(self, s:str):\n return \" \".join([self.subs(i) for i in s.split()])\n\n def _subs_latin_with_cyrillic(self, word: str):\n \"\"\"perform substitution of latin characters\"\"\"\n return word.translate(self.from_lat_translator)\n\n","sub_path":"latin2cyrillic/Latin2Cyrillic.py","file_name":"Latin2Cyrillic.py","file_ext":"py","file_size_in_byte":1756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"31175560","text":"from bs4 import BeautifulSoup\nimport requests\nimport os\n\ninput_image = input(\"請輸入要下載的圖片:\")\n\nresponse = requests.get(f\"https://unsplash.com/s/photos/{input_image}\")\nsoup = BeautifulSoup(response.text, \"lxml\")\n\nresults = soup.find_all(\"img\", {\"class\": \"_2VWD4 _2zEKz\"}, limit=5)\n\nimage_links = [result.get(\"src\") for result in results] # 取得圖片來源連結\n\nfor index, link in enumerate(image_links):\n\n if not os.path.exists(\"images\"):\n os.mkdir(\"images\") # 建立資料夾\n\n img = requests.get(link) # 下載圖片\n\n with open(\"images\\\\\" + input_image + str(index+1) + \".jpg\", \"wb\") as file: # 開啟資料夾及命名圖片檔\n file.write(img.content) # 寫入圖片的二進位碼\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"561490521","text":"# kas - setup tool for bitbake based projects\n#\n# Copyright (c) Siemens AG, 2017\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport re\nimport os\nimport sys\nimport logging\nimport tempfile\nimport asyncio\nfrom subprocess import Popen, PIPE\n\n__license__ = 'MIT'\n__copyright__ = 'Copyright (c) Siemens AG, 2017'\n\n\nclass LogOutput:\n def __init__(self, live):\n self.live = live\n self.stdout = []\n self.stderr = []\n\n def log_stdout(self, line):\n if self.live:\n logging.info(line.strip())\n self.stdout.append(line)\n\n def log_stderr(self, line):\n if self.live:\n logging.error(line.strip())\n self.stderr.append(line)\n\n\n@asyncio.coroutine\ndef _read_stream(stream, cb):\n while True:\n line = yield from stream.readline()\n try:\n line = line.decode('utf-8')\n except:\n logging.warning('Could not decode line from stream - ignore it')\n if line:\n cb(line)\n else:\n break\n\n@asyncio.coroutine\ndef _stream_subprocess(cmd, cwd, env, shell, stdout_cb, stderr_cb):\n if shell:\n process = yield from asyncio.create_subprocess_shell(\n cmd,\n env=env,\n cwd=cwd,\n universal_newlines=True,\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.PIPE)\n else:\n process = yield from asyncio.create_subprocess_exec(\n *cmd,\n cwd=cwd,\n env=env,\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.PIPE)\n\n yield from asyncio.wait([\n _read_stream(process.stdout, stdout_cb),\n _read_stream(process.stderr, stderr_cb)\n ])\n ret = yield from process.wait()\n return ret\n\n\ndef run_cmd(cmd, cwd, env={}, fail=True, shell=False, liveupdate=True):\n rc = 0\n stdout = []\n stderr = []\n cmdstr = cmd\n if not shell:\n cmdstr = ' '.join(cmd)\n logging.info('{}$ {}'.format(cwd, cmdstr))\n\n logo = LogOutput(liveupdate)\n if asyncio.get_event_loop().is_closed():\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n else:\n loop = asyncio.get_event_loop()\n\n rc = loop.run_until_complete(\n _stream_subprocess(cmd, cwd, env, shell,\n logo.log_stdout, logo.log_stderr))\n loop.close()\n\n if rc and fail:\n msg = 'Command \"{cwd}$ {cmd}\" failed\\n'.format(cwd=cwd, cmd=cmdstr)\n for line in logo.stderr:\n msg += line\n logging.error(msg)\n sys.exit(rc)\n\n return (rc, ''.join(logo.stdout))\n\n\ndef find_program(paths, name):\n for path in paths.split(os.pathsep):\n prg = os.path.join(path, name)\n if os.path.isfile(prg):\n return prg\n return None\n\n\ndef get_oe_environ(config, build_dir):\n # nasty side effect function: running oe-init-build-env also\n # creates the conf directory\n\n oe_path = None\n for repo in config.get_repos():\n if os.path.exists(repo.path + '/oe-init-build-env'):\n oe_path = repo.path\n break\n if not oe_path:\n logging.error('Did not find oe-init-build-env')\n sys.exit(1)\n\n get_bb_env_file = tempfile.mktemp()\n with open(get_bb_env_file, 'w') as f:\n script = \"\"\"#!/bin/bash\n source oe-init-build-env $1 > /dev/null 2>&1\n env\n \"\"\"\n f.write(script)\n os.chmod(get_bb_env_file, 0o775)\n\n env = {}\n env['PATH'] = '/bin:/usr/bin'\n\n (rc, output) = run_cmd([get_bb_env_file, build_dir],\n cwd=oe_path, env=env, liveupdate=False)\n\n os.remove(get_bb_env_file)\n\n env = {}\n for line in output.splitlines():\n try:\n (key, val) = line.split('=', 1)\n env[key] = val\n except:\n pass\n\n vars = ['SSTATE_DIR', 'DL_DIR', 'TMPDIR']\n if 'BB_ENV_EXTRAWHITE' in env:\n ew = env['BB_ENV_EXTRAWHITE'] + ' 
'.join(vars)\n env.update({'BB_ENV_EXTRAWHITE': ew})\n\n for v in vars:\n if v in os.environ:\n env[v] = os.environ[v]\n\n return env\n\n\ndef ssh_add_key(env, key):\n p = Popen(['/usr/bin/ssh-add', '-'], stdin=PIPE, stdout=None,\n stderr=PIPE, env=env)\n error = p.communicate(input=str.encode(key))[1]\n if p.returncode and error:\n logging.error('failed to add ssh key: {}'.format(error))\n\n\ndef ssh_cleanup_agent(config):\n \"\"\"Removes the identities and stop the ssh-agent instance \"\"\"\n # remove the identities\n p = Popen(['/usr/bin/ssh-add', '-D'], env=config.environ)\n p.wait()\n if p.returncode != 0:\n logging.error('failed to delete SSH identities')\n\n # stop the ssh-agent\n p = Popen(['/usr/bin/ssh-agent', '-k'], env=config.environ)\n p.wait()\n if p.returncode != 0:\n logging.error('failed to stop SSH agent')\n\n\ndef ssh_setup_agent(config, envkeys=['SSH_PRIVATE_KEY']):\n output = os.popen('/usr/bin/ssh-agent -s').readlines()\n for line in output:\n matches = re.search(\"(\\S+)\\=(\\S+)\\;\", line)\n if matches:\n config.environ[matches.group(1)] = matches.group(2)\n\n for ek in envkeys:\n key = os.environ.get(ek)\n if key:\n ssh_add_key(config.environ, key)\n else:\n logging.warning('{} is missing'.format(ek))\n\n\ndef ssh_no_host_key_check(config):\n home = os.path.expanduser('~')\n if not os.path.exists(home + '/.ssh'):\n os.mkdir(home + '/.ssh')\n with open(home + '/.ssh/config', 'w') as f:\n f.write('Host *\\n\\tStrictHostKeyChecking no\\n\\n')\n","sub_path":"kas/libkas.py","file_name":"libkas.py","file_ext":"py","file_size_in_byte":6578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"66137096","text":"# For python 2 and 3 support\nfrom __future__ import division, print_function, unicode_literals\n\n# Common Imports\nimport numpy as np\nimport os\n\n# ML Imports\nfrom sklearn.datasets import load_iris\nfrom sklearn.tree import DecisionTreeClassifier, export_graphviz\n\n# Graph Imports\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import ListedColormap\nplt.rcParams['axes.labelsize'] = 14\nplt.rcParams['xtick.labelsize'] = 12\nplt.rcParams['ytick.labelsize'] = 12\n\n# Directory Config\nPROJECT_ROOT_DIR = \".\"\n\n\ndef image_path(fig_id):\n if not os.path.exists('images'):\n os.makedirs('images')\n return os.path.join(PROJECT_ROOT_DIR, \"images\", fig_id)\n\n\ndef save_fig(fig_id, tight_layout=True):\n print(\"Saving figure\", fig_id)\n if tight_layout:\n plt.tight_layout()\n plt.savefig(image_path(fig_id) + \".png\", format='png', dpi=300)\n\n\ndef plot_decision_boundary(clf,\n X,\n y,\n axes=[0, 7.5, 0, 3],\n iris=True,\n legend=False,\n plot_training=True):\n x1s = np.linspace(axes[0], axes[1], 100)\n x2s = np.linspace(axes[2], axes[3], 100)\n x1, x2 = np.meshgrid(x1s, x2s)\n X_new = np.c_[x1.ravel(), x2.ravel()]\n y_pred = clf.predict(X_new).reshape(x1.shape)\n custom_cmap = ListedColormap(['#fafab0', '#9898ff', '#a0faa0'])\n plt.contourf(x1, x2, y_pred, alpha=0.3, cmap=custom_cmap)\n if not iris:\n custom_cmap2 = ListedColormap(['#7d7d58', '#4c4c7f', '#507d50'])\n plt.contour(x1, x2, y_pred, cmap=custom_cmap2, alpha=0.8)\n if plot_training:\n plt.plot(X[:, 0][y == 0], X[:, 1][y == 0], \"yo\", label='Iris-Setosa')\n plt.plot(\n X[:, 0][y == 1], X[:, 1][y == 1], \"bs\", label=\"Iris-Versicolor\")\n plt.plot(\n X[:, 0][y == 2], X[:, 1][y == 2], \"g^\", label=\"Iris-Virginica\")\n if iris:\n plt.xlabel(\"Petal length\", fontsize=14)\n plt.ylabel(\"Petal width\", fontsize=14)\n else:\n 
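        # [Editor's note: a few lines below, plt.legend() is called with
        # loc='local right', which is not a valid Matplotlib location string;
        # recent Matplotlib versions raise ValueError for it, and 'lower
        # right' is presumably what was intended.]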
plt.xlabel(r\"$x_1$\", fontsize=18)\n plt.ylabel(r\"$x_2$\", fontsize=18, rotation=0)\n if legend:\n plt.legend(loc=\"local right\", fontsize=14)\n\n\n# Configure data\niris = load_iris()\nX = iris.data[:, 2:] # petal length and width\ny = iris.target\n\n# Train classifier\ntree_clf = DecisionTreeClassifier(max_depth=2, random_state=42)\ntree_clf.fit(X, y)\n\n# Predict classes and class probabilities\nprint(\"Given the dimensions length 5cm, width 1.5cm\")\nprint(\"Class probabilities: \", tree_clf.predict_proba([[5, 1.5]]))\nprint(\"Predict class:\", tree_clf.predict([[5, 1.5]]))\n\n# Graph graphics # Use dot -Tpng iris_tree.dot -o iris_tree.png\nexport_graphviz(\n tree_clf,\n out_file=image_path(\"iris_tree.dot\"),\n feature_names=iris.feature_names[2:],\n class_names=iris.target_names,\n rounded=True,\n filled=True)\n\n# Plot decision tree\nplt.figure(figsize=(8, 4))\nplot_decision_boundary(tree_clf, X, y)\nplt.plot([2.45, 2.45], [0, 3], \"k-\", linewidth=2)\nplt.plot([2.45, 7.5], [1.75, 1.75], \"k--\", linewidth=2)\nplt.plot([4.95, 4.95], [0, 1.75], \"k:\", linewidth=2)\nplt.plot([4.85, 4.85], [1.75, 3], \"k:\", linewidth=2)\nplt.text(1.40, 1.0, \"Depth=0\", fontsize=15)\nplt.text(3.2, 1.89, \"Depth=1\", fontsize=13)\nplt.text(4.05, 0.5, \"(Depth=2)\", fontsize=11)\n\nsave_fig(\"decision_tree_decision_boundaries_plot\")\nplt.show()\n","sub_path":"decision_trees/examples/viz_trees.py","file_name":"viz_trees.py","file_ext":"py","file_size_in_byte":3364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"467579434","text":"# the addToInventory function takes 2 parameters, an inventory (dictionary) and the addedItems (a list)\ndef addToInventory(inventory, addedItems):\n # this for-loop iterates through each item in addedItems\n for i in range( len(addedItems) ):\n # if the addedItem indexed by i is a key in the inventory, then increase the count of the value\n if addedItems[i] in inventory.keys():\n inventory[ addedItems[i] ] += 1\n # otherwise, add the item key to the inventory and set its value to 1\n else:\n inventory[ addedItems[i] ] = 1\n #return the inventory to the caller of this function\n return inventory\n\n# this is the code from the previous question\ndef displayInventory(inventory):\n print(\"Inventory:\")\n item_total = 0\n for k, v in inventory.items():\n print( str(v) + \" \" + k )\n item_total += v\n print(\"Total number of items: \" + str(item_total))\n\n\ninv = {'gold coin': 42, 'rope': 1}\ndragonLoot = ['gold coin', 'dagger', 'gold coin', 'gold coin', 'ruby']\ndisplayInventory(inv)\ninv = addToInventory(inv, dragonLoot)\ndisplayInventory(inv)","sub_path":"ch5/4-list-to-dictionary-solution.py","file_name":"4-list-to-dictionary-solution.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"140369081","text":"\n# Set\nfirstInput = raw_input().split(' ')\ncoinCount = int(firstInput[0])\npredictValue = int(firstInput[1])\ncoins =[]\nfor coin in range(coinCount):\n coins.append(int(input()))\n\nfor coin in coins:\n if coin > predictValue:\n coins.remove(coin)\n\ncoins.reverse()\nremainValue = predictValue\nsumCount = 0;\nfor coin in coins:\n coinCount = remainValue // coin\n sumCount += coinCount\n remainValue = remainValue % coin\n\n if remainValue == 0:\n print(sumCount)\n 
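        # [Editor's notes on this script: (1) raw_input() near the top is
        # Python 2 only and raises NameError on Python 3, while the use of
        # int(input()) elsewhere suggests Python 3 was intended; (2) the
        # earlier 'for coin in coins: ... coins.remove(coin)' loop mutates
        # the list while iterating over it and therefore skips elements;
        # (3) if the greedy loop never drives remainValue exactly to 0, the
        # script prints nothing at all.]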
break\n\n","sub_path":"Greed/coin0.py","file_name":"coin0.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"229035719","text":"from math import log\n\n\nclass ReadIntervals(object):\n \"\"\"\n targetLength: the length of the target sequence that these reads\n were against.\n \"\"\"\n\n EMPTY = 0\n FULL = 1\n\n def __init__(self, targetLength):\n self._targetLength = targetLength\n self._intervals = []\n\n def add(self, start, end):\n assert start <= end\n self._intervals.append((start, end))\n\n def walk(self):\n \"\"\"\n Yield intervals. Each is (type, (start, stop)) where type is\n either EMPTY or FULL and (start, stop) is the interval. The\n endpoint (stop) of the interval is not considered to be in the\n interval. I.e., the interval is really [start, stop).\n \"\"\"\n intervals = sorted(self._intervals)\n\n def nextFull():\n start, stop = intervals.pop(0)\n while intervals:\n if intervals[0][0] <= stop:\n _, thisStop = intervals.pop(0)\n if thisStop > stop:\n stop = thisStop\n else:\n break\n return (start, stop)\n\n if intervals:\n # If the first interval (read) starts after zero, yield an\n # initial empty section to get us to the first interval.\n if intervals[0][0] > 0:\n yield (self.EMPTY, (0, intervals[0][0]))\n\n while intervals:\n # Yield a full interval followed by an empty one (if there\n # is another interval pending).\n lastFull = nextFull()\n yield (self.FULL, lastFull)\n if intervals:\n yield (self.EMPTY, (lastFull[1], intervals[0][0]))\n\n # Yield the final empty section, if any.\n if lastFull[1] < self._targetLength:\n yield (self.EMPTY, (lastFull[1], self._targetLength))\n\n else:\n yield (self.EMPTY, (0, self._targetLength))\n\n\nclass OffsetAdjuster(object):\n \"\"\"\n A class that knows how to adjust the offsets in a normalized HSP according\n to the overall set of reads being plotted.\n\n intervals: an instance of ReadIntervals.\n base: the logarithmic base to use when adjusting empty spaces in the hit\n sequence.\n \"\"\"\n\n def __init__(self, intervals=None, base=2.0):\n self._adjustments = [] # Pairs of (X offset, adjustment).\n if intervals:\n divisor = log(base)\n for (intervalType, (start, stop)) in intervals.walk():\n if intervalType == ReadIntervals.EMPTY:\n width = stop - start\n logWidth = log(width) / divisor\n self._adjustments.append((stop, width - logWidth))\n\n def adjustments(self):\n return self._adjustments\n\n def _reductionForOffset(self, offset):\n \"\"\"Calculate the total reduction for a given X axis offset.\"\"\"\n reduction = 0\n for (thisOffset, thisReduction) in self._adjustments:\n if offset >= thisOffset:\n reduction += thisReduction\n else:\n break\n return reduction\n\n def adjustOffset(self, offset):\n \"\"\"Adjust a single X offset.\"\"\"\n return offset - self._reductionForOffset(offset)\n\n def adjustHSP(self, hsp):\n \"\"\"\n Adjust the read and subject start and end offsets in an HSP.\n\n @param hsp: a L{dark.hsp.HSP} or L{dark.hsp.LSP} instance.\n \"\"\"\n reduction = self._reductionForOffset(\n min(hsp.readStartInSubject, hsp.subjectStart))\n\n hsp.readEndInSubject = hsp.readEndInSubject - reduction\n hsp.readStartInSubject = hsp.readStartInSubject - reduction\n hsp.subjectEnd = hsp.subjectEnd - reduction\n hsp.subjectStart = hsp.subjectStart - 
reduction\n","sub_path":"dark/intervals.py","file_name":"intervals.py","file_ext":"py","file_size_in_byte":3797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"166243710","text":"import os\nimport sys\nimport ctypes\nimport asyncio\n\nfrom cool_utils import JSON\nfrom motor.motor_asyncio import AsyncIOMotorClient\n\nwindows = sys.platform == 'win32'\n\ntry:\n\timport click\nexcept:\n\tif windows:\n\t\tos.system(\"pip install click\")\n\telse:\n\t\tos.system(\"python3 -m pip install click\")\n\ndef _await(function):\n\treturn asyncio.run(function)\n\ndef admin_access():\n\ttry:\n\t\tis_admin = (os.getuid() == 0)\n\texcept AttributeError:\n\t\tis_admin = ctypes.windll.shell32.IsUserAnAdmin() != 0\n\t\n\treturn is_admin\n\n@click.command(\n\tname = 'setup',\n\thelp = \"Setup command to prepare the bot for runtime.\"\n)\n@click.option(\n\t\"-d\",\n\t\"--debug\",\n\tis_flag = True,\n\tdefault = False,\n\thelp = \"Enables debug mode\"\n)\n@click.option(\n\t\"-r\",\n\t\"--reset-db\",\n\tis_flag = True,\n\tdefault = False,\n\thelp = \"Resets the database\"\n)\n@click.option(\n\t\"-t\",\n\t\"--token\",\n\thelp = \"Sets the bot token\"\n)\n@click.option(\n\t\"-m\",\n\t\"--mongo-url\",\n\thelp = \"Stores the mongo url\"\n)\n@click.option(\n\t\"-o\",\n\t\"--owner\",\n\thelp = \"Stores the owner id\"\n)\n@click.option(\n\t\"-cg\",\n\t\"--core-guild\",\n\thelp = \"Stores the Core Guild (Discord Server) id\"\n)\n@click.pass_context\ndef setup(ctx, debug: bool, reset_db: bool, token: str, mongo_url: str, owner: str, core_guild: str):\n\tif reset_db:\n\t\tfor database in _await(AsyncIOMotorClient(mongo_url).list_databases()):\n\t\t\tclick.echo(f\"[DEBUG]: Deleting `{database}` database...\") if debug else None\n\t\t\t_await(AsyncIOMotorClient(mongo_url).drop_database(database))\n\t\tclick.echo(\"All databases deleted.\")\n\n\tJSON.open(\"../mongo.json\")\n\tJSON.write(\"MONGO\", mongo_url)\n\n\ttry:\n\t\tint(owner)\n\texcept:\n\t\tclick.echo(\"Invalid owner id.\")\n\t\tclick.echo(\"[DEBUG]: Aborting setup due to invalid owner id.\") if debug else None\n\t\treturn\n\n\ttry:\n\t\tint(core_guild)\n\texcept:\n\t\tclick.echo(\"Invalid core guild id.\")\n\t\tclick.echo(\"[DEBUG]: Aborting setup due to invalid core guild id.\") if debug else None\n\t\treturn\n\n\t_await(\n\t\tAsyncIOMotorClient(mongo_url)[\"numix\"][\"config\"].insert_one(\n\t\t\t{\n\t\t\t\t\"_id\": \"setup\",\n\t\t\t\t\"owner\": int(owner),\n\t\t\t\t\"core_guild\": int(core_guild),\n\t\t\t\t\"token\": token\n\t\t\t}\n\t\t) if token is not None else AsyncIOMotorClient(mongo_url)[\"numix\"][\"config\"].insert_one(\n\t\t\t{\n\t\t\t\t\"_id\": \"setup\",\n\t\t\t\t\"owner\": int(owner),\n\t\t\t\t\"core_guild\": int(core_guild),\n\t\t\t}\n\t\t)\n\t)\n\n\tif windows:\n\t\tclick.echo(\"[DEBUG]: Running windows pip for dependency install...\") if debug else None\n\t\tos.system(\"pythonw -m pip install -U -r ./requirements.txt\")\n\t\tclick.echo(\"[DEBUG]: Sleeping for 10 seconds to ensure full dependency install...\") if debug else None\n\t\tasyncio.run(asyncio.sleep(10))\n\n\telse:\n\t\tclick.echo(\"[DEBUG]: Running standard pip for dependency install\")\n\t\tos.system(\"nohup python3 -m pip install -U -r ./requirements.txt &\")\n\t\tclick.echo(\"[DEBUG]: Sleeping for 10 seconds to ensure full dependency install...\") if debug else None\n\t\tasyncio.run(asyncio.sleep(10))\n\n\tclick.echo(\"Installed all required Dependencies.\")\n\tclick.echo(\"Setup is complete, 
Exiting...\")\n\n@click.command(\n\tname = \"run\",\n\thelp = \"Runs the Discord Bot.\"\n)\n@click.option(\n\t\"-bg\",\n\t\"--background\",\n\tis_flag = True,\n\thelp = \"Make the bot run on the background instead of terminal.\"\n)\n@click.pass_context\ndef run(ctx, background: bool):\n\tif windows:\n\t\tos.system(\"pythonw ./main.py\") if background else os.system(\"python ./main.py\")\n\n\telse:\n\t\tos.system(\"nohup python3 ./main.py &\") if background else os.system(\"python3 ./main.py\")\n\n@click.command(\n\tname = \"update\",\n\thelp = \"Updates the bot from mainstream.\"\n)\n@click.option(\n\t\"-d\",\n\t\"--debug\",\n\tis_flag = True,\n)\n@click.option(\n\t\"-r\",\n\t\"--reset\",\n\tis_flag = True,\n\thelp = \"Re-installs the bot from github latest build.\"\n)\n@click.pass_context\ndef update(ctx, debug: bool, reset: bool):\n\tif reset:\n\t\tif windows and admin_access():\n\t\t\tclick.echo(\"[DEBUG]: Cloning Numix into temp folder from github...\") if debug else None\n\t\t\tos.system(\"git clone https://github.com/Senarc-Studios/Numix.git ../temp\")\n\t\t\tclick.echo(\"[DEBUG]: Deleting Numix...\") if debug else None\n\t\t\tos.system(\"rmdir /s /q ../Numix\")\n\t\t\tclick.echo(\"[DEBUG]: Moving Numix from temp folder to Numix...\") if debug else None\n\t\t\tos.system(\"move ../temp/Numix ../Numix\")\n\n\t\t\tclick.echo(\"[DEBUG]: Installing dependencies...\") if debug else None\n\t\t\tos.system(\"pythonw -m pip install -U -r ./requirements.txt\")\n\t\t\tclick.echo(\"[DEBUG]: Sleeping for 10 seconds to ensure full dependency install...\") if debug else None\n\t\t\tasyncio.run(asyncio.sleep(10))\n\t\t\tclick.echo(\"Re-installed Numix from latest build.\")\n\t\t\t\n\t\t\tJSON.open(\"../mongo.json\")\n\t\t\tmongo_url = JSON.get(\"MONGO\")\n\n\t\t\tif mongo_url is not None:\n\t\t\t\tfor database in _await(AsyncIOMotorClient(mongo_url).list_databases()):\n\t\t\t\t\tclick.echo(f\"[DEBUG]: Deleting `{database}` database...\") if debug else None\n\t\t\t\t\t_await(AsyncIOMotorClient(mongo_url).drop_database(database))\n\t\t\t\tclick.echo(\"All databases deleted.\")\n\t\t\treturn","sub_path":"Numix/numix.py","file_name":"numix.py","file_ext":"py","file_size_in_byte":4797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"465661480","text":"class Boundary:\n    def __init__(self, p1, p2):\n        # List containing the two endpoints of the boundary.\n        # Each endpoint is an (x,y) tuple.\n        self.ends = [p1, p2]\n\nclass PoolState:\n    def __init__(self):\n        # List of coordinate pair tuples for center of each pocket.\n        self.pockets = []\n\n        # List of Boundary objects for the table boundaries.\n        self.boundaries = []\n\n        # The radius in pixels of each ball\n        self.ball_radius = None\n\n        # Position of the white ball\n        self.white_pos = None\n\n        # Position of the eight ball\n        self.eight_pos = None\n\n        # List of positions of the solid balls\n        self.solids = []\n\n        # List of positions of the stripe balls\n        self.stripes = []\n\n        # Current ball to hit in. 
One of:\n        #   \"SOLID\"\n        #   \"STRIPE\"\n        #   \"BLACK\"\n        self.ball_type = None","sub_path":"state.py","file_name":"state.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"208191281","text":"\"\"\"\nContains logic that simulates a diner (comensal) and groups of diners\n\"\"\"\nfrom menu import platillos_azar, bebidas_azar, Orden\nfrom comun import siguiente_estado, Persona, estado_final\n\nfrom enum import Enum, unique\nfrom threading import Thread, Semaphore\nfrom random import random, randint \nfrom time import sleep\n\n@unique\nclass EstadosComensal(Enum):\n\n    @estado_final\n    def salir(self, *arg, **args):\n        pass\n\n    @siguiente_estado(siguiente = salir)\n    def levantarse(self, this, *arg, **args): \n        grupo = this.orden.grupo\n        grupo.mutex_cuenta3.acquire()\n        grupo.cuenta3 = grupo.cuenta3 + 1\n        if grupo.cuenta3 == len(grupo):\n            grupo.barrera_salir.release()\n            grupo.senalador.release() # tell the group thread to vacate the table\n        grupo.mutex_cuenta3.release()\n        grupo.barrera_salir.acquire()\n        grupo.barrera_salir.release()\n\n    @siguiente_estado(siguiente = levantarse)\n    def pagando(self, this, *arg, **args):\n        grupo = this.orden.grupo\n        grupo.esperar_mesero.acquire() # wait for the bill\n        grupo.esperar_mesero.release()\n        sleep(random() * 2 + .3) # simulates getting money out\n    \n    @siguiente_estado(siguiente = pagando)\n    def termino_comer(self, this, *arg, **args):\n        grupo = this.orden.grupo\n        grupo.mutex_cuenta2.acquire()\n        grupo.cuenta2 = grupo.cuenta2 + 1\n        if grupo.cuenta2 == len(grupo):\n            grupo.barrera_terminar_comer.release()\n            grupo.senalador.release()\n        grupo.mutex_cuenta2.release()\n        grupo.barrera_terminar_comer.acquire()\n        grupo.barrera_terminar_comer.release()\n    \n    \n    @siguiente_estado(siguiente = termino_comer)\n    def comiendo(self, *arg, **args):\n        sleep(random() * 2 + .3) # simulates eating\n\n    @siguiente_estado(siguiente = comiendo)\n    def termino_orden(self, this, *arg, **args):\n        grupo = this.orden.grupo\n        grupo.mutex_cuenta1.acquire()\n        grupo.cuenta1 = grupo.cuenta1 + 1\n        if grupo.cuenta1 == len(grupo):\n            grupo.barrera_terminar_ordenar.release()\n        grupo.dar_orden(this.orden) # pass the order to the group\n        grupo.mutex_cuenta1.release()\n        grupo.barrera_terminar_ordenar.acquire()\n        grupo.barrera_terminar_ordenar.release()\n        grupo.esperar_mesero.acquire() # wait for the food\n        grupo.esperar_mesero.release()\n\n    @siguiente_estado(siguiente = termino_orden)\n    def pidiendo_orden(self, this, *arg, **args): \n        bebidas = bebidas_azar(randint(1, 3))\n        platillos = platillos_azar(randint(1, 4))\n        grupo = this.orden.grupo\n        #print(\"Waiting for the waiter to attend\")\n        grupo.esperar_mesero.acquire() # wait for the waiter to attend; not done yet\n        grupo.esperar_mesero.release()\n        #print(\"Adding my order\")\n        this.orden.anadir_a_orden(bebidas) # critical section\n        this.orden.anadir_a_orden(platillos) # critical section\n    \n    @siguiente_estado(siguiente = pidiendo_orden)\n    def pensando_orden(self, *arg, **args):\n        # print(\"Thinking about the order\")\n        sleep(random() * 2 + .3) # simulates deciding what to order\n\n    @siguiente_estado(siguiente = pensando_orden)\n    def esperando_mesa(self, *arg, **args):\n        # print(\"Waiting for a table\")\n        pass\n\n    inicial = esperando_mesa\n\n    def __str__(self):\n        return str(self.name)\n\nclass Comensal(Persona):\n    \"\"\"\n    Class that represents a person who wants to go eat at the restaurant.\n    The thread is started once the individual is seated at the table\n    \"\"\"\n    def __init__(self, id):\n        super().__init__(id, EstadosComensal)\n\n    def 
recibir_orden(self, orden):\n        self.orden = orden\n    \nclass EstadosGrupo(Enum):\n\n    @estado_final\n    def final(self, this, *arg, **argv):\n        pass\n\n    @siguiente_estado(siguiente=final)\n    def salir(self, this, *arg, **argv):\n        this.orden.mesa.desocupar_mesa() # vacates the table\n        print(\"El grupo\", this, \"desocupa la mesa\", this.orden.mesa)\n\n    @siguiente_estado(siguiente = salir)\n    def pedir_cuenta(self, this, *arg, **argv):\n        this.orden.servicio.pedir_cuenta(this.orden.mesa)\n        this.senalador.acquire() # the waiter brings the bill\n        this.esperar_mesero.release() # releases the member threads so they can pay\n        print(\"Pidieron la cuenta en la mesa\", this.orden.mesa)\n        this.senalador.acquire() # waits until everyone has paid\n    \n    @siguiente_estado(siguiente = pedir_cuenta)\n    def esperar_todos_terminen_comer(self, this, *arg, **argv):\n        this.senalador.acquire() # waits until everyone finishes eating\n        print(\"En la mesa\", this.orden.mesa, \"todos han terminado de comer\")\n\n    @siguiente_estado(siguiente = esperar_todos_terminen_comer)\n    def esperar_comida(self, this, *arg, **argv):\n        this.senalador.acquire() # waits for the food to arrive\n        this.esperar_mesero.release() # lets everyone eat\n\n    @siguiente_estado(siguiente = esperar_comida)\n    def esperar_mesero_pedir_orden(self, this, *arg, **argv):\n        this.orden.servicio.tomar_orden(this.orden.mesa)\n        this.senalador.acquire() # waits for the waiter to come take the order\n        this.esperar_mesero.release() # lets everyone place their order\n\n    @siguiente_estado(siguiente = esperar_mesero_pedir_orden)\n    def sentar_comensales(self, this, *arg, **argv):\n        print(\"El grupo de personas\", this, \"ha adquirido la mesa\", this.orden.mesa.numero_mesa)\n        for comensal in this.comensales:\n            comensal.recibir_orden(this.orden)\n            comensal.start()\n\n    @siguiente_estado(siguiente = sentar_comensales)\n    def esperar_mesa(self, this, *arg, **argv):\n        print(\"El grupo de personas\", this, \"se encuentra esperando una mesa\")\n        this.orden = this.servicio.adquirir_mesa(this) \n\n    inicial = esperar_mesa\n\n    def __str__(self):\n        return str(self.name)\n\nclass Grupo(Persona):\n    \"\"\"\n    Class that represents a group of people who go to a restaurant together;\n    a Grupo consists of one or more people.\n    This class is in charge of making the group wait for a table and go sit at one. 
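The group coordinates its members with the semaphore-based barriers built in
    __init__ (barrera_terminar_ordenar, barrera_terminar_comer, barrera_salir),
    so each phase finishes for every member before the next one starts.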
\n    Once the people leave, this thread finishes\n\n    Attributes:\n        comensales list(Comensal): List of diners representing a small group of people who went out to eat together\n    \"\"\"\n    def __init__(self, id, n, servicio):\n        \"\"\"\n        Creates a group with n diners\n        \"\"\"\n        Persona.__init__(self, id, EstadosGrupo)\n        self.comensales = [Comensal(i + 1) for i in range(n)]\n        self.servicio = servicio\n        self.cuenta1 = self.cuenta2 = self.cuenta3 = 0\n        self.mutex_cuenta1 = Semaphore(1) # mutex for the finish-ordering barrier\n        self.mutex_cuenta2 = Semaphore(1) # mutex for the finish-eating barrier\n        self.mutex_cuenta3 = Semaphore(1) # mutex for the leaving barrier\n        self.barrera_terminar_ordenar = Semaphore(0)\n        self.barrera_terminar_comer = Semaphore(0)\n        self.barrera_salir = Semaphore(0)\n        self.senalador = Semaphore(0) # signals the group thread (e.g. to ask the waiter for the bill)\n        self.esperar_mesero = Semaphore(0)\n\n    def dar_orden(self, orden):\n        self.servicio.dar_orden(orden)\n    \n    def __str__(self):\n        return str(self.id) + ': ' + '; '.join(map(str, self.comensales)) \n\n    def __repr__(self):\n        return str(self.id) + ': ' + '; '.join(map(repr, self.comensales)) \n\n    def __len__(self):\n        return len(self.comensales)\n\nclass Clientes: \n    \"\"\"\n    Class that makes it easy to manage groups of diners\n\n    Attributes:\n        lista_grupos (list(Grupo)): All the groups created\n    \"\"\"\n    def __init__(self, n, m, servicio):\n        '''\n        Creates up to n groups, each with a maximum of m people. Each group can\n        have a different number of people\n        '''\n        self.grupos = [Grupo(i, randint(1, m), servicio) for i in range(randint(1, n))]\n    \n\n    def iniciar(self):\n        for g in self.grupos:\n            g.start()\n\n    def existe_activo(self):\n        \"\"\"\n        Determines whether one of the groups is still waiting for a table\n        \"\"\"\n        return any(filter(lambda g : g.estado != None, self.grupos))\n\n    def __str__(self):\n        return '\\n'.join(map(str, self.grupos))\n\n    def __repr__(self):\n        return '{}\\n{}'.format('\\n'.join(map(repr, self.grupos)), len(self.grupos))\n\n    def __len__(self):\n        return len(self.grupos)\n\nif __name__ == \"__main__\":\n    a = Comensal(1)\n    a.start()\n    while a.is_alive():\n        print(repr(a))\n        sleep(.4)\n","sub_path":"proyectos/2/BarreroPatricio-EspinoHector/codigo/comensal.py","file_name":"comensal.py","file_ext":"py","file_size_in_byte":8558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"101518459","text":"#!/usr/bin/env python\n# encoding: utf-8\n\n\nclass Worker:\n    def __init__(self, workerid, response):\n        self.workerid = workerid\n        self.response = response\n\nclass Worker_1(Worker):\n    def __init__(self, row):\n        Worker.__init__(self, row['WorkerId'], self._extract_response(row))\n\n    def _extract_response(self, row):\n        response = {}\n        for k, v in row.items():\n            if k.startswith('Answer.'):\n                if (k.startswith('Answer.valid')\n                    or k.startswith('Answer.comments')):\n                    response[k[7:]] = v\n                else:\n                    response[k[7:]] = int(v)\n        return response\n\nclass Worker_2(Worker):\n    def __init__(self, workerid, response, eid):\n        Worker.__init__(self, workerid, response)\n        self.eid = eid\n","sub_path":"code/hit/worker.py","file_name":"worker.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"459323687","text":"import csv\r\nimport pandas as pd\r\nimport plotly.express as px\r\nimport math\r\n\r\nwith open(\"Data.csv\", newline=\"\") as f:\r\n    reader = csv.reader(f)\r\n    file_data = list(reader)\r\n\r\n#file_data.pop(0)\r\ndata 
= file_data[0]\r\nprint(\"D: \", data)\r\n\r\ndef mean(data):\r\n total = 0\r\n total_entries = len(data)\r\n for x in data:\r\n total = total + int(x)\r\n\r\n mean = total/total_entries\r\n return(mean)\r\n\r\nm = mean(data)\r\nprint(\"M: \", m)\r\n\r\nsquared_list = []\r\nfor number in data:\r\n a = int(number) - m\r\n a = a**2\r\n squared_list.append(a)\r\n\r\nsum = 0\r\nfor i in squared_list:\r\n sum = sum + i\r\nprint(\"S: \", sum)\r\n\r\nprint(\"L: \", len(data) - 1)\r\n\r\nresult = sum/(len(data) - 1)\r\nprint(\"R: \", result)\r\nstandard_deviations = math.sqrt(result)\r\n\r\nprint(\"Standard Deviation: \", standard_deviations)","sub_path":"StandardDeviation.py","file_name":"StandardDeviation.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"411097208","text":"import os\nimport sys\nimport re\nimport inquirer\nfrom prettytable import from_db_cursor\n\nfrom selector import dispaySelector\nfrom database import connect\n\ndef printError() :\n\tprint(\"oops something went wrong please try again\")\n\nanswer = dispaySelector()\n\n#connecting to the database\nconn = connect()\ncur = conn.cursor()\n\ncur.execute(\"set search_path to moviebooking;\")\n\nflag = True\n\nwhile flag :\n\tos.system('clear')\n\n\n\tif answer['query'] == 1 :\n\t\tq = [\n\t\t\tinquirer.Text('location', message='enter your location'),\n\t\t]\n\t\tans= inquirer.prompt(q)\n\t\ta = ans['location']\n\t\tcur.execute(\"select theatre_id,theatre_name,get_city_name(city_id),get_state_name(city_id) from theatre where city_id = get_city_id('\" + str(a) + \"');\")\n\n\t\tif cur.rowcount == 0:\n\t\t\tprint('no theatres found')\n\t\telse: \n\t\t\tx = from_db_cursor(cur)\n\t\t\tprint(x)\n\t\n\t\n\telif answer['query'] == 2 :\n\t\tq = [\n\t\t\tinquirer.Text('id', message='enter the theatre id'),\n\t\t]\n\t\tans= inquirer.prompt(q)\n\t\ta=str(ans['id'])\n\t\tcur.execute(\"select city_name from location where city_id in(select city_id from theatre where theatre_id=\"+a+\")\")\n\t\tx = from_db_cursor(cur)\n\t\tprint(x)\n\t\tcur.execute(\"select theatre_id,theatre_name from theatre where theatre_id=\"+a+\";\")\n\t\tx = from_db_cursor(cur)\n\t\tprint(x)\n\t\tcur.execute(\"select time,audi,m_name,movie_id,language,duration,rating,genre from (select time,get_audi(audi_id,theatre_id)as audi,get_movie(movie_id)as m_name,movie_id from show where theatre_id=\" + a + \") as one join (select name,language,duration,get_rating(critic_rating,user_rating)as rating,genre from movies where movie_id in (select movie_id from show where theatre_id=\" + a + \")) as two on one.m_name=two.name;\")\n\t\tif cur.rowcount == 0:\n\t\t\tprint('no movie in that theatre found')\n\t\telse: \n\t\t\tx = from_db_cursor(cur)\n\t\t\tprint(x)\n\t\n\t\n\telif answer['query'] == 3 :\n\t\tq = [\n\t\t\tinquirer.Text('id', message='enter the id of city'),\n\t\t]\n\t\tans= inquirer.prompt(q)\n\t\ta=str(ans['id'])\n\t\tcur.execute(\"select * from get_city_name(\"+a+\") as city_name\")\n\t\tx = from_db_cursor(cur)\n\t\tprint(x)\n\t\tcur.execute(\"select get_theatre_name(theatre_id),time,name,language,duration,rating,genre from (select theatre_id,time,audi_id,movie_id from show where theatre_id in (select theatre_id from theatre where city_id=\" + a + \" group by theatre_id)) as one join (select name,language,duration,get_rating(critic_rating,user_rating)as rating,genre,movie_id from movies where movie_id in (select movie_id from (select theatre_id,time,audi_id,movie_id from show 
where theatre_id in (select theatre_id from theatre where city_id=\" + a + \" group by theatre_id))as m)) as two on one.movie_id=two.movie_id;\")\n\t\tif cur.rowcount == 0:\n\t\t\tprint('no movie in that location found')\n\t\telse: \n\t\t\tx = from_db_cursor(cur)\n\t\t\tprint(x)\n\t\n\t\n\telif answer['query'] == 4 :\n\t\tq = [\n\t\t\tinquirer.Text('a', message='Press 1 for all time and 2 for currently screening movies'),\n\t\t]\n\t\tans= inquirer.prompt(q)\n\t\ta = ans['a']\n\t\tif(a=='1'):\n\t\t\tcur.execute(\"select get_movie(movie_id),avg_rating from (select movie_id,get_rating(critic_rating,user_rating) as avg_rating from movies ) as m order by avg_rating DESC;\")\n\t\telse:\n\t\t\tcur.execute(\"select get_movie(movie_id),avg_rating from (select movie_id,get_rating(critic_rating,user_rating) as avg_rating from movies where movie_id in (select distinct(movie_id) from show where (select extract(doy from time)>=(select extract(doy from date '2019-10-31'))))) as m order by avg_rating DESC;\")\t\t\n\t\tif cur.rowcount == 0:\n\t\t\tprint('no movies found') \n\t\telse: \n\t\t\tx = from_db_cursor(cur)\n\t\t\tprint(x)\n\n\n\telif answer['query'] == 5 :\n\t\tcur.execute(\"\"\"\n\t\t\tselect name from actors where email_id in (select actor_id from (select actor_id,avg(avg_rating)::numeric(2,1) as average from\n(select \"cast\".movie_id,actor_id,get_rating(critic_rating,user_rating) as avg_rating \n from \"cast\" join movies on \"cast\".movie_id = movies.movie_id) as m group by actor_id order by average desc) as m) ;\n\t\t\"\"\")\n\t\tif cur.rowcount == 0:\n\t\t\tprint('no actors found')\n\t\telse: \n\t\t\tx = from_db_cursor(cur)\n\t\t\tprint(x)\n\n\t\n\telif answer['query'] == 6 :\n\t\tq = [\n\t\t\tinquirer.Text('id', message='enter your email_id'),\n\t\t]\n\t\tans= inquirer.prompt(q)\n\t\ta=str(ans['id'])\n\t\tcur.execute(\" select seat_payment.time,seat_booking.movie_id,seat_booking.theatre_id,seat_booking.audi_id,seat_booking.seat_number,seat_payment.mode_of_payment from seat_booking join seat_payment on seat_booking.cust_id=seat_payment.cust_id and seat_booking.time=seat_payment.time and seat_booking.audi_id=seat_payment.audi_id and seat_booking.theatre_id=seat_payment.theatre_id and seat_booking.movie_id=seat_payment.movie_id where seat_booking.cust_id in (select cust_id from customers where email_id='\"+a+\"')\")\n\t\tif cur.rowcount == 0:\n\t\t\tprint('no booking found')\n\t\telse: \n\t\t\tx = from_db_cursor(cur)\n\t\t\tprint(x)\n\t\n\t\n\telif answer['query'] == 7 :\n\t\tq = [\n\t\t\tinquirer.Text('name', message='enter the city name')\n\t\t]\n\t\tans= inquirer.prompt(q)\n\t\tcur.execute(\"select theatre_name,theatre_id,rating,get_theatre_traffic(theatre_id) as theatre_traffic, get_audi_count(theatre_id) as total_audi from theatre where theatre_id in (select theatre_id from theatre where city_id=get_city_id('\"+str(ans['name']) +\"'));\")\n\t\tif cur.rowcount == 0:\n\t\t\tprint('no theatre available')\n\t\telse: \n\t\t\tx = from_db_cursor(cur)\n\t\t\tprint(x)\n\n\telif answer['query'] == 8 :\n\t\tcur.execute(\"\"\"\n\t\tselect movie_id,get_movie_collection(movie_id) from (select movie_id from movies)as m\n\t\t \"\"\")\n\t\tif cur.rowcount == 0:\n\t\t\tprint('no movies found')\n\t\telse: \n\t\t\tx = from_db_cursor(cur)\n\t\t\tprint(x)\n\n\n\telif answer['query'] == 10 :\n\t\tq = [\n\t\t\tinquirer.Text('id', message='enter the theatre id'),\n\t\t]\n\t\tans= inquirer.prompt(q)\n\t\tcur.execute(\"select provider_name,provider_type from facility where provider_id in (select provider_id from 
amenities where theatre_id=\"+str(ans['id'])+\")\")\n\t\tif cur.rowcount == 0:\n\t\t\tprint('invalid booking id found')\n\t\telse: \n\t\t\tx = from_db_cursor(cur)\n\t\t\tprint(x)\n\t\n\n\n\telif answer['query'] == 9 :\n\t\tq = [\n\t\t\tinquirer.Text('movie',message='Enter the movie'),\n\t\t\tinquirer.Text('avail', message='Press 1 for getting location wise seat availability and 2 for theatre wise seat availability'),\n\t\t]\n\t\tans= inquirer.prompt(q)\n\t\tif(ans['avail']=='1'):\n\t\t\tq = [\n\t\t\t\tinquirer.Text('id', message='enter the id of city'),\n\t\t\t]\n\t\t\tans2= inquirer.prompt(q)\n\t\t\ta=str(ans2['id'])\n\t\t\tcur.execute(\"select * from get_city_name(\"+a+\") as city_name\")\n\t\t\tx = from_db_cursor(cur)\n\t\t\tprint(x)\n\t\t\tcur.execute(\"select time,audi_id,seat_number from seats where theatre_id in (select theatre_id from theatre where city_id=\"+a+\") and movie_id in (select movie_id from movies where name='\"+ str(ans['movie']) +\"') and is_booked=false;\")\n\t\t\tx = from_db_cursor(cur)\n\t\t\tprint(x)\n\t\t\tcur.execute(\"select time,audi_id,count(seat_number) from seats where theatre_id in (select theatre_id from theatre where city_id=\"+a+\") and movie_id in (select movie_id from movies where name='\"+ str(ans['movie']) +\"') and is_booked=false group by time,audi_id;\")\n\n\t\telse:\n\t\t\tq = [\n\t\t\t\tinquirer.Text('id', message='enter the id of theatre'),\n\t\t\t]\n\t\t\tans2= inquirer.prompt(q)\n\t\t\ta=str(ans2['id'])\n\t\t\tcur.execute(\"select * from get_theatre_name(\"+a+\") as theatre_name\")\n\t\t\tx = from_db_cursor(cur)\n\t\t\tprint(x)\n\t\t\tcur.execute(\"select time,audi_id,seat_number from seats where theatre_id=\"+a+\" and movie_id in (select movie_id from movies where name='\"+ str(ans['movie']) +\"') and is_booked=false;\")\n\t\t\tx = from_db_cursor(cur)\n\t\t\tprint(x)\n\t\t\tcur.execute(\"select time,audi_id,count(seat_number) from seats where theatre_id=\"+a+\" and movie_id in (select movie_id from movies where name='\"+ str(ans['movie']) +\"') and is_booked=false group by time, audi_id;\")\n\t\ttry:\n\t\t\tx = from_db_cursor(cur)\n\t\t\tprint(x)\n\t\texcept:\n\t\t\tprintError()\n\telse: \n\t\tprint(answer)\n\tquestions = [\n        inquirer.List('ans',\n                message=\"do you wish to execute more queries\",\n                choices=['Yes', 'No'],\n            ),\n\t]\n\tcont= inquirer.prompt(questions)\n\tif cont['ans'] == 'Yes':\n\t\tos.system('clear')\n\t\tanswer = dispaySelector()\n\telse: \n\t\tflag = False","sub_path":"main2.py","file_name":"main2.py","file_ext":"py","file_size_in_byte":8053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"416643123","text":"\n# Definition for a Node.\nclass Node(object):\n    def __init__(self, val=0, left=None, right=None, next=None):\n        self.val = val\n        self.left = left\n        self.right = right\n        self.next = next\n\nfrom collections import deque\nclass Solution(object):\n    def connect(self, root):\n        if root == None: return root\n        queue = deque()\n        queue.append(root)\n        while queue:\n            size = len(queue)\n            for i in range(size):\n                l = queue.popleft()\n                if l.left: queue.append(l.left)\n                if l.right: queue.append(l.right)\n                if queue and i != size-1:\n                    l.next = queue[0]\n        return root\n\n'''\nThis solution does not meet the O(1) space-complexity requirement.\nSee other solutions. \n\n'''","sub_path":"Tree/Leetcode_116/lc116.py","file_name":"lc116.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"328571578","text":"#!/usr/bin/python\n\nimport urllib2\nimport 
sys\nimport json\n\n\ndef shorten_url(url):\n    googleAPI = 'https://www.googleapis.com/urlshortener/v1/url'\n    longUrl = {'longUrl': url}\n    headers = {'Content-Type': 'application/json'}\n    req = urllib2.Request(\n        googleAPI,\n        json.dumps(longUrl),\n        headers\n    )\n    res = urllib2.urlopen(req).read()\n    print(json.loads(res)['id'])\n\nurl = sys.argv[len(sys.argv) - 1]\nshorten_url(url)\n","sub_path":"urlshort.py","file_name":"urlshort.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"568998357","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 15 22:24:26 2017\n\n@author: cpkmanchee\n\"\"\"\n\nimport numpy as np\nimport csv\nimport matplotlib.pyplot as plt\n\nfile = 'test_spectrum.csv'\n\nwith open(file, 'r') as f:\n    dataln = csv.reader(f)\n    data = list(dataln)\n    \ndata = np.asarray(data,dtype=np.float32)\nwavelength = data[:,0]\nintensity = data[:,1]-data[:,1].min()\n\nwl1 = 1045\nwl2 = 995\n\nind1 = (np.abs(wl1-wavelength)).argmin()\nind2 = (np.abs(wl2-wavelength)).argmin()\n\nind_s = np.min([ind1,ind2])\nind_e = np.max([ind1,ind2])\n\narea = np.trapz(intensity[ind_s:ind_e],wavelength[ind_s:ind_e])\n\nfig, ax1 = plt.subplots()\nax1.plot(wavelength[700:],intensity[700:])\nax1.fill_between(wavelength[ind_s:ind_e],intensity.min(),intensity[ind_s:ind_e], facecolor='green', alpha=0.5)\nax1.text(wavelength[ind_e],0.67*intensity.max(), 'Area = %.2f' % area , fontsize=15)","sub_path":"beamtools/dev/spectrum_integrate.py","file_name":"spectrum_integrate.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"354071391","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n===============================================================================\nSkyNet TCP Server Object\n===============================================================================\nauthor=hal112358\n-------------------------------------------------------------------------------\n\"\"\"\nfrom __future__ import print_function\nimport numpy as np\nimport threading\nimport socket\nimport struct\nimport time\nimport zlib\nimport sys\nimport os\n\nif int(sys.version[0])==2:\n    import cPickle as pickle\nelse:\n    import pickle\n\n\"\"\"\n===============================================================================\nInitialize Paths and System Variables \n===============================================================================\n\"\"\"\n#------------------------------------------------------------------------------ FIX\nsys_id = 'skynet'\npath_sep = lambda: '/' if os.name != 'nt' else '\\\\'\nabpath = (os.path.abspath(__file__)).split(path_sep())\nfetch_project_path = lambda: '{}'.format(path_sep()).join(\n    abpath[:abpath.index(sys_id)+1])\nPROJECT_PATH = fetch_project_path()\nconfig_path = '{}/initialize/'.format(PROJECT_PATH)\nsys.path.append(config_path)\n\nimport initialize_paths\ninitialize_paths.sys_config()\nfrom initialize_paths import *\nimport sys_log\n\nimport format_packet\n\n\"\"\"\n===============================================================================\nTCP SERVER\n===============================================================================\n\"\"\" \n\nclass TCP_server():\n    \"\"\"\n    Server object that uses the SkyNet encryption protocol specified in the \n    SkyNet class. 
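Incoming packets are length-prefixed (via format_packet.packet_protocol), so
    server_listen() keeps calling recv() until the advertised byte count has
    arrived, acknowledging each chunk with a one-byte reply.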
The communication method being used is TCP for receiving\n    and transmitting individual data packets to a specific address.\n    \"\"\"\n\n    def __init__(self,TCP_HOST,TCP_PORT): \n        self.host = TCP_HOST\n        self.port = TCP_PORT\n        self.data_length = tcp_packet_size\n        self.approved_addr = [uav_addr,ground_station_addr]\n        self.log = sys_log.Log()\n        self.server_name = \"{} SERVER\".format(sys_id.upper())\n        self.protocol = format_packet.packet_protocol()\n\n    #--------------------------------------------------------------------------\n\n    def init_server(self):\n        self.server_socket = socket.socket(socket.AF_INET, \n            socket.SOCK_STREAM)\n        self.server_socket.setsockopt(socket.SOL_SOCKET,\n            socket.SO_REUSEADDR,1)\n\n    def setup_listen(self):\n        self.server_socket.listen(1)\n        self.connection,self.client_addr = self.server_socket.accept()\n        self.verify_client()\n        self.log.log_info('{}: Connected by {}:{}'.format(\n            self.server_name,self.client_addr[0],self.client_addr[1]))\n\n    #--------------------------------------------------------------------------\n\n    def wait_connect(self):\n        # retry until the port can actually be bound\n        while True:\n            try:\n                self.server_socket.bind((self.host, self.port))\n                break\n            except OSError:\n                self.log.log_info(\"Waiting to bind to {}:{}\".format(\n                    self.host,self.port))\n                time.sleep(0.1)\n        self.log.log_info(\"Connected to {}:{}\".format(self.host,self.port))\n\n    #--------------------------------------------------------------------------\n\n    def verify_client(self):\n        if not (self.client_addr[0] in self.approved_addr):\n            self.connection.close()\n\n    #--------------------------------------------------------------------------\n\n    def handle_packet(self,packet):\n        self.log.log_info(\"Received: {}\".format(packet))\n\n    #--------------------------------------------------------------------------\n\n    def server_listen(self):\n        \n        while True:\n            self.setup_listen()\n            try:\n                while True:\n                    input_packet = self.connection.recv(self.data_length)\n                    output_packet = b\"1\"\n                    if len(input_packet):\n                        length,packet = self.protocol.format_input_packet(\n                            input_packet)\n                        self.connection.send(output_packet)\n                        print(length)\n                        while length>self.data_length:\n                            length -= self.data_length\n                            # print(length)\n                            packet += self.connection.recv(length)\n                            self.connection.send(output_packet)\n                        print(len(list(packet)))\n                        print(packet)\n                        print('\\n\\n\\n\\n\\n')\n\n                    #self.connection.send(output_packet) \n                    # self.handle_packet(packet)\n                self.connection.close()\n            except Exception as exc:\n                self.log.log_warn(\"{}\".format(exc))\n\n    #--------------------------------------------------------------------------\n\n    def server_main(self):\n        \"\"\"\n        Activate server to continuously wait to receive data packets\n        \"\"\"\n        self.init_server()\n        self.wait_connect()\n        self.server_listen()","sub_path":"skynet/src/communication/servers/tcp_server.py","file_name":"tcp_server.py","file_ext":"py","file_size_in_byte":5301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"494457957","text":"#-*- coding:utf-8 -*-\n'''\n    @Author: Tomas\n    @Date: 2020-01-15 16:16:42\n    @Last Modified by:   Tomas\n    @Last Modified time: 2020-01-15 16:16:42\n    Desc:\n'''\n\nimport pickle,os\n\nnormal_word={'你是谁':'我是罗伯特'}\nword_symbol=[]\nmodel={'normal':normal_word,'word_symbol':word_symbol}\n\n\nif os.path.isfile('memory.pkl'):\n    with open('./memory.pkl','rb') as f:\n        normal_word,model=pickle.load(f)\n        word_symbol=model['word_symbol']\n\ndef model_save():\n    global normal_word, model\n    with open('memory.pkl','wb') as 
f:\n pickle.dump([normal_word,model],f)\n\nif __name__==\"__main__\":\n model_save()","sub_path":"Model.py","file_name":"Model.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"141395605","text":"from rest_framework.generics import CreateAPIView, ListAPIView\nfrom rest_framework.viewsets import GenericViewSet\nfrom rest_framework.response import Response\n\nfrom .serializers import ContractSerializer, SmartContractSerializer\nfrom .models import Contract\n\n\nclass ContractAPIView(GenericViewSet):\n serializer_class = ContractSerializer\n queryset = Contract.objects.all()\n\n def post(self, request, *args, **kwargs):\n data = request.data\n serializer = self.get_serializer(data=data)\n serializer.is_valid(True)\n serializer.save()\n return Response(data)\n\n def list(self, request, *args, **kwargs):\n queryset = self.get_queryset()\n serializer = self.get_serializer(queryset, many=True)\n return Response(serializer.data)\n\n\nclass SmartContractListAPIView(ListAPIView):\n serializer_class = SmartContractSerializer\n queryset = Contract.objects.all()\n\n def list(self, request, *args, **kwargs):\n queryset = self.get_queryset()\n serializer = self.get_serializer(queryset, many=True)\n return Response(serializer.data)\n","sub_path":"smart_contracts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"624969721","text":"import logging\nimport os\nfrom unittest import mock\n\nfrom django.test import TestCase\n\nfrom scrapers.models.exceptions import FieldMissingException\nfrom scrapers.models.morningstar import MorningStarScraperSettings, MorningStarScraper\n\n\nclass MockResponse(object):\n \"\"\"A mock response returned by Session.get()\n \"\"\"\n def __init__(self, text):\n self.text = text\n\n\nclass MockSession(object):\n \"\"\"Mock session object used to retrieve local html files for testing.\n Functions used to connect to a remote server (mount(), post()) do nothing.\n \"\"\"\n def mount(self, prefix, adapter):\n pass\n\n def post(self, url, data=None, json=None, **kwargs):\n pass\n\n def get(self, url, **kwargs):\n\n symbol = url.split('/')[-1].lower()\n prefix = os.path.join('scrapers/tests/assets/', symbol)\n\n if 'balancesheet' in url:\n filepath = os.path.join(prefix, 'balance_sheet.html')\n else:\n filepath = os.path.join(prefix, 'historicals.html')\n\n with open(filepath) as f:\n return MockResponse(f.read())\n\n raise Exception(\"No file\")\n\n\nclass MorningStarScraperTestCase(TestCase):\n\n def setUp(self):\n logging.basicConfig(level=logging.DEBUG)\n MorningStarScraperSettings.objects.create(\n username='',\n password='',\n login_url='',\n historicals_url='historicals/',\n balance_sheet_url='balancesheet/',\n retries=0,\n max_sleep_time=0)\n\n @mock.patch('requests.session', MockSession)\n def test_scrape(self):\n \"\"\"Scrape an ordinary stock profile.\n \"\"\"\n scraper = MorningStarScraper()\n stock = scraper.scrape('PXS')\n self.assertEqual(stock.return_on_capital, 0.36)\n self.assertEqual(stock.ebit, 15370000)\n self.assertEqual(stock.market_cap, 69000000)\n self.assertEqual(stock.total_debt, 10893000)\n self.assertEqual(stock.cash, 54138000)\n\n stock = scraper.scrape('RMS')\n self.assertEqual(stock.ebit, 21520000)\n self.assertEqual(stock.market_cap, 54000000)\n self.assertEqual(stock.total_debt, 1062000)\n self.assertEqual(stock.cash, 32425000)\n 
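# Note (added): the figures asserted here come from the local HTML fixtures under
        # scrapers/tests/assets/<symbol>/ that MockSession.get() serves in place of
        # real network requests.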
self.assertEqual(stock.return_on_capital, 0.17)\n\n    @mock.patch('requests.session', MockSession)\n    def test_scrape_missing_roc(self):\n        \"\"\"Scrapes a stock whose profile is missing the return on capital\n        field.\n        \"\"\"\n        scraper = MorningStarScraper()\n        with self.assertRaises(AttributeError):\n            scraper.scrape('CBA')\n\n    @mock.patch('requests.session', MockSession)\n    def test_scrape_empty_page(self):\n        scraper = MorningStarScraper()\n\n        with self.assertRaises(FieldMissingException):\n            scraper.scrape('WEC')\n","sub_path":"scrapers/tests/test_morningstar.py","file_name":"test_morningstar.py","file_ext":"py","file_size_in_byte":2829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"521118688","text":"import RPi.GPIO as GPIO\nimport time\nimport datetime\nimport smbus\nimport FaBo9Axis_MPU9250\n\nGPIO.setmode(GPIO.BCM)\npin = 4\n\nbus = smbus.SMBus(1)\n\nmpu9250 = FaBo9Axis_MPU9250.MPU9250()\naccel = mpu9250.readAccel()\nprint(\"accel Z: \" + str(accel['z']))\ntime.sleep(0.1)\n\nbus.write_byte_data(0x68, 0x38, 0x01)\nbus.write_byte_data(0x68, 0x19, 0x00)#SMPLRT_DIV\nbus.write_byte_data(0x68, 0x1D, 0x08)#ACCEL_CONFIG_2\n\ntime_now = datetime.datetime.now().strftime('%m%d_%H%M_%S')\n\nf_n =\"%s_acc_xyz.csv\"%(time_now)\nfp = open(f_n,'w')\n\ndef callBackTest(channel):\n    accel = mpu9250.readAccel()\n    dt_now = datetime.datetime.now()\n    fp.write(str(dt_now)+\",\"+str(accel['x'])+\",\"+str(accel['y'])+\",\"+str(accel['z'])+\"\\n\")\n    #print ( str(dt_now)+\",\"+str(accel['x'])+\",\"+str(accel['y'])+\",\"+str(accel['z']) )\n    #print(\"callback\")\n\nGPIO.setup(pin, GPIO.IN, GPIO.PUD_UP)\nGPIO.add_event_detect(pin, GPIO.FALLING, callback=callBackTest, bouncetime=5) \n\ntry:\n    while(True):\n        time.sleep(1)\n\nexcept KeyboardInterrupt:\n    print(\"break\")\n    fp.close()\n    GPIO.cleanup()","sub_path":"acc_test.py","file_name":"acc_test.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"517164837","text":"d = ['ST','BĐ','BTL','CG','ĐĐ','HBT']\ns = [150300,247100,333300,266800,420900,318000]\n\nwhile True:\n    max = s[0]\n    for i in range(5): \n        if max < s[i+1] :\n            max = s[i+1]\n    \n    print(\"Max: \", max)\n\n    min = s[0]\n    for i in range(5):\n        if min > s[i+1] :\n            min = s[i+1]\n    print(\"Min: \",min)\n    \n    break\n\nprint(\"Highest population:\",d[s.index(420900)])\nprint(\"Lowest population:\",d[s.index(150300)])","sub_path":"Session9/part5.py","file_name":"part5.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"85045392","text":"# https://practice.geeksforgeeks.org/problems/finding-the-numbers0215/1\n\nclass Solution:\n\tdef singleNumber(self, nums):\n\t\t# Code here\n\t\t\n\t\txor_val = 0\n\t\tfor i in nums:\n\t\t    xor_val = xor_val ^ i\n\t\t    \n\t\ttemp = xor_val\n\t\tpos = 0\n\t\twhile temp!=0:\n\t\t    if temp & 1 == 1:\n\t\t        break\n\t\t    pos+=1\n\t\t    temp=temp>>1\n\t\t\n\t\tfirstNum = 0\n\t\tfor no in nums:\n\t\t    if no & (1<<pos)>0:\n\t\t        firstNum = firstNum ^ no\n\t\t\n\t\tsecondNum = xor_val^firstNum\n\t\tans = [firstNum,secondNum]\n\t\tans.sort()\n\t\treturn ans","sub_path":"01. Bit Manipulation/02. Finding the numbers.py","file_name":"02. 
Finding the numbers.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"74404570","text":"from django.conf.urls.defaults import patterns, url\n\nfrom . import views\n\n\ngroup_re = '(?P<group>' + '|'.join(views.SERIES_GROUPS) + ')'\nrange_re = '(?P<start>\\d{8})-(?P<end>\\d{8})'\nformat_re = '(?P<format>' + '|'.join(views.SERIES_FORMATS) + ')'\nseries = dict((type, '^%s-%s-%s\\.%s$' % (type, group_re, range_re, format_re))\n              for type in views.SERIES)\n\nurlpatterns = patterns('',\n    # time series URLs following this pattern:\n    # /addon/{addon_id}/statistics/{series}-{group}-{start}-{end}.{format}\n    url(series['downloads'], views.downloads_series,\n        name='stats.downloads_series'),\n    url(series['usage'], views.usage_series,\n        name='stats.usage_series'),\n    url(series['contributions'], views.contributions_series,\n        name='stats.contributions_series'),\n    url(series['sources'], views.sources_series,\n        name='stats.sources_series'),\n    url(series['os'], views.usage_breakdown_series,\n        name='stats.os_series', kwargs={'field': 'oses'}),\n    url(series['locales'], views.usage_breakdown_series,\n        name='stats.locales_series', kwargs={'field': 'locales'}),\n    url(series['statuses'], views.usage_breakdown_series,\n        name='stats.statuses_series', kwargs={'field': 'statuses'}),\n    url(series['versions'], views.usage_breakdown_series,\n        name='stats.versions_series', kwargs={'field': 'versions'}),\n    url(series['apps'], views.usage_breakdown_series,\n        name='stats.apps_series', kwargs={'field': 'applications'}),\n\n    # special case time series\n    url('^contributions-detail-%s\\.%s$' % (range_re, format_re),\n        views.contributions_detail, name='stats.contributions_detail'),\n)\n","sub_path":"apps/stats/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"109656838","text":"# $ pip3 install requests\nimport requests, json\nfrom datetime import datetime as dt\n\ndef send_slack(msg):\n    # Slack webhook URL\n    webhook_URL = \"https://hooks.slack.com/services/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\"\n\n    # build the payload\n    payload = {\n        \"channel\": \"#general\",\n        \"username\": \"\",\n        \"icon_emoji\": \":hugging_face:\",\n        \"text\": msg,\n    }\n\n    # send it\n    response = requests.post(\n        webhook_URL,\n        data = json.dumps(payload),\n    )\n\n    # result\n    print(response)\n","sub_path":"FastCampus/myslack_module/myslack.py","file_name":"myslack.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"633107657","text":"import pandas as pd\nimport quandl\n\ndf = quandl.get('wiki/googl')\npd.set_option('display.max_columns',None)\ndf = df[['Adj. Open', 'Adj. High', 'Adj. Low', 'Adj. Close', 'Adj. Volume']]\ndf['HL_PCT'] = ((df['Adj. High'] - df['Adj. Close']) / df['Adj. Close']) * 100.0\ndf['Daily_change'] = ((df['Adj. Close'] - df['Adj. Open']) / df['Adj. Open']) * 100.0\npd.set_option('display.max_columns', None)\ndf = df[['HL_PCT', 'Daily_change', 'Adj. Close', 'Adj. 
Volume']]\nprint(df.head())\n#The code ensures that the columns are displayed to their maximum by adding a simple setting option function.\n","sub_path":"dispmaxpandas.py","file_name":"dispmaxpandas.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"274600380","text":"import cherrypy\nimport operator\nimport os\nimport pickle\nimport sys\nfrom genshi.template import TemplateLoader\n\nloader = TemplateLoader(os.path.join(os.path.dirname(__file__), 'templates'),\n                        auto_reload=True)\n\nclass Root(object):\n    \"\"\"description of class\"\"\"\n\n    def __init__(self,data):\n        self.data = data\n\n    @cherrypy.expose\n    def index(self):\n        tmpl = loader.load('index.html')\n        return tmpl.generate(title='broodjesbrengen').render('html', doctype='html')\n\n\ndef main(filename):\n    data = {}\n\n    cherrypy.config.update({\n        'tools.encode.on':True, 'tools.encode.encoding': 'utf-8',\n        'tools.decode.on': True,\n        'tools.trailing_slash.on':True,\n        'tools.staticdir.root': os.path.abspath(os.path.dirname(__file__)),\n    })\n\n    cherrypy.config.update({'server.socket_host': '0.0.0.0'})\n    cherrypy.quickstart(Root(data), '/', {\n        '/media':{\n            'tools.staticdir.on': True,\n            'tools.staticdir.dir':'static'\n        }\n    })\n\nif (__name__ == '__main__'):\n    main(sys.argv[1])\n\n\n","sub_path":"BB/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"348010320","text":"\nfrom keras.models import Model\nfrom keras.layers import Masking, Embedding, Bidirectional, LSTM, Dense, Input, TimeDistributed, Activation,CuDNNLSTM\nfrom keras_contrib.layers import CRF\n\nclass BiLSTMCRF(object):\n\n    def __init__(self, embedding_dims,\n                 maxlen=100,\n                 class_num=10,\n                 last_activation='softmax',\n                 gpu=True,\n                 vocab_size = 111,\n                 hidden_units =100\n                 ):\n        self.hidden_units = hidden_units\n        self.max_len = maxlen\n        self.embedding_dims = embedding_dims\n        self.class_num = class_num\n        self.last_activation = last_activation\n        self.vocab_size = vocab_size\n        self.GPU = gpu\n\n    def get_model(self):\n\n        _input = Input(shape=(self.max_len,), dtype='int32')\n\n        x = Masking(mask_value=0)(_input)\n\n        x = Embedding(self.vocab_size, self.embedding_dims, mask_zero=True)(x)\n\n        if self.GPU:\n            x = Bidirectional(CuDNNLSTM(self.hidden_units, return_sequences=True))(x)  # LSTM or GRU\n            print('cudnnlstm')\n        else:\n            x = Bidirectional(LSTM(self.hidden_units, return_sequences=True))(x)  # LSTM or GRU\n\n        x = TimeDistributed(Dense(self.class_num))(x)\n\n        outputs = CRF(self.class_num)(x)\n\n        model = Model(inputs=_input, outputs=outputs)\n\n        return model\n\n\n\n","sub_path":"bilstm_crf.py","file_name":"bilstm_crf.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"506752157","text":"# -*- coding: utf-8 -*-\r\nimport scrapy\r\nfrom scrapy import Selector\r\nimport simplejson as json\r\nimport requests\r\nimport time\r\n\r\n\r\nclass BNASpider(scrapy.Spider):\r\n    name = \"bna\"\r\n\r\n    def start_requests(self):\r\n        url = 'https://bna.ao/Servicos/cambios_table.aspx'\r\n        \r\n        yield scrapy.Request(url=url, callback=self.parse)\r\n\r\n    def parse(self, response):\r\n        values = {}\r\n        control = 0\r\n        for tr in response.xpath('.//tr'): # extracts all <tr>
inside\r\n value = Selector(text=tr.get()).xpath('//td//text()').getall()\r\n control += 1\r\n if len(value) > 0 and control > 4:\r\n values[value[0]] = value[1:]\r\n\r\n # values[\"timestamp\"] = time.time()\r\n # url = 'http://localhost:5000/bna'\r\n # headers = {'content-type': 'application/json'}\r\n \r\n # r = requests.post(url, data=json.dumps(values), headers=headers)\r\n print(json.dumps(values))","sub_path":"tutorial/spiders/addBnaRate.py","file_name":"addBnaRate.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"587704386","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Nov 19 23:25:29 2019\n\n@author: altun\n\"\"\"\nfrom database import db\nimport requests as rq\nimport os\nimport logging\nfrom bson.json_util import dumps\nimport json\n\n\nAUTOPILOT_APIKEY = os.environ.get('AUTOPILOT_APIKEY').strip()\n\npost_headers = {\n 'autopilotapikey': AUTOPILOT_APIKEY,\n 'Content-Type': 'application/json'\n}\n\nget_headers = {\n 'autopilotapikey': AUTOPILOT_APIKEY,\n}\n\naddContactLink = \"https://api2.autopilothq.com/v1/contact\"\naddToListLink = \"https://api2.autopilothq.com/v1/list/{list_id}/contact/{contact_id}\"\n\nwith open('autopilot_analyticsaudit_emails.txt', 'r') as f:\n autopilot_analyticsaudit_emails = [ x.strip() for x in f.readlines()]\n\nif __name__ == \"__main__\":\n logging.basicConfig(\n filename=\"changestreaming-report-collection.log\",\n filemode='a',\n format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n level=logging.INFO)\n db.init()\n change_stream = db.DATABASE['reports'].watch()\n for change in change_stream:\n logging.info(dumps(change))\n operationType = change['operationType']\n if operationType == 'insert':\n report = change['fullDocument']\n Email = report['email']\n if report.get('package') == 'analytics-audit' and (not Email in autopilot_analyticsaudit_emails):\n score = report.get(\"totalScore\")\n datasource = db.find_one(\"datasource\", {\"_id\":report.get(\"datasourceID\")})\n datasourceName = datasource.get(\"dataSourceName\")\n data = {\n \"contact\": {\n 'Email': Email,\n \"custom\": { \"integer--Analytics--Audit--Score\": score,\n \"string--Datasource--Name\": datasourceName}\n }\n }\n resp = rq.post(addContactLink, data = json.dumps(data), headers=post_headers)\n if resp.status_code == 200:\n logging.info(\n \"SUCCESS - Analytics Audit score for {} is added/updated\"\n .format(Email))\n with open('autopilot_analyticsaudit_emails.txt', 'a') as f:\n f.write(f\"{Email}\\n\")\n autopilot_analyticsaudit_emails += [Email]\n else:\n logging.info(\n \"ERROR - Analytics Audit score for {} cannot be added/updated - Status Code: {}\"\n .format(Email, resp.status_code))\n else:\n logging.info(\n \"INFO - It is not important change - Report Type: {} - Email: {}\".\n format(report.get('package'), Email))\n else:\n logging.info(\n \"INFO - It is not important change - Operation Type: {}\".\n format(operationType))\n","sub_path":"reports_changestream_autopilot.py","file_name":"reports_changestream_autopilot.py","file_ext":"py","file_size_in_byte":2950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"518080482","text":"from ftw.upgrade import UpgradeStep\nfrom opengever.document.behaviors import IBaseDocument\nfrom plone import api\n\n\nclass AddGeverDocUidCatalogIndex(UpgradeStep):\n \"\"\"Add gever_doc_uid catalog index.\n \"\"\"\n\n index_name = 'gever_doc_uid'\n\n deferrable = True\n\n 
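# Note (added): `deferrable = True` appears to mark this ftw.upgrade step as
    # one that may be postponed and executed separately from the main upgrade
    # run, which suits a potentially long catalog reindex like the one in
    # __call__() below.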
def __call__(self):\n        self.install_upgrade_profile()\n\n        if not self.catalog_has_index(self.index_name):\n            self.catalog_add_index(self.index_name, 'FieldIndex')\n\n        catalog = api.portal.get_tool('portal_catalog')\n        workspace_roots = catalog.unrestrictedSearchResults(\n            portal_type='opengever.workspace.root')\n        query = {\n            'object_provides': IBaseDocument.__identifier__,\n            'path': {'query': [root.getPath() for root in workspace_roots]},\n        }\n\n        for obj in self.objects(\n                query, u'Reindex gever_doc_uid for workspace documents'):\n\n            obj.reindexObject(idxs=[self.index_name])\n","sub_path":"opengever/core/upgrades/20210730105957_add_gever_doc_uid_catalog_index/upgrade.py","file_name":"upgrade.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"355787066","text":"from selenium.webdriver.common.by import By\nfrom public.po.base_view import BaseView\nimport logging\nfrom log.log import logger\nfrom public.common.desired_caps import desired\nfrom public.po.login_page import LoginPage\nfrom public.common.do_excel import ReadExcel\nfrom public.common.adb_shell import AdbShell\n\n# instantiate the logger\nlogger = logger(__name__, Cmdlevel=logging.INFO, Filelevel=logging.INFO)\n\nclass RegisterPage(BaseView):\n    id_card_xpath = \"/hierarchy/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.view.View/android.view.View/android.view.View/android.view.View/android.view.View/android.widget.EditText[2]\"\n    name_text_element = (By.CLASS_NAME, \"android.widget.EditText\")\n    id_card_text_element = (By.XPATH, id_card_xpath)\n    next_btn_element = (By.XPATH, \"//*[@text='下一步']\")\n    server_url = \"http://127.0.0.1:4723/wd/hub\"\n\n    def __init__(self, driver):\n        super().__init__(driver)\n\n    # name text field\n    def name_text(self, name_value):\n        name_text = self.wait_find_element(*self.name_text_element)\n        # print(\"MobileText\", MobileText.text)\n        name_text.click()\n        try:\n            AdbShell.input_text(name_value)\n            # MobileText.send_keys(mobileValue)\n            logger.info(\"name_text is setValues!\")\n            self.get_screeShot()\n        except:\n            logger.info(\"姓名输入失败!\")  # name input failed\n\n    # ID card number text field\n    def id_card_text(self, id_card_value):\n        id_card_text = self.wait_find_element(*self.id_card_text_element)\n        # print(\"MobileText\", CodeText.text)\n        id_card_text.click()\n        try:\n            AdbShell.input_text(id_card_value)\n            logger.info(\"id_card_text is setValues!\")\n            self.get_screeShot()\n        except:\n            logger.info(\"验证码输入失败!\")  # verification code input failed\n\n    # 'Next' button\n    def next_btn(self):\n        next_btn = self.wait_find_element(*self.next_btn_element)\n        next_btn.click()\n        logger.info(\"next_btn is click\")\n\n    # registration flow\n    def register(self):\n        try:\n            self.wait_activity(\".MainActivity\", 30)\n            name_value = ReadExcel(\"register.xlsx\", \"Sheet1\").read_excel(1, 0)\n            id_card_value = int(ReadExcel(\"register.xlsx\", \"Sheet1\").read_excel(1, 1))\n            self.name_text(name_value)\n            self.id_card_text(id_card_value)\n            self.next_btn()\n            return \"注册成功\"  # registration succeeded\n        except:\n            print(\"注册失败\")  # registration failed\n\n\n# debugging / manual run\nif __name__ == '__main__':\n    driver = desired()\n    Login = LoginPage(driver)\n    Login.login()\n    register = RegisterPage(driver)\n    register.register()\n","sub_path":"public/po/register_page.py","file_name":"register_page.py","file_ext":"py","file_size_in_byte":2673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"56525174","text":"from functools import partial\n\nfrom retail_calc_demo.constant import GET_PARAMS_ERROR_MESSAGE, \\\n    PRICE_GET_PARAM_LABEL, 
QUANTITY_GET_PARAM_LABEL, STATE_CODE_GET_PARAM_LABEL, \\\n UNKNOWN_STATE_CODE_GET_PARAM_ERROR_MESSAGE\nfrom .conftest import INVALID_DECIMAL_VALUE_MESSAGE, \\\n INVALID_VALUE_MESSAGE, REQUIRED_FIELD_MISSING_MESSAGE\n\n\nasync def test_calc_view(app_client):\n get = partial(app_client.get, '/calc')\n\n # Check params validation.\n async def check_params(params, error_messages):\n response = await get(params=params)\n assert response.status == 400\n assert await response.json() == {\n 'identity': GET_PARAMS_ERROR_MESSAGE,\n 'data': error_messages,\n }\n\n await check_params(\n None,\n {\n PRICE_GET_PARAM_LABEL: [REQUIRED_FIELD_MISSING_MESSAGE],\n QUANTITY_GET_PARAM_LABEL: [REQUIRED_FIELD_MISSING_MESSAGE],\n STATE_CODE_GET_PARAM_LABEL: [REQUIRED_FIELD_MISSING_MESSAGE],\n }\n )\n\n await check_params(\n {\n PRICE_GET_PARAM_LABEL: 'one',\n QUANTITY_GET_PARAM_LABEL: -1,\n # Empty state code may be presented in config file.\n STATE_CODE_GET_PARAM_LABEL: '',\n },\n {\n PRICE_GET_PARAM_LABEL: [INVALID_DECIMAL_VALUE_MESSAGE],\n QUANTITY_GET_PARAM_LABEL: [INVALID_VALUE_MESSAGE],\n }\n )\n\n await check_params(\n {\n PRICE_GET_PARAM_LABEL: 1,\n QUANTITY_GET_PARAM_LABEL: 1,\n STATE_CODE_GET_PARAM_LABEL: '',\n },\n # Empty state code is not presented in config file.\n [\n UNKNOWN_STATE_CODE_GET_PARAM_ERROR_MESSAGE\n ],\n )\n\n # Check request with valid params.\n response = await get(\n params={\n PRICE_GET_PARAM_LABEL: 0.3,\n QUANTITY_GET_PARAM_LABEL: 10,\n STATE_CODE_GET_PARAM_LABEL: 'UT',\n }\n )\n assert response.status == 200\n assert await response.json() == {\n 'subtotal_with_discount': 3.0,\n 'total': 3.2055,\n }\n\n\nasync def test_state_codes_view(app_client):\n response = await app_client.get('/state_codes')\n assert response.status == 200\n assert sorted(await response.json()) == ['AL', 'CA', 'NV', 'TX', 'UT']\n","sub_path":"retail_calc_demo/tests/test_view.py","file_name":"test_view.py","file_ext":"py","file_size_in_byte":2290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"175239072","text":"\"\"\"General utility functions that are used in multiple modules\"\"\"\nimport numpy as np\nfrom scipy.stats import binned_statistic\nfrom astropy import units as u\n\n\ndef compute_radial_averages(xvals, yvals, xbins, error_model='std/sqrt_n'):\n \"\"\" Given a list of xvals, yvals and bins, sort into bins. If xvals or yvals\n contain non-finite values, these are filtered.\n\n Parameters\n ----------\n xvals : array_like\n Values to be binned\n yvals : array_like\n Values to compute statistics on\n xbins: array_like\n Bin edges to sort into\n error_model : str, optional\n Error model to use for y uncertainties.\n std/sqrt_n - Standard Deviation/sqrt(Counts) (Default)\n std - Standard deviation\n\n Returns\n -------\n meanx : array_like\n Mean x value in each bin\n meany : array_like\n Mean y value in each bin\n yerr : array_like\n Error on the mean y value in each bin. Specified by error_model\n num_objects : array_like\n Number of objects in each bin\n binnumber: 1-D ndarray of ints\n Indices of the bins (corresponding to `xbins`) in which each value\n of `xvals` belongs. Same length as `yvals`. 
A binnumber of `i` means the\n corresponding value is between (xbins[i-1], xbins[i]).\n \"\"\"\n # binned_statistic throws an error on non-finite values, so those are filtered out first\n filt = np.isfinite(xvals)*np.isfinite(yvals)\n\n meanx, xbins, binnumber = binned_statistic(xvals[filt], xvals[filt], statistic='mean', bins=xbins)[:3]\n meany = binned_statistic(xvals[filt], yvals[filt], statistic='mean', bins=xbins)[0]\n # number of objects\n num_objects = np.histogram(xvals[filt], xbins)[0]\n n_zero = num_objects==0\n\n if error_model == 'std':\n yerr = binned_statistic(xvals[filt], yvals[filt], statistic='std', bins=xbins)[0]\n elif error_model == 'std/sqrt_n':\n yerr = binned_statistic(xvals[filt], yvals[filt], statistic='std', bins=xbins)[0]\n sqrt_n = np.sqrt(binned_statistic(xvals[filt], yvals[filt], statistic='count', bins=xbins)[0])\n sqrt_n[n_zero] = 1.0\n yerr = yerr/sqrt_n\n else:\n raise ValueError(f\"{error_model} is not a supported error model for binned stats\")\n\n meanx[n_zero] = 0\n meany[n_zero] = 0\n yerr[n_zero] = 0\n\n return meanx, meany, yerr, num_objects, binnumber\n\n\ndef make_bins(rmin, rmax, nbins=10, method='evenwidth', source_seps=None):\n \"\"\" Define bin edges\n\n Parameters\n ----------\n rmin : float\n Minimum bin edge wanted\n rmax : float\n Maximum bin edge wanted\n nbins : int\n Number of bins to create, defaults to 10.\n method : str, optional\n Binning method to use\n 'evenwidth' - Default, evenly spaced bins between rmin and rmax\n 'evenlog10width' - Log-spaced bins with even width in log10 between rmin and rmax\n 'equaloccupation' - Bins with equal occupation numbers\n source_seps : array-like\n Radial distances of source separations\n\n Returns\n -------\n binedges: array_like, float\n n_bins+1 dimensional array that defines bin edges\n \"\"\"\n if (rmin > rmax) or (rmin < 0.0) or (rmax < 0.0):\n raise ValueError(f\"Invalid bin endpoints in make_bins, {rmin} {rmax}\")\n if (nbins <= 0) or not isinstance(nbins, int):\n raise ValueError(f\"Invalid nbins={nbins}. 
Must be integer greater than 0.\")\n\n if method == 'evenwidth':\n binedges = np.linspace(rmin, rmax, nbins+1, endpoint=True)\n elif method == 'evenlog10width':\n binedges = np.logspace(np.log10(rmin), np.log10(rmax), nbins+1, endpoint=True)\n elif method == 'equaloccupation':\n if source_seps is None:\n raise ValueError(f\"Binning method '{method}' requires source separations array\")\n # by default, keep all galaxies\n mask = np.full(len(source_seps), True)\n if rmin is not None or rmax is not None:\n # Need to filter source_seps to only keep galaxies in the [rmin, rmax]\n if rmin is None: rmin = np.min(source_seps)\n if rmax is None: rmax = np.max(source_seps)\n mask = (np.array(source_seps)>=rmin)*(np.array(source_seps)<=rmax)\n binedges = np.percentile(source_seps[mask], tuple(np.linspace(0,100,nbins+1, endpoint=True)))\n else:\n raise ValueError(f\"Binning method '{method}' is not currently supported\")\n\n return binedges\n\n\ndef convert_units(dist1, unit1, unit2, redshift=None, cosmo=None):\n \"\"\" Convenience wrapper to convert between a combination of angular and physical units.\n\n Supported units: radians, degrees, arcmin, arcsec, Mpc, kpc, pc\n\n To convert between angular and physical units you must provide both\n a redshift and a cosmology object.\n\n Parameters\n ----------\n dist1 : array_like\n Input distances\n unit1 : str\n Unit for the input distances\n unit2 : str\n Unit for the output distances\n redshift : float\n Redshift used to convert between angular and physical units\n cosmo : astropy.cosmology\n Astropy cosmology object to compute angular diameter distance to\n convert between physical and angular units\n\n Returns\n -------\n dist2: array_like\n Input distances converted to unit2\n \"\"\"\n angular_bank = {\"radians\": u.rad, \"degrees\": u.deg, \"arcmin\": u.arcmin, \"arcsec\": u.arcsec}\n physical_bank = {\"pc\": u.pc, \"kpc\": u.kpc, \"Mpc\": u.Mpc}\n units_bank = {**angular_bank, **physical_bank}\n\n # Some error checking\n if unit1 not in units_bank:\n raise ValueError(f\"Input units ({unit1}) not supported\")\n if unit2 not in units_bank:\n raise ValueError(f\"Output units ({unit2}) not supported\")\n\n # Try automated astropy unit conversion\n try:\n return (dist1*units_bank[unit1]).to(units_bank[unit2]).value\n\n # Otherwise do manual conversion\n except u.UnitConversionError:\n # Make sure that we were passed a redshift and cosmology\n if redshift is None or cosmo is None:\n raise TypeError(\"Redshift and cosmology must be specified to convert units\")\n\n # Redshift must be greater than zero for this approx\n if not redshift > 0.0:\n raise ValueError(\"Redshift must be greater than 0.\")\n\n if (unit1 in angular_bank) and (unit2 in physical_bank):\n # Convert angular to physical\n dist1_rad = (dist1*units_bank[unit1]).to(u.rad).value\n dist1_mpc = cosmo.rad2mpc(dist1_rad, redshift)\n return (dist1_mpc*u.Mpc).to(units_bank[unit2]).value\n else:\n # Otherwise physical to angular\n dist1_mpc = (dist1*units_bank[unit1]).to(u.Mpc).value\n dist1_rad = cosmo.mpc2rad(dist1_mpc, redshift)\n return (dist1_rad*u.rad).to(units_bank[unit2]).value\n\n\ndef convert_shapes_to_epsilon(shape_1,shape_2, shape_definition='epsilon',kappa=0):\n \"\"\" Given shapes and their definition, convert them to epsilon ellipticities or reduced shears, which can be used in GalaxyCluster.galcat\n Definitions used here based on Bartelmann & Schneider 2001 (https://arxiv.org/pdf/astro-ph/9912508.pdf):\n axis ratio (q) and position angle (phi) (Not implemented)\n epsilon = 
(1-q)/(1+q) exp(2i phi)\n chi = (1-q^2)/(1+q^2) exp(2i phi)\n shear(gamma)\n reduced_shear(g) = gamma/(1-kappa)\n convergence(kappa)\n\n\n Parameters\n ----------\n shape_1 : array_like\n Input shapes or shears along principal axis (g1 or e1)\n shape_2 : array_like\n Input shapes or shears along secondary axis (g2 or e2)\n shape_definition : str\n Definition of the input shapes, can be ellipticities 'epsilon' or 'chi' or shears 'shear' or 'reduced_shear'\n kappa : array_like\n Convergence for transforming to a reduced shear. Default is 0\n\n Returns\n -------\n epsilon_1 : array_like\n Epsilon ellipticity along principal axis (epsilon1)\n epsilon_2 : array_like\n Epsilon ellipticity along secondary axis (epsilon2)\n \"\"\"\n\n if shape_definition=='epsilon' or shape_definition=='reduced_shear':\n return shape_1,shape_2\n elif shape_definition=='chi':\n chi_to_eps_conversion = 1./(1.+(1-(shape_1**2+shape_2**2))**0.5)\n return shape_1*chi_to_eps_conversion,shape_2*chi_to_eps_conversion\n elif shape_definition=='shear':\n return shape_1/(1.-kappa), shape_2/(1.-kappa)\n\n else:\n raise TypeError(\"Please choose epsilon, chi, shear, reduced_shear\")\n\n\ndef build_ellipticities(q11,q22,q12):\n \"\"\" Build ellipticities from second moments. See, e.g., Schneider et al. (2006)\n\n Parameters\n ----------\n q11 : float or array\n Second brightness moment tensor, component (1,1)\n q22 : float or array\n Second brightness moment tensor, component (2,2)\n q12 : float or array\n Second brightness moment tensor, component (1,2)\n\n Returns\n -------\n x1, x2 : float or array\n Ellipticities using the \"chi definition\"\n e1, e2 : float or array\n Ellipticities using the \"epsilon definition\"\n \"\"\"\n\n x1,x2 = (q11-q22)/(q11+q22),(2*q12)/(q11+q22)\n e1,e2 = (q11-q22)/(q11+q22+2*np.sqrt(q11*q22-q12*q12)),(2*q12)/(q11+q22+2*np.sqrt(q11*q22-q12*q12))\n return x1,x2, e1,e2\n\n\ndef compute_lensed_ellipticity(ellipticity1_true, ellipticity2_true, shear1, shear2, convergence):\n r\"\"\" Compute lensed ellipticities from the intrinsic ellipticities, shear and convergence.\n Following Schneider et al. (2006)\n\n .. 
math::\n \\epsilon^{\\rm lensed}=\\epsilon^{\\rm lensed}_1+i\\epsilon^{\\rm lensed}_2=\\frac{\\epsilon^{\\rm true}+g}{1+g^\\ast\\epsilon^{\\rm true}},\n\n where, the complex reduced shear :math:`g` is obtained from the shear :math:`\\gamma=\\gamma_1+i\\gamma_2`\n and convergence :math:`\\kappa` as :math:`g = \\gamma/(1-\\kappa)`, and the complex intrinsic ellipticity\n is :math:`\\epsilon^{\\rm true}=\\epsilon^{\\rm true}_1+i\\epsilon^{\\rm true}_2`\n\n\n Parameters\n ----------\n ellipticity1_true : float or array\n Intrinsic ellipticity of the sources along the principal axis\n ellipticity2_true : float or array\n Intrinsic ellipticity of the sources along the second axis\n shear1 : float or array\n Shear component (not reduced shear) along the principal axis at the source location\n shear2 : float or array\n Shear component (not reduced shear) along the 45-degree axis at the source location\n convergence : float or array\n Convergence at the source location\n Returns\n -------\n e1, e2 : float or array\n Lensed ellipicity along both reference axes.\n \"\"\"\n\n shear = shear1+shear2*1j # shear (as a complex number)\n ellipticity_true = ellipticity1_true+ellipticity2_true*1j # intrinsic ellipticity (as a complex number)\n reduced_shear = shear/(1.0-convergence) # reduced shear\n e = (ellipticity_true+reduced_shear)/(1.0+reduced_shear.conjugate()*ellipticity_true) # lensed ellipticity\n return np.real(e), np.imag(e)\n\n\ndef arguments_consistency(arguments, names=None, prefix=''):\n r\"\"\"Make sure all arguments have the same length (or are scalars)\n\n Parameters\n ----------\n arguments: list, arrays, tuple\n Group of arguments to be checked\n names: list, tuple\n Names for each array, optional\n prefix: str\n Customized prefix for error message\n\n Returns\n -------\n list, arrays, tuple\n Group of arguments, converted to numpy arrays if they have length\n \"\"\"\n sizes = [len(arg) if hasattr(arg, '__len__') else None for arg in arguments]\n # check there is a name for each argument\n if names:\n if len(names)!=len(arguments):\n raise TypeError(f'names (len={len(names)}) must have same length as arguments (len={len(arguments)})')\n msg = ', '.join([f'{n}({s})' for n, s in zip(names, sizes)])\n else:\n msg = ', '.join([f'{s}' for s in sizes])\n # check consistency\n if any(sizes):\n if not all(sizes) or any([s!=sizes[0] for s in sizes[1:]]): # Check that all of the inputs have length and they match\n # make error message\n raise TypeError(f'{prefix} inconsistent sizes: {msg}')\n return tuple(np.array(arg) for arg in (arguments))\n return arguments\n","sub_path":"clmm/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":12264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"258252302","text":"#!/usr/bin/env python3\nfrom enum import Enum\nimport sys\n\n\nclass Node:\n def __init__(self, name, parent_name, level):\n self.name = name\n self.parent_name = parent_name\n self.visited = False\n self.level = level\n\n\ndef read_graph(graph_file):\n global incoming_edges\n graph = dict()\n with open(graph_file, 'r') as f:\n for line in f:\n line = line.rstrip().split(')')\n key, value = line[0], line[1]\n node = Node(value, key, -1)\n if key in graph:\n graph[key].append(node)\n else:\n graph[key] = [node]\n\n if value not in graph: \n graph[value] = []\n return graph\n\n\ndef fill_in_levels(graph):\n fill_in_levels_recur(graph, graph['COM'], 1)\n\n\ndef fill_in_levels_recur(graph, nodes, level):\n if len(nodes) == 0:\n 
return\n\n for node in nodes:\n fill_in_levels_recur(graph, graph[node.name], level+1)\n node.level = level\n\n\ngraph = read_graph('input.txt')\nfill_in_levels(graph)\n\n'''\nfor (key,value) in graph.items():\n print('==================')\n print(key)\n for n in value:\n print(n.name, n.visited, n.parent_name, n.level)\n'''\n\n\ndef get_node(node_name, graph):\n for value in graph.values():\n for v in value:\n if v.name == node_name:\n return v\n return None\n\n\ndef find_santa(you, san, graph):\n curr_node = get_node(you.parent_name, graph)\n res = []\n while True:\n curr_node.visited = True\n if curr_node.name == san.name:\n # santa may be on the same branch\n return res\n\n if curr_node.level >= san.level:\n # back up to parent of the current node\n res.append(curr_node)\n curr_node = get_node(curr_node.parent_name, graph)\n continue\n\n # go down to a node\n if go_down(curr_node, res, graph):\n break\n else:\n res.append(curr_node)\n curr_node = get_node(curr_node.parent_name, graph)\n\n return res\n\n\ndef go_down(node, res_lst, graph):\n if node.name == 'SAN': # found santa\n return True\n\n if len(graph[node.name]) == 0: # can not go down any more\n return False\n \n node.visited = True\n for child in graph[node.name]:\n if child.visited: # skip branch that has already been visited\n continue\n\n res_lst.append(child)\n if go_down(child, res_lst, graph):\n return True\n res_lst.pop()\n\n return False\n\n\nyou = get_node('YOU', graph)\nsan = get_node('SAN', graph)\n\nif you and san:\n print(len(find_santa(you, san, graph)) - 1)\nelse:\n print('could not find you or san')\n\n\n'''\nStart at YOU\nBack up to SAN level minus one\nExplore all children of the that node (not including the one you came from)\nIf not found back up again (only visit up to the depth of SAN)\nAgain ignore the branch you just came from\n'''\n\n'''\nres = 0\nfor nodes in graph.values():\n for node in nodes:\n res += node.level\n\nprint(res)\n'''\n","sub_path":"2019/python/6.py","file_name":"6.py","file_ext":"py","file_size_in_byte":2771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"120798346","text":"from __future__ import print_function\n\nimport glob\nimport inspect\nimport pickle\nimport random\nimport re\nimport sys\nfrom os import listdir\nfrom os.path import isfile, join\n\nimport h5py\nimport matplotlib.pyplot as plt\nimport numpy\nimport numpy as np\nimport pandas as pd\nfrom numpy import linalg as la\nfrom scipy import io\nfrom scipy.linalg import logm, inv\nfrom sklearn.model_selection import KFold\nfrom sklearn.preprocessing import MaxAbsScaler\n\n\n# read subjects' demographic data\ndef read_dem_data(subnums=None):\n # Demographic data\n behavioral = pd.read_csv('data/hcp/behavioral.csv')\n restricted = pd.read_csv('data/hcp/restricted.csv')\n\n if subnums is not None:\n # Specifying indices of overlapping subjects with subnums\n subInd = np.where(np.isin(restricted[\"Subject\"], subnums))[0]\n\n # Only using data from subnums\n restricted = restricted.reindex(subInd)\n behavioral = behavioral.reindex(subInd)\n\n return restricted, behavioral\n\n\n# read face-emotional data from HCP900 data\ndef read_HCP900_data():\n filepath = 'data/cfHCP900_FSL_GM/cfHCP900_FSL_GM.mat'\n taskdata = {}\n f = h5py.File(filepath, 'r')\n for k, v in f.items():\n taskdata[k] = np.array(v) # dim: 268 x 268 x 9 , TODO: only read task of interest\n\n # making task name accessible\n for j in range(len(f['SESSIONS'])):\n st = f['SESSIONS'][j][0]\n obj = f[st]\n name = 
''.join(chr(i) for i in obj[:])\n taskdata['SESSIONS'][j] = name\n\n # reading necessary data from hd5f file\n taskIDs = taskdata['IDS'].reshape(-1).astype(int)\n taskCorr = taskdata['CORR']\n taskNames = taskdata['SESSIONS']\n\n # closing hdf5 file to avoid errors later if attempting to open on a new thread\n import gc\n\n for obj in gc.get_objects(): # Browse through ALL objects\n if isinstance(obj, h5py.File): # Just HDF5 files\n try:\n obj.close() # close the file\n except:\n pass # Was already closed\n\n return taskIDs, taskCorr, taskNames, taskdata\n\n\n# read connectivity matrices from .txt, .npy, .mat files\ndef read_mat_data(dataDir, toi=[]):\n \"\"\"Reads in matrix data from a data directory.\n\n :param dataDir: (filepath) data directory\n :param toi: (list) tasks of interest\n :return:\n \"\"\"\n data = [] # Allocating list for connectivity matrices\n subnums = [] # subject IDs\n\n # Names of only the connectivity matrix files in the folder\n eb = 'data/edge_betweenness'\n if dataDir == eb: # for .mat file\n filenames = [] # TODO: ensure these filenames get sorted\n if not not glob.glob(f'{dataDir}/{\"*.npy\"}'):\n for file in glob.glob(f'{dataDir}/{\"*.npy\"}'):\n if file == f'{dataDir}/eb_data.npy':\n data = list(np.load(file))\n elif file == f'{dataDir}/eb_subnums.npy':\n subnums = list(np.load(file))\n\n else:\n for i, subdir in enumerate(listdir(f'{eb}')): # for every subdirectory\n subfold = listdir(f'{eb}/{subdir}')\n filtnames = list(filter(re.compile('HCP').match, subfold)) # index HCP matrices\n filtnames.sort()\n filenames.extend(filtnames) # add them to list for later\n\n # Reading in data from all 1003 subjects\n for _, file in enumerate(filtnames):\n n1 = io.loadmat(f'{eb}/{subdir}/{file}')['mat']\n data.append(n1)\n\n sn = re.findall(r'\\d+', file)[-1][-6:] # subject ID\n subnums.append(sn)\n\n elif dataDir == 'data/cfHCP900_FSL_GM':\n taskIDs, taskCorr, taskNames, taskdata = read_HCP900_data()\n data = arr2mat_HCP900(taskCorr, toi, taskdata, r_transform=False)\n subnums = list(taskIDs)\n\n elif dataDir == 'data/Send_to_Tim/HCP_IMAGEN_ID_mega_file.txt':\n mega_vars = np.loadtxt(dataDir, delimiter=',', dtype=str, max_rows=1)\n mega_subs = np.loadtxt(dataDir, delimiter=',', dtype=str, usecols=0, skiprows=1)\n mega_hcp_inds = np.argwhere([file.startswith('HCP') for file in mega_subs]).squeeze()\n data = np.loadtxt(dataDir, delimiter=',', skiprows=1, usecols=range(1, len(mega_vars) - 1))[\n mega_hcp_inds] # TODO: add option to use IMAGEN data later\n subnums = [int(name[-6:]) for name in mega_subs[mega_hcp_inds]]\n\n else:\n filenames = [f for f in listdir(dataDir) if isfile(join(dataDir, f))]\n filenames.sort()\n\n if not not glob.glob(f'{dataDir}/{\"*.npy\"}'): # if numpy file of consolidated data saved, use that\n for file in glob.glob(f'{dataDir}/{\"*.npy\"}'):\n data = list(np.load(file))\n\n elif not not glob.glob(f'{dataDir}/{\"*.txt\"}'): # otherwise use .txt files\n for file in glob.glob(f'{dataDir}/{\"*.txt\"}'):\n data.append(np.loadtxt(file))\n\n elif not not glob.glob(f'{dataDir}/{\"*.mat\"}'): # otherwise use .h5py (aka .mat) files\n for file in glob.glob(f'{dataDir}/{\"*.mat\"}'):\n hf = h5py.File(file, 'r')\n\n if np.any(np.isin(list(hf.keys()), \"CorrMatrix\")): # HCP ICA300 ridge data\n n1 = np.array(hf[\"CorrMatrix\"][:])\n\n elif np.any(np.isin(list(hf.keys()), \"CORR\")): # Lea's HCP face data\n n1 = np.array(hf[\"CORR\"][:])\n sn = np.array(hf['IDS'][:]).astype(int)\n subnums.append(sn) # taking subnums from file\n\n data.append(n1)\n\n if not 
subnums: # if subnums still an empty list\n for i, filename in enumerate(filenames): # reading in subnums\n if filename.endswith(\".txt\") or filename.endswith(\".mat\"):\n num = re.findall(r'\\d+', filename) # find digits in filenames\n subnums.append(num)\n\n subnums.sort()\n subnums = np.array(subnums).astype(float).squeeze() # necessary for comparison to train-test-split partitions\n\n data = np.array(data, dtype=float)\n\n print(f'Success! {dataDir} {toi} read in.\\n')\n\n return data, subnums\n\n\n# tests random matrices in loaded data for positive definiteness and size\ndef test_mat_data(data, nMat=1):\n print('Running positive definite test...\\n...')\n # Testing arbitrarily chosen matrices for positive definiteness\n testind = np.random.randint(0, len(data), nMat)\n # r_testmat = np.empty([testmat.size, data[0].shape[0], data[0].shape[0]])\n\n for i, x in enumerate(testind):\n # r_testmat[i] = z2r(data[x]) + np.eye(data[x].shape[0]) # for z-scores\n # r_testmat[i] = data[x] + np.eye(data[x].shape[0]) # for all else\n #\n # assert r_testmat[i].shape == (data[0].shape[0], data[0].shape[0])\n # assert r_testmat[i].max() == 1.0\n\n if not isPD(data[x]):\n print(f\"Subject {testind[i]}/{len(data)}'s matrix of size {data[x].shape} is not positive definite!\")\n else:\n print(f\"Success! Subject {testind[i]}/{len(data)}'s matrix of size {data[x].shape} is positive definite!\")\n print('\\n')\n\n\n# plots loaded in data for qualitative examination\ndef plot_mat_data(data, dataDir, nMat=1):\n '''Takes input of data matrices and name of directory, and number of matrices to test.'''\n\n testmat = np.random.randint(0, len(data), nMat) # arbitrary subjects matrices\n\n plt.rcParams.update({'font.size': 6})\n fig, axs = plt.subplots(nMat, 2, figsize=(8, 5))\n fig.subplots_adjust(hspace=.3, wspace=.05)\n axs = axs.ravel()\n\n c = 0 # setting counter\n\n # Plotting arbitrary subject partial correlation matrix (should be sparse)\n for i, x in enumerate(testmat):\n if nMat > 1: # case: nMat > 1\n # Histogram\n lt = np.ravel(np.tril(data[x]))\n nonZel = lt != 0 # non zero elements\n axs[c].hist(lt[nonZel], bins=500)\n axs[c].set_title(f'Histogram of lower triangle entries\\nsubject {testmat[i]} in {dataDir}')\n\n # Connectivity matrix\n im = axs[c + 1].imshow(data[x])\n axs[c + 1].set_title(f'Connectivity Matrix for subject {testmat[i]}')\n fig.colorbar(im, ax=axs[c + 1])\n im.set_clim(lt.min(), lt.max())\n c += 2\n\n else: # case: nMat = 1\n # Plotting histogram of connectivity matrix values to see if they are gaussian-distributed\n lt = np.ravel(np.tril(data[x]))\n nonZel = lt != 0 # non zero elements\n axs[0].hist(lt[nonZel], bins=500)\n axs[0].set_title(f'Histogram of lower triangle entries\\nsubject {testmat[0]} in {dataDir}')\n\n # Plotting connectivity matrix\n im = axs[1].imshow(data[x])\n axs[1].set_title(f'Connectivity Matrix for subject {testmat[0]}')\n fig.colorbar(im, ax=axs[1])\n im.set_clim(lt.min(), lt.max())\n\n fig.show()\n\n # # exporting to interactive html file\n # mpld3.save_html(fig, 'figures/edge_betwenness_matrices_1.7.19')\n\n\n# fisher z-scores to correlation coefficient r\ndef z2r(x):\n return np.tanh(x)\n\n\n# Transforming into pearson correlations\ndef R_transform(data):\n \"\"\"Transforms an array/list of matrices from fisher z-score to pearson R correlation.\"\"\"\n rdata = np.empty_like(data)\n npd_count = 0\n for i, x in enumerate(data):\n rdata[i] = z2r(x)\n if not isPD(rdata[i]):\n npd_count += 1\n print(f'R_transform returned {npd_count} non-positive definite 
matrices')\n return rdata\n\n\n# testing multiple matrices for positive definiteness\ndef areNotPD(manyB):\n \"\"\"\n Script to test many matrices for positive definiteness\n :param manyB: array of matrices\n :return: number of matrices that aren't PD, and their indices in manyB\n \"\"\"\n howmany = 0\n which = []\n for i, B in enumerate(manyB):\n if not isPD(B):\n howmany += 1\n which.append(i)\n\n return howmany, which\n\n\n# Defining positive definite transformation of matrices\ndef nearestPD(A):\n \"\"\"Find the nearest positive-definite matrix to input\n\n A Python/Numpy port of John D'Errico's `nearestSPD` MATLAB code [1], which\n credits [2].\n\n [1] https://www.mathworks.com/matlabcentral/fileexchange/42885-nearestspd\n\n [2] N.J. Higham, \"Computing a nearest symmetric positive semidefinite\n matrix\" (1988): https://doi.org/10.1016/0024-3795(88)90223-6\n \"\"\"\n\n B = (A + A.T) / 2\n _, s, V = la.svd(B)\n\n H = np.dot(V.T, np.dot(np.diag(s), V))\n\n A2 = (B + H) / 2\n\n A3 = (A2 + A2.T) / 2\n\n if isPD(A3):\n return A3\n\n spacing = np.spacing(la.norm(A))\n # The above is different from [1]. It appears that MATLAB's `chol` Cholesky\n # decomposition will accept matrixes with exactly 0-eigenvalue, whereas\n # Numpy's will not. So where [1] uses `eps(mineig)` (where `eps` is Matlab\n # for `np.spacing`), we use the above definition. CAVEAT: our `spacing`\n # will be much larger than [1]'s `eps(mineig)`, since `mineig` is usually on\n # the order of 1e-16, and `eps(1e-16)` is on the order of 1e-34, whereas\n # `spacing` will, for Gaussian random matrixes of small dimension, be on\n # the order of 1e-16. In practice, both ways converge, as the unit test\n # below suggests.\n I = np.eye(A.shape[0])\n k = 1\n while not isPD(A3):\n mineig = np.min(np.real(la.eigvals(A3)))\n A3 += I * (-mineig * k ** 2 + spacing)\n k += 1\n\n return A3\n\n\n# Transforming matrices into nearest positive definite matrix\ndef PD_transform(datamats):\n pddata = np.empty_like(datamats)\n npd_count = 0\n for i, x in enumerate(datamats):\n pddata[i] = nearestPD(x)\n if not isPD(pddata[i]):\n print(f'Matrix {i} is not positive definite!')\n npd_count += 1\n if i % 200 == 0:\n print(f'Making matrix {i}/{len(pddata)} positive definite...')\n print(f'PD_transform successfully transformed {len(datamats) - npd_count} matrices\\\n')\n return pddata\n\n\n# Determining positive definite matrix by Cholesky decomposition\ndef isPD(B):\n \"\"\"Returns true when input is positive-definite, via Cholesky\n credit: https://stackoverflow.com/questions/43238173/python-convert-matrix-to-positive-semi-definite\"\"\"\n try:\n _ = la.cholesky(B)\n return True\n except la.LinAlgError:\n return False\n\n\n# create correlation matrix from time series\ndef CORR(A):\n \"\"\"\n :param A: a timeseries text file\n :return: Correlation matrix\n \"\"\"\n TS = np.loadtxt(A)\n\n TS -= numpy.nanmean(TS, axis=0)\n\n ST = numpy.nanstd(TS, axis=0)\n ST[ST == 0] = 1\n TS /= ST\n\n CO = numpy.cov(TS.T)\n\n numpy.fill_diagonal(CO, 0)\n return CO\n\n\n# create partial correlation matrix from time series\ndef ICORR(A, RHO):\n \"\"\"\n :param A: a time series text file\n :param RHO: L2 regularization term (larger means more regularization)\n :return: Partial correlation matrix\n \"\"\"\n TS = np.loadtxt(A)\n\n CO = numpy.cov(TS.T)\n CB = CO / numpy.sqrt(numpy.mean(numpy.diag(CO) ** 2))\n IC = -numpy.linalg.inv(CB + RHO * numpy.eye(CB.shape[0]))\n DV = numpy.sqrt(numpy.abs(numpy.diag(IC)))\n\n CR = (IC / DV[:, None]) / DV[None, :]\n 
numpy.fill_diagonal(CR, 0)\n return CR\n\n\n# code for whitening data.\ndef whiten(X, fudge=1E-18):\n \"\"\"\n :param X: covariance matrix\n :param fudge: insurance that eigenvectors with small eigenvalues aren't overamplified\n :return: whitened matrix X_white, and whitening matrix W\n \"\"\"\n # eigenvalue decomposition of the covariance matrix\n d, V = np.linalg.eigh(X)\n\n # a fudge factor can be used so that eigenvectors associated with\n # small eigenvalues do not get overamplified.\n D = np.diag(1. / np.sqrt(d + fudge))\n\n # whitening matrix\n W = np.dot(np.dot(V, D), V.T)\n\n # multiply by the whitening matrix\n X_white = np.dot(X, W)\n\n return X_white, W\n\n\n# checking if matrices are symmetric\ndef check_symmetric(a, rtol=1e-05, atol=1e-08):\n return np.allclose(a, a.T, rtol=rtol, atol=atol)\n\n\n# TODO: finish implementation of twins\n# partition data so twins are not separated between test-train-validation sets\ndef partition(restricted, Family_ID):\n '''Partitioning data so that a family's twins remain in the same test/validation/training set'''\n\n ZygositySR = restricted[\"ZygositySR\"]\n ZygosityGT = restricted['ZygosityGT']\n HasGT = restricted['HasGT']\n\n GTyes = np.where(np.isin(HasGT, True))[0] # subjects with genetic tests\n GTno = np.where(np.isin(HasGT, False))[0] # subjects w/o genetic tests\n assert len(GTyes) <= 1142\n assert len(GTno) <= 64\n\n srMZ = np.where(np.isin(ZygositySR, \"MZ\"))[0] # self-reported monozygotic\n srNotMZ = np.where(np.isin(ZygositySR, \"NotMZ\"))[0] # self-reported non-monozygotic\n srNotTwin = np.where(np.isin(ZygositySR, \"NotTwin\"))[0] # self-reported not twin\n srBlank = np.where(np.isin(ZygositySR, \" \"))[0] # no self-report on twin status\n assert len(srMZ) + len(srNotMZ) + len(srNotTwin) + len(srBlank) == 1003\n\n gcMZ = np.where(np.isin(ZygosityGT, \"MZ\"))[0] # genetically confirmed monozygotic\n gcDZ = np.where(np.isin(ZygosityGT, \"DZ\"))[0] # genetically confirmed dizygotic\n gcBlank = np.where(np.isin(ZygosityGT, \" \"))[0] # not genetically confirmed\n assert len(gcMZ) + len(gcDZ) + len(gcBlank) == 1003\n\n gcMZTwins = set(gcMZ) & set(GTyes) # genetically confirmed MZ twins\n assert len(gcMZTwins) <= 298 # 298 MZ in 1200 data\n\n gcDZTwins = set(gcDZ) & set(GTyes) # genetically confirmed DZ twins\n assert len(gcDZTwins) <= 188 # 188 DZ twins in 1200 data\n\n # p5 and p6 refer to points 5 and 6 of pg 89 in HCP release reference manual\n # https://www.humanconnectome.org/storage/app/media/documentation/s1200/HCP_S1200_Release_Reference_Manual.pdf\n\n p5 = set(srMZ) & set(gcBlank) # point 5 of pg 89\n assert len(p5) <= 66 # 66 subjects with ZygositySR=MZ, but ZygosityGT=Blank in 1200 data\n\n p6 = set(srNotMZ) & set(gcBlank)\n assert len(p6) <= 65 # 65 subjects with ZygositySR=NotMZ, but ZygosityGT=Blank in 1200 data\n\n # subjects whose putative twin is not part of the 1206 released study subjects.\n nsMZ = p5 & set(GTno) # subjects whose putative MZ twin IS part of the 1206, but HasGT=FALSE for one of the pair,\n nsDZ = p6 & set(\n GTno) # subjects whose putative DZ twin IS part of the 1206, but HasGT=FALSE for one or both of the pair\n noGTTwins = nsMZ | nsDZ\n assert len(noGTTwins) <= 56, 'More non-singular twins than expected. 
Should be fewer than 56.'\n\n # Creating full list of non-singular twins (adding genetically confirmed twins)\n nsTwins = noGTTwins | gcMZTwins | gcDZTwins\n\n # TODO: figure out if problem in finding twins from nsTwinFams or familymems or Family_ID\n # family IDs of families with self-reported, non-singular twins\n nsTwinfams = list(np.unique(list(Family_ID.iloc[list(nsTwins)])))\n assert len(nsTwinfams) <= len(nsTwins) / 2\n\n # Sanity check, confirming self-reported but not-genetically confirmed twins are of the same family\n sib1 = int(np.where(Family_ID == nsTwinfams[0])[0][0])\n sib2 = int(np.where(Family_ID == nsTwinfams[0])[0][1])\n assert Family_ID.iloc[sib1] == Family_ID.iloc[sib2]\n\n # Grouping twins together\n twingroups = [] # groups of subjects who are twins\n\n for i, x in enumerate(nsTwinfams): # For each family with twins...\n familymems = list(np.where(Family_ID == x)[0]) # list indices of all members\n # print(familymems)\n for j, y in enumerate([noGTTwins, gcMZTwins, gcDZTwins]):\n twins = set(familymems) & y # find if/which family members are twins...\n if len(twins) > 2: # (if more than two twins, tell us, but still add them all)\n print(f'family found with {len(twins)} twins')\n elif len(twins) < 2:\n print('only one twin in family!')\n if twins:\n twingroups.append(twins) # and add them to our list\n\n # TODO: Random shuffling of standalone participants and twins into 70-15-15 train-validation-test split\n\n return # test, train, validation # subject IDs\n\n\n# DIY tangent space transformation\ndef tangent_transform(refmats, projectmats, ref='euclidean'):\n \"\"\"\n Projects array of matrices (projectmats) into tangent space, using the mean of another array (refmats) as reference.\n Implementation from dadi et al., 2019. Source: https://hal.inria.fr/hal-01824205v3\n Calculation of reference means from Pervaiz et al., 2019. https://www.biorxiv.org/content/10.1101/741595v2.full.pdf\n\n :param refmats: positive definite covariance matrices (samples x rows x columns), from which mean is calculated\n :param projectmats: positive definite matrices to be projected into tangent space\n :param ref: reference mean to use (i.e. 
euclidean, harmonic, log euclidean, riemannian, kullback)\n :return: tangent-projected matrices\n \"\"\"\n if ref == 'harmonic': # use harmonic mean\n Ch = 0\n for i, x in enumerate(refmats):\n Ch += inv(x)\n Ch *= 1 / len(refmats)\n refMean = inv(Ch)\n\n elif ref == 'euclidean': # use euclidean mean\n refMean = 1 / len(refmats) * np.mean(refmats, axis=0)\n\n else:\n raise ValueError(f'Tangent transform not implemented for {ref} yet!')\n return\n\n d, V = np.linalg.eigh(refMean) # EVD on reference mean covariance matrix\n fudge = 1E-18 # ensures our eigenvectors don't explode\n wsStar = V.T @ np.diag(1 / np.sqrt(d + fudge)) @ V\n mag = len(projectmats[1]) # matrix length magnitude\n\n tmats = np.zeros_like(projectmats)\n for i, x in enumerate(projectmats):\n m = np.dot(wsStar, x).dot(wsStar)\n m = m.reshape(mag, mag)\n if i % 199 == 0:\n print(f'Projecting matrix {i}/{len(tmats)} into tangent space...')\n tmats[i] = logm(m)\n\n return tmats\n\n\n# create connectivity matrices from time series data\ndef create_connectivity(dataDir='data/HCP_created_ICA300_timeseries', rho=.5,\n saveDir='data/self_created_HCP_mats/ICA300_corr', c_type='corr'):\n \"\"\"\n Script to create correlation matrices from time series data\n :param c_type: the type of correlation matrix to create\n :param dataDir: Directory of time series .txt files\n :param rho: regularization term\n :param saveDir: Directory to save partial correlation mats\n :return: matrices that are not positive definite, despite regularization\n \"\"\"\n not_PD = []\n PD_testy3 = 0\n all_mat = []\n\n filenames = [f for f in listdir(dataDir) if isfile(join(dataDir, f))]\n filenames.sort()\n print(filenames)\n\n if filenames[0].endswith('.npy'):\n bigD = np.load(f'{dataDir}/{filenames[0]}')\n for i, x in enumerate(bigD):\n if c_type == 'pcorr':\n testy3 = ICORR(x, RHO=rho)\n elif c_type == 'corr':\n testy3 = CORR(x)\n\n np.fill_diagonal(testy3, 1)\n all_mat.append(testy3)\n\n if isPD(testy3):\n PD_testy3 += 1\n else:\n not_PD.append(i)\n print(f'{x} not PD!')\n\n elif filenames[0].endswith('.txt'):\n for i, x in enumerate(filenames):\n if c_type == 'pcorr':\n testy3 = ICORR(f'{dataDir}/{x}', RHO=rho)\n elif c_type == 'corr':\n testy3 = CORR(f'{dataDir}/{x}')\n\n np.fill_diagonal(testy3, 1)\n all_mat.append(testy3)\n\n if isPD(testy3):\n PD_testy3 += 1\n else:\n not_PD.append(i)\n print(f'{x} not PD!')\n\n all_mat = np.array(all_mat)\n np.save(saveDir, all_mat)\n\n print(f'{PD_testy3} positive-definite matrices returned in {c_type} calculations...')\n if c_type == 'pcorr':\n print(f'rho = {rho}\\n')\n\n return all_mat, not_PD\n\n\n# get parameters necessary for deconfounding\ndef get_confound_parameters(est_data, confounds, set_ind=None):\n \"\"\"Takes array of square matrices (samples x matrices) and returns confound signals, the parameter.\n\n est_data: full data from which the confound parameters are estimated\n set_ind: indices of the est_data from which the confound parameters will be estimated\n confounds: list of confounds, each containing same number of samples as est_data\n data_tbd: data to be deconfounded\n\n return:\n nan_ind: the indices (out of the set_ind) that have any confound == nan\n C: the nan-removed confound matrix\n C_pi: pseudoinverse of confounds\n b_hatX: deconfounded X\n\n Calculations based off equations (2) - (4):\n https://www.sciencedirect.com/science/article/pii/S1053811918319463?via%3Dihub#sec2\n \"\"\"\n\n # vectorizing matrix and subtracting mean\n t = np.array([x[np.triu_indices(len(x), k=1)] for x in 
est_data])\n t -= np.mean(t, axis=0)\n\n est_array = np.array([t[j] for j in list(set_ind)]) # specifying arrays from which we'll deconfound\n\n # creating confound matrix\n C = np.vstack(confounds).astype(float).T[set_ind]\n\n # identifying nan values in confounds\n nan_ind = np.unique(np.argwhere(np.isnan(C)).squeeze())\n\n # deleting samples that have confounds with NaN values\n C = np.delete(C, nan_ind, axis=0)\n X = np.delete(est_array, nan_ind, axis=0)\n\n # regressing out confounds\n C_pi = np.linalg.pinv(C) # moore-penrose pseudoinverse\n b_hatX = C_pi @ X # confound parameter estimate\n\n return C_pi, b_hatX, nan_ind\n\n\n# Reshapes task array (i.e. after deconfounding) into symmetric matrices, optionally does z-score to R transformation\ndef arr2mat_HCP900(taskCorr, task, taskdata, r_transform=False, is_task=True, new_size=268):\n taskCorrMat = []\n\n for i in range(taskCorr.shape[1]): # reshaping array into symmetric matrix\n out = np.zeros((new_size, new_size))\n\n if is_task: # adding to allow for reshaping of confound-corrected data array\n try:\n taskind = np.where(np.array(taskdata['SESSIONS']) == task)[0][0]\n except IndexError:\n print(f'\\'{task}\\' is not a valid task. Please provide a valid task to be read in.')\n sys.exit()\n\n uinds = np.triu_indices(len(out), k=1)\n out[uinds] = taskCorr[:, i, taskind]\n out = np.triu(out, 1) + out.T\n\n # TODO: decide if to delete NaN or just set to zero\n where_are_NaNs = np.isnan(out) # changing error-prone NaN values to zero\n out[where_are_NaNs] = 0\n\n taskCorrMat.append(out)\n\n taskCorrMat = np.array(taskCorrMat) # setting as array\n\n if r_transform:\n taskCorrMat = R_transform(taskCorrMat) # transforming to pearson R data\n\n for i, x in enumerate(taskCorrMat):\n np.fill_diagonal(x, 1) # on z-scored data\n\n # if check_symmetric(taskCorrMat[0]):\n # print('Matrix 0 is symmetric! Assumming all matrices are...\\n')\n # else:\n # print('Matrix 0 is not symmetric. Something went wrong...\\n')\n\n return taskCorrMat\n\n\n# reshapes array, sets NaNs to zero,\ndef array2matrix(samples, mat_size=300):\n \"\"\"\n :param samples: samples x upper triangular array of a matrix\n :param mat_size: determined size of newly shaped matrix\n :return: mat_size x mat_size symmetric matrix\n \"\"\"\n d_mats = []\n\n for i in range(len(samples)): # reshaping array into symmetric matrix\n out = np.zeros((mat_size, mat_size))\n\n uinds = np.triu_indices(len(out), k=1)\n out[uinds] = samples[i]\n out = np.triu(out, 1) + out.T\n\n where_are_NaNs = np.isnan(out) # changing error-prone NaN values to zero\n if np.any(where_are_NaNs):\n print(f'Setting NaNs to zero in matrix {i}...')\n out[where_are_NaNs] = 0 # sets NaNs to zero\n\n d_mats.append(out)\n\n d_mats = np.array(d_mats) # setting as array\n\n return d_mats\n\n\ndef deconfound_dataset(data, confounds, set_ind, outcome):\n \"\"\"\n Takes input of a data, its confounds. 
Deletes samples with nan-valued Y entries.\n Returns the deconfounded data.\n\n :param outcome: ground truth value to be deconfounded, per Y1\n :param data: Samples x symmetric matrices (row x column) to be deconfounded, per X1\n :param confounds: Confounds x samples, to be factored out of cdata\n :param set_ind: sample indices of data from which deconfounding parameters will be calculated\n :return: List of deconfounded X, Y as well as new train-test-validation indices\n \"\"\"\n\n # confound parameter estimation for X\n C_pi, b_hat_X, nan_ind = get_confound_parameters(data, confounds, set_ind=set_ind)\n\n # ...and Y, with nans removed\n Y_c = np.delete(outcome[set_ind], nan_ind, axis=0)\n b_hat_Y = C_pi @ Y_c # Y confound parameter estimation\n\n # takes all data as an array, removes need for tbd_ind\n C_tbd = np.vstack(confounds).astype(float).T\n\n X_corr = data - array2matrix(C_tbd @ b_hat_X, mat_size=data.shape[-1])\n Y_corr = outcome - C_tbd @ b_hat_Y\n\n # TODO: return explained variance from decconfounds\n return np.array(X_corr), np.array(Y_corr), nan_ind\n\n\n# get name of variable as a string\ndef retrieve_name(var):\n callers_local_vars = inspect.currentframe().f_back.f_locals.items()\n return [var_name for var_name, var_val in callers_local_vars if var_val is var]\n\n\ndef multiclass_to_onehot(Y):\n Y_classes = np.zeros((Y.squeeze().shape[0], len(np.unique(Y))))\n\n for i, x in enumerate(np.unique(Y)):\n Y_classes[[np.where(Y == x)[0]], i] = 1\n\n return Y_classes\n\n\ndef onehot_to_multiclass(a):\n return np.array([np.where(r == 1)[0][0] for r in a])\n\n\ndef namestr(obj, namespace):\n return [name for name in namespace if namespace[name] is obj][0]\n\n\ndef NEOFFIdomain_latent_transform(data, dataset='HCP', Q=5):\n \"\"\"\n Calculating factor-transformed, varimax-rotated latent dimensions of personality from NEO-FFI domain data.\n Saves .npy file with transformed data.\n\n :param data: pandas-like DataFrame/xarray-like DataArray with subject data. NEO-FFI data must be in columns of headers starting with 'NEO'.\n :param Q: number of features form which latent dimensions are calculated\n :return: None\n \"\"\"\n\n # read in personality data\n NEO_keys = list(filter(lambda x: x.startswith('NEO'), list(data.keys()))) # TODO ensure same order as in Gerlach\n try: # data as xarray DA\n feature_info = data[NEO_keys].to_array().values\n except AttributeError: # data as pandas DF\n feature_info = data[NEO_keys].values\n\n # ensure correct dims\n feature_info = feature_info.reshape(-1, Q)\n\n # dictionary of data necessary to transform HCP data\n mvtr = pickle.load(open(f'personality-types/data_filter/ipip{Q}-mvtr-1.pkl', \"rb\"))\n\n # z-score acc. 
to the Gerlach mean/std\n z_muvar = np.load(f'personality-types/data_filter/ipip{Q}-pre_cluster_zscore_mu_var-1.npy')\n z_mu, z_var = z_muvar[0], z_muvar[1]\n\n # transforming HCP data\n latent_data = (feature_info - mvtr['mu']) @ mvtr['trans_mat'] # applying scaling & factor analysis fit-transform\n latent_data = (mvtr['rot_mat'] @ latent_data.T).T # varimax rotation\n latent_data = (latent_data - z_mu) / z_var # z-scoring\n\n # saving as file, to be run through soft-cluster anaylsis\n np.save(f'personality-types/data_filter/{dataset}_ipip{Q}_domain_latent_transform.npy',\n latent_data) # TODO: save subnums in pd dataframe\n\n\ndef derive_HCP_NEOFFI60_scores():\n # # Reading in HCP NEO-FFI60 raw item responses\n unrestricted = pd.read_csv('data/unrestricted_adrymoat_6_30_2020_0_54_27.csv')\n NEORAW_keys = list(filter(lambda x: x.startswith('NEORAW'), list(unrestricted.keys())))\n NEORAW_keys.insert(0, 'Subject')\n HCP_NEORAW = unrestricted[NEORAW_keys].dropna()\n\n # coding the items per dom_key\n neuroticism_items = [1, 11, 16, 31, 46, 6, 21, 26, 36, 41, 51, 56]\n extraversion_items = [7, 12, 37, 42, 2, 17, 27, 57, 22, 32, 47, 52]\n openness_items = [13, 23, 43, 48, 53, 58, 3, 8, 18, 38]\n agreeableness_items = [9, 14, 19, 24, 29, 44, 54, 59, 4, 34, 39, 49]\n conscientiousness_items = [5, 10, 15, 30, 55, 25, 35, 60, 20, 40, 45, 50]\n\n # coding forward or reverse scoring\n n_keying = dict(forward=[11, 6, 21, 26, 36, 41, 51, 56], reverse=[1, 16, 31, 46])\n e_keying = dict(forward=[7, 37, 2, 17, 22, 32, 47, 52], reverse=[12, 42, 27, 57])\n o_keying = dict(forward=[13, 43, 53, 58], reverse=[23, 48, 3, 8, 18, 38])\n a_keying = dict(forward=[19, 4, 34, 49], reverse=[9, 14, 24, 29, 44, 54, 59, 39])\n c_keying = dict(forward=[5, 10, 25, 35, 60, 20, 40, 50], reverse=[15, 30, 55, 45])\n dom_names = ['NEOFAC_N', 'NEOFAC_E', 'NEOFAC_O', 'NEOFAC_A', 'NEOFAC_C']\n\n # deriving scores (1-5) for each item and domains\n forward_score = dict(SD=1, D=2, N=3, A=4, SA=5) # assuming 'strongly agree/disagree' is the abbrev.\n reverse_score = dict(SD=5, D=4, N=3, A=2, SA=1)\n\n HCP_NEOscored = HCP_NEORAW.copy()\n\n for i, dom_key in enumerate([n_keying, e_keying, o_keying, a_keying, c_keying]):\n for_items = ['NEORAW_' + (('0' + str(x))[-2:]) for x in dom_key['forward']]\n rev_items = ['NEORAW_' + (('0' + str(x))[-2:]) for x in dom_key['reverse']]\n HCP_NEOscored[for_items] = HCP_NEOscored[for_items].replace(forward_score)\n HCP_NEOscored[rev_items] = HCP_NEOscored[rev_items].replace(reverse_score)\n\n # deriving domain score\n HCP_NEOscored[dom_names[i]] = HCP_NEOscored[for_items + rev_items].sum(axis=1)\n\n return HCP_NEOscored\n\n\nclass Bunch(object):\n def __init__(self, adict):\n self.__dict__.update(adict)\n\n\ndef plot_grad_flow(named_parameters):\n '''Plots the gradients flowing through different layers in the net during training.\n Can be used for checking for possible gradient vanishing / exploding problems.\n\n Usage: Plug this function in Trainer class after loss.backwards() as\n \"plot_grad_flow(self.model.named_parameters())\" to visualize the gradient flow'''\n ave_grads = []\n max_grads = []\n layers = []\n for n, p in named_parameters:\n if (p.requires_grad) and (\"bias\" not in n):\n layers.append(n)\n ave_grads.append(p.grad.abs().mean())\n max_grads.append(p.grad.abs().max())\n plt.bar(np.arange(len(max_grads)), max_grads, alpha=0.1, lw=1, color=\"c\")\n plt.bar(np.arange(len(max_grads)), ave_grads, alpha=0.1, lw=1, color=\"b\")\n plt.hlines(0, 0, len(ave_grads) + 1, lw=2, color=\"k\")\n 
plt.xticks(range(0, len(ave_grads), 1), layers, rotation=\"vertical\")\n plt.xlim(left=0, right=len(ave_grads))\n plt.ylim(bottom=-0.001, top=0.02) # zoom in on the lower gradient regions\n plt.xlabel(\"Layers\")\n plt.ylabel(\"average gradient\")\n\n\ndef create_cv_folds(data, n_folds=6, separate_families=False, shuffle=True, seed=1234):\n \"\"\"Calculates folds for cv-fold training of shallow models.\n\n :param shuffle: shuffles order of families (v.s. loading from largest to smallest)\n Note: shuffling increases likelhood of folds of unequal sizes\n :param data: (xarray) X training data, with subject numbers as coords\n :param n_folds: number of folds in which to partition the data\n :param separate_families: (bool) whether to keep families in the same fold\n :return: subjects in each fold, their indices in the data\n \"\"\"\n\n subnums = data.subject.values\n\n if not separate_families:\n max_fold_size = np.ceil(len(subnums) / n_folds)\n min_fold_size = np.floor(len(subnums) / n_folds)\n remaining = np.remainder(len(subnums), n_folds) # subjects left over after even divide\n\n families = data.groupby('Family_ID')._group_indices # family members chunked in lists\n families.sort(key=len, reverse=True) # sorting by number of family members, most first\n\n if shuffle:\n random.shuffle(families) # shuffling order\n\n inds_in_fold = [[] for _ in range(n_folds)]\n counter = 0\n\n while counter < len(families):\n try:\n for i in range(n_folds):\n added_inds = families[counter]\n\n # pass over max full folds\n if (len(inds_in_fold[i]) == max_fold_size):\n continue\n\n # pass over min full folds if nothing remains\n elif (len(inds_in_fold[i]) >= min_fold_size) and (remaining == 0):\n continue\n\n # add to folds until they are full\n else:\n inds_in_fold[i].extend(added_inds)\n counter += 1\n\n # if a fold exceeds min full, detract the excess from remaining\n if len(inds_in_fold[i]) > min_fold_size:\n remaining -= len(inds_in_fold[i]) - min_fold_size\n\n # if the excess is too much, fuck it\n if remaining < 0:\n remaining = 0\n\n except IndexError:\n break\n\n inds_in_fold = np.array(inds_in_fold)\n\n else: # tear families apart\n kf = KFold(n_splits=n_folds, shuffle=True, random_state=seed)\n inds_in_fold = np.array(list(kf.split(subnums)))[:, 1] # n_folds x (test)\n\n subs_in_fold = np.array([subnums[x] for x in inds_in_fold])\n\n return subs_in_fold, inds_in_fold\n\n\ndef create_shallow_array(X, Y, chosen_Xdatavars, X_is_matrix, subs, inds, multiclass, train_inds=None,\n scale_features=True):\n \"\"\" Creates unraveled 1D data arrays to use with shallow network training, testing, validation.\n\n :param multiclass: (bool) decides to one-hot encode Y\n :param X: (xarray) data from which to create training arrays\n :param Y: (array) target outcome for prediction\n :param X_is_matrix: (bool) whether X data is in matrix form\n :param chosen_Xdatavars: (array) datasets in X upon which to create training arrays\n :param subs: subject numbers in the desired shallow array\n :param inds: subject indices in the desired shallow array\n :param train_inds: indices of train set, used for scaling\n :param scale_features: (bool) whether to scale the features by the max abs train value\n :return: unraveled X matrix (subject x features), Y array (subjects)\n \"\"\"\n # should it return onl the subjects for training? 
or just the\n\n # creating data arrays to be trained on\n if X_is_matrix:\n shallow_X = np.concatenate([X[var].sel(dict(subject=subs)).values[:,\n np.triu_indices_from(X[var][0], k=1)[0],\n np.triu_indices_from(X[var][0], k=1)[1]]\n for var in chosen_Xdatavars], axis=1)\n\n # TODO: implement scale features for matrix data? Maybe not necessary because correlation bounded by [0,1]\n\n else: # i.e. Johann_mega_graph\n shallow_X = X[chosen_Xdatavars[0]][inds].values\n\n if scale_features:\n scaler = MaxAbsScaler().fit(X[chosen_Xdatavars[0]][train_inds].values)\n shallow_X = scaler.transform(shallow_X)\n\n shallow_Y = Y[inds] # assumes Y ordered the same as subs\n\n if multiclass: # transforming one_hot encoded Y-data back into multiclass\n shallow_Y = onehot_to_multiclass(shallow_Y)\n\n return shallow_X, shallow_Y\n","sub_path":"utils/util_funcs.py","file_name":"util_funcs.py","file_ext":"py","file_size_in_byte":37386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"248031842","text":"import json\nimport urllib\nimport urllib2\nfrom django.shortcuts import render_to_response,Http404\nfrom django.http import HttpResponse\nfrom framework.models import Reply\n\n\ndef welcome(request):\n\treturn render_to_response(\"index.html\")\n\n\ndef greeting(request):\n\turl = request.GET.get('q',None)\n\turl = url.lower()\n\tstr1 = \"Hello, Kitty! \"\n\tstr2 = \"Hello, Kitty! It's a pleasure to meet you. Welcome to Earth.\"\n\t\n\tif 'hello' in url:\n\t\tresponse_data = {}\n\t\t\n\t\tif 'how' in url:\n\t\t\ttry:\n\t\t\t\treply = Reply.objects.get(keyword='how')\n\t\t\t\tresponse_data['answer'] = str1 + reply.answer\n\t\t\texcept:\n\t\t\t\tresponse_data['answer'] = str2\n\t\t\t\n\t\t\treturn HttpResponse(json.dumps(response_data), content_type=\"application/json\")\n\t\telse:\n\t\t\tresponse_data['answer'] = str2\n\t\t\treturn HttpResponse(json.dumps(response_data), content_type=\"application/json\")\n\t\t\t\n\tif 'hi' in url:\n\t\tresponse_data = {}\n\t\t\n\t\tif 'what' in url:\n\t\t\ttry:\n\t\t\t\treply = Reply.objects.get(keyword='what')\n\t\t\t\tresponse_data['answer'] = str1 + reply.answer\n\t\t\texcept:\n\t\t\t\tresponse_data['answer'] = str2\n\t\t\t\n\t\t\treturn HttpResponse(json.dumps(response_data), content_type=\"application/json\")\n\t\telse:\n\t\t\tresponse_data['answer'] = str2\n\t\t\treturn HttpResponse(json.dumps(response_data), content_type=\"application/json\")\n\t\t\t\n\tif 'good' in url:\n\t\tresponse_data = {}\n\t\t\n\t\tif 'pleasure' in url:\n\t\t\ttry:\n\t\t\t\treply = Reply.objects.get(keyword='pleasure')\n\t\t\t\tresponse_data['answer'] = str1 + reply.answer\n\t\t\texcept:\n\t\t\t\tresponse_data['answer'] = str2\n\t\t\t\n\t\t\treturn HttpResponse(json.dumps(response_data), content_type=\"application/json\")\n\t\telse:\n\t\t\tresponse_data['answer'] = str2\n\t\t\treturn HttpResponse(json.dumps(response_data), content_type=\"application/json\")\n\t\t\t\n\traise Http404\n\n\ndef weather(request):\n\turl = request.GET.get('q',None)\n\turl = url.lower()\n\t#print url\n\tcity = url.rsplit(None,1)[-1]\n\tcity = city.replace(\"?\",\"\")\n\turl2 = \"http://api.openweathermap.org/data/2.5/weather?q=\" + city\n\tresponse = urllib2.urlopen(url2)\n\tdata = json.load(response)\n\t\n\tresponse_data = {}\n\t\n\tif 'temperature' in url:\n\t\ttemp = data['main']['temp']\n\t\tresponse_data['answer'] = str(temp)+\" K\"\n\t\treturn HttpResponse(json.dumps(response_data), content_type=\"application/json\")\n\t\n\tif 'humidity' in url:\n\t\ttemp = 
data['main']['humidity']\n\t\tresponse_data['answer'] = str(temp)+\" %\"\n\t\treturn HttpResponse(json.dumps(response_data), content_type=\"application/json\")\n\t\n\tresult = data['weather'][0]['main']\n\tresult = result.lower()\n\t\n\tif \"rain\" in url or \"clear\" in url or \"clouds\" in url:\n\t\tif result in url:\n\t\t\tresponse_data['answer'] = \"Yes\"\n\t\t\treturn HttpResponse(json.dumps(response_data), content_type=\"application/json\")\n\t\telse:\n\t\t\tresponse_data['answer'] = \"No\"\n\t\t\treturn HttpResponse(json.dumps(response_data), content_type=\"application/json\")\n\t\n\tresponse_data['answer'] = \"Sorry, I didn't get it. Please ask about temperature,humidity and weather condition( likes - Rain/Clouds/Clear)\"\n\treturn HttpResponse(json.dumps(response_data), content_type=\"application/json\")\n\t\t\n\ndef qa(request):\n\tquestion = request.GET.get('q',None)\n\tcount1,count2 = 0,0\n\tfor i in xrange(len(question)):\n\t\tif question[i] == ' ':\n\t\t\tcount2 += 1\n\t\telse:\n\t\t\tcount1 += 1\n\t\t\n\t\tif count2 >= 2:\n\t\t\tbreak\n\t\n\tquestion = question[count1+2:]\n\tquestion = question.replace(\"?\",\"\")\n\t#print question\n\t\n\tapi_key = 'AIzaSyBbNZG8DUkpII8xfJi2l1jDW7w_8d_KmQg'\n\tquery = question\n\tservice_url = 'https://www.googleapis.com/freebase/v1/search'\n\tparams = {\n 'query': query,\n 'key': api_key\n\t}\n\turl = service_url + '?' + urllib.urlencode(params)\n\tresponse_data = urllib.urlopen(url).read()\n\tresponse = json.loads(response_data)\n\t\t\n\treturn HttpResponse(json.dumps(response), content_type=\"application/json\")\n\n\ndef custom404(request):\n\treturn render_to_response(\"custom_404.html\")\n\n\ndef custom500(request):\n\treturn render_to_response(\"custom_500.html\")","sub_path":"api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"426647646","text":"#!/usr/bin/env python3\n\nimport argparse\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_datasets as tfds\nfrom tensorflow.keras.applications import resnet50\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('batchsize', type=int)\n parser.add_argument('batchcount', type=int)\n args = parser.parse_args()\n\n ds_all, info = tfds.load('imagenet_resized/32x32',\n with_info=True,\n split=\"train\",\n as_supervised=True)\n\n classes = info.features[\"label\"].num_classes\n shape = info.features['image'].shape\n\n model = resnet50.ResNet50(weights=None, input_shape=shape, classes=classes)\n\n model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n optimizer=tf.keras.optimizers.Adam(), metrics=['accuracy'])\n\n model.fit(ds_all.batch(args.batchsize), steps_per_epoch=args.batchcount, epochs=1, verbose=1)\n","sub_path":"resnet50/resnet50_tf_records.py","file_name":"resnet50_tf_records.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"556359847","text":"from django.db import router\nfrom django.urls import path,include,re_path\nfrom django.urls.conf import include\nfrom EVSCapp.EVSCApi.views import (RecordDetailAPIView,\n LisVehicle,\n VehicleDetailAPIView,\n ReportRUDAPIView,\n UpdateFcmTokenApiView, \n ListFcmTokenDevices,\n # fcm_token_detail\n ListReport,\n MyProfileLoadAPIView,\n ListUser,\n ListUserDetail,\n RecordViewSet,\n # RecordList,\n # list_records,\n ListNotification,\n 
RecordList,\n ChangePasswordView\n\n \n )\n\nfrom rest_framework.routers import DefaultRouter\nfrom django.contrib.auth import views as auth_views\nfrom EVSCapp.EVSCApi import views as qv\nfrom django.urls import reverse_lazy\n\n\nrouter=DefaultRouter()\nrouter.register(\"records\",qv.RecordViewSet)\n# router.register('devices', FCMDeviceAuthorizedViewSet)\n\n\nurlpatterns = [\n # path(\"\",include(router.urls)),\n path(\"\",include(router.urls)),\n path('rest-auth/',include(\"rest_auth.urls\")),\n \n # path('records/',list_records,name='list-rcords'),\n path('records/',RecordList.as_view(),name = 'list-records'),\n path('records/<int:pk>/',RecordDetailAPIView.as_view(),name='list-detail'),\n path(\"records/<int:pk>/report/\", qv.ReportCreateAPiView.as_view(),name='create-report'),\n path('vehicles/',LisVehicle.as_view(),name='list-vehicle'),\n path('vehicles/<int:pk>/',VehicleDetailAPIView.as_view(),name='vehicle-detail'),\n path('reports/',ListReport.as_view(),name='report-list'),\n path('reports/<int:pk>/',ReportRUDAPIView.as_view(),name='report-detail'),\n path('devices/',ListFcmTokenDevices.as_view(),name='list-device-token'),\n path('devices/<int:pk>/',UpdateFcmTokenApiView.as_view(),name='create-device-token'),\n path('user-profile/',MyProfileLoadAPIView.as_view(),name ='retriev-user-profile'),\n path('user/',ListUser.as_view(), name ='users'),\n path('users/<int:pk>/',ListUserDetail.as_view(), name = 'user-detail'),\n path('change-password/',ChangePasswordView.as_view(),name = 'change-password'),\n path('notifications/',ListNotification.as_view(),name='notifications'),\n # path('reset-password/',auth_views.PasswordResetView.as_view(success_url=reverse_lazy('password_reset_done')), name='reset_password'),\n # path('reset_password_sent/',auth_views.PasswordResetDoneView.as_view(),name='password_reset_done'),\n # path('reset_password_complete/',auth_views.PasswordResetCompleteView.as_view(),name='password_reset_complete'),\n # path('reset/<uidb64>/<token>/',auth_views.PasswordResetConfirmView.as_view(success_url=reverse_lazy('password_reset_complete')),name='password_reset_confirm')\n # path('devices/<int:pk>/',fcm_token_detail,name='create-device-token')\n # path('records/<int:pk>/report',ReportCreateAPiView.as_view(),name='create-record')\n \n]\n","sub_path":"EVSCapp/EVSCApi/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"90349302","text":"#!/usr/bin/python2\n\nimport cv2, os\nimport numpy as np\nimport threading\nfrom pycocotools.coco import COCO\n\ngo_auto = True\n\ndef box2yolo(size, box):\n dw = 1./size[0]\n dh = 1./size[1]\n x = box[0] + box[2]/2.0\n y = box[1] + box[3]/2.0\n w = box[2]\n h = box[3]\n x = x*dw\n w = w*dw\n y = y*dh\n h = h*dh\n return (x,y,w,h)\n\ndef process_coco_thread(coco, cat_ids, imgs, idx, img_path, out_img_path, out_label_path):\n global go_auto\n\n want_red = False\n\n num_red = 0\n num_normal = 0\n\n iters = 0\n\n total_proc = 0\n\n for img in imgs:\n iters += 1\n total_proc += 1\n if iters > 1000:\n iters = 0\n print(\"processed\", total_proc, \"out of\", len(imgs), \"num_red\", num_red, \"num_normal\", num_normal)\n\n frame = cv2.imread(img_path + \"/\" + img['file_name'])\n\n orig_frame = frame.copy()\n\n out_frame = orig_frame.copy()\n\n yolo_anns = []\n\n ann_ids = coco.getAnnIds(imgIds=img['id'], catIds=cat_ids)\n anns = coco.loadAnns(ann_ids)\n for ann in anns:\n got_red = False\n if ('bbox' in ann) and ('segmentation' in ann) and ('keypoints' in ann) and want_red:\n
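 # NOTE: in the standard COCO person annotations, category['skeleton'] lists\n # limb end-points as 1-based keypoint indices, hence the -1 below when\n # indexing the 0-based x/y/v arrays unpacked from ann['keypoints'].\n sks = 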
np.array(coco.loadCats(ann['category_id'])[0]['skeleton'])-1\n sks = [sks[4], sks[5], sks[6], sks[7], sks[8], sks[9]]\n #sks = [sks[4], sks[5], sks[6], sks[7]]\n kp = np.array(ann['keypoints'])\n x = kp[0::3]\n y = kp[1::3]\n v = kp[2::3]\n\n found = False\n for sk in sks:\n if np.all(v[sk] > 0):\n found = True\n break\n\n bbox = [int(xx) for xx in ann['bbox']]\n\n if found and (bbox[2] >= 10) and (bbox[3] >= 10):\n msk = coco.annToMask(ann) * 255\n\n part = orig_frame[bbox[1]:bbox[1]+bbox[3],bbox[0]:bbox[0]+bbox[2]]\n grab_msk = np.zeros((bbox[3], bbox[2]), np.uint8)\n grab_msk[msk[bbox[1]:bbox[1]+bbox[3],bbox[0]:bbox[0]+bbox[2]] == 255] = 2\n\n cv2.rectangle(frame, (bbox[0], bbox[1]), (bbox[0] + bbox[2], bbox[1] + bbox[3]), (0, 0, 255), 2)\n\n #msk3 = cv2.merge((msk, msk, msk))\n #frame = cv2.addWeighted(frame, 1.0, msk3, 0.5, 0)\n\n for sk in sks:\n if np.all(v[sk]>0):\n cv2.polylines(frame, np.int32([zip(x[sk], y[sk])]), 0, (255,0,0), 2)\n cv2.polylines(grab_msk, np.int32([zip(x[sk] - bbox[0], y[sk] - bbox[1])]), 0, 1, 2)\n pts = zip(x[v>0], y[v>0])\n for pt in pts:\n cv2.circle(frame, pt, 2, (0,255,0), 2)\n pts = zip(x[v>1], y[v>1])\n for pt in pts:\n cv2.circle(frame, pt, 2, (255,255,0), 2)\n\n grab_msk = cv2.bitwise_and(grab_msk, msk[bbox[1]:bbox[1]+bbox[3],bbox[0]:bbox[0]+bbox[2]])\n\n #cv2.imshow('grab_msk', grab_msk * 127)\n #cv2.imshow('part', part)\n\n if (np.count_nonzero(grab_msk == 1) > 10) and (np.count_nonzero(grab_msk == 0) > 10) and (np.count_nonzero(grab_msk == 2) > 10):\n bgdModel = np.zeros((1,65),np.float64)\n fgdModel = np.zeros((1,65),np.float64)\n cv2.grabCut(part, grab_msk, None, bgdModel, fgdModel, 5, cv2.GC_INIT_WITH_MASK)\n mask2 = np.where((grab_msk==2)|(grab_msk==0),0,1).astype('uint8')\n img = part*mask2[:,:,np.newaxis]\n #cv2.imshow('frame2', img)\n\n msk3 = np.zeros(msk.shape, np.uint8)\n msk3[bbox[1]:bbox[1]+bbox[3],bbox[0]:bbox[0]+bbox[2]] = mask2\n\n mrg1 = cv2.merge((msk3, msk3, msk3 * 255))\n\n out_frame[msk3 == 1] = cv2.addWeighted(out_frame, 0.4, mrg1, 0.6, 0)[msk3 == 1]\n\n #msk3 = cv2.merge((msk3 * 0, msk3 * 0, msk3 * 255))\n #out_frame[msk3 == 1] = (0,0, 255)\n num_red += 1\n yolo_anns.append((bbox, 1))\n got_red = True\n want_red = False\n #out_frame = cv2.addWeighted(out_frame, 1.0, msk3, 1.0, 0)\n if not got_red and ('bbox' in ann):\n bbox = [int(xx) for xx in ann['bbox']]\n num_normal += 1\n yolo_anns.append((bbox, 0))\n want_red = True\n\n assert(len(yolo_anns) > 0)\n\n if not go_auto:\n cv2.imshow('frame', frame)\n cv2.imshow('out_frame', out_frame)\n\n cv2.imwrite(out_img_path + \"/\" + str(idx) + \".jpg\", out_frame, [int(cv2.IMWRITE_JPEG_QUALITY), 100])\n\n txt_outfile = open(out_label_path + \"/\" + str(idx) + \".txt\", \"w\")\n\n for ann in yolo_anns:\n bb = box2yolo((out_frame.shape[1], out_frame.shape[0]), ann[0])\n txt_outfile.write(str(ann[1]) + \" \" + \" \".join([str(a) for a in bb]) + '\\n')\n\n txt_outfile.close()\n\n idx += 1\n\n if not go_auto:\n k = cv2.waitKey(0) & 0xff\n if k == 27:\n break\n\ndef process_coco(coco, img_path, out_img_path, out_label_path, out_list):\n global go_auto\n\n cat_ids = coco.getCatIds(catNms=['person'])\n img_ids = coco.getImgIds(catIds=cat_ids)\n imgs = coco.loadImgs(ids = img_ids)\n\n idx = 1\n\n out_img_path = os.path.abspath(out_img_path)\n\n num_threads = 7 if go_auto else 1\n\n imgs_split = np.array_split(imgs, num_threads)\n\n thrs = []\n for i in range(num_threads):\n thr = threading.Thread(target=process_coco_thread, args=(coco, cat_ids, imgs_split[i], idx, img_path, out_img_path, 
out_label_path))\n idx += len(imgs_split[i])\n thr.start()\n thrs.append(thr)\n\n for thr in thrs:\n thr.join()\n\n idx = 1\n\n txt_out_list = open(out_list, \"w\")\n\n for img in imgs:\n txt_out_list.write(out_img_path + \"/\" + str(idx) + \".jpg\" + '\\n')\n idx += 1\n\n txt_out_list.close()\n\n print(\"Done.\")\n\nif __name__ == \"__main__\":\n process_coco(COCO(\"../annotations/person_keypoints_train2014.json\"), \"../images/train2014\", \"../my2/images/train\", \"../my2/labels/train\", \"../my2_train.txt\")\n process_coco(COCO(\"../annotations/person_keypoints_val2014.json\"), \"../images/val2014\", \"../my2/images/val\", \"../my2/labels/val\", \"../my2_val.txt\")\n","sub_path":"PythonAPI/process_coco.py","file_name":"process_coco.py","file_ext":"py","file_size_in_byte":6603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"493989957","text":"from __future__ import print_function\nimport argparse\nimport logging\nimport math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.autograd import Variable\nfrom torchvision import transforms\nfrom torchvision.utils import save_image\nimport dataloader as dl\n\n\nclass BasicConvBlock(nn.Module):\n def __init__(self, inplanes, planes, downsample_method=None):\n super(BasicConvBlock, self).__init__()\n\n if downsample_method == \"conv\":\n self.stage1 = nn.Sequential(\n nn.Conv2d(inplanes, planes, kernel_size=2, stride=2, padding=0, bias=False),\n nn.BatchNorm2d(planes),\n nn.ReLU(inplace=True)\n )\n self.downsample = nn.Sequential(\n nn.Conv2d(inplanes, planes, kernel_size=1, stride=2, padding=0, bias=False),\n nn.BatchNorm2d(planes)\n )\n elif downsample_method == \"maxpool\":\n self.stage1 = nn.Sequential(\n nn.Conv2d(inplanes, planes, kernel_size=1, stride=1, padding=0, bias=False),\n nn.BatchNorm2d(planes),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=2)\n )\n self.downsample = nn.Sequential(\n nn.Conv2d(inplanes, planes, kernel_size=1, stride=2, padding=0, bias=False),\n nn.BatchNorm2d(planes)\n )\n else:\n self.stage1 = nn.Sequential(\n nn.Conv2d(inplanes, planes, kernel_size=3, stride=1, padding=1, bias=False),\n nn.BatchNorm2d(planes),\n nn.ReLU(inplace=True)\n )\n self.downsample = None\n\n self.stage2 = nn.Sequential(\n nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False),\n nn.BatchNorm2d(planes))\n self.relu = nn.ReLU(inplace=True)\n\n def forward(self, x):\n residual = x\n\n out = self.stage1(x)\n out = self.stage2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass UpsampleInterpolate(nn.Module):\n def __init__(self, scale_factor=2.0):\n super(UpsampleInterpolate, self).__init__()\n self.scale_factor = scale_factor\n\n def forward(self, x):\n return F.interpolate(x, scale_factor=self.scale_factor)\n\n\nclass BasicConvTransposeBlock(nn.Module):\n\n def __init__(self, inplanes, planes, upsample_method=None):\n super(BasicConvTransposeBlock, self).__init__()\n\n if upsample_method == \"conv\":\n self.stage1 = nn.Sequential(\n nn.ConvTranspose2d(inplanes, planes, kernel_size=2, stride=2, padding=0, bias=False),\n nn.BatchNorm2d(planes),\n nn.ReLU(inplace=True)\n )\n elif upsample_method == \"interpolate\":\n self.stage1 = nn.Sequential(\n nn.ConvTranspose2d(inplanes, planes, kernel_size=1, stride=1, padding=0, bias=False),\n nn.BatchNorm2d(planes),\n nn.ReLU(inplace=True),\n 
UpsampleInterpolate(scale_factor=2.0)\n )\n else:\n self.stage1 = nn.Sequential(\n nn.ConvTranspose2d(inplanes, planes, kernel_size=3, stride=1, padding=1, bias=False),\n nn.BatchNorm2d(planes),\n nn.ReLU(inplace=True)\n )\n\n self.stage2 = nn.Sequential(\n nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False),\n nn.BatchNorm2d(planes)\n )\n self.relu = nn.ReLU(inplace=True)\n\n def forward(self, x):\n out = self.stage1(x)\n out = self.stage2(out)\n out = self.relu(out)\n\n return out\n\n\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n # width = 256, height = 256\n # 6 = RGB Left, RGB Right\n\n # downsample_method = 'maxpool'\n # upsample_method = 'interpolate'\n downsample_method = 'conv'\n upsample_method = 'conv'\n\n # Input: b, 6, 256, 256\n self.encoder_block1 = nn.Sequential(\n nn.Conv2d(6, 32, kernel_size=7, stride=1, padding=3, bias=False),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n # nn.MaxPool2d(kernel_size=3, stride=1, padding=1),\n BasicConvBlock(32, 32)\n ) # b, 32, 256, 256\n self.encoder_block2 = nn.Sequential(\n BasicConvBlock(32, 43, downsample_method=downsample_method),\n BasicConvBlock(43, 43)\n ) # b, 43, 128, 128\n self.encoder_block3 = nn.Sequential(\n BasicConvBlock(43, 57, downsample_method=downsample_method),\n BasicConvBlock(57, 57)\n ) # b, 57, 64, 64\n self.encoder_block4 = nn.Sequential(\n BasicConvBlock(57, 76, downsample_method=downsample_method),\n BasicConvBlock(76, 76)\n ) # b, 76, 32, 32\n self.encoder_block5 = nn.Sequential(\n BasicConvBlock(76, 101, downsample_method=downsample_method),\n BasicConvBlock(101, 101)\n ) # b, 101, 16, 16\n self.encoder_block6 = nn.Sequential(\n BasicConvBlock(101, 128, downsample_method=downsample_method),\n BasicConvBlock(128, 128)\n ) # b, 128, 8, 8\n\n # Input: b, 3, 1, 1\n # self.position_encoder8 = nn.Sequential(\n # nn.ConvTranspose2d(3, 9, 4, stride=1, padding=0), # b, 9, 4, 4\n # nn.BatchNorm2d(9),\n # nn.ReLU(inplace=True),\n # nn.ConvTranspose2d(9, 16, 4, stride=2, padding=1), # b, 16, 8, 8\n # nn.BatchNorm2d(16),\n # nn.ReLU(inplace=True)\n # ) # b, 16, 8, 8\n\n # Input: b, 3, 1, 1\n self.position_encoder4 = nn.Sequential(\n nn.ConvTranspose2d(3, 9, 2, stride=1, padding=0), # b, 9, 2, 2\n nn.BatchNorm2d(9),\n nn.ReLU(inplace=True),\n nn.ConvTranspose2d(9, 16, 4, stride=2, padding=1), # b, 16, 4, 4\n nn.BatchNorm2d(16),\n nn.ReLU(inplace=True)\n ) # b, 16, 4, 4\n\n # Input: b, 3, 1, 1\n # self.position_encoder2 = nn.Sequential(\n # nn.ConvTranspose2d(3, 9, 2, stride=1, padding=0), # b, 9, 2, 2\n # nn.BatchNorm2d(9),\n # nn.ReLU(inplace=True),\n # nn.ConvTranspose2d(9, 16, 3, stride=1, padding=1), # b, 16, 2, 2\n # nn.BatchNorm2d(16),\n # nn.ReLU(inplace=True)\n # ) # b, 16, 2, 2\n\n # Input: b, (128+16), 8, 8\n self.decoder_block1 = nn.Sequential(\n BasicConvTransposeBlock(128 + 16, 101, upsample_method=upsample_method),\n BasicConvTransposeBlock(101, 101)\n ) # b, 101, 16, 16\n self.decoder_block2 = nn.Sequential(\n BasicConvTransposeBlock(101 * 2, 76, upsample_method=upsample_method),\n BasicConvTransposeBlock(76, 76)\n ) # b, 76, 32, 32\n self.decoder_block3 = nn.Sequential(\n BasicConvTransposeBlock(76 * 2, 57, upsample_method=upsample_method),\n BasicConvTransposeBlock(57, 57)\n ) # b, 57, 64, 64\n self.decoder_block4 = nn.Sequential(\n BasicConvTransposeBlock(57 * 2, 43, upsample_method=upsample_method),\n BasicConvTransposeBlock(43, 43)\n ) # b, 43, 128, 128\n self.decoder_block5 = nn.Sequential(\n BasicConvTransposeBlock(43 * 2, 32, 
upsample_method=upsample_method),\n BasicConvTransposeBlock(32, 32)\n ) # b, 32, 256, 256\n self.decoder_block6 = nn.Sequential(\n BasicConvTransposeBlock(32 * 2, 32),\n nn.ConvTranspose2d(32, 3, kernel_size=3, stride=1, padding=1, bias=False),\n nn.Tanh()\n ) # b, 3, 256, 256\n\n # Initialize weights\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n elif isinstance(m, nn.ConvTranspose2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def forward(self, x, p):\n x1 = self.encoder_block1(x)\n x2 = self.encoder_block2(x1)\n x3 = self.encoder_block3(x2)\n x4 = self.encoder_block4(x3)\n x5 = self.encoder_block5(x4)\n x6 = self.encoder_block6(x5)\n\n ph = self.position_encoder4(p)\n\n h1 = torch.cat((x6, ph), dim=1)\n z1 = self.decoder_block1(h1)\n h2 = torch.cat((z1, x5), dim=1)\n z2 = self.decoder_block2(h2)\n h3 = torch.cat((z2, x4), dim=1)\n z3 = self.decoder_block3(h3)\n h4 = torch.cat((z3, x3), dim=1)\n z4 = self.decoder_block4(h4)\n h5 = torch.cat((z4, x2), dim=1)\n z5 = self.decoder_block5(h5)\n h6 = torch.cat((z5, x1), dim=1)\n out = self.decoder_block6(h6)\n\n return out\n\n\nclass ModelLoss(nn.Module):\n def __init__(self, device, value_weight=0.9, edge_weight=0.1):\n super(ModelLoss, self).__init__()\n edge_filter = ModelLoss.generate_filter()\n self.log_filter = edge_filter.to(device)\n self.value_weight = value_weight\n self.edge_weight = edge_weight\n\n @staticmethod\n def generate_filter():\n f = Variable(torch.FloatTensor([[[[-1 / 8, -1 / 8, -1 / 8],\n [-1 / 8, 8 / 8, -1 / 8],\n [-1 / 8, -1 / 8, -1 / 8]]]]),\n requires_grad=False)\n return torch.cat((f, f, f), dim=1)\n\n def forward(self, input, target, reduction='elementwise_mean'):\n value_l1_loss = F.l1_loss(input, target, reduction=reduction)\n input_log_edges = F.conv2d(input, self.log_filter, padding=1)\n target_log_edges = F.conv2d(target, self.log_filter, padding=1)\n edge_l1_loss = F.l1_loss(input_log_edges, target_log_edges, reduction=reduction)\n return value_l1_loss * self.value_weight + edge_l1_loss * self.edge_weight\n\n\ndef train(args, model, device, train_loader, criterion, optimizer, epoch):\n model.train()\n for batch_idx, data in enumerate(train_loader):\n data_input = torch.cat((data['left'], data['right']), dim=1).to(device)\n data_actual = data['generated'].to(device)\n position = data['position'].to(device)\n optimizer.zero_grad()\n data_output = model(data_input, position)\n loss = criterion(data_output, data_actual)\n loss.backward()\n optimizer.step()\n if batch_idx % args.log_interval == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n epoch, batch_idx * args.batch_size, len(train_loader.dataset),\n 100. 
* batch_idx / len(train_loader), loss.item()))\n\n def to_img(x):\n return ((x * 0.5) + 0.5).clamp(0,1)\n\n if epoch % 1 == 0:\n novel_images = torch.cat((data_output.cpu().data,data_actual.cpu().data), dim=3)\n input_data = data_input.cpu().data\n eye_images = torch.cat((input_data[:,0:3,:,:],input_data[:,3:6,:,:]), dim=3)\n images = torch.cat((novel_images,eye_images), dim=2)\n gen_pic = to_img(images)\n save_image(gen_pic, './model01_img/image_{}.png'.format(epoch))\n\n\ndef test(args, model, device, test_loader, criterion):\n model.eval()\n test_loss = 0\n with torch.no_grad():\n for data in test_loader:\n data_input = torch.cat((data['left'], data['right']), dim=1).to(device)\n data_actual = data['generated'].to(device)\n position = data['position'].to(device)\n data_output = model(data_input, position)\n test_loss += criterion(data_output, data_actual, reduction='sum').item()\n\n test_loss /= len(test_loader.dataset)\n print('\\nTest set: Average loss: {:.4f}\\n'.format(test_loss))\n\n\ndef main(custom_args=None):\n # Training settings\n parser = argparse.ArgumentParser(description='PyTorch Model 01 Experiment')\n parser.add_argument('--batch-size', type=int, default=32, metavar='N',\n help='input batch size for training (default: 32)')\n parser.add_argument('--test-batch-size', type=int, default=64, metavar='N',\n help='input batch size for testing (default: 64)')\n parser.add_argument('--epochs', type=int, default=20, metavar='N',\n help='number of epochs to train (default: 20)')\n parser.add_argument('--momentum', type=float, default=0.5, metavar='M',\n help='SGD momentum (default: 0.5)')\n parser.add_argument('--lr', type=float, default=0.01, metavar='LR',\n help='learning rate (default: 0.01)')\n parser.add_argument('--beta1', type=float, default=0.9, metavar='B1',\n help='Adam beta 1 (default: 0.9)')\n parser.add_argument('--beta2', type=float, default=0.999, metavar='B2',\n help='Adam beta 2 (default: 0.999)')\n parser.add_argument('--no-cuda', action='store_true', default=False,\n help='disables CUDA training')\n parser.add_argument('--use-sgd', action='store_true', default=False,\n help='uses SGD instead of Adam')\n parser.add_argument('--seed', type=int, default=1, metavar='S',\n help='random seed (default: 1)')\n parser.add_argument('--log-interval', type=int, default=50, metavar='N',\n help='how many batches to wait before logging training status')\n args = parser.parse_args(args=custom_args)\n use_cuda = not args.no_cuda and torch.cuda.is_available()\n\n torch.manual_seed(args.seed)\n\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n\n kwargs = {'num_workers': 4, 'pin_memory': True} if use_cuda else {}\n dataset_transforms = transforms.Compose([dl.ResampleImages(0.5),\n dl.SubsampleImages(0.25),\n dl.ToTensor(),\n dl.NormalizeImages(\n mean=[0.5, 0.5, 0.5],\n std=[0.5, 0.5, 0.5])\n ])\n train_set = dl.RandomSceneDataset('../screens_256', transform=dataset_transforms)\n test_set = dl.RandomSceneDataset('../test_256', transform=dataset_transforms)\n train_loader = torch.utils.data.DataLoader(train_set, batch_size=args.batch_size, shuffle=True, **kwargs)\n test_loader = torch.utils.data.DataLoader(test_set, batch_size=args.test_batch_size, shuffle=True, **kwargs)\n\n model = Net().to(device)\n if args.use_sgd:\n optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)\n else:\n optimizer = optim.Adam(model.parameters(), lr=args.lr, betas=(args.beta1, args.beta2))\n criterion = ModelLoss(device=device)\n\n # for epoch in range(1, args.epochs + 
1):\n for epoch in range(1, 7):\n train(args, model, device, train_loader, criterion, optimizer, epoch)\n test(args, model, device, test_loader, criterion)\n\n dataset_transforms = transforms.Compose([dl.ResampleImages(0.5),\n dl.SubsampleImages(0.5),\n dl.ToTensor(),\n dl.NormalizeImages(\n mean=[0.5, 0.5, 0.5],\n std=[0.5, 0.5, 0.5])\n ])\n train_set.transform = dataset_transforms\n test_set.transform = dataset_transforms\n train_loader = torch.utils.data.DataLoader(train_set, batch_size=args.batch_size, shuffle=True, **kwargs)\n test_loader = torch.utils.data.DataLoader(test_set, batch_size=args.test_batch_size, shuffle=True, **kwargs)\n\n for epoch in range(7, 13):\n train(args, model, device, train_loader, criterion, optimizer, epoch)\n test(args, model, device, test_loader, criterion)\n\n dataset_transforms = transforms.Compose([dl.ResampleImages(0.5),\n dl.ToTensor(),\n dl.NormalizeImages(\n mean=[0.5, 0.5, 0.5],\n std=[0.5, 0.5, 0.5])\n ])\n train_set.transform = dataset_transforms\n test_set.transform = dataset_transforms\n train_loader = torch.utils.data.DataLoader(train_set, batch_size=args.batch_size, shuffle=True, **kwargs)\n test_loader = torch.utils.data.DataLoader(test_set, batch_size=args.test_batch_size, shuffle=True, **kwargs)\n\n for epoch in range(13, 21):\n train(args, model, device, train_loader, criterion, optimizer, epoch)\n test(args, model, device, test_loader, criterion)\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.INFO, format='%(message)s')\n main()\n","sub_path":"random_scene/autoencoder/model01.py","file_name":"model01.py","file_ext":"py","file_size_in_byte":17294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"482059959","text":"#! /usr/bin/python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 12 12:32:28 2018\n\nCVPR17 (youtube) dataset:\nFor all mp4 files in subdirs \nalign a transcript to audio with gentle. \nOutputs JSON files.\n\n@author: tomek\n\"\"\"\n\nimport argparse\nimport logging\nimport multiprocessing\nimport pickle, glob\nimport gentle\n\nparser = argparse.ArgumentParser(\n description='CVPR17 (youtube) dataset: for all mp4 files in a current dir\\\n align a transcript to audio using gentle. 
Outputs JSON files.')\nparser.add_argument(\n '--nthreads', default=multiprocessing.cpu_count(), type=int,\n help='number of alignment threads')\nparser.add_argument(\n '--conservative', dest='conservative', action='store_true',\n help='conservative alignment')\nparser.set_defaults(conservative=False)\nparser.add_argument(\n '--disfluency', dest='disfluency', action='store_true',\n help='include disfluencies (uh, um) in alignment')\nparser.set_defaults(disfluency=False)\nparser.add_argument(\n '--log', default=\"INFO\",\n help='the log level (DEBUG, INFO, WARNING, ERROR, or CRITICAL)')\nparser.add_argument(\n '--picklefile', dest='picklefile', type=str,\n help='transcript dictionary pickle file - if not provided txt files are expected')\nargs = parser.parse_args()\n\ndef on_progress(p):\n for k,v in p.items():\n logging.debug(\"%s: %s\" % (k, v))\n \nlog_level = args.log.upper()\nlogging.getLogger().setLevel(log_level)\n\ndisfluencies = set(['uh', 'um'])\nresources = gentle.Resources()\n\nif args.picklefile:\n transcripts = pickle.load(open(args.picklefile, \"rb\"))\n \nmp4files = glob.glob(\"**/*.mp4\", recursive=True)\njsonfiles = glob.glob(\"**/*.json\", recursive=True)\nfor mp4file in mp4files:\n outfile = mp4file.replace(\"mp4\", \"json\")\n if outfile in jsonfiles: continue\n \n logging.info(mp4file + \": getting transcript\")\n if args.picklefile:\n transcript = transcripts[mp4file]\n else:\n txtfile = mp4file.replace(\"mp4\", \"txt\")\n fh = open(txtfile, 'r')\n transcript = fh.read()\n fh.close()\n \n logging.info(mp4file + \": converting audio to 8K sampled wav\")\n with gentle.resampled(mp4file) as wavfile:\n logging.info(mp4file + \": starting alignment\")\n aligner = gentle.ForcedAligner(resources, transcript, nthreads=args.nthreads, disfluency=args.disfluency, conservative=args.conservative, disfluencies=disfluencies)\n result = aligner.transcribe(wavfile, progress_cb=on_progress, logging=logging)\n \n if result.words:\n outfile = mp4file.replace(\"mp4\", \"json\")\n fh = open(outfile, 'w')\n fh.write(result.to_json(indent=2))\n fh.close()\n logging.info(mp4file + \": output written to %s\" % (outfile))\n else:\n logging.warning(mp4file + \": received empty output!\")\n\nprint(\"All done!\")\n","sub_path":"utils/CVPR17/mp4_align.py","file_name":"mp4_align.py","file_ext":"py","file_size_in_byte":2852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"91195481","text":"# Methods for creating and using LSTM Networks \r\n# for predicting stock prices\r\n\r\n# -Jacob Briones\r\n\r\nfrom StockData import (stock_df, plot_prices)\r\nimport pandas as pd\r\nimport numpy as np\r\nimport tensorflow as tf\r\nfrom tensorflow.keras.models import Sequential\r\nfrom tensorflow.keras.layers import Dense\r\nfrom tensorflow.keras.layers import LSTM\r\nfrom tensorflow.keras.layers import Dropout\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nimport matplotlib.pyplot as plt # needed by predict(plot=True) below\r\n\r\ndef reshape_inputs(inputs,K):\r\n inputs = np.array(inputs)\r\n return np.reshape(inputs,(inputs.shape[0], K ,1))\r\n \r\ndef create_dataset(ticker, startdate, interval, K, show_plot = False):\r\n df = stock_df(ticker, startdate, interval='1d')\r\n \r\n if show_plot == True:\r\n # Visualize data\r\n plot_prices(df)\r\n \r\n # Store Close Prices into array\r\n \r\n # Create MinMax scaler\r\n scaler = MinMaxScaler(feature_range = (0, 1))\r\n \r\n # Normalize Data using a minmax scaler\r\n data_size = len(list(df['Close'].values))\r\n \r\n # Create training data and test data\r\n
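 # NOTE: the split below is chronological (the oldest ~85% of rows are used\r\n # for training); shuffling a price series here would leak future prices\r\n # into the training set.\r\n train = 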
df.iloc[0:round(data_size*0.85)+1, 1:2].values\r\n \r\n # Keep one variable as a dataframe for formatting test data\r\n train_data = df.iloc[0:round(data_size*0.85)+1, 1:2]\r\n \r\n # Scale training data\r\n train_sc = scaler.fit_transform(train)\r\n \r\n # Format Test data\r\n test = df.iloc[round(data_size*0.85)+1:,1:2]\r\n dataset_total = pd.concat((train_data, test), axis = 0)\r\n test = dataset_total[len(dataset_total) - len(test) - K:].values\r\n test = test.reshape(-1,1)\r\n \r\n x_train, y_train =[], []\r\n \r\n for i in range(K, len(train_sc)):\r\n x_train.append(train_sc[(i-K):i])\r\n y_train.append(train_sc[i])\r\n\r\n x_train = reshape_inputs(list(x_train), K)\r\n y_train = np.array(y_train)\r\n \r\n return (x_train, y_train), test\r\n \r\n \r\n# Create Neural Network Architecture\r\n# x: training input\r\ndef create_model(x):\r\n model = Sequential()\r\n \r\n # Add 3 LSTM layers and 3 dropout layers(for preventing overfitting)\r\n model.add(LSTM(units = 50, \r\n return_sequences = True, \r\n input_shape = (x.shape[1], 1))) \r\n model.add(Dropout(0.2)) \r\n model.add(LSTM(units = 50, return_sequences = True)) \r\n model.add(Dropout(0.2))\r\n model.add(LSTM(units = 50))\r\n model.add(Dropout(0.2))\r\n model.add(Dense(units = 1))\r\n return model\r\n\r\n# load pre-trained model\r\ndef load_model(filepath):\r\n return tf.keras.models.load_model(filepath)\r\n\r\n# Train a given model on specified stock data\r\ndef train(x_train, y_train, model, num_epochs, batch_size, filepath = None):\r\n \r\n model.compile(optimizer = 'adam', loss = 'mean_squared_error')\r\n model.fit(x_train, y_train,epochs = num_epochs, batch_size = batch_size)\r\n if filepath != None:\r\n model.save(filepath) \r\n \r\n \r\ndef predict(test, model, plot = False):\r\n scaler = MinMaxScaler(feature_range = (0,1))\r\n scaler.fit(test)\r\n x_sc = scaler.transform(test)\r\n K = model.layers[0].input_shape[1]\r\n \r\n x_test = []\r\n for i in range(K, len(x_sc)):\r\n x_test.append(x_sc[i-K: i])\r\n x_test = reshape_inputs(x_test, K)\r\n predicted_price = model.predict(x_test)\r\n predicted_price = scaler.inverse_transform(predicted_price)\r\n \r\n if plot == True:\r\n test = test.reshape(len(list(test)))\r\n df= pd.DataFrame({'Prediction':predicted_price.reshape(len(list(predicted_price))),\r\n 'Actual':list(test)[40:] })\r\n fig, ax = plt.subplots(figsize=(15, 12), nrows = 1, ncols = 1)\r\n \r\n ax.plot(df['Prediction'],color='red',label='Prediction',marker='o', linestyle='--',lw=1.2)\r\n ax.plot(df['Actual'],color='teal',label='Actual',marker = 'o', linestyle='-',lw=1.2)\r\n plt.title('Model Predictiion',fontsize=24)\r\n ax.legend(fontsize=18)\r\n ax.set_ylabel(\"Price\", fontsize = 18)\r\n ax.set_xlabel(\"Time Step\", fontsize = 18)\r\n ax.grid()\r\n plt.show()\r\n \r\n return predicted_price.reshape(len(list(predicted_price)))\r\n","sub_path":"LSTM.py","file_name":"LSTM.py","file_ext":"py","file_size_in_byte":4096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"122198732","text":"# Final python code for project 3 of the Udacity Nanodegree\n# Programming for Data Science (Python + Version Control)\n\nimport time\nimport pandas as pd\nimport numpy as np\nimport datetime as dt\n\nCITY_DATA = { 'chicago': 'chicago.csv',\n 'new york city': 'new_york_city.csv',\n 'washington': 'washington.csv' }\n# start of added code\nmonths = ('january', 'february', 'march', 'april', 'may', 'june')\n\nweekdays = ('monday', 'tuesday', 'wednesday', 'thursday', 'friday',\n 
'saturday', 'sunday')\n\ndef choice(prompt, choices=('y', 'n')):\n\n while True:\n choice = input(prompt).lower().strip()\n # typing end will terminate the program\n if choice == 'end':\n raise SystemExit\n # for input with only one name\n elif ',' not in choice:\n if choice in choices:\n break\n # for input with more than one name\n elif ',' in choice:\n choice = [i.strip().lower() for i in choice.split(',')]\n if list(filter(lambda x: x in choices, choice)) == choice:\n break\n\n prompt = (\"\\nPython cannot process your input correctly. Please enter \"\n \"correctly spelled input in lower letters:\\n>\")\n\n return choice\n# end of added code\n\ndef get_filters():\n \"\"\"\n Asks user to specify a city, month, and day to analyze.\n\n Returns:\n (str) city - name of the city to analyze\n (str) month - name of the month to filter by, or \"all\" to apply no month filter\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n \"\"\"\n print('Hello! Let\\'s explore some US bikeshare data!')\n\n # TO DO: get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n # TO DO: get user input for month (all, january, february, ... , june)\n # TO DO: get user input for day of week (all, monday, tuesday, ... sunday)\n\n # start of added code for the first tasks:\n\n while True:\n city = choice(\"\\nWould you like to see data for Chicago, New York City, \"\n \"or Washington? Use commas to list the names.\\n>\",\n CITY_DATA.keys())\n month = choice(\"\\nWould you like to see data from January, February, \"\n \"March, April, May, or June? Feel free to list several \"\n \"months. Use commas to list the names.\\n>\", months)\n day = choice(\"\\nWould you like to see data from Monday, Tuesday, \"\n \"Wednesday, Thursday, Friday, Saturday or Sunday? 
\"\n \"Feel free to list several weekdays.\"\n \"Use commas to list the names.\\n>\", weekdays)\n\n # ask the user to confirm his/her input\n confirmation = choice(\"\\nAre you sure that you would like to apply \"\n \"the following filter(s) to the data:\"\n \"\\n\\n City/Cities: {}\\n Month(s): {}\\n Weekday(s)\"\n \": {}\\n\\n [y] Yes\\n [n] No\\n\\n>\"\n .format(city, month, day))\n if confirmation == 'y':\n break\n else:\n print(\"\\nPlease enter your desired filters for city, month and day once again!\")\n\n# end of added code\n\n print('-'*40)\n return city, month, day\n\n\ndef load_data(city, month, day):\n \"\"\"\n Loads data for the specified city and filters by month and day if applicable.\n\n Args:\n (str) city - name of the city to analyze\n (str) month - name of the month to filter by, or \"all\" to apply no month filter\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n Returns:\n df - Pandas DataFrame containing city data filtered by month and day\n \"\"\"\n# start of added code\n\n print(\"\\nPythong is loading the data considering your desired filters.\")\n start_time = time.time()\n\n # filter data concerning the entered city filters\n if isinstance(city, list):\n df = pd.concat(map(lambda city: pd.read_csv(CITY_DATA[city]), city),\n sort=True)\n # reorganize data frame\n try:\n df = df.reindex(columns=['Unnamed: 0', 'Start Time', 'End Time',\n 'Trip Duration', 'Start Station',\n 'End Station', 'User Type', 'Gender',\n 'Birth Year'])\n except:\n pass\n else:\n df = pd.read_csv(CITY_DATA[city])\n\n # create columns for time statistics\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['Month'] = df['Start Time'].dt.month\n df['Weekday'] = df['Start Time'].dt.weekday_name\n df['Start Hour'] = df['Start Time'].dt.hour\n\n # filter data concerning the desired month and weekday into new data frames\n if isinstance(month, list):\n df = pd.concat(map(lambda month: df[df['Month'] ==\n (months.index(month)+1)], month))\n else:\n df = df[df['Month'] == (months.index(month)+1)]\n\n if isinstance(day, list):\n df = pd.concat(map(lambda day: df[df['Weekday'] ==\n (day.title())], day))\n else:\n df = df[df['Weekday'] == day.title()]\n\n print(\"\\nThanks to the power of panda, this only took {} seconds.\".format((time.time() - start_time)))\n print('-'*40)\n\n# end of added code\n\n return df\n\n\ndef time_stats(df):\n \"\"\"Displays statistics on the most frequent times of travel.\"\"\"\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # TO DO: display the most common month\n # TO DO: display the most common day of week\n # TO DO: display the most common start hour\n\n# start of added code\n\n # display the most common month\n most_common_month = df['Month'].mode()[0]\n print('The most common month in your filtered data is: ' +\n str(months[most_common_month-1]).title() + '.')\n\n # display the most common day of week\n most_common_day = df['Weekday'].mode()[0]\n print('The most common day of the week in your filtered data is: ' +\n str(most_common_day) + '.')\n\n # display the most common start hour\n most_common_hour = df['Start Hour'].mode()[0]\n print('The most common start hour in your filtered data is: ' +\n str(most_common_hour) + '.')\n\n# end of added code\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n\ndef station_stats(df):\n \"\"\"Displays statistics on the most popular stations and trip.\"\"\"\n\n print('\\nCalculating The Most Popular 
Stations and Trip...\\n')\n start_time = time.time()\n\n # TO DO: display most commonly used start station\n # TO DO: display most commonly used end station\n # TO DO: display most frequent combination of start station and end station trip\n\n# start of added code\n\n # display most commonly used start station\n most_common_start_station = str(df['Start Station'].mode()[0])\n print(\"The most common start station in your filtered data is: \" +\n most_common_start_station)\n\n # display most commonly used end station\n most_common_end_station = str(df['End Station'].mode()[0])\n print(\"The most common end station in your filtered data is: \" +\n most_common_end_station)\n\n # display most frequent combination of start station and end station trip\n df['Start-End Combination'] = (df['Start Station'] + ' - ' +\n df['End Station'])\n most_common_start_end_combination = str(df['Start-End Combination']\n .mode()[0])\n print(\"The most common combination of start station and end sation \"\n \" in your filtered data is: \" + most_common_start_end_combination)\n\n# end of added code\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n\ndef trip_duration_stats(df):\n \"\"\"Displays statistics on the total and average trip duration.\"\"\"\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # TO DO: display total travel time\n # TO DO: display mean travel time\n\n # start of added code\n\n# display total travel time\n total_travel_time = df['Trip Duration'].sum()\n total_travel_time = (str(int(total_travel_time//86400)) +\n 'd ' +\n str(int((total_travel_time % 86400)//3600)) +\n 'h ' +\n str(int(((total_travel_time % 86400) % 3600)//60)) +\n 'm ' +\n str(int(((total_travel_time % 86400) % 3600) % 60)) +\n 's')\n print(\"The total travel time for your filtered data is : \" +\n total_travel_time + \".\")\n\n # display mean travel time\n mean_travel_time = df['Trip Duration'].mean()\n mean_travel_time = (str(int(mean_travel_time//60)) + 'm ' +\n str(int(mean_travel_time % 60)) + 's')\n print(\"The mean travel time for your filtered data is : \" +\n mean_travel_time + \".\")\n\n # end of added code\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n\ndef user_stats(df):\n \"\"\"Displays statistics on bikeshare users.\"\"\"\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # TO DO: Display counts of user types\n # TO DO: Display counts of gender\n # TO DO: Display earliest, most recent, and most common year of birth\n\n# start of added code\n\n# Display counts of user types\n user_types = df['User Type'].value_counts().to_string()\n print(\"Frequency of user types:\")\n print(user_types)\n\n # Display counts of gender\n try:\n gender_distribution = df['Gender'].value_counts().to_string()\n print(\"\\nFrequency of gender:\")\n print(gender_distribution)\n except KeyError:\n print(\"Sorry! 
Unfortunately, there are no counts of gender\")\n\n # Display earliest, most recent, and most common year of birth\n try:\n earliest_birth_year = str(int(df['Birth Year'].min()))\n print(\"\\nThe earliest year of birth in your filtered data is \"\n + earliest_birth_year)\n most_recent_birth_year = str(int(df['Birth Year'].max()))\n print(\"The most recent year of birth in your filtered data is \"\n + most_recent_birth_year)\n most_common_birth_year = str(int(df['Birth Year'].mode()[0]))\n print(\"The most common year of birth in your filtered data is \"\n + most_common_birth_year)\n except:\n print(\"Sorry! Unfortunately, there are no counts of birth years\")\n\n# end of added code\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n# start of added code\n# display raw data\n# created with the help of github: https://github.com/finish06/PDSND-Project-3\n\ndef raw_data(df):\n\n display_raw_input = input(\"\\nDo you want Python to display raw data? Enter 'yes' or 'no'\\n\").strip().lower()\n if display_raw_input in (\"yes\", \"y\"):\n i = 0\n\n while True:\n if (i + 5 > len(df.index) - 1):\n print(df.iloc[i:len(df.index), :])\n print(\"End of raw data was reached.\")\n break\n\n print(df.iloc[i:i+5, :])\n i += 5\n\n show_next_five_input = input(\"\\nWould you like to see the next 5 rows? Enter 'yes' or 'no'\\n\").strip().lower()\n if show_next_five_input not in (\"yes\", \"y\"):\n break\n\n# end of added code\n\ndef main():\n while True:\n city, month, day = get_filters()\n df = load_data(city, month, day)\n\n time_stats(df)\n station_stats(df)\n trip_duration_stats(df)\n user_stats(df)\n raw_data(df)\n\n restart = input('\\nWould you like to restart? Enter yes or no.\\n')\n if restart.lower() != 'yes':\n break\n\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"project_2_draft_5_final.py","file_name":"project_2_draft_5_final.py","file_ext":"py","file_size_in_byte":11942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"164387599","text":"# -*- coding: utf-8 -*-\nfrom abc import ABCMeta\n\nimport scrapy\nimport json\nfrom douyin.DevicesData import DeviceConfig, UserConfig\nfrom douyin.items import DouyinUserInfoItem\nfrom douyin.tools import RequestsStructure, FormStructure\nfrom douyin.tools import RandomTag\n\n\nclass UserInfoSpider(scrapy.Spider, metaclass=ABCMeta):\n name = 'userinfo'\n allowed_domains = []\n device_data = DeviceConfig.xiaomi\n random_tag = RandomTag()\n cursor = 0\n tag = ''\n\n def start_requests(self):\n form_structure = FormStructure()\n \"\"\" Runs when the spider starts; executed only once. \"\"\"\n urls = \"https://aweme.snssdk.com/aweme/v1/discover/search/?\" + self.device_data\n # RequestsStructure builds the request headers, including the XG signature headers\n requests_structure = RequestsStructure(self.device_data, cookies=UserConfig.cookie,\n x_tt_token=UserConfig.x_tt_token)\n header = requests_structure.get_header()\n self.tag = self.random_tag.choice_tag()\n formdata = form_structure.get_data(\n keyword=\"os\", cursor=self.cursor, count=10, type=1, is_pull_refresh=1, hot_search=0,\n search_source='', search_id='', query_correct_type=1\n )\n yield scrapy.FormRequest(url=urls, headers=header, formdata=formdata, callback=self.parse_sec_id)\n\n def parse_sec_id(self, response):\n form_structure = FormStructure()\n response_json = json.loads(response.text, encoding='utf-8')\n # print(response_json)\n user_info = response_json['user_list']\n print(user_info)\n for i in user_info:\n sec_uid = i['user_info']['sec_uid']\n
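 # NOTE: each search hit carries the user's sec_uid, which the user-detail\n # endpoint expects; RequestsStructure is assumed to sign the request (the\n # \"XG\" headers mentioned above) for the given query string.\n formdata = 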
form_structure.get_data(sec_user_id=sec_uid)\n from urllib import parse\n params1 = parse.urlencode(formdata)\n requests_structure = RequestsStructure(params=params1,\n cookies=UserConfig.cookie, x_tt_token=UserConfig.x_tt_token)\n headers = requests_structure.get_header()\n urls = 'https://aweme-eagle.snssdk.com/aweme/v1/user/?' + params1\n yield scrapy.Request(url=urls, headers=headers, body=params1, callback=self.parse_user_info,)\n\n urls = \"https://aweme.snssdk.com/aweme/v1/discover/search/?\" + self.device_data\n self.cursor += len(user_info)\n requests_structure = RequestsStructure(self.device_data, cookies=UserConfig.cookie,\n x_tt_token=UserConfig.x_tt_token)\n header = requests_structure.get_header()\n formdata = form_structure.get_data(\n keyword=self.tag, cursor=self.cursor, count=10, type=1, is_pull_refresh=1, hot_search=0,\n search_source='', search_id='', query_correct_type=1\n )\n print(\"current cursor position: %d, current keyword: %s\" % (self.cursor, self.tag))\n yield scrapy.FormRequest(url=urls, headers=header, formdata=formdata,\n callback=self.parse_sec_id)\n\n def parse_user_info(self, response):\n \"\"\" Reached the user-info page; parse the user's data. \"\"\"\n user_info_item = DouyinUserInfoItem()\n response_json = json.loads(response.text, encoding='utf-8')\n print(response_json)\n user_info_data = response_json['user']\n print(user_info_data)\n\n if user_info_data['uid']:\n user_info_item['uid'] = str(user_info_data['uid']) # uid\n user_info_item['sec_uid'] = user_info_data['sec_uid'] # sec_uid\n user_info_item['nickname'] = user_info_data['nickname'] # nickname\n\n user_info_item['province'] = 'null'\n if user_info_data['province']:\n user_info_item['province'] = user_info_data['province']\n\n # check whether birthday data exists\n user_info_item['birthday'] = 'null' # birthday\n if user_info_data['birthday']:\n user_info_item['birthday'] = user_info_data['birthday']\n\n user_info_item['city'] = 'null'\n if user_info_data['city']:\n user_info_item['city'] = user_info_data['city']\n\n user_info_item['location'] = 'null'\n if str(user_info_data['hide_location']) != 'True':\n user_info_item['location'] = user_info_data['location']\n\n user_info_item['fans_count'] = str(user_info_data['mplatform_followers_count']) # follower count\n\n user_info_item['following_count'] = str(user_info_data['following_count']) # following count\n\n user_info_item['total_favorited'] = str(user_info_data['total_favorited']) # total likes received\n\n user_info_item['aweme_count'] = str(user_info_data['aweme_count']) # video count\n\n user_info_item['avatar_thumb'] = user_info_data['avatar_thumb'][\"url_list\"][0] # avatar\n\n user_info_item['classify'] = self.tag\n\n user_info_item['signature'] = 'null'\n if user_info_data['signature']:\n user_info_item['signature'] = user_info_data['signature'] # signature\n yield user_info_item\n\n\nif __name__ == '__main__':\n from scrapy import cmdline\n cmdline.execute(\"scrapy crawl userinfo\".split())","sub_path":"douyin/spiders/UserSpider.py","file_name":"UserSpider.py","file_ext":"py","file_size_in_byte":5119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"171769130","text":"# Copyright (c) Microsoft Corporation\n# All rights reserved.\n#\n# MIT License\n#\n# Permission is hereby granted, free of charge,\n# to any person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the Software without restriction,\n# including without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and\n# to permit persons to whom the 
Software is furnished to do so, subject to the following conditions:\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING\n# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,\n# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nimport os\nfrom pyhdfs import HdfsClient\n\ndef copyDirectoryToHdfs(localDirectory, hdfsDirectory, hdfsClient):\n '''Copy directory from local to hdfs'''\n if not os.path.exists(localDirectory):\n raise Exception('Local Directory does not exist!')\n hdfsClient.mkdirs(hdfsDirectory)\n result = True\n for file in os.listdir(localDirectory):\n file_path = os.path.join(localDirectory, file)\n if os.path.isdir(file_path):\n hdfs_directory = os.path.join(hdfsDirectory, file)\n try:\n result = result and copyDirectoryToHdfs(file_path, hdfs_directory, hdfsClient)\n except Exception as exception:\n print(exception)\n result = False\n else:\n hdfs_file_path = os.path.join(hdfsDirectory, file)\n try:\n result = result and copyFileToHdfs(file_path, hdfs_file_path, hdfsClient)\n except Exception as exception:\n print(exception)\n result = False\n return result\n\ndef copyFileToHdfs(localFilePath, hdfsFilePath, hdfsClient, override=True):\n '''Copy a local file to hdfs directory'''\n if not os.path.exists(localFilePath):\n raise Exception('Local file Path does not exist!')\n if os.path.isdir(localFilePath):\n raise Exception('localFile should not a directory!')\n if hdfsClient.exists(hdfsFilePath):\n if override:\n hdfsClient.delete(hdfsFilePath)\n else:\n return False\n try:\n hdfsClient.copy_from_local(localFilePath, hdfsFilePath)\n return True\n except Exception as exception:\n print(exception)\n return False","sub_path":"tools/trial_tool/hdfsClientUtility.py","file_name":"hdfsClientUtility.py","file_ext":"py","file_size_in_byte":2805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"636824164","text":"# Copyright (c) 2019 NVIDIA Corporation\nimport nemo\nimport nemo_nlp\nfrom nemo_nlp.callbacks.translation import eval_iter_callback, \\\n eval_epochs_done_callback_wer\nfrom nemo.core.callbacks import CheckpointCallback\nfrom nemo.utils.lr_policies import CosineAnnealing\n\nparser = nemo.utils.NemoArgParser(description='ASR postprocessor')\nparser.set_defaults(\n train_dataset=\"train_8192\",\n eval_datasets=[\"dev_clean\", \"dev_other\", \"test_clean\", \"test_other\"],\n work_dir=\"asr_correction\",\n optimizer=\"novograd\",\n num_epochs=300,\n batch_size=4096,\n eval_batch_size=1024,\n lr=0.02,\n beta1=0.95,\n beta2=0.25,\n weight_decay=0,\n max_steps=300000,\n iter_per_step=1,\n checkpoint_save_freq=10000,\n eval_freq=5000)\nparser.add_argument(\"--warmup_steps\", default=4000, type=int)\nparser.add_argument(\"--d_model\", default=512, type=int)\nparser.add_argument(\"--d_embedding\", default=512, type=int)\nparser.add_argument(\"--d_inner\", default=2048, type=int)\nparser.add_argument(\"--num_layers\", default=6, type=int)\nparser.add_argument(\"--num_attn_heads\", default=8, type=int)\nparser.add_argument(\"--embedding_dropout\", default=0.1, 
type=float)\nparser.add_argument(\"--ffn_dropout\", default=0.1, type=float)\nparser.add_argument(\"--attn_score_dropout\", default=0.1, type=float)\nparser.add_argument(\"--attn_layer_dropout\", default=0.1, type=float)\nparser.add_argument(\"--data_root\", default=\"/dataset/\", type=str)\nparser.add_argument(\"--src_lang\", default=\"pred\", type=str)\nparser.add_argument(\"--tgt_lang\", default=\"real\", type=str)\nparser.add_argument(\"--beam_size\", default=4, type=int)\nparser.add_argument(\"--len_pen\", default=0.0, type=float)\nparser.add_argument(\"--tokenizer_model\", default=\"m_common_8192.model\", type=str)\nparser.add_argument(\"--label_smoothing\", default=0.1, type=float)\nparser.add_argument(\"--tie_enc_dec\", action=\"store_true\")\nparser.add_argument(\"--tie_enc_softmax\", action=\"store_true\")\nparser.add_argument(\"--tie_projs\", action=\"store_true\")\nparser.add_argument(\"--share_encoder_layers\", action=\"store_true\")\nparser.add_argument(\"--share_decoder_layers\", action=\"store_true\")\nparser.add_argument(\"--fp16\", default=2, type=int)\nargs = parser.parse_args()\n\n# Start Tensorboard X for logging\ntb_name = \"asr_postprocessor-lr{0}-opt{1}-warmup{2}-{3}-bs{4}\".format(\n args.lr, args.optimizer, args.warmup_steps, \"poly\", args.batch_size)\n\nif args.fp16 == 2:\n opt_level = nemo.core.Optimization.mxprO2\nelif args.fp16 == 1:\n opt_level = nemo.core.Optimization.mxprO1\nelse:\n opt_level = nemo.core.Optimization.mxprO0\n\nneural_factory = nemo.core.NeuralModuleFactory(\n local_rank=args.local_rank,\n optimization_level=opt_level,\n log_dir=args.work_dir,\n tensorboard_dir=tb_name,\n)\n\ntokenizer = nemo_nlp.YouTokenToMeTokenizer(\n model_path=f\"{args.data_root}/{args.tokenizer_model}\")\nvocab_size = tokenizer.vocab_size\n\nmax_sequence_length = 512\n\ntrain_data_layer = nemo_nlp.TranslationDataLayer(\n factory=neural_factory,\n tokenizer_src=tokenizer,\n tokenizer_tgt=tokenizer,\n dataset_src=args.data_root + args.train_dataset + \".\" + args.src_lang,\n dataset_tgt=args.data_root + args.train_dataset + \".\" + args.tgt_lang,\n tokens_in_batch=args.batch_size,\n clean=True)\n\neval_data_layers = {}\n\nfor key in args.eval_datasets:\n eval_data_layers[key] = nemo_nlp.TranslationDataLayer(\n factory=neural_factory,\n tokenizer_src=tokenizer,\n tokenizer_tgt=tokenizer,\n dataset_src=args.data_root + key + \".\" + args.src_lang,\n dataset_tgt=args.data_root + key + \".\" + args.tgt_lang,\n tokens_in_batch=args.eval_batch_size,\n clean=False)\n\nencoder = nemo_nlp.TransformerEncoderNM(\n factory=neural_factory,\n d_embedding=args.d_embedding,\n d_model=args.d_model,\n d_inner=args.d_inner,\n num_layers=args.num_layers,\n num_attn_heads=args.num_attn_heads,\n ffn_dropout=args.ffn_dropout,\n vocab_size=vocab_size,\n attn_score_dropout=args.attn_score_dropout,\n attn_layer_dropout=args.attn_layer_dropout,\n max_seq_length=max_sequence_length,\n embedding_dropout=args.embedding_dropout,\n share_all_layers=args.share_encoder_layers,\n hidden_act=\"gelu\")\n\ndecoder = nemo_nlp.TransformerDecoderNM(\n factory=neural_factory,\n d_embedding=args.d_embedding,\n d_model=args.d_model,\n d_inner=args.d_inner,\n num_layers=args.num_layers,\n num_attn_heads=args.num_attn_heads,\n ffn_dropout=args.ffn_dropout,\n vocab_size=vocab_size,\n attn_score_dropout=args.attn_score_dropout,\n attn_layer_dropout=args.attn_layer_dropout,\n max_seq_length=max_sequence_length,\n embedding_dropout=args.embedding_dropout,\n share_all_layers=args.share_encoder_layers,\n 
hidden_act=\"gelu\")\n\nlog_softmax = nemo_nlp.TransformerLogSoftmaxNM(\n factory=neural_factory,\n vocab_size=vocab_size,\n d_model=args.d_model,\n d_embedding=args.d_embedding)\n\nbeam_translator = nemo_nlp.BeamSearchTranslatorNM(\n factory=neural_factory,\n decoder=decoder,\n log_softmax=log_softmax,\n max_seq_length=max_sequence_length,\n beam_size=args.beam_size,\n length_penalty=args.len_pen,\n bos_token=tokenizer.bos_id(),\n pad_token=tokenizer.pad_id(),\n eos_token=tokenizer.eos_id())\n\nloss = nemo_nlp.PaddedSmoothedCrossEntropyLossNM(\n factory=neural_factory,\n pad_id=tokenizer.pad_id(),\n smoothing=0.1)\n\nloss_eval = nemo_nlp.PaddedSmoothedCrossEntropyLossNM(\n factory=neural_factory,\n pad_id=tokenizer.pad_id(),\n smoothing=0.0)\n\n# tie weight of embedding and log_softmax layers\nif args.tie_enc_dec:\n decoder.embedding_layer.token_embedding.weight = \\\n encoder.embedding_layer.token_embedding.weight\n if args.tie_projs:\n decoder.embedding_layer.token2hidden.weight = \\\n encoder.embedding_layer.token2hidden.weight\n\nif args.tie_enc_softmax:\n log_softmax.log_softmax.dense.weight = \\\n encoder.embedding_layer.token_embedding.weight\n if args.tie_projs:\n log_softmax.log_softmax.hidden2token.weight = \\\n encoder.embedding_layer.token2hidden.weight\n\n# training pipeline\nsrc, src_mask, tgt, tgt_mask, labels, sent_ids = train_data_layer()\nsrc_hiddens = encoder(input_ids=src, input_mask_src=src_mask)\ntgt_hiddens = decoder(input_ids_tgt=tgt,\n hidden_states_src=src_hiddens,\n input_mask_src=src_mask,\n input_mask_tgt=tgt_mask)\nlog_probs = log_softmax(hidden_states=tgt_hiddens)\ntrain_loss = loss(log_probs=log_probs, target_ids=labels)\n\n# evaluation pipelines\nsrc_ = {}\nsrc_mask_ = {}\ntgt_ = {}\ntgt_mask_ = {}\nlabels_ = {}\nsent_ids_ = {}\ninput_type_ids_ = {}\nsrc_hiddens_ = {}\ntgt_hiddens_ = {}\nlog_probs_ = {}\neval_loss_ = {}\nbeam_trans_ = {}\n\nfor key in args.eval_datasets:\n src_[key], src_mask_[key], tgt_[key], tgt_mask_[key], \\\n labels_[key], sent_ids_[key] = eval_data_layers[key]()\n src_hiddens_[key] = encoder(\n input_ids=src_[key], input_mask_src=src_mask_[key])\n tgt_hiddens_[key] = decoder(\n input_ids_tgt=tgt_[key],\n hidden_states_src=src_hiddens_[key],\n input_mask_src=src_mask_[key],\n input_mask_tgt=tgt_mask_[key])\n log_probs_[key] = log_softmax(hidden_states=tgt_hiddens_[key])\n eval_loss_[key] = loss_eval(\n log_probs=log_probs_[key],\n target_ids=labels_[key])\n beam_trans_[key] = beam_translator(\n hidden_states_src=src_hiddens_[key],\n input_mask_src=src_mask_[key])\n\n\ndef print_loss(x):\n loss = x[0].item()\n neural_factory.logger.info(\"Training loss: {:.4f}\".format(loss))\n\n\n# Create evaluation callbacks\ncallback_train = nemo.core.SimpleLossLoggerCallback(\n tensors=[train_loss],\n step_freq=100,\n print_func=print_loss,\n get_tb_values=lambda x: [[\"loss\", x[0]]],\n tb_writer=neural_factory.tb_writer)\n\ncallbacks = [callback_train]\n\nfor key in args.eval_datasets:\n\n callback = nemo.core.EvaluatorCallback(\n eval_tensors=[\n tgt_[key], eval_loss_[key], beam_trans_[key], sent_ids_[key]\n ],\n user_iter_callback=lambda x, y: eval_iter_callback(x, y, tokenizer),\n user_epochs_done_callback=eval_epochs_done_callback_wer,\n eval_step=args.eval_freq,\n tb_writer=neural_factory.tb_writer)\n\n callbacks.append(callback)\n\ncheckpointer_callback = CheckpointCallback(\n folder=args.work_dir, step_freq=args.checkpoint_save_freq)\ncallbacks.append(checkpointer_callback)\n\n# define learning rate decay policy\nlr_policy = 
CosineAnnealing(args.max_steps, warmup_steps=args.warmup_steps)\n\n# Create trainer and execute training action\nneural_factory.train(\n tensors_to_optimize=[train_loss],\n callbacks=callbacks,\n optimizer=args.optimizer,\n lr_policy=lr_policy,\n optimization_params={\n \"num_epochs\": args.num_epochs,\n \"lr\": args.lr,\n \"weight_decay\": args.weight_decay,\n \"betas\": (0.95, 0.25)},\n batches_per_step=args.iter_per_step)\n","sub_path":"examples/nlp/asr.py","file_name":"asr.py","file_ext":"py","file_size_in_byte":8935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"122499856","text":"#!/usr/bin/env python\n\nimport boto3\nimport argparse\nimport json\nimport StringIO\nimport sys\nimport math\nimport hashlib\nfrom treehash import TreeHash\n\n\n# Constants\n\nHTTP_SUCCESS_LOW = 200\nHTTP_SUCCESS_HIGH = 226\nDEFAULT_ACCOUNT_ID = \"-\"\nDEFAULT_CHUNK_SIZE = 1024 ** 3\nDEFAULT_OUTPUT_PATH = \".\"\nDEFAULT_HASH_CHUNK_SIZE = 512 ** 2\n\n\n# Global Variables\n\nDebug = False\n\nchunk_count = 0\n\n\ndef set_debug(flag):\n global Debug\n Debug = flag\n\ndef next_power_of_2(num):\n return int(pow(2, round(math.log(num)/math.log(2))))\n\ndef is_power_of_2(num):\n return num != 0 and ((num & (num - 1)) == 0)\n\n\ndef running_treehash_on_file_range(treehash, filename, start, end, hash_chunk_size=DEFAULT_HASH_CHUNK_SIZE):\n\n infile = open(filename, \"rb\")\n\n infile.seek(start)\n\n if Debug:\n print(\"Treehash: Start: \" + str(start) + \", End: \" + str(end))\n\n current_pos = start\n end += 1\n while current_pos < end:\n read_size = end - current_pos\n if read_size > hash_chunk_size:\n read_size = hash_chunk_size\n treehash.update(infile.read(read_size))\n current_pos += read_size\n infile.close()\n if Debug:\n print(\"TreeHash for this section (\" + str(start) + \" to \" + str(end) + \") is \" + treehash.hexdigest())\n\ndef sha256_on_file_range(filename, start, end, hash_chunk_size=DEFAULT_HASH_CHUNK_SIZE):\n\n sha256 = hashlib.sha256()\n infile = open(filename, \"rb\")\n\n treehash = TreeHash(algo=hashlib.sha256)\n infile.seek(start)\n\n if Debug:\n print(\"Running Hash: Start: \" + str(start) + \", End: \" + str(end))\n\n current_pos = start\n end += 1\n while current_pos < end:\n read_size = end - current_pos\n if read_size > hash_chunk_size:\n read_size = hash_chunk_size\n\n chunk = infile.read(read_size)\n\n sha256.update(chunk)\n treehash.update(chunk)\n current_pos += read_size\n infile.close()\n\n if Debug:\n print(\"Running hash for this section (\" + str(start) + \" to \" + str(end) + \") is \" + sha256.hexdigest())\n print(\"Tree hash for this section (\" + str(start) + \" to \" + str(end) + \") is \" + treehash.hexdigest())\n\n return sha256.hexdigest()\n\n\ndef process_archive_retrieval_job(job,chunk_size,output_path,friendly_name=False):\n global chunk_count \n\n filepos_limit = job.archive_size_in_bytes - 1\n current_pos = 0\n job_archive_hash = job.archive_sha256_tree_hash\n chunk_count = 0\n archive_file_name = output_path + \"/\" + job.id + \".archive\"\n archive_file = open(archive_file_name, \"wb\")\n treehash = TreeHash(algo=hashlib.sha256)\n while current_pos < filepos_limit:\n end_pos = current_pos + (chunk_size - 1)\n if end_pos > filepos_limit:\n end_pos = filepos_limit\n\n range_string = \"bytes=\" + str(current_pos) + \"-\" + str(end_pos)\n\n response = job.get_output(\n range=range_string\n )\n\n if Debug:\n print(\"process_archive_retrieval_job: job.get_output() response: \" + str(response))\n\n if 
HTTP_SUCCESS_LOW <= response['status'] <= HTTP_SUCCESS_HIGH:\n chunk_count += 1\n\n if Debug:\n #print(\"Writing chunk \" + str(chunk_count) + \" \" + range_string + \" Checksum: \" + response['checksum'] + \" ContentRange: \" + response['contentRange'] + \" AcceptRanges: \" + response['acceptRanges'] + \" ContentType: \" + response['contentType'] + \" ArchiveDescription: \" + response['archiveDescription'])\n print(\"Writing chunk \" + str(chunk_count) + \" \" + range_string + \" Checksum: \" + response['checksum'])\n\n #archive_file.write(response['body'].read())\n chunk_bytes=response['body'].read()\n archive_file.write(chunk_bytes)\n\n if Debug:\n chunk_file = open(archive_file_name + \".chunk.\" + str(chunk_count), \"wb\")\n chunk_file.write(chunk_bytes)\n chunk_file.close()\n\n section_hash = sha256_on_file_range(archive_file_name, current_pos, end_pos)\n running_treehash_on_file_range(treehash, archive_file_name, current_pos, end_pos)\n\n if Debug:\n print(\"Local checksum of chunk \" + str(chunk_count) + \": \" + section_hash)\n print(\"Current running treehash is \" + treehash.hexdigest())\n \n current_pos = end_pos + 1\n else:\n print(\"Response unsuccessful. Retrying\")\n\n archive_file.close()\n\n\ndef process_inventory_retrieval_job(job,output_path,friendly_name=False):\n response = job.get_output()\n\n if Debug:\n print(\"process_inventory_retrieval_job: job.get_output() response: \" + str(response))\n\n if HTTP_SUCCESS_LOW <= response['status'] <= HTTP_SUCCESS_HIGH:\n if friendly_name:\n output_name = \"Inventory_completed_\" + job.completion_date + \".json\"\n else:\n output_name = job.id + \".inventory.json\"\n inventory_file = open(output_path + \"/\" + output_name, \"wb\")\n inventory_file.write(response['body'].read())\n inventory_file.close()\n else:\n print(\"HTTP Return code \" + str(response['status']) + \" indicates unsuccessful retrieval of job output for Job ID \" + str(job.id) + \". Please try again.\")\n sys.exit(1)\n\n\ndef process_job(job,chunk_size,output_path,friendly_name=False):\n job.load()\n\n if Debug:\n print(\"Job Action: \" + str(job.action))\n print(\"Job Archive ID: \" + str(job.archive_id))\n print(\"Job Archive SHA256 Tree Hash: \" + str(job.archive_sha256_tree_hash))\n print(\"Job Archive Size in Bytes: \" + str(job.archive_size_in_bytes))\n print(\"Job Completed: \" + str(job.completed))\n print(\"Job Completion Date: \" + str(job.completion_date))\n print(\"Job Creation Date: \" + str(job.creation_date))\n print(\"Job Inventory Retrieval Parameters: \" + str(job.inventory_retrieval_parameters))\n print(\"Job Inventory Size in Bytes: \" + str(job.inventory_size_in_bytes))\n print(\"Job Description: \" + str(job.job_description))\n print(\"Job ID: \" + str(job.job_id))\n print(\"Job Retrieval Byte Range: \" + str(job.retrieval_byte_range))\n print(\"Job SHA256 Tree Hash: \" + str(job.sha256_tree_hash))\n print(\"Job SNS Topic: \" + str(job.sns_topic))\n print(\"Job Status Code: \" + str(job.status_code))\n print(\"Job Status Message: \" + str(job.status_message))\n print(\"Job Vault ARN: \" + str(job.vault_arn))\n\n if job.status_code == \"Succeeded\":\n if job.action == \"InventoryRetrieval\":\n process_inventory_retrieval_job(job,output_path,friendly_name)\n else:\n process_archive_retrieval_job(job,chunk_size,output_path,friendly_name)\n else:\n print(\"Fatal error, job status is \" + job.status_code + \". Exiting.\")\n sys.exit(1)\n\n
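\n# Integrity-check sketch (not part of the original script): inside\n# process_archive_retrieval_job the running tree hash accumulated for the\n# downloaded archive could be compared with the hash Glacier reported for\n# the job (job_archive_hash above is read but never checked), e.g.:\n#\n#     if treehash.hexdigest() != job_archive_hash:\n#         print(\"Tree hash mismatch - downloaded archive may be corrupt\")\n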
\n# Main loop\n\ndef main():\n\n global Debug\n\n # Parse command line options\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--account\", help=\"Account ID\", default=DEFAULT_ACCOUNT_ID)\n parser.add_argument(\"--vault\", help=\"Vault Name\", required=True)\n parser.add_argument(\"--jobid\", help=\"Job ID. Supplying a Job ID of \\\"Any\\\" will process all completed jobs with status code \\\"Succeeded\\\".\", required=True)\n parser.add_argument(\"--outputpath\", help=\"Path to store output\", default=DEFAULT_OUTPUT_PATH)\n parser.add_argument(\"--chunksize\", help=\"Size of the chunks to use for download. Only valid if the job is ArchiveRetrieval.\", default=DEFAULT_CHUNK_SIZE)\n parser.add_argument(\"--debug\", help=\"Print Debug messages\", action=\"store_true\")\n parser.add_argument(\"--friendlyname\", help=\"Use friendly names\", action=\"store_true\")\n args = parser.parse_args()\n\n Debug = args.debug\n chunksize = int(args.chunksize)\n\n if not is_power_of_2(chunksize):\n print(\"Chunksize \" + str(chunksize) + \" is not a power of two. The next closest power of two is \" + str(next_power_of_2(chunksize)))\n print(\"Exiting.\")\n sys.exit(1)\n\n client = boto3.client('glacier')\n glacier = boto3.resource('glacier')\n\n if (args.jobid.lower() == \"any\") or (args.jobid.lower() == \"inventory\") or (args.jobid.lower() == \"archive\") :\n response = client.list_jobs(\n vaultName = args.vault,\n statuscode=\"Succeeded\"\n )\n\n if Debug:\n print(\"client.list_jobs() response: \" + str(response))\n\n for jobitem in response['JobList']:\n job = glacier.Job(\n account_id=args.account,\n vault_name=args.vault,\n id=jobitem['JobId']\n )\n if (args.jobid.lower() == \"inventory\") and (job.action == \"InventoryRetrieval\"):\n process_job(job, chunksize, args.outputpath, args.friendlyname)\n elif (args.jobid.lower() == \"archive\") and (job.action == \"ArchiveRetrieval\"):\n process_job(job, chunksize, args.outputpath, args.friendlyname)\n elif (args.jobid.lower() == \"any\"):\n process_job(job, chunksize, args.outputpath, args.friendlyname)\n \n \n else:\n job = glacier.Job(\n account_id=args.account, \n vault_name=args.vault, \n id=args.jobid\n )\n process_job(job, chunksize, args.outputpath, args.friendlyname)\n\n\n\nif __name__ == \"__main__\":\n main()\n\n\n\n","sub_path":"ason/ProcessJob_old.py","file_name":"ProcessJob_old.py","file_ext":"py","file_size_in_byte":9340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"54962506","text":"# encoding:utf-8\nimport pytest,urllib3\nfrom config.conf import config_file\nfrom util.runMethod import RunMethod\nurllib3.disable_warnings()\n\nrun = RunMethod()\nserver_ip = config_file().server_ip()\n@pytest.fixture(scope='session')\ndef login():\n url = server_ip + '/uc/v1/login'\n data = {\n \"account\": '15888453304',\n \"user_pwd\": '11111',\n \"login_type\":'1'\n }\n header ={\n \"Accept\": \"application/json, text/javascript, */*; q=0.01\",\n \"Accept-Encoding\": \"gzip, deflate, br\",\n \"Accept-Language\": \"zh-CN,zh;q=0.8\",\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.104 Safari/537.36 Core/1.53.4620.400 QQBrowser/9.7.13014.400\",\n \"X-Requested-With\": \"XMLHttpRequest\",\n \"Connection\": \"keep-alive\",\n \"Content-Type\": \"application/x-www-form-urlencoded\",\n \"source\": \"web\"\n }\n
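 # run.post_main wraps the HTTP POST (see util.runMethod); it is assumed here to return the parsed JSON body, from which the token is read below via ['data']['token'].\n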
 #res = requests.post(url=url,data=data,headers=header,verify =False)\n res_token = run.post_main(url,data,header)['data']['token']\n #res_token =json.loads(res).encode('utf-8').decode(\"unicode_escape\")\n #print(res_token)\n yield res_token\n\n\n","sub_path":"requests_pytest_clj/test_case/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"181290238","text":"import argparse\nimport zmq\nimport time\nimport sys\nimport caffe\n\nparser = argparse.ArgumentParser(description='provide the connection address')\nparser.add_argument('--addr')\nargs = parser.parse_args()\n\n\nif args.addr:\n\n # client\n context = zmq.Context()\n socket = context.socket(zmq.REQ)\n try:\n socket.connect(args.addr)\n print('a client connected to '+str(args.addr))\n except:\n print('ERROR: cannot connect to '+str(args.addr))\n \n\n\n for i in range(10):\n time.sleep(1)\n\n sent_msg = str(i)\n socket.send_string(sent_msg)\n print('client sent the message: ' + sent_msg)\n \n received_msg = socket.recv().decode()\n print('client received the message: ' + received_msg)\n\nelse:\n print('there is no address!')","sub_path":"Inter Process Communication via socket tcp/my_client.py","file_name":"my_client.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"553251859","text":"from django.shortcuts import render, get_object_or_404\nfrom learn.models import *\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import Http404\n\n\n@login_required\ndef course(request, course_id):\n values = dict()\n\n values['course'] = get_object_or_404(Course, pk=course_id)\n\n if values['course'] in request.user.course.all():\n values['modules'] = values['course'].modules.all()\n values['title'] = values['course'].title+\" - Modules\"\n return render(request, \"modules_overview.html\", values)\n else:\n raise Http404","sub_path":"CM2301/learn/views/course.py","file_name":"course.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"292609358","text":"from PyQt5.QtGui import QTransform\nfrom Stopwatch import *\nfrom View import *\n\nimport settings\n\nclass MainWnd(QWidget):\n\n\n def __init__(self):\n super().__init__()\n\n # squares on the scene\n self.squares = []\n # the user's answer\n self.ans = []\n # the correct answer\n self.true_ans = []\n # time allowed for memorizing\n\n # initialize the buttons, screwdrivers, gears, etc.\n initUI(self)\n self.stopWatch = Stopwatch(settings.TIME, self.btn_start_game, self.label_timer,self.label,self)\n # handler for the \"confirm\" button\n def confirm(self):\n self.ans = []\n for y in range(0, settings.COLS ):\n for x in range(0, settings.ROWS):\n y_cord = settings.MARGIN + settings.XYSIDE * y + 1\n x_cord = settings.MARGIN + settings.MARGIN2 + settings.XYSIDE * x + 1\n #print(x_cord,y_cord)\n u = -1\n # in case a cell in the field happens to be empty...\n try:\n u = self.scene.itemAt(x_cord, y_cord, QTransform())\n index = self.squares.index(u)\n #print(u)\n except Exception:\n continue\n\n self.ans.append(self.squares[index].number)\n if self.ans == self.true_ans:\n self.label.setText(\"Верно!\")\n else:\n self.label.setText(\"Неверно!\")\n self.btn_start_game.setEnabled(True)\n self.btn_confirm.setEnabled(False)\n self.btn_change_time.setEnabled(True)\n self.comboBox_size.setEnabled(True)\n\n def start_game(self):\n self.btn_change_time.setEnabled(False)\n 
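# Freeze the grid-size selector as well; resizing the board mid-round would invalidate the stored answer key.\n 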
self.comboBox_size.setEnabled(False)\n print(settings.TIME)\n self.shuffle()\n text2 = 'Запоминайте расположение квадратов'\n self.squares_movable(0)\n for elem in self.squares:\n print(elem.ItemIsMovable)\n self.label.setText(text2)\n self.stopWatch.start(1)\n\n\n def shuffle(self):\n try:\n print('я начинаю мешать')\n numbers = [i for i in range(1, settings.ROWS * settings.COLS + 1, 1)]\n random.shuffle(numbers)\n self.true_ans = numbers\n set_img_numbers(self, numbers)\n except Exception as e:\n print(e)\n\n def squares_movable(self,bool):\n for elem in self.squares:\n elem.setFlag(QGraphicsItem.ItemIsMovable, bool)\n elem.setFlag(QGraphicsItem.ItemIsSelectable, bool)\n\n def change_time(self):\n try:\n t = int(self.lineEdit_time.text())\n '''\n self.stopWatch.sec = settings.TIME\n self.stopWatch.set_settings.TIME()\n '''\n settings.TIME = t\n self.stopWatch.reset()\n except Exception as e:\n print(e)\n return\n #print(settings.TIME)\n\n def onActivated(self, snumber):\n try:\n settings.ROWS = int(snumber)\n settings.COLS = int(snumber)\n\n self.graphicsView.viewport().update()\n set_img_numbers(self, list(range(1, settings.ROWS * settings.COLS + 1, 1)))\n except Exception as e:\n print(e)\n\n # a stub for future work?\n '''\n def AnimeButton_clicked(self):\n try:\n self.animation = QPropertyAnimation(AnimSquare(self.squares[0]), b'pos')\n self.animation.setDuration(200)\n self.animation.setStartValue(QPointF(0, 0))\n self.animation.setKeyValueAt(0.3, QPointF(0, 30))\n self.animation.setKeyValueAt(0.5, QPointF(0, 60))\n self.animation.setKeyValueAt(0.8, QPointF(0, 90))\n self.animation.setEndValue(QPointF(0, 120))\n self.animation.start()\n \n self.animation = QPropertyAnimation(AnimSquare(squares[0]), b'angle')\n self.animation.setDuration(8000)\n self.animation.setStartValue(-90)\n self.animation.setKeyValueAt(0.3, -10)\n self.animation.setKeyValueAt(0.5, 0)\n self.animation.setKeyValueAt(0.8, 10)\n self.animation.setEndValue(30)\n self.animation.start()\n \n '''\n\ndef get_TIME(self):\n return settings.TIME\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n ex = MainWnd()\n ex.show()\n app.exec_()\n","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":4622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"42613079","text":"'''\nSend a text message with Twilio: first create a Twilio account and get the\naccount SID and auth token, put your Twilio phone number in the from_ field,\nand put your registered phone number in the to field. On a free (trial)\naccount you can only send messages to numbers you have verified with Twilio.\nType your message content into the body field. 
This code works on\nboth Windows and macOS.\n\n'''\nfrom twilio import rest\n\n# Your Account SID from twilio.com/console\naccount_sid = \"AC8385f11ad23bbbd1ac0ac43e832ef2a8\"\n# Your Auth Token from twilio.com/console\nauth_token = \"20990574e917aa16a85d0482ea9fc5c9\"\n\nclient = rest.TwilioRestClient(account_sid, auth_token)\n\nmessage = client.messages.create(\n to=\"+12166472341\", \n from_=\"+12164506121\",\n body=\"u r over the edge\")\n\nprint(message.sid)\n","sub_path":"Programming Foundations with Python/send_text.py","file_name":"send_text.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"551968637","text":"from flask import Flask, render_template, flash, request, Blueprint, redirect, url_for, session, abort\r\napp = Flask(__name__)\r\n\r\n\r\nfrom my_app.source.models import cursor\r\nfrom my_app.source.mode import cursor1\r\nfrom my_app.source.modelo import cursor2\r\n\r\nmy_view = Blueprint('my_view', __name__)\r\n\r\n#home page handler\r\n@my_view.route(\"/\")\r\n@my_view.route(\"/home\")\r\ndef home():\r\n if not session.get('logon'):\r\n return render_template('login.html')\r\n else:\r\n return render_template('index.html')\r\n\r\n\r\n#login handler\r\n@my_view.route('/login', methods=['POST'])\r\ndef admin_login():\r\n if request.form['username'] == 'admin' and request.form['password'] == 'password':\r\n session['logon'] = True\r\n else:\r\n flash('Incorrect!')\r\n return home()\r\n\r\n#logout handler\r\n@my_view.route(\"/logout\")\r\ndef logout():\r\n session['logon'] = False\r\n return home()\r\n \r\n#changelogs handler\r\n@my_view.route(\"/changelogs\")\r\ndef invoice():\r\n command = \"\"\"SELECT {a}.id, {a}.version, {a}.detail, {a}.date, {b}.version\r\n From {a} join {b} ON {a}.id = {b}.id\r\n \"\"\".format(a=\"changelogs\", b='cool')\r\n cursor1.execute(command)\r\n kim = cursor1.fetchall() \r\n return render_template('changelogs.html', my_chin = kim)\r\n\r\n\r\n#tickets handler\r\n@my_view.route(\"/ticket\")\r\ndef tck():\r\n command = \"\"\"SELECT {a}.id, {a}.name, {a}.value, {b}.name\r\n FROM {a} join {b} ON {a}.category_id = {b}.id\r\n \"\"\".format(a=\"tics\", b='category')\r\n cursor.execute(command)\r\n astro_data = cursor.fetchall() \r\n\r\n\r\n return render_template('ticket.html', my_list=astro_data)\r\n \r\n#users handler\r\n@my_view.route(\"/users\")\r\ndef users():\r\n command = \"\"\"SELECT {a}.id, {a}.staff, {a}.position, {a}.email, {b}.staff\r\n FROM {a} join {b} ON {a}.id = {b}.id\r\n \"\"\".format(a=\"altimit\", b='staff')\r\n cursor2.execute(command)\r\n staff_data = cursor2.fetchall() \r\n\r\n\r\n return render_template('users.html', my_staff=staff_data)\r\n \r\n\r\n\r\n\r\n\r\n#orders handler\r\n@my_view.route(\"/ticket\")\r\ndef orders():\r\n return render_template('ticket.html')\r\n\r\n\r\n","sub_path":"my_app/source/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"636742221","text":"#####################################\n###### \t ABOUT THIS SCRIPT \t #\n#####################################\n# This script does a 2-tailed paired t-test on the samples of each isoform\n\n\n#####################################\n###### INSTRUCTIONS FOR USE:\t\t#\n#####################################\n#In the shell go to this directory and type 'python paired_ttest_isoforms.py' to run the script.\n\n\n#####################################\n# Import necessary 
libraries\nimport pandas\nfrom pandas.io import sql\nimport sqlite3 as lite\nfrom scipy import stats\nimport numpy\n\n\n# Open connection to the geneSequenceResults.db\ncon = lite.connect('geneSequenceResults.db');\ncur = con.cursor()\n#cur.execute('DROP TABLE IF EXISTS isoform_paired_ttest')\n#cur.execute('CREATE TABLE isoform_paired_ttest (isoformid TEXT, tvalue REAL, pvalue REAL)')\n\n\n# Get list of distinct isoform id's\nids = pandas.read_sql_query(\"select distinct isoformid from isoform_pairs\", con)\ncount = pandas.read_sql_query(\"select count(*) from isoform_paired_ttest\", con).as_matrix()[0][0]\n\n# Loop through each isoform id\n#for id in ids.index:\nfor id in ids.index[count:]:\n i = ids.loc[id][0] \n i_samples = pandas.read_sql_query(\"select * from isoform_pairs where isoformid like '\" + i + \"'\", con) #get all samples of the isoform\n i_samples[['normcount_TN','normcount_NT']] = i_samples[['normcount_TN','normcount_NT']].applymap(lambda x: float(x)) #convert the counts from strings to floats\n t_value, p_value = stats.ttest_rel(i_samples['normcount_TN'], i_samples['normcount_NT']) #calculate the t-value and p-value\n tmpList = []\n tmpList.extend([i, t_value, p_value])\n cur.execute('INSERT INTO isoform_paired_ttest VALUES (?,?,?)', tmpList) #write the isoformid, tvalue, pvalue into the database\n con.commit()\n print(str(id) + \" of \" + str(len(ids)) )\n\n\n\nalpha = 0.001\nttest = pandas.read_sql_query(\"select * from isoform_paired_ttest\", con)\nsignificant_isoforms = ttest[ttest['pvalue'] < alpha]\nsignificant_positive = significant_isoforms[significant_isoforms['tvalue'] > 0].sort(columns=['pvalue'], axis=0, ascending=True)\nsignificant_negative = significant_isoforms[significant_isoforms['tvalue']<0].sort(columns=['pvalue'], axis=0, ascending=True)\nsignificant_positive['isoformid'].map(lambda x: str.split(str(x), '|')[0]).to_csv(\"significant_positive_paired_ttest_isoforms.csv\", sep=\"\\t\", index=False)\nsignificant_negative['isoformid'].map(lambda x: str.split(str(x), '|')[0]).to_csv(\"significant_negative_paired_ttest_isoforms.csv\", sep=\"\\t\", index=False)\n\n\n\n# Commit the changes and close the connection to geneSequenceResults.db\ncon.commit()\ncon.close()\n\n\n#####################################\n# END OF FILE\n#####################################\n\n\n","sub_path":"HCC/paired_ttest_isoforms.py","file_name":"paired_ttest_isoforms.py","file_ext":"py","file_size_in_byte":2734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"552290073","text":"\"\"\"\nViews related to the Nivel model\n\nViews\n------\n- Read\n- Create\n- Update\n- Delete\n\"\"\"\nfrom django.db import IntegrityError\nfrom django.db.models import ProtectedError\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render\nfrom django.urls import reverse\n\nfrom zend_django.templatetags.op_helpers import crud_label\nfrom zend_django.templatetags.utils import GenerateReadCRUDToolbar\nfrom zend_django.views import GenericCreate\nfrom zend_django.views import GenericDelete\nfrom zend_django.views import GenericRead\nfrom zend_django.views import GenericUpdate\n\nfrom .models import Factor\nfrom .nivel_forms import frmNivel as base_form\nfrom .nivel_forms import frmNivelRead\nfrom .nivel_models import Nivel as main_model\n\n\ndef template_base_path(file):\n return 'app_valuacion_puestos/nivel/' + file + \".html\"\n\n\nclass Read(GenericRead):\n # html_template = template_base_path('see')\n titulo_descripcion = \"Nivel\"\n model_name = \"nivel\"\n base_data_form = frmNivelRead\n main_data_model 
= main_model\n tereapp = 'valuacion_de_puestos'\n\n def get(self, request, pk):\n if not self.main_data_model.objects.filter(pk=pk).exists():\n return HttpResponseRedirect(reverse('item_no_encontrado'))\n obj = self.main_data_model.objects.get(pk=pk)\n form = self.base_data_form(\n instance=obj, initial={'ponderacion': obj.ponderacion})\n toolbar = GenerateReadCRUDToolbar(\n request, self.model_name, obj, self.main_data_model)\n toolbar[0].update({'type': 'link_pk', 'pk': obj.factor.pk})\n return render(request, self.html_template, {\n 'titulo': obj,\n 'titulo_descripcion': self.titulo_descripcion,\n 'toolbar': toolbar,\n 'footer': False,\n 'read_only': True,\n 'alertas': [],\n 'req_chart': False,\n 'search_value': '',\n 'forms': {'top': [{'form': form}]},\n 'tereapp': self.tereapp,\n 'object': obj,\n })\n\n\nclass Create(GenericCreate):\n titulo = \"Nivel\"\n model_name = \"nivel\"\n base_data_form = base_form\n tereapp = 'valuacion_de_puestos'\n\n def base_render(self, request, forms, alertas=[]):\n return render(request, self.html_template, {\n 'titulo': self.titulo,\n 'titulo_descripcion': crud_label('create'),\n 'toolbar': None,\n 'footer': False,\n 'read_only': False,\n 'alertas': alertas,\n 'req_chart': False,\n 'search_value': '',\n 'forms': forms,\n 'tereapp': self.tereapp,\n })\n\n def get(self, request, pk_padre):\n return self.base_render(request, {\n 'top': [{'form': self.base_data_form()}]})\n\n def post(self, request, pk_padre):\n form = self.base_data_form(request.POST, files=request.FILES)\n if form.is_valid():\n obj = form.save(commit=False)\n obj.factor = Factor.objects.get(pk=pk_padre)\n try:\n obj.save()\n except IntegrityError:\n return self.base_render(\n request,\n {'top': [{'form': form}]},\n [\n \"No es posible agregar el mismo nombre \"\n \"de nivel para un mismo factor\"\n ])\n return HttpResponseRedirect(reverse(\n f'{self.model_name}_read',\n kwargs={'pk': obj.pk}))\n return self.base_render(request, {'top': [{'form': form}]})\n\n\nclass Update(GenericUpdate):\n titulo = \"Nivel\"\n model_name = \"nivel\"\n base_data_form = base_form\n main_data_model = main_model\n tereapp = 'valuacion_de_puestos'\n\n\nclass Delete(GenericDelete):\n model_name = \"nivel\"\n main_data_model = main_model\n\n def get(self, request, pk):\n if not self.main_data_model.objects.filter(pk=pk).exists():\n return HttpResponseRedirect(reverse('item_no_encontrado'))\n obj = self.main_data_model.objects.get(pk=pk)\n try:\n pk_factor = obj.factor.pk\n obj.delete()\n return HttpResponseRedirect(reverse(\n f'{self.model_name}_list', kwargs={'pk': pk_factor}))\n except ProtectedError:\n return HttpResponseRedirect(reverse('item_con_relaciones'))\n except IntegrityError:\n return HttpResponseRedirect(reverse('item_con_relaciones'))\n","sub_path":"app_valuacion_puestos/nivel_vw.py","file_name":"nivel_vw.py","file_ext":"py","file_size_in_byte":4467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"535386399","text":"# ## HackerRank Questions\n\n# * **Inheritance:** https://www.hackerrank.com/challenges/inheritance/problem\n\nclass Person:\n\tdef __init__(self, firstName, lastName, idNumber):\n\t\tself.firstName = firstName\n\t\tself.lastName = lastName\n\t\tself.idNumber = idNumber\n\tdef printPerson(self):\n\t\tprint(\"Name:\", self.lastName + \",\", self.firstName)\n\t\tprint(\"ID:\", self.idNumber)\n\n \nclass Student(Person):\n def __init__(self, firstName, lastName, idNumber,scores):\n super().__init__(firstName, lastName, idNumber)\n 
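# Keep only the mean of all scores; calculate() maps this mean onto the grade bands below.\n 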
self.scores=sum(scores)/len(scores)\n def calculate(self):\n if 100>=self.scores>=90:\n return 'O'\n elif 80<=self.scores<90:\n return 'E'\n elif 70<=self.scores<80:\n return 'A'\n elif 55<=self.scores<70:\n return 'P'\n elif 40<=self.scores<55:\n return 'D'\n elif 40>self.scores:\n return 'T'\n \nline = input().split()\nfirstName = line[0]\nlastName = line[1]\nidNum = line[2]\nnumScores = int(input()) # not needed for Python\nscores = list( map(int, input().split()) )\ns = Student(firstName, lastName, idNum, scores)\ns.printPerson()\nprint(\"Grade:\", s.calculate())\n\n# \n# * **Classes: Dealing with Complex Numbers:** https://www.hackerrank.com/challenges/class-1-dealing-with-complex-numbers/problem\n\nimport math\n\nclass Complex(object):\n def __init__(self, real, imaginary):\n self.real=real\n self.imaginary=imaginary\n def __add__(self, no):\n a = complex(self.real, self.imaginary)\n b = complex(no.real, no.imaginary)\n return Complex((a+b).real , (a+b).imag).__str__()\n \n def __sub__(self, no):\n a = complex(self.real, self.imaginary)\n b = complex(no.real, no.imaginary)\n return Complex((a-b).real , (a-b).imag).__str__() \n def __mul__(self, no):\n a = complex(self.real, self.imaginary)\n b = complex(no.real, no.imaginary)\n return Complex((a*b).real , (a*b).imag).__str__()\n def __truediv__(self, no):\n a = complex(self.real, self.imaginary)\n b = complex(no.real, no.imaginary)\n return Complex((a/b).real , (a/b).imag).__str__()\n def mod(self):\n return Complex(abs(complex(self.real, self.imaginary)), 0)\n def __str__(self):\n if self.imaginary == 0:\n result = \"%.2f+0.00i\" % (self.real)\n elif self.real == 0:\n if self.imaginary >= 0:\n result = \"0.00+%.2fi\" % (self.imaginary)\n else:\n result = \"0.00-%.2fi\" % (abs(self.imaginary))\n elif self.imaginary > 0:\n result = \"%.2f+%.2fi\" % (self.real, self.imaginary)\n else:\n result = \"%.2f-%.2fi\" % (self.real, abs(self.imaginary))\n return result\n\nif __name__ == '__main__':\n c = map(float, input().split())\n d = map(float, input().split())\n x = Complex(*c)\n y = Complex(*d)\n print(*map(str, [x+y, x-y, x*y, x/y, x.mod(), y.mod()]), sep='\\n')","sub_path":"hackerrank.py","file_name":"hackerrank.py","file_ext":"py","file_size_in_byte":2975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"20580610","text":"from . 
import FixtureTest\n\n\nclass RoadSortKeysBridges(FixtureTest):\n def test_motorway_bridge(self):\n self.load_fixtures(['https://www.openstreetmap.org/way/28412298'])\n\n self.assert_has_feature(\n 16, 10472, 25323, \"roads\",\n {\"kind\": \"highway\", \"kind_detail\": \"motorway\", \"id\": 28412298,\n \"name\": \"Presidio Pkwy.\", \"is_bridge\": True, \"sort_rank\": 443})\n\n def test_trunk_bridge(self):\n self.load_fixtures(['https://www.openstreetmap.org/way/59801274'])\n\n self.assert_has_feature(\n 16, 10471, 25331, \"roads\",\n {\"kind\": \"major_road\", \"kind_detail\": \"trunk\", \"id\": 59801274,\n \"name\": \"Crossover Dr.\", \"is_bridge\": True, \"sort_rank\": 443})\n\n def test_primary_bridge(self):\n self.load_fixtures(['http://www.openstreetmap.org/way/399640204'])\n\n self.assert_has_feature(\n 16, 11265, 26221, \"roads\",\n {\"kind\": \"major_road\", \"kind_detail\": \"primary\", \"id\": 399640204,\n \"name\": \"North Los Coyotes Diagonal\", \"is_bridge\": True,\n \"sort_rank\": 430})\n\n def test_secondary_bridge(self):\n self.load_fixtures(['https://www.openstreetmap.org/way/27613581'])\n\n self.assert_has_feature(\n 16, 10486, 25339, \"roads\",\n {\"kind\": \"major_road\", \"kind_detail\": \"secondary\", \"id\": 27613581,\n \"name\": \"Oakdale Ave.\", \"is_bridge\": True, \"sort_rank\": 429})\n\n def test_teriary_bridge(self):\n self.load_fixtures(['https://www.openstreetmap.org/way/242940297'])\n\n self.assert_has_feature(\n 16, 10486, 25327, \"roads\",\n {\"kind\": \"major_road\", \"kind_detail\": \"tertiary\", \"id\": 242940297,\n \"name\": \"Beale St.\", \"is_bridge\": True, \"sort_rank\": 427})\n\n def test_residential_bridge(self):\n self.load_fixtures(['https://www.openstreetmap.org/way/162038104'])\n\n self.assert_has_feature(\n 16, 10738, 24989, \"roads\",\n {\"kind\": \"minor_road\", \"kind_detail\": \"residential\",\n \"id\": 162038104, \"name\": \"Woodwardia Pl.\", \"sort_rank\": 410})\n\n def test_service_bridge(self):\n self.load_fixtures(['http://www.openstreetmap.org/way/232303398'])\n\n self.assert_has_feature(\n 16, 10482, 25363, \"roads\",\n {\"id\": 232303398, \"kind\": \"minor_road\", \"kind_detail\": \"service\",\n \"is_bridge\": True, \"sort_rank\": 408})\n","sub_path":"integration-test/546-road-sort-keys-bridges.py","file_name":"546-road-sort-keys-bridges.py","file_ext":"py","file_size_in_byte":2419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"443316666","text":"from dragonfly import *\nimport BaseGrammars\nfrom BaseRules import *\nimport chajLib.ui.docnav as docnav\nimport chajLib.ui.keyboard as kb\n\ngrammar = BaseGrammars.ContinuousGrammar(\"document navigation - edit grammar\")\n\n#decorator\ndef GrammarRule(rule):\n if inspect.isclass(rule):\n if issubclass(rule, BaseQuickRules):\n rule(grammar)\n else:\n grammar.add_rule(rule())\n else:\n grammar.add_rule(rule)\n\n\n@GrammarRule\nclass ReplaceSurroundingCharacters(ContinuousRule_EatDictation):\n spec = \"replace [ [times]] (character|characters)\"\n introspec = \"replace (left|right)\"\n extras = (IntegerRef(\"n\", 1, 200), Choice(\"direction\", {\"left\":\"left\", \"right\":\"right\"}))\n defaults = { \"n\": 1}\n def _process_recognition(self, node, extras):\n action = Key(\"s-\" + extras[\"direction\"]) * Repeat(count=extras[\"n\"])\n if \"RunOn\" in extras:\n replacement = \" \".join(extras[\"RunOn\"].words)\n action += Text(replacement)\n else:\n action += Key(\"delete\")\n action.execute()\n\n\n@GrammarRule\nclass 
ReplaceTrim(ContinuousRule_EatDictation):\n spec = \"replace \"\n introspec = \"replace (left|right)\"\n extras = (Choice(\"direction\", {\"left\":\"home\", \"right\":\"end\"}),)\n def _process_recognition(self, node, extras):\n action = Key(\"s-\" + extras[\"direction\"])\n if \"RunOn\" in extras:\n replacement = extras[\"RunOn\"].format()\n action += Text(replacement)\n else:\n action += Key(\"delete\")\n action.execute()\n\n\n@GrammarRule\nclass DocNavEditCalls(QuickContinuousCalls):\n mapping = [\n [\"paste left\", docnav.replace_left_from_clipboard],\n [\"paste right\", docnav.replace_right_from_clipboard],\n [\"replace left \", docnav.replace_left, \"replacement\"],\n [\"replace right \", docnav.replace_right, \"replacement\"],\n ]\n\n\ngrammar.load()\ndef unload():\n global grammar\n if grammar: grammar.unload()\n grammar = None","sub_path":"_docNav_edit.py","file_name":"_docNav_edit.py","file_ext":"py","file_size_in_byte":2027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"430192070","text":"import datetime\nfrom dateutil.relativedelta import relativedelta\n\n\ndef hello(event, context):\n today = datetime.date.today()\n yesterday = today + relativedelta(days=-1)\n\n return {\n \"message\": \"Go Serverless v1.0! Your function executed successfully!\",\n \"event\": event,\n \"dates\": {\n \"today\": f'{today: %Y-%m-%d}',\n \"yesterday\": f'{yesterday: %Y-%m-%d}'\n }\n }\n","sub_path":"handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"99838532","text":"import os\nimport numpy as np\n\ndef word2vec(dim=100):\n try:\n file_path = os.path.join('..', '..', 'glove_data', 'glove.6B.{}d.txt'.format(dim))\n except FileNotFoundError:\n print('File not found')\n return None\n res = {}\n with open(file_path) as file:\n for s in file:\n arr = s.split()\n word = arr[0]\n embedding = np.asarray(arr[1:], dtype='float32')\n res[word] = embedding\n return res\n","sub_path":"source/glove_embeddings.py","file_name":"glove_embeddings.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"601783070","text":"# -*- coding: utf-8 -*-\n\nfrom keywords import *\nfrom utilities import System\nfrom version import VERSION\n\n\n__version__ = VERSION\n\n\n\nclass BJRobot(BrowserManager, ApplicationManagemer, AndroidUtils, Keyevent, Touch, Element, Screenshot, Logging, RunOnFailure):\n\n ROBOT_LIBRARY_SCOPE = 'GLOBAL'\n ROBOT_LIBRARY_VERSION = VERSION\n\n def __init__(self,\n run_on_failure='Capture Page Screenshot',\n screenshot_root_directory=None\n ):\n \"\"\"BJRobot can be imported with optional arguments.\n\n ``run_on_failure`` specifies the name of a keyword (from any available\n libraries) to execute when a AppiumLibrary keyword fails.\n\n By default `Capture Page Screenshot` will be used to take a screenshot of the current page.\n Using the value `No Operation` will disable this feature altogether. 
See\n `Register Keyword To Run On Failure` keyword for more information about this\n functionality.\n\n Examples:\n | Library | AppiumLibrary | run_on_failure=Capture Page Screenshot | # Capture the screenshot when on failure |\n | Library | AppiumLibrary | run_on_failure=Capture Page Screenshot | screenshot root directory=../screenshot |\n #Capture screenshot on failure and set the log root directory to the screenshot above the current folder level\n \"\"\"\n for base in BJRobot.__bases__:\n base.__init__(self)\n\n self.screenshot_root_directory = screenshot_root_directory\n self.register_keyword_to_run_on_failure(run_on_failure)\n","sub_path":"src/BJRobot/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"343586032","text":"#Creator: Akshay Naik (aunaik@iu.edu)\r\n\r\nimport numpy as np\r\n\r\n# Power Iteration\r\ndef power_iteration(X, iterations):\r\n y_km1 = np.ones((X.shape[0],1))#np.random.rand(X.shape[0],1)\r\n for i in range(iterations):\r\n y_k = np.dot(X, y_km1)\r\n s = np.linalg.norm(y_k)\r\n y_km1 = y_k/s\r\n return y_km1, s\r\n\r\n# Calculating multiple Eigenvectors\r\ndef eigen_vectors(X, no_of_eigen_vectors, iterations):\r\n #eigen_vector = np.zeros(X.shape[0])\r\n #eigen_values = np.zeros()\r\n for i in range(no_of_eigen_vectors):\r\n v, s = power_iteration(X, iterations)\r\n #s = np.linalg.norm(inner_prod)\r\n u = np.dot(X.T, v)/s\r\n #v = v * s\r\n #print (\"EV\",v)\r\n #exit()\r\n X = X - (s*np.dot(v, u.T))\r\n #X = X - (np.dot(v, u.T))\r\n #print(str(i), \" Eigen value: \", str(s))\r\n if i == 0:\r\n eigen_vector = v\r\n eigen_value = np.array(s)\r\n else:\r\n eigen_vector = np.append(eigen_vector, v, 1)\r\n eigen_value = np.append(eigen_value, s)\r\n return eigen_vector , eigen_value\r\n","sub_path":"Multidimensional Scaling/PowerIteration.py","file_name":"PowerIteration.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"499430314","text":"from django.shortcuts import render\nfrom product_options.models import ProductOption\n\n\ndef product_list(request, pk):\n product = ProductOption.objects.filter(post=pk)\n\n print(product)\n\n # print(product.product_name)\n\n context = {\n\n 'products': product\n }\n\n return render(request, 'product/product-product.html', context)\n","sub_path":"app/lectures/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"108557951","text":"#! 
/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"Module to unit test the products application views\"\"\"\n\nfrom django.test import TestCase\nfrom django.urls import reverse\n\nfrom products.models import Product, Category\n\n\nclass TestResultView(TestCase):\n @classmethod\n def setUpTestData(cls):\n # Create 10 products for pagination tests\n number_of_products = 10\n # Create a category\n test_category = Category.objects.create(category_name='test_category')\n # Create a test_product\n test_product = Product.objects.create(\n barcode='000001',\n product_name='unhealthy',\n nutrition_grade='E',\n url='http://unhealthy.com',\n product_image='http://image.com',\n nutrition_image='http://ingredients.com'\n )\n # Link the product to the test_category\n test_product.category.add(test_category)\n # Create 10 candidates\n for product_id in range(number_of_products):\n test_product = Product.objects.create(\n barcode=f'barcode_{product_id}',\n product_name=f'product_name_{product_id}',\n nutrition_grade='B',\n url=f'url_{product_id}',\n product_image=f'product_image_{product_id}',\n nutrition_image=f'nutrition_image_{product_id}'\n )\n # Link products to the test_category\n test_product.category.add(test_category)\n\n def test_view_url_exists_at_desired_location(self):\n response = self.client.get('/products/results/unhealthy/')\n self.assertEqual(response.status_code, 200)\n\n def test_view_url_accessible_by_name(self):\n response = self.client.get(\n reverse(\n 'products:results', kwargs={\n 'product_name': 'unhealthy'}))\n self.assertEqual(response.status_code, 200)\n\n\nclass TestProductView(TestCase):\n @classmethod\n def setUpTestData(cls):\n # Create a category\n test_category = Category.objects.create(category_name='test_category')\n # Create a test_product product\n test_product = Product.objects.create(\n barcode='000002',\n product_name='healthy',\n nutrition_grade='A',\n url='http://healthy.com',\n product_image='http://image_healthy.com',\n nutrition_image='http://ingredients_healthy.com'\n )\n # Link the product to the test_category\n test_product.category.add(test_category)\n\n def setUp(self):\n test_product = Product.objects.get(barcode='000002')\n self.test_product_id = test_product.id\n\n def test_view_url_exists_at_desired_location(self):\n response = self.client.get(\n f'/products/details/{self.test_product_id}/')\n self.assertEqual(response.status_code, 200)\n\n def test_view_url_accessible_by_name(self):\n response = self.client.get(\n reverse(\n 'products:details', kwargs={\n 'pk': self.test_product_id}))\n self.assertEqual(response.status_code, 200)\n","sub_path":"products/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":3117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"409366156","text":"import serial\r\nfrom time import sleep\r\nimport wx\r\ndef recv(serials):\r\n while True:\r\n data = serials.readall()\r\n if data == b'': # readall() returns bytes, so compare with b'' (comparing with '' never matched)\r\n sleep(0.02)\r\n continue\r\n else:\r\n break\r\n return data\r\n\r\nclass MyFrame(wx.Frame): # class that builds the main window\r\n def __init__(self, parent, id): # the window's own attributes\r\n wx.Frame.__init__(self, parent, id, title=\"串口调试处理器\", size=(480, 500))\r\n panel = wx.Panel(self) # panel the widgets are drawn on\r\n title = wx.StaticText(panel, label=\"请在下方填入想要的相关操作\", pos=(90, 10))\r\n font = wx.Font(15, wx.DEFAULT, wx.FONTSTYLE_NORMAL, wx.LIGHT, underline=False)\r\n title.SetFont(font)\r\n title.SetForegroundColour(\"yellow\") # set the title's foreground and background colors\r\n title.SetBackgroundColour(\"blue\")\r\n self.send = wx.StaticText(panel, label=\"请输入发送的信息:\", pos=(10, 
35))\n self.send.SetFont(font)\n self.textsend = wx.TextCtrl(panel, pos=(200, 35), size=(240, 175), style=wx.TE_LEFT)\n self.receive = wx.StaticText(panel, label=\"您收到的信息:\", pos=(10, 220))\n self.receive.SetFont(font)\n self.textreceive = wx.TextCtrl(panel, pos=(180, 220), size=(260, 150), style=wx.TE_LEFT|wx.TE_WORDWRAP)\n clear1 = wx.Button(panel, label=\"清除发送项\", pos=(30, 420), size=(100, 37))\n clear1.Bind(wx.EVT_BUTTON, self.Onclickclear1)\n clear2 = wx.Button(panel,label=\"清除接收项\",pos=(140,420),size=(100,37))\n clear2.Bind(wx.EVT_BUTTON, self.Onclickclear2)\n confirm = wx.Button(panel, label=\"发送\", pos=(250, 420), size=(90, 37))\n confirm.Bind(wx.EVT_BUTTON, self.Onclickconfirm)\n cancel = wx.Button(panel, label=\"退出\", pos=(350, 420), size=(90, 37))\n cancel.Bind(wx.EVT_BUTTON, self.Onclickcancel)\n self.comtxt = wx.StaticText(panel,label=\"请选择COM:\",pos=(10,90))\n comlist = [(\"COM\" + str(i)) for i in range(1, 100)]\n self.liststr = wx.Choice(panel,-1,(10,120),choices=comlist)\n self.baudtxt = wx.StaticText(panel,label=\"请选择波特率:\",pos=(10,160))\n baudrate = ['300','600','1200','2400','4800','9600','19200','38400','43000','56000','57600','115200']\n self.listbaud = wx.Choice(panel,-1,(10,180),choices=baudrate)\n\n def Onclickcancel(self, event):\n exit(0)\n\n def Onclickclear1(self, event):\n self.textsend.SetValue(\"\")\n\n def Onclickclear2(self,event):\n self.textreceive.SetValue(\"\")\n\n\n def Onclickconfirm(self, event):\n try:\n index = self.liststr.GetSelection()\n com = self.liststr.GetString(index)\n index = self.listbaud.GetSelection()\n bud = self.listbaud.GetString(index)\n serials = serial.Serial(com, bud, timeout=0.5) # /dev/ttyUSB0\n\n str1 = self.textsend.GetValue()\n a = str1 + \"\\n\"\n # print(len(a))\n serials.write((a).encode(\"gbk\"))\n sleep(0.1)\n while True:\n data = recv(serials)\n if data != b'':\n self.textreceive.SetValue(data.decode(\"gbk\")+'\\n')\n else:\n break\n\n except Exception as e:\n wx.MessageBox(\"open failed\"+str(e))\n\n\nif __name__ == \"__main__\":\n app = wx.App()\n frame = MyFrame(parent=None, id=-1)\n frame.Center()\n frame.Show()\n app.MainLoop()\n \"\"\"\n serial = serial.Serial('COM2',115200, timeout=0.5) #/dev/ttyUSB0\n if serial.isOpen() :\n print(\"open success\")\n else :\n print(\"open failed\")\n while True:\n str1 = input(\"请输入要发送到串口的话:\")\n a=str1+\"\\n\"\n #print(len(a))\n serial.write((a).encode(\"gbk\"))\n sleep(0.1)\n data =recv(serial)\n if data != b'' :\n print(\"receive : \",data.decode(\"gbk\"))\n\"\"\"\n\n","sub_path":"cloud.py","file_name":"cloud.py","file_ext":"py","file_size_in_byte":3884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"609483433","text":"\"\"\"\nDownloadable package:\n\nget_downloadable(html) -> Dict\n\"\"\"\nfrom bs4 import BeautifulSoup as BS\n\ndef get_downloadable(html):\n \"\"\"\n Params = obj\n Returns -> Dict{\n 'left': Dict {\n 'link': Link of downloadable item\n 'caption': Short description of item\n }\n }\n \"\"\"\n soup = BS(html.html, features='lxml')\n get_full_link = lambda x:f\"{html.url}/{x.strip()}\"\n\n # Get the downloadable item in left side\n left_div = soup.find(\"div\", id=\"leftnoav\")\n left_div_a_tag = left_div.p.a\n\n link = left_div_a_tag.attrs.get(\"href\")\n left_link = get_full_link(link) if 'http' not in link else link.strip()\n\n left_img = left_div_a_tag.img.attrs.get('src')\n left_img_link = get_full_link(left_img) if 'http' not in left_img else left_img.strip()\n\n left_title = left_div_a_tag.string\n 
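# Prefer the caption text; fall back to the raw href when neither image nor caption exists.\n    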
if not left_img_link and not left_title:\n left_title = link\n elif left_img_link and not left_title:\n left_title = ''\n\n # Get the downloadable item in right side\n right_div = soup.find(\"div\", id=\"round_bottom_left\")\n right_div_a_tag = right_div.find_next('a')\n link = right_div_a_tag.attrs.get(\"href\")\n right_link = get_full_link(link) if 'http' not in link else link.strip()\n\n right_img = right_div_a_tag.find('img').attrs.get('src')\n right_img_link = get_full_link(right_img) if 'http' not in right_img else right_img.strip()\n\n right_title = right_div_a_tag.string\n if not right_img_link and not right_title:\n right_title = link\n elif right_img_link and not right_title:\n right_title = \"\"\n\n downloadables = {\n \"left\": {\"link\": left_link, \"caption\": left_title, \"image\":left_img_link},\n \"right\": {'link': right_link, 'caption': right_title, \"image\":right_img_link},\n }\n return downloadables\n","sub_path":"backend/scrapers/downloadable/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"41220783","text":"# coding=utf-8\nimport math\nimport time\nimport re\n\n# Using names.txt (right click and 'Save Link/Target As...'), a 46K text file containing over five-thousand first names, begin by sorting it into alphabetical order. Then working out the alphabetical value for each name, multiply this value by its alphabetical position in the list to obtain a name score.\n# For example, when the list is sorted into alphabetical order, COLIN, which is worth 3 + 15 + 12 + 9 + 14 = 53, is the 938th name in the list. So, COLIN would obtain a score of 938 × 53 = 49714.\n# What is the total of all the name scores in the file?\n\ndef fxn22():\n\tf = open(raw_input('Enter a file name: '), \"r\")\n\tnames = sorted([name.replace('\"','') for name in f.read().split(',')])\n\tf.close()\n\n\tstart = time.clock()\n\tindex = 1\n\ttotal_of_list = 0\n\n\tfor name in names:\n\t\ttotal_of_name = 0\n\t\tfor letter in name:\n\t\t\tletter = str.lower(letter)\n\t\t\tvalue = ord(letter) - 96\n\t\t\ttotal_of_name += value\n\t\ttotal_of_list += index * total_of_name\n\t\tindex += 1\n\tprint (str(total_of_list) + \" in \" + str(time.clock() - start) + \" seconds. 
\")","sub_path":"p22.py","file_name":"p22.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"249921448","text":"#!/usr/bin/env python3\n# SPDX-License-Identifier: BSD-3-Clause\n# Copyright(c) 2018 Nippon Telegraph and Telephone Corporation\n\nimport argparse\nimport os\nimport subprocess\nimport sys\n\nwork_dir = os.path.dirname(__file__)\nsys.path.append(work_dir + '/..')\nfrom lib import app_helper\nfrom lib import common\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n description=\"Launcher for spp-primary application container\")\n\n parser = app_helper.add_eal_args(parser)\n parser = app_helper.add_appc_args(parser)\n\n # Application specific arguments\n parser.add_argument(\n '-n', '--nof-ring',\n type=int,\n default=10,\n help='Maximum number of Ring PMD')\n parser.add_argument(\n '-p', '--port-mask',\n type=str,\n help='Port mask')\n parser.add_argument(\n '-ip', '--ctl-ip',\n type=str,\n help=\"IP address of spp-ctl\")\n parser.add_argument(\n '--ctl-port',\n type=int,\n default=5555,\n help=\"Port for primary of spp-ctl\")\n\n parser = app_helper.add_sppc_args(parser)\n return parser.parse_args()\n\n\ndef main():\n args = parse_args()\n\n app_name = 'spp_primary'\n\n # Setup docker command.\n docker_cmd = ['sudo', 'docker', 'run', '\\\\']\n\n # Container image name such as 'sppc/spp-ubuntu:18.04'\n if args.container_image is not None:\n container_image = args.container_image\n else:\n container_image = common.container_img_name(\n common.IMG_BASE_NAMES['spp'],\n args.dist_name, args.dist_ver)\n\n app_opts = [\n '-v', '/var/run/:/var/run/', '\\\\',\n '-v', '/tmp:/tmp', '\\\\']\n\n # Use host network if attaching TAP device to show them on the host.\n for dev_uid in args.dev_uids.split(','):\n if 'tap' in dev_uid:\n app_opts += ['--net', 'host', '\\\\']\n\n docker_opts = app_helper.setup_docker_opts(\n args, None, app_opts)\n\n # Setup spp primary command.\n spp_cmd = [app_name, '\\\\']\n\n eal_opts = app_helper.setup_eal_opts(args, app_name=None,\n proc_type='primary', is_spp_pri=True)\n\n spp_opts = []\n # Check for other mandatory opitons.\n if args.port_mask is None:\n common.error_exit('port_mask')\n else:\n spp_opts += ['-p', args.port_mask, '\\\\']\n\n spp_opts += ['-n', str(args.nof_ring), '\\\\']\n\n # IP address of spp-ctl.\n ctl_ip = os.getenv('SPP_CTL_IP', args.ctl_ip)\n if ctl_ip is None:\n print('Env variable \"SPP_CTL_IP\" is not defined!')\n exit()\n else:\n spp_opts += ['-s', '{}:{}'.format(ctl_ip, args.ctl_port), '\\\\']\n\n cmds = docker_cmd + docker_opts + [container_image, '\\\\'] + \\\n spp_cmd + eal_opts + spp_opts\n\n if cmds[-1] == '\\\\':\n cmds.pop()\n common.print_pretty_commands(cmds)\n\n if args.dry_run is True:\n exit()\n\n # Remove delimiters for print_pretty_commands().\n while '\\\\' in cmds:\n cmds.remove('\\\\')\n\n subprocess.call(cmds)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"tools/sppc/app/spp-primary.py","file_name":"spp-primary.py","file_ext":"py","file_size_in_byte":3036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"142209524","text":"import json\nimport sys\nimport os.path\nimport system\nimport dirutils\n\ntemp_path = os.path.abspath(sys.argv[1])\ndirectory = os.path.abspath(sys.argv[2])\ncsv = os.path.abspath(sys.argv[3])\nexe = sys.argv[4]\nopts = sys.argv[5]\n\nprint(\"======Running flint++=======\")\nprint(\"Working dir:\", directory)\nprint(\"CSV 
file:\", csv)\nprint(\"Executable:\", exe)\nprint(\"Options:\", opts)\n\nc_files = dirutils.list_files(directory, '.c') + dirutils.list_files(directory, '.cpp')\n(output, err, exit, time) = system.system_call(exe + \" \" + opts + \" \" + \" \".join(c_files), directory)\n\ntemp_file = open(temp_path, 'w')\ntemp_file.write(output.decode(\"utf-8\"))\ntemp_file.close()\n\nsys.stdout = open(csv, \"w\")\nprint(\"File, Line, Error\")\nif (os.path.exists(temp_path)):\n with open(temp_path) as json_report_file:\n data = json.load(json_report_file)\n for f in data['files']:\n filename = f['path']\n for error in f['reports']:\n print(filename, \",\", error['line'], \",\", error['title'])\nsys.stdout = sys.__stdout__\nprint(\"======Done with flint++=======\")\n","sub_path":"python/flint++.py","file_name":"flint++.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"169281784","text":"import networkx as nx\nfrom urlparse import urlparse\nimport os.path\nimport tldextract\nimport hashlib\n\nyu_suffixes = (\n 'yu',\n\t'com.yu',\n\t'co.yu',\n\t'edu.yu',\n\t'gov.yu',\n\t'net.yu',\n\t'mil.yu',\n\t'org.yu',\n\t'cg.yu',\n\t'ac.yu',\n)\nextract = tldextract.TLDExtract(extra_suffixes=yu_suffixes)\n\nG=nx.DiGraph()\nG = nx.read_gexf('PCPress-1996-2010.gexf')\n\nfile = open('pcpress-category.csv', 'r')\nfor line in file:\n\tline = line.strip()\n\tsp = line.split(',')\n\turl_ext = extract(sp[0])\n\tnode_label = url_ext.domain + '.' + url_ext.suffix\n\n\th = hashlib.md5(node_label).hexdigest()\n\n\tif h in G:\n\t\tG.node[h]['category'] = sp[1]\n\nnx.write_gexf(G, 'PCPress-1996-2010-WithCategory.gexf')","sub_path":"Crawler/AddCategory.py","file_name":"AddCategory.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"443539440","text":"import numpy as np\nimport tensorflow as tf\nimport random\nimport os\nfrom tqdm import tqdm\nfrom skimage.io import imread\nfrom util import img_to_onehot_encoding \nfrom scipy.ndimage import rotate\nfrom scipy.ndimage.interpolation import shift\nfrom PIL import Image\nimport imageio\nfrom imgaug import augmenters as iaa\nfrom imgaug.augmentables.segmaps import SegmentationMapsOnImage\nimport imgaug as ia\nfrom imgaug import parameters as iap\nfrom skimage.io import imsave \nfrom .augmentors import cutblur, cutmix\n\nclass VoxelDataGenerator(tf.keras.utils.Sequence):\n \"\"\"Custom ImageDataGenerator for 3D images.\n \"\"\"\n\n def __init__(self, X, Y, random_subvolumes_in_DA=False, subvol_shape=None,\n seed=42, shuffle_each_epoch=False, batch_size=32, da=True, \n da_prob=0.5, rotation90=False, rand_rot=False,\n rnd_rot_range=(-180,180), shear=False, shear_range=(-20,20),\n zoom=False, zoom_range=(0.8,1.2), shift=False,\n shift_range=(0.1,0.2), flip=False, elastic=False, \n e_alpha=(240,250), e_sigma=25, e_mode='constant', g_blur=False,\n g_sigma=(1.0,2.0), median_blur=False, mb_kernel=(3,7),\n motion_blur=False, motb_k_range=(3,8), gamma_contrast=False,\n gc_gamma=(1.25,1.75), dropout=False, drop_range=(0, 0.2),\n cutout=False, cout_nb_iterations=(1,3), cout_size=0.2,\n cout_fill_mode='constant', cutblur=False, cblur_size=0.4,\n cblur_down_range=(2,8), cblur_inside=True, cutmix=False,\n cmix_size=0.4, n_classes=1, out_number=1, val=False,\n prob_map=None, extra_data_factor=1):\n \"\"\"ImageDataGenerator constructor. Based on transformations from \n `imgaug `_ library. 
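A minimal usage sketch (array names, shapes and the chosen options are illustrative, not defaults):\n\n ::\n\n gen = VoxelDataGenerator(X_train, Y_train, batch_size=2, da=True,\n rotation90=True, flip=True)\n batch_x, batch_y = gen[0] # first batch of volumes and their masks\n\n 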
Here is a brief\n description of each transformation parameter. Find a complete\n explanation in the library `documentation `_. \n \n \n Parameters\n ----------\n X : Numpy 5D array\n Data. E.g. ``(num_of_images, x, y, z, channels)``.\n\n Y : Numpy 5D array\n Mask data. E.g. ``(num_of_images, x, y, z, channels)``.\n\n random_subvolumes_in_DA : bool, optional\n To extract random subvolumes from the given data. If not, the \n data must be 5D and it is assumed that the subvolumes are prepared. \n \n subvol_shape : 4D tuple of ints, optional\n Shape of the subvolume to be extracted randomly from the data. \n E. g. ``(x, y, z, channels)``.\n \n seed : int, optional\n Seed for random functions.\n \n shuffle_each_epoch : bool, optional\n To shuffle data after each epoch.\n\n batch_size : int, optional\n Size of the batches.\n \n da : bool, optional\n To activate the data augmentation.\n \n da_prob : float, optional\n Probability of doing each transformation.\n \n rotation90 : bool, optional \n To make square (90, 180, 270) degree rotations.\n \n rand_rot : bool, optional \n To make rotations of a random degree within a range. \n \n rnd_rot_range : tuple of float, optional\n Range of random rotations. E. g. ``(-180, 180)``.\n\n shear : bool, optional\n To make shear transformations. \n\n shear_range : tuple of int, optional\n Degree range to make shear. E. g. ``(-20, 20)``. \n\n zoom : bool, optional\n To make zoom on images.\n \n zoom_range : tuple of floats, optional\n Zoom range to apply. E. g. ``(0.8, 1.2)``. \n \n shift : bool, optional \n To make shifts.\n \n shift_range : tuple of float, optional\n Range to make a shift. E. g. ``(0.1, 0.2)``.\n\n flip : bool, optional\n To activate flips (both horizontal and vertical).\n \n elastic : bool, optional\n To make elastic deformations.\n\n e_alpha : tuple of ints, optional\n Strength of the distortion field. E. g. ``(240, 250)``.\n \n e_sigma : int, optional\n Standard deviation of the gaussian kernel used to smooth the \n distortion fields. \n\n e_mode : str, optional\n Parameter that defines the handling of newly created pixels with \n the elastic transformation. \n \n g_blur : bool, optional\n To insert gaussian blur on the images.\n \n g_sigma : tuple of floats, optional\n Standard deviation of the gaussian kernel. E. g. ``(1.0, 2.0)``.\n\n median_blur : bool, optional \n To blur an image by computing median values over neighbourhoods.\n \n mb_kernel : tuple of ints, optional \n Median blur kernel size. E. g. ``(3, 7)``. \n\n motion_blur : bool, optional\n Blur images in a way that fakes camera or object movements.\n\n motb_k_range : int, optional\n Kernel size to use in motion blur. \n \n gamma_contrast : bool, optional\n To insert gamma contrast changes on images. \n\n gc_gamma : tuple of floats, optional \n Exponent for the contrast adjustment. Higher values darken the \n image. E. g. ``(1.25, 1.75)``. \n\n dropout : bool, optional\n To set a certain fraction of pixels in images to zero.\n\n drop_range : tuple of floats, optional\n Range to take a probability ``p`` to drop pixels. E.g. ``(0, 0.2)``\n will take a ``p`` following ``0<=p<=0.2`` and then drop ``p``\n percent of all pixels in the image (i.e. convert them to black\n pixels).\n\n cutout : bool, optional \n To fill one or more rectangular areas in an image using a fill \n mode.\n\n cout_nb_iterations : tuple of ints, optional\n Range of number of areas to fill the image with. E. g. ``(1, 3)``. \n\n cout_size : float, optional \n Size of the areas in % of the corresponding image size. 
Value\n between ``0`` and ``1``.\n\n cout_fill_mode : str, optional \n Parameter that defines the handling of newly created pixels with\n cutout.\n\n cutblur : boolean, optional\n Blur a rectangular area of the image by downsampling and upsampling\n it again. \n\n cblur_size : float, optional\n Size of the area to apply cutblur on.\n\n cblur_down_range : tuple of ints, optional\n Range of the downsampling factor applied inside the cutblur area.\n E. g. ``(2, 8)``.\n \n cblur_inside : boolean, optional\n If ``True`` only the region inside will be modified (cut LR into HR\n image). If ``False``, half of the time the region inside will\n be modified (cut LR into HR image) and the other half the inverse\n will be done (cut HR into LR image). See Figure 1 of the official\n `paper `_.\n\n cutmix : boolean, optional\n Combine two images by pasting a region of one image into another.\n\n cmix_size : float, optional\n Size of the area to paste from one image into another. \n\n n_classes : int, optional\n Number of classes. If ``> 1`` one-hot encoding will be done on \n the ground truth.\n\n out_number : int, optional \n Number of outputs returned by the network. Used to produce the same \n number of ground truth arrays on each batch. \n\n val : bool, optional\n Advise the generator that the volumes will be used to validate\n the model, so random crops are not made (as the validation data must\n be the same on each epoch). Valid when ``random_subvolumes_in_DA`` \n is set.\n\n prob_map : 5D Numpy array, optional\n Probability map used to make random crops when\n ``random_subvolumes_in_DA`` is set.\n \n extra_data_factor : int, optional\n Factor to multiply the batches yielded in an epoch. It acts as if\n ``X`` and ``Y`` were concatenated ``extra_data_factor`` times.\n \"\"\"\n\n if X.ndim != 5 or Y.ndim != 5:\n raise ValueError(\"X and Y must be 5D Numpy arrays\")\n if X.shape[:4] != Y.shape[:4]: \n raise ValueError(\"The shape of X and Y must be the same. {} != {}\"\n .format(X.shape[:4], Y.shape[:4]))\n if random_subvolumes_in_DA:\n if subvol_shape is None:\n raise ValueError(\"'subvol_shape' must be provided when \"\n \"'random_subvolumes_in_DA' is enabled\") \n if subvol_shape[0] > X.shape[1] or subvol_shape[1] > X.shape[2] or \\\n subvol_shape[2] > X.shape[3]:\n raise ValueError(\"Given 'subvol_shape' is bigger than the data \"\n \"provided\")\n\n
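 # rotation90 overlaps with rand_rot (square rotations are a subset of free rotations), so only warn.\n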
Maybe you should\"\n \" set only 'rand_rot'?\")\n\n self.X = (X/255).astype(np.float32) if np.max(X) > 100 else X.astype(np.float32)\n self.X_c = self.X.shape[-1]\n self.X_z = self.X.shape[-2]\n self.Y = (Y/255).astype(np.uint8) if np.max(Y) > 100 else Y.astype(np.uint8)\n self.Y_c = self.Y.shape[-1]\n self.Y_z = self.Y.shape[-2]\n self.n_classes = n_classes\n self.out_number = out_number\n self.channels = Y.shape[-1]\n self.random_subvolumes_in_DA = random_subvolumes_in_DA\n self.seed = seed\n self.shuffle_each_epoch = shuffle_each_epoch\n self.da = da\n self.da_prob = da_prob\n self.flip = flip\n self.cutblur = cutblur\n self.cblur_size = cblur_size\n self.cblur_down_range = cblur_down_range\n self.cblur_inside = cblur_inside\n self.cutmix = cutmix\n self.cmix_size = cmix_size\n self.val = val\n self.batch_size = batch_size\n self.o_indexes = np.arange(len(self.X))\n if extra_data_factor > 1:\n self.extra_data_factor = extra_data_factor\n self.o_indexes = np.concatenate([self.o_indexes]*extra_data_factor)\n else:\n self.extra_data_factor = 1\n self.prob_map = prob_map\n if random_subvolumes_in_DA:\n self.shape = subvol_shape\n else:\n self.shape = X.shape[1:]\n self.total_batches_seen = 0\n\n self.da_options = []\n self.trans_made = ''\n if rotation90:\n self.da_options.append(iaa.Sometimes(da_prob, iaa.Rot90((1, 3))))\n self.trans_made += '_rot[90,180,270]'\n if rand_rot:\n self.da_options.append(iaa.Sometimes(da_prob, iaa.Affine(rotate=rnd_rot_range)))\n self.trans_made += '_rrot'+str(rnd_rot_range)\n if shear:\n # fixed: shear must use Affine's 'shear' argument, not 'rotate'\n self.da_options.append(iaa.Sometimes(da_prob, iaa.Affine(shear=shear_range)))\n self.trans_made += '_shear'+str(shear_range)\n if zoom:\n self.da_options.append(iaa.Sometimes(da_prob, iaa.Affine(scale={\"x\": zoom_range, \"y\": zoom_range})))\n self.trans_made += '_zoom'+str(zoom_range)\n if shift:\n self.da_options.append(iaa.Sometimes(da_prob, iaa.Affine(translate_percent=shift_range)))\n self.trans_made += '_shift'+str(shift_range)\n if flip:\n self.da_options.append(iaa.Flipud(0.5))\n self.da_options.append(iaa.Fliplr(0.5))\n self.trans_made += '_flip'\n if elastic:\n self.da_options.append(iaa.Sometimes(da_prob,iaa.ElasticTransformation(alpha=e_alpha, sigma=e_sigma, mode=e_mode)))\n self.trans_made += '_elastic'+str(e_alpha)+'+'+str(e_sigma)+'+'+str(e_mode)\n if g_blur:\n self.da_options.append(iaa.Sometimes(da_prob,iaa.GaussianBlur(g_sigma)))\n self.trans_made += '_gblur'+str(g_sigma)\n if median_blur:\n self.da_options.append(iaa.Sometimes(da_prob,iaa.MedianBlur(k=mb_kernel)))\n self.trans_made += '_mblur'+str(mb_kernel)\n if motion_blur:\n self.da_options.append(iaa.Sometimes(da_prob,iaa.MotionBlur(k=motb_k_range)))\n self.trans_made += '_motb'+str(motb_k_range)\n if gamma_contrast:\n self.da_options.append(iaa.Sometimes(da_prob,iaa.GammaContrast(gc_gamma)))\n self.trans_made += '_gcontrast'+str(gc_gamma)\n if dropout:\n self.da_options.append(iaa.Sometimes(da_prob, iaa.Dropout(p=drop_range)))\n self.trans_made += '_drop'+str(drop_range)\n if cutout:\n self.da_options.append(iaa.Sometimes(da_prob, iaa.Cutout(nb_iterations=cout_nb_iterations, size=cout_size, fill_mode=cout_fill_mode, squared=False)))\n self.trans_made += '_cout'+str(cout_nb_iterations)+'+'+str(cout_size)+'+'+str(cout_fill_mode)\n if cutblur: self.trans_made += '_cblur'+str(cblur_size)+'+'+str(cblur_down_range)+'+'+str(cblur_inside)\n if cutmix: self.trans_made += '_cmix'+str(cmix_size)\n\n self.trans_made = self.trans_made.replace(\" \", \"\")\n self.seq = iaa.Sequential(self.da_options)\n 
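# note: every transform above is wrapped in iaa.Sometimes(da_prob, ...), so the\n # Sequential pipeline applies each selected augmentation independently with\n # probability da_prob instead of all-or-nothing\n 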
ia.seed(seed)\n self.on_epoch_end()\n\n def __len__(self):\n \"\"\"Defines the length of the generator\"\"\"\n return int(np.ceil(self.X.shape[0]*self.extra_data_factor/self.batch_size))\n\n def __draw_grid(self, im, grid_width=50, v=1):\n \"\"\"Draw a grid of the specified size on an image.\n\n Parameters\n ----------\n im : 4D Numpy array\n Image to be modified. E.g. ``(x, y, z, channels)``.\n\n grid_width : int, optional\n Grid's width.\n\n v : int, optional\n Value to create the grid with.\n \"\"\"\n\n for k in range(0, im.shape[2]):\n for i in range(0, im.shape[0], grid_width):\n if im.shape[-1] == 1:\n im[i,:,k] = v\n else:\n im[i,:,k] = [v]*im.shape[-1]\n for j in range(0, im.shape[1], grid_width):\n if im.shape[-1] == 1:\n im[:,j,k] = v\n else:\n im[:,j,k] = [v]*im.shape[-1]\n\n def __getitem__(self, index):\n \"\"\"Generation of one batch of data.\n\n Parameters\n ----------\n index : int\n Batch index counter.\n\n Returns\n -------\n batch_x : 5D Numpy array\n Corresponding X elements of the batch.\n E.g. ``(batch_size_value, x, y, z, channels)``.\n\n batch_y : 5D Numpy array\n Corresponding Y elements of the batch.\n E.g. ``(batch_size_value, x, y, z, channels)``.\n \"\"\"\n\n indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]\n batch_x = np.zeros((len(indexes), ) + self.shape, dtype=np.float32)\n batch_y = np.zeros((len(indexes), ) + self.shape[:3]+(self.channels,),\n dtype=np.uint8)\n\n for i, j in zip(range(len(indexes)), indexes):\n if self.random_subvolumes_in_DA:\n # fixed: crop from the j-th sample (the old code always used index 0)\n batch_x[i], batch_y[i] = random_3D_crop(\n self.X[j], self.Y[j], self.shape, self.val,\n vol_prob=(self.prob_map[j] if self.prob_map is not None else None))\n else:\n batch_x[i] = np.copy(self.X[j])\n batch_y[i] = np.copy(self.Y[j])\n\n if self.da:\n extra_img = np.random.randint(0, self.X.shape[0])\n batch_x[i], batch_y[i] = self.apply_transform(\n batch_x[i], batch_y[i], e_im=self.X[extra_img],\n e_mask=self.Y[extra_img])\n\n if self.n_classes > 1 and (self.n_classes != self.channels):\n batch_y_ = np.zeros((len(indexes), ) + self.shape[:3] + (self.n_classes,))\n for i in range(len(indexes)):\n batch_y_[i] = np.asarray(img_to_onehot_encoding(batch_y[i]))\n\n batch_y = batch_y_\n\n self.total_batches_seen += 1\n\n if self.out_number == 1:\n return batch_x, batch_y\n else:\n return ([batch_x], [batch_y]*self.out_number)\n\n def on_epoch_end(self):\n \"\"\"Updates indexes after each epoch.\"\"\"\n\n ia.seed(self.seed + self.total_batches_seen)\n # copy so that shuffling does not mutate the original index order\n self.indexes = np.copy(self.o_indexes)\n if self.shuffle_each_epoch:\n random.Random(self.seed + self.total_batches_seen).shuffle(self.indexes)\n\n def apply_transform(self, image, mask, grid=False, e_im=None, e_mask=None):\n \"\"\"Transform the input image and its mask at the same time with one of\n the selected choices based on a probability.\n\n Parameters\n ----------\n image : 4D Numpy array\n Image to transform. E.g. ``(x, y, z, channels)``.\n\n mask : 4D Numpy array\n Mask to transform. E.g. ``(x, y, z, channels)``.\n\n grid : bool, optional\n Not used by this method; kept for API compatibility.\n\n e_im : 4D Numpy array, optional\n Extra image pasted in when cutmix is applied.\n\n e_mask : 4D Numpy array, optional\n Extra mask pasted in when cutmix is applied.\n\n Returns\n -------\n trans_image : 4D Numpy array\n Transformed image. E.g. ``(x, y, z, channels)``.\n\n trans_mask : 4D Numpy array\n Transformed image mask. E.g. 
``(x, y, z, channels)``.\n \"\"\"\n\n # Apply flips in z as imgaug cannot do it\n prob = random.uniform(0, 1)\n if self.flip and prob < self.da_prob:\n l_image = []\n l_mask = []\n for i in range(image.shape[-1]):\n l_image.append(np.expand_dims(np.flip(image[...,i], 2), -1))\n for i in range(mask.shape[-1]):\n l_mask.append(np.expand_dims(np.flip(mask[...,i], 2), -1))\n image = np.concatenate(l_image, axis=-1)\n mask = np.concatenate(l_mask, axis=-1)\n\n # Reshape 3D volumes to 2D image type with multiple channels to pass\n # through imgaug lib\n o_img_shape = image.shape\n o_mask_shape = mask.shape\n image = image.reshape(image.shape[:2]+(self.X_z*self.X_c, ))\n mask = mask.reshape(mask.shape[:2]+(self.Y_z*self.Y_c, ))\n if e_im is not None: e_im = e_im.reshape(e_im.shape[:2]+(self.X_z*self.X_c, ))\n if e_mask is not None: e_mask = e_mask.reshape(e_mask.shape[:2]+(self.Y_z*self.Y_c, ))\n\n # Apply cutblur\n prob = random.uniform(0, 1)\n if self.cutblur and prob < self.da_prob:\n image = cutblur(image, self.cblur_size, self.cblur_down_range,\n self.cblur_inside)\n\n # Apply cutmix\n prob = random.uniform(0, 1)\n if self.cutmix and prob < self.da_prob:\n image, mask = cutmix(image, e_im, mask, e_mask, self.cmix_size)\n\n # Apply transformations to the volume and its mask\n segmap = SegmentationMapsOnImage(mask, shape=mask.shape)\n image, vol_mask = self.seq(image=image, segmentation_maps=segmap)\n mask = vol_mask.get_arr()\n\n # Recover the original shape\n image = image.reshape(o_img_shape)\n mask = mask.reshape(o_mask_shape)\n\n return image, mask\n\n def get_transformed_samples(self, num_examples, random_images=True,\n save_to_dir=True, out_dir='aug_3d', train=False):\n \"\"\"Apply selected transformations to a defined number of images from\n the dataset.\n\n Parameters\n ----------\n num_examples : int\n Number of examples to generate.\n\n random_images : bool, optional\n Randomly select images from the dataset. If False the examples\n will be generated from the start of the dataset.\n\n save_to_dir : bool, optional\n Save the images generated. The purpose of this variable is to\n check the images generated by data augmentation.\n\n out_dir : str, optional\n Name of the folder where the examples will be stored.\n\n train : bool, optional\n To avoid drawing a grid on the generated images. This should be\n set when the samples will be used for training.\n \"\"\"\n\n if not random_images and num_examples > self.X.shape[0]:\n num_examples = self.X.shape[0]\n print(\"WARNING: More samples requested than the ones available. \"\n \"'num_examples' fixed to {}\".format(num_examples))\n\n sample_x = np.zeros((num_examples, ) + self.shape, dtype=np.float32)\n sample_y = np.zeros((num_examples, ) + self.shape[:3]+(self.channels,),\n dtype=np.uint8)\n\n # Generate the examples\n print(\"0) Creating samples of data augmentation . . 
.\")\n for i in tqdm(range(num_examples)):\n ia.seed(i)\n if random_images or self.random_subvolumes_in_DA:\n pos = random.randint(0,self.X.shape[0]-1) \n else:\n pos = i\n\n if self.random_subvolumes_in_DA:\n vol, vol_mask, ox, oy, oz,\\\n s_x, s_y, s_z = random_3D_crop(\n self.X[pos], self.Y[pos], self.shape, self.val,\n draw_prob_map_points=True,\n vol_prob=(self.prob_map[pos] if self.prob_map is not None else None))\n else:\n vol = np.copy(self.X[pos])\n vol_mask = np.copy(self.Y[pos])\n\n if not self.da:\n sample_x[i] = vol\n sample_y[i] = vol_mask\n self.trans_made = ''\n else:\n if not train:\n self.__draw_grid(vol)\n self.__draw_grid(vol_mask)\n\n extra_img = np.random.randint(0, self.X.shape[0])\n sample_x[i], sample_y[i] = self.apply_transform(\n vol, vol_mask, e_im=self.X[extra_img],\n e_mask=self.Y[extra_img])\n\n # Save transformed 3D volumes \n if save_to_dir:\n os.makedirs(out_dir, exist_ok=True)\n # Original image/mask\n f = os.path.join(out_dir, \"orig_x_\"+str(pos)+self.trans_made+'.tiff')\n aux = self.X[pos].copy()\n self.__draw_grid(aux)\n aux = np.expand_dims((np.transpose(aux, (2,0,1,3))*255).astype(np.uint8), 1)\n imsave(f, aux, imagej=True, metadata={'axes': 'ZCYXS'})\n f = os.path.join(out_dir, \"orig_y_\"+str(pos)+self.trans_made+'.tiff')\n aux = self.Y[pos].copy()\n self.__draw_grid(aux)\n aux = np.expand_dims((np.transpose(aux, (2,0,1,3))*255).astype(np.uint8), 1)\n imsave(f, aux, imagej=True, metadata={'axes': 'ZCYXS'})\n # Transformed\n f = os.path.join(out_dir, \"x_aug_\"+str(pos)+self.trans_made+'.tiff')\n aux = np.expand_dims((np.transpose(sample_x[i], (2,0,1,3))*255).astype(np.uint8), 1)\n imsave(f, aux, imagej=True, metadata={'axes': 'ZCYXS'})\n # Mask\n f = os.path.join(out_dir, \"y_aug_\"+str(pos)+self.trans_made+'.tiff')\n aux = np.expand_dims((np.transpose(sample_y[i], (2,0,1,3))*255).astype(np.uint8), 1)\n imsave(f, aux, imagej=True, metadata={'axes': 'ZCYXS'})\n\n # Save the original images with a red point and a blue square \n # that represents the point selected with the probability map \n # and the random volume extracted from the original data\n if self.random_subvolumes_in_DA and self.prob_map is not None and i == 0:\n rc_out_dir = os.path.join(out_dir, 'rd_crop' + str(pos))\n os.makedirs(rc_out_dir, exist_ok=True)\n\n print(\"The selected point on the random crop was [{},{},{}]\"\n .format(ox,oy,oz))\n\n d = len(str(self.X[pos].shape[2]))\n for i in range(self.X[pos].shape[2]):\n im = Image.fromarray((self.X[pos,:,:,i,0]).astype(np.uint8)) \n im = im.convert('RGB') \n px = im.load() \n mask = Image.fromarray((self.Y[pos,:,:,i,0]).astype(np.uint8))\n mask = mask.convert('RGB')\n py = mask.load()\n \n if i == oz:\n # Paint the selected point in red\n p_size=6\n for row in range(oy-p_size,oy+p_size):\n for col in range(ox-p_size,ox+p_size): \n if col >= 0 and col < self.X[pos].shape[0] and \\\n row >= 0 and row < self.X[pos].shape[1]:\n px[row, col] = (255, 0, 0) \n py[row, col] = (255, 0, 0) \n \n if i >= s_z and i < s_z+self.shape[2]: \n # Paint a blue square that represents the crop made \n for col in range(s_x, s_x+self.shape[0]):\n px[s_y, col] = (0, 0, 255)\n px[s_y+self.shape[0]-1, col] = (0, 0, 255)\n py[s_y, col] = (0, 0, 255)\n py[s_y+self.shape[0]-1, col] = (0, 0, 255)\n for row in range(s_y, s_y+self.shape[1]): \n px[row, s_x] = (0, 0, 255)\n px[row, s_x+self.shape[1]-1] = (0, 0, 255)\n py[row, s_x] = (0, 0, 255)\n py[row, s_x+self.shape[1]-1] = (0, 0, 255)\n \n im.save(os.path.join(\n rc_out_dir,'rc_x_'+str(i).zfill(d)+'.png'))\n 
mask.save(os.path.join(\n rc_out_dir,'rc_y_'+str(i).zfill(d)+'.png')) \n return sample_x, sample_y\n\n\ndef random_3D_crop(vol, vol_mask, random_crop_size, val=False, vol_prob=None, \n weights_on_data=False, weight_map=None,\n draw_prob_map_points=False):\n \"\"\"Random 3D crop \"\"\"\n\n rows, cols, deep = vol.shape[0], vol.shape[1], vol.shape[2]\n dx, dy, dz, c = random_crop_size\n if val:\n x = 0\n y = 0\n z = 0\n ox = 0\n oy = 0\n oz = 0\n else:\n if vol_prob is not None:\n prob = vol_prob.ravel() \n \n # Generate the random coordinates based on the distribution\n choices = np.prod(vol_prob.shape)\n index = np.random.choice(choices, size=1, p=prob)\n coordinates = np.unravel_index(index, shape=vol_prob.shape)\n z = int(coordinates[0])\n x = int(coordinates[1])\n y = int(coordinates[2])\n oz = int(coordinates[0])\n ox = int(coordinates[1])\n oy = int(coordinates[2])\n \n # Adjust the coordinates to be the origin of the crop and control to\n # not be out of the volume\n if x < int(random_crop_size[0]/2):\n x = 0\n elif x > vol.shape[0] - int(random_crop_size[0]/2):\n x = vol.shape[0] - random_crop_size[0]\n else: \n x -= int(random_crop_size[0]/2)\n \n if y < int(random_crop_size[1]/2):\n y = 0\n elif y > vol.shape[1] - int(random_crop_size[1]/2):\n y = vol.shape[1] - random_crop_size[1]\n else:\n y -= int(random_crop_size[1]/2)\n\n if z < int(random_crop_size[2]/2):\n z = 0\n elif z > vol.shape[2] - int(random_crop_size[2]/2):\n z = vol.shape[2] - random_crop_size[2]\n else:\n z -= int(random_crop_size[2]/2)\n else:\n ox = 0\n oy = 0\n oz = 0\n x = np.random.randint(0, rows - dx + 1) \n y = np.random.randint(0, cols - dy + 1)\n z = np.random.randint(0, deep - dz + 1)\n\n if draw_prob_map_points:\n return vol[x:(x+dx), y:(y+dy), z:(z+dz), :], \\\n vol_mask[x:(x+dx), y:(y+dy), z:(z+dz), :], ox, oy, oz, x, y, z\n else:\n if weights_on_data:\n return vol[x:(x+dx), y:(y+dy), z:(z+dz), :], \\\n vol_mask[x:(x+dx), y:(y+dy), z:(z+dz), :],\\\n weight_map[x:(x+dx), y:(y+dy), z:(z+dz), :] \n else:\n return vol[x:(x+dx), y:(y+dy), z:(z+dz), :], \\\n vol_mask[x:(x+dx), y:(y+dy), z:(z+dz), :]\n","sub_path":"generators/data_3D_generators_v2.py","file_name":"data_3D_generators_v2.py","file_ext":"py","file_size_in_byte":30661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"191289174","text":"# coding=utf-8\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\nfrom django.conf import global_settings\n\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = 'umm#73g_b@o=142n9y@qod4$&m&^m4%q@32aa-=4^hmzb(r3qq'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nTEMPLATE_DEBUG = True\nTEMPLATE_DIRS = (\n os.path.join(BASE_DIR, 'templates'),\n)\n\nALLOWED_HOSTS = []\n\n\n# Application definition\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'app',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nROOT_URLCONF = 'aaftw.urls'\n\nWSGI_APPLICATION = 'aaftw.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.7/ref/settings/#databases\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.mysql',\n 'NAME': 'aaftw',\n 'USER': 'aaftw',\n 'PASSWORD': 'aaftw',\n 'HOST': 'localhost',\n 'PORT': '3306',\n }\n}\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.7/topics/i18n/\n\nLANGUAGE_CODE = 'es'\n\nTIME_ZONE = 'America/Argentina/Buenos_Aires'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = False\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.7/howto/static-files/\nSTATIC_URL = '/static/'\nSTATICFILES_DIRS = (\n os.path.join(BASE_DIR, 'static'),\n 'static/',\n)\n\n# API CONSTANTS\nAPI_KEY = '111233f9-a25d-42d1-88a1-d1688436884c'\n\nANALYTICS = False\n\nSTYLES_EXTENSION = 'less'\nSTYLES_EXTENSION_TYPE = '/less'\n\nfrom local_settings import *","sub_path":"aaftw/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":2271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"350566182","text":"\n# coding=utf-8\nfrom google.appengine.ext import db\nimport json\nfrom google.appengine.api import search, images\nimport logging\nfrom flask import url_for\n\ndef datetimeformat(value, format='%m-%d-%Y'):\n return value.strftime(format)\n\nclass Post(db.Model):\n title = db.StringProperty(required=True, default=\"\")\n content = db.TextProperty(required=True, default=\"\")\n when = db.DateTimeProperty(auto_now_add=True)\n tags = db.StringListProperty()\n is_index = db.BooleanProperty()\n is_about = db.BooleanProperty()\n is_about_summary = db.BooleanProperty()\n comments = db.ListProperty(db.Key)\n is_private = db.BooleanProperty(required=True, default=False)\n\n def get_comments(self):\n query = Comment.all()\n query.filter('__key__ IN', self.comments)\n query.order(\"when\")\n return query.run(read_policy=db.STRONG_CONSISTENCY)\n\n def add_comment(self, comment):\n if comment.key() not in self.comments:\n self.comments.append(comment.key())\n self.put()\n\n def save_post(self):\n tags = list(self.tags)\n add_tags(tags)\n self.put()\n add_post_index(self)\n\n def edit_post(self, title, content, tags, is_private=False):\n self.title = title\n self.content = content\n self.is_private = is_private\n new_tags = set(tags) - set(self.tags)\n removed_tags = set(self.tags) - set(tags)\n add_tags(list(new_tags))\n remove_tags(list(removed_tags))\n self.tags = tags\n self.put()\n modify_post_index(self)\n\n def delete_post(self):\n comments = self.get_comments()\n for comment in comments:\n comment.delete()\n remove_tags(self.tags)\n delete_post_index(self)\n self.delete()\n\n def to_dict(self):\n comment_list = []\n comments = self.get_comments()\n for comment in comments:\n comment_list.append(comment.to_dict())\n return {\n 'title': self.title, 'content': self.content, 'when': datetimeformat(self.when),\n 'comments': comment_list, 'tags': self.tags}\n\n\ndef get_posts(page_number, page_size, private=False):\n query = Post.all()\n query.filter(\"is_index =\", None)\n query.filter(\"is_about =\", None)\n query.filter(\"is_about_summary =\", None)\n if not private:\n query.filter(\"is_private =\", False)\n offset = page_number * page_size\n query.order(\"-when\")\n return 
query.fetch(read_policy=db.STRONG_CONSISTENCY, offset=offset, limit=page_size)\n\n\ndef get_recent_posts():\n query = Post.all()\n query.filter(\"is_index =\", None)\n query.filter(\"is_about =\", None)\n query.filter(\"is_about_summary =\", None)\n query.filter(\"is_private =\", False)\n query.order(\"-when\")\n return query.fetch(limit=10)\n\n\ndef get_posts_by_tag(tag_name, page_number, page_size, private=False):\n query = Post.all()\n query.filter('tags =', tag_name)\n if not private:\n query.filter(\"is_private =\", False)\n offset = page_number * page_size\n query.order(\"-when\")\n return query.fetch(read_policy=db.STRONG_CONSISTENCY, offset=offset, limit=page_size)\n\n\ndef get_post_by_comment(comment_id):\n query = Post.all()\n query.filter('comments =', comment_id)\n query.order(\"-when\")\n return query.get()\n\n\ndef get_index_post():\n query = Post.all()\n query.filter('is_index =', True)\n return query.get()\n\n\ndef get_about_post():\n query = Post.all()\n query.filter('is_about =', True)\n return query.get()\n\ndef get_about_summary_post():\n query = Post.all()\n query.filter('is_about_summary =', True)\n return query.get()\n\n\ndef export_posts_json():\n query = Post.all()\n post_list = []\n posts = query.run(read_policy=db.STRONG_CONSISTENCY)\n for post in posts:\n post_list.append(post.to_dict())\n return json.dumps(post_list, ensure_ascii=False)\n\n\ndef add_post_index(post):\n post_document = search.Document(\n doc_id=str(post.key().id()),\n language='zh',\n fields=[\n search.TextField(name='title', value=post.title),\n search.HtmlField(\n name='content', value=post.content)\n ])\n try:\n index = search.Index(name=\"Posts\")\n index.put(post_document)\n except search.Error:\n logging.exception('Put failed')\n\n\ndef delete_post_index(post):\n index = search.Index(name=\"Posts\")\n index.delete(str(post.key().id()))\n\n\ndef modify_post_index(post):\n delete_post_index(post)\n add_post_index(post)\n\n\nclass Comment(db.Model):\n content = db.TextProperty(required=True)\n when = db.DateTimeProperty(auto_now_add=True)\n author = db.StringProperty(required=True)\n email = db.StringProperty(required=True)\n website = db.StringProperty(required=True, default='#')\n is_guest_book = db.BooleanProperty(required=True, default=False)\n is_unread = db.BooleanProperty(required=True, default=True)\n # 0 male 1 female 2 secret\n gender = db.IntegerProperty(required=True, default=2)\n\n def to_dict(self):\n # fixed: the trailing comma made this return a one-element tuple\n return {\n 'content': self.content, 'when': datetimeformat(self.when), 'author': self.author,\n 'email': self.email, 'is_guest_book': self.is_guest_book}\n\n def get_post(self):\n query = Post.all()\n query.filter(\"comments =\", self.key())\n return query.get()\n\n def delete_comment(self):\n post = self.get_post()\n if post:\n post.comments.remove(self.key())\n post.put()\n self.delete()\n return post.key().id()\n else:\n self.delete()\n return None\n\n\ndef get_guest_book_message(page_number, page_size):\n query = Comment.all()\n query.filter('is_guest_book =', True)\n query.order(\"-when\")\n offset = page_number * page_size\n return query.fetch(read_policy=db.STRONG_CONSISTENCY, offset=offset, limit=page_size)\n\n\ndef count_unread_message():\n query = Comment.all()\n query.filter('is_unread =', True)\n query.order(\"-when\")\n return query.count()\n\n\ndef get_unread_message():\n query = Comment.all()\n query.filter('is_unread =', True)\n query.order(\"-when\")\n return query.run()\n\n\nclass Tag(db.Model):\n name = db.StringProperty(required=True)\n count = 
db.IntegerProperty(required=True)\n\n\ndef remove_tags(removed_tags):\n query = Tag.all()\n query.filter(\"name IN\", removed_tags)\n for tag in query.run():\n tag.count -= 1\n if tag.count == 0:\n tag.delete()\n else:\n tag.put()\n\n\ndef add_tags(added_tags):\n query = Tag.all()\n query.filter(\"name IN\", added_tags)\n for tag in query.run():\n tag.count += 1\n tag.put()\n added_tags.remove(tag.name)\n for tag in added_tags:\n tag_entity = Tag(name=tag, count=1)\n tag_entity.put()\n\ndef get_popular_tags():\n query = Tag.all()\n query.order(\"-count\")\n return query.fetch(\n read_policy=db.STRONG_CONSISTENCY, limit=30)\n\nclass MultiMediaBlob(db.Model):\n name = db.StringProperty(required=True)\n data = db.BlobProperty(required=True)\n media_type = db.StringProperty(required=True)\n when = db.DateTimeProperty(auto_now_add=True)\n html_content_type = db.StringProperty(required=True)\n is_private = db.BooleanProperty(required=True, default=False)\n\n\ndef get_image_by_name(image_name):\n query = MultiMediaBlob.all()\n query.filter('name =', image_name)\n query.filter('media_type =', 'image')\n return query.get()\n\n\ndef save_image(image_name, file_name, image_data, is_private=False):\n content_type = \"image/\" + file_name.split(\".\")[-1]\n img = images.Image(image_data)\n width = img.width\n image = get_image_by_name(image_name)\n while len(db.Blob(image_data)) > 500000:\n width = int(width * 0.8)\n image_data = images.resize(image_data, width)\n if image:\n image.data = db.Blob(image_data)\n image.media_type = \"image\"\n image.html_content_type = content_type\n image.is_private = is_private\n image.put()\n else:\n image = MultiMediaBlob(name=image_name, data=db.Blob(\n image_data), media_type='image', html_content_type=content_type, is_private=is_private)\n image.put()\n\n\ndef get_image_list(page_number, page_size, private=False):\n query = MultiMediaBlob.all()\n query.filter(\"media_type =\", 'image')\n if not private:\n query.filter(\"is_private =\", False)\n query.order(\"-when\")\n offset = page_number * page_size\n results = query.fetch(\n read_policy=db.STRONG_CONSISTENCY, offset=offset, limit=page_size)\n image_list = []\n for image in results:\n image_list.append(image)\n return image_list\n\n\nclass User(db.Model):\n is_admin = db.BooleanProperty()\n password = db.StringProperty(required=True)\n\n\ndef get_admin():\n query = User.all()\n query.filter('is_admin =', True)\n return query.get()\n\n\nclass Uread_Entity():\n\n def __init__(self, name, url, post_id):\n self.name = name\n self.url = url\n self.comments = []\n self.id = post_id\n\n def __eq__(self, rhs):\n return self.__hash__() == rhs\n\n def __hash__(self):\n return self.id\n\n\ndef get_unread_list(comments):\n unread_list = []\n for comment in comments:\n if comment.is_guest_book:\n if -1 in unread_list:\n index = unread_list.index(-1)\n unread_item = unread_list[index]\n unread_item.comments.append(comment)\n else:\n ue = Uread_Entity(\"Guest Book\", url_for('guest_book'), -1)\n ue.comments.append(comment)\n unread_list.append(ue)\n else:\n post = get_post_by_comment(comment.key())\n if not post:\n continue\n if post.key().id() in unread_list:\n index = unread_list.index(post.key().id())\n unread_item = unread_list[index]\n unread_item.comments.append(comment)\n else:\n ue = Uread_Entity(\n post.title, url_for('detailed_post', post_id=post.key().id()), post.key().id())\n ue.comments.append(comment)\n unread_list.append(ue)\n return 
unread_list\n","sub_path":"coolshinesmart/application/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":10337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"255900809","text":"# -*- coding: utf-8 -*-\n# Copyright (c) 2018, digitalsputnik and contributors\n# For license information, please see license.txt\n\nfrom __future__ import unicode_literals\nimport frappe\nimport json\nimport re\nfrom frappe.model.document import Document\n\nclass ManufacturingRecipe(Document):\n\tdef autoname(self):\n\t\tself.name = self.item\n\n\tdef on_update(self):\n\t\t#insert current bom to article\n\t\tcur_doc = frappe.get_doc(\"Item\",self.item)\n\t\tcur_doc.bom = self.name\n\t\tcur_doc.save()\n\t\t#hmm?\n\t\tfrappe.db.commit()\n\n\n#remove the already existing items from the list\n@frappe.whitelist()\ndef findNewItems(inputJson):\n\toutput = []\n\t#loop through all the lines and find which ones don't exist yet\n\tinput = json.loads(inputJson)\n\tfor item in input:\n\t\t#return array only with the lines that don't already exist\n\t\t#note: fieldname normalized to lowercase 'code' to match the creation below\n\t\tif not frappe.db.exists({\"doctype\":\"Item\", \"code\":item[0]}):\n\t\t\t#append the fixed item to output\n\t\t\toutput.append(item)\n\treturn output\n\n#create new articles based on the list\n@frappe.whitelist()\ndef createNewItems(inputJson):\n\tinput = json.loads(inputJson)\n\tfor item in input:\n\t\t#if the same article is 2x in the BOM do not try to create a second copy\n\t\tif frappe.db.exists({\"doctype\":\"Item\", \"code\":item[0]}):\n\t\t\treturn\n\t\t#create new article\n\t\titemdoc = frappe.get_doc({\"doctype\":\"Item\", \"code\":item[0], \"item\":item[1], \"uomtmp\":item[3]})\n\t\titemdoc.insert()\n\tfrappe.db.commit()\n\n#update the comm and manuf data\n@frappe.whitelist()\ndef updateItems(inputJson):\n\tinput = json.loads(inputJson)\n\tfor item in input:\n\t\t#check if comm is filled in, if so add the data\n\t\tif item[4]:\n\t\t\t#deal with multiple entries separated by comma\n\t\t\tsubitems = item[4].split(', ')\n\t\t\t#get item to add the comm lines\n\t\t\titemName = frappe.get_all(\"Item\",filters={\"code\":item[0]})[0]['name']\n\t\t\titemDoc = frappe.get_doc(\"Item\", itemName)\n\t\t\tfor subitem in subitems:\n\t\t\t\titemDoc.addSource(subitem)\n\t\t#check if manuf is filled in and add the data\n\t\tif item[5]:\n\t\t\t#check if the line exists but is empty, if so skip this iteration\n\t\t\tif item[5].isspace():\n\t\t\t\tcontinue\n\n\t\t\t#get the code of the current line\n\t\t\titemName = frappe.get_all(\"Item\",filters={\"code\":item[0]})[0]['name']\n\n\t\t\t#check if the electronic component exists\n\t\t\tif frappe.db.exists(\"Electronic Component\",itemName):\n\t\t\t\t#if yes load it up\n\t\t\t\tcomp = frappe.get_doc(\"Electronic Component\",itemName)\n\t\t\telse:\n\t\t\t\t#if no create it\n\t\t\t\tcomp = frappe.get_doc({\"doctype\":\"Electronic Component\",\"code\":itemName})\n\t\t\t\tcomp.insert()\n\n\t\t\t#split the input first by tabs, second by semicolons\n\t\t\tmain = re.split('\\t',item[5])\n\t\t\treplacements = re.split(';\\s*',main[2])\n\t\t\t#split the replacement into mnfr & MPO\n\t\t\tfor i in range(len(replacements)):\n\t\t\t\t#skip all the replacements that start with 'any' (these are comments)\n\t\t\t\tif re.match('any.*',replacements[i]):\n\t\t\t\t\treplacements[i] = ['',replacements[i]]\n\t\t\t\telse:\n\t\t\t\t\treplacements[i] = re.split('[,\\s]\\s*',replacements[i])\n\n\t\t\t#make main and replacement component into single 
array\n\t\t\treplacements.insert(0,[main[0],main[1]])\n\t\t\telectronics = replacements\n\n\t\t\t#add line to electronic component\n\t\t\tfor epart in electronics:\n\t\t\t\t#check if the current component is not just whitespace\n\t\t\t\tif not epart[0] == \"\":\n\t\t\t\t\tcomp.addPart(epart[0],epart[1])\n\n\tfrappe.db.commit()\n","sub_path":"dsmanufacturing/manufacturing/doctype/manufacturing_recipe/manufacturing_recipe.py","file_name":"manufacturing_recipe.py","file_ext":"py","file_size_in_byte":3241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"271131121","text":"\"\"\"\nThis module holds the classes necessary for the abstraction of the shopping\ncart.\n\"\"\"\nfrom .base import BasePage\nfrom selenium.webdriver.common.by import By\n\n\nclass ShoppingCart(BasePage):\n\n    \"\"\"\n    This class represents the shopping cart page and is responsible for all\n    of its functionality.\n\n    Attributes:\n        items: List of cart items scraped from a table. Essentially, elements\n        here are still webelements.\n        products: List of processed elements of the item list. Some are still\n        webelements, like remove and qty, used for interacting, and the rest\n        are processed data, like Price, Title, and Total\n    \"\"\"\n\n    _url_modifier = '/mlgeducacao/carrinho'\n    _validate = ''\n    _items_locator = 'cart_item'\n    _plus_locator = '.plus'\n    _minus_locator = '.minus'\n    _qty_locator = '.input-text.qty.text'\n    _finish_purchase_locator = 'proceed'  # NAME\n    _update_cart_locator = 'update_cart'  # NAME\n    _product = {'web_remove': None,\n                'title': None,\n                'price': None,\n                'web_qty': None,\n                'total': None}\n\n    def __init__(self, driver):\n        super(ShoppingCart, self).__init__(driver)\n        self.products = []\n        self.items = []\n        self._load_data()\n\n    def _load_data(self):\n        \"\"\"Scrape the cart items from the page and build the product list.\"\"\"\n        self.items = self.driver.find_elements(By.CLASS_NAME,\n                                               self._items_locator)\n        self._get_products()\n\n    def _get_products(self):\n        for index, item in enumerate(self.items):\n            d = {}\n            d['index'] = index\n            # keys fixed to 'web_*' so they match the _product template and\n            # the lookups in get_products() (they were 'webe_*' before)\n            d['web_remove'] = item[0].find_element(By.TAG_NAME, 'a')\n            d['title'] = item[2].text.encode('utf-8')\n            d['price'] = float(item[3].text.split(' ')[1].replace(',', '.'))\n            d['web_qty'] = item[4].find_element(By.CSS_SELECTOR,\n                                                self._qty_locator)\n            d['total'] = item[5].text.encode('utf-8')\n            self.products.append(d)\n\n    def get_products(self):\n        products = []\n        for index, product in enumerate(self.products):\n            d = {}\n            d['index'] = index\n            d['price'] = product['price']\n            element = product['web_qty'].find_element(By.CSS_SELECTOR,\n                                                      self._qty_locator)\n            d['qty'] = element.get_attribute('value')\n            d['total'] = product['total']\n            products.append(d)\n        return products\n\n    def minus_qty(self, product_index):\n        element = self.items[product_index].find_element(By.CSS_SELECTOR,\n                                                         self._minus_locator)\n        element.click()\n\n    def plus_qty(self, product_index):\n        element = self.items[product_index].find_element(By.CSS_SELECTOR,\n                                                         self._plus_locator)\n        element.click()\n\n    def type_qty(self, product_index, number):\n        element = self.items[product_index].find_element(By.CSS_SELECTOR,\n                                                         self._qty_locator)\n        element.clear()\n        element.send_keys(number)\n\n    def get_qty(self, product_index):\n        element = self.items[product_index].find_element(By.CSS_SELECTOR,\n                                                         self._qty_locator)\n        value = element.get_attribute('value')\n        return value\n\n    def update_cart(self, product_index):\n        element = self.driver.find_element(By.NAME, self._update_cart_locator)\n        element.click()\n        self._load_data()\n\n    def 
finish_purchase(self, product_index):\n        element = self.driver.find_element(By.NAME,\n                                            self._finish_purchase_locator)\n        element.click()\n","sub_path":"framework/mlg/pages/shopping_cart.py","file_name":"shopping_cart.py","file_ext":"py","file_size_in_byte":3849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"443876426","text":"# -*- coding: utf-8 -*-\n\"\"\"Rebinning the PSF\n\nThis script normalizes the drizzled PSF.\n\n\"\"\"\n\nimport os\nimport numpy as np\nimport argparse\nimport astropy.io.fits as pyfits\n\ndef parse_args():\n    \"\"\"Parse command-line arguments\n    \"\"\"\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--save_dir', default='rebinned_dir', dest='save_dir', type=str,\n        help='directory where the unnormalized drizzled PSF is located.')\n    args = parser.parse_args()\n    return args\n\ndef main():\n    #args = parse_args()\n    unnormalized_path = 'dripsf.fits'\n    psf = pyfits.open(unnormalized_path)[0].data.copy()\n    psf /= np.sum(psf)\n    pyfits.PrimaryHDU(psf).writeto('drizzled_PSF.fits', overwrite=True)\n\nif __name__ == '__main__':\n    main()","sub_path":"tinier_tim/normalize_drizzled_psf.py","file_name":"normalize_drizzled_psf.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"168104793","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom datetime import datetime\nfrom handlers import ApiHandler\nfrom schema import Schema, Optional, Use\nfrom models.tables import Customer, Order, safe_unicode\n\n\nclass Order_Handler(ApiHandler):\n\n    def get(self, id):\n        customer = self.session.query(Customer).get(int(id))\n        orders = customer.orders\n        order_info = [o.info for o in orders]\n        self.success_response(order_info)\n\n    def post(self, id):\n        \"\"\"\n        create order\n        \"\"\"\n        schema = Schema({\n            \"measure\": {\n                \"waist2tail\": Use(float),\n                \"tail_length\": Use(float),\n                \"sleeve_length\": Use(float),\n                \"sleeve_bow\": Use(float),\n                \"sleeve_cuff\": Use(float),\n                \"zipper\": Use(float),\n                \"tie\": Use(float),\n                \"chest_pad\": Use(float),\n                \"finsh_bone\": Use(float),\n                \"bead\": Use(float),\n                \"embroid\": Use(float),\n            },\n            \"style_request\": Use(safe_unicode),\n            \"craft_request\": Use(safe_unicode),\n            \"handcraft_request\": Use(safe_unicode),\n            \"address\": Use(safe_unicode),\n            \"already_paied\": Use(float),\n            \"total_price\": Use(float),\n            \"discount\": Use(float),\n            \"deadline\": Use(safe_unicode),\n        })\n        current_user_id = self.current_user[\"id\"]\n        data = self.get_data(schema)\n        new_order = Order(measure=data[\"measure\"], status=\"new\", deadline=datetime.strptime(data.get(\"deadline\"), \"%Y-%m-%d\"),\n                          style_request=data.get(\"style_request\"),\n                          craft_request=data.get(\"craft_request\"),\n                          handcraft_request=data.get(\"handcraft_request\"),\n                          total_price=data[\"total_price\"],\n                          already_paied=data[\"already_paied\"],\n                          address=data[\"address\"],\n                          create_by_id = current_user_id,\n                          )\n        customer = self.session.query(Customer).get(int(id))\n        customer.orders.add(new_order)\n        self.session.flush()\n        self.success_response()\n\n    def put(self, id):\n        \"\"\"change order\"\"\"\n        schema = Schema({\n            \"measure\": {\n                Optional(\"waist2tail\"): Use(float),\n                Optional(\"tail_length\"): Use(float),\n                Optional(\"sleeve_length\"): Use(float),\n                Optional(\"sleeve_bow\"): Use(float),\n                Optional(\"sleeve_cuff\"): Use(float),\n                Optional(\"zipper\"): Use(float),\n                Optional(\"tie\"): Use(float),\n                Optional(\"chest_pad\"): Use(float),\n                
Optional(\"finsh_bone\"): Use(float),\n Optional(\"bead\"): Use(float),\n Optional(\"embroid\"): Use(float),\n },\n \"style_requrest\": Use(safe_unicode),\n \"craft_request\": Use(safe_unicode),\n \"handcraft_request\": Use(safe_unicode),\n \"city\": Use(safe_unicode),\n Optional(\"down_payment\"): Use(float),\n Optional(\"total_price\"): Use(float),\n Optional(\"deadline\"): Use(safe_unicode),\n })\n data = self.get_data(schema)\n\nclass Customer_Order_Handler(ApiHandler):\n def get(self):\n schema = Schema({\n Optional(\"myself\"): Use(bool),\n Optional(\"unfinised\"): Use(bool),\n Optional(\"finised\"): Use(bool),\n })\n","sub_path":"handlers/handler_order.py","file_name":"handler_order.py","file_ext":"py","file_size_in_byte":3526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"99992903","text":"import re\nimport sys, os\nimport json\nimport csv\nfrom os import system\nfrom math import sqrt\nfrom argparse import ArgumentParser\nfrom collections import OrderedDict\nfrom pprint import pprint\nfrom random import uniform, randint, shuffle, choice, randrange\nfrom ipaddress import ip_address, IPv6Address, ip_network, IPv6Network, ip_interface\nimport numpy as np\nfrom collections import Counter\n\nNODE_RELATED_MESSAGES = 0\nSESSION_RELATED_MESSAGES = 1\n\nPFCP_ELEMS = OrderedDict([(NODE_RELATED_MESSAGES, { 'name': 'nodeRelatedMessages',\n 'procs': OrderedDict([(1, 'HeartbeatRequest'),\n (2, 'HeartbeatResponse'),\n (3, 'PFDManagementRequest'),\n (4, 'PFDManagementResponse'),\n (5, 'AssociationSetupRequest'),\n (6, 'AssociationSetupResponse'),\n (7, 'AssociationUpdateRequest'),\n (8, 'AssociationUpdateResponset'),\n (9, 'AssociationReleaseRequest'),\n (10, 'AssociationReleaseResponse'),\n (11, 'VersionNotSupportedResponse'),\n (12, 'NodeReportRequest'),\n (13, 'NodeReportResponse'),\n (14, 'SessionSetDeletionRequest'),\n (15, 'SessionSetDeletionResponse')]) }),\n (SESSION_RELATED_MESSAGES, { 'name': 'sessionRelatedMessages',\n 'procs': OrderedDict([(50, 'SessionEstablishmentRequest'),\n (51, 'SessionEstablishmentResponse'),\n (52, 'SessionModificationRequest'),\n (53, 'SessionModificationResponse'),\n (54, 'SessionDeletionRequest'),\n (55, 'SessionDeletionResponse'),\n (56, 'SessionReportRequest'),\n (57, 'SessionReportResponse')]) })])\n\n\ndef to_json_convertation(file):\n tshark_exe = r\"c:\\Program Files\\Wireshark\\tshark.exe\"\n if file.endswith('.pcap') or file.endswith('.pcapng'):\n cmd = '\"{}\" -r {} -T json >{}'.format(tshark_exe, file, '{}.json'.format(file.partition('.')[0]))\n print(\"command '\" + cmd + \"' is executed\")\n system(cmd)\n print(\"convertation to json successfylly ended\")\n elif file.endswith('.json'):\n print(\"no need to convert to json\")\n else:\n print('unknown command. 
Make sure that you use a pcap, pcapng or json file')\n\n\ndef save_to_csv(file_to_write, lines_to_write):\n    with open(file_to_write, 'w') as file:\n        for line in lines_to_write:\n            file.write((','.join(line)) + '\\n')\n\n# start of the features extracting block\n\nclass CurrentPacketParser:\n    def __init__(self, packet):\n        #common stats\n        self.ip_src = packet['_source']['layers']['ipv6']['ipv6.src_host']\n        self.ip_dst = packet['_source']['layers']['ipv6']['ipv6.dst_host']\n        self.port_src = int(packet['_source']['layers']['udp']['udp.srcport'])\n        self.port_dst = int(packet['_source']['layers']['udp']['udp.dstport'])\n        self.utc_time = float((packet['_source']['layers']['frame']['frame.time_epoch']))\n        self.tcp_len = int(packet['_source']['layers']['frame']['frame.len'])\n        self.hop_limit = int(packet['_source']['layers']['ipv6']['ipv6.hlim'])\n\n        self.cmd_type = None\n        for elem_code in PFCP_ELEMS.keys():\n            proc_code = int(packet['_source']['layers']['pfcp']['pfcp.msg_type'])\n            if proc_code in PFCP_ELEMS[elem_code]['procs'].keys():\n                self.cmd_type = '{}:{}'.format(elem_code, proc_code)\n        assert (self.cmd_type is not None)\n\nclass FeaturesExtracting:\n    def __init__(self, ):\n        self.features_dict = OrderedDict({})\n        #this was in the header_creating() function\n        self.field_names = ['src_ip', 'dst_ip', 'src_port', 'dst_port', 'first_pkt_time', 'last_pkt_time', 'avg_hops', 'avg_packet_len']\n        for elem in PFCP_ELEMS.values():\n            for proc in elem['procs'].values():\n                self.field_names += ['{}:{}'.format(elem['name'], proc)]\n        self.field_names += ['class']\n\n    def create_keys(self, parsed_packet):\n        if parsed_packet.ip_src < parsed_packet.ip_dst:\n            ip_pair = '{} : {}'.format(parsed_packet.ip_src, parsed_packet.ip_dst)\n            port_pair = '{}:{}'.format(parsed_packet.port_src, parsed_packet.port_dst)\n        else:\n            # 'else' also covers the corner case ip_src == ip_dst, which the\n            # old elif left unhandled (the function then returned None)\n            ip_pair = '{} : {}'.format(parsed_packet.ip_dst, parsed_packet.ip_src)\n            port_pair = '{}:{}'.format(parsed_packet.port_dst, parsed_packet.port_src)\n        return (ip_pair, port_pair)\n\n    def insert_highlevel_keys(self, parsed_packet):\n        (ip_pair, port_pair) = self.create_keys(parsed_packet)\n        if ip_pair not in self.features_dict.keys():\n            self.features_dict[ip_pair] = OrderedDict({})\n        if port_pair not in self.features_dict[ip_pair].keys():\n            self.features_dict[ip_pair][port_pair] = [parsed_packet]\n        else:\n            self.features_dict[ip_pair][port_pair] = self.features_dict[ip_pair][port_pair] + [parsed_packet]\n\n    def insert_packet_stats(self):\n        temp_dict = []\n        for port_key in self.features_dict.values():\n            for packet_stats in port_key.values(): #port_key contains the link to the packets stats, in list format\n\n                first_pkt_time = min(map(lambda x: x.utc_time, packet_stats))\n                last_pkt_time = max(map(lambda x: x.utc_time, packet_stats))\n                ip_src = packet_stats[0].ip_src #[0] since port_key is an ordered dict\n                ip_dst = packet_stats[0].ip_dst\n                port_src = packet_stats[0].port_src\n                port_dst = packet_stats[0].port_dst\n                hops_avg = sum(list(map(lambda x: x.hop_limit, packet_stats))) / len(list(map(lambda x: x.hop_limit, packet_stats)))\n                pkt_len_avg = sum(list(map(lambda x: x.tcp_len, packet_stats))) / len(list(map(lambda x: x.tcp_len, packet_stats)))\n\n                line = [str(ip_src), str(ip_dst), str(port_src), str(port_dst), '{:.6f}'.format(first_pkt_time),\n                        '{:.6f}'.format(last_pkt_time), '{:.0f}'.format(hops_avg), '{:.0f}'.format(pkt_len_avg)]\n\n                for elem_code in PFCP_ELEMS.keys():\n                    for proc_code in PFCP_ELEMS[elem_code]['procs'].keys():\n                        line += [str(len(list(filter(lambda x: 
x.cmd_type == '{}:{}'.format(elem_code, proc_code), packet_stats))))]\n\n                line += ['0'] # type of traffic, 0 for normal, 1 for abnormal\n\n                temp_dict += [line]\n\n        return temp_dict\n\n    def get_field_names(self):\n        return self.field_names\n\n    def get_field_val_by_name(self, fields, name):\n        field_pos = None\n        try:\n            field_pos = self.field_names.index(name)\n        except ValueError:\n            print(\"field with name '\" + name + \"' was not found\")\n        assert(field_pos is not None)\n        return fields[field_pos]\n\ndef avg(list_num):\n    return sum(list(map(lambda x: float(x), list_num))) / len(list_num)\n\ndef stdev(list_num):\n    return sqrt(avg(list(map(lambda x: (float(x) - avg(list_num)) ** 2, list_num))))\n\ndef rnd(dict_num):\n    return dict_num['avg'] + (uniform(0, dict_num['max'] - dict_num['avg'])\n                              if randint(0, 1) == 0 else -uniform(0, dict_num['avg'] - dict_num['min']))\n\ndef rnd_ip(ips):\n    temp_ip = choice(ips)\n    #new_ip = int(ip_address(temp_ip)) + randint(-100, 200)\n    new_ip = int(ip_address(temp_ip)) + randrange(-50, 50, 10)\n    return ip_address(new_ip)\n\nclass DatasetExpansion:\n    # statistical characteristics extraction\n    def __init__(self, norm_features_recs, norm_features_stats=None):\n        self.norm_features_recs = norm_features_recs\n\n        if norm_features_stats is None:\n            norm_features_stats = norm_features_recs.insert_packet_stats()\n\n        fst_pkt_times = [float(norm_features_recs.get_field_val_by_name(elem, 'first_pkt_time')) for elem in norm_features_stats]\n        durs = [float(norm_features_recs.get_field_val_by_name(elem, 'last_pkt_time')) - float(norm_features_recs.get_field_val_by_name(elem, 'first_pkt_time'))\n                for elem in norm_features_stats]\n        self.pkt_dur = {'avg': avg(durs), 'min': min(durs), 'max': max(durs)}\n        self.first_time = min(fst_pkt_times)\n        self.last_time = max(fst_pkt_times) + 300\n\n        self.src_ips = [(norm_features_recs.get_field_val_by_name(elem, 'src_ip')) for elem in norm_features_stats]\n        self.dst_ips = [(norm_features_recs.get_field_val_by_name(elem, 'dst_ip')) for elem in norm_features_stats]\n\n        src_ports_range = [(norm_features_recs.get_field_val_by_name(elem, 'src_port')) for elem in norm_features_stats]\n        dst_ports_range = [(norm_features_recs.get_field_val_by_name(elem, 'dst_port')) for elem in norm_features_stats]\n        self.ports_range = set([int(i) for i in (src_ports_range + dst_ports_range)])\n        self.min_port = min(self.ports_range)\n        self.max_port = max(self.ports_range)\n\n        # this block extracts the column under the current field and collects its statistical characteristics\n        self.stat_field_vals = {}\n        for field in self.norm_features_recs.get_field_names():\n            if field not in ['src_ip', 'dst_ip', 'src_port', 'dst_port', 'first_pkt_time', 'last_pkt_time', 'class']:\n                field_column = [float(norm_features_recs.get_field_val_by_name(elem, field)) for elem in norm_features_stats]\n                self.stat_field_vals[field] = {'avg': avg(field_column), 'min': min(field_column), 'max': max(field_column), 'stdev': stdev(field_column)}\n\n    def create_normal_record(self):\n        temp_list = []\n        first_pkt_time = 0.0\n        for field in self.norm_features_recs.get_field_names():\n            if field == 'first_pkt_time':\n                ms = ''.join(str(randint(0, 9)) for _ in range(6))\n                datestamp = uniform(self.first_time, self.last_time)\n                first_pkt_time = float(f'{str(datestamp).partition(\".\")[0]}.{ms}')\n                # fixed: store the same timestamp that last_pkt_time is derived from\n                temp_list += [str(first_pkt_time)]\n            elif field == 'last_pkt_time':\n                temp_list += [str(first_pkt_time + abs(rnd(self.pkt_dur)))]\n            elif field == 'src_ip':\n                temp_ip = choice(self.src_ips)\n                new_ip = int(ip_address(temp_ip)) + 
randint(0, 5)\n temp_list += [str(ip_address(new_ip))]\n elif field == 'dst_ip':\n temp_ip = choice(self.dst_ips)\n new_ip = int(ip_address(temp_ip)) + randint(0, 5)\n temp_list += [str(ip_address(new_ip))]\n elif field == 'src_port' or field == 'dst_port':\n port = choice(list(self.ports_range))\n temp_list += [str(port + randint(-10, 10))]\n elif field == 'class':\n temp_list += ['0'] # type of traffic, 0 for normal, 1 for abnormal\n else:\n temp_list += [str(int(rnd(self.stat_field_vals[field])))]\n return temp_list\n\n# start of the anomaly generating block\nclass RndAnomalyGenerator:\n # statistical characteristics extraction\n def __init__(self, norm_features_recs, norm_features_stats=None):\n self.norm_features_recs = norm_features_recs\n\n if norm_features_stats is None:\n norm_features_stats = norm_features_recs.insert_packet_stats()\n\n # for date in utc-format\n self.last_first_pkt_time = float(\n norm_features_recs.get_field_val_by_name(norm_features_stats[-1], 'first_pkt_time'))\n fst_pkt_times = [float(norm_features_recs.get_field_val_by_name(elem, 'first_pkt_time')) for elem in\n norm_features_stats]\n diff_fst_pkt_times = list(map(lambda x: abs(float(x[1]) - float(x[0])), zip(fst_pkt_times, fst_pkt_times[1:])))\n durs = [float(norm_features_recs.get_field_val_by_name(elem, 'last_pkt_time')) - float(\n norm_features_recs.get_field_val_by_name(elem, 'first_pkt_time'))\n for elem in norm_features_stats]\n self.diff_fst_pkt_time = {'avg': avg(diff_fst_pkt_times), 'min': min(diff_fst_pkt_times),\n 'max': max(diff_fst_pkt_times)}\n self.pkt_dur = {'avg': avg(durs), 'min': min(durs), 'max': max(durs)}\n\n temp_ip_adresses = [(norm_features_recs.get_field_val_by_name(elem, 'src_ip')) for elem in norm_features_stats] +\\\n [(norm_features_recs.get_field_val_by_name(elem, 'dst_ip')) for elem in norm_features_stats]\n self.ip_adresses = list(set(temp_ip_adresses))\n\n # this block extracts column under current field and collects its statistical characteristics\n self.stat_field_vals = {}\n for field in self.norm_features_recs.get_field_names():\n if field not in ['src_ip', 'dst_ip', 'src_port', 'dst_port', 'first_pkt_time', 'last_pkt_time', 'class']:\n field_column = [float(norm_features_recs.get_field_val_by_name(elem, field)) for elem in\n norm_features_stats]\n self.stat_field_vals[field] = {'avg': avg(field_column), 'min': min(field_column),\n 'max': max(field_column), 'stdev': stdev(field_column)}\n\n def create_abnormal_record(self):\n fields_for_rnd = [field for field in self.norm_features_recs.get_field_names() if 'Request' in field]\n rnd_num = randint(1, len(fields_for_rnd))\n bin_lst = [1] * rnd_num + [0] * (len(fields_for_rnd) - rnd_num)\n shuffle(bin_lst)\n temp_list = []\n for field in self.norm_features_recs.get_field_names():\n if field == 'first_pkt_time':\n self.last_first_pkt_time += abs(rnd(self.diff_fst_pkt_time))\n temp_list += [str(self.last_first_pkt_time)]\n elif field == 'last_pkt_time':\n temp_list += [str(self.last_first_pkt_time + abs(rnd(self.pkt_dur)))]\n elif field == 'src_ip' or field == 'dst_ip':\n #temp_list += [str(ip_address(42540766411282592856903984951653826561))] # ivp6 = 2001:db8::1\n # temp_list += [str(IPv6Address(randint(0, 2 ** 128 - 1)))] #random ipv6\n # temp_list += [str(ip_address(randint(0, 2 ** 32 - 1)))] # random ipv4\n ip = rnd_ip(self.ip_adresses) if randint(0, 1) == 0 else ip_address(randint(0, 2 ** 32 - 1)) #choice rnd ip from two original networks or rnd IPv4\n temp_list += [str(ip)]\n elif field == 'src_port' or field == 
'dst_port':\n temp_list += [str(randint(0, 2 ** 16 - 1))]\n elif field == 'class':\n temp_list += ['1'] # type of traffic, 0 for normal, 1 for abnormal\n elif 'Request' in field:\n if bin_lst[fields_for_rnd.index(field)] == 1: # insert anomaly value\n temp_list += [str(\n int(self.stat_field_vals[field]['avg'] + (3 + randint(0, 1)) * self.stat_field_vals[field]['stdev']))]\n else:\n temp_list += [str(int(rnd(self.stat_field_vals[field])))]\n else:\n temp_list += [str(int(rnd(self.stat_field_vals[field])))]\n return temp_list\n\n\nif __name__ == '__main__':\n # all this stuff only to make user enter command as 'script_name.py -f filename.pcap/json' -n num_anomalies\n # so we can extract filename to continue work\n\n '''\n arg_parser = ArgumentParser()\n arg_parser.add_argument('-f', '--files', type=str, dest='filename', required=True,\n help='set input files (extensions: pcap, pcapng or json)')\n arg_parser.add_argument('-n', '--n_anomalies', type=int, help='insert n anomalies')\n\n args = arg_parser.parse_args()\n n_anomalies = args.n_anomalies\n\n if len(args.filename.split(',')) > 1: # we want to work only with one file for one run\n right_command = 'script_name.py -f filename.pcap'\n print(r\"Unknown command. Make sure, that you use command's format as '{}'\".format(right_command))\n sys.exit()\n\n pcap_file = str(args.filename)\n print('n_anomalies: ' + str(n_anomalies))\n print('{} loading completed'.format(pcap_file))\n '''\n pcap_file = 'n4.pcap'\n\n to_json_convertation(pcap_file) # you can guess what this means\n json_file = pcap_file.partition('.')[0] + '.json'\n print('{} creating completed'.format(json_file))\n\n # working with json-file\n with open(json_file, 'r', encoding='utf-8') as file:\n packets = json.load(file)\n print('{} loading completed'.format(json_file))\n\n # features extracting\n features = FeaturesExtracting()\n\n for pkt in packets:\n if 'pfcp' not in pkt['_source']['layers']:\n continue\n record = CurrentPacketParser(pkt)\n features.insert_highlevel_keys(record)\n\n # only to inform that program is working\n print('len(tcp_records):', len(features.features_dict))\n for ip_pair in features.features_dict.keys():\n print('len(tcp_records[{}]): {}'.format(ip_pair, len(features.features_dict[ip_pair])))\n for port_pair in features.features_dict[ip_pair].keys():\n print('len(tcp_records[{}][{}]): {}'.format(ip_pair, port_pair, len(features.features_dict[ip_pair][port_pair])))\n\n n_anomalies = 493\n\n normal_csv_file = re.sub(r'^(.+)\\.json$', r'\\1_normal.csv', json_file)\n field_names = features.get_field_names()\n features_list = features.insert_packet_stats()\n save_to_csv(normal_csv_file, [field_names] + features_list)\n print('file {} was written'.format(normal_csv_file))\n\n normal_generated_csv_file = re.sub(r'^(.+)\\.json$', r'\\1_normal_generated.csv', json_file)\n exp_generator = DatasetExpansion(features, features_list)\n generated_normals_list = [exp_generator.create_normal_record() for _ in range(500)] # (x) - lines to create num\n print('normal records was generated')\n save_to_csv(normal_generated_csv_file, [field_names] + generated_normals_list)\n print('file {} was written'.format(normal_generated_csv_file))\n\n #merged_normal_csv_file = re.sub(r'^(.+)\\.json$', r'\\1_normals_merged.csv', json_file)\n merged_list = features_list + generated_normals_list\n print('merged normal origin and normal generated dataset was created')\n shuffle(merged_list)\n print('merged normal dataset was shuffled')\n #save_to_csv(merged_normal_csv_file, [field_names] + 
merged_list)\n    #print('file {} was written'.format(merged_normal_csv_file))\n\n\n    anomaly_csv_file = re.sub(r'^(.+)\\.json$', r'\\1_anomalies.csv', json_file)\n    if n_anomalies is not None:\n        rag = RndAnomalyGenerator(features, features_list)\n        anomalies = [rag.create_abnormal_record() for _ in range(n_anomalies)]\n        save_to_csv(anomaly_csv_file, [field_names] + anomalies)\n        print('{} anomalies based on \"Requests\" feature were generated'.format(n_anomalies))\n        print('file {} was written'.format(anomaly_csv_file))\n\n    # united dataset creator\n    the_df = merged_list + anomalies\n    print('merged normal and abnormal records dataset was created')\n    shuffle(the_df)\n    print('merged dataset was shuffled')\n    save_to_csv('the_big_df.csv', [field_names] + the_df)\n\n'''\n    # dataset splitter\n    df_len = len(the_df)\n    train_len = round(df_len / 100 * 70)  # allocate 70% of the big dataset to the training set\n    train_df = the_df[:train_len]\n    test_df = the_df[train_len:]\n    save_to_csv('train.csv', [field_names] + train_df)\n    save_to_csv('test.csv', [field_names] + test_df)\n'''\n","sub_path":"features_plus_generator_normal.py","file_name":"features_plus_generator_normal.py","file_ext":"py","file_size_in_byte":20881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"481226219","text":"from django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.http import JsonResponse\nfrom django.shortcuts import redirect\nfrom django.urls import reverse_lazy\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.views.generic import ListView, CreateView, UpdateView, DeleteView\nfrom django.utils.decorators import method_decorator\n\nfrom apps.geografico.forms import LocalidadesForm\nfrom apps.geografico.models import Localidades, Provincias\nfrom apps.mixins import ValidatePermissionRequiredMixin\n\nclass LocalidadesListView(LoginRequiredMixin, ValidatePermissionRequiredMixin, ListView):\n    model = Localidades\n    template_name = 'localidades/list.html'\n    permission_required = 'geografico.view_localidades'\n\n    @method_decorator(csrf_exempt)\n    def dispatch(self, request, *args, **kwargs):\n        return super().dispatch(request, *args, **kwargs)\n\n    def post(self, request, *args, **kwargs):\n        data = {}\n        try:\n            action = request.POST['action']\n            if action == 'searchdata':\n                data = []\n                for i in Localidades.objects.all():\n                    data.append(i.toJSON())\n            else:\n                data['error'] = 'Ha ocurrido un error'\n        except Exception as e:\n            data['error'] = str(e)\n        return JsonResponse(data, safe=False)\n\n    def get_context_data(self, **kwargs):\n        context = super().get_context_data(**kwargs)\n        context['title'] = 'Listado de Localidades'\n        context['create_url'] = reverse_lazy('geografico:localidades_create')\n        context['list_url'] = reverse_lazy('geografico:localidades_list')\n        context['entity'] = 'Localidades'\n        return context\n\n\nclass LocalidadesCreateView(LoginRequiredMixin, ValidatePermissionRequiredMixin, CreateView):\n    model = Localidades\n    form_class = LocalidadesForm\n    template_name = 'localidades/create.html'\n    success_url = reverse_lazy('geografico:localidades_list')\n    permission_required = 'geografico.add_localidades'\n    url_redirect = success_url\n\n    @method_decorator(csrf_exempt)\n    def dispatch(self, request, *args, **kwargs):\n        return super().dispatch(request, *args, **kwargs)\n\n    def post(self, request, *args, **kwargs):\n        data = {}\n        try:\n            action = request.POST['action']\n            if action == 'search_provincia_id':\n                data = [{'id': '', 'text': '---------'}]\n                for i in 
Provincias.objects.filter(pais_id=request.POST['id']):\n data.append({'id': i.id, 'text': i.nombre})\n elif action == 'add':\n form = LocalidadesForm(request.POST)\n if form.is_valid():\n form = self.get_form()\n data = form.save()\n return redirect('geografico:localidades_list')\n else:\n data['error'] = 'Ha ocurrido un error'\n except Exception as e:\n data['error'] = str(e)\n return JsonResponse(data, safe=False)\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['title'] = 'Crear una Localidad'\n context['entity'] = 'Localidades'\n context['list_url'] = reverse_lazy('geografico:localidades_list')\n context['action'] = 'add'\n return context\n\n\nclass LocalidadesUpdateView(LoginRequiredMixin, ValidatePermissionRequiredMixin, UpdateView):\n model = Localidades\n form_class = LocalidadesForm\n template_name = 'localidades/update.html'\n success_url = reverse_lazy('geografico:localidades_list')\n permission_required = 'geografico.change_localidades'\n url_redirect = success_url\n\n def dispatch(self, request, *args, **kwargs):\n self.object = self.get_object()\n return super().dispatch(request, *args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n data = {}\n try:\n action = request.POST['action']\n if action == 'edit':\n form = self.get_form()\n data = form.save()\n else:\n data['error'] = 'No ha ingresado ninguna opción'\n except Exception as e:\n data['error'] = str(e)\n return JsonResponse(data)\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['title'] = 'Editar Localidad'\n context['entity'] = 'Localidades'\n context['list_url'] = reverse_lazy('geografico:localidades_list')\n context['action'] = 'edit'\n return context\n\n\nclass LocalidadesDeleteView(LoginRequiredMixin, ValidatePermissionRequiredMixin, DeleteView):\n model = Localidades\n template_name = 'localidades/delete.html'\n success_url = reverse_lazy('geografico:localidades_list')\n permission_required = 'geografico.delete_localidades'\n url_redirect = success_url\n\n # @method_decorator(login_required)\n def dispatch(self, request, *args, **kwargs):\n self.object = self.get_object()\n return super().dispatch(request, *args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n data = {}\n try:\n self.object.delete()\n except Exception as e:\n data['error'] = str(e)\n return JsonResponse(data)\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['title'] = 'Eliminar Localidad'\n context['entity'] = 'Localidades'\n context['list_url'] = reverse_lazy('geografico:localidades_list')\n return context\n","sub_path":"app/apps/geografico/views/localidades/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"585645469","text":"# coding: utf-8\nfrom __future__ import absolute_import\nfrom __future__ import unicode_literals\nfrom __future__ import print_function\n\nimport logging\nimport time\nimport io\nimport threading\nimport random\n\nfrom celery.schedules import crontab\nfrom example.celery_conf import app\nfrom heavy_celery import base, utils\nfrom heavy_celery.cron import spawner as _cron_scheduler\n\nlogger = logging.getLogger(__name__)\n\n\n@app.task(base=base.Task)\ndef hello_world(*args, **kwargs):\n logger.info(\"test task {} {}\".format(args, kwargs))\n return 'hello world'\n\n\n@app.task(base=base.Task)\ndef test_exception(*args, **kwargs):\n 
raise Exception('hello world')\n\n\n@app.task(base=base.Task)\ndef hello_world2(*args, **kwargs):\n logger.info(\"test task started {} {}\".format(args, kwargs))\n time.sleep(50)\n logger.info(\"test task {} {}\".format(args, kwargs))\n time.sleep(50)\n logger.info(\"test task ended {} {}\".format(args, kwargs))\n return 'hello world2'\n\n\nglobal_mutex = threading.Lock()\n@app.task(base=base.Task)\ndef hello_mutex(*args, **kwargs):\n with global_mutex:\n logger.info(\"test task started {} {}\".format(args, kwargs))\n time.sleep(3)\n if random.random() < 0.5:\n raise Exception()\n else:\n logger.info(\"test running task {} {}\".format(args, kwargs))\n time.sleep(3)\n logger.info(\"test task ended {} {}\".format(args, kwargs))\n return 'hello world2'\n\n\n@app.task(base=base.FileTask(\"txt\"))\ndef create_file(*args, **kwargs):\n sio = io.StringIO()\n sio.write('args={} kwargs={}'.format(args, kwargs))\n sio.seek(0)\n return sio\n\n\n@app.task(base=base.FileTask(\"txt\"))\ndef create_file2(*args, **kwargs):\n return 'args={} kwargs={}'.format(args, kwargs)\n\n\n@app.task()\ndef cron_scheduler():\n _cron_scheduler()\n\n\n@app.task()\ndef celery_revoke(task_id):\n logger.info(\"revoking {}\".format(task_id))\n from celery.task.control import revoke\n revoke(task_id, terminate=True)\n logger.info(\"revoked! {}\".format(task_id))\n\n\napp.conf.beat_schedule = {\n 'cron_scheduler': {\n 'task': 'example.apps.sample.tasks.cron_scheduler',\n 'schedule': crontab(),\n 'args': (),\n 'options': dict(queue='time_sensitive', routing_key='time_sensitive_tasks'),\n },\n}\n","sub_path":"example/example/apps/sample/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":2272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"27694827","text":"\"\"\"\nAuthor: Uday Korlimarla \nCopyright (c) 2020, Checkmarx Australia.\n\"\"\"\nfrom projects import GetProject\nfrom utils import connection, str_to_json\n\n\nclass ScanDetails(GetProject):\n def __init__(self):\n super().__init__()\n self.projects_scans = None\n self.current_project_scan_ids = list()\n self.stats_summary = list()\n\n for project in self.projects:\n self.current_project_id = project['id']\n self.set_current_project_by_id(project['id'])\n self.get_projects_scans()\n self.get_current_project_scan_ids()\n self.get_current_project_scan_statistics()\n \n def get_projects_scans(self):\n self.connection = connection(self.https_flag, self.host)\n self.connection.request(\"GET\", \"/cxrestapi/sast/scans\", headers=self.auth_headers)\n res = self.connection.getresponse()\n data = res.read()\n self.projects_scans = str_to_json(data.decode(\"utf-8\"))\n \n def get_current_project_scan_ids(self):\n # Filter to get All scan IDs of a specific project in context\n self.current_project_scan_ids = [{'scan_id': scan['id']} for scan in self.projects_scans if scan['project']['id'] ==self.current_project_id]\n \n def get_current_project_scan_statistics(self):\n self.connection = connection(self.https_flag, self.host)\n for scan_id in self.current_project_scan_ids: \n rs_endpoint = \"/cxrestapi/sast/scans/{0}/resultsStatistics\".format(scan_id['scan_id'])\n self.connection.request(\"GET\", rs_endpoint, headers=self.auth_headers)\n res = self.connection.getresponse()\n data = res.read()\n result = str_to_json(data.decode(\"utf-8\"))\n result['id'], result['name'], result['scan_id'] = self.current_project_id, self.current_project_name, scan_id['scan_id']\n 
self.stats_summary.append(result)\n","sub_path":"scans.py","file_name":"scans.py","file_ext":"py","file_size_in_byte":1948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"263294261","text":"from stack import Stack\n\nclass Node:\n def __init__(self, value=None, next_node=None):\n self.value = value\n self.next_node = next_node\n\n def get_value(self):\n return self.value\n\n def get_next(self):\n return self.next_node\n\n def set_next(self, new_next):\n self.next_node = new_next\n\n def __repr__(self):\n return f'value: {self.value}'\n\nclass LinkedList:\n def __init__(self):\n self.head = None\n\n def add_to_head(self, value):\n node = Node(value)\n\n if self.head is not None:\n node.set_next(self.head)\n\n self.head = node\n\n def contains(self, value):\n if not self.head:\n return False\n\n current = self.head\n\n while current:\n if current.get_value() == value:\n return True\n\n current = current.get_next()\n\n return False\n\n def reverse_list(self, node, s_=None):\n \"\"\"\n every node can be added to a stack and then removed one by one and the links set up\n \"\"\"\n if self.head is None:\n return\n\n if self.head.next_node is None:\n return\n\n if s_ is None:\n s_ = Stack()\n \n current = node\n\n while current.next_node is not None:\n s_.push(current)\n current = current.next_node\n\n s_.push(current)\n\n self.head = s_.pop()\n node_2 = s_.pop()\n self.head.next_node = node_2\n\n new_current = node_2\n\n while s_.size > 0:\n node = s_.pop()\n new_current.next_node = node\n new_current = node\n\n new_current.next_node = s_.pop()\n\n def __repr__(self):\n ll_rep = list()\n curr = self.head\n\n if self.head is None:\n return '[]'\n\n while curr.next_node:\n ll_rep.append(curr.value)\n curr = curr.next_node\n\n ll_rep.append(curr.value)\n\n return f'{ll_rep}'\n","sub_path":"reverse/reverse.py","file_name":"reverse.py","file_ext":"py","file_size_in_byte":1966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"374867952","text":"from socket import *\nimport os, sys\n\n# client handler function\ndef client_hander(c):\n print(\"Client:\", c.getpeername())\n while True:\n data = c.recv(1024)\n if not data:\n break\n print(data.decode())\n c.send(b'Thank you')\n c.close()\n\n# create the socket\nHOST = '0.0.0.0'\nPOST = 8888\nADDR = (HOST, POST)\n\ns = socket()\ns.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)\ns.bind(ADDR)\ns.listen(5)\n\n# loop, waiting to accept client connection requests\nprint(\"Listen to the port 8888...\")\nwhile True:\n try:\n c, addr = s.accept()\n except KeyboardInterrupt:\n sys.exit(\"Server shutting down\")\n except Exception as e:\n print(\"Error:\", e)\n continue\n\n\n # create a new process to handle the client request\n pid = os.fork()\n\n if pid < 0:\n pass\n if pid == 0:\n p = os.fork()\n if p == 0: # second-level child process\n s.close()\n client_hander(c) # handle the actual request\n sys.exit(0) # the child exits once the request is handled\n else:\n os._exit(0) # exit the first-level child process\n # the parent (or a failed fork) waits for the next client connection\n else:\n c.close()\n os.wait()\n\n","sub_path":"my_project/PYTHONET/day08/fork_server.py","file_name":"fork_server.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"213874146","text":"__author__ = 'joh12041'\n\n# Code largely taken from: https://gist.github.com/endolith/2837160\n# with some help from https://github.com/ahwolf/meetup_location/blob/master/code/geo_median.py\n# and adapted to support great circle distances over Euclidean.\n\nfrom geopy.distance import vincenty\nfrom geopy.distance import great_circle\nimport csv\nimport numpy\nimport 
traceback\n\nLIMIT_MAD = 30 # acceptable km limit to median absolute deviation of points\nLIMIT_POINTS = 3 # acceptable minimum number of GPS points for a user\nDISTANCE_THRESHOLD = 1 # distance (meters) between iterations that determines end of search\nDATA_POINTS_FILE = '../sample_dataset/post_location_info.csv'\nOUTPUT_MEDIANS = '../sample_dataset/users.home-locations.geo-median.tsv'\nSNAP_TO_USER_POINTS = True\nOUTPUT_ALL_USERS = False\nOUTPUT_DELIMITER = '\\t'\n\ndef main():\n compute_medians()\n\ndef compute_medians(iterations=1000, already_computed=None):\n\n numIter = iterations # numIter depends on how long it take to get a suitable convergence of objFunc\n count = 0\n medians_found = 0\n\n already_computed_users = {}\n if already_computed:\n for file in already_computed:\n with open(file, 'r') as fin:\n csvreader = csv.reader(fin)\n assert next(csvreader) == ['uid', 'median']\n for line in csvreader:\n already_computed_users[line[0]] = True\n\n with open(DATA_POINTS_FILE, 'r') as fin:\n csvreader = csv.reader(fin)\n assert next(csvreader) == ['uid','lat','lon']\n with open(OUTPUT_MEDIANS, 'w') as fout:\n csvwriter = csv.writer(fout, delimiter=OUTPUT_DELIMITER)\n csvwriter.writerow(['uid','lat','lon'])\n line = next(csvreader)\n dataPoints = [(float(line[1]), float(line[2]))]\n current_uid = line[0]\n for line in csvreader:\n if line[0] == current_uid:\n dataPoints.append((float(line[1]), float(line[2])))\n else:\n count += 1\n if count % 2500 == 0:\n print(\"Processed {0} users and {1} medians found.\".format(count, medians_found))\n\n if current_uid not in already_computed_users:\n medians_found += compute_user_median(dataPoints, numIter, csvwriter, current_uid)\n\n # set user and restart array for new current user\n current_uid = line[0]\n dataPoints = [(float(line[1]), float(line[2]))]\n # compute final user's median\n medians_found += compute_user_median(dataPoints, numIter, csvwriter, current_uid)\n print(\"Processed {0} users and {1} medians found.\".format(count, medians_found))\n\n\ndef compute_user_median(dataPoints, numIter, csvwriter, current_uid):\n if len(dataPoints) < LIMIT_POINTS: # Insufficient points for the user - don't record median\n if OUTPUT_ALL_USERS:\n csvwriter.writerow([current_uid, None, None])\n return 0\n else:\n if SNAP_TO_USER_POINTS: # ensure median is one of the user's points\n lowestDev = float(\"inf\")\n for point in dataPoints:\n tmpAbsDev = objfunc(point, dataPoints)\n if tmpAbsDev < lowestDev:\n lowestDev = tmpAbsDev\n testMedian = point\n else:\n testMedian = candMedian(dataPoints) # Calculate centroid more or less as starting point\n if objfunc(testMedian, dataPoints) != 0: # points aren't all the same\n\n #iterate to find reasonable estimate of median\n for x in range(0, numIter):\n denom = denomsum(testMedian, dataPoints)\n nextLat = 0.0\n nextLon = 0.0\n\n for y in range(0, len(dataPoints)):\n nextLat += (dataPoints[y][0] * numersum(testMedian, dataPoints[y]))/denom\n nextLon += (dataPoints[y][1] * numersum(testMedian, dataPoints[y]))/denom\n\n prevMedian = testMedian\n testMedian = (nextLat, nextLon)\n try:\n if vincenty(prevMedian, testMedian).meters < DISTANCE_THRESHOLD: # 1 meter\n break\n except:\n if great_circle(prevMedian, testMedian).meters < DISTANCE_THRESHOLD: # 1 meter\n break\n\n if x == numIter - 1:\n print('{0}: failed to converge. 
Last change between iterations was {1} meters.'.format(current_uid, great_circle(prevMedian, testMedian).meters))\n\n # Check if the user's points are under the limit median absolute deviation\n if checkMedianAbsoluteDeviation(dataPoints, testMedian) <= LIMIT_MAD:\n csvwriter.writerow([current_uid, round(testMedian[0],6), round(testMedian[1],6)])\n return 1\n else:\n if OUTPUT_ALL_USERS:\n csvwriter.writerow([current_uid, None, None])\n return 0\n\n\n\ndef candMedian(dataPoints):\n # Calculate the first candidate median as the centroid (arithmetic mean)\n tempLat = 0.0\n tempLon = 0.0\n\n for i in range(0, len(dataPoints)):\n tempLat += dataPoints[i][0]\n tempLon += dataPoints[i][1]\n\n return (tempLat / len(dataPoints), tempLon / len(dataPoints))\n\ndef checkMedianAbsoluteDeviation(dataPoints, median):\n # Calculate the Median Absolute Deviation of a set of points\n distances = []\n for i in range(0, len(dataPoints)):\n try:\n distances.append(vincenty(median, dataPoints[i]).kilometers)\n except ValueError:\n # Vincenty doesn't always converge so fall back on great circle distance which is less accurate but always converges\n distances.append(great_circle(median, dataPoints[i]).kilometers)\n return(numpy.median(distances))\n\ndef numersum(testMedian, dataPoint):\n # Provides the numerator weight of the Weiszfeld algorithm for one data point, used when adjusting the candidate x or y\n try:\n return 1 / vincenty(testMedian, dataPoint).kilometers\n except ZeroDivisionError:\n traceback.print_exc()\n return 0 # filter points that equal the median out (otherwise no convergence)\n except ValueError:\n # Vincenty doesn't always converge so fall back on great circle distance which is less accurate but always converges\n return 1 / great_circle(testMedian, dataPoint).kilometers\n\ndef denomsum(testMedian, dataPoints):\n # Provides the denominator of the Weiszfeld algorithm\n temp = 0.0\n for i in range(0, len(dataPoints)):\n try:\n temp += 1 / vincenty(testMedian, dataPoints[i]).kilometers\n except ZeroDivisionError:\n print('zerodivisionerror', dataPoints[i])\n continue # filter points that equal the median out (otherwise no convergence)\n except ValueError:\n # Vincenty doesn't always converge so fall back on great circle distance which is less accurate but always converges\n temp += 1 / great_circle(testMedian, dataPoints[i]).kilometers\n return temp\n\ndef objfunc(testMedian, dataPoints):\n # This function calculates the sum of linear distances from the current candidate median to all points\n # in the data set; as such it is the objective function that we are minimising.\n temp = 0.0\n for i in range(0, len(dataPoints)):\n try:\n temp += vincenty(testMedian, dataPoints[i]).kilometers\n except ValueError:\n # Vincenty doesn't always converge so fall back on great circle distance which is less accurate but always converges\n temp += great_circle(testMedian, dataPoints[i]).kilometers\n return temp\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"python/src/geolocate/geolocation/geo_median.py","file_name":"geo_median.py","file_ext":"py","file_size_in_byte":7826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"194383425","text":"\"\"\" TO RUN THE CODE:\npython3 structural_feat.py ../Data/Clean_data_labels_golden_fixed.csv\n\"\"\"\n\n\nfrom allennlp.predictors.predictor import Predictor\nfrom collections import Counter\nimport pandas as pd\nimport numpy as np\n\nimport argparse\nimport re\nimport itertools\nimport os\nimport wget\nimport pickle\n\nimport 
nltk\nfrom nltk.tokenize import word_tokenize\nfrom nltk.stem import WordNetLemmatizer\n#from nltk.corpus import verbnet\n#nltk.download('verbnet')\n\nfrom models import tense\nfrom models import modal\nfrom models import adjective\nfrom models import adverb\nfrom models import semantics\n\nsum_all = lambda x: sum(map(sum_all, x)) if isinstance(x, list) else x\nflatten = lambda l: [item for sublist in l for item in sublist]\n\ndef clean_output(data): # assigns 0 if no probability assigned\n complete = list(set(flatten([[i[0] for i in j] for j in data])))\n lst = []\n for i in data:\n mini_lst = []\n for j in complete: \n try:\n idx = [k[0] for k in i].index(j)\n mini_lst.append((j, i[idx][1]))\n except ValueError:\n mini_lst.append((j, 0)) \n lst.append(mini_lst)\n return lst\n\n\n\n\nclass structFeat(object):\n\n def __init__(self, file_path):\n\n pretrained_tree_path = './allennlp_pretrained/elmo-constituency-parser-2018.03.14.tar.gz'\n pretrained_coref_path = './allennlp_pretrained/allennlp_coref-model-2018.02.05.tar.gz'\n\n if not os.path.exists(pretrained_tree_path):\n tree_url = \"https://s3-us-west-2.amazonaws.com/allennlp/models/elmo-constituency-parser-2018.03.14.tar.gz\"\n wget.download(tree_url, pretrained_tree_path)\n\n if not os.path.exists(pretrained_coref_path):\n coref_url = \"https://s3-us-west-2.amazonaws.com/allennlp/models/coref-model-2018.02.05.tar.gz\"\n wget.download(coref_url, pretrained_coref_path)\n\n self.tree_predictor = Predictor.from_path(pretrained_tree_path)\n self.coref_predictor = Predictor.from_path(pretrained_coref_path)\n\n self.data = pd.read_csv(file_path)\n self.lemmatizer = WordNetLemmatizer() \n self.NP_pattern = []\n\n\n def tree_parser(self, sent): # Parse const tree by depth -> returns (depth level, tree content)\n\n pred = self.tree_predictor.predict(sentence=sent)\n # pred.keys() >>> ['class_probabilities', 'spans', 'tokens', 'pos_tags', 'num_spans', 'hierplane_tree', 'trees']\n return ([([i[0], (''.join(i[1]).split())]) for i in list(self.parse(list(pred['trees'])))], pred) \n\n\n def parse(self, string): # Parsing str as stack-pop\n\n stack = []\n for i, char in enumerate(string):\n if char == '(':\n stack.append(i)\n elif char == ')' and stack:\n start = stack.pop()\n yield (len(stack), string[start + 1: i])\n\n\n def load_all(self, which='trees'): # Load all coref and trees\n\n if which == 'coref':\n output = []\n for i in self.data['Clean Sentence']:\n try:\n output.append(self.coref_predictor.predict(i))\n except: # when coref doesnt exists\n output.append({'top_spans': [], 'predicted_antecedents': [0, 0, 0, 0], \n 'document': word_tokenize(i), 'clusters': [[]]})\n return output\n\n elif which == 'trees':\n return [self.tree_parser(i) for i in self.data['Clean Sentence']] \n\n\n def save_data(self, trees, coref, file_path):\n data = [[i, tense.get_tense(i),modal.get_modal(i),adjective.get_adj(i,j),adverb.get_adv(i,j)] for i,j in zip(trees,coref)]\n df = pd.DataFrame.from_records(data, columns=[\"Index\",\"Tense\", \"Modal Type\", \"NP Pattern\", \"Adverb Exists\"], index=False)\n df.to_csv(file_path, encoding='utf-8')\n\n\n def count_all(self, trees, coref, which): # counting the result of each analysis by their confidence score\n full = np.where((self.data['Confidence'] == 1.0000) & (self.data['Final Label'] == 1))[0] # sents everyone thought was biased\n two_third = np.where((self.data['Confidence'] != 1.0000) & (self.data['Final Label'] == 1))[0]\n one_third = np.where((self.data['Confidence'] != 1.0000) & (self.data['Final Label'] == 
0))[0]\n zero = np.where((self.data['Confidence'] == 1.0000) & (self.data['Final Label'] == 0))[0] #sents everyone thought was not biased\n if which == 'tense':\n return [Counter([tense.get_tense(trees[i]) for i in j]) for j in [full,two_third,one_third,zero]]\n elif which == 'modal':\n return [Counter(list(itertools.chain(*[modal.get_modal(trees[i]) for i in j]))) for j in [full,two_third,one_third,zero]]\n elif which == 'semantics':\n return [Counter([semantics.get_sem(trees[i]) for i in j]) for j in [full,two_third,one_third,zero]]\n elif which == 'adj':\n return [Counter([(adjective.get_adj(trees[i], coref[i])) for i in j]) for j in [full,two_third,one_third,zero]]\n elif which == 'adv':\n return [Counter([(adverb.get_adv(trees[i], coref[i])) for i in j]) for j in [full,two_third,one_third,zero]]\n \n\n def get_ratio(self, result_dict, which='by_group'):\n total = sum_all([[i for i in j.values()] for j in result_dict])\n ratio_output = []\n\n if which == 'by_total':\n for i in range(0, len(result_dict)):\n ratio_output.append([(j[0], j[1]/total) for j in result_dict[i].items()])\n\n elif which == 'by_group':\n for i in range(0, len(result_dict)):\n ratio_output.append([(j[0], j[1]/sum(result_dict[i].values())) for j in result_dict[i].items()])\n\n return ratio_output\n\n\n def analyze(self, save=False): # stores all results\n\n all_trees, all_coref = {}, {}\n tree_pickle_path = \"./pickle/trees.p\"\n coref_pickle_path = \"./pickle/coref.p\"\n\n\n # save const tree as pickle\n try:\n all_trees = pickle.load(open(tree_pickle_path, \"rb\"))\n except:\n pass\n try:\n assert (len(all_trees) == len(self.data))\n except (IOError, EOFError, AssertionError):\n all_trees = self.load_all()\n os.makedirs(os.path.dirname(tree_pickle_path), exist_ok=True)\n with open(tree_pickle_path, 'wb') as f:\n pickle.dump(all_trees, f)\n\n # save coref as pickle\n try:\n all_coref = pickle.load(open(coref_pickle_path, \"rb\"))\n except:\n pass\n try:\n assert (len(all_coref) == len(self.data))\n except (IOError, EOFError, AssertionError):\n all_coref = self.load_all('coref')\n os.makedirs(os.path.dirname(coref_pickle_path), exist_ok=True)\n with open(coref_pickle_path, 'wb') as f:\n pickle.dump(all_coref, f) \n \n # save as csv\n if save == True:\n self.save_data(all_trees, all_coref,'structFeat_out.csv')\n\n\n tense_result = self.count_all(all_trees, all_coref, 'tense')\n modal_result = self.count_all(all_trees, all_coref, 'modal')\n ##semantics_result = self.count_all(all_trees, 'semantics')\n adj_result = self.count_all(all_trees, all_coref, 'adj')\n adv_result = self.count_all(all_trees, all_coref, 'adv')\n\n by_group = [self.get_ratio(tense_result), self.get_ratio(modal_result), self.get_ratio(adj_result), self.get_ratio(adv_result)]\n by_total = [self.get_ratio(tense_result,'by_total'), self.get_ratio(modal_result,'by_total'), self.get_ratio(adj_result,'by_total'), self.get_ratio(adv_result,'by_total')]\n\n # returned ratio ->> (by group, by total)\n return [[clean_output(d) for d in k] for k in (by_group, by_total)]\n\n \n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n parser.add_argument('filename')\n args = parser.parse_args()\n analyzer = structFeat(args.filename)\n #ratio_result = analyzer.analyze()\n\n\n ratio_result = analyzer.analyze()\n\n print(ratio_result)\n \n ratio_result_pickle_path = \"./pickle/ratio_result.p\"\n os.makedirs(os.path.dirname(ratio_result_pickle_path), exist_ok=True)\n with open(ratio_result_pickle_path, 'wb') as f:\n pickle.dump(ratio_result, f) \n 
","sub_path":"Structural_Analysis/structural_feat.py","file_name":"structural_feat.py","file_ext":"py","file_size_in_byte":8257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"426426524","text":"import matplotlib.pyplot as plt\nplt.style.use('bmh')\n\nclass EulerEstimator:\n def __init__(self, derivatives):\n self.functions = derivatives\n \n def calc_derivative_at_point(self, initial_point):\n result_dict = {}\n for key in self.functions:\n result_dict[key] = self.functions[key](initial_point[0], initial_point[1])\n return result_dict\n\n def step_forward(self, point, step_size):\n t = point[0]\n old_x = point[1]\n deriv = self.calc_derivative_at_point(point)\n new_x = {}\n for key in old_x:\n new_x[key] = old_x[key] + (deriv[key] * step_size)\n return (t + step_size, new_x)\n\n\n def calc_estimated_points(self, point, step_size, num_steps):\n points_list = [point]\n for num in range(num_steps):\n new_point = self.step_forward(point,step_size)\n points_list.append(new_point)\n point = new_point\n return points_list\n\n def plot(self, point, step_size, end_value):\n x_vals=[]\n y_vals={}\n plt.style.use('bmh')\n for key in point[1]:\n y_vals[key]=[]\n while True:\n if point[0]>end_value:\n break\n for key in point[1]:\n y_vals[key].append(point[1][key])\n x_vals.append(point[0])\n point=self.step_forward(point,step_size)\n \n for key in y_vals:\n plt.plot(x_vals,y_vals[key])\n plt.savefig('euler.png')","sub_path":"src/euler_estimator.py","file_name":"euler_estimator.py","file_ext":"py","file_size_in_byte":1498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"288983332","text":"from gurobipy import *\nfrom import_data import *\nimport operator\n\ndef solve(full_path_instance):\n a, r, b, d,g,h,s,T,n = dig_data(full_path_instance)\n model = Model(\"Scheduling\")\n\n x = {}\n for i in range(n):\n x[i+1] = model.addVar(vtype=GRB.CONTINUOUS, name=\"x_\" + str(i+1))\n\n e = {}\n l = {}\n # penalty variable for being early or late\n for i in range(n):\n e[i+1] = model.addVar(vtype=GRB.CONTINUOUS, name=\"e_\" + str(i+1), obj=g[i+1])\n l[i+1] = model.addVar(vtype=GRB.CONTINUOUS, name=\"l_\" + str(i+1), obj=h[i+1])\n\n # plane i starts before j\n y = {}\n for i in range(n):\n for j in range(n):\n y[i+1,j+1] = model.addVar(vtype=GRB.BINARY, name=\"y_\"+str(i+1) + \"_\" + str(j+1))\n # Zielfunktionsorientierung ist minimieren\n model.modelSense = GRB.MINIMIZE\n\n # Variablen im Modell bekanntmachen\n model.update()\n\n\n # earliest and latest times for plane to land\n for i in range(n):\n model.addConstr(\n x[i+1] >= r[i+1]\n )\n model.addConstr(\n x[i+1] <= d[i+1]\n )\n\n\n # penalty for being too early\n for i in range(n):\n model.addConstr(\n b[i+1] - x[i+1] <= e[i+1]\n )\n model.addConstr(\n e[i+1] >= 0\n )\n model.addConstr(\n x[i+1] - b[i+1] <= l[i+1]\n )\n model.addConstr(\n l[i+1] >= 0\n )\n\n # i lands before j\n M = max(d.iteritems(), key=operator.itemgetter(1))[1] + max(s.iteritems(), key=operator.itemgetter(1))[1]\n\n for i in range(n):\n for j in range(n):\n if i!= j:\n model.addConstr(\n x[i+1] +s[i+1,j+1] <= x[j+1] + (1-y[i+1,j+1])*M\n )\n model.addConstr(\n x[j+1] + s[j+1,i+1] <= x[i+1] + y[i+1,j+1]*M\n )\n model.addConstr(\n y[i+1,j+1] + y[j+1,i+1] <= 1\n )\n\n # update um Modell in Datei schreiben zu koennen\n model.update()\n\n model.write(\"model.lp\")\n\n # Solve\n model.optimize()\n\n return 
model\n\nsolve('airland8.txt')","sub_path":"04_FlugzeugScheduling/runscheduling.py","file_name":"runscheduling.py","file_ext":"py","file_size_in_byte":2133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"185594849","text":"'''\nCopyright 2019 Open Source Server Monitor project Dev Team\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n'''\nimport json\nimport os\nimport wx\nimport wx.lib.agw.aui as aui\nfrom Charter.PieChart import PieChart\nfrom Charter.PieChartConfiguration import PieChartConfiguration\nfrom Common.Version import Version\nfrom GUI.DiscoverMonitorsDialog import DiscoverMonitorsDialog\nfrom GUI.DockablePanel import DockablePanel\nfrom GUI.NewTabDialog import NewTabDialog\n\n\n# Main console dialog.\nclass MainDialog(wx.Frame):\n\n DefaultSize = (800, 600)\n DefaultTitle = f'Open Source Server Monitor Console V{Version}'\n\n LayoutFilename = 'consoleLayout.json'\n\n class LayoutFileKeys(object):\n WindowSize = 'windowSize'\n WindowPosition = 'windowPosition'\n Tabs = 'layoutTabs'\n\n class MenuItemID(object):\n FileSettings = 101\n FileDiscoverMonitors = 102\n LayoutSaveLayout = 201\n LayoutAddTab = 202\n LayoutAddChart = 203\n\n ## List of tabs property.\n # @param self The object pointer.\n @property\n def TabsList(self):\n return self.__tabsList\n\n\n ##\n # @param self The object pointer.\n # @param dataObject Instance of DataObject class containing app data.\n def __init__(self, dataObject):\n\n wx.Frame.__init__(self, None, wx.ID_ANY, self.DefaultTitle, \n pos = wx.DefaultPosition, size = self.DefaultSize,\n style = wx.DEFAULT_FRAME_STYLE)\n\n self.__dataObject = dataObject\n\n self.__currentPage = None\n\n self.__tabsList = []\n\n self.menuItems = {}\n\n self.__BuildMenuBar()\n\n self.__InitialiseGUI()\n\n\n ##\n # @param self The object pointer.\n def __BuildMenuBar(self):\n self.__menubar = wx.MenuBar()\n\n #############\n # File menu #\n #############\n fileMenu = wx.Menu()\n\n # File | Settings\n menu_fileSaveLayout = fileMenu.Append(self.MenuItemID.FileSettings,\n 'Settings', 'Console Settings')\n\n fileMenu.AppendSeparator()\n\n # File | Discover Monitors\n menu_fileDiscoverMonitors = fileMenu.Append(\n self.MenuItemID.FileDiscoverMonitors, 'Discover Monitors...',\n 'Manage and discover monitors')\n self.Bind(wx.EVT_MENU, self.__OnDiscoverMonitors,\n menu_fileDiscoverMonitors)\n\n fileMenu.AppendSeparator()\n\n # File | Quit (exit menu on Mac)\n wx.App.SetMacExitMenuItemId(wx.ID_EXIT)\n menu_fileQuit = fileMenu.Append(wx.ID_EXIT,\n 'Q&uit\\tCtrl-Q', 'Quit application')\n self.Bind(wx.EVT_MENU, self.__OnQuit, menu_fileQuit)\n\n self.__menubar.Append(fileMenu, '&File')\n\n ###############\n # Layout menu #\n ###############\n layoutMenu = wx.Menu()\n\n # Layout | Save Layout\n menu_layoutSaveLayout = layoutMenu.Append(\n self.MenuItemID.LayoutSaveLayout,\n 'Save Layout', 'Save the layout of console')\n self.Bind(wx.EVT_MENU, self.__OnLayoutSaveLayout, menu_layoutSaveLayout)\n\n # Layout | Add tab\n menu_layoutAddTab = 
layoutMenu.Append(self.MenuItemID.LayoutAddTab,\n 'Add Tab', 'Add a new tab')\n self.menuItems['Layout Add Tab'] = menu_layoutAddTab\n self.Bind(wx.EVT_MENU, self.__OnLayoutAddTab, menu_layoutAddTab)\n\n # Layout | Add chart\n menu_layoutAddChart = layoutMenu.Append(self.MenuItemID.LayoutAddChart,\n 'Add Chart', 'Add a chart to current tab')\n self.menuItems['Layout Add Chart'] = menu_layoutAddChart\n menu_layoutAddChart.Enable(False)\n self.Bind(wx.EVT_MENU, self.__OnLayoutAddChart, menu_layoutAddChart)\n\n self.__menubar.Append(layoutMenu, '&Layout')\n\n self.SetMenuBar(self.__menubar)\n\n\n ##\n # @param self The object pointer.\n def AddNewTab(self, tabName):\n dockablePanel = DockablePanel(self.__notebook, tabName)\n self.__notebook.AddPage(dockablePanel, tabName, False)\n\n if self.__notebook.GetPageCount() == 1:\n self.__currentPage = self.__notebook.GetCurrentPage()\n self.menuItems['Layout Add Chart'].Enable(True)\n \n return dockablePanel\n\n\n ##\n # @param self The object pointer.\n def __InitialiseGUI(self):\n\n # Construct top-level AUI manager item.\n auiManagerFlags = aui.AUI_MGR_ALLOW_FLOATING\n self.__auiManager = aui.AuiManager(agwFlags = auiManagerFlags)\n self.__auiManager.SetManagedWindow(self)\n\n self.__notebook = aui.AuiNotebook(self)\n\n # self.__notebook.Bind(aui.EVT_AUINOTEBOOK_PAGE_CLOSE, self.__OnQuit)\n\n # If a layout configuration file exists then open it and restore any\n # tabs and charts.\n if os.path.isfile(self.LayoutFilename):\n print('[INFO] Previously saved layout is being restored...')\n with open(self.LayoutFilename, 'r') as fileHandle:\n layout = json.load(fileHandle)\n\n self.SetPosition(layout[self.LayoutFileKeys.WindowPosition])\n self.SetSize(layout[self.LayoutFileKeys.WindowSize])\n\n # Restore each tab and then it's contents.\n for tab in layout[self.LayoutFileKeys.Tabs]:\n newTab = self.AddNewTab(tab)\n \n perspective, charts = layout[self.LayoutFileKeys.Tabs][tab]\n\n # Re-add all of the charts from the layout file.\n for c in charts:\n chartConfig = PieChartConfiguration(charts[c]['labels'],\n charts[c]['segments'])\n newTab.AddPieChart(c, c, chartConfig)\n \n # Once all of the charts have been added then re-load the\n # layout.\n newTab.UpdatePerspective(perspective)\n\n self.__auiManager.AddPane(self.__notebook,aui.AuiPaneInfo().\n Name(\"notebook_content\").CenterPane().PaneBorder(False))\n\n # tell the manager to \"commit\" all the changes just made\n self.__auiManager.Update()\n\n self.Bind(wx.EVT_CLOSE, self.__OnQuit)\n\n\n ##\n # @param self The object pointer.\n # @param event Unused.\n def __OnQuit(self, event):\n\n self.__OnLayoutSaveLayout()\n\n # De-initialise the frame manager\n self.__auiManager.UnInit()\n self.Destroy()\n\n\n ##\n # @param self The object pointer.\n # @param event Unused.\n def __OnDiscoverMonitors(self, event):\n dialog = DiscoverMonitorsDialog(self, self.__dataObject)\n\n\n ##\n # @param self The object pointer.\n # @param event Unused.\n def __OnLayoutAddTab(self, event):\n newTagDialog = NewTabDialog(self)\n newTagDialog.ShowModal()\n\n\n ##\n # @param self The object pointer.\n # @param event Unused.\n def __OnLayoutAddChart(self, event):\n\n # First test chart - Piechart\n config = PieChartConfiguration()\n config.Labels = ['Virt Mem', 'Real Mem', 'Usage']\n config.Segments = [25, 30, 45]\n \n from string import ascii_lowercase\n from random import choice\n chartName = ''.join(choice(ascii_lowercase) for i in range(10))\n \n self.__currentPage.AddPieChart(chartName, 'Memory Usage', config)\n\n ##\n # 
@param self The object pointer.\n # @param event Unused.\n def __OnLayoutSaveLayout(self, event = None):\n print('[INFO] Saving window layout...')\n \n tabs = {}\n for tab in self.__notebook:\n tabs[tab.GetName()] = tab.GetAllCharts(asJson = True)\n \n winWidth, winHeight = self.GetSize()\n winPosX, winPosY = self.GetPosition()\n\n layout = {\n self.LayoutFileKeys.WindowSize : (winWidth, winHeight),\n self.LayoutFileKeys.WindowPosition : (winPosX, winPosY),\n self.LayoutFileKeys.Tabs : tabs\n }\n\n try:\n with open(self.LayoutFilename, 'w') as fileHandle:\n json.dump(layout, fileHandle)\n\n except IOError:\n print('[TODO] handle exception here...')\n","sub_path":"src/Console/GUI/MainDialog.py","file_name":"MainDialog.py","file_ext":"py","file_size_in_byte":8590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"371052615","text":"#\n# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2018 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# SPDX-License-Identifier: EPL-2.0\n#\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom common.base_model_init import BaseModelInitializer\nfrom common.base_model_init import set_env_var\n\nimport os\nimport time\n\n\nclass ModelInitializer(BaseModelInitializer):\n \"\"\"initialize mode and run benchmark\"\"\"\n\n def __init__(self, args, custom_args, platform_util=None):\n super(ModelInitializer, self).__init__(args, custom_args, platform_util)\n\n self.benchmark_command = \"\" # use default batch size if -1\n self.results_file_path = \"\"\n\n if self.args.batch_size == -1:\n self.args.batch_size = 128\n\n # Set KMP env vars, if they haven't already been set\n self.set_kmp_vars()\n\n # set num_inter_threads and num_intra_threads\n self.set_num_inter_intra_threads()\n\n benchmark_script = os.path.join(\n self.args.intelai_models,\n self.args.precision, \"eval_image_classifier_inference.py\")\n\n self.benchmark_command = self.get_numactl_command(self.args.socket_id)\\\n + \"python \" + benchmark_script\n\n set_env_var(\"OMP_NUM_THREADS\", self.args.num_intra_threads)\n\n self.benchmark_command += \" --input-graph=\" + \\\n self.args.input_graph + \\\n \" --model-name=\" + \\\n str(self.args.model_name) + \\\n \" --inter-op-parallelism-threads=\" + \\\n str(self.args.num_inter_threads) + \\\n \" --intra-op-parallelism-threads=\" + \\\n str(self.args.num_intra_threads) + \\\n \" --batch-size=\" + \\\n str(self.args.batch_size)\n\n # if the data location directory is not empty, then include the arg\n if self.args.data_location and os.listdir(self.args.data_location):\n self.benchmark_command += \" --data-location=\" + \\\n self.args.data_location\n if self.args.accuracy_only:\n self.benchmark_command += \" --accuracy-only\"\n\n # if output results is enabled, generate a results file name and pass it to the inference script\n if self.args.output_results:\n self.results_filename = \"{}_{}_{}_results_{}.txt\".format(\n 
self.args.model_name, self.args.precision, self.args.mode,\n time.strftime(\"%Y%m%d_%H%M%S\", time.gmtime()))\n self.results_file_path = os.path.join(self.args.output_dir, self.results_filename)\n self.benchmark_command += \" --results-file-path {}\".format(self.results_file_path)\n\n def run(self):\n if self.benchmark_command:\n self.run_command(self.benchmark_command)\n\n if self.results_file_path:\n print(\"Inference results file in the output directory: {}\".format(self.results_filename))\n","sub_path":"benchmarks/image_recognition/tensorflow/resnet50/inference/fp32/model_init.py","file_name":"model_init.py","file_ext":"py","file_size_in_byte":3672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"437753581","text":"\"\"\"A class to ingest txt files.\"\"\"\n\nfrom .QuoteModel import QuoteModel\nfrom .IngestorInterface import IngestorInterface\nfrom typing import List\nfrom .InvalidFileExtension import InvalidFileExtension\n\n\nclass TextIngestor(IngestorInterface):\n \"\"\"Class to parse txt files.\"\"\"\n\n input_files_formats = ['txt']\n\n @classmethod\n def parse(cls, path: str) -> List[QuoteModel]:\n \"\"\"Parse a text file.\"\"\"\n quote_objects = []\n\n try:\n is_file_exist = open(path)\n if cls.can_ingest(path):\n\n with open(path, 'r') as txt_file_to_read:\n txt_file = txt_file_to_read.readlines()\n\n for each_row in txt_file:\n print(each_row)\n if each_row not in ('\\n', '\\x0c'):\n body_text, author = each_row.split(' - ')\n quote_objects.append(\n QuoteModel(body_text.strip(), author.strip())\n )\n else:\n pass\n else:\n raise InvalidFileExtension('The file extension is not txt')\n\n except FileNotFoundError:\n print('The file is not found')\n\n return quote_objects\n","sub_path":"QuoteEngine/TextIngestor.py","file_name":"TextIngestor.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"636796444","text":"import sys\nsys.setrecursionlimit(1 << 20)\nINF = float('inf')\n\n\ndef read_int_list():\n return list(map(int, input().split()))\n\n\ndef read_ints():\n return map(int, input().split())\n\n\ndef main():\n N = int(input())\n myP = {}\n for d in read_int_list():\n if d not in myP:\n myP[d] = 0\n myP[d] += 1\n\n M = int(input())\n for t in read_int_list():\n if t not in myP or myP[t] == 0:\n print('NO')\n return\n else:\n myP[t] -= 1\n print('YES')\n\n\nmain()\n","sub_path":"others/others_code_fes_2017b.py","file_name":"others_code_fes_2017b.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"146369792","text":"import numpy as np \nimport matplotlib.pyplot as plt \n\n# height (cm)\nx = np.array([[147, 150, 153, 158, 163, 165, 168, 170, 173, 175, 178, 180, 183]]).T\n# weight (kg)\ny = np.array([[ 49, 50, 51, 54, 58, 59, 60, 62, 63, 64, 66, 67, 68]]).T\n\n# Visualize data \ndef display(x, y):\n plt.plot(y, x , 'ro')\n plt.axis([ 45,75 ,140,190 ])\n plt.xlabel('Weight (kg)')\n plt.ylabel('Height (cm)')\n plt.show() \ndisplay(x, y)\n\n# Building Xbar \none = np.ones((x.shape[0] , 1))\nXbar = np.concatenate((one , x) , axis = 1 )\n\n# Calculating weights of the fitting line \nA = np.dot(Xbar.T , Xbar)\nb = np.dot(Xbar.T , y)\nW = np.dot(np.linalg.pinv(A) ,b )\nprint (W)\n\ndef guess ( xd,w):\n y0 = w[1][0]*xd + w[0][0]\n return y0 \n\nprint (guess(157 , W))\n\ndef print_fitting_line(w , x , y ):\n w0 = w[0][0]\n w1 = w[1][0]\n x0 = 
np.linspace(145,185 , 2)\n y0 = w1*x0 + w0\n plt.plot(x.T , y.T,'ro')\n plt.plot(x0 , y0)\n plt.axis([140, 190, 45, 75])\n plt.xlabel('Height (cm)')\n plt.ylabel('Weight (kg)')\n plt.show()\n\nprint_fitting_line(W ,x,y)\n","sub_path":"Linear_rgesion/Linear_rgression.py","file_name":"Linear_rgression.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"446800852","text":"import os\nimport logging\nimport requests\nimport facebook\nimport urllib.parse as urlparse\n\nfrom django.core.files.base import ContentFile\nfrom .models import Post, Hashtag, FacebookApp, Subscription\nfrom .app_settings import FACEBOOK_API_VERSION\n\nlogger = logging.getLogger('default')\n\n\ndef get_media_by_code(facebook_id, id_application):\n try:\n api = api_facebook(id_application)\n media_object = api.get_object(id=str(facebook_id), \n fields=\"id,name,created_time,from,link,images\")\n return None, media_object\n except:\n error = \"Error while fetching.\"\n logger.exception(error)\n return error, None\n\n\ndef save_post(app_id, feed_post, is_show, hashtag):\n media_id = feed_post['id']\n link = feed_post['permalink_url']\n caption = feed_post['message']\n media_url = feed_post['full_picture']\n created_at = feed_post['created_time']\n username = feed_post['from']['id']\n\n post, created = Post.objects.get_or_create(\n media_id=media_id, application_id=app_id,\n defaults={\n 'link': link,\n 'username': username,\n 'caption': caption,\n 'created_at': created_at,\n 'show': is_show\n }\n )\n\n if created:\n # save image\n photo_content = ContentFile(requests.get(media_url).content)\n post.photo.save(os.path.basename(media_url), photo_content)\n # save tags\n app_hashtags = Hashtag.objects.filter(application_id=app_id).iterator()\n hashtags_from_url_page = [word[1:].lower() for word in feed_post['message'].split() if word.startswith('#')]\n hashtags_list = [tag for tag in app_hashtags for hashtag in hashtags_from_url_page if tag.name == hashtag]\n for hashtag in hashtags_list:\n post.hashtags.add(hashtag)\n if hashtags_list:\n post.save()\n return post\n\n\ndef get_media_by_url(application, url):\n parsed = urlparse.urlparse(url)\n if urlparse.urlparse(url).path == '/photo.php':\n # Verify if it photo from group \n facebook_id = urlparse.parse_qs(parsed.query)['fbid'][0]\n elif list(filter(None, parsed.path.split('/')))[1] == 'photos':\n # Verify if it photo from page\n facebook_id = list(filter(None, parsed.path.split('/')))[-1]\n return get_media_by_code(facebook_id, application.id)\n\n\ndef sync_by_tag(app_id, tag, is_show, api):\n hashtag = '#' + str(tag.name)\n api = api_facebook(app_id)\n for subscription in Subscription.objects.all():\n feeds = api.get_all_connections(id=subscription.facebook_id, connection_name='feed',\n fields=\"id,message,created_time, \\\n permalink_url,type,from,full_picture\")\n for i, feed_post in enumerate(feeds):\n if subscription.last_synced_post.get(str(tag.name)) == feed_post['id']:\n break\n elif i == 0:\n subscription.last_synced_post[str(tag.name)] = feed_post['id']\n subscription.save()\n elif feed_post['type'] == 'photo' and feed_post.get('message') != None and hashtag in feed_post.get('message').lower().split():\n save_post(app_id, feed_post, is_show, tag)\n\ndef api_facebook(app_id):\n try:\n app = FacebookApp.objects.get(id=app_id)\n is_show = app.hashtag_is_show\n oauth_data = {}\n oauth_data['access_token'] = app.access_token\n except:\n message = \"Cannot get 
application.\"\n logger.exception(message)\n return None\n\n try:\n api = facebook.GraphAPI(access_token=oauth_data['access_token'], version=FACEBOOK_API_VERSION)\n return api\n except:\n message = \"Error. Verify your access token.\"\n logger.exception(message)\n return None","sub_path":"django_facebook_photo_api/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"83478300","text":"__author__ = 'yuhymuk'\n\nsum = int(input('enter your sum money:'))\n\ndef choose_banknote():\n list_banknote = []\n while True:\n banknote = input('enter bank-note or ok for end input: ')\n if banknote == 'ok':\n break\n try:\n banknote = int(banknote)\n list_banknote.append(banknote)\n except:\n print('enter valid value')\n list_banknote.sort(reverse=True)\n return list_banknote\n\n\ndef count_banknote(a, sum):\n list_count = []\n for i in range(len(a)):\n count = sum//a[i]\n remaider = sum % a[i]\n list_count.append(count)\n sum = remaider\n if remaider == 0:\n break\n else: continue\n if sum != 0:\n print('you can not pay, because nonmultiple suma %s' %sum)\n return list_count\n\ndef result(lcount, lbanknote):\n print('count nominal')\n for i in range(len(lcount)):\n print(' %s - %s'% (lcount[i], lbanknote[i]))\n\ncurrent_banknote = choose_banknote()\nlist_count = count_banknote(current_banknote, sum)\nresult(list_count, current_banknote)\n\n\n","sub_path":"bankomat.py","file_name":"bankomat.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"190124310","text":"# author: Ailen Aspe\r\n\r\nfrom flask import Flask, jsonify, request\r\nfrom database import DBconnection\r\nfrom flask_httpauth import HTTPBasicAuth\r\nfrom flask import render_template, redirect, url_for, session, flash\r\nimport sys, flask, os\r\nimport warnings\r\nfrom flask.exthook import ExtDeprecationWarning\r\n\r\napp = Flask (__name__)\r\nauth = HTTPBasicAuth ()\r\n\r\n\r\ndef spcall(query, param, commit=False):\r\n try:\r\n dbo = DBconnection ()\r\n cursor = dbo.getcursor ()\r\n cursor.callproc (query, param)\r\n res = cursor.fetchall ()\r\n\r\n if commit:\r\n dbo.dbcommit ()\r\n return res\r\n\r\n except:\r\n res = [(\"Error: \" + str (sys.exc_info ()[0]) + \" \" + str (sys.exc_info ()[1]),)]\r\n\r\n return res\r\n\r\n\r\n@app.route ('/')\r\ndef index():\r\n return \"HI\"\r\n\r\n#search\r\n@app.route('/focaldata/', methods=['GET'])\r\ndef searchfocal(data):\r\n\r\n res =spcall('searchfocal',(data,), True)\r\n if 'Error' in str(res[0][0]):\r\n return jsonify({'status':'error', 'message':res[0][0]})\r\n\r\n recs=[]\r\n for r in res:\r\n recs.append({\"fname\": r[0], \"lname\": r[1], \"position\": str(r[2])})\r\n\r\n return jsonify({'status':'ok', 'entries':recs, 'count':len(recs)})\r\n\r\n\r\n@app.route('/childdata/', methods=['GET'])\r\ndef searchchild(data):\r\n res=spcall('searchchild', (data,), True)\r\n if 'Error' in str(res[0][0]):\r\n return jsonify({'status':'error', 'message':res[0][0]})\r\n\r\n recs=[]\r\n for r in res:\r\n recs.append({\"fname\":r[0], \"lname\":r[1], \"status\":r[2]})\r\n\r\n return jsonify({'status':'ok', 'entries':recs, 'count':len(recs)})\r\n\r\n\r\n# view existing focals\r\n@app.route ('/focalentries', methods=['GET'])\r\ndef getfocal():\r\n res = spcall ('getfocal', ())\r\n\r\n if 'Error' in str (res[0][0]):\r\n return jsonify ({'status': 'error', 'message': res[0][0]})\r\n recs = 
[]\r\n\r\n for r in res:\r\n recs.append ({\"id\": r[0], \"first_name\": r[1], \"last_name\": r[2], \"position\": r[3]})\r\n return jsonify ({'status': 'ok', 'entries': recs, 'count': len (recs)})\r\n\r\n\r\n\r\n#view child data\r\n@app.route('/childentries', methods=['GET'])\r\ndef getchildren():\r\n res= spcall('getchildren', ())\r\n\r\n recs=[]\r\n if 'Error' in str(res[0][0]):\r\n return jsonify({'status':'ok', 'message':res[0][0]})\r\n for r in res:\r\n recs.append({\"id\":r[0], \"first_name\":r[1], \"last_name\":r[2], \"weight\":r[3], \"height\":r[4], \"status\":r[5]})\r\n\r\n return jsonify({'status':'ok', 'entries':recs, 'count':len(recs)})\r\n\r\n@app.route('/access/<id>/<name>', methods=['POST'])\r\ndef login(id, name):\r\n \r\n res = spcall (\"getaccess\", (id, name), True)\r\n return jsonify({'status':res[0][0]})\r\n\r\n#statistics\r\n@app.route('/childstat')\r\ndef showchildstat():\r\n status= ['Obesity', 'Normal', 'Underweight']\r\n recs=[]\r\n for n in status:\r\n recs.append(spcall(\"countstat\", (str(n),), True))\r\n\r\n return jsonify({'status':'ok','count':len(recs), 'Obese':recs[0][0][0], 'Normal':recs[1][0][0], 'UW':recs[2][0][0]})\r\n\r\n@app.route('/focstat')\r\ndef showfocstat():\r\n\tres =spcall(\"countfoc\", (), True)\r\n\treturn jsonify({'status':'ok', 'data':res[0][0]})\r\n\t\r\n# new focal added\r\n@app.route ('/focal', methods=['POST'])\r\ndef addfocal():\r\n fid = request.form['id']\r\n lname = request.form['lname']\r\n fname = request.form['fname']\r\n pos = request.form['pos']\r\n res = spcall (\"newemployee\", (fid, lname, fname, pos), True)\r\n if 'Employee Exists' in res[0][0]:\r\n return jsonify ({'status': 'error', 'message': res[0][0]})\r\n\r\n return jsonify ({'status': 'ok', 'message': res[0][0]})\r\n\r\n#add new child\r\n\r\n@app.route('/child', methods=['POST'])\r\ndef addchild():\r\n childid = request.form['childid']\r\n childlname = request.form['childlname']\r\n childfname = request.form['childfname']\r\n childweight = request.form['weight']\r\n childheight =request.form['height']\r\n\r\n res =spcall(\"newchild2\", (childid, childfname, childlname, childweight, childheight), True)\r\n if 'Child exists' in res[0][0]:\r\n return jsonify ({'status':'error', 'message':res[0][0]})\r\n return jsonify({'status':'ok', 'message':res[0][0]})\r\n\r\n\r\n@app.after_request\r\ndef add_cors(resp):\r\n resp.headers['Access-Control-Allow-Origin'] = flask.request.headers.get('Origin', '*')\r\n #resp.headers['Access-Control-Allow-Origin'] = flask.request.headers.get ('Origin')\r\n resp.headers['Access-Control-Allow-Credentials'] = True\r\n resp.headers['Access-Control-Allow-Methods'] = 'POST, OPTIONS, GET, PUT, DELETE'\r\n resp.headers['Access-Control-Allow-Headers'] = flask.request.headers.get ('Access-Control-Request-Headers',\r\n 'Authorization')\r\n # set low for debugging\r\n\r\n if app.debug:\r\n resp.headers[\"Access-Control-Max-Age\"] = '1'\r\n return resp\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run (debug=True)\r\n","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":4913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"210162060","text":"from django.urls import re_path, path\nfrom events.views import (\n EventView,\n EventsView,\n LocationView,\n LocationsView,\n EventInviteDownloadView,\n)\nfrom events.feeds import Calendar\n\nslug = '(?P<slug>[-\\\w\\\d]+)'\npk = '(?P<pk>[0-9]+)'\ndate = '(?P<date>[0-9]{4}-?[0-9]{2}-?[0-9]{2})'\n\nurlpatterns = [\n # location\n 
re_path(r'^location/{slug}/{pk}/$'.format(\n slug=slug,\n pk=pk),\n LocationView.as_view(),\n name='location'),\n re_path(r'^locations/$', LocationsView.as_view(), name='locations'),\n\n # event\n re_path(r'^{date}/{slug}/{pk}/$'.format(\n date=date,\n slug=slug,\n pk=pk),\n EventView.as_view(),\n name='event'),\n re_path(r'^invite/{pk}/$'.format(\n pk=pk),\n EventInviteDownloadView.as_view(),\n name='event-invite'),\n re_path(r'^$', EventsView.as_view(), name='events'),\n\n # calendar\n path('feed/ical/', Calendar(), name='calendar'),\n]\n","sub_path":"events/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"296722355","text":"\"\"\"Tests of NeuronUnit test classes\"\"\"\nimport unittest\nimport os\nimport sys\nfrom sciunit.utils import NotebookTools\nimport dask\nfrom dask import bag\nimport dask.bag as db\nimport matplotlib as mpl\nmpl.use('Agg') # Avoid any problems with Macs or headless displays.\n\nfrom sciunit.utils import NotebookTools,import_all_modules\nfrom neuronunit import neuroelectro,bbp,aibs\n\nfrom base import *\n\n\ndef grid_points():\n from neuronunit.optimization.optimization_management import map_wrapper\n\n npoints = 2\n nparams = 10\n from neuronunit.optimization.model_parameters import model_params\n provided_keys = list(model_params.keys())\n USE_CACHED_GS = False\n from neuronunit.optimization import exhaustive_search\n ## exhaustive_search\n\n grid_points = exhaustive_search.create_grid(npoints = npoints,nparams = nparams)\n b0 = db.from_sequence(grid_points[0:2], npartitions=8)\n dtcpop = list(db.map(exhaustive_search.update_dtc_grid,b0).compute())\n assert dtcpop is not None\n return dtcpop\n\ndef test_compute_score(dtcpop):\n from neuronunit.optimization.optimization_management import map_wrapper\n from neuronunit.optimization import get_neab\n from neuronunit.optimization.optimization_management import dtc_to_rheo\n from neuronunit.optimization.optimization_management import nunit_evaluation\n from neuronunit.optimization.optimization_management import format_test\n dtclist = list(map(dtc_to_rheo,dtcpop))\n for d in dtclist:\n assert len(list(d.attrs.values())) > 0\n dtclist = map_wrapper(format_test, dtclist)\n dtclist = map_wrapper(nunit_evaluation, dtclist)\n return dtclist\n\nclass testOptimizationBackend(NotebookTools,unittest.TestCase):\n\n def setUp(self):\n self.predictions = None\n self.predictionp = None\n self.score_p = None\n self.score_s = None\n self.grid_points = grid_points()\n dtcpop = self.grid_points\n self.test_compute_score = test_compute_score\n self.dtcpop = test_compute_score(dtcpop)\n self.dtc = self.dtcpop[0]\n self.rheobase = self.dtc.rheobase\n from neuronunit.models.reduced import ReducedModel\n from neuronunit.optimization import get_neab\n self.standard_model = ReducedModel(get_neab.LEMS_MODEL_PATH, backend='NEURON')\n self.model = ReducedModel(get_neab.LEMS_MODEL_PATH, backend='NEURON')\n\n def test_rheobase_on_list(self):\n from neuronunit.optimization import exhaustive_search\n grid_points = self.grid_points\n second_point = grid_points[int(len(grid_points)/2)]\n three_points = [grid_points[0],second_point,grid_points[-1]]\n self.assertEqual(len(three_points),3)\n dtcpop = list(map(exhaustive_search.update_dtc_grid,three_points))\n for d in self.dtcpop:\n assert len(list(d.attrs.values())) > 0\n dtcpop = self.test_compute_score(self.dtcpop)\n self.dtcpop = dtcpop\n return dtcpop\n\n\n def 
test_map_wrapper(self):\n ''\n npoints = 2\n nparams = 3\n from neuronunit.optimization.model_parameters import model_params\n provided_keys = list(model_params.keys())\n USE_CACHED_GS = False\n from neuronunit.optimization import exhaustive_search\n from neuronunit.optimization.optimization_management import map_wrapper\n grid_points = exhaustive_search.create_grid(npoints = npoints,nparams = nparams)\n b0 = db.from_sequence(grid_points[0:2], npartitions=8)\n dtcpop = list(db.map(exhaustive_search.update_dtc_grid,b0).compute())\n assert dtcpop is not None\n dtcpop_compare = map_wrapper(exhaustive_search.update_dtc_grid,grid_points[0:2])\n for i,j in enumerate(dtcpop):\n for k,v in dtcpop_compare[i].attrs.items():\n print(k,v,i,j)\n self.assertEqual(j.attrs[k],v)\n return True\n\n def test_grid_dimensions(self):\n from neuronunit.optimization.model_parameters import model_params\n provided_keys = list(model_params.keys())\n USE_CACHED_GS = False\n from neuronunit.optimization import exhaustive_search\n from neuronunit.optimization.optimization_management import map_wrapper\n import dask.bag as db\n npoints = 2\n nparams = 3\n for i in range(1,10):\n for j in range(1,10):\n grid_points = exhaustive_search.create_grid(npoints = i, nparams = j)\n b0 = db.from_sequence(grid_points[0:2], npartitions=8)\n dtcpop = list(db.map(exhaustive_search.update_dtc_grid,b0).compute())\n self.assertEqual(i*j,len(dtcpop))\n self.assertNotEqual(dtcpop,type(None))\n\n dtcpop_compare = map_wrapper(exhaustive_search.update_dtc_grid,grid_points[0:2])\n self.assertNotEqual(dtcpop_compare,type(None))\n self.assertEqual(len(dtcpop_compare),len(dtcpop))\n for i,j in enumerate(dtcpop):\n for k,v in dtcpop_compare[i].attrs.items():\n print(k,v,i,j)\n self.assertEqual(j.attrs[k],v)\n\n return True\n\n\n\n def test_neuron_set_attrs(self):\n from neuronunit.models.reduced import ReducedModel\n from neuronunit.optimization import get_neab\n self.assertNotEqual(self.dtcpop,None)\n dtc = self.dtcpop[0]\n self.model = ReducedModel(get_neab.LEMS_MODEL_PATH, backend=('NEURON',{'DTC':dtc}))\n temp = [ v for v in self.model.attrs.values() ]\n assert len(temp) > 0\n self.assertGreater(len(temp),0)\n\n\n\n import numpy as np\n\n def test_set_model_attrs(self):\n from neuronunit.optimization.model_parameters import model_params\n provided_keys = list(model_params.keys())\n from bluepyopt.deapext.optimisations import DEAPOptimisation\n DO = DEAPOptimisation()\n for i in range(1,10):\n for j in range(1,10):\n provided_keys = list(model_params.keys())[j]\n DO.setnparams(nparams = i, provided_keys = provided_keys)\n\n\n\n def test_frp(self):\n from neuronunit.models.reduced import ReducedModel\n from neuronunit.optimization import get_neab\n model = ReducedModel(get_neab.LEMS_MODEL_PATH,name=str('vanilla'),backend=('NEURON'))\n attrs = {'a':0.02, 'b':0.2, 'c':-65+15*0.5, 'd':8-0.5**2 }\n from neuronunit.optimization.data_transport_container import DataTC\n dtc = DataTC()\n from neuronunit.tests import fi\n model.set_attrs(attrs)\n from neuronunit.optimization import get_neab\n rtp = get_neab.tests[0]\n rheobase = rtp.generate_prediction(model)\n self.assertTrue(float(rheobase['value']))\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"neuronunit/unit_test/test_optimization.py","file_name":"test_optimization.py","file_ext":"py","file_size_in_byte":6775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"41507147","text":"import pandas as pd\nfrom sklearn import 
metrics\nimport matplotlib.pyplot as plt\n\nfrom sklearn.neighbors import KNeighborsClassifier\n\ndata = pd.read_csv('./data/WorkListClose.csv', index_col=0)\nfeature_cols = ['account_count','wh_account_count','sales_account_count','sales_loc_account_count','period_count','refund_amount','back_log_days','close_days','approve_minute','approve_page_count']\nX = data[feature_cols]\ny = data.approve_flag\n\nknn = KNeighborsClassifier(n_neighbors=5)\nknn.fit(X, y)\ny_pred = knn.predict(X)\n# Classification accuracy: Proportion of correct predictions\n# Common evaluation metric for classification problems\n# Also known as Training Accuracy: when you train and test the model on the same data\nprint(metrics.accuracy_score(y, y_pred))\n\n# Train/Test Split\n# Split the dataset into 2 pieces: a training set and a testing set.\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=4)\n\n# Model can be trained and tested on different data\n# Response values are known for the testing set, and thus predictions can be evaluated\n# Testing accuracy is a better estimate than training accuracy of out-of-sample performance\nprint(X_train.shape)\nprint(X_test.shape)\nprint(y_train.shape)\nprint(y_test.shape)\n\nknn = KNeighborsClassifier(n_neighbors=5)\nknn.fit(X_train, y_train)\ny_pred = knn.predict(X_test)\nprint(metrics.accuracy_score(y_test, y_pred))\n\n# Regularization HyperParameter\nk_range = list(range(1, 26))\nscores = []\nfor k in k_range:\n knn = KNeighborsClassifier(n_neighbors=k)\n knn.fit(X_train, y_train)\n y_pred = knn.predict(X_test)\n scores.append(metrics.accuracy_score(y_test, y_pred))\n\nplt.xlabel('Value of K for KNN')\nplt.ylabel('Testing Accuracy')\nplt.plot(k_range, scores)\nplt.show()\n\nk_score_max = max(scores)\nprint(k_score_max)\n\nk_final = scores.index(k_score_max) + 1\nprint(k_final)\n\n# Training accuracy rises as model complexity increases\n# Testing accuracy penalizes models that are too complex or not complex enough\n# For KNN models, complexity is determined by the value of K (lower value = more complex)\n\n# Making predictions on out-of-sample data\nknn = KNeighborsClassifier(n_neighbors=k_final)\nknn.fit(X, y)\n# make a prediction for an out-of-sample observation\nprint(knn.predict([[144,12,12,120,204,988.19,732,732,10,5]]))\n\n# Downsides of Train/Test Split?\n# Provides a high-variance estimate of out-of-sample accuracy\n# K-fold cross-validation overcomes this limitation\n# But, train/test split is still useful because of its flexibility and speed\n","sub_path":"7.0_model_evaluation.py","file_name":"7.0_model_evaluation.py","file_ext":"py","file_size_in_byte":2585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"58984494","text":"import json\nimport random\nfrom ws4py.client.threadedclient import WebSocketClient\n\n\nclass ExampleClient(WebSocketClient):\n\n def __init__(self, url):\n super().__init__(url)\n self.name = \"Agent1\"\n\n def opened(self):\n pass\n # self.send(json.dumps({\"name\": self.name}))w\n\n def closed(self, code, reason=None):\n print(\"Closed down\", code, reason)\n\n def received_message(self, message):\n content = json.loads(str(message))\n print(content)\n if \"action_list\" in content and content[\"action_list\"]:\n card_type = random.choice(list(content[\"action_list\"].keys()))\n print(\"Choose type \", card_type)\n rank = random.choice(list(content[\"action_list\"][card_type].keys()))\n action = 
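The closing comments of the model-evaluation script above note that K-fold cross-validation overcomes the high variance of a single train/test split. A hedged sketch of choosing K that way, assuming the same X and y built from WorkListClose.csv:

from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsClassifier

cv_scores = []
for k in range(1, 26):
    knn = KNeighborsClassifier(n_neighbors=k)
    # mean accuracy over 10 folds varies less than one 60/40 split
    cv_scores.append(cross_val_score(knn, X, y, cv=10, scoring='accuracy').mean())

best_k = cv_scores.index(max(cv_scores)) + 1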
random.choice(list(content[\"action_list\"][card_type][rank]))\n print(\"Choose action:\", action)\n self.send(json.dumps({\"action\": action, \"type\": card_type, \"rank\": rank}))\n\n\nif __name__ == '__main__':\n try:\n ws = ExampleClient('ws://127.0.0.1:23456/game/client4')\n ws.connect()\n ws.run_forever()\n except KeyboardInterrupt:\n ws.close()\n","sub_path":"gd/example-client/client4.py","file_name":"client4.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"462998728","text":"import os.path\nfrom os import path\n\n# Prints a bar\ndef print_split():\n print(\"\\n--------------------------------------------------\\n\")\n\n\ndef input_graph():\n print(\"---- NOTE ----\\nThe file must be in the following format:\\nX\\t\\t\\tX - Number of nodes\\nN0 N1\\nN1 N3\\nN0 N3\\n....\\n....\")\n print_split()\n file_name = input(\"Enter the file name: \")\n # Creating absolute path to the file in folder examples\n file_dir = os.path.dirname(__file__)\n rel_path = \"examples/\" + file_name\n abs_file_path = os.path.join(file_dir, rel_path)\n # Checking if the file exists\n if os.path.exists(abs_file_path) == False:\n # File not found, throw error\n print(\"The file doesn't exist!\")\n raise Exception(\"The file didn't load because it doesn't exist\")\n # File found, opening\n f = open(abs_file_path, 'r')\n\n # Read if the graph is directed or undirected\n no_nodes = next(f).split()\n no_nodes = int(no_nodes[0])\n\n # Read the graph\n graph = [[int(x) for x in line.split()] for line in f] \n f.close() # File not needed, all is in tmp_array\n\n \n return graph, no_nodes\n\nclass Graph:\n\tdef __init__(self, nodes): \n\t\tself.graph = []\n\t\tself.nodes = nodes\n\t\tself.total = 0\n\t\t\n\tdef add_edge(self, p, c, weight):\n\t\tself.graph.append([p, c, weight])\n \n\t# BFS iterative implementation using queue\n\tdef find(self, parent, i): \n\t\tif parent[i] == i: \n\t\t\treturn i \n\t\treturn self.find(parent, parent[i]) \n \n # A function that does union of two sets of x and y \n\tdef union(self, parent, rank, x, y): \n\t\txroot = self.find(parent, x) \n\t\tyroot = self.find(parent, y) \n \n # Attach smaller rank tree under root of \n # high rank tree (Union by Rank) \n\t\tif rank[xroot] < rank[yroot]: \n\t\t\tparent[xroot] = yroot \n\t\telif rank[xroot] > rank[yroot]: \n\t\t\tparent[yroot] = xroot \n \n # If ranks are the same, then make it a root \n # and increment the rank by one \n\t\telse: \n\t\t\tparent[yroot] = xroot \n\t\t\trank[xroot] += 1\n \n # The main function to construct MST using Kruskal's algorithm \n\tdef kruskal(self): \n\t\tresult = [] \n\t\ti = 0 # Sorted graph index\n\t\te = 0 # result[] index\n\n\t\t# Step 1: Sort the graph weights in non-decreasing order. 
\n\t\tself.graph = sorted(self.graph, key = lambda item:item[2]) \n\n\t\tparent = []\n\t\trank = [] \n\n        # Create node subsets with single elements \n\t\tfor node in range(self.nodes): \n\t\t\tparent.append(node) \n\t\t\trank.append(0) \n\n        # Number of graph to be taken is equal to V-1 \n\t\twhile e < self.nodes -1 : \n\n        # Step 2: Pick the smallest edge and increment the index for next iteration \n\t\t\tp, c, weight = self.graph[i] \n\t\t\ti = i + 1\n\t\t\tx = self.find(parent, p) \n\t\t\ty = self.find(parent, c) \n\n        # If inclusion of this edge doesn't create the cycle, include it in the result\n        # and increment the index of result for next edge \n\t\t\tif x != y: \n\t\t\t\te = e + 1 \n\t\t\t\tresult.append([p, c, weight]) \n\t\t\t\tself.union(parent, rank, x, y)\n\t\t\t\tself.total = self.total + weight\n\t\t\t\t\n\n        # Else discard the edge \n\n\t\tprint (\"Minimal spanning tree from the graph: \")\n\t\tfor p, c, weight in result: \n\t\t\tprint (str(p) + \" -- \" + str(c) + \", weight(\" + str(weight) + \")\")\n\ndef main():\n\tgraph, no_nodes = input_graph()\n\tg = Graph(no_nodes)\n\tfor i in range(len(graph)):\n\t\tg.add_edge(graph[i][0], graph[i][1], graph[i][2])\n\tg.kruskal() \n\tprint(\"Weight: \", g.total)\n\tprint_split()\n\n\nif __name__ == '__main__':\n    main()","sub_path":"12. Kruskal’s Minimum Spanning Tree Algorithm/Kruskal_MST_algorithm.py","file_name":"Kruskal_MST_algorithm.py","file_ext":"py","file_size_in_byte":3454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"434414552","text":"\"\"\"\n386. Lexicographical Numbers\nGiven an integer n, return 1 - n in lexicographical order.\n\nFor example, given 13, return: [1,10,11,12,13,2,3,4,5,6,7,8,9].\n\nPlease optimize your algorithm to use less time and space. The input size may be as large as 5,000,000.\n\"\"\"\n\n# use idea of 10-branch tree\n# Runtime: 116 ms, faster than 51.95% of Python3 online submissions for Lexicographical Numbers.\n# Memory Usage: 18.5 MB, less than 100.00% of Python3 online submissions for Lexicographical Numbers.\nclass Solution:\n    def lexicalOrder(self, n: int) -> List[int]:\n        res = [0 for _ in range(n)]\n        curr = 1\n        for i in range(n):\n            res[i] = int(curr)\n            if curr * 10 <= n:\n                curr *= 10\n            else:\n                if curr >= n:\n                    curr = curr // 10\n                curr += 1\n                \n                while curr % 10 == 0:\n                    curr //= 10\n        return res\n    ","sub_path":"Widen/LC386_Lexicographical_Numbers.py","file_name":"LC386_Lexicographical_Numbers.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"47497207","text":"import os\nimport boto3\ns3 = boto3.client('s3')\nBUCKET_NAME='sss-compressr'\n\nfrom flask import Flask, render_template, request\napp = Flask(__name__)\nfrom werkzeug.utils import secure_filename\n\n@app.route('/')\ndef home():\n    return render_template(\"index2.html\")\n\n@app.route('/upload',methods=['post'])\ndef upload():\n    if request.method == 'POST':\n        img = request.files['file']\n        if img:\n            filename = secure_filename(img.filename)\n            img.save(filename)\n            s3.upload_file(\n                Bucket = BUCKET_NAME,\n                Filename=filename,\n                Key = filename\n            )\n            msg = \"Upload Done ! 
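The recursive find() in the Kruskal record above walks up to the root on every call. A standard refinement, not present in the original, is path compression, which flattens the tree while searching so later lookups stay near O(1) amortized; a minimal sketch:

def find(parent, i):
    # point every visited node directly at its root
    if parent[i] != i:
        parent[i] = find(parent, parent[i])
    return parent[i]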
\"\n os.system(f\"rm {filename}\")\n return render_template(\"index2.html\",msg =msg)\n\nif __name__ == \"__main__\":\n app.run(debug=True, host='0.0.0.0')\n","sub_path":"code/s3_upload/trial.py","file_name":"trial.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"451486160","text":"# coding: utf-8\n\n\"\"\"\nFMF Tests Discovery\n\nMinimal config example (all available tests from the current\nrepository used by default)::\n\n discover:\n how: fmf\n\nFull config example::\n\n discover:\n how: fmf\n repository: https://github.com/psss/tmt\n revision: master\n destination: tmt\n filter: 'tier: 1'\n\"\"\"\n\nimport os\nimport re\nimport fmf\nimport tmt\nimport shutil\nimport tmt.steps.discover\n\nfrom click import echo\nfrom fmf.utils import listed\n\nclass DiscoverFmf(tmt.steps.discover.DiscoverPlugin):\n \"\"\" Discover available tests from FMF metadata \"\"\"\n\n def __init__(self, data, step):\n \"\"\" Check supported attributes \"\"\"\n super(DiscoverFmf, self).__init__(\n data=data, step=step, name=data['name'])\n self.tree = None\n # Convert data into attributes for easy handling\n self.repository = data.get('repository')\n self.revision = data.get('revision')\n filtr = data.get('filter', [])\n self.filters = filtr if isinstance(filtr, list) else [filtr]\n self._tests = []\n\n def go(self):\n \"\"\" Discover available tests \"\"\"\n super(DiscoverFmf, self).go()\n testdir = os.path.join(self.workdir, 'tests')\n # Clone provided git repository\n if self.repository:\n self.info('repository', self.repository, 'green')\n self.debug(f\"Clone '{self.repository}' to '{testdir}'.\")\n self.run(f'git clone {self.repository} {testdir}')\n # Copy current directory to workdir\n else:\n directory = self.step.plan.run.tree.root\n self.info('directory', directory, 'green')\n self.debug(\"Copy '{}' to '{}'.\".format(directory, testdir))\n shutil.copytree(directory, testdir)\n # Checkout revision if requested\n if self.revision:\n self.info('revision', self.revision, 'green')\n self.debug(f\"Checkout revision '{self.revision}'.\")\n self.run(f\"git checkout -f {self.revision}\", cwd=testdir)\n # Show filters if provided\n if self.filters:\n for filter_ in self.filters:\n self.info('filter', filter_, 'green')\n # Initialize the metadata tree\n self.debug(f\"Check metadata tree in '{testdir}'.\")\n # Nothing more to do here when in dry mode\n if self.opt('dry'):\n return []\n tests = tmt.Tree(testdir).tests(filters=self.filters)\n # Modify test names and paths to make them unique\n for test in tests:\n test.name = f\"/{self.name}{test.name}\"\n test.path = f\"/{self.name}/tests{test.path}\"\n # Summary of selected tests, test list in verbose mode\n self.info('tests', listed(len(tests), 'test') + ' selected', 'green')\n for test in tests:\n self.verbose(test.name, color='red', shift=1)\n self._tests = tests\n\n def tests(self):\n \"\"\" Return all discovered tests \"\"\"\n return self._tests\n\n def dump(self):\n \"\"\" Dump current step data \"\"\"\n self.data['filter'] = self.filters\n return self.data\n","sub_path":"tmt/steps/discover/fmf.py","file_name":"fmf.py","file_ext":"py","file_size_in_byte":3122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"375868391","text":"#!/usr/bin/env python\n\nfrom __future__ import print_function\nfrom future import standard_library\nstandard_library.install_aliases()\nimport bs4\nimport json\nimport os\nimport 
re\nimport plistlib\nimport requests\nfrom io import StringIO\nimport tempfile\nimport zipfile\n\nSCHEMA_VERSION = 2\nCDN_URL = 'https://edge.ldscdn.org/mobile/gospelstudy/production/v1'\n\nITEM_URIS = [\n '/scriptures/ot',\n '/scriptures/nt',\n '/scriptures/bofm',\n '/scriptures/dc-testament',\n '/scriptures/pgp',\n]\n\ntestaments_data = []\n\nprint('Getting the current schema {} catalog version...'.format(SCHEMA_VERSION))\nindex_url = '{}/schemas/{}/index.json'.format(CDN_URL, SCHEMA_VERSION)\nr = requests.get(index_url)\nif r.status_code != 200:\n print(r.text)\nelse:\n index = r.json()\n catalog_version = index['catalogVersion']\n\n print('Getting catalog {}...'.format(catalog_version))\n catalog_url = '{}/catalog/{}.xml'.format(CDN_URL, SCHEMA_VERSION)\n r = requests.get(catalog_url)\n if r.status_code != 200:\n print(r.text)\n else:\n catalog = plistlib.readPlistFromString(r.content)\n \n for item_uri in ITEM_URIS:\n item = [item for item in catalog['items'] if item['uri'] == item_uri and item['languageCode'] == '000'][0]\n \n item_id = item['itemID']\n item_version = item['version']\n \n print('Getting item {}...'.format(item_uri))\n item_zip_url = '{}/content/{}/{}.zip'.format(CDN_URL, item_id, item_version)\n r = requests.get(item_zip_url)\n if r.status_code != 200:\n print(r.text)\n else:\n item_package_path = tempfile.mkdtemp()\n with zipfile.ZipFile(StringIO(r.content), 'r') as zip_file:\n zip_file.extractall(item_package_path)\n \n item_xml_path = '%s/%s/item.xml' % (item_package_path, item_id,)\n item_doc = bs4.BeautifulSoup(open(item_xml_path), 'lxml')\n \n for testament in item_doc.find_all('div', type='testament', uri=True):\n testament_name = os.path.basename(testament['uri'])\n \n books_data = []\n testament_data = dict(\n name=testament_name,\n books=books_data,\n )\n testaments_data.append(testament_data)\n \n for book in testament.find_all('div', type='book', uri=True):\n book_name = os.path.basename(book['uri'])\n \n chapters_data = []\n book_data = dict(\n name=book_name,\n chapters=chapters_data,\n )\n books_data.append(book_data)\n \n for chapter in book.find_all('div', type='chapter', uri=True):\n if re.match(r'^/scriptures/[^/]+/[^/]+/\\d+$', chapter['uri']):\n verses = len(chapter.find_all('p', class_='verse', uri=True))\n \n chapter_data = dict(\n verses=verses,\n )\n chapters_data.append(chapter_data)\n \n \n \nstructure_data = dict(testaments=testaments_data)\nprint(json.dumps(structure_data, sort_keys=True, indent=4, separators=(',', ': ')))\n","sub_path":"bin/download_structure.py","file_name":"download_structure.py","file_ext":"py","file_size_in_byte":3644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"67511865","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport matplotlib\nmatplotlib.use( \"Agg\" )\nimport pylab\nimport analyzeData as aD\nimport numpy as np\nimport sys\n\n\ndef plot( data, rangoX, rangoY, diode_num, dirname = \".\" ):\n# interp = [ 'none', 'nearest', 'bilinear', 'bicubic', 'spline16', 'spline36', 'hanning', 'hamming', 'hermite', 'kaiser', 'quadric', 'catrom', 'gaussian', 'bessel', 'mitchell', 'sinc', 'lanczos' ]\n interp = [ 'none', 'sinc' ]\n aD.plot_parameters()\n for tp in interp:\n fig = pylab.figure( 1, ( 8, 7 ) )\n fig.add_subplot( 111 )\n img = pylab.imshow( data, interpolation = tp, extent = [ min( rangoX ), max( rangoX ), min( rangoY ), max( rangoY ) ], origin = \"lower\" )\n pylab.colorbar( img, fraction = 0.046, pad = 0.04, format='%.1e' )\n pylab.xlabel( 
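In the catalog-download script above, r.content from requests is bytes, so on Python 3 the zip payload belongs in io.BytesIO; StringIO only accepts str and would raise a TypeError there. A minimal sketch of the bytes-safe variant, with a hypothetical URL and destination:

import io
import zipfile
import requests

r = requests.get('https://example.com/archive.zip')  # hypothetical URL
with zipfile.ZipFile(io.BytesIO(r.content)) as zip_file:
    zip_file.extractall('/tmp/archive')  # hypothetical destination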
\"Position [um]\" )\n pylab.ylabel( \"Position [um]\" )\n pylab.tight_layout()\n pylab.savefig( dirname + \"/scan2D_\" + diode_num + \"_%s.png\" % tp)\n pylab.close()\n\ndef readResults():\n pairs = []\n x = []\n y = []\n data_files = []\n fd = open( \"scan2D.txt\", \"r\" )\n lines = fd.readlines()\n for i in range( 1, len( lines ) ):\n #if i > 200: continue\n line = lines[ i ].replace( \"\\n\", \"\" ).split( \"\\t\" )\n data_files.append( line[ 0 ] )\n x.append( int( line[ 1 ] ) )\n y.append( int( line[ 2 ] ) )\n pairs.append( float( line[ 3 ] ) )\n fd.close()\n return data_files, x, y, pairs\n\ndef main(argv):\n data = []\n xx = []\n yy = []\n data_files, x, y, pairs = readResults()\n for datax, datay, pair in zip( x, y, pairs ):\n if datax not in xx:\n data.append( [] )\n xx.append( datax )\n if datay not in yy:\n yy.append( datay )\n data[ -1 ].append( pair )\n plot( np.array( data ).T, xx, yy, argv[0] )\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n","sub_path":"plotScan2DIsaac.py","file_name":"plotScan2DIsaac.py","file_ext":"py","file_size_in_byte":1772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"56733453","text":"#!/usr/bin/python3\n\"\"\"\nUsing what you did in the task #0, extend your Python script to export\ndata in the JSON format.\n\"\"\"\nimport json\nimport requests\nfrom sys import argv\n\n\nif __name__ == \"__main__\":\n\n file_name = 'todo_all_employees.json'\n\n USERS = requests.get('https://jsonplaceholder.typicode.com/users').json()\n TASK = requests.get('https://jsonplaceholder.typicode.com/todos').json()\n data = {}\n\n for user in USERS:\n task = []\n data[user.get('id')] = []\n for todo in TASK:\n if todo.get('userId') == user.get('id'):\n data[user.get('id')].append({\"username\": user.get(\"username\"),\n \"task\": todo.get(\"title\"),\n \"completed\": todo.get(\"completed\")\n })\n\n with open(file_name, mode='w') as json_file:\n json.dump(data, json_file)\n","sub_path":"0x15-api/3-dictionary_of_list_of_dictionaries.py","file_name":"3-dictionary_of_list_of_dictionaries.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"365769283","text":"prev_pos=None\ndire = [0,0] \ndef get_dest(paddle_frect, other_paddle_frect, ball_frect, table_size):\n ''' return x and y destination of ball in flight''' \n global dire,prev_pos\n dire[0]=ball_frect.pos[0]-prev_pos[0]\n dire[1]=ball_frect.pos[1]-prev_pos[1]\n prev_pos[0]=ball_frect.pos[0]\n prev_pos[1]=ball_frect.pos[1]\n table_size=(table_size[0],table_size[1]-ball_frect.size[1])\n\n if dire[0]>0:\n x_dest = \"right\" \n y_travel = (max(paddle_frect.pos[0],other_paddle_frect.pos[0])-ball_frect.size[0]-ball_frect.pos[0])/dire[0]*dire[1]\n\n elif dire[0]<0:\n x_dest = \"left\"\n y_travel = (ball_frect.pos[0]-min(paddle_frect.pos[0],other_paddle_frect.pos[0])-paddle_frect.size[0])/(-dire[0])*dire[1]\n else:\n y_dest = ball_frect.pos[1] \n x_dest = None\n y_travel = 0\n y_dest = ball_frect.pos[1] +y_travel\n square_num =y_dest//table_size[1]\n if square_num==0:\n pass\n elif(square_num%2)==0:\n y_dest += (-square_num*table_size[1])\n elif(square_num%2)==1:\n y_dest += (-square_num*table_size[1])\n y_dest = table_size[1]-y_dest \n \n y_dest +=ball_frect.size[1]/2 \n return x_dest, y_dest \n\ndef pong_ai(paddle_frect, other_paddle_frect, ball_frect, table_size):\n global prev_pos\n if prev_pos == None:\n prev_pos=(ball_frect.pos[0],ball_frect.pos[1])\n 
prev_pos=[ball_frect.pos[0],ball_frect.pos[1]] \n if paddle_frect.pos[1]+paddle_frect.size[1]/2 < ball_frect.pos[1]+ball_frect.size[1]/2:\n return \"down\"\n else:\n return \"up\"\n if paddle_frect.pos[0] > other_paddle_frect.pos[0]:\n side = \"right\"\n else:\n side = \"left\" \n a, b = get_dest(paddle_frect, other_paddle_frect, ball_frect, table_size)\n \n if a == side:\n if b > (paddle_frect.pos[1]+paddle_frect.size[1]/2):\n return \"down\"\n else:\n return \"up\" \n else:\n if (paddle_frect.pos[1]+paddle_frect.size[1]/2)>table_size[1]/2:\n return \"up\"\n else:\n return \"down\"","sub_path":"GoldV.py","file_name":"GoldV.py","file_ext":"py","file_size_in_byte":2074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"558861534","text":"from dotenv import load_dotenv\nload_dotenv()\n\n\nimport asyncio\nimport logging\nimport os\nfrom logging.handlers import TimedRotatingFileHandler\n\nfrom discord.ext.commands import Bot\n\nimport commands\nimport clients.fortnite_api as fortnite_api\nimport clients.fortnite_tracker as fortnite_tracker\nimport clients.stats as stats\nimport clients.interactions as interactions\n\n\nDISCORD_BOT_TOKEN = os.getenv(\"DISCORD_BOT_TOKEN\")\nFORTNITE_DISCORD_ROLE = os.getenv(\"FORTNITE_DISCORD_ROLE\")\nFORTNITE_DISCORD_VOICE_CHANNEL_NAME = os.getenv(\"FORTNITE_DISCORD_VOICE_CHANNEL_NAME\")\n\nLOGGER_LEVEL = os.getenv(\"LOGGER_LEVEL\")\nLOG_FILE_PATH = os.getenv(\"LOG_FILE_PATH\")\n\nSQUAD_PLAYERS_LIST = os.getenv(\"SQUAD_PLAYERS_LIST\").split(\",\")\n\nbot = Bot(command_prefix=\"!\")\n\n\n@bot.event\nasync def on_ready():\n \"\"\" Event handler to setup logger on load \"\"\"\n logger = get_logger_with_context(identifier=\"Main\")\n logger.info(\"Started up %s\", bot.user.name)\n logger.info(\"Bot running on servers: %s\",\n \", \".join([guild.name for guild in bot.guilds]))\n\n\n@bot.event\nasync def on_guild_join(guild):\n \"\"\" Event handler to log when the bot joins a new server \"\"\"\n logger = get_logger_with_context(identifier=\"Main\")\n logger.info(\"Bot added to new server! 
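The get_dest routine in the pong agent above folds an unbounded y coordinate back into the table by checking which mirrored copy of the table it landed in. The same bounce arithmetic as a stand-alone sketch (ball size and paddle offsets omitted):

def fold(y, height):
    strip = int(y // height)   # which mirrored copy of the table y fell into
    y -= strip * height        # shift into [0, height)
    return height - y if strip % 2 else y

assert fold(250, 200) == 150  # one bounce off the far wall
assert fold(450, 200) == 50   # two bounces return toward the near wall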
Server name: %s\", guild.name)\n\n\n@bot.event\nasync def on_voice_state_update(member, _, after):\n \"\"\" Event handler to track squad stats on voice channel join \"\"\"\n if not in_fortnite_role(member) or \\\n not has_joined_fortnite_voice_channel(after) or \\\n not is_first_joiner_of_channel(after):\n return\n\n ctx, silent = await interactions.send_track_question_and_wait(\n bot,\n member.display_name)\n\n await track(ctx, silent)\n\ndef in_fortnite_role(member):\n \"\"\" Return True if the member is part of the \"fortnite\"\n Discord role, otherwise False\n \"\"\"\n return any(x.name == FORTNITE_DISCORD_ROLE for x in member.roles)\n\n\ndef has_joined_fortnite_voice_channel(voice_state):\n \"\"\" Return True if the channel joined is the Fortnite\n voice chat\n \"\"\"\n return voice_state.channel is not None \\\n and voice_state.channel.name == FORTNITE_DISCORD_VOICE_CHANNEL_NAME\n\n\ndef is_first_joiner_of_channel(voice_state):\n \"\"\" Return True if the member is the only person in the\n voice channel, otherwise False\n \"\"\"\n return len(voice_state.channel.members) > 0\n\n\n@bot.command(name=commands.HELP_COMMAND, help=commands.HELP_DESCRIPTION,\n aliases=commands.HELP_ALIASES)\nasync def help(ctx):\n \"\"\" Lists available commands \"\"\"\n interactions.send_commands_list(ctx)\n\n\n@bot.command(name=commands.PLAYER_SEARCH_COMMAND, help=commands.PLAYER_SEARCH_DESCRIPTION,\n aliases=commands.PLAYER_SEARCH_ALIASES)\nasync def player_search(ctx, *player_name, silent=False):\n \"\"\" Searches for a player's stats, output to Discord, and log in database \"\"\"\n player_name = \" \".join(player_name)\n\n logger = get_logger_with_context(ctx)\n logger.info(\"Looking up stats for '%s' \", player_name)\n\n if not player_name:\n await ctx.send(\"Please specify an Epic username after the command, \"\n \"ex: `!hunted LigmaBalls12`\")\n return\n\n try:\n await fortnite_tracker.get_player_stats(ctx, player_name, silent)\n except Exception as e:\n logger.warning(e, exc_info=should_log_traceback(e))\n\n # Fortnite API stats are unnecessary in silent mode\n if silent:\n return\n\n logger.warning(f\"Falling back to Fortnite API for '{player_name}'..\")\n await fortnite_api.get_player_stats(ctx, player_name)\n\n\n@bot.command(name=commands.TRACK_COMMAND, help=commands.TRACK_DESCRIPTION,\n aliases=commands.TRACK_ALIASES)\nasync def track(ctx, silent=False):\n \"\"\" Tracks and logs the current stats of the squad players \"\"\"\n tasks = [player_search(ctx, username, silent=silent) for username in SQUAD_PLAYERS_LIST]\n await asyncio.gather(*tasks)\n\n\n@bot.command(name=commands.STATS_COMMAND, help=\"returns the stats based on parameters provided\")\nasync def stats_operations(ctx, *params):\n \"\"\" Outputs stats based on the parameters provided.\n Valid parameters are:\n 1. today\n - Stats diff of the squad players today\n 2. 
played, opponents, noobs, enemy\n - Stats of the players faced today\n \"\"\"\n logger = get_logger_with_context(ctx)\n params = list(params)\n\n command = params.pop(0) if params else None\n if not command:\n message = \"Please specify a command, ex: `!stats diff` or `!stats played`\"\n logger.warning(message)\n await ctx.send(message)\n return\n\n usernames = params or SQUAD_PLAYERS_LIST\n\n if command in commands.STATS_DIFF_COMMANDS:\n logger.info(f\"Querying stats diff today for {', '.join(usernames)}\")\n await stats_diff_today(ctx, usernames)\n elif command in commands.STATS_OPPONENTS_COMMANDS:\n logger.info(\"Querying opponent stats today\")\n await opponent_stats_today(ctx)\n else:\n await ctx.send(f\"Command provided '{command}' is not valid\")\n\n\nasync def stats_diff_today(ctx, usernames):\n \"\"\" Outputs the stats diff of the squad players today.\n Perform a silent update of the player stats in the database first\n \"\"\"\n update_tasks = []\n calculate_tasks = []\n\n for username in usernames:\n update_tasks.append(player_search(ctx, username, silent=True))\n calculate_tasks.append(stats.get_stats_diff_today(ctx, username))\n\n await asyncio.gather(*update_tasks)\n await asyncio.gather(*calculate_tasks)\n\n\nasync def opponent_stats_today(ctx):\n \"\"\" Outputs the stats of the players faced today \"\"\"\n # TODO: Wrap this up\n res = await stats.get_opponent_stats_today()\n print(res)\n\n\ndef should_log_traceback(e):\n \"\"\" Returns True if a traceback should be logged,\n otherwise False\n \"\"\"\n # TODO: Change to subclass and check instance variable flag\n return e.__class__.__name__ not in (\"UserDoesNotExist\", \"NoSeasonDataError\")\n\n\ndef configure_logger():\n \"\"\" Abstract logger setup \"\"\"\n logging.root.setLevel(LOGGER_LEVEL)\n\n file_handler = TimedRotatingFileHandler(LOG_FILE_PATH, when=\"W0\", interval=7, backupCount=4)\n stream_handler = logging.StreamHandler()\n\n formatter = logging.Formatter(\"[%(asctime)s] %(levelname)s [%(name)s.%(funcName)s:%(lineno)d] [%(identifier)s] %(message)s\")\n file_handler.setFormatter(formatter)\n stream_handler.setFormatter(formatter)\n\n logger = logging.getLogger(__name__)\n logger.addHandler(file_handler)\n logger.addHandler(stream_handler)\n\n return logger\n\n\ndef get_logger_with_context(ctx=None, identifier=None):\n \"\"\" Returns a LoggerAdapter with context \"\"\"\n if not identifier:\n server = ctx.guild.name\n author = ctx.author\n identifier = server + \":\" + str(author)\n\n extra = {\n \"identifier\" : identifier\n }\n return logging.LoggerAdapter(logging.getLogger(__name__), extra)\n\n\nlogger = configure_logger()\nbot.run(DISCORD_BOT_TOKEN)\n","sub_path":"fortnite_bot.py","file_name":"fortnite_bot.py","file_ext":"py","file_size_in_byte":7020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"154352532","text":"from utility.common import CommonUtility as cu\nimport random\n\nclass SystemUtil(cu):\n\n @classmethod\n def get_part_list(cls, func, count):\n\n def wrapper():\n result = random.choices(func(), k=count)\n return result\n return wrapper\n\n @classmethod\n def get_employee_name_from_db(cls, count):\n try:\n result = cls.db_fetch_all('select employee_name from employee;')\n li = []\n for i in result:\n li.append(i[0])\n less_li = random.choices(li, k=count)\n return less_li\n except:\n return ['赵本山']\n\n @classmethod\n def get_user_name_from_db(cls, count):\n try:\n result = cls.db_fetch_all('select name from system_user;')\n li = []\n for i in 
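One detail in the bot above: is_first_joiner_of_channel returns len(...) > 0, which is true whenever anyone is in the channel, while its docstring describes the member being the only person present. A version matching the docstring (and the "first joiner" name) would be:

def is_first_joiner_of_channel(voice_state):
    # exactly one member means the joiner is alone in the channel
    return len(voice_state.channel.members) == 1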
result:\n li.append(i[0])\n less_li = random.choices(li, k=count)\n return less_li\n except:\n return ['赵本山']\n\n @classmethod\n def get_employee_and_user_from_db(cls, count):\n try:\n sql = 'select employee_name,`name` from employee inner join system_user ' \\\n 'on employee.employee_id=system_user.employee_id;'\n result = cls.db_fetch_all(sql)\n less_li = random.choices(result, k=count)\n return less_li\n except:\n return tuple(('王丹', '齐杜娟'))\n\n @classmethod\n def get_random_letters_list(cls, count):\n from miles.tools import RandomTools\n li = []\n for i in range(count):\n li.append(RandomTools.create_random_letters(random.randint(3, 12)))\n return li\n\n @classmethod\n def get_all_roles_list(cls):\n try:\n result = cls.db_fetch_all('select name from system_role;')\n li = []\n for i in result:\n li.append(i[0])\n return li\n except:\n return ['超级管理员']\n\n @classmethod\n def get_all_dictionary_types_list(cls):\n try:\n result = cls.db_fetch_all('select dict_typename from dictionary_type;')\n li = []\n for i in result:\n li.append(i[0])\n return li\n except:\n return ['资源状态']\n\n\n\n\n\n\n\n\nif __name__ == '__main__':\n print(type(SystemUtil.get_db_table_count('employee')))","sub_path":"utility/systemUtil.py","file_name":"systemUtil.py","file_ext":"py","file_size_in_byte":2364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"165695243","text":"class Node:\n def __init__(self, value, next_node=None):\n self.value = value\n self.next_node = next_node\n\n def get_value(self):\n return self.value\n\n def get_next(self):\n return self.next_node\n\n def set_next(self, new_next):\n self.next_node = new_next\n\n\nclass LinkedList:\n def __init__(self, head=None, tail=None):\n self.head = head\n self.tail = tail\n self.size = 0\n\n def add_to_head(self, value):\n new_node = Node(value)\n if self.head is None and self.tail is None:\n self.head = new_node\n self.tail = new_node\n else:\n new_node.set_next(self.head)\n self.head = new_node\n self.size += 1\n\n def remove_head(self):\n if self.head is None and self.tail is None:\n return None\n\n removed_node = self.head\n if self.head.get_next() == None:\n self.head = None\n self.tail = None\n else:\n self.head = removed_node.get_next()\n removed_node.set_next(None)\n self.size -= 1\n return removed_node.get_value()\n\n def add_to_tail(self, value):\n new_node = Node(value)\n if self.head is None and self.tail is None:\n self.head = new_node\n self.tail = new_node\n else:\n self.tail.set_next(new_node)\n self.tail = new_node\n self.size += 1\n\n def remove_tail(self):\n if self.head is None and self.tail is None:\n return None\n\n elif self.size == 1:\n removed_node = self.tail.get_value()\n self.head = None\n self.tail = None\n self.size = 0\n return removed_node\n\n else:\n previous_node = None\n current_node = self.head\n while current_node:\n if current_node.value == self.tail.value:\n self.tail = previous_node\n previous_node.set_next(None)\n self.size -= 1\n return current_node.value\n else:\n previous_node = current_node\n current_node = current_node.get_next()\n\n def contains(self, value):\n current_node = self.head\n\n while current_node:\n if current_node.get_value() == value:\n return True\n else:\n current_node = current_node.get_next()\n return False\n\n def get_max(self):\n if self.head is None:\n return None\n\n max_value = 0\n current_node = self.head\n\n while current_node:\n if current_node.get_value() > max_value:\n max_value = current_node.get_value()\n current_node = 
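The helpers above draw names with random.choices(li, k=count), which samples with replacement, so the same employee can appear twice in one draw. When distinct names are wanted, random.sample is the stdlib alternative (it requires count <= len(li)); a quick illustration with a hypothetical pool:

import random

names = ['ann', 'bo', 'cy', 'di']          # hypothetical pool
with_repeats = random.choices(names, k=3)  # entries may repeat
distinct = random.sample(names, k=3)       # entries never repeat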
current_node.get_next()\n\n return max_value\n\n def get_middle(self):\n if self.head is None:\n return None\n\n mid = self.head\n end = self.head\n\n while end is not None and end.get_next() is not None:\n mid = mid.get_next()\n end = end.get_next().get_next()\n return mid.get_value()\n\n def add_to_middle(self, value):\n new_node = Node(value)\n if self.head is None:\n self.head = new_node\n self.tail = new_node\n else:\n mid = self.head\n end = self.head\n\n while end is not None and end.get_next() is not None:\n mid = mid.get_next()\n end = end.get_next().get_next()\n new_node.set_next(mid.get_next())\n mid.set_next(new_node)\n self.size += 1\n\n def print(self):\n current = self.head\n while current:\n print(current.value)\n current = current.next_node\n\n def reverse(self):\n current = self.head\n self.head = self.tail\n self.tail = current\n\n prev = None\n next = None\n while current:\n next = current.get_next()\n current.set_next(prev)\n prev = current\n current = next\n\n\nl = LinkedList()\nl.add_to_head(1)\nl.add_to_tail(2)\nl.add_to_tail(3)\nl.add_to_tail(4)\nl.add_to_tail(5)\nl.add_to_middle(3)\nl.reverse()\nl.print()\n","sub_path":"singly_linked_list/singly_linked_list.py","file_name":"singly_linked_list.py","file_ext":"py","file_size_in_byte":4206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"548367976","text":"from django import forms\nfrom django.db import models\nfrom django.utils import timezone\n\nfrom .models import Application, Company, WebDomain, Recruiter, Email, Task, EmailResponse, Offer\n\n__all__ = [\"TaskForm\", \"ApplicationForm\", \"CompanyForm\", \"RecruiterForm\", \"EmailResponseForm\", \"OfferForm\"]\n\n\nclass BaseForm(forms.ModelForm):\n def __init__(self, *args, **kwargs):\n super(BaseForm, self).__init__(*args, **kwargs)\n for field_name, field in self.fields.items():\n field.widget.attrs[\"class\"] = \"form-control\"\n\n\nclass TaskForm(BaseForm):\n class Meta:\n model = Task\n fields = \"__all__\"\n exclude = [\"creation_date\", \"completed\"]\n\n\nclass EmailResponseForm(BaseForm):\n class Meta:\n model = EmailResponse\n fields = \"__all__\"\n\n\nclass OfferForm(BaseForm):\n class Meta:\n model = Offer\n fields = \"__all__\"\n\n\nclass ApplicationForm(BaseForm):\n def clean_position_title(self):\n position_title = self.cleaned_data[\"position_title\"]\n return position_title.title()\n\n class Meta:\n model = Application\n fields = \"__all__\"\n\n\nclass CompanyForm(BaseForm):\n associated_web_domains = forms.CharField(max_length=200)\n\n def clean_associated_web_domains(self):\n domain = self.cleaned_data[\"associated_web_domains\"]\n query = WebDomain.objects.filter(name=domain)\n if query.exists():\n obj = query.get()\n else:\n obj = WebDomain(name=domain)\n obj.save()\n\n return [obj.pk]\n\n class Meta:\n model = Company\n fields = \"__all__\"\n\n\nclass RecruiterForm(BaseForm):\n email = forms.EmailField()\n\n def clean_email(self):\n email = self.cleaned_data['email']\n company = self.cleaned_data['company']\n\n local_part, domain = email.split(\"@\")\n\n query = Email.objects.filter(email_address=email)\n if query.exists():\n obj = query.get()\n else:\n if company.associated_web_domains.filter(name=domain).exists():\n d = WebDomain.objects.filter(name=domain).get()\n else:\n raise ValueError()\n obj = Email(email_address=email, domain=d)\n obj.save()\n\n return obj\n\n class Meta:\n model = Recruiter\n fields = ['first_name', 'last_name', 'company', 'email']\n exclude = 
(\"active_recruiter\",)\n","sub_path":"appman/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"291073134","text":"l, res, resf = [], \"[ \", \"[ \"\nn = int(input())\nfor i in range(n):\n item = input()\n l.append(int(item))\n res+= item+\" \"\nres+=\"]\"\npos = int(input())\nval = int(input())\nprint(res)\nif(pos>n):\n print(\"A posicao \"+str(pos)+\" estah fora do intervalo\")\nelse:\n l = l[:(pos)] + [val] + l[pos:]\n for j in range(n+1):\n resf += str(l[j])+\" \"\n resf+= \"]\"\n print(resf)","sub_path":"TheHuxley/Roteiro 3 - Listas/INSERT.py","file_name":"INSERT.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"122205445","text":"# coding=utf-8\nfrom selenium.webdriver.common.by import By\nfrom pages.base import BasePage\nfrom utils.utils import *\nfrom random import randint\nfrom time import sleep\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import *\n\nclass RelatInPage(BasePage):\n _title = \"RelatIn Page\"\n\n _add_client_button = (By.XPATH, \"//div[2]/div/div/div/div/button\")\n _client_first_name_field = (By.NAME, \"firstName\")\n _client_first_name_value = get_random_string(6)\n _client_last_name_field = (By.NAME, \"lastName\")\n _client_last_name_value = get_random_string(8)\n _client_phone_number_field = (By.NAME, \"phoneNumber\")\n _client_phone_number_value = get_random_integer(7)\n _client_email_field = (By.NAME, \"primaryEmail\")\n _client_email_value = get_random_string(7)+\"@\"+get_random_string(5)+\".pl\"\n _client_description_field = (By.XPATH, \"//textarea\")\n _client_description_value = get_random_string(4)+\" \"+get_random_string(5)+\" \"+get_random_string(7)\n _client_category_field = (By.NAME, \"category\")\n _client_category_value = get_random_string(6)\n _save_client_button = (By.XPATH, \"//div[2]/button\")\n _close_new_client_popup_button = (By.CSS_SELECTOR, \"button.close\")\n _added_client_first_name_field = (By.XPATH, \"//td/a\")\n _added_client_last_name_field = (By.XPATH, \"//td[2]/a\")\n _added_client_phone_number_field = (By.XPATH, \"//td[3]/a\")\n _added_client_email_field = (By.XPATH, \"//div[2]/div/div/table/tbody/tr/td[4]\")\n _added_client_category_field = (By.XPATH, \"//div[2]/div/div/table/tbody/tr/td[5]/span\")\n _remove_first_client_button = (By.XPATH, '//td[10]/button')\n\n def __init__(self, driver):\n super(RelatInPage, self).__init__(driver, self._title)\n\n def add_client(self):\n self.click(self._add_client_button, \"The add client button cannot be clicked or wasn't found on the relatIn page\")\n WebDriverWait(self.get_driver(), 15).until(EC.visibility_of_element_located(self._client_first_name_field), \"The client first name field on add client popup page didn't show, probably the popup didn't open\")\n self.click(self._client_first_name_field, \"While adding client - first name field couldn't be clicked or wasn't found on the relatIn page\")\n self.clear_field_and_send_keys(self._client_first_name_value, self._client_first_name_field, \"The client first name field on add client popup didn't show\")\n self.clear_field_and_send_keys(self._client_last_name_value, self._client_last_name_field, \"The client last name field on add client popup didn't show\")\n 
self.clear_field_and_send_keys(self._client_phone_number_value, self._client_phone_number_field, \"The client phone number field on add client popup didn't show\")\n self.clear_field_and_send_keys(self._client_email_value, self._client_email_field, \"The client email field on add client popup didn't show\")\n self.clear_field_and_send_keys(self._client_description_value, self._client_description_field, \"The client description field on add client popup didn't show\")\n self.clear_field_and_send_keys(self._client_category_value, self._client_category_field, \"The client category field on add client popup didn't show\")\n self.click(self._save_client_button, \"While adding client - save client button couldn't be clicked or wasn't found on the relatIn page\")\n self.click(self._close_new_client_popup_button, \"After adding client - close new client popup button couldn't be clicked or wasn't found on the relatIn page\")\n\n def remove_first_client(self):\n self.condition_click(self._remove_first_client_button, \"The remove first client button cannot be clicked or wasn't found on the relatIn page\")\n self.accept_alert()\n\n\n\n\n\n\n\n\n\n","sub_path":"pages/relatin_page.py","file_name":"relatin_page.py","file_ext":"py","file_size_in_byte":3835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"461970913","text":"from .data import sort_markers_by_type\n\nimport tensorflow as tf\nimport numpy as np\nfrom tensorflow.keras import backend as K\nfrom tensorflow.keras.layers import Layer\nfrom tensorflow.keras import activations, initializers, regularizers, constraints\nfrom tensorflow.keras.activations import softmax\nfrom tensorflow.keras.losses import categorical_crossentropy\nfrom tensorflow.keras.metrics import categorical_accuracy\nfrom tensorflow.keras.layers import InputSpec\n\n\ndef get_partially_dense_size(by_type):\n \"\"\"\n Returns the number of nodes in the partially dense layer.\n \"\"\"\n nodes = []\n for i, cell_type in enumerate(by_type):\n gene_n = len(by_type[cell_type])\n gene_n = round(np.log2(gene_n))\n nodes.append(gene_n)\n\n return int(np.sum(nodes))\n\n\ndef get_partially_dense_mask(by_cell_type, genes):\n \"\"\"\n Creates a binary mask for the partially dense layer.\n \"\"\"\n node_dim = get_partially_dense_size(by_cell_type)\n mask = np.zeros(shape=(len(genes), node_dim))\n\n i = 0\n for cell_type in by_cell_type:\n marker_genes = by_cell_type[cell_type]\n N = len(marker_genes)\n n = int(round(np.log2(N)))\n\n for node in range(n):\n for gene in by_cell_type[cell_type]:\n gene_index = genes.get_loc(gene)\n mask[gene_index][i] = 1.0\n i += 1\n\n return mask\n\n\ndef get_marker_mask(by_cell_type):\n \"\"\"\n Creates a binary mask for the marker layer.\n :param by_cell_type: Markers sorted by cell type\n \"\"\"\n node_dim = get_partially_dense_size(by_cell_type)\n mask = np.zeros(shape=(node_dim, len(by_cell_type)))\n\n i = 0\n for c, cell_type in enumerate(by_cell_type):\n marker_genes = by_cell_type[cell_type]\n N = len(marker_genes)\n n = int(round(np.log2(N)))\n\n for node in range(n):\n mask[i][c] = 1.0\n i += 1\n return mask\n\n\ndef get_weight_mask(shape, by_cell_type, genes):\n \"\"\"\n Deprecated.\n Creates a binary mask for the marker layer\n :param shape: shape of the matrix (n of cell types, n of genes)\n :param by_cell_type: Markers sorted by cell type\n :param genes: list of used genes (in the same order as in the data)\n \"\"\"\n mask = np.zeros(shape=shape)\n for i, cell_type in enumerate(by_cell_type):\n 
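In the marker-layer module that begins above, get_partially_dense_size allocates round(log2(N)) hidden nodes per cell type, where N is that type's marker-gene count. A tiny illustration of that sizing rule with hypothetical marker counts:

import numpy as np

# hypothetical marker counts per cell type -> nodes allocated per type
marker_counts = {'tcell': 16, 'bcell': 9, 'nk': 4}
nodes = {t: int(round(np.log2(n))) for t, n in marker_counts.items()}
assert nodes == {'tcell': 4, 'bcell': 3, 'nk': 2}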
for gene in by_cell_type[cell_type]:\n gene_index = genes.get_loc(gene)\n mask[i][gene_index] = 1.0\n return mask\n\n\ndef one_hot_encode(labels, markers, aliases):\n \"\"\"\n One hot encodes a list of marker cell types\n :param labels: Used labels\n :param markers: Used markers\n :param aliases: Cell type aliases (saly.check_labels)\n \"\"\"\n by_type = sort_markers_by_type(markers)\n types = list(by_type.keys())\n\n one_hot = np.zeros(shape=(len(labels), len(by_type)))\n for i, label in enumerate(labels):\n if label == -1:\n one_hot[i] = np.repeat(-1, len(by_type))\n else:\n if label in types:\n label_index = types.index(label)\n elif label in aliases.keys():\n label_index = types.index(aliases[label])\n else:\n raise NameError(\"Unknown cell type!\", label)\n \n one_hot[i][label_index] = 1.0\n\n return one_hot\n\n\ndef marker_loss(y_true, y_pred):\n \"\"\"\n Get the marker cell type activations classification loss\n \"\"\"\n probs = softmax(y_pred)\n return categorical_crossentropy(y_true, probs)\n\n\ndef null_loss(y_true, y_pred):\n \"\"\"\n An empty loss function.\n \"\"\"\n return 0 * y_true\n\n\ndef celltype_accuracy(y_true, y_pred): \n return categorical_accuracy(y_true, y_pred)\n\n\nclass Partial(Layer):\n\n def __init__(self, units, weight_mask,\n activation=None,\n use_bias=True,\n kernel_initializer='glorot_uniform',\n bias_initializer='zeros',\n kernel_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n bias_constraint=None,\n **kwargs):\n if 'input_shape' not in kwargs and 'input_dim' in kwargs:\n kwargs['input_shape'] = (kwargs.pop('input_dim'),)\n super(Partial, self).__init__(**kwargs)\n self.units = units\n self.activation = activations.get(activation)\n self.use_bias = use_bias\n self.kernel_initializer = initializers.get(kernel_initializer)\n self.bias_initializer = initializers.get(bias_initializer)\n self.kernel_regularizer = regularizers.get(kernel_regularizer)\n self.bias_regularizer = regularizers.get(bias_regularizer)\n self.activity_regularizer = regularizers.get(activity_regularizer)\n self.kernel_constraint = constraints.get(kernel_constraint)\n self.bias_constraint = constraints.get(bias_constraint)\n self.input_spec = InputSpec(min_ndim=2)\n self.supports_masking = True\n\n weight_mask = tf.convert_to_tensor(weight_mask, dtype=tf.float32)\n self.weight_mask = weight_mask\n\n def build(self, input_shape):\n assert len(input_shape) >= 2\n input_dim = input_shape[-1]\n\n self.kernel = self.add_weight(shape=(input_dim, self.units),\n initializer=self.kernel_initializer,\n name='kernel',\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint)\n if self.use_bias:\n self.bias = self.add_weight(shape=(self.units,),\n initializer=self.bias_initializer,\n name='bias',\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint)\n else:\n self.bias = None\n self.input_spec = InputSpec(min_ndim=2, axes={-1: input_dim})\n self.built = True\n\n def call(self, inputs):\n conns = tf.multiply(self.kernel, self.weight_mask)\n output = K.dot(inputs, conns)\n if self.use_bias:\n output = K.bias_add(output, self.bias, data_format='channels_last')\n if self.activation is not None:\n output = self.activation(output)\n return output\n\n def compute_output_shape(self, input_shape):\n assert input_shape and len(input_shape) >= 2\n assert input_shape[-1]\n output_shape = list(input_shape)\n output_shape[-1] = self.units\n return tuple(output_shape)\n\n def get_config(self):\n config = {\n 'units': 
self.units,\n 'activation': activations.serialize(self.activation),\n 'use_bias': self.use_bias,\n 'kernel_initializer': initializers.serialize(self.kernel_initializer),\n 'bias_initializer': initializers.serialize(self.bias_initializer),\n 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),\n 'bias_regularizer': regularizers.serialize(self.bias_regularizer),\n 'activity_regularizer':\n regularizers.serialize(self.activity_regularizer),\n 'kernel_constraint': constraints.serialize(self.kernel_constraint),\n 'bias_constraint': constraints.serialize(self.bias_constraint)\n }\n base_config = super(Partial, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n","sub_path":"saly/backend/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":7550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"188509651","text":"import os\r\nimport subprocess\r\nimport multiprocessing\r\nimport json\r\nimport argparse\r\n\r\nTIMEOUT_VALUE = 2000 # in seconds\r\n\r\nsolvers = ['GUROBI', 'LTIU', 'CLINGO', 'CP', 'MIP', 'GA']\r\n\r\n\r\ndef ASP_inputConverter(inputFile):\r\n # takes the inputFile(as path) and converts it into the input format for ASP solver.\r\n # the converted input will be written in a file called input_ASP_Version.lp\r\n # So, for each input file we will change this file only for ASP instead of creating new files for each input.\r\n\r\n f = open(inputFile)\r\n output_str = ''\r\n lines = f.read().split('\\n')\r\n f.close()\r\n m_size = int(lines[1])\r\n w_size = int(lines[2])\r\n\r\n output_str += 'man(1..{}).\\n'.format(m_size)\r\n output_str += 'woman(1..{}).\\n'.format(w_size)\r\n\r\n for line in lines[3:m_size + 3]:\r\n m = line.split(' ')[0]\r\n cnt = 1\r\n for group in line.split(' (')[1:]:\r\n gr = group.replace(')', '')\r\n if gr != '':\r\n for el in gr.split(' '):\r\n if el != '':\r\n output_str += 'mrank({},{},{}).\\n'.format(m, el, cnt)\r\n cnt += 1\r\n\r\n for line in lines[m_size + 3:m_size + w_size + 3]:\r\n w = line.split(' ')[0]\r\n cnt = 1\r\n for group in line.split(' (')[1:]:\r\n gr = group.replace(')', '')\r\n if gr != '':\r\n for el in gr.split(' '):\r\n if el != '':\r\n output_str += 'wrank({},{},{}).\\n'.format(w, el, cnt)\r\n cnt += 1\r\n\r\n f = open('input_ASP.lp', 'w')\r\n f.write(output_str)\r\n f.close()\r\n\r\n\r\ndef timeout(func, command, timeoutValue):\r\n manager = multiprocessing.Manager()\r\n return_dict = manager.dict()\r\n process = multiprocessing.Process(target=func, args=[command, return_dict])\r\n process.start()\r\n process.join(timeout=timeoutValue)\r\n\r\n if process.is_alive(): # TIMEOUT VALUE IS REACHED AND PROCESS IS STILL WORKING\r\n process.terminate()\r\n return False\r\n else: # PROCESS IS FINISHED\r\n return return_dict.values()[0]\r\n\r\n\r\ndef run_SMTI_Solver(command, return_dict):\r\n # subPro = subprocess.run(command, shell=True, capture_output=True, text=True)\r\n subPro = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\r\n # return subPro\r\n return_dict[0] = subPro\r\n\r\n\r\ndef solve(root, inputFile, outputFilesPath, dictKey, solverType):\r\n if solverType == 1:\r\n cmd = \"python3 Gurobi/MILP_Gurobi.py -f {}\".format(os.path.join(root, inputFile))\r\n elif solverType == 2:\r\n cmd = \"python3 LTIU/LTIU.py \" + os.path.join(root, inputFile)\r\n elif solverType == 3:\r\n cmd = \"clingo -V Clingo/smti.lp Clingo/maxcardinality.lp input_ASP.lp --stats\"\r\n 
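The core trick of the Partial layer above is tf.multiply(self.kernel, self.weight_mask) before the matrix product: a zero in the binary mask severs that input-to-node connection, so activations and gradients flow only along the marker-defined edges. A small numpy illustration of the same masked product:

import numpy as np

kernel = np.array([[0.5, -1.0],
                   [2.0,  0.3]])
mask = np.array([[1.0, 0.0],   # node 1 may not read input feature 0
                 [1.0, 1.0]])
x = np.array([[1.0, 1.0]])     # one sample, two input features

out = x @ (kernel * mask)      # mirrors tf.multiply followed by K.dot in call()
assert np.allclose(out, [[2.5, 0.3]])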
ASP_inputConverter(os.path.join(root, inputFile))\r\n elif solverType == 4:\r\n cmd = \"python OR-Tools/OR-Tools_CP-SAT.py \" + os.path.join(root, inputFile)\r\n elif solverType == 5:\r\n cmd = \"python OR-Tools/OR-Tools_MIP.py \" + os.path.join(root, inputFile)\r\n elif solverType == 6:\r\n cmd = \"python GA/matching_ga.py \" + os.path.join(root, inputFile)\r\n\r\n subPro = timeout(func=run_SMTI_Solver, command=cmd, timeoutValue=TIMEOUT_VALUE)\r\n\r\n if subPro is False: # then the process of gurobi solver is terminated because timeout is being reached\r\n # print(\"A process is terminated due to timeout.\")\r\n # Writing the output to the file\r\n outputFileName = inputFile.replace(\"input\", \"output\")[:-4] + \"_{}.txt\".format(solvers[solverType - 1])\r\n outputFile = open(os.path.join(outputFilesPath, outputFileName), \"w\")\r\n outputFile.write(\"Solver reached to a timeout limit.\")\r\n outputFile.close()\r\n\r\n else: # Process is finished. subPro has a value (which has the stdout of the solver)\r\n # So gurobiSolver will print to console(stdout) ... TotalTime: 112s \\n NumberOfExpandedNode: 10 \\n ...\r\n processOutput = subPro.stdout.decode('utf-8')\r\n # the stdout of gurobi will contain license information in the first 2 lines runtime in 3rd, iteration number in 4th and explored nodes in 5th\r\n processOutput = processOutput.split(\"\\n\", 2)[2] # Getting rid of Gurobi information in the begining of the output.\r\n \r\n # Writing the output to the file\r\n outputFileName = inputFile.replace(\"input\", \"output\")[:-4] + \"_{}.txt\".format(solvers[solverType - 1])\r\n outputFile = open(os.path.join(outputFilesPath, outputFileName), \"w\")\r\n outputFile.write(processOutput)\r\n print(outputFileName)\r\n outputFile.close()\r\n\r\ndef main():\r\n argparser = argparse.ArgumentParser()\r\n\r\n argparser.add_argument('--solverType', '-sT', metavar='', help='Specify the solver you want to run(default will run them all)', type=int, default=-1, choices=[1, 2, 3, 4, 5, 6])\r\n # --solverType = 1 -> Gurobi will run\r\n # --solverType = 2 -> Local Search(LTIU) will run\r\n # --solverType = 3 -> ASP will run\r\n # --solverType = 4 -> OR-Tools CP_SAT will run\r\n # --solverType = 5 -> OR-Tools MIP will run\r\n # --solverType = 6 -> Genetic Algorithm will run\r\n # --solverType = -1 -> All of the solvers will run\r\n\r\n argparser.add_argument('--size', '-s', metavar='', help='Specify the size of the benchmark instances', type=int, default=-1, choices=[50,100])\r\n args = argparser.parse_args()\r\n selectedSolver = args.solverType\r\n size = args.size\r\n\r\n PATH_TO_INPUT_FILES = r\"benchmark-instances-{}\".format(size) # assume that this directory contains only input samples as .txt files\r\n PATH_TO_OUTPUT_FILES = r\"OUTPUT\"\r\n\r\n for root, dirs, files in os.walk(PATH_TO_INPUT_FILES):\r\n # root is the path of where the search takes place\r\n # dirs is the list of subdirectories inside the root.\r\n # files is the list of files inside the root\r\n # So, (for our case) a directory which contains only .txt files\r\n # -> root = PATH_TO_INPUT_FILES\r\n # -> dirs = []\r\n # -> files = [input1.txt, input2.txt, ....]\r\n for inputFile in files:\r\n # # parse the input file to get \"instance size\", \"p1\" and \"p2\" combination in order to obtain the dict key\r\n instance_size = inputFile[inputFile.find(\"s-\") + 2:inputFile.find(\"--i\")]\r\n p1 = inputFile[inputFile.find(\"--i-\") + 4:inputFile.find(\"pc-t\")]\r\n p2 = inputFile[inputFile.find(\"-t-\") + 3:inputFile.find(\"pc--\")]\r\n\r\n 
Dictionary_Key = instance_size + \"_\" + p1 + \"_\" + p2\r\n \r\n if selectedSolver == -1:\r\n solve(root, inputFile, PATH_TO_OUTPUT_FILES, Dictionary_Key, 1)\r\n solve(root, inputFile, PATH_TO_OUTPUT_FILES, Dictionary_Key, 2)\r\n solve(root, inputFile, PATH_TO_OUTPUT_FILES, Dictionary_Key, 3)\r\n solve(root, inputFile, PATH_TO_OUTPUT_FILES, Dictionary_Key, 4)\r\n solve(root, inputFile, PATH_TO_OUTPUT_FILES, Dictionary_Key, 5)\r\n solve(root, inputFile, PATH_TO_OUTPUT_FILES, Dictionary_Key, 6)\r\n elif selectedSolver != 6:\r\n solve(root, inputFile, PATH_TO_OUTPUT_FILES, Dictionary_Key, selectedSolver)\r\n else:\r\n try:\r\n solve(root, inputFile, PATH_TO_OUTPUT_FILES, Dictionary_Key, 6)\r\n except:\r\n print(\"A problem occured in file:\", inputFile)\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n\r\n","sub_path":"run_maxcard_experiments.py","file_name":"run_maxcard_experiments.py","file_ext":"py","file_size_in_byte":7478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"597088091","text":"# Software Carpentry Final Project\n# Lincoln Kartchner\n# transform.py\n'''\nThis script takes care of the math behind the\nface averaging. It contains seven different functions:\n similarity_transform\n rectContains\n calculateDelaunayTriangles\n constrainPoint\n applyAffineTransform\n warpTriangle\n image_transform\n\nSources:\n 1. https://docs.opencv.org/3.4/d4/d61/tutorial_warp_affine.html\n 2. https://github.com/spmallick/learnopencv/tree/master/FaceAverage\n 3. stack exchange\n'''\nimport math\nimport cv2\nimport numpy as np\n\n\ndef similarity_transform(inPoints, outPoints):\n \"\"\" similarity_transform takes in a set of input points\n and a set of output points and finds an affine transformation\n between the two. cv2.estimateAffinePartial2D requires\n a two sets of three coordinates in order to find an\n affine transformation between them. 
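The experiment runner above builds a multiprocessing wrapper to bound each solver run. When only the child process needs a time limit, subprocess.run has a built-in timeout; a hedged sketch that mirrors the wrapper's convention of returning False on timeout (note that with shell=True the timeout kills the shell, so grandchild processes may linger):

import subprocess

def run_with_timeout(command, timeout_s):
    try:
        return subprocess.run(command, shell=True, stdout=subprocess.PIPE,
                              stderr=subprocess.STDOUT, timeout=timeout_s)
    except subprocess.TimeoutExpired:
        return False  # same signal the multiprocessing wrapper uses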
similarity_transform will\n take two sets of two coordinates and find the corresponding\n third coordinate by finding a point that forms an equilateral\n triangle with the original two points.\n\n For more information look at:\n https://docs.opencv.org/3.4/d4/d61/tutorial_warp_affine.html\n\n Source:\n https://github.com/spmallick/learnopencv/tree/master/FaceAverage\n\n **Parameters**\n\n inPoints: list\n A list of tuples, in this case corresponding to specific\n image coordinate pairs\n outPoints: list\n A list of tuples, in this case corresponding to specific\n image coordinate pairs\n\n **Returns**\n\n tform[0]: array\n A 2 x 3 matrix corresponding to the optimal affine\n transformation between the two sets of points.\n \"\"\"\n s60 = math.sin(60*math.pi/180)\n c60 = math.cos(60*math.pi/180)\n inPts = np.copy(inPoints).tolist()\n outPts = np.copy(outPoints).tolist()\n xin = c60*(inPts[0][0] - inPts[1][0]) - s60*(inPts[0][1] - inPts[1][1]) + inPts[1][0]\n yin = s60*(inPts[0][0] - inPts[1][0]) + c60*(inPts[0][1] - inPts[1][1]) + inPts[1][1]\n inPts.append([np.int(xin), np.int(yin)])\n xout = c60*(outPts[0][0] - outPts[1][0]) - s60*(outPts[0][1] - outPts[1][1]) + outPts[1][0]\n yout = s60*(outPts[0][0] - outPts[1][0]) + c60*(outPts[0][1] - outPts[1][1]) + outPts[1][1]\n outPts.append([np.int(xout), np.int(yout)])\n tform = cv2.estimateAffinePartial2D(np.array([inPts]), np.array([outPts]))\n return tform[0]\n\n\ndef rectContains(rect, point):\n \"\"\" rectContains checks if a\n rectangle contains a given point.\n\n Source:\n https://github.com/spmallick/learnopencv/tree/master/FaceAverage\n\n **Parameters**\n rect: tuple\n A tuple containing the four points\n corresponding to the four corners of the\n rectangle.\n\n point: tuple\n A tuple corresponding to the coordinates\n of the given point\n\n **Returns**\n True if point is in rectangle\n False otherwise\n \"\"\"\n if point[0] < rect[0]:\n return False\n elif point[1] < rect[1]:\n return False\n elif point[0] > rect[2]:\n return False\n elif point[1] > rect[3]:\n return False\n return True\n\n\ndef calculateDelaunayTriangles(points, width=600, height=600):\n \"\"\"calculateDelaunayTriangles finds the\n Delaunay Triangulation of a set of points. The\n Delaunay Triangluation is a method of triangulation\n in which for a given set of P points, DT triangles are\n created so that no point in P is within the circumcircle\n of any DT.\n\n Source:\n https://github.com/spmallick/learnopencv/tree/master/FaceAverage\n\n **Parameters**\n points: list\n A list of tuples corresponding to the\n coordinates of specific image landmarks in a given image\n width: int\n The desired ouput image width. Default is 600.\n height: int\n The desired output image height. 
Default is 600.\n\n    **Returns**\n    delaunayTri: list\n        A list of tuples corresponding to the triangles\n        from any Delaunay Triangulation of a given set\n        of image landmarks.\n    \"\"\"\n    rect = (0, 0, width, height)\n    subdiv = cv2.Subdiv2D(rect)\n\n    for p in points:\n        subdiv.insert((p[0], p[1]))\n\n    triangleList = subdiv.getTriangleList()\n\n    delaunayTri = []\n    for t in triangleList:\n        pt = []\n        pt.append((t[0], t[1]))\n        pt.append((t[2], t[3]))\n        pt.append((t[4], t[5]))\n        pt1 = (t[0], t[1])\n        pt2 = (t[2], t[3])\n        pt3 = (t[4], t[5])\n        if rectContains(rect, pt1) and rectContains(rect, pt2) and rectContains(rect, pt3):\n            ind = []\n            for j in range(0, 3):\n                for k in range(0, len(points)):\n                    if(abs(pt[j][0] - points[k][0]) < 1.0 and abs(pt[j][1] - points[k][1]) < 1.0):\n                        ind.append(k)\n            if len(ind) == 3:\n                delaunayTri.append((ind[0], ind[1], ind[2]))\n    return delaunayTri\n\n\ndef constrainPoint(p, width, height):\n    \"\"\"constrainPoint constrains a given\n    point to be within specified dimensions\n\n    Source:\n        https://github.com/spmallick/learnopencv/tree/master/FaceAverage\n\n    **Parameters**\n    p: tuple\n        A tuple corresponding to a point coordinate\n    width: int\n        The width the point is constrained to.\n    height: int\n        The height the point is constrained to.\n\n    **Returns**\n    p: tuple\n        The constrained point, now within the image boundaries\n    \"\"\"\n    p = (min(max(p[0], 0), width - 1), min(max(p[1], 0), height - 1))\n    return p\n\n\ndef applyAffineTransform(src, srcTri, dstTri, size):\n    \"\"\"applyAffineTransform takes in a source image,\n    and a source triangle, and then finds the affine\n    transformation between the source triangle and some\n    target triangle.\n\n    Sources:\n        https://github.com/spmallick/learnopencv/tree/master/FaceAverage\n        https://docs.opencv.org/3.4/d4/d61/tutorial_warp_affine.html\n\n    **Parameters**\n    src: numpy array\n        The source image patch to warp\n    srcTri: list\n        A list of tuples corresponding to the source\n        triangle vertices\n    dstTri: list\n        A list of tuples corresponding to the target\n        triangle vertices\n    size: tuple\n        A tuple corresponding to the size of the target\n        image\n\n    **Returns**\n    dst: numpy array\n        A numpy array of the warped image\n    \"\"\"\n    # Given a pair of triangles, find the affine transform.\n    warpMat = cv2.getAffineTransform(np.float32(srcTri), np.float32(dstTri))\n    # Apply the Affine Transform just found to the src image\n    dst = cv2.warpAffine(src, warpMat, (size[0], size[1]), None, flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT_101)\n    return dst\n\n\ndef warpTriangle(img1, img2, t1, t2):\n    \"\"\" warpTriangle takes care of matching\n    warped triangles between sets of images\n\n    Source:\n        https://github.com/spmallick/learnopencv/tree/master/FaceAverage\n\n    **Parameters**\n    img1: numpy array\n        A numpy array of the source image\n    img2: numpy array\n        A numpy array of the target image\n    t1: list\n        A list of tuples corresponding to\n        specific points in the source image\n    t2: list\n        A list of tuples corresponding to\n        specific points in the target image\n\n    **Returns**\n\n    None\n    \"\"\"\n    # Find bounding rectangle for each triangle\n    r1 = cv2.boundingRect(np.float32([t1]))\n    r2 = cv2.boundingRect(np.float32([t2]))\n\n    # Offset points by left top corner of the respective rectangles\n    t1Rect = []\n    t2Rect = []\n    t2RectInt = []\n\n    for i in range(0, 3):\n        t1Rect.append(((t1[i][0] - r1[0]), (t1[i][1] - r1[1])))\n        t2Rect.append(((t2[i][0] - r2[0]), (t2[i][1] - r2[1])))\n        t2RectInt.append(((t2[i][0] - r2[0]), (t2[i][1] - r2[1])))\n\n    # Get mask by filling triangle\n    mask = 
np.zeros((r2[3], r2[2], 3), dtype=np.float32)\n    cv2.fillConvexPoly(mask, np.int32(t2RectInt), (1.0, 1.0, 1.0), 16, 0)\n\n    # Apply warpImage to small rectangular patches\n    img1Rect = img1[r1[1]:r1[1] + r1[3], r1[0]:r1[0] + r1[2]]\n    size = (r2[2], r2[3])\n    img2Rect = applyAffineTransform(img1Rect, t1Rect, t2Rect, size)\n    img2Rect = img2Rect * mask\n    # Copy triangular region of the rectangular patch to the output image\n    img2[r2[1]:r2[1]+r2[3], r2[0]:r2[0]+r2[2]] = img2[r2[1]:r2[1]+r2[3], r2[0]:r2[0]+r2[2]] * ((1.0, 1.0, 1.0) - mask)\n    img2[r2[1]:r2[1]+r2[3], r2[0]:r2[0]+r2[2]] = img2[r2[1]:r2[1]+r2[3], r2[0]:r2[0]+r2[2]] + img2Rect\n\n\ndef image_transform(scaled_images, pointsNorm, pointsAvg, dt, width=600, height=600):\n    \"\"\"image_transform uses the helper functions above\n    to actually transform specific images to a target space\n\n    Modified from:\n        https://github.com/spmallick/learnopencv/tree/master/FaceAverage\n\n    **Parameters**\n    scaled_images: list\n        A list of numpy arrays holding the input images\n        scaled to a common space\n    pointsNorm: list\n        A list of tuples corresponding to the\n        norm of all the coordinates of facial landmarks\n        from all the images in the original filepath\n    pointsAvg: list\n        A list of tuples corresponding to the\n        average of all the coordinates of facial landmarks\n        from all the images in the original filepath\n    dt: list\n        A list of tuples corresponding to the triangles\n        from any Delaunay Triangulation of a given set\n        of image landmarks.\n    width: int\n        The desired output image width. Default is 600.\n    height: int\n        The desired output image height. Default is 600.\n\n    **Returns**\n    output: numpy array\n        A numpy array corresponding to the final\n        face average image\n    \"\"\"\n    output = np.zeros((height, width, 3), np.float32)\n    # Warp input images to average image landmarks\n    for i in range(0, len(scaled_images)):\n        img = np.zeros((height, width, 3), np.float32)\n        # Transform triangles one by one\n        for j in range(0, len(dt)):\n            tin = []\n            tout = []\n            for k in range(0, 3):\n                pIn = pointsNorm[i][dt[j][k]]\n                pIn = constrainPoint(pIn, width, height)\n                pOut = pointsAvg[dt[j][k]]\n                pOut = constrainPoint(pOut, width, height)\n                tin.append(pIn)\n                tout.append(pOut)\n            warpTriangle(scaled_images[i], img, tin, tout)\n        # Add image intensities for averaging\n        output += img\n    # Divide by number of images to get average\n    output = output / len(scaled_images)\n    return output\n\n\nif __name__ == '__main__':\n    pass\n","sub_path":"gui_imp/transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":10671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"276640087","text":"import nn_model as nn\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import cbook\nfrom matplotlib import cm\nfrom matplotlib.colors import LightSource\nimport itertools\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport csv, json\n\nRESULT_FILE = \"5m_rms.csv\"\nJSON_FILE = \"nn_5m.json\"\n\nclass Optimizer():\n    def __init__(self):\n        self.net = nn.Network()\n        self.results = []\n\n    def test_net(self, variables, record = True):\n        \"\"\"\n        Function tests network through the various variables\n        NOTE: Input length, number of layers cannot be changed\n        \"\"\"\n\n        ## Load data for training and prediction\n        with open(JSON_FILE) as f:\n            data = json.loads(f.read())\n        in_data = np.asarray([x[0] for x 
in data])\n        out_data = np.asarray([x[1] for x in data])\n\n        ## Cycle through every call, and record percentage result for each call\n        vars_list = list(variables.values())\n        vars_keys = list(variables.keys())\n        vars_comb = list(itertools.product(*vars_list))\n        print(\"Number of calls: {}\".format(len(vars_comb)))\n\n        for comb in vars_comb:\n            ## For each call, set vars in nn_model to current vars\n            for i in range(len(comb)):\n                nn.variables[vars_keys[i]] = comb[i]\n            ## Train and collect prediction for each combination\n            self.net.train(in_data, out_data)\n            rate = self.net.predict(in_data, out_data)\n            ## Copy the settings dict so each run is recorded separately\n            ## instead of appending the same nn.variables reference every time\n            result_list = dict(nn.variables)\n            result_list[\"rate\"] = rate\n            self.results.append(result_list)\n\n        ## Save results in file (newline='' avoids blank rows on Windows)\n        fields = vars_keys + [\"rate\"]\n        with open(RESULT_FILE, \"w\", newline=\"\") as f:\n            writer = csv.DictWriter(f, delimiter=\",\", fieldnames=fields)\n            writer.writeheader()\n            for line in self.results:\n                writer.writerow(line)\n\nif __name__ == \"__main__\":\n    opt = Optimizer()\n    opt.test_net({\"train_epochs\":[30], \n                  \"batch_size\":[32],\n                  \"lstm_units\":[[200,150]], \n##                \"dropout_rate\":[[0.2, 0.2]],\n                  \"activation_final\":[\"sigmoid\"],\n                  \"loss_function\":[\"binary_crossentropy\"],\n                  \"learning_rate\":[100.0]})\n\n\n","sub_path":"nn_optimizer.py","file_name":"nn_optimizer.py","file_ext":"py","file_size_in_byte":2378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"364092016","text":"\n\n#class header\nclass _TOO():\n\tdef __init__(self): \n\t\tself.name = \"TOO\"\n\t\tself.definitions = [u'more than is needed or wanted; more than is suitable or enough: ', u'used before an adjective or adverb to emphasize a negative meaning: ', u'used before an adjective to emphasize a positive meaning: ', u'(especially at the end of a sentence) in addition, also: ', u'used to show surprise: ', u'very, or completely: ', u'used to emphasize a positive answer to a negative statement: ']\n\n\t\tself.parents = []\n\t\tself.children = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'adverbs'\n\n\n\tdef run(self, obj1, obj2):\n\t\tself.jsondata[obj2] = {}\n\t\tself.jsondata[obj2]['properties'] = self.name.lower()\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/adverbs/_too.py","file_name":"_too.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"631780782","text":"from copy import copy as copy\nimport numpy as np\nfrom functools import partial\nfrom hqca.state_tomography import *\n\ndef _newton_step(acse):\n    coeff_best = 0\n    rdm_best = copy(acse.Store.rdm)\n    e_best = copy(acse.e0)\n\n    testS = copy(acse.A)\n    max_val = 0\n    for s in testS.op:\n        if abs(s.c)>abs(max_val):\n            max_val = copy(s.c)\n    print('Maximum value: {:+.10f}'.format(max_val))\n    print('Running first point...')\n    e1,rdm1 = acse._test_acse_function([acse.delta],testS)\n    # \n    if e10.1:\n        e_best = copy(ef)\n        rdm_best = copy(df)\n        coeff_best = copy(coeff)\n        acse.Store.update(df)\n        print('Current: {:.10f}'.format(np.real(ef)))\n    def m_qk(s):\n        return acse.e0 + s*acse.grad+0.5*s*acse.hess*s\n    acse.tr_taylor = acse.e0-m_qk(coeff)\n    acse.tr_object = acse.e0-ef\n    print('Coefficient: {}'.format(coeff))\n    print('Taylor series step: {:.14f}'.format(\n        np.real(acse.tr_taylor)))\n    print('Objective fxn step: {:.14f}'.format(\n        np.real(acse.tr_object)))\n    if abs(acse.tr_object)<=acse.tr_obj_crit:\n        trust=True\n        
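# objective-function change is below tolerance: accept the step and end the trust-region search\n        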
print('Convergence in objective function.')\n elif abs(acse.tr_taylor)<=acse.tr_ts_crit:\n trust=True\n print('Convergence in Taylor series model.')\n else:\n rho = acse.tr_object/acse.tr_taylor\n if rho>=nv:\n print('Result in trust region. Increasing TR.')\n trust = True\n acse.tr_Del*=gi\n elif rho>=ns:\n print('Trust region held. Continuing.')\n trust = True\n else:\n acse.tr_Del*=gd\n print('Trust region did not hold. Shrinking.')\n print('Trial energy: {:.10f}'.format(ef))\n print('Current trust region: {:.14f}'.format(\n np.real(acse.tr_Del)))\n #if abs(coeff)>0.1:\n # acse.Store.update(df)\n trust_iter+=1\n if trust_iter>=2:\n trust=True\n if abs(coeff_best)<0.1:\n acse.accept_previous_step = False\n print('Rejecting Newton Step...')\n else:\n acse.accept_previous_step = True\n for f in testS:\n f.c*= coeff_best\n acse.S = acse.S+testS\n else:\n acse.S = acse.S+testS\n # eval energy is in check step\n Ins = acse.Instruct(\n operator=acse.S.op_form(),\n Nq=acse.QuantStore.Nq,\n quantstore=acse.QuantStore,\n )\n Psi= StandardTomography(\n QuantStore=acse.QuantStore,\n preset=acse.tomo_preset,\n Tomo=acse.tomo_Psi,\n verbose=acse.verbose,\n )\n if not acse.tomo_preset:\n Psi.generate(real=True,imag=False)\n Psi.set(Ins)\n Psi.simulate()\n Psi.construct(processor=acse.process)\n acse.Store.update(Psi.rdm)\n Psi.rdm.switch()\n acse.circ = Psi\n\n print('Current S: ')\n print(acse.S)\n","sub_path":"hqca/acse/_newton_acse.py","file_name":"_newton_acse.py","file_ext":"py","file_size_in_byte":5606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"414507940","text":"from models import db, Team, Venue, Game, Player, PlayerGameStats, app\nimport unittest\n# -------\n# TestIDB\n# -------\n\n\nclass TestIDB (unittest.TestCase):\n @classmethod\n def setUpClass(self):\n app.config['TESTING'] = True\n self.app = app.test_client()\n #Connect to database in the future\n\n #db, meta = connect('root', 'test123!', 'postgres')\n\n # oracle_arena = Venue(id=1, venue_name=\"Oracle Arena\", city=\"San Francisco\", capacity=9000, phone=\"1234567890\", sponsor=\"Oracle\")\n # moda_center = Venue(id=2, venue_name=\"Moda Center\", city=\"Portland\", capacity=9000, phone=\"1234567890\", sponser=\"Moda Health\")\n # quicken_loans Venue(id=3, venue_name=\"Quicken Loans Arena\", city=\"Cleveland\", capacity=9000, phone=\"1234567890\", sponsor=\"Quicken Loans\")\n # GSW = Team(id=1, team_name=\"Golden State Warriors\", coach=\"Steve Kerr\", win_percent=85.2, ppg=120, venue=1, owner=\"Joe Lacob\", conference=Conference.WESTERN)\n # POR = Team(id=2, team_name=\"Portland Trail Blazers\", coach=\"Terry Stotts\", win_percent=55.7, ppg=80, venue=2, owner=\"Paul Allen\", conference=Conference.WESTERN)\n # CLE = Team(id=3, team_name=\"Cleveland Cavaliers\", coach=\"Tyronn Lue\", win_percent=65.4, ppg=115, venue=3, owner=\"Dan Gilbert\", conference=Conference.EASTERN)\n # curry = Player(id=1, first_name=\"Stephen\", last_name=\"Curry\", ppg=30, rpg=25, apg=40, team_id=1)\n # lillard = Player(id=2, first_name=\"Damian\", last_name=\"Lillard\", ppg=15, rpg=20, apg=25, team_id=2)\n # james = Player(id=3, first_name=\"Lebron\", last_name=\"James\", ppg=25, rpg=28, apg=20, team_id=3)\n # game1 = Game(id=1, date=\"2008-11-22\", duration=250, home=1, away=2, venue=1, home_score=120, away_score=110, winner_id=1)\n # game2 = Game(id=2, date=\"2008-11-22\", duration=250, home=3, away=2, venue=3, home_score=90, away_score=75, winner_id=3)\n return\n\n\n def 
test_player_model_1(self):\n \"\"\"Test querying the database by attribute using simple keywords\"\"\"\n\n with app.test_request_context():\n\n example1 = Player(\"bla\", \"bla\", 30.5,\n 25.8, 40.2, \"1\", \"1\")\n db.session.add(example1)\n db.session.commit()\n player = db.session.query(Player).filter_by(first_name=\"bla\").filter_by(last_name=\"bla\").first()\n self.assertEqual(player.ppg, 30.5)\n self.assertEqual(player.rpg, 25.8)\n self.assertEqual(player.apg, 40.2)\n db.session.delete(player)\n db.session.commit()\n\n\n def test_player_model_2(self):\n \"\"\"Test querying the database by attribute using simple keywords\"\"\"\n\n with app.test_request_context():\n example2 = Player(\"Lebroner\", \"James\", 35.5,\n 30.8, 38.2, \"3\", \"\")\n db.session.add(example2)\n db.session.commit()\n player = db.session.query(Player).filter_by(first_name=\"Lebroner\").filter_by(last_name=\"James\").first()\n self.assertEqual(player.ppg, 35.5)\n self.assertEqual(player.rpg, 30.8)\n self.assertEqual(player.apg, 38.2)\n\n db.session.delete(player)\n db.session.commit()\n\n def test_player_model_3(self):\n \"\"\"Test querying the database by attribute using simple keywords\"\"\"\n\n with app.test_request_context():\n player = Player.query.all()\n example2 = Player(\"Lebroner\", \"James\", 35.5,\n 30.8, 38.2, \"3\", \"\")\n self.assertFalse(example2 in player)\n\n\n\n\n\n def test_team_model_1(self):\n \"\"\"Test querying the database by attribute using simple keywords\"\"\"\n\n with app.test_request_context():\n example1 = Team(\"Golden State Warriorser\", 85.2,\n 120.5, \"1\", \"Western\", \"Pacific\", \"https://nba-players.herokuapp.com/players/westbrook/russell\")\n\n db.session.add(example1)\n db.session.commit()\n\n team = db.session.query(Team).filter_by(team_name=\"Golden State Warriorser\").first()\n self.assertEqual(team.win_pct, 85.2)\n self.assertEqual(team.ppg, 120.5)\n self.assertEqual(team.home_venue, 1)\n self.assertEqual(team.conference, \"Western\")\n self.assertEqual(team.division, \"Pacific\")\n\n db.session.delete(team)\n db.session.commit()\n\n def test_team_model_2(self):\n \"\"\"Test querying the database by attribute using simple keywords\"\"\"\n\n with app.test_request_context():\n example2 = Team(\"Cleveland Cavalierser\", 90.5,\n 112.8, \"1\", \"Conference.EASTERN\", \"Division.CENTRAL\", \"\")\n\n db.session.add(example2)\n db.session.commit()\n\n team = db.session.query(Team).filter_by(team_name=\"Cleveland Cavalierser\").first()\n self.assertEqual(team.win_pct, 90.5)\n self.assertEqual(team.ppg, 112.8)\n self.assertEqual(team.home_venue, 1)\n self.assertEqual(team.conference, \"Conference.EASTERN\")\n self.assertEqual(team.division, \"Division.CENTRAL\")\n\n db.session.delete(team)\n db.session.commit()\n\n def test_team_model_3(self):\n \"\"\"Test querying the database by attribute using simple keywords\"\"\"\n\n with app.test_request_context():\n example2 = Team(\"Cleveland Cavalierser\", 90.5,\n 112.8, \"1\", \"Conference.EASTERN\", \"Division.CENTRAL\", \"\")\n\n team = Team.query.all()\n self.assertFalse(example2 in team)\n\n def test__game_model_1(self):\n \"\"\"Test querying the database by attribute using simple keywords\"\"\"\n\n with app.test_request_context():\n example1 = Game(\"2008-11-22\", \"250\", \"1\", \"2\", \"1\",\n 100, 90, \"1\", \"\")\n\n db.session.add(example1)\n db.session.commit()\n\n game = db.session.query(Game).filter_by(date=\"2008-11-22\").first()\n self.assertEqual(game.home_score, 100)\n self.assertEqual(game.away_score, 90)\n\n 
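# Clean up the fixture row so later tests query an empty table.\n            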
db.session.delete(game)\n            db.session.commit()\n\n    def test__game_model_2(self):\n        \"\"\"Test querying the database by attribute using simple keywords\"\"\"\n\n        with app.test_request_context():\n            example1 = Game(\"2016-4-20\", \"300\", \"1\", \"3\", \"1\",\n                            120, 115, \"1\", \"\")\n\n            db.session.add(example1)\n            db.session.commit()\n\n            game = db.session.query(Game).filter_by(date=\"2016-4-20\").first()\n            self.assertEqual(game.home_score, 120)\n            self.assertEqual(game.away_score, 115)\n\n            db.session.delete(game)\n            db.session.commit()\n\n    def test_venue_model_1(self):\n        \"\"\"Test querying the database by attribute using simple keywords\"\"\"\n\n        with app.test_request_context():\n            example1 = Venue(\"Moda Center1\", \"Portland\", 19980,\n                             \"1995\", \"Paul Allen\", \"\")\n\n            db.session.add(example1)\n            db.session.commit()\n\n            venue = db.session.query(Venue).filter_by(venue_name=\"Moda Center1\").first()\n            self.assertEqual(venue.city, \"Portland\")\n            self.assertEqual(venue.capacity, 19980)\n            self.assertEqual(venue.owner, \"Paul Allen\")\n\n            db.session.delete(venue)\n            db.session.commit()\n\n    def test_venue_model_2(self):\n        \"\"\"Test querying the database by attribute using simple keywords\"\"\"\n\n        with app.test_request_context():\n            example2 = Venue(\"Oracle Arena1\", \"San Francisco\", 20000,\n                             \"1980\", \"Larry Ellison\", \"\")\n\n            db.session.add(example2)\n            db.session.commit()\n\n            venue = db.session.query(Venue).filter_by(venue_name=\"Oracle Arena1\").first()\n            self.assertEqual(venue.city, \"San Francisco\")\n            self.assertEqual(venue.capacity, 20000)\n            self.assertEqual(venue.owner, \"Larry Ellison\")\n\n            db.session.delete(venue)\n            db.session.commit()\n\n    # Indented into the class so unittest actually runs it as the fixture teardown.\n    @classmethod\n    def tearDownClass(cls):\n        # Close connection\n        pass\n\n# ----\n# main\n# ----\n\nif __name__ == \"__main__\":  # pragma: no cover\n    unittest.main()\n\n","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":8289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"326634556","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 22\n\n@author: daniil khaninaev\n\"\"\"\ndef factor(n):\n    lst = []\n    d = 2\n    while d * d <= n:\n        if n % d == 0:\n            lst.append(d)\n            n //= d\n        else: d += 1\n    if n > 1 and lst == []:\n        lst = [1, n]\n    else: lst.append(n)\n    return lst\n\nif __name__ == '__main__':\n    n = int(input('Enter number N: '))\n    answers = [factor(number) for number in range(1, n+1)]\n\n    for index, answer in enumerate(answers):\n        print('{0} => {1}'.format(index+1, answer))","sub_path":"question_1.py","file_name":"question_1.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"546091890","text":"#upper confidence bound\nimport numpy as np\nimport matplotlib.pyplot\nimport pandas as pd\nimport math\n\n#importing dataset\ndataset = pd.read_csv('Ads_CTR_Optimisation.csv')\n\n#implementing UCB\n\nN = 10000\nd = 10\nads_selected = []\nnumber_of_selections = [0] * d\nsums_of_rewards = [0] * d\ntotal_reward = 0\n# n is the round index; the inner loop over ads must use a different variable\nfor n in range(0, N):\n    max_upper_bound = 0\n    ad = 0\n    for i in range(0, d):\n        if (number_of_selections[i] > 0):\n            average_reward = sums_of_rewards[i]/number_of_selections[i]\n            delta_i = math.sqrt(3/2*math.log(n + 1) / number_of_selections[i])\n            upper_bound = average_reward + delta_i\n        else:\n            upper_bound = 1e400\n        if upper_bound > max_upper_bound:\n            max_upper_bound = upper_bound\n            ad = i\n    ads_selected.append(ad)\n    number_of_selections[ad] = number_of_selections[ad] + 1\n    reward = dataset.values[n, ad]\n    sums_of_rewards[ad] = sums_of_rewards[ad] + reward\n    total_reward = total_reward + reward\n\n","sub_path":"Part 6 - Reinforcement Learning/Upper Confidence Bound (UCB)/UCB.py","file_name":"UCB.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"285043457","text":"# greedy\n\nclass Solution:\n    def jump(self, nums):\n        idx = 0\n        next_idx = 0\n        steps = 0\n\n        for i, num in enumerate(nums):\n            if idx + 1 >= len(nums):\n                return steps\n\n            next_idx = max(next_idx, i + num)\n\n            if i == idx:\n                steps += 1\n                idx = next_idx\n\n","sub_path":"leetcode/py/45.py","file_name":"45.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"474075824","text":"#\n# Copyright (c) 2008-2009 Grigori Goronzy \n#\n# Permission to use, copy, modify, and distribute this software for any\n# purpose with or without fee is hereby granted, provided that the above\n# copyright notice and this permission notice appear in all copies.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\n# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\n# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\n# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\n# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\n# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\n# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n#\n\nimport os\n\nclass EeePc700:\n    def __init__(self, action):\n        self.action = action\n        # FIXME: some 700s have a camera, some don't\n        self.features = \"wifi\", \"reader\", \"camera\", \"touchpad\"\n\n        # Settings for extended brightness range.\n        # superhigh is on the safe side to not kill your backlight.\n        self.extended_brightness = True\n        self.brightness_superlow = 0x05\n        self.brightness_superhigh = 0xc0\n        \n        self.action_map = {\n            0x10: self.action.wifi_toggle,\n            0x11: self.action.wifi_toggle,\n            0x13: self.action.mute,\n            0x14: self.action.vol_down,\n            0x15: self.action.vol_up,\n        }\n        \n        self.hotkey_map = {\n            0x30: \"(Fn-F5)\",\n            0x12: \"(Fn-F6)\",\n        }\n\n        # FIXME: Check if we're using the ath5k module on Kernel 2.6.27 or\n        # later. 
If set self.wlan_module=\"ath5k\", self.wlan_dev=\"wlan0\"\n self.wlan_module = \"ath_pci\"\n self.wlan_dev = \"ath0\"\n\n def wifi_off(self):\n # rfkill is still very flaky, at least with rt2860sta\n # it often freezes the machine after toggling.\n if bool(self.wlan_dev):\n os.spawnlp(os.P_WAIT, \"ifconfig\", \"ifconfig\", self.wlan_dev, \"down\")\n if bool(self.wlan_module):\n os.spawnlp(os.P_WAIT, \"modprobe\", \"modprobe\", \"-r\", self.wlan_module)\n if self.wlan_path.find(\"rfkill\") == -1: pass\n if os.path.exists(self.wlan_path):\n f = open(self.wlan_path, \"w\")\n f.write(\"0\\n\")\n f.close()\n\n def wifi_on(self):\n if os.path.exists(self.wlan_path):\n f = open(self.wlan_path, \"w\")\n f.write(\"1\\n\")\n f.close()\n if self.wlan_path.find(\"rfkill\") == -1: pass\n if bool(self.wlan_module):\n os.spawnlp(os.P_WAIT, \"modprobe\", \"modprobe\", self.wlan_module)\n\n\nclass EeePc700SE(EeePc700):\n def __init__(self, action):\n EeePc700.__init__(self, action)\n \n self.wlan_module = \"r8180\"\n self.wlan_dev = \"wlan0\"\n\n\nclass EeePc900(EeePc700):\n def __init__(self, action):\n EeePc700.__init__(self, action)\n\n self.features = \"wifi\", \"reader\", \"camera\", \"touchpad\"\n self.extended_brightness = True\n\n\nclass EeePc900A(EeePc900):\n def __init__(self, action):\n EeePc900.__init__(self, action)\n \n self.features = \"wifi\", \"reader\", \"camera\", \"touchpad\"\n\n\nclass EeePc901(EeePc700):\n def __init__(self, action):\n self.action = action\n \n # TODO: think of a method to recognize 901go\n # maybe possible through eeepc_laptop control files\n # note that 901go doesn't have bluetooth\n \n self.features = \"wifi\", \"bt\", \"reader\", \"camera\", \"touchpad\"\n \n # Settings for extended brightness range.\n # superhigh is on the safe side to not kill your backlight.\n self.extended_brightness = True\n self.brightness_superlow = 0x20\n self.brightness_superhigh = 0xd0\n \n self.action_map = {\n 0x10: self.action.wifi_toggle,\n 0x11: self.action.wifi_toggle,\n 0x13: self.action.mute,\n 0x14: self.action.vol_down,\n 0x15: self.action.vol_up,\n }\n \n self.hotkey_map = {\n 0x1a: \"Hotkey 1\",\n 0x1b: \"Hotkey 2\",\n 0x1c: \"Hotkey 3\",\n 0x1d: \"Hotkey 4\",\n 0x30: \"(Fn-F5)\",\n 0x12: \"(Fn-F6)\",\n }\n\n self.wlan_module = \"rt2860sta\"\n self.wlan_dev = \"ra0\"\n\n\nclass EeePc1000(EeePc901):\n def __init__(self, action):\n EeePc901.__init__(self, action)\n\n self.extended_brightness = False\n self.hotkey_map = {\n 0x1a: \"Hotkey 1\",\n 0x1b: \"Hotkey 2\",\n 0x1c: \"Hotkey 3\",\n 0x1d: \"Hotkey 4\",\n 0x16: \"(Fn-F7)\",\n 0x30: \"(Fn-F8)\",\n 0x12: \"(Fn-F9)\",\n }\n\n\nclass EeePc1000HE(EeePc1000):\n def __init__(self, action):\n EeePc1000.__init__(self, action)\n self.hotkey_map = {\n 0x39: \"Hotkey\",\n 0x1a: \"Hotkey 1\",\n 0x1b: \"Hotkey 2\",\n 0x1c: \"Hotkey 3\",\n 0x1d: \"Hotkey 4\",\n 0x37: \"(Fn-F3)\",\n 0x1b: \"(Fn-F4)\",\n 0x16: \"(Fn-F7)\",\n 0x30: \"(Fn-F8)\",\n 0x12: \"(Fn-F9)\",\n }\n self.wlan_module = \"ath9k\"\n self.wlan_dev = \"wlan0\"\n\n\nclass EeePc1000HD(EeePc1000):\n def __init__(self, action):\n EeePc1000.__init__(self, action)\n\n self.wlan_module = \"ath5k\"\n self.wlan_dev = \"wlan0\"\n\n\nclass EeePc1002HA(EeePc1000HE):\n def __init__(self, action):\n EeePc1000HE.__init__(self, action)\n self.action_map = {\n 0x10: self.action.wifi_toggle,\n 0x11: self.action.wifi_toggle,\n 0x13: self.action.mute,\n 0x14: self.action.vol_down,\n 0x15: self.action.vol_up,\n }\n\nclass EeePc904HD(EeePc900):\n def __init__(self, action):\n 
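# Reuse the EeePc900 setup; only the hotkey map below differs.\n        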
EeePc900.__init__(self, action)\n        self.hotkey_map = {\n            0x1a: \"Hotkey 1\",\n            0x1b: \"Hotkey 2\",\n            0x1c: \"Hotkey 3\",\n            0x1d: \"Hotkey 4\",\n            0x16: \"(Fn-F7)\",\n            0x30: \"(Fn-F8)\",\n            0x12: \"(Fn-F9)\",\n        }\n\n\nclass EeePcAutodetect:\n    def __init__(self, action):\n        self.action = action\n        self.extended_brightness = False\n        self.brightness_superlow = 0\n        self.brightness_superhigh = 0\n        self.features = [\"wifi\", \"bt\", \"touchpad\"]\n        self.action_map = {\n            0x10: self.action.wifi_toggle,\n            0x11: self.action.wifi_toggle,\n            0x13: self.action.mute,\n            0x14: self.action.vol_down,\n            0x15: self.action.vol_up,\n        }\n        # Cram everything into the mapping\n        self.hotkey_map = {\n            0x39: \"Hotkey\",\n            0x37: \"(Fn-F3)\",\n            0x1a: \"Hotkey 1\",\n            0x1b: \"Hotkey 2\",\n            0x1c: \"Hotkey 3\",\n            0x1d: \"Hotkey 4\",\n            0x16: \"(Fn-F7)\",\n            0x30: \"(Fn-F8/F5)\",\n            0x12: \"(Fn-F9/F6)\",\n        }\n        self.detect_features()\n\n    def detect_features(self):\n        features = {\n            \"camera\": \"camera\",\n            \"cardr\": \"reader\",\n        }\n\n        # items() instead of the Python-2-only iteritems()\n        for k, v in features.items():\n            if os.path.exists(os.path.join(self.action.acpi_base, k)):\n                self.features.append(v)\n\n    # This does not care about the special needs of crappy drivers\n    # like rt2860sta -- let's just hope it will not blow up\n    def wifi_off(self):\n        if os.path.exists(self.wlan_path):\n            f = open(self.wlan_path, \"w\")\n            f.write(\"0\\n\")\n            f.close()\n\n    def wifi_on(self):\n        if os.path.exists(self.wlan_path):\n            f = open(self.wlan_path, \"w\")\n            f.write(\"1\\n\")\n            f.close()\n\n\n# Map system-product-name to the abstractions\nMODEL_MAP = {\n    \"700\": EeePc700,\n    \"700SE\": EeePc700SE,\n    \"701SD\": EeePc700SE,\n    \"701\": EeePc700,\n    \"900\": EeePc900,\n    \"900SD\": EeePc900,\n    \"900HD\": EeePc900,\n    \"904HD\": EeePc904HD,\n    \"904HA\": EeePc901,\n    \"900A\": EeePc900A,\n    \"900HA\": EeePc900A,\n    \"901\": EeePc901,\n    \"1000\": EeePc1000,\n    \"1000H\": EeePc1000,\n    \"1000HV\": EeePc1000HE,\n    \"1000HD\": EeePc1000HD,\n    \"1000HE\": EeePc1000HE,\n    \"1002HA\": EeePc1002HA,\n    \"1008HA\": EeePc1002HA,\n    \"1005HA\": EeePc1002HA,\n    \"702\": EeePc700,\n    \"AUTODETECT\": EeePcAutodetect,\n}\n","sub_path":"EeeControl/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":8326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"321224876","text":"# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\"\"\"Kernel managers that operate against a remote process.\"\"\"\n\nimport asyncio\nimport errno\nimport getpass\nimport os\nimport sys\nimport time\nimport re\nimport json\n\nimport pexpect\nimport random\n\nfrom abc import abstractmethod\nfrom enum import Enum\nfrom jupyter_client import KernelProvisionerBase, localinterfaces, launch_kernel, KernelConnectionInfo\n\nfrom socket import socket, timeout,\\\n    AF_INET, SOCK_STREAM, SHUT_WR\n\nfrom typing import Any, Dict, List, Optional, Tuple\nfrom zmq.ssh import tunnel\n\nfrom .config_mixin import RemoteProvisionerConfigMixin\nfrom .response_manager import ResponseManager\n\n# Pop certain env variables that don't need to be logged, e.g. 
remote_pwd\nenv_pop_list = ['RP_REMOTE_PWD', 'LS_COLORS']\ndefault_kernel_launch_timeout = float(os.getenv('KERNEL_LAUNCH_TIMEOUT', '30'))\n\nmax_poll_attempts = int(os.getenv('RP_MAX_POLL_ATTEMPTS', '10'))\npoll_interval = float(os.getenv('RP_POLL_INTERVAL', '0.5'))\nsocket_timeout = float(os.getenv('RP_SOCKET_TIMEOUT', '0.005'))\n\n# Minimum port range size and max retries\nmin_port_range_size = int(os.getenv('RP_MIN_PORT_RANGE_SIZE', '1000'))\nmax_port_range_retries = int(os.getenv('RP_MAX_PORT_RANGE_RETRIES', '5'))\n\n# Number of seconds in 100 years as the max keep-alive interval value.\nmax_keep_alive_interval_default = 100 * 365 * 24 * 60 * 60\nmax_keep_alive_interval = int(os.getenv(\"RP_TUNNEL_MAX_KEEP_ALIVE\", max_keep_alive_interval_default))\n\nssh_port = int(os.getenv('RP_SSH_PORT', '22'))\ntunneling_enabled = bool(os.getenv('EG_ENABLE_TUNNELING', 'False').lower() == 'true')\n\nlocal_ip = localinterfaces.public_ips()[0]\n\nrandom.seed()\n\n\nclass KernelChannel(Enum):\n \"\"\"Enumeration used to better manage tunneling \"\"\"\n SHELL = \"SHELL\"\n IOPUB = \"IOPUB\"\n STDIN = \"STDIN\"\n HEARTBEAT = \"HB\"\n CONTROL = \"CONTROL\"\n COMMUNICATION = \"EG_COMM\" # Optional channel for remote launcher to issue interrupts - NOT a ZMQ channel\n\n\nclass RemoteProvisionerBase(RemoteProvisionerConfigMixin, KernelProvisionerBase):\n \"\"\"Base class for remote provisioners.\"\"\"\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n self.start_time = None\n self.assigned_ip = None\n self.assigned_host = ''\n self.comm_ip = None\n self.comm_port = 0\n self.kernel_username = None\n self.tunneled_connect_info = None\n self.tunnel_processes = {}\n\n # Represents the local process (from popen) if applicable. This will likely be non-None\n # for a short while until the script has launched the remote process, then will typically\n # go away.\n self.local_proc = None\n self.ip = None\n self.pid = 0\n self.pgid = 0\n\n self.response_manager = ResponseManager.instance() # This will create the key pair and socket on first use\n self.response_address = self.response_manager.response_address\n self.public_key = self.response_manager.public_key\n self.lower_port, self.upper_port = self._validate_port_range()\n\n async def pre_launch(self, **kwargs: Any) -> Dict[str, Any]:\n \"\"\"Perform any steps in preparation for kernel process launch.\n\n This includes applying additional substitutions to the kernel launch command and env.\n It also includes preparation of launch parameters.\n\n Returns potentially updated kwargs.\n \"\"\"\n self.start_time = None\n self.assigned_ip = None\n self.assigned_host = ''\n self.comm_ip = None\n self.comm_port = 0\n self.tunneled_connect_info = None\n self.tunnel_processes = {}\n self.local_proc = None\n self.ip = None\n self.pid = 0\n self.pgid = 0\n self.response_manager.register_event(self.kernel_id)\n\n cmd = self.kernel_spec.argv # Build launch command, provide substitutions\n if self.response_address or self.port_range or self.kernel_id or self.public_key:\n ns = kwargs.copy()\n if self.response_address:\n ns['response_address'] = self.response_address\n if self.public_key:\n ns['public_key'] = self.public_key\n if self.port_range:\n ns['port_range'] = self.port_range\n if self.kernel_id:\n ns['kernel_id'] = self.kernel_id\n\n pat = re.compile(r'{([A-Za-z0-9_]+)}')\n\n def from_ns(match):\n \"\"\"Get the key out of ns if it's there, otherwise no change.\"\"\"\n return ns.get(match.group(1), match.group())\n\n cmd = [pat.sub(from_ns, arg) for arg in 
cmd]\n\n kwargs = await super().pre_launch(cmd=cmd, **kwargs)\n\n env = kwargs.get('env', {})\n self.kernel_username = env.get('KERNEL_USERNAME', getpass.getuser()) # Let env override\n env['KERNEL_USERNAME'] = self.kernel_username # reset in env in case its not there\n\n self._enforce_authorization(**kwargs)\n\n self.log.debug(f\"RemoteProvisionerBase.pre_launch() env: {env}\")\n return kwargs\n\n async def launch_kernel(self, cmd: List[str], **kwargs: Any) -> KernelConnectionInfo:\n \"\"\"Launch the kernel process returning the class instance and connection info.\"\"\"\n\n launch_kwargs = RemoteProvisionerBase._scrub_kwargs(kwargs)\n self.local_proc = launch_kernel(cmd, **launch_kwargs)\n self.pid = self.local_proc.pid\n self.ip = local_ip\n\n self.log_kernel_launch(cmd)\n\n await self.confirm_remote_startup()\n return self.connection_info\n\n @property\n @abstractmethod\n def has_process(self) -> bool:\n pass\n\n @abstractmethod\n async def poll(self) -> Optional[int]:\n pass\n\n async def wait(self) -> Optional[int]:\n \"\"\"Waits for kernel process to terminate.\"\"\"\n # If we have a local_proc, call its wait method. This will cleanup any defunct processes when the kernel\n # is shutdown (when using waitAppCompletion = false). Otherwise (if no local_proc) we'll use polling to\n # determine if a (remote or revived) process is still active.\n if self.local_proc:\n return self.local_proc.wait()\n\n poll_val = 0\n for i in range(max_poll_attempts):\n poll_val = await self.poll()\n if poll_val is None:\n await asyncio.sleep(poll_interval)\n else:\n break\n else:\n self.log.warning(\"Wait timeout of {} seconds exhausted. Continuing...\".\n format(max_poll_attempts * poll_interval))\n return poll_val\n\n async def send_signal(self, signum: int) -> None:\n \"\"\"\n Sends `signum` via the communication port.\n The kernel launcher listening on its communication port will receive the signum and perform\n the necessary signal operation local to the process.\n \"\"\"\n signal_delivered = await self._send_signal_via_listener(signum)\n if not signal_delivered:\n # Fallback\n # if we have a local process, use its method, else determine if the ip is local or remote and issue\n # the appropriate version to signal the process.\n if self.local_proc:\n if self.pgid > 0 and hasattr(os, \"killpg\"):\n try:\n os.killpg(self.pgid, signum)\n return\n except OSError:\n pass\n self.local_proc.send_signal(signum)\n # else:\n # if self.ip and self.pid > 0:\n # if ip_is_local(self.ip):\n # self.local_signal(signum)\n # else:\n # self.remote_signal(signum)\n return\n\n async def _send_signal_via_listener(self, signum: int) -> bool:\n \"\"\"Sends signal 'signum' to kernel process via listener.\n\n :returns: True if request delivered, false otherwise.\n \"\"\"\n # If the launcher returned a comm_port value, then use that to send the signal,\n # else, defer to the superclass - which will use a remote shell to issue kill.\n # Note that if the target process is running as a different user than the REMOTE_USER,\n # using anything other than the socket-based signal (via signal_addr) will not work.\n if self.comm_port > 0:\n signal_request = dict()\n signal_request['signum'] = signum\n\n try:\n await self._send_listener_request(signal_request)\n if signum > 0: # Polling (signum == 0) is too frequent\n self.log.debug(\"Signal ({}) sent via gateway communication port.\".format(signum))\n return True\n except Exception as e:\n if isinstance(e, OSError) and e.errno == errno.ECONNREFUSED: # Return since there's no process.\n 
return True\n\n self.log.warning(f\"An unexpected exception occurred sending signal ({signum}) \"\n f\"via listener for KernelID '{self.kernel_id}': {e}\")\n return False\n\n @abstractmethod\n async def kill(self, restart: bool = False) -> None:\n \"\"\"Kills the kernel process. This is typically accomplished via a SIGKILL signal, which\n cannot be caught.\n\n restart is True if this operation precedes a start launch_kernel request.\n \"\"\"\n pass\n\n @abstractmethod\n async def terminate(self, restart=False) -> None:\n \"\"\"Terminates the kernel process. This is typically accomplished via a SIGTERM signal, which\n can be caught, allowing the kernel provisioner to perform possible cleanup of resources.\n\n restart is True if this operation precedes a start launch_kernel request.\n \"\"\"\n pass\n\n async def cleanup(self, restart=False) -> None:\n \"\"\"Cleanup any resources allocated on behalf of the kernel provisioner.\n\n restart is True if this operation precedes a start launch_kernel request.\n \"\"\"\n self.assigned_ip = None\n\n for kernel_channel, process in self.tunnel_processes.items():\n self.log.debug(f\"cleanup: terminating {kernel_channel} tunnel process.\")\n process.terminate()\n\n self.tunnel_processes.clear()\n\n async def shutdown_requested(self, restart=False) -> None:\n \"\"\"Called after KernelManager sends a `shutdown_request` message to kernel.\n\n This method is optional and is primarily used in scenarios where the provisioner communicates\n with a sibling (nanny) process to the kernel.\n \"\"\"\n await self.shutdown_listener()\n\n @staticmethod\n def _scrub_kwargs(kwargs: Dict[str, Any]) -> Dict[str, Any]:\n \"\"\"Remove any keyword arguments that Popen does not tolerate.\"\"\"\n keywords_to_scrub: List[str] = ['extra_arguments', 'kernel_id']\n scrubbed_kwargs = kwargs.copy()\n for kw in keywords_to_scrub:\n scrubbed_kwargs.pop(kw, None)\n\n return scrubbed_kwargs\n\n @abstractmethod\n def log_kernel_launch(self, cmd: List[str]) -> None:\n \"\"\"Logs the kernel launch from the respective remote provisioner\"\"\"\n pass\n\n async def post_launch(self, **kwargs: Any) -> None:\n \"\"\"Perform any steps following the kernel process launch.\"\"\"\n pass\n\n # Done\n async def handle_launch_timeout(self):\n \"\"\"\n Checks to see if the kernel launch timeout has been exceeded while awaiting connection info.\n \"\"\"\n await asyncio.sleep(poll_interval)\n time_interval = RemoteProvisionerBase.get_time_diff(self.start_time)\n\n if time_interval > self.launch_timeout:\n reason = f\"Waited too long ({self.launch_timeout}s) to get connection file\"\n timeout_message = f\"KernelID: '{self.kernel_id}' launch timeout due to: {reason}\"\n await self.kill()\n self.log_and_raise(TimeoutError(timeout_message))\n\n @abstractmethod\n async def confirm_remote_startup(self):\n \"\"\"Confirms the remote process has started and returned necessary connection information.\"\"\"\n pass\n\n async def get_provisioner_info(self) -> Dict:\n \"\"\"Captures the base information necessary for kernel persistence relative to the provisioner.\n\n The superclass method must always be called first to ensure proper ordering. Since this is the\n most base class, no call to `super()` is necessary.\n \"\"\"\n provisioner_info = {}\n return provisioner_info\n\n async def load_provisioner_info(self, provisioner_info: Dict) -> None:\n \"\"\"Loads the base information necessary for kernel persistence relative to the provisioner.\n\n The superclass method must always be called first to ensure proper ordering. 
Since this is the\n most base class, no call to `super()` is necessary.\n \"\"\"\n pass\n\n def get_shutdown_wait_time(self, recommended: Optional[float] = 5.0) -> float:\n \"\"\"Returns the time allowed for a complete shutdown. This may vary by provisioner.\n\n The recommended value will typically be what is configured in the kernel manager.\n \"\"\"\n return recommended\n\n def _finalize_env(self, env: Dict[str, str]) -> None:\n \"\"\" Ensures env is appropriate prior to launch. \"\"\"\n\n # add the applicable kernel_id and language to the env dict\n env['KERNEL_ID'] = self.kernel_id\n\n kernel_language = 'unknown-kernel-language'\n if len(self.kernel_spec.language) > 0:\n kernel_language = self.kernel_spec.language.lower()\n # if already set in env: stanza, let that override.\n env['KERNEL_LANGUAGE'] = env.get('KERNEL_LANGUAGE', kernel_language)\n\n # Remove any potential sensitive (e.g., passwords) or annoying values (e.g., LG_COLORS)\n for k in env_pop_list:\n env.pop(k, None)\n\n def detect_launch_failure(self):\n \"\"\"\n Helper method called from implementations of `confirm_remote_startup()` that checks if\n self.local_proc (a popen instance) has terminated prior to the confirmation of startup.\n This prevents users from having to wait for the kernel timeout duration to know if the\n launch fails. It also helps distinguish local invocation issues from remote post-launch\n issues since the failure will be relatively immediate.\n\n Note that this method only applies to those process proxy implementations that launch\n from the local node. Proxies like DistributedProcessProxy use rsh against a remote\n node, so there's not `local_proc` in play to interrogate.\n \"\"\"\n\n # Check if the local proc has faulted (poll() will return non-None with a non-zero return\n # code in such cases). If a fault was encountered, raise server error (500) with a message\n # indicating to check the EG log for more information.\n if self.local_proc:\n poll_result = self.local_proc.poll()\n if poll_result and poll_result > 0:\n self.local_proc.wait() # FIXME\n error_message = f\"Error occurred during launch of KernelID: {self.kernel_id}. \" \\\n \"Check Enterprise Gateway log for more information.\"\n self.local_proc = None\n self.log_and_raise(RuntimeError(error_message))\n\n # Done\n def _enforce_authorization(self, **kwargs):\n \"\"\"Applies any authorization configuration using the kernel user.\n\n Regardless of impersonation enablement, this method first adds the appropriate value for\n EG_IMPERSONATION_ENABLED into environment (for use by kernelspecs), then ensures that KERNEL_USERNAME\n has a value and is present in the environment (again, for use by kernelspecs). If unset, KERNEL_USERNAME\n will be defaulted to the current user.\n\n Authorization is performed by comparing the value of KERNEL_USERNAME with each value in the set of\n unauthorized users. If any (case-sensitive) matches are found, HTTP error 403 (Forbidden) will be raised\n - preventing the launch of the kernel. If the authorized_users set is non-empty, it is then checked to\n ensure the value of KERNEL_USERNAME is present in that list. 
If not found, HTTP error 403 will be raised.\n\n It is assumed that the kernelspec logic will take the appropriate steps to impersonate the user identified\n by KERNEL_USERNAME when impersonation_enabled is True.\n \"\"\"\n # Get the env\n env_dict = kwargs.get('env')\n\n # Although it may already be set in the env, just override in case it was only set via command line or config\n # Convert to string since execve() (called by Popen in base classes) wants string values.\n env_dict['EG_IMPERSONATION_ENABLED'] = str(self.impersonation_enabled) # TODO - Leave EG_ for kernelspec?\n\n # Now perform authorization checks\n if self.kernel_username in self.unauthorized_users:\n self._raise_authorization_error(\"not authorized\")\n\n # If authorized users are non-empty, ensure user is in that set.\n if self.authorized_users.__len__() > 0:\n if self.kernel_username not in self.authorized_users:\n self._raise_authorization_error(\"not in the set of users authorized\")\n\n # Done\n def _raise_authorization_error(self, differentiator_clause):\n \"\"\"Raises a 403 status code after building the appropriate message.\"\"\"\n kernel_name = self.kernel_spec.display_name\n kernel_clause = f\" '{kernel_name}'.\" if kernel_name is not None else \"s.\"\n error_message = f\"User '{self.kernel_username}' is {differentiator_clause} to start kernel{kernel_clause} \" \\\n \"Ensure KERNEL_USERNAME is set to an appropriate value and retry the request.\"\n self.log_and_raise(PermissionError(error_message))\n\n # Done\n def _validate_port_range(self) -> Tuple[int, int]:\n \"\"\"Validates the port range configuration option to ensure appropriate values.\"\"\"\n\n lower_port = upper_port = 0\n port_range = self.port_range\n try:\n port_ranges = port_range.split(\"..\")\n\n lower_port = int(port_ranges[0])\n upper_port = int(port_ranges[1])\n\n port_range_size = upper_port - lower_port\n if port_range_size != 0:\n if port_range_size < min_port_range_size:\n self.log_and_raise(ValueError(f\"Port range validation failed for range: '{port_range}'. \"\n f\"Range size must be at least {min_port_range_size} as specified by \"\n \"env EG_MIN_PORT_RANGE_SIZE\"))\n\n # According to RFC 793, port is a 16-bit unsigned int. Which means the port\n # numbers must be in the range (0, 65535). However, within that range,\n # ports 0 - 1023 are called \"well-known ports\" and are typically reserved for\n # specific purposes. For example, 0 is reserved for random port assignment,\n # 80 is used for HTTP, 443 for TLS/SSL, 25 for SMTP, etc. But, there is\n # flexibility as one can choose any port with the aforementioned protocols.\n # Ports 1024 - 49151 are called \"user or registered ports\" that are bound to\n # services running on the server listening to client connections. And, ports\n # 49152 - 65535 are called \"dynamic or ephemeral ports\". A TCP connection\n # has two endpoints. Each endpoint consists of an IP address and a port number.\n # And, each connection is made up of a 4-tuple consisting of -- client-IP,\n # client-port, server-IP, and server-port. A service runs on a server with a\n # specific IP and is bound to a specific \"user or registered port\" that is\n # advertised for clients to connect. So, when a client connects to a service\n # running on a server, three out of 4-tuple - client-IP, client-port, server-IP -\n # are already known. 
To be able to serve multiple clients concurrently, the\n # server's IP stack assigns an ephemeral port for the connection to complete\n # the 4-tuple.\n #\n # In case of JEG, we will accept ports in the range 1024 - 65535 as these days\n # admins use dedicated hosts for individual services.\n def validate_port(port: int) -> None:\n if port < 1024 or port > 65535:\n self.log_and_raise(ValueError(f\"Invalid port range '{port_range}' specified. \"\n \"Range for valid port numbers is (1024, 65535).\"))\n validate_port(lower_port)\n validate_port(upper_port)\n except IndexError as ie:\n self.log_and_raise(RuntimeError(f\"Port range validation failed for range: '{port_range}'.\"), chained=ie)\n\n return lower_port, upper_port\n\n # Done\n def log_and_raise(self, ex: Exception, chained: Optional[Exception] = None) -> None:\n \"\"\"Helper method that logs the stringized exception 'ex' and raises that exception.\n\n If a chained exception is provided that exception will be in the raised exceptions's from clause.\n\n Parameters\n ----------\n ex : Exception\n The exception to log and raise\n chained : Exception (optional)\n The exception to use in the 'from' clause.\n \"\"\"\n\n self.log.error(str(ex))\n if chained:\n raise ex from chained\n else:\n raise ex\n\n async def shutdown_listener(self):\n \"\"\"\n Sends a shutdown request to the kernel launcher listener.\n \"\"\"\n # If a comm port has been established, instruct the listener to shutdown so that proper\n # kernel termination can occur. If not done, the listener keeps the launcher process\n # active, even after the kernel has terminated, leading to less than graceful terminations.\n\n if self.comm_port > 0:\n shutdown_request = dict()\n shutdown_request['shutdown'] = 1\n\n try:\n await self._send_listener_request(shutdown_request, shutdown_socket=True)\n self.log.debug(\"Shutdown request sent to listener via gateway communication port.\")\n except Exception as e:\n if not isinstance(e, OSError) or e.errno != errno.ECONNREFUSED:\n self.log.warning(\"An unexpected exception occurred sending listener shutdown to {}:{} for \"\n \"KernelID '{}': {}\"\n .format(self.comm_ip, self.comm_port, self.kernel_id, str(e)))\n\n # Also terminate the tunnel process for the communication port - if in play. Failure to terminate\n # this process results in the kernel (launcher) appearing to remain alive following the shutdown\n # request, which triggers the \"forced kill\" termination logic.\n\n comm_port_name = KernelChannel.COMMUNICATION.value\n comm_port_tunnel = self.tunnel_processes.get(comm_port_name, None)\n if comm_port_tunnel:\n self.log.debug(\"shutdown_listener: terminating {} tunnel process.\".format(comm_port_name))\n comm_port_tunnel.terminate()\n del self.tunnel_processes[comm_port_name]\n\n async def receive_connection_info(self) -> bool:\n \"\"\"\n Monitors the response address for connection info sent by the remote kernel launcher.\n \"\"\"\n # Polls the socket using accept. 
When data is found, returns ready indicator and encrypted data.\n ready_to_connect = False\n try:\n connect_info = await self.response_manager.get_connection_info(self.kernel_id)\n self._setup_connection_info(connect_info)\n ready_to_connect = True\n except Exception as e:\n if type(e) is timeout or type(e) is TimeoutError or type(e) is asyncio.exceptions.TimeoutError:\n self.log.debug(f\"Waiting for KernelID '{self.kernel_id}' to send connection \"\n f\"info from host '{self.assigned_host}' - retrying...\")\n else:\n error_message = f\"Exception occurred waiting for connection file response for \" \\\n f\"KernelId '{self.kernel_id}' on host '{self.assigned_host}': {e}\"\n await self.kill()\n self.log_and_raise(RuntimeError(error_message), chained=e)\n\n return ready_to_connect\n\n def _setup_connection_info(self, connect_info: dict) -> None:\n \"\"\"\n Take connection info (returned from launcher or loaded from session persistence) and properly\n configure port variables for the 5 kernel and (possibly) the launcher communication port. If\n tunneling is enabled, these ports will be tunneled with the original port information recorded.\n \"\"\"\n\n self.log.debug(f\"Host assigned to the kernel is: '{self.assigned_host}' '{self.assigned_ip}'\")\n\n connect_info['ip'] = self.assigned_ip # Set connection to IP address of system where the kernel was launched\n\n if tunneling_enabled is True:\n # Capture the current(tunneled) connect_info relative to the IP and ports (including the\n # communication port - if present).\n self.tunneled_connect_info = dict(connect_info)\n\n # Open tunnels to the 5 ZMQ kernel ports\n tunnel_ports = self._tunnel_to_kernel(connect_info, self.assigned_ip)\n self.log.debug(f\"Local ports used to create SSH tunnels: '{tunnel_ports}'\")\n\n # Replace the remote connection ports with the local ports used to create SSH tunnels.\n connect_info['ip'] = '127.0.0.1'\n connect_info['shell_port'] = tunnel_ports[0]\n connect_info['iopub_port'] = tunnel_ports[1]\n connect_info['stdin_port'] = tunnel_ports[2]\n connect_info['hb_port'] = tunnel_ports[3]\n connect_info['control_port'] = tunnel_ports[4]\n\n # If a communication port was provided, tunnel it\n if 'comm_port' in connect_info:\n self.comm_ip = connect_info['ip']\n tunneled_comm_port = int(connect_info['comm_port'])\n self.comm_port = self._tunnel_to_port(KernelChannel.COMMUNICATION, self.assigned_ip,\n tunneled_comm_port, self.assigned_ip)\n connect_info['comm_port'] = self.comm_port\n self.log.debug(f\"Established communication to: {self.assigned_ip}:{tunneled_comm_port} \"\n f\"for KernelID '{self.kernel_id}' via tunneled port 127.0.0.1:{self.comm_port}\")\n\n else: # tunneling not enabled, still check for and record communication port\n if 'comm_port' in connect_info:\n self.comm_ip = connect_info['ip']\n self.comm_port = int(connect_info['comm_port'])\n self.log.debug(f\"Established communication to: {self.assigned_ip}:{self.comm_port} \"\n f\"for KernelID '{self.kernel_id}'\")\n\n # If no communication port was provided, record that fact as well since this is useful to know\n if 'comm_port' not in connect_info:\n self.log.debug(f\"Communication port has NOT been established for KernelID '{self.kernel_id}' (optional).\")\n\n self._update_connection(connect_info)\n\n def _update_connection(self, connect_info: dict) -> None:\n \"\"\"\n Updates the connection info with that received from launcher. 
Also pulls the PID and PGID\n info, if present, in case we need to use it for lifecycle management.\n Note: Do NOT update connect_info with IP and other such artifacts in this method/function.\n \"\"\"\n\n if not connect_info:\n error_message = f\"Unexpected runtime encountered for Kernel ID '{self.kernel_id}' - \" \\\n f\"connection information is null!\"\n self.log_and_raise(RuntimeError(error_message))\n\n # Load new connection information into memory. No need to write back out to a file or track loopback, etc.\n # The launcher may also be sending back process info, so check and extract\n self._extract_pid_info(connect_info)\n self.log.debug(f\"Received connection info for KernelID '{self.kernel_id}' \"\n f\"from host '{self.assigned_host}': {connect_info}...\")\n\n self.connection_info.update(connect_info)\n\n def _extract_pid_info(self, connect_info: dict) -> None:\n \"\"\"\n Extracts any PID, PGID info from the payload received on the response socket.\n \"\"\"\n pid = connect_info.pop('pid', None)\n if pid:\n try:\n self.pid = int(pid)\n except ValueError:\n self.log.warning(f\"pid returned from kernel launcher is not an integer: {pid} - ignoring.\")\n pid = None\n pgid = connect_info.pop('pgid', None)\n if pgid:\n try:\n self.pgid = int(pgid)\n except ValueError:\n self.log.warning(f\"pgid returned from kernel launcher is not an integer: {pgid} - ignoring.\")\n pgid = None\n if pid or pgid: # if either process ids were updated, update the ip as well and don't use local_proc\n self.ip = self.assigned_ip\n if not RemoteProvisionerBase.ip_is_local(self.ip): # only unset local_proc if we're remote\n # FIXME - should we wait prior to unset?\n self.local_proc = None\n\n # TODO - convert to async\n async def _send_listener_request(self, request: dict, shutdown_socket: Optional[bool] = False) -> None:\n \"\"\"\n Sends the request dictionary to the kernel listener via the comm port. Caller is responsible for\n handling any exceptions.\n \"\"\"\n if self.comm_port > 0:\n sock = socket(AF_INET, SOCK_STREAM)\n try:\n sock.settimeout(socket_timeout)\n await asyncio.get_event_loop().sock_connect(sock, (self.comm_ip, self.comm_port)) # TODO - validate\n # sock.connect((self.comm_ip, self.comm_port))\n sock.send(json.dumps(request).encode(encoding='utf-8'))\n finally:\n if shutdown_socket:\n try:\n sock.shutdown(SHUT_WR)\n except Exception as e2:\n if isinstance(e2, OSError) and e2.errno == errno.ENOTCONN:\n pass # Listener is not connected. This is probably a follow-on to ECONNREFUSED on connect\n else:\n self.log.warning(\"Exception occurred attempting to shutdown communication socket to {}:{} \"\n \"for KernelID '{}' (ignored): {}\".format(self.comm_ip, self.comm_port,\n self.kernel_id, str(e2)))\n sock.close()\n\n @staticmethod\n def get_current_time() -> int:\n \"\"\" Return the current time (in milliseconds) from epoch.\n\n This method is intended for use in determining timeout values.\n \"\"\"\n float_time = time.time()\n return int(float_time * 1000) # Convert to ms and int\n\n @staticmethod\n def get_time_diff(start_time_ms: int) -> float:\n \"\"\" Return the difference (in seconds) between the given start_time and the current time \"\"\"\n end_time_ms = RemoteProvisionerBase.get_current_time()\n time_diff = float((end_time_ms - start_time_ms)/1000)\n return time_diff\n\n @staticmethod\n def ip_is_local(ip):\n \"\"\"Returns True if `ip` is considered local to this server, False otherwise. 
\"\"\"\n return localinterfaces.is_public_ip(ip) or localinterfaces.is_local_ip(ip)\n\n def _tunnel_to_kernel(self, connection_info: dict, server: str, port: int = ssh_port, key: Optional[str] = None):\n \"\"\"\n Tunnel connections to a kernel over SSH\n\n This will open five SSH tunnels from localhost on this machine to the\n ports associated with the kernel.\n See jupyter_client/connect.py for original implementation.\n \"\"\"\n cf = connection_info\n\n lports = self.select_ports(5)\n\n rports = cf['shell_port'], cf['iopub_port'], cf['stdin_port'], cf['hb_port'], cf['control_port']\n\n channels = KernelChannel.SHELL, KernelChannel.IOPUB, KernelChannel.STDIN, \\\n KernelChannel.HEARTBEAT, KernelChannel.CONTROL\n\n remote_ip = cf['ip']\n\n if not tunnel.try_passwordless_ssh(server + \":\" + str(port), key):\n self.log_and_raise(PermissionError(\"Must use password-less scheme by setting up the \"\n \"SSH public key on the cluster nodes\"))\n\n for lp, rp, kc in zip(lports, rports, channels):\n self._create_ssh_tunnel(kc, lp, rp, remote_ip, server, port, key)\n\n return tuple(lports)\n\n def _tunnel_to_port(self, kernel_channel: KernelChannel, remote_ip: str, remote_port: int, server: str,\n port: int = ssh_port, key: Optional[str] = None):\n \"\"\"\n Analogous to _tunnel_to_kernel, but deals with a single port. This will typically be called for\n any one-off ports that require tunnelling. Note - this method assumes that passwordless ssh is\n in use and has been previously validated.\n \"\"\"\n local_port = self.select_ports(1)[0]\n self._create_ssh_tunnel(kernel_channel, local_port, remote_port, remote_ip, server, port, key)\n return local_port\n\n def _create_ssh_tunnel(self, kernel_channel: KernelChannel, local_port: int, remote_port: int, remote_ip: str,\n server: str, port: int, key: Optional[str] = None):\n \"\"\"\n Creates an SSH tunnel between the local and remote port/server for the given kernel channel.\n \"\"\"\n channel_name = kernel_channel.value\n self.log.debug(f\"Creating SSH tunnel for '{channel_name}': 127.0.0.1:'{local_port}' \"\n f\"to '{remote_ip}':'{remote_port}'\")\n try:\n process = RemoteProvisionerBase._spawn_ssh_tunnel(local_port, remote_port, remote_ip, server, port, key)\n self.tunnel_processes[channel_name] = process\n except Exception as e:\n self.log_and_raise(RuntimeError(f\"Could not open SSH tunnel for port {channel_name}. Exception: '{e}'\"),\n chained=e)\n\n @staticmethod\n def _spawn_ssh_tunnel(local_port: int, remote_port: int, remote_ip: str,\n server: str, port: int, key: Optional[str] = None):\n \"\"\"\n This method spawns a child process to create an SSH tunnel and returns the spawned process.\n ZMQ's implementation returns a pid on UNIX based platforms and a process handle/reference on\n Win32. By consistently returning a process handle/reference on both UNIX and Win32 platforms,\n this method enables the caller to deal with the same currency regardless of the platform. For\n example, on both UNIX and Win32 platforms, the developer will have the option to stash the\n child process reference and manage it's lifecycle consistently.\n\n On UNIX based platforms, ZMQ's implementation is more generic to be able to handle various\n use-cases. ZMQ's implementation also requests the spawned process to go to background using\n '-f' command-line option. As a result, the spawned process becomes an orphan and any references\n to the process obtained using it's pid become stale. 
On the other hand, this implementation is\n        specifically for password-less SSH login WITHOUT the '-f' command-line option thereby allowing\n        the spawned process to be owned by the parent process. This allows the parent process to control\n        the lifecycle of its child processes and do appropriate cleanup during termination.\n        \"\"\"\n        if sys.platform == 'win32':\n            ssh_server = server + \":\" + str(port)\n            return tunnel.paramiko_tunnel(local_port, remote_port, ssh_server, remote_ip, key)\n        else:\n            ssh = \"ssh -p %s -o ServerAliveInterval=%i\" % (port, max_keep_alive_interval)\n            cmd = \"%s -S none -L 127.0.0.1:%i:%s:%i %s\" % (ssh, local_port, remote_ip, remote_port, server)\n            env = os.environ.copy()\n            env.pop('SSH_ASKPASS', None)  # dict.pop returns the popped value, so pop before spawning rather than passing the pop result as env\n            return pexpect.spawn(cmd, env=env)\n\n    def select_ports(self, count: int) -> List[int]:\n        \"\"\"\n        Selects and returns n random ports that adhere to the configured port range, if applicable.\n\n        Parameters\n        ----------\n        count : int\n            The number of ports to return\n\n        Returns\n        -------\n        List - ports available and adhering to the configured port range\n        \"\"\"\n        ports: List[int] = []\n        sockets: List[socket] = []\n        for i in range(count):\n            sock = self.select_socket()\n            ports.append(sock.getsockname()[1])\n            sockets.append(sock)\n        for sock in sockets:\n            sock.close()\n        return ports\n\n    def select_socket(self, ip: str = '') -> socket:\n        \"\"\"\n        Creates and returns a socket whose port adheres to the configured port range, if applicable.\n\n        Parameters\n        ----------\n        ip : str\n            Optional ip address to which the port is bound\n\n        Returns\n        -------\n        socket - Bound socket that is available and adheres to configured port range\n        \"\"\"\n        sock = socket(AF_INET, SOCK_STREAM)\n        found_port = False\n        retries = 0\n        while not found_port:\n            try:\n                sock.bind((ip, self._get_candidate_port()))\n                found_port = True\n            except Exception:\n                retries = retries + 1\n                if retries > max_port_range_retries:\n                    self.log_and_raise(RuntimeError(f\"Failed to locate port within range {self.port_range} \"\n                                                    f\"after {max_port_range_retries} retries!\"))\n        return sock\n\n    def _get_candidate_port(self):\n        \"\"\"Randomly selects a port number within the configured range.\n\n        If no range is configured, the 0 port is used - allowing the server to choose from the full range.\n        \"\"\"\n        range_size = self.upper_port - self.lower_port\n        if range_size == 0:\n            return 0\n        return random.randint(self.lower_port, self.upper_port)\n\n    # FIXME - Use jupyter_client/utils once it's available.\n    @staticmethod\n    def run_sync(coro):\n        def wrapped(*args, **kwargs):\n            import nest_asyncio\n            try:\n                loop = asyncio.get_event_loop()\n            except RuntimeError:\n                loop = asyncio.new_event_loop()\n                asyncio.set_event_loop(loop)\n            nest_asyncio.apply(loop)\n            return loop.run_until_complete(coro(*args, **kwargs))\n        wrapped.__doc__ = coro.__doc__\n        return wrapped\n","sub_path":"remote_provisioners/remote_provisioner.py","file_name":"remote_provisioner.py","file_ext":"py","file_size_in_byte":39417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
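# --- Standalone sketch (separate from the records around it): the port-selection idea
# from select_socket/_get_candidate_port above, as one self-contained function. Bind
# to port 0 to let the OS choose when no range is configured; otherwise retry random
# candidates inside the configured range.
import random
from socket import socket, AF_INET, SOCK_STREAM

def pick_port(lower: int = 0, upper: int = 0, retries: int = 100) -> int:
    for _ in range(retries):
        candidate = 0 if upper == lower else random.randint(lower, upper)
        sock = socket(AF_INET, SOCK_STREAM)
        try:
            sock.bind(('', candidate))
            return sock.getsockname()[1]  # resolves the actual port for the 0 case
        except OSError:
            continue  # candidate already in use; try another
        finally:
            sock.close()
    raise RuntimeError(f"no free port found in range {lower}-{upper}")

print(pick_port())            # OS-chosen ephemeral port
print(pick_port(9000, 9100))  # constrained to a configured range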
+{"seq_id":"25377607","text":"class Nodo:\n    def __init__(self,dato=None):\n        self.dato = dato\n        self.siguiente = None\n        self.anterior = None\n\nclass ListaCircularDoble:\n\n    def __init__(self):\n        self.primero = None\n        self.ultimo = None\n\n    def vacia(self):\n        if self.primero == None:\n            return True\n        else:\n            return False\n\n    def agregar_inicio(self,dato):\n        if self.vacia():\n            self.primero = self.ultimo = Nodo(dato)\n        else:\n            aux = Nodo(dato)\n            aux.siguiente = self.primero\n            self.primero.anterior = aux\n            self.primero = aux\n        self.unir_nodos()\n\n    def agregar_final(self,dato):\n        if self.vacia():\n            self.primero = self.ultimo = Nodo(dato)\n        else:\n            aux = self.ultimo\n            self.ultimo = aux.siguiente = Nodo(dato)\n            self.ultimo.anterior = aux\n        self.unir_nodos()\n\n    def EliminarPrimero(self):\n        if self.vacia():\n            print(\"The structure is empty\")\n        elif self.primero == self.ultimo:\n            self.primero = self.ultimo = None\n        else:\n            self.primero = self.primero.siguiente\n            self.unir_nodos()\n\n    def EliminarUltimo(self):\n        if self.vacia():\n            print(\"The structure is empty\")\n        elif self.primero == self.ultimo:\n            self.primero = self.ultimo = None\n        else:\n            self.ultimo = self.ultimo.anterior\n            self.unir_nodos()\n\n    def unir_nodos(self):\n        self.primero.anterior = self.ultimo\n        self.ultimo.siguiente = self.primero\n\n    def RecorrerInicioFin(self):\n        aux = self.primero\n        while aux:\n            print(aux.dato)\n            aux = aux.siguiente\n            if aux == self.primero:\n                break\n\n    def RecorrerFinInicio(self):\n        aux = self.ultimo\n        while aux:\n            print(aux.dato)\n            aux = aux.anterior\n            if aux == self.ultimo:\n                break\n\n    def longitud(self):\n        # an empty list has length 0; without this check the loop below reports 1\n        if self.vacia():\n            return 0\n        nodo = self.primero\n        numero = 1\n        while nodo != self.ultimo:\n            nodo=nodo.siguiente\n            numero+=1\n        return numero\n\n    def getPrimero(self):\n        return self.primero\n\n    def getUltimo(self):\n        return self.ultimo\n","sub_path":"ListaCircularDoble.py","file_name":"ListaCircularDoble.py","file_ext":"py","file_size_in_byte":2297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
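# --- Usage sketch for the circular doubly linked list record above (assumes the
# Nodo/ListaCircularDoble classes exactly as defined there).
lista = ListaCircularDoble()
for valor in (1, 2, 3):
    lista.agregar_final(valor)
lista.agregar_inicio(0)
lista.RecorrerInicioFin()        # prints 0, 1, 2, 3 (one per line), then stops after one full cycle
print(lista.longitud())          # 4
lista.EliminarPrimero()
print(lista.getPrimero().dato)   # 1
print(lista.getPrimero().anterior is lista.getUltimo())  # True: the list stays circular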
+{"seq_id":"144898506","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Mar 30 15:25:01 2020\r\n\r\n@author: Indu\r\n\"\"\"\r\n\r\n\r\nimport turtle\r\nimport math\r\n\r\ndef polyline(t, n, length, angle):\r\n    \"\"\"Draws n line segments.\r\n\r\n    t: Turtle object\r\n    n: number of line segments\r\n    length: length of each segment\r\n    angle: degrees between segments\r\n    \"\"\"\r\n    for i in range(n):\r\n        t.fd(length)\r\n        t.lt(angle)\r\n\r\ndef arc(t, r, angle):\r\n    arc_length = 2 * math.pi * r * abs(angle) / 360\r\n    n = int(arc_length / 4) + 3\r\n    step_length = arc_length / n\r\n    step_angle = float(angle) / n\r\n\r\n    # making a slight left turn before starting reduces\r\n    # the error caused by the linear approximation of the arc\r\n    t.lt(step_angle/2)\r\n    polyline(t, n, step_length, step_angle)\r\n    t.rt(step_angle/2)\r\n\r\n\r\ndef petal(t, r, angle):\r\n    \"\"\"Draws a petal using two arcs.\r\n\r\n    t: Turtle\r\n    r: radius of the arcs\r\n    angle: angle (degrees) that subtends the arcs\r\n    \"\"\"\r\n    for i in range(2):\r\n        arc(t, r, angle)\r\n        t.lt(180-angle)\r\n\r\n\r\ndef flower(t, n, r, angle):\r\n    \"\"\"Draws a flower with n petals.\r\n\r\n    t: Turtle\r\n    n: number of petals\r\n    r: radius of the arcs\r\n    angle: angle (degrees) that subtends the arcs\r\n    \"\"\"\r\n    for i in range(n):\r\n        petal(t, r, angle)\r\n        t.lt(360.0/n)\r\n\r\n\r\ndef move(t, length):\r\n    \"\"\"Move Turtle (t) forward (length) units without leaving a trail.\r\n    Leaves the pen down.\r\n    \"\"\"\r\n    t.pu()\r\n    t.fd(length)\r\n    t.pd()\r\n\r\n\r\nbob = turtle.Turtle()\r\n\r\n# draw a sequence of three flowers, as shown in the book.\r\nmove(bob, -100)\r\nflower(bob, 7, 60.0, 60.0)\r\n\r\nmove(bob, 100)\r\nflower(bob, 10, 40.0, 80.0)\r\n\r\nmove(bob, 100)\r\nflower(bob, 20, 140.0, 20.0)\r\n\r\nbob.hideturtle()\r\nturtle.mainloop()","sub_path":"Flowers.py","file_name":"Flowers.py","file_ext":"py","file_size_in_byte":1751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
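# --- Worked example (separate from the record above) of the arc() approximation: a
# circular arc of radius r subtending `angle` degrees is drawn as n straight segments,
# where n grows with the arc length so the linear-approximation error stays small.
import math

r, angle = 60.0, 60.0                        # same values as the first flower above
arc_length = 2 * math.pi * r * abs(angle) / 360
n = int(arc_length / 4) + 3                  # roughly one segment per 4 units of arc
step_length = arc_length / n
step_angle = angle / n
print(arc_length, n, step_length, step_angle)
# ~62.83 units of arc drawn as 18 segments of ~3.49 units, turning ~3.33 degrees each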
+{"seq_id":"277086322","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport copy\n\nimport six\n\nimport chainer\nfrom chainer.training import extensions\nfrom chainer.dataset import convert\nfrom chainer.dataset import iterator as iterator_module\nfrom chainer import link\nfrom chainer import reporter as reporter_module\nfrom chainer.training import extension\nfrom chainer import variable\nfrom chainer import cuda\nimport numpy as np\nimport cupy\nimport os\nimport csv\nimport chainer.functions as F\nimport ioutil\nimport variableutil as Vutil\nimport math\nfrom tqdm import tqdm\nfrom multiprocessing import Process\n\nclass DeconvAcquirer(extensions.Evaluator):\n    lastname = 'validation/main/loss'\n    layer_rank = None\n    layer_name = None\n    operation = 'max'\n    top = None\n    n_features = None\n    mean = None\n    switch_1stlayer = False\n    guided = True\n    ignore_bias = True\n    fixed_RMS = 0.020\n    rms_axis = 0\n    gamma = 2.0\n\n\n    '''trigger = 1, 'epoch'\n    default_name = 'validation'\n    priority = extension.PRIORITY_WRITER\n\n    def __init__(self, iterator, target, converter=convert.concat_examples,\n                 device=None, eval_hook=None, eval_func=None):\n        if isinstance(iterator, iterator_module.Iterator):\n            iterator = {'main': iterator}\n        self._iterators = iterator\n\n        if isinstance(target, link.Link):\n            target = {'main': target}\n        self._targets = target\n\n        self.converter = converter\n        self.device = device\n        self.eval_hook = eval_hook\n        self.eval_func = eval_func'''\n\n    def deconv(self, variable):\n        v = variable\n        # Walk back from the current layer until we reach the input layer\n        while (v.creator is not None):\n            bottom_blob = v.creator.inputs[0]\n            #print(v.creator.label, v.rank)\n            # Convolution -> Deconvolution\n            if (v.creator.label == 'Convolution2DFunction'):\n                bottom_blob.data = Vutil.invert_convolution(v,\n                    guided=self.guided, ignore_bias=self.ignore_bias,\n                    rms=self.fixed_RMS, gamma=self.gamma)\n            # relu -> relu\n            elif (v.creator.label == 'ReLU'):\n                bottom_blob.data = Vutil.invert_relu(v)\n            # Pooling -> UnPooling\n            elif (v.creator.label == 'MaxPooling2D'):\n                bottom_blob.data = Vutil.invert_maxpooling(v, guided=self.guided)\n            # Fully-connected: transpose\n            elif (v.creator.label == 'LinearFunction'):\n                bottom_blob.data = Vutil.invert_linear(v,\n                    guided=self.guided, ignore_bias=self.ignore_bias,\n                    rms=self.fixed_RMS, gamma=self.gamma)\n            # Anything else (LRN, etc.): pass the data through unchanged\n            else:\n                bottom_blob.data = v.data\n            # Step to the previous layer\n            v = v.creator.inputs[0]\n\n    def get_deconv(self, variable, indices):\n        # 1. Zero out everything except the most strongly activated location\n        #maxbounds = self.get_max_patch_bounds(loss, rank, indices)\n        isfc = Vutil.has_fc_layer(variable)\n        # Case: visualizing a fully-connected layer\n        if isfc:\n            values = Vutil.get_fc_info(variable, indices)\n            variable.data.fill(0)\n            for i, (j, v) in enumerate(zip(indices, values)):\n                variable.data[i, j] = v\n        # Case: visualizing a convolution or pooling layer\n        else:\n            maxinfo = Vutil.get_max_info(variable, indices)\n            variable.data.fill(0)\n            for i, (c, info) in enumerate(zip(indices, maxinfo)):\n                variable.data[i, c, info[1], info[0]] = info[2]\n\n        # 2. Repeat the inverse operations all the way down to the input layer\n        data_layer = Vutil.get_data_layer(variable)\n        xp = cuda.get_array_module(data_layer.data)\n\n        fixed_RMS = 300\n        if xp == cupy:\n            rms = cupy.sqrt(cupy.sum(data_layer.data ** 2, axis=(1,2,3)) / np.product(data_layer.data.shape[1:]))\n            #rms = cupy.sqrt(cupy.sum(convW ** 2, axis=(2, 3)) / np.product(convW.shape[2:]))\n        else:\n            rms = np.linalg.norm(data_layer.data, axis=(1,2,3)) ** 2 / np.product(data_layer.data.shape[1:])\n            #rms = np.linalg.norm(convW, axis=(2, 3)) ** 2 / np.product(convW.shape[2:])\n        scale = fixed_RMS / rms\n        scale = scale.reshape(-1,1,1,1)\n        #print(rms, scale)\n        #data_layer.data *= scale\n\n        self.deconv(variable)\n\n        return data_layer.data\n\n    def save_deconv(self, k, f, d, b):\n        # Add the mean image back to the deconvolved input layer and convert to an image\n        img = ioutil.deprocess(d, self.mean)\n        # Crop to the input-layer region needed for the max location, then save\n        img.crop((b[0], b[2], b[1], b[3])).save(\n            os.path.join(self.deconv_image_dir,\n                         \"{0:0>4}_{1:0>2}.png\".format(f, k)))\n\n    def __call__(self, trainer):\n        \"\"\"override method of extensions.Evaluator.\"\"\"\n        # set up a reporter\n        reporter = reporter_module.Reporter()\n        if hasattr(self, 'name'):\n            prefix = self.name + '/'\n        else:\n            prefix = ''\n        for name, target in six.iteritems(self._targets):\n            reporter.add_observer(prefix + name, target)\n            reporter.add_observers(prefix + name,\n                                   target.namedlinks(skipself=True))\n\n        with reporter:\n            self.deconv_image_dir = os.path.join(trainer.out, 'deconv_' + self.layer_name)\n            if not os.path.exists(self.deconv_image_dir):\n                os.makedirs(self.deconv_image_dir)\n            result, locs, bounds = self.evaluate()\n            if not os.path.exists(trainer.out):\n                os.makedirs(trainer.out)\n            #print(bounds)\n            #ioutil.savetxt(os.path.join(trainer.out, self.layer_name + '.txt'),\n            #              features, delimiter='\\t')\n            #cupy.savez(os.path.join(trainer.out, self.layer_name + '.npz'),\n            #          **{self.layer_name: features})\n            '''self.save_tuple_list(os.path.join(trainer.out,\n                'maxloc_' + self.layer_name + '.txt'), locs)\n            self.save_tuple_list(os.path.join(trainer.out,\n                'maxbounds_' + self.layer_name + '.txt'), bounds)'''\n            reporter_module.report(result)\n        return result\n
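# --- Standalone numpy sketch (not part of the class around it) of the RMS scaling
# that get_deconv computes: rms = sqrt(mean(x^2)) per sample, scale = fixed_RMS / rms.
import numpy as np

def rms_scale(batch: np.ndarray, fixed_rms: float = 300.0) -> np.ndarray:
    # batch has shape (N, C, H, W); one RMS value per sample over all other axes
    rms = np.sqrt((batch ** 2).sum(axis=(1, 2, 3)) / np.prod(batch.shape[1:]))
    return (fixed_rms / rms).reshape(-1, 1, 1, 1)

x = np.random.randn(2, 3, 4, 4)
print(rms_scale(x).shape)  # (2, 1, 1, 1): broadcastable scale factors
# Note: the CPU branch in the record above computes norm**2 / size (mean square,
# no sqrt), which disagrees with the GPU branch; the sqrt form here matches the
# GPU (cupy) branch.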
    def evaluate(self):\n        \"\"\"override method of extensions.Evaluator.\"\"\"\n\n        iterator = self._iterators['main']\n        target = self._targets['main']\n        eval_func = self.eval_func or target\n\n        if self.eval_hook:\n            self.eval_hook(self)\n        it = copy.copy(iterator)\n        summary = reporter_module.DictSummary()\n        max_locs = []\n        bounds = []\n        n_processed = 0\n        filter_idx = 0\n        pbar = tqdm(total=len(iterator.dataset))\n        for batch in it:\n            observation = {}\n            with reporter_module.report_scope(observation):\n                in_arrays = self.converter(batch, self.device)\n                if isinstance(in_arrays, tuple):\n                    in_vars = tuple(variable.Variable(x, volatile='off')\n                                    for x in in_arrays)\n                    eval_func(*in_vars)\n                elif isinstance(in_arrays, dict):\n                    in_vars = {key: variable.Variable(x, volatile='off')\n                               for key, x in six.iteritems(in_arrays)}\n                    eval_func(**in_vars)\n                else:\n                    in_var = variable.Variable(in_arrays, volatile='off')\n                    eval_func(in_var)\n            pbar.update(len(batch))\n\n            indices = np.arange(filter_idx, filter_idx + len(batch)) % self.n_features\n            #print('x', in_vars[0].data[0])\n            # Get the Variable of the layer to deconvolve\n            layer_variable = Vutil.get_variable(\n                observation[self.lastname], self.layer_rank)\n            # Get the input-layer region needed to locate the maximum activation\n            isfc = Vutil.has_fc_layer(layer_variable)\n            if isfc:\n                batch_bounds = Vutil.get_data_bounds(layer_variable)\n            else:\n                batch_bounds = Vutil.get_max_bounds(layer_variable, indices)\n            # Run the deconvolution\n            deconv_data = self.get_deconv(\n                layer_variable, indices)\n\n            topk = np.arange(n_processed, n_processed + len(batch)) // self.n_features\n\n            jobs = []\n            for k, f, d, b in zip(topk, indices, deconv_data, batch_bounds):\n                # Add the mean image back to the deconvolved input layer and convert to an image\n                img = ioutil.deprocess(d.get(), self.mean)\n                # Crop to the input-layer region needed for the max location, then save\n                img.crop((b[0], b[2], b[1], b[3])).save(\n                    os.path.join(self.deconv_image_dir,\n                                 \"{0:0>4}_{1:0>2}.png\".format(f, k)))\n                #p=Process(target=self.save_deconv, args=(k,f,d.get(),b))\n                #p.start()\n                #jobs.append(p)\n            #for j in jobs: j.join()\n\n            '''max_locs.extend(self.get_max_locs(\n                observation[self.lastname], self.layer_rank, indices))\n            bounds.extend(batch_bounds))'''\n            filter_idx = (filter_idx + len(batch)) % self.n_features\n            n_processed += len(batch)\n\n            #self.add_to_confmat(self.confmat, in_vars[1].data, self.getpred(observation[self.lastname]))\n            summary.add(observation)\n        pbar.close()\n        #print(self.confmat)\n        #print(np.diag(self.confmat))\n        #print(1.0 * np.diag(self.confmat).sum() / self.confmat.sum())\n        return summary.compute_mean(), max_locs, bounds\n","sub_path":"utils/deconvacquirer.py","file_name":"deconvacquirer.py","file_ext":"py","file_size_in_byte":9758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"368596154","text":"# Answer to exercise 1 in chapter 11 \n#\n# Exercise from: A Common-Sense Guide to Data Structures and Algorithms \n#                Level Up Your Core Programming Skills \n#                by Jay Wengrow and edited by Brian MacDonald\n\nimport sys\n\ndef count_characters(array: list):\n\t\"\"\"given a list:array of strings return the character count\"\"\" \n\tif len(array) == 1: # base case\n\t\treturn len(array[0])\t\n\telif len(array) == 0: # edge case of empty array\n\t\treturn 0 \n\telse:\n\t\treturn len(array[0]) + count_characters(array[1:])\n\ndef main(array: str):\n\tprint(f'Total characters in the array: {count_characters(array)}')\n\nif __name__ == '__main__':\n\t# example usage:\n    # $python count_characters_in_array.py ab,c,def,ghij \n    # -> Total characters in the array: 10 \n    # argv[1] is a list of characters separated by commas with no spaces\n\tmain(str(sys.argv[1]).split(','))\n","sub_path":"chapter_11/count_characters_in_array.py","file_name":"count_characters_in_array.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
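# --- Companion sketch (separate from the record above): the same character count
# done iteratively, plus a quick check against the recursive version. Recursion
# depth equals the list length, so the iterative form is safer for long inputs.
def count_characters_iter(array: list) -> int:
    return sum(len(s) for s in array)

assert count_characters_iter(['ab', 'c', 'def', 'ghij']) == 10
# count_characters(['ab', 'c', 'def', 'ghij']) from the record above also returns 10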
+{"seq_id":"443308576","text":"import pandas as pd\n\ndef readXLSX(fname):\n    xl = pd.read_excel(fname)\n    pyram = xl.loc[xl['cell_type'] == 'P']\n    return pyram\n\nwt = pd.DataFrame(columns =\n    ['cell_name', 'moving_rate', 'stopped_rate', \n    'info/spike', 'info/sec'])\n\nko = pd.DataFrame(columns =\n    ['cell_name', 'moving_rate', 'stopped_rate', \n    'info/spike', 'info/sec'])\n\nWTs = ['10547PFs.xlsx', '10603PFs.xlsx']\nKOs = ['10601PFs.xlsx', '10551PFs.xlsx', '10608PFs.xlsx']\n    \nfor dataset in WTs:\n    df = readXLSX(dataset)\n    wt = wt.append(df)\n\nfor dataset in KOs:\n    df = readXLSX(dataset)\n    ko = ko.append(df)\n\n##sort by info/spike\nwt_sorted = wt.sort_values('info/spike')\nko_sorted = ko.sort_values('info/spike')\n\nprint(wt_sorted[['cell_name','info/spike', 'nspikes']])\n\nprint(ko_sorted[['cell_name', 'info/spike','nspikes']])\n\n##for each t file in the spreadsheet make a place field map\n\n##add the spike waveform and ISI\n\n##output by rank (e.g.) WT_PF_1.png WT_PF_2.png etc.\n\n##combine into single PDF for WT and KO\n\n\n","sub_path":"ShowBestPlaceFields.py","file_name":"ShowBestPlaceFields.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"44447844","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov 23 15:32:10 2017\n\n@author: limchaos\n\"\"\"\n\nimport os\nimport shutil\nimport time\nstart = time.perf_counter()  # time.clock() was removed in Python 3.8\npath='/home/limchaos/MYCNN/datasets/unfire'\nnewpath='/home/limchaos/MYCNN/datasets/test'\nqueue = []\nqueue.append(path)\ni=1\nwhile len(queue) > 0:\n    tmp = queue.pop(0)\n    if (os.path.isdir(tmp)):  # if the path is a directory\n        for item in os.listdir(tmp):  # iterate over the files and folders under it\n            queue.append(os.path.join(tmp, item))  # append each resulting path to the queue\n    elif (os.path.isfile(tmp)):  # if the path is a file\n        name = os.path.basename(tmp)  # get the file name\n        dirname = os.path.dirname(tmp)  # get the directory of the file\n        full_path = os.path.join(dirname, name)  # join directory and name into the full path\n        des_path = newpath+'/fire.'+str(i)+'.jpg'\n        i=i+1  # destination path: a running index becomes part of the new file name\n        shutil.move(full_path, des_path)  # move the file to the destination (move + rename)\nend = time.perf_counter()\nprint(end-start)\n","sub_path":"tool/movetool.py","file_name":"movetool.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"648181132","text":"from __future__ import print_function\nimport datetime\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision.models as models\nfrom torch.optim.lr_scheduler import StepLR\nimport torch.optim as optim\nimport numpy as np\nimport torch\nimport SimpleITK as sitk\nfrom tqdm import tqdm\nimport argparse\nimport pickle\nimport os\nimport shutil\nimport matplotlib.pyplot as plt\nfrom cxr_resunet import UNet\nfrom cxr_attention_resunet import Attention_UNet, init_weights\nimport losses\nimport dataloader_cxr\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--batch_size', type=int, default = 1)\nparser.add_argument('--LR', type=float, default = 0.001)\nparser.add_argument('--restart_training', type=str, default = 'true')\nparser.add_argument('--datapath', type=str)\nparser.add_argument('--checkpoint_plot_dir', type=str)\nparser.add_argument('--model_path', type=str)\nargs = parser.parse_args()\n\nbatch_size = args.batch_size\nLR = args.LR\ndata_path = args.datapath\nmodel_path = args.model_path\nexperiment_dir = args.checkpoint_plot_dir\n\nload_old_lists = False\n\nmodeltype = 'resunet'\nuse_focal = False\nuse_attention = False\nuse_multiinput_architecture = True\ninit_weights_xavier = False\ninvert = False\nremove_wires=False\n# lr best working is 0.00001 with scheduler (10,0.95)\n\nn_classes = 2\nclubbed = []\nclasses = [0,1]\nmodel_depth = 5\nwf = 5\n\nfocal_alpha = 0.5\nfocal_gamma = 1.0\n\ntrain_epoch = 300\nprint_every = 50000000\nsave_every = 1000 # epoch\nvalid_every = 5 # epoch\nimage_resolution = [512,512]\n\ngamma0 = 1\ngamma1 = 1\n\n# lambda_l1 = 0.0\n# lambda_l2 = 0.0\nscheduler_step_size = 10\nscheduler_gamma = 0.95\n\nif classes[0] != 0:\n    exclude_0 = True\nelse:\n    exclude_0 = False\n\nif not os.path.isdir(experiment_dir):\n    os.mkdir(experiment_dir)\n\nmodel_checkpoint_dir = os.path.join(experiment_dir,'checkpoint_dir')\nplot_main_dir = os.path.join(experiment_dir,'Plots')\nplots_dir = os.path.join(experiment_dir,'Plots/fig')\nplots_pickle_dir = os.path.join(experiment_dir,'Plots/pickle')\n\nif args.restart_training == 'true':\n    if os.path.isdir(model_checkpoint_dir):\n        
shutil.rmtree(model_checkpoint_dir, ignore_errors=True)\n if os.path.isdir(plot_main_dir):\n shutil.rmtree(plot_main_dir, ignore_errors=True)\n \n os.mkdir(model_checkpoint_dir)\n os.mkdir(plot_main_dir)\n os.mkdir(plots_dir)\n os.mkdir(plots_pickle_dir)\n\nelif args.restart_training == 'false':\n \n if not os.path.isdir(model_checkpoint_dir):\n os.mkdir(model_checkpoint_dir)\n\n if not os.path.isdir(plot_main_dir):\n os.mkdir(plot_main_dir)\n \n if not os.path.isdir(plots_dir):\n os.mkdir(plots_dir)\n \n if not os.path.isdir(plots_pickle_dir):\n os.mkdir(plots_pickle_dir)\n\n\n# gpu settings\nuse_cuda = torch.cuda.is_available()\nprint('gpu status =', use_cuda)\ndevice = torch.device(\"cuda\" if use_cuda else \"cpu\")\nkwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}\n\n# using seed so to be deterministic\nnp.random.seed(0)\ntorch.manual_seed(0)\ntorch.cuda.empty_cache()\ntorch.backends.cudnn.deterministic = False\ntorch.backends.cudnn.benchmark = True\n\ndef main():\n if args.restart_training == 'true':\n if use_multiinput_architecture is False:\n if modeltype == 'unet':\n model = UNet(n_classes=n_classes, padding=True, depth=model_depth, wf=wf, up_mode='upsample', batch_norm=True, residual=False).double().to(device)\n elif modeltype == 'resunet':\n model = UNet(n_classes=n_classes, padding=True, depth=model_depth, wf=wf, up_mode='upsample', batch_norm=True, residual=True).double().to(device)\n \n elif use_multiinput_architecture is True:\n if modeltype == 'unet':\n model = Attention_UNet(n_classes=n_classes, padding=True, up_mode='upconv', batch_norm=True, residual=False, wf=wf, use_attention=use_attention).double().to(device)\n elif modeltype == 'resunet':\n model = Attention_UNet(n_classes=n_classes, padding=True, up_mode='upconv', batch_norm=True, residual=True, wf=wf, use_attention=use_attention).double().to(device)\n \n if init_weights_xavier is True:\n init_weights(model)\n\n else:\n if use_multiinput_architecture is False:\n if modeltype == 'unet':\n model = UNet(n_classes=n_classes, padding=True, depth=model_depth, wf=wf, up_mode='upsample', batch_norm=True, residual=False).double().to(device)\n elif modeltype == 'resunet':\n model = UNet(n_classes=n_classes, padding=True, depth=model_depth, wf=wf, up_mode='upsample', batch_norm=True, residual=True).double().to(device)\n\n checkpoint = torch.load(args.model_path, map_location=lambda storage, loc: storage)\n pretrained_dict = checkpoint['model_state_dict']\n\n model_dict = model.state_dict()\n # 1. filter out unnecessary keys\n pretrained_dict = {k: v for k, v in pretrained_dict.items() if k not in ['last.weight', 'last.bias']}\n # 2. overwrite entries in the existing state dict\n model_dict.update(pretrained_dict) \n # 3. 
load the new state dict\n model.load_state_dict(model_dict)\n\n elif use_multiinput_architecture is True:\n if modeltype == 'unet':\n model = Attention_UNet(n_classes=n_classes, padding=True, up_mode='upconv', batch_norm=True, residual=False, wf=wf, use_attention=use_attention).double().to(device)\n elif modeltype == 'resunet':\n model = Attention_UNet(n_classes=n_classes, padding=True, up_mode='upconv', batch_norm=True, residual=True, wf=wf, use_attention=use_attention).double().to(device)\n \n checkpoint = torch.load(args.model_path, map_location=lambda storage, loc: storage)\n model.load_state_dict(checkpoint['model_state_dict'])\n\n train_loader = dataloader_cxr.DataLoader(data_path, dataloader_type='train', batchsize=batch_size, device=device, image_resolution=image_resolution, invert=invert, remove_wires=remove_wires)\n print('trainloader loaded')\n valid_loader = dataloader_cxr.DataLoader(data_path, dataloader_type='valid', batchsize=batch_size, device=device, image_resolution=image_resolution, invert=invert, remove_wires=remove_wires)\n print('validloader loaded')\n \n\n loss_list_train_epoch = [None] \n dice_score_list_train_epoch = [None]\n epoch_data_list = [None]\n loss_list_train = [None]\n dice_score_list_train = [None]\n index_data_list = [None]\n\n loss_list_validation = [None]\n loss_list_validation_index = [None]\n dice_score_list_validation = [None]\n dice_score_list_validation_0 = [None]\n dice_score_list_validation_1 = [None]\n \n epoch_old = 0\n if load_old_lists == True:\n if args.restart_training == 'false':\n epoch_old = checkpoint['epochs'] \n \n if checkpoint['train_loss_list_epoch'][-1] == None:\n loss_list_train = [None]\n index_data_list = [None]\n dice_score_list_train = [None]\n dice_score_list_train_epoch = [None]\n loss_list_train_epoch = [None]\n epoch_data_list = [None] \n\n else:\n loss_list_train = checkpoint['train_loss_list']\n index_data_list = checkpoint['train_loss_index']\n dice_score_list_train = checkpoint['train_dice_score_list']\n dice_score_list_train_epoch = checkpoint['train_dice_score_list_epoch']\n loss_list_train_epoch = checkpoint['train_loss_list_epoch']\n epoch_data_list = checkpoint['train_loss_index_epoch']\n \n if checkpoint['valid_loss_list'][-1] == None:\n loss_list_validation = [None]\n loss_list_validation_index = [None]\n\n dice_score_list_validation = [None]\n dice_score_list_validation_0 = [None]\n dice_score_list_validation_1 = [None]\n\n else:\n loss_list_validation = checkpoint['valid_loss_list'] \n loss_list_validation_index = checkpoint['valid_loss_index']\n dice_score_list_validation = checkpoint['valid_dice_score_list'] \n dice_score_list_validation_0 = checkpoint['valid_dice_score_list_0']\n dice_score_list_validation_1 = checkpoint['valid_dice_score_list_1']\n best_model_accuracy = np.max(dice_score_list_validation[1:])\n\n if len(train_loader.data_list)%batch_size ==0:\n total_idx_train = len(train_loader.data_list)//batch_size\n else:\n total_idx_train = len(train_loader.data_list)//batch_size + 1\n\n if len(valid_loader.data_list)%batch_size ==0:\n total_idx_valid = len(valid_loader.data_list)//batch_size\n else:\n total_idx_valid = len(valid_loader.data_list)//batch_size + 1\n \n if epoch_old != 0:\n power_factor = epoch_old//scheduler_step_size \n LR_ = LR*(scheduler_gamma**power_factor)\n else:\n LR_ = LR\n\n LR_ = LR\n optimizer = optim.Adam(model.parameters(), lr=LR_)\n # optimizer = optim.SGD(model.parameters(), lr=LR_, momentum=0.9)\n scheduler = StepLR(optimizer, step_size=scheduler_step_size, 
gamma=scheduler_gamma)\n\n for epoch in range(epoch_old, train_epoch):\n\n if (epoch+1)%10 == 0:\n # l1 = l1*1.0\n # l2 = l2*1.0\n scheduler.step()\n \n\n running_loss = 0.0\n running_dice_score = 0.0\n running_train_count = 0\n \n epoch_loss = 0.0\n epoch_dice_score = 0.0\n train_count = 0\n\n model.train() \n for idx in range(total_idx_train):\n\n optimizer.zero_grad()\n\n batch_images_input, batch_label_input = train_loader[idx]\n output = model(batch_images_input)\n\n if use_multiinput_architecture is False:\n if use_focal is False:\n loss = losses.dice_loss(output, batch_label_input, exclude_0, weights=torch.Tensor([gamma0,gamma1]).double().to(device))\n else:\n loss = losses.focal_tversky_loss(output, batch_label_input, exclude_0, weights=torch.Tensor([gamma0,gamma1]).double().to(device), alpha=focal_alpha, focal_gamma=focal_gamma)\n\n elif use_multiinput_architecture is True:\n loss = losses.focal_tversky_loss_deep_supervised(output, batch_label_input, exclude_0, weights=torch.Tensor([gamma0,gamma1]).double().to(device), alpha=focal_alpha, focal_gamma=focal_gamma)\n \n # l1 = 0\n # l2 = 0\n # for p in model.parameters():\n # l1 = l1 + p.abs().sum()\n # l2 = l2 + (p**2).sum()\n # loss = loss + lambda_l1 * l1 + lambda_l2 * l2\n\n loss.backward()\n optimizer.step()\n \n running_loss += loss.item()*batch_images_input.shape[0]\n running_train_count += batch_images_input.shape[0]\n\n if use_multiinput_architecture is False:\n score = losses.dice_score(output, batch_label_input, exclude_0)\n else:\n score = losses.dice_score(output[-1], batch_label_input, exclude_0)\n\n running_dice_score += (score.sum().item() / score.size(0)) * batch_images_input.shape[0]\n epoch_dice_score += (score.sum().item() / score.size(0)) * batch_images_input.shape[0]\n\n epoch_loss += loss.item()*batch_images_input.shape[0]\n train_count += batch_images_input.shape[0] \n\n if (idx+1) % print_every == 0: # print every print_every folder of data\n print('Epoch Data Completed:%d Loss:%.6f Dice Score:%.6f' %\n ((idx+1)*batch_size, running_loss / running_train_count, running_dice_score / running_train_count),' Time:',datetime.datetime.now())\n \n index_data_list.append(epoch*total_idx_train*batch_size+(idx+1)*batch_size)\n loss_list_train.append(running_loss / running_train_count)\n dice_score_list_train.append(running_dice_score / running_train_count)\n\n running_loss = 0.0\n running_dice_score = 0.0\n running_train_count = 0\n \n plt.plot(index_data_list[1:], loss_list_train[1:], label = \"Training\", color='green', marker='o', markerfacecolor='blue', markersize=5)\n plt.xlabel('Slices encountered') \n plt.ylabel('Loss') \n plt.savefig(plots_dir + '/train_plot_loss_running.png')\n plt.clf()\n\n plt.plot(index_data_list[1:], dice_score_list_train[1:], label = \"Training\", color='green', marker='o', markerfacecolor='blue', markersize=5)\n plt.xlabel('Slices encountered') \n plt.ylabel('Dice Score') \n plt.savefig(plots_dir + '/train_plot_dice_score_running.png')\n plt.clf()\n \n training_pickle = open(plots_pickle_dir + \"/loss_list_train_running.npy\",'wb')\n pickle.dump(loss_list_train,training_pickle)\n training_pickle.close()\n\n training_pickle = open(plots_pickle_dir + \"/index_list_train_running.npy\",'wb')\n pickle.dump(index_data_list,training_pickle)\n training_pickle.close()\n\n training_pickle = open(plots_pickle_dir + \"/dice_score_list_train_running.npy\",'wb')\n pickle.dump(dice_score_list_train,training_pickle)\n training_pickle.close()\n \n loss_list_train_epoch.append(epoch_loss/train_count)\n 
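# --- Standalone sketch (not part of the training script around it) of the loss
# bookkeeping used above: batch losses are accumulated weighted by batch size, so the
# epoch loss is a true per-sample mean even when the last batch is smaller.
batch_losses = [(0.9, 8), (0.7, 8), (0.5, 3)]   # (mean loss, batch size), made-up values
total, count = 0.0, 0
for loss_value, n in batch_losses:
    total += loss_value * n                      # same as loss.item() * batch_images_input.shape[0]
    count += n
print(total / count)                             # 0.7526..., not the naive mean 0.7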
epoch_data_list.append(epoch+1)\n dice_score_list_train_epoch.append(epoch_dice_score/train_count)\n\n print('Epoch %d Training Loss: %.3f Dice Score: %.3f' % (epoch + 1, loss_list_train_epoch[-1], dice_score_list_train_epoch[-1]),' Time:',datetime.datetime.now() )\n\n plt.plot(epoch_data_list[1:], loss_list_train_epoch[1:], label = \"Training\", color='red', marker='o', markerfacecolor='yellow', markersize=5)\n plt.xlabel('Epoch') \n plt.ylabel('Training Loss') \n plt.savefig(plots_dir + '/train_loss_plot.png')\n plt.clf()\n\n plt.plot(epoch_data_list[1:], dice_score_list_train_epoch[1:], label = \"Training\", color='red', marker='o', markerfacecolor='yellow', markersize=5)\n plt.xlabel('Epoch') \n plt.ylabel('Training Dice Score') \n plt.savefig(plots_dir + '/train_dice_score_plot.png')\n plt.clf()\n \n # training_pickle = open(plots_pickle_dir + \"/loss_list_train.npy\",'wb')\n # pickle.dump(loss_list_train_epoch,training_pickle)\n # training_pickle.close()\n\n # training_pickle = open(plots_pickle_dir + \"/epoch_list_train.npy\",'wb')\n # pickle.dump(epoch_data_list,training_pickle)\n # training_pickle.close()\n\n # training_pickle = open(plots_pickle_dir + \"/dice_score_list_train_epoch.npy\",'wb')\n # pickle.dump(dice_score_list_train_epoch,training_pickle)\n # training_pickle.close()\n\n\n if (epoch+1) % save_every == 0:\n print('Saving model at %d epoch' % (epoch + 1),' Time:',datetime.datetime.now()) # save every save_every mini_batch of data\n torch.save({\n 'epochs': epoch+1,\n 'batchsize': batch_size,\n 'train_loss_list': loss_list_train,\n 'train_loss_list_epoch':loss_list_train_epoch,\n 'train_dice_score_list': dice_score_list_train,\n 'train_dice_score_list_epoch': dice_score_list_train_epoch,\n 'train_loss_index': index_data_list,\n 'train_loss_index_epoch': epoch_data_list,\n 'valid_loss_list': loss_list_validation,\n 'valid_dice_score_list': dice_score_list_validation,\n 'valid_dice_score_list_0': dice_score_list_validation_0,\n 'valid_dice_score_list_1': dice_score_list_validation_1,\n 'valid_loss_index': loss_list_validation_index,\n 'model_state_dict': model.state_dict(),\n }, model_checkpoint_dir + '/model_%d.pth' % (epoch + 1))\n \n\n if (epoch+1) % valid_every == 0:\n model.eval() \n optimizer.zero_grad() \n\n valid_count = 0\n total_loss_valid = 0.0\n valid_dice_score = 0.0\n if 0 in classes:\n valid_dice_score_0 = 0.0\n if 1 in classes:\n valid_dice_score_1 = 0.0\n\n for idx in range(total_idx_valid):\n with torch.no_grad():\n\n batch_images_input, batch_label_input = valid_loader[idx]\n\n output = model(batch_images_input)\n \n if use_multiinput_architecture is False:\n loss = losses.dice_loss(output, batch_label_input, exclude_0)\n else:\n loss = losses.dice_loss(output[-1], batch_label_input, exclude_0)\n\n total_loss_valid += loss.item()*batch_images_input.shape[0] \n valid_count += batch_images_input.shape[0]\n\n if use_multiinput_architecture is False:\n score = losses.dice_score(output, batch_label_input, exclude_0)\n else:\n score = losses.dice_score(output[-1], batch_label_input, exclude_0)\n\n valid_dice_score += (score.sum().item() / score.size(0)) * batch_images_input.shape[0] \n\n if 0 in classes and 1 in classes and len(classes) == 2 and len(clubbed) == 0:\n valid_dice_score_0 += score[0].item() * batch_images_input.shape[0] \n valid_dice_score_1 += score[1].item() * batch_images_input.shape[0]\n elif 1 in classes and len(classes)==1:\n valid_dice_score_1 += score[0].item() * batch_images_input.shape[0]\n\n 
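# --- Standalone sketch (not part of the training script around it): the checkpoint
# round-trip used above, reduced to its core. The keys and file name are illustrative.
import torch
import torch.nn as nn

model = nn.Linear(4, 2)
ckpt = {'epochs': 1, 'model_state_dict': model.state_dict()}
torch.save(ckpt, '/tmp/model_demo.pth')

restored = torch.load('/tmp/model_demo.pth', map_location='cpu')
model.load_state_dict(restored['model_state_dict'])
print(restored['epochs'])  # 1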
loss_list_validation.append(total_loss_valid/valid_count)\n dice_score_list_validation.append(valid_dice_score/valid_count)\n\n if 0 in classes:\n dice_score_list_validation_0.append(valid_dice_score_0/valid_count)\n if 1 in classes: \n dice_score_list_validation_1.append(valid_dice_score_1/valid_count)\n\n loss_list_validation_index.append(epoch+1)\n\n print('Epoch %d Valid Loss: %.3f' % (epoch + 1, loss_list_validation[-1]),' Time:',datetime.datetime.now() )\n\n if 0 in classes and 1 in classes and len(classes) == 2 and len(clubbed) == 0:\n print('Valid Dice Score: ', dice_score_list_validation[-1], ' Valid Dice Score 0: ', dice_score_list_validation_0[-1], ' Valid Dice Score 1: ', dice_score_list_validation_1[-1])\n elif 1 in classes and len(classes)==1:\n print('Valid Dice Score: ', dice_score_list_validation[-1], ' Valid Dice Score 1: ', dice_score_list_validation_1[-1])\n\n plt.plot(loss_list_validation_index[1:], loss_list_validation[1:], label = \"Validation\", color='red', marker='o', markerfacecolor='yellow', markersize=5)\n plt.xlabel('Epoch') \n plt.ylabel('Validation Loss') \n plt.savefig(plots_dir + '/valid_loss_plot.png')\n plt.clf()\n\n plt.plot(loss_list_validation_index[1:], dice_score_list_validation[1:], label = \"Validation\", color='red', marker='o', markerfacecolor='yellow', markersize=5)\n plt.xlabel('Epoch') \n plt.ylabel('Validation Dice Score') \n plt.savefig(plots_dir + '/valid_dice_score_plot.png')\n plt.clf()\n\n if 0 in classes:\n plt.plot(loss_list_validation_index[1:], dice_score_list_validation_0[1:], label = \"Validation\", color='red', marker='o', markerfacecolor='yellow', markersize=5)\n plt.xlabel('Epoch') \n plt.ylabel('Validation Dice Score') \n plt.savefig(plots_dir + '/valid_dice_score_0_plot.png')\n plt.clf()\n\n if 1 in classes: \n plt.plot(loss_list_validation_index[1:], dice_score_list_validation_1[1:], label = \"Validation\", color='red', marker='o', markerfacecolor='yellow', markersize=5)\n plt.xlabel('Epoch') \n plt.ylabel('Validation Dice Score') \n plt.savefig(plots_dir + '/valid_dice_score_1_plot.png')\n plt.clf()\n\n # validation_pickle = open(plots_pickle_dir + \"/loss_list_validation.npy\",'wb')\n # pickle.dump(loss_list_validation,validation_pickle)\n # validation_pickle.close()\n\n # validation_pickle = open(plots_pickle_dir + \"/index_list_validation.npy\",'wb')\n # pickle.dump(loss_list_validation_index,validation_pickle)\n # validation_pickle.close()\n\n # validation_pickle = open(plots_pickle_dir + \"/dice_score_list_validation.npy\",'wb')\n # pickle.dump(dice_score_list_validation,validation_pickle)\n # validation_pickle.close()\n\n if len(loss_list_validation) >= 3:\n if dice_score_list_validation[-1] > best_model_accuracy:\n best_model_accuracy = dice_score_list_validation[-1]\n torch.save({\n 'epochs': epoch+1,\n 'batchsize': batch_size,\n 'train_loss_list': loss_list_train,\n 'train_loss_list_epoch':loss_list_train_epoch,\n 'train_dice_score_list': dice_score_list_train,\n 'train_dice_score_list_epoch': dice_score_list_train_epoch,\n 'train_loss_index': index_data_list,\n 'train_loss_index_epoch': epoch_data_list,\n 'valid_loss_list': loss_list_validation,\n 'valid_dice_score_list': dice_score_list_validation,\n 'valid_dice_score_list_0': dice_score_list_validation_0,\n 'valid_dice_score_list_1': dice_score_list_validation_1,\n 'valid_loss_index': loss_list_validation_index,\n 'model_state_dict': model.state_dict(),\n }, model_checkpoint_dir + '/model_best.pth')\n\n else:\n best_model_accuracy = 
dice_score_list_validation[-1]\n torch.save({\n 'epochs': epoch+1,\n 'batchsize': batch_size,\n 'train_loss_list': loss_list_train,\n 'train_loss_list_epoch':loss_list_train_epoch,\n 'train_dice_score_list': dice_score_list_train,\n 'train_dice_score_list_epoch': dice_score_list_train_epoch,\n 'train_loss_index': index_data_list,\n 'train_loss_index_epoch': epoch_data_list,\n 'valid_loss_list': loss_list_validation,\n 'valid_dice_score_list': dice_score_list_validation,\n 'valid_dice_score_list_0': dice_score_list_validation_0,\n 'valid_dice_score_list_1': dice_score_list_validation_1,\n 'valid_loss_index': loss_list_validation_index,\n 'model_state_dict': model.state_dict(),\n }, model_checkpoint_dir + '/model_best.pth')\n\n\n \nif __name__ == '__main__':\n main()\n\n","sub_path":"Segmentation/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":23335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"642598678","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\n\nETA = 0.6\nEPOCHS = 16\n\nn = 1000\nxs = []\nys = []\nfor i in range(n):\n W = 0.1\n b = 0.4\n x1 = np.random.normal(0., 1.)\n noise = np.random.normal(0., 0.05)\n y1 = W*x1 + b + noise\n xs.append(x1)\n ys.append(y1)\nplt.plot(xs, ys, 'bo', alpha=0.2)\nplt.show()\n\n\nW = tf.Variable(tf.random_uniform([1], -1., 1.))\nb = tf.Variable(tf.zeros([1]))\ny = W*xs + b\nloss = tf.reduce_mean(tf.square(y - ys))\noptim = tf.train.GradientDescentOptimizer(ETA)\ntrain = optim.minimize(loss)\n\ninit = tf.global_variables_initializer()\nsess = tf.Session()\nsess.run(init)\nfor i in range(EPOCHS):\n sess.run(train)\n print(f'{i}: W: {sess.run(W)}, b: {sess.run(b)}, loss: {sess.run(loss)}')\n\nwith tf.name_scope('LinearRegression') as scope:\n W = tf.Variable(tf.random_uniform([1], -1., 1.), name='W')\n b = tf.Variable(tf.zeros([1]))\n y = W*xs + b\n\nwith tf.name_scope('LossFunction') as scope:\n loss = tf.reduce_mean(tf.square(y - ys))\n\nloss_summary = tf.summary.scalar('loss', loss)\nw_ = tf.summary.histogram('W', W)\nb_ = tf.summary.histogram('b', b)\nmerged_op = tf.summary.merge_all()\nwriter_tensorboard = tf.summary.FileWriter('.', sess.graph_def)\n","sub_path":"python/tensorflow/tensorflow_ppa/02_linear_regression/lin_reg.py","file_name":"lin_reg.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"621475235","text":"'''\nCreated on 2016. 2. 
13.\n\n@author: User\n'''\nletter2code = {' ': 'SbBsSsBsS', '%': 'SsSbSbSbS', '$': 'SbSbSbSsS', '+': 'SbSsSbSbS', \n               '*': 'SbSsBsBsS', '-': 'SbSsSsBsB', '/': 'SbSbSsSbS', '.': 'BbSsSsBsS', \n               '1': 'BsSbSsSsB', '0': 'SsSbBsBsS', '3': 'BsBbSsSsS', '2': 'SsBbSsSsB', \n               '5': 'BsSbBsSsS', '4': 'SsSbBsSsB', '7': 'SsSbSsBsB', '6': 'SsBbBsSsS', \n               '9': 'SsBbSsBsS', '8': 'BsSbSsBsS', 'A': 'BsSsSbSsB', 'C': 'BsBsSbSsS', \n               'B': 'SsBsSbSsB', 'E': 'BsSsBbSsS', 'D': 'SsSsBbSsB', 'G': 'SsSsSbBsB', \n               'F': 'SsBsBbSsS', 'I': 'SsBsSbBsS', 'H': 'BsSsSbBsS', 'K': 'BsSsSsSbB', \n               'J': 'SsSsBbBsS', 'M': 'BsBsSsSbS', 'L': 'SsBsSsSbB', 'O': 'BsSsBsSbS', \n               'N': 'SsSsBsSbB', 'Q': 'SsSsSsBbB', 'P': 'SsBsBsSbS', 'S': 'SsBsSsBbS', \n               'R': 'BsSsSsBbS', 'U': 'BbSsSsSsB', 'T': 'SsSsBsBbS', 'W': 'BbBsSsSsS', \n               'V': 'SbBsSsSsB', 'Y': 'BbSsBsSsS', 'X': 'SbSsBsSsB', 'Z': 'SbBsBsSsS'}\n\n\n# dict[key] = value\n# indexing dict[] with a key returns the corresponding value\n\ndef reverse(dict1):\n    # the left side is the key, the right side is the value\n    # 1 dict2 = {} \n    # 1 for x,y in letter2code.items():\n    # 1     dict2[y] = x\n    # 2 dict2 = {} \n    # 2 for x,y in letter2code.items():\n    # 2     dict2.setdefault(y,x)\n    dict2 = {y:x for x,y in letter2code.items()}\n    # 3rd approach: a dict comprehension\n    return dict2 \n\ndef code39(word,dict1):\n    new = ''\n    for char in word:\n        if char.isalpha():\n            char = char.upper()\n        # when the given char is not a key in the dict\n        if not char in dict1.keys():\n            return None \n        new += dict1[char] + 's'\n    # the trailing 's' separator must not remain after the last code\n    return new[:-1] \n\ndef decode39(code,dict1):\n    pos = 0\n    pos2 = 9 \n    word = ''\n    while pos2 <= len(code):\n        if code[pos:pos2] in dict1.keys():\n            # the code is a concatenation of dict keys separated by 's'\n            word += dict1[code[pos:pos2]] \n            pos += 10\n            pos2 += 10\n        else:\n            return None\n    return word \n\ncode2letter = reverse(letter2code)\nprint( decode39('BsBsSbSsSsBsBbSsSsSsSsBsBsSbSsSsSbBsBsS', code2letter)) ","sub_path":"informatics/previous informatics/Infomatics-1/8/code39.py","file_name":"code39.py","file_ext":"py","file_size_in_byte":2025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
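# --- Round-trip sketch for the Code 39 record above (assumes letter2code, reverse,
# code39, and decode39 exactly as defined there): encode a word, then decode it back.
code2letter = reverse(letter2code)
encoded = code39('HI', letter2code)
print(encoded)                          # 'BsSsSbBsSsSsBsSbBsS' (two 9-char codes joined by 's')
print(decode39(encoded, code2letter))   # 'HI'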
+{"seq_id":"431478004","text":"from django.conf.urls import url\nfrom .views import (\n    VerificationView,\n    VerificationStep1FormView,\n    VerificationStep2View,\n    VerificationStep3FormView,\n    VerificationStep4FormView,\n    VerificationStep5FormView,\n    VerificationStep6FormView,\n    VerificationStep7FormView,\n    # VerificationStep8FormView,\n    VerificationFinishView,\n    UserListView,\n    CheckUser,\n    DeleteVerification,\n    DeleteImageView,\n    NeedSomeDocument,\n)\n\napp_name = 'verification'\nurlpatterns = [\n    url(r'^$', VerificationView.as_view(), name='start'),\n    url(r'^step-1/$', VerificationStep1FormView.as_view(), name='step-1'),\n    url(r'^step-2/$', VerificationStep2View.as_view(), name='step-2'),\n    url(r'^step-3/$', VerificationStep3FormView.as_view(), name='step-3'),\n    url(r'^step-4/$', VerificationStep4FormView.as_view(), name='step-4'),\n    url(r'^step-5/$', VerificationStep5FormView.as_view(), name='step-5'),\n    url(r'^step-6/$', VerificationStep6FormView.as_view(), name='step-6'),\n    url(r'^step-7/$', VerificationStep7FormView.as_view(), name='step-7'),\n    # url(r'^step-8/$', VerificationStep8FormView.as_view(), name='step-8'),\n    url(r'^finish/$', VerificationFinishView.as_view(), name='finish'),\n    url(r'^users/$', UserListView.as_view(), name='users'),\n    url(r'^check-user/(?P<pk>\\d+)/$', CheckUser.as_view(), name='check-user'),\n    url(r'^delete-verification/(?P<pk>\\d+)/$', DeleteVerification.as_view(), name='delete-verification'),\n    url(r'^delete-image/$', DeleteImageView.as_view(), name='delete-image'),\n    url(r'^need-document/$', NeedSomeDocument.as_view(), name='need-document'),\n]\n","sub_path":"verification/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"472179908","text":"from FINE.component import Component, ComponentModeling\nfrom FINE import utils\nimport warnings\nimport pyomo.environ as pyomo\nimport pandas as pd\n\n\nclass Transmission(Component):\n    \"\"\"\n    Doc\n    \"\"\"\n    def __init__(self, esM, name, commodity, losses=0, distances=None,\n                 hasCapacityVariable=True, capacityVariableDomain='continuous', capacityPerPlantUnit=1,\n                 hasIsBuiltBinaryVariable=False, bigM=None,\n                 operationRateMax=None, operationRateFix=None, tsaWeight=1,\n                 locationalEligibility=None, capacityMin=None, capacityMax=None, sharedPotentialID=None,\n                 capacityFix=None, isBuiltFix=None,\n                 investPerCapacity=0, investIfBuilt=0, opexPerOperation=0, opexPerCapacity=0,\n                 opexIfBuilt=0, interestRate=0.08, economicLifetime=10):\n        # TODO add unit checks\n        # Set general component data\n        utils.checkCommodities(esM, {commodity})\n        self._name, self._commodity = name, commodity\n        self._distances = utils.checkAndSetDistances(esM, distances)\n        self._losses = utils.checkAndSetTransmissionLosses(esM, losses, distances)\n\n        # Set design variable modeling parameters\n        utils.checkDesignVariableModelingParameters(capacityVariableDomain, hasCapacityVariable,\n                                                    hasIsBuiltBinaryVariable, bigM)\n        self._hasCapacityVariable = hasCapacityVariable\n        self._capacityVariableDomain = capacityVariableDomain\n        self._capacityPerPlantUnit = capacityPerPlantUnit\n        self._hasIsBuiltBinaryVariable = hasIsBuiltBinaryVariable\n        self._bigM = bigM\n\n        # Set economic data\n        self._investPerCapacity = utils.checkAndSetCostParameter(esM, name, investPerCapacity, '2dim')\n        self._investIfBuilt = utils.checkAndSetCostParameter(esM, name, investIfBuilt, '2dim')\n        self._opexPerOperation = utils.checkAndSetCostParameter(esM, name, opexPerOperation, '2dim')\n        self._opexPerCapacity = utils.checkAndSetCostParameter(esM, name, opexPerCapacity, '2dim')\n        self._opexIfBuilt = utils.checkAndSetCostParameter(esM, name, opexIfBuilt, '2dim')\n        self._interestRate = utils.checkAndSetCostParameter(esM, name, interestRate, '2dim')\n        self._economicLifetime = utils.checkAndSetCostParameter(esM, name, economicLifetime, '2dim')\n        self._CCF = self.getCapitalChargeFactor()\n\n        # Set location-specific operation parameters\n        if operationRateMax is not None and operationRateFix is not None:\n            operationRateMax = None\n            warnings.warn('If operationRateFix is specified, the operationRateMax parameter is not required.\\n' +\n                          'The operationRateMax time series was set to None.')\n        utils.checkOperationTimeSeriesInputParameters(esM, operationRateMax, locationalEligibility, '2dim')\n        utils.checkOperationTimeSeriesInputParameters(esM, operationRateFix, locationalEligibility, '2dim')\n\n        self._fullOperationRateMax = utils.setFormattedTimeSeries(operationRateMax)\n        self._aggregatedOperationRateMax = None\n        self._operationRateMax = utils.setFormattedTimeSeries(operationRateMax)\n\n        self._fullOperationRateFix = utils.setFormattedTimeSeries(operationRateFix)\n        self._aggregatedOperationRateFix = None\n        self._operationRateFix = utils.setFormattedTimeSeries(operationRateFix)\n\n        self._tsaWeight = tsaWeight\n\n        # Set location-specific design parameters\n        self._sharedPotentialID = sharedPotentialID\n        utils.checkLocationSpecficDesignInputParams(esM, hasCapacityVariable, hasIsBuiltBinaryVariable,\n                                                    
capacityMin, capacityMax, capacityFix,\n locationalEligibility, isBuiltFix, sharedPotentialID,\n '2dim')\n self._capacityMin, self._capacityMax, self._capacityFix = capacityMin, capacityMax, capacityFix\n self._isBuiltFix = isBuiltFix\n\n # Set locational eligibility\n operationTimeSeries = operationRateFix if operationRateFix is not None else operationRateMax\n self._locationalEligibility = utils.setLocationalEligibility(esM, locationalEligibility, capacityMax,\n capacityFix, isBuiltFix,\n hasCapacityVariable, operationTimeSeries,\n '2dim')\n\n # Variables at optimum (set after optimization)\n self._capacityVariablesOptimum = None\n self._isBuiltVariablesOptimum = None\n self._operationVariablesOptimum = None\n\n def getCapitalChargeFactor(self):\n \"\"\" Computes and returns capital charge factor (inverse of annuity factor) \"\"\"\n return 1 / self._interestRate - 1 / (pow(1 + self._interestRate, self._economicLifetime) * self._interestRate)\n\n def addToEnergySystemModel(self, esM):\n esM._isTimeSeriesDataClustered = False\n if self._name in esM._componentNames:\n if esM._componentNames[self._name] == TransmissionModeling.__name__:\n warnings.warn('Component identifier ' + self._name + ' already exists. Data will be overwritten.')\n else:\n raise ValueError('Component name ' + self._name + ' is not unique.')\n else:\n esM._componentNames.update({self._name: TransmissionModeling.__name__})\n mdl = TransmissionModeling.__name__\n if mdl not in esM._componentModelingDict:\n esM._componentModelingDict.update({mdl: TransmissionModeling()})\n esM._componentModelingDict[mdl]._componentsDict.update({self._name: self})\n\n def setTimeSeriesData(self, hasTSA):\n self._operationRateMax = self._aggregatedOperationRateMax if hasTSA else self._fullOperationRateMax\n self._operationRateFix = self._aggregatedOperationRateFix if hasTSA else self._fullOperationRateFix\n\n def getDataForTimeSeriesAggregation(self):\n fullOperationRate = self._fullOperationRateFix if self._fullOperationRateFix is not None \\\n else self._fullOperationRateMax\n if fullOperationRate is not None:\n fullOperationRate = fullOperationRate.copy()\n uniqueIdentifiers = [self._name + \"_operationRate_\" + locationIn + '_' + locationOut\n for locationIn, locationOut in fullOperationRate.columns]\n compData = pd.DataFrame(index=fullOperationRate.index, columns=uniqueIdentifiers)\n compDict = {}\n for locationIn, locationOut in fullOperationRate.columns:\n uniqueIdentifier = self._name + \"_operationRate_\" + locationIn + '_' + locationOut\n compData[uniqueIdentifier] = fullOperationRate.pop((locationIn, locationOut))\n compDict.update({uniqueIdentifier: self._tsaWeight})\n return compData, compDict\n else:\n return None, {}\n\n def setAggregatedTimeSeriesData(self, data):\n fullOperationRate = self._fullOperationRateFix if self._fullOperationRateFix is not None \\\n else self._fullOperationRateMax\n if fullOperationRate is not None:\n uniqueIdentifiers = [self._name + \"_operationRate_\" + locationIn + '_' + locationOut\n for locationIn, locationOut in fullOperationRate.columns]\n compData = data[uniqueIdentifiers].copy()\n compData = pd.DataFrame(index=data.index, columns=fullOperationRate.columns)\n for locationIn, locationOut in compData.columns:\n compData.loc[:, (locationIn, locationOut)] = \\\n data.loc[:, self._name + \"_operationRate_\" + locationIn + '_' + locationOut]\n if self._fullOperationRateFix is not None:\n self._aggregatedOperationRateFix = compData\n else:\n self._aggregatedOperationRateMax = compData\n\n\nclass 
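# --- Worked example (separate from the class code around it) of the capital charge
# factor computed by getCapitalChargeFactor above: CCF = 1/i - 1/((1+i)^n * i), the
# present-value annuity factor for interest rate i over n years.
i, n = 0.08, 10   # the class defaults: 8% interest rate, 10-year economic lifetime
ccf = 1 / i - 1 / ((1 + i) ** n * i)
print(round(ccf, 4))  # 6.7101: one currency unit per year for 10 years is worth ~6.71 today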
TransmissionModeling(ComponentModeling):\n \"\"\" Doc \"\"\"\n def __init__(self):\n self._componentsDict = {}\n self._capacityVariablesOptimum = None\n self._isBuiltVariablesOptimum = None\n self._operationVariablesOptimum = None\n\n ####################################################################################################################\n # Declare sparse index sets #\n ####################################################################################################################\n\n def declareSets(self, esM, pyM):\n \"\"\" Declares sets and dictionaries \"\"\"\n compDict = self._componentsDict\n\n ################################################################################################################\n # Declare design variables sets #\n ################################################################################################################\n\n def initDesignVarSet(pyM):\n return ((loc, loc_, compName) for loc in esM._locations for loc_ in esM._locations\n for compName, comp in compDict.items()\n if comp._locationalEligibility[loc][loc_] == 1 and comp._hasCapacityVariable)\n pyM.designDimensionVarSet_trans = pyomo.Set(dimen=3, initialize=initDesignVarSet)\n\n def initContinuousDesignVarSet(pyM):\n return ((loc, loc_, compName) for loc, loc_, compName, in pyM.designDimensionVarSet_trans\n if compDict[compName]._capacityVariableDomain == 'continuous')\n pyM.continuousDesignDimensionVarSet_trans = pyomo.Set(dimen=3, initialize=initContinuousDesignVarSet)\n\n def initDiscreteDesignVarSet(pyM):\n return ((loc, loc_, compName) for loc, loc_, compName in pyM.designDimensionVarSet_trans\n if compDict[compName]._capacityVariableDomain == 'discrete')\n pyM.discreteDesignDimensionVarSet_trans = pyomo.Set(dimen=3, initialize=initDiscreteDesignVarSet)\n\n def initDesignDecisionVarSet(pyM):\n return ((loc, loc_, compName) for loc, loc_, compName in pyM.designDimensionVarSet_trans\n if compDict[compName]._hasIsBuiltBinaryVariable)\n pyM.designDecisionVarSet_trans = pyomo.Set(dimen=3, initialize=initDesignDecisionVarSet)\n\n ################################################################################################################\n # Declare operation variables sets #\n ################################################################################################################\n\n def initOpVarSet(pyM):\n return ((loc, loc_, compName) for loc in esM._locations for loc_ in esM._locations\n for compName, comp in compDict.items() if comp._locationalEligibility[loc][loc_] == 1)\n pyM.operationVarSet_trans = pyomo.Set(dimen=3, initialize=initOpVarSet)\n pyM.operationVarDict_transOut = {loc: {loc_: {compName for compName in compDict\n if (loc, loc_, compName) in pyM.operationVarSet_trans}\n for loc_ in esM._locations} for loc in esM._locations}\n pyM.operationVarDict_transIn = {loc: {loc_: {compName for compName in compDict\n if (loc_, loc, compName) in pyM.operationVarSet_trans}\n for loc_ in esM._locations} for loc in esM._locations}\n\n ################################################################################################################\n # Declare sets for case differentiation of operating modes #\n ################################################################################################################\n\n def initOpConstrSet1(pyM):\n return ((loc, loc_, compName) for loc, loc_, compName in pyM.operationVarSet_trans if\n compDict[compName]._hasCapacityVariable and compDict[compName]._operationRateMax is None\n and 
compDict[compName]._operationRateFix is None)\n pyM.opConstrSet1_trans = pyomo.Set(dimen=3, initialize=initOpConstrSet1)\n\n def initOpConstrSet2(pyM):\n return ((loc, loc_, compName) for loc, loc_, compName in pyM.operationVarSet_trans if\n compDict[compName]._hasCapacityVariable and compDict[compName]._operationRateFix is not None)\n pyM.opConstrSet2_trans = pyomo.Set(dimen=3, initialize=initOpConstrSet2)\n\n def initOpConstrSet3(pyM):\n return ((loc, loc_, compName) for loc, loc_, compName in pyM.operationVarSet_trans if\n compDict[compName]._hasCapacityVariable and compDict[compName]._operationRateMax is not None)\n pyM.opConstrSet3_trans = pyomo.Set(dimen=3, initialize=initOpConstrSet3)\n\n def initOpConstrSet4(pyM):\n return ((loc, loc_, compName) for loc, loc_, compName in pyM.operationVarSet_trans if not\n compDict[compName]._hasCapacityVariable and compDict[compName]._operationRateFix is not None)\n pyM.opConstrSet4_trans = pyomo.Set(dimen=3, initialize=initOpConstrSet4)\n\n def initOpConstrSet5(pyM):\n return ((loc, loc_, compName) for loc, loc_, compName in pyM.operationVarSet_trans if not\n compDict[compName]._hasCapacityVariable and compDict[compName]._operationRateMax is not None)\n pyM.opConstrSet5_trans = pyomo.Set(dimen=3, initialize=initOpConstrSet5)\n\n potentialDict = {} # TODO adapt for 2dim components\n for compName, comp in compDict.items():\n if comp._sharedPotentialID is not None:\n potentialDict.setdefault(comp._sharedPotentialID, []).append(compName)\n pyM.sharedPotentialTransmissionDict = potentialDict\n\n ####################################################################################################################\n # Declare variables #\n ####################################################################################################################\n\n def declareVariables(self, esM, pyM):\n \"\"\" Declares design and operation variables \"\"\"\n # Function for setting lower and upper capacity bounds\n def capBounds(pyM, loc, loc_, compName):\n comp = self._componentsDict[compName]\n return (comp._capacityMin[loc][loc_]\n if (comp._capacityMin is not None and not comp._hasIsBuiltBinaryVariable) else 0,\n comp._capacityMax[loc][loc_] if comp._capacityMax is not None else None)\n\n # Capacity of components [powerUnit]\n pyM.cap_trans = pyomo.Var(pyM.designDimensionVarSet_trans, domain=pyomo.NonNegativeReals, bounds=capBounds)\n # Number of components [-]\n pyM.nbReal_trans = pyomo.Var(pyM.continuousDesignDimensionVarSet_trans, domain=pyomo.NonNegativeReals)\n # Number of components [-]\n pyM.nbInt_trans = pyomo.Var(pyM.discreteDesignDimensionVarSet_trans, domain=pyomo.NonNegativeIntegers)\n # Binary variables [-], indicate if a component is considered at a location or not\n pyM.designBin_trans = pyomo.Var(pyM.designDecisionVarSet_trans, domain=pyomo.Binary)\n # Operation of component [energyUnit]\n pyM.op_trans = pyomo.Var(pyM.operationVarSet_trans, pyM.timeSet, domain=pyomo.NonNegativeReals)\n\n ####################################################################################################################\n # Declare component constraints #\n ####################################################################################################################\n\n def declareComponentConstraints(self, esM, pyM):\n \"\"\" Declares time independent and dependent constraints\"\"\"\n compDict = self._componentsDict\n\n ################################################################################################################\n # Declare time 
independent constraints                                                                                       #
\n        ################################################################################################################
\n\n        # Determine the components' capacities from the number of installed units
\n        def capToNbReal_trans(pyM, loc, loc_, compName):
\n            return pyM.cap_trans[loc, loc_, compName] == \\
\n                   pyM.nbReal_trans[loc, loc_, compName] * compDict[compName]._capacityPerPlantUnit
\n        pyM.ConstrCapToNbReal_trans = pyomo.Constraint(pyM.continuousDesignDimensionVarSet_trans,
\n                                                       rule=capToNbReal_trans)
\n\n        # Determine the components' capacities from the number of installed units
\n        def capToNbInt_trans(pyM, loc, loc_, compName):
\n            return pyM.cap_trans[loc, loc_, compName] == \\
\n                   pyM.nbInt_trans[loc, loc_, compName] * compDict[compName]._capacityPerPlantUnit
\n        pyM.ConstrCapToNbInt_trans = pyomo.Constraint(pyM.discreteDesignDimensionVarSet_trans,
\n                                                      rule=capToNbInt_trans)
\n\n        # Enforce the consideration of the binary design variables of a component
\n        def bigM_trans(pyM, loc, loc_, compName):
\n            return pyM.cap_trans[loc, loc_, compName] <= \\
\n                   compDict[compName]._bigM * pyM.designBin_trans[loc, loc_, compName]
\n        pyM.ConstrBigM_trans = pyomo.Constraint(pyM.designDecisionVarSet_trans, rule=bigM_trans)
\n\n        # Enforce the consideration of minimum capacities for components with design decision variables
\n        def capacityMinDec_trans(pyM, loc, loc_, compName):
\n            return (pyM.cap_trans[loc, loc_, compName] >= compDict[compName]._capacityMin[loc][loc_] *
\n                    pyM.designBin_trans[loc, loc_, compName] if compDict[compName]._capacityMin is not None
\n                    else pyomo.Constraint.Skip)
\n        pyM.ConstrCapacityMinDec_trans = pyomo.Constraint(pyM.designDecisionVarSet_trans, rule=capacityMinDec_trans)
\n\n        # Sets, if applicable, the installed capacities of a component
\n        def capacityFix_trans(pyM, loc, loc_, compName):
\n            return (pyM.cap_trans[loc, loc_, compName] == compDict[compName]._capacityFix[loc][loc_]
\n                    if compDict[compName]._capacityFix is not None else pyomo.Constraint.Skip)
\n        pyM.ConstrCapacityFix_trans = pyomo.Constraint(pyM.designDimensionVarSet_trans, rule=capacityFix_trans)
\n\n        # Sets, if applicable, the binary design variables of a component
\n        def designBinFix_trans(pyM, loc, loc_, compName):
\n            return (pyM.designBin_trans[loc, loc_, compName] == compDict[compName]._isBuiltFix[loc][loc_]
\n                    if compDict[compName]._isBuiltFix is not None else pyomo.Constraint.Skip)
\n        pyM.ConstrDesignBinFix_trans = pyomo.Constraint(pyM.designDecisionVarSet_trans, rule=designBinFix_trans)
\n\n        # Limit the summed capacity shares of components with the same shared potential ID to the shared maximum
\n        def sharedPotentialTransmission(pyM, key, loc, loc_):
\n            return sum(pyM.cap_trans[loc, loc_, compName] / compDict[compName]._capacityMax[loc][loc_]
\n                       for compName in compDict if compDict[compName]._sharedPotentialID == key
\n                       and (loc, loc_, compName) in pyM.designDimensionVarSet_trans) <= 1
\n        pyM.ConstSharedPotential_trans = \\
\n            pyomo.Constraint(pyM.sharedPotentialTransmissionDict.keys(), esM._locations, esM._locations,
\n                             rule=sharedPotentialTransmission)
\n\n        def symmetricalCapacity_trans(pyM, loc, loc_, compName):
\n            return pyM.cap_trans[loc, loc_, compName] == pyM.cap_trans[loc_, loc, compName]
\n        pyM.ConstrSymmetricalCapacity_trans = \\
\n            pyomo.Constraint(pyM.designDimensionVarSet_trans, rule=symmetricalCapacity_trans)
\n\n        ################################################################################################################
\n        # Declare time dependent constraints                                                                           #
\n        ################################################################################################################
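\n\n        # The constraint sets opConstrSet1-5_trans declared in declareSets select one operating mode per
\n        # component: (1) a capacity variable without a rate series, (2)/(3) a capacity variable with a
\n        # fixed/maximum operation rate series, and (4)/(5) a fixed/maximum rate series without a capacity
\n        # variable; the constraints ConstrOperation1-5_trans below implement them one by one.
\n\n        # Operation [energyUnit] limited by the installed 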
capacity [powerUnit] multiplied by the hours per time step
\n        def op1_trans(pyM, loc, loc_, compName, p, t):
\n            return pyM.op_trans[loc, loc_, compName, p, t] <= \\
\n                   pyM.cap_trans[loc, loc_, compName] * esM._hoursPerTimeStep
\n        pyM.ConstrOperation1_trans = pyomo.Constraint(pyM.opConstrSet1_trans, pyM.timeSet, rule=op1_trans)
\n\n        # Operation [energyUnit] equal to the installed capacity [powerUnit] multiplied by operation time series
\n        # [powerUnit/powerUnit] and the hours per time step [h]
\n        def op2_trans(pyM, loc, loc_, compName, p, t):
\n            return pyM.op_trans[loc, loc_, compName, p, t] == pyM.cap_trans[loc, loc_, compName] * \\
\n                   compDict[compName]._operationRateFix[loc, loc_][p, t] * esM._hoursPerTimeStep
\n        pyM.ConstrOperation2_trans = pyomo.Constraint(pyM.opConstrSet2_trans, pyM.timeSet, rule=op2_trans)
\n\n        # Operation [energyUnit] limited by the installed capacity [powerUnit] multiplied by operation time series
\n        # [powerUnit/powerUnit] and the hours per time step [h]
\n        def op3_trans(pyM, loc, loc_, compName, p, t):
\n            return pyM.op_trans[loc, loc_, compName, p, t] <= pyM.cap_trans[loc, loc_, compName] * \\
\n                   compDict[compName]._operationRateMax[loc, loc_][p, t] * esM._hoursPerTimeStep
\n        pyM.ConstrOperation3_trans = pyomo.Constraint(pyM.opConstrSet3_trans, pyM.timeSet, rule=op3_trans)
\n\n        # Operation [energyUnit] equal to the operation time series [energyUnit]
\n        def op4_trans(pyM, loc, loc_, compName, p, t):
\n            return pyM.op_trans[loc, loc_, compName, p, t] == compDict[compName]._operationRateFix[loc, loc_][p, t]
\n        pyM.ConstrOperation4_trans = pyomo.Constraint(pyM.opConstrSet4_trans, pyM.timeSet, rule=op4_trans)
\n\n        # Operation [energyUnit] limited by the operation time series [energyUnit]
\n        def op5_trans(pyM, loc, loc_, compName, p, t):
\n            return pyM.op_trans[loc, loc_, compName, p, t] <= compDict[compName]._operationRateMax[loc, loc_][p, t]
\n        pyM.ConstrOperation5_trans = pyomo.Constraint(pyM.opConstrSet5_trans, pyM.timeSet, rule=op5_trans)
\n\n    ####################################################################################################################
\n    # Declare component contributions to basic EnergySystemModel constraints and its objective function               #
\n    ####################################################################################################################
\n\n    def getSharedPotentialContribution(self, pyM, key, loc):
\n        # shared potentials of the 2dim transmission components are enforced by ConstSharedPotential_trans
\n        return 0
\n\n    def hasOpVariablesForLocationCommodity(self, esM, loc, commod):
\n        return any([comp._commodity == commod and
\n                    (comp._locationalEligibility[loc][loc_] == 1 or comp._locationalEligibility[loc_][loc] == 1)
\n                    for comp in self._componentsDict.values() for loc_ in esM._locations])
\n\n    def getCommodityBalanceContribution(self, pyM, commod, loc, p, t):  # TODO losses connected to distances
\n        return sum(pyM.op_trans[loc_, loc, compName, p, t] *
\n                   (1 - self._componentsDict[compName]._losses[loc_][loc] *
\n                    self._componentsDict[compName]._distances[loc_][loc])
\n                   for loc_ in pyM.operationVarDict_transIn[loc].keys()
\n                   for compName in pyM.operationVarDict_transIn[loc][loc_]
\n                   if commod == self._componentsDict[compName]._commodity) - \\
\n               sum(pyM.op_trans[loc, loc_, compName, p, t]
\n                   for loc_ in pyM.operationVarDict_transOut[loc].keys()
\n                   for compName in pyM.operationVarDict_transOut[loc][loc_]
\n                   if commod == self._componentsDict[compName]._commodity)
\n\n    def getObjectiveFunctionContribution(self, esM, pyM):
\n        # TODO replace 0.5 with factor which is one when non-directional and 0.5 when bi-directional
\n        compDict = self._componentsDict
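\n\n        # Each eligible connection appears twice in the index sets, once as (loc, loc_) and once as
\n        # (loc_, loc); the factor 0.5 below therefore counts the costs of one bi-directional line only once.
\n        capexDim = 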
sum(compDict[compName]._investPerCapacity[loc][loc_] * pyM.cap_trans[loc, loc_, compName] *
\n                       compDict[compName]._distances[loc][loc_] /
\n                       compDict[compName]._CCF[loc][loc_] for loc, loc_, compName in pyM.cap_trans) * 0.5
\n\n        capexDec = sum(compDict[compName]._investIfBuilt[loc][loc_] *
\n                       pyM.designBin_trans[loc, loc_, compName] * compDict[compName]._distances[loc][loc_] /
\n                       compDict[compName]._CCF[loc][loc_] for loc, loc_, compName in pyM.designBin_trans) * 0.5
\n\n        opexDim = sum(compDict[compName]._opexPerCapacity[loc][loc_] * pyM.cap_trans[loc, loc_, compName] *
\n                      compDict[compName]._distances[loc][loc_] for loc, loc_, compName in pyM.cap_trans) * 0.5
\n\n        opexDec = sum(compDict[compName]._opexIfBuilt[loc][loc_] * pyM.designBin_trans[loc, loc_, compName] *
\n                      compDict[compName]._distances[loc][loc_] for loc, loc_, compName in pyM.designBin_trans) * 0.5
\n\n        opexOp = sum(compDict[compName]._opexPerOperation[loc][loc_] *
\n                     sum(pyM.op_trans[loc, loc_, compName, p, t] * esM._periodOccurrences[p] for p, t in pyM.timeSet)
\n                     for loc, subDict in pyM.operationVarDict_transOut.items()
\n                     for loc_, compNames in subDict.items()
\n                     for compName in compNames) / esM._numberOfYears
\n\n        return capexDim + capexDec + opexDim + opexDec + opexOp
\n\n    def setOptimalValues(self, esM, pyM):
\n        optVal = utils.formatOptimizationOutput(pyM.cap_trans.get_values(), 'designVariables', '1dim')
\n        self._capacityVariablesOptimum = optVal
\n        utils.setOptimalComponentVariables(optVal, '_capacityVariablesOptimum', self._componentsDict)
\n\n        optVal = utils.formatOptimizationOutput(pyM.designBin_trans.get_values(), 'designVariables', '1dim')
\n        self._isBuiltVariablesOptimum = optVal
\n        utils.setOptimalComponentVariables(optVal, '_isBuiltVariablesOptimum', self._componentsDict)
\n\n        optVal = utils.formatOptimizationOutput(pyM.op_trans.get_values(), 'operationVariables', '1dim',
\n                                                esM._periodsOrder)
\n        self._operationVariablesOptimum = optVal
\n        utils.setOptimalComponentVariables(optVal, '_operationVariablesOptimum', self._componentsDict)
\n\n    def getOptimalCapacities(self):
\n        return self._capacityVariablesOptimum","sub_path":"FINE/transmissionModeling.py","file_name":"transmissionModeling.py","file_ext":"py","file_size_in_byte":27181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"70213584","text":"\"\"\"
\n<Program Name>
\n  test_common_args.py
\n\n<Author>
\n  Lukas Puehringer
\n\n<Started>
\n  Jan 16, 2020
\n\n<Copyright>
\n  See LICENSE for licensing information.
\n\n<Purpose>
\n  Test in_toto/common_args.py
\n\n\"\"\"
\nimport unittest
\nimport argparse
\nimport in_toto.common_args
\n\n\nclass TestArgparseActionGroupHelpers(unittest.TestCase):
\n  # pylint: disable=protected-access
\n\n  def setUp(self):
\n    \"\"\"Create an empty parser and perform some basic assertions prior to
\n    testing parser action group (i.e. argument group) helper functions. \"\"\"
\n    # Create empty argument parser
\n    self.parser = argparse.ArgumentParser()
\n\n    # Assert parser has the protected member \"_action_groups\" and it is a list
\n    # NOTE: argparse could remove this at any time without notice
\n    self.assertTrue(type(getattr(self.parser, \"_action_groups\", None)) == list) # pylint: disable=unidiomatic-typecheck
\n\n    # Assert default action groups with default titles' case and default order
\n    self.assertListEqual([group.title for group in self.parser._action_groups],
\n        [\"positional arguments\", \"optional arguments\"])
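\n\n    # NOTE: the default titles asserted above assume Python < 3.10; argparse renamed the
\n    # \"optional arguments\" group to \"options\" in Python 3.10.
\n\n\n  def test_title_case_action_groups(self):
\n    \"\"\"Test title_case_action_groups title cases action group titles. 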
\"\"\"\n # Make titles title-case (default is asserted in setUp)\n in_toto.common_args.title_case_action_groups(self.parser)\n\n # Assert successful title-casing\n self.assertListEqual([group.title for group in self.parser._action_groups],\n [\"Positional Arguments\", \"Optional Arguments\"])\n\n\n def test_sort_action_groups(self):\n \"\"\"Test sort_action_groups sorts action groups by custom title order. \"\"\"\n # Create custom order for titles (default is asserted in setUp)\n custom_order = [\"optional arguments\", \"positional arguments\"]\n in_toto.common_args.sort_action_groups(\n self.parser, title_order=custom_order)\n # Assert successful re-ordering\n self.assertListEqual([group.title for group in self.parser._action_groups],\n custom_order)\n\n\n # Add custom group to parser that exists in most in-toto command line tools\n self.parser.add_argument_group(\"required named arguments\")\n\n # Test default custom order of action groups titles (which are title-cased)\n in_toto.common_args.title_case_action_groups(self.parser)\n in_toto.common_args.sort_action_groups(self.parser)\n default_custom_order = [\"Required Named Arguments\", \"Positional Arguments\",\n \"Optional Arguments\"]\n\n # Assert successful(title-casing) re-ordering\n self.assertListEqual([group.title for group in self.parser._action_groups],\n default_custom_order)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"tests/test_common_args.py","file_name":"test_common_args.py","file_ext":"py","file_size_in_byte":2662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"593437808","text":"from typing import Optional\n\n\nclass Human:\n def __init__(self, name: str, cars: list[str]) -> None:\n self.name = name\n self.cars = cars\n self.current_car: Optional[str] = None\n\n def set_current_car(self, car: str) -> None:\n if car not in self.cars:\n raise ValueError(\"No such car found {car}\")\n self.current_car = car\n\ndef choose_a_car(human: Human) -> None:\n selected_car = input(f\"Please select a car from {human.cars}\")\n try:\n human.set_current_car(selected_car)\n except ValueError:\n print(\"No such cars, try again!\")\n choose_a_car(human)\n\nhuman = Human(\"Michael\", [\"Tesla\", \"2HK6784LXG34H78R1\", \"Roadster)))\"])\nhuman2 = Human(\"Gordon\", [\"Porsche\", \"9K7395CC185I7G9NB\", \"Taycan)))\"])\nchoose_a_car(human2)\nchoose_a_car(human)\nprint(human.current_car)","sub_path":"homework1.py","file_name":"homework1.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"342863542","text":"import psycopg2\nimport settings\nimport smtplib\nfrom typing import Dict, Tuple, Union\nimport simplejson as json\nfrom email.header import Header\nfrom email.mime.text import MIMEText\n\nfrom parser.download_current import download_current_flights_list\n\n\ndef get_db_conn():\n return psycopg2.connect(\n dbname=settings.DB['NAME'],\n user=settings.DB['USER'],\n password=settings.DB['PASSWORD'],\n host=settings.DB['HOST'],\n port=settings.DB['PORT'],\n )\n\n\ndef send_email(recipients: Union[str, list], subject: str, body_data: list):\n if settings.DEBUG:\n subject = '[DEBUG]' + subject\n gmail_user = from_email = settings.GMAIL['USER']\n gmail_pwd = settings.GMAIL['PASSWORD']\n to_email = recipients if isinstance(recipients, list) else [recipients]\n message = MIMEText(''.join(body_data).encode('utf-8'), 'plain', 'utf-8')\n message['Subject'] = Header(subject, 
'utf-8')
\n    message['From'] = from_email
\n    message['To'] = \", \".join(to_email)
\n    server = smtplib.SMTP(\"smtp.gmail.com\", 587)
\n    server.ehlo()
\n    server.starttls()
\n    server.login(gmail_user, gmail_pwd)
\n    server.sendmail(from_email, to_email, message.as_string())
\n    server.close()
\n\n\ndef insert_data(user: dict, data: Dict[int, Tuple[int, Dict]]=None, cur=None) -> None:
\n    def insert(cursor):
\n        if data is None:
\n            flights_data = download_current_flights_list(user)
\n        else:
\n            flights_data = data
\n        data_list = []
\n        for k, v in flights_data.items():
\n            data_list.append(\"('{}', '{}', {}, '{}')\".format(k, user['USERNAME'], v[0] if v[0] else 'NULL', json.dumps(v[1])))
\n        data_to_insert = ', '.join(data_list)
\n        # NOTE: values are interpolated into the SQL text unescaped; this is only safe for
\n        # trusted input, parameterized queries would be more robust
\n        cursor.execute(
\n            \"\"\"
\n            INSERT INTO flights(id, aviabit_user, status, data) VALUES {}
\n            \"\"\".format(data_to_insert)
\n        )
\n    if cur is None:
\n        with get_db_conn() as conn:
\n            cur = conn.cursor()
\n            insert(cur)
\n            conn.commit()
\n    else:
\n        insert(cur)
\n\n\ndef update_data(user: dict, data: Dict[int, Tuple[int, dict]], cur=None) -> None:
\n    def update(cursor):
\n        for flight_id, (flight_status, flight_data) in data.items():
\n            cursor.execute(
\n                \"\"\"
\n                UPDATE flights SET (status, data) = ({}, '{}')
\n                WHERE id = '{}' AND aviabit_user='{}'
\n                \"\"\".format(flight_status if flight_status else 'NULL', json.dumps(flight_data), flight_id, user['USERNAME'])
\n            )
\n    if cur is None:
\n        with get_db_conn() as conn:
\n            cur = conn.cursor()
\n            update(cur)
\n            conn.commit()
\n    else:
\n        update(cur)
\n\n\nif __name__ == '__main__':
\n    send_email('evgeniymsev@gmail.com', 'Test!', ['Flight data changed!'])
\n","sub_path":"scripts/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"213881823","text":"#%%\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport scipy.fft\r\nfrom scipy.fftpack import fftshift, ifftshift\r\nimport sys\r\nimport os\r\nimport pickle\r\n\r\n#%%\r\n\r\njob_ID = int(os.environ.get('SLURM_ARRAY_JOB_ID', default=-1)) # job ID
\r\ntask_ID = int(os.environ.get('SLURM_ARRAY_TASK_ID', default=-1)) # task ID
\r\n\r\n# T = 1000\r\nwmax = 512\r\nN = 2**25\r\nT = np.pi/wmax * N # T is tmax
\r\neta_conv = 1/T * 5\r\nJ = 1\r\nJ = J/np.sqrt(2) # choose same convention as stephan
\r\nmus = np.concatenate([np.linspace(0,0.01,11), np.linspace(0.01,0.1,21)[1:]])
\r\n\r\n#if task_ID == -1:\r\nmu = mus[task_ID]
\r\n\r\nif mu < 0.01:
\r\n    temp = 15e-5
\r\nelse:
\r\n    temp = 5e-4
\r\nbeta = 1/temp
\r\n\r\netas = [0,1,0.1]
\r\n\r\nalpha = 0.1
\r\n#%%\r\n\r\ndef fermi(eps):
\r\n    return 1 / ( np.exp(eps * beta) + 1)
\r\n\r\ndef fft_(x):
\r\n    return fftshift(scipy.fft.fft(fftshift(x)))
\r\n\r\ndef fft(x):
\r\n    return fftshift(scipy.fft.ifft(fftshift(x))) * len(x)
\r\n\r\nfor eta in etas:
\r\n    xx = []
\r\n    t = np.arange(-N/2, N/2) / N * T
\r\n    # t = np.linspace(-T/2, T/2, N)
\r\n    w = np.arange(-N/2, N/2) * 2 * np.pi / T
\r\n    # w = np.linspace(-N/2, N/2, N) * 2 * np.pi / T
\r\n\r\n    dt = T / N
\r\n    dw = 2 * np.pi / T
\r\n\r\n    # initialize randomly
\r\n    np.random.seed(1)
\r\n    GRR = - 1j * np.abs(np.random.rand(N))
\r\n    GLL = - 1j * np.abs(np.random.rand(N))
\r\n    GLR = - 1j * np.abs(np.random.rand(N))
\r\n\r\n    # initialize once
\r\n    nf = fermi(w)
\r\n    nf_ = fermi(-w)
\r\n\r\n    ax = np.newaxis
\r\n\r\n    exp_eta_t = np.exp( - eta_conv * t)
\r\n    exp_eta_t[:N//2] = 0
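\r\n\r\n    # exp_eta_t acts as theta(t) * exp(-eta_conv * t): zeroing t < 0 enforces the retarded
\r\n    # convention and eta_conv provides convergence for the transforms on the finite window T
\r\n\r\n    # rhoRR = - 1 / np.pi * GRR.imag
\r\n    # rhoLL = - 1 / np.pi * GLL.imag
\r\n    # rhoLR = 1j / np.pi * GLR.real
\r\n    rhoRR = np.ones(N)
\r\n    rhoLL = np.ones(N)
\r\n    rhoLR = 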
np.zeros(N)\r\n rhoRR[0] = 0\r\n rhoLL[0] = 0\r\n\r\n #normalize\r\n rhoRR /= dw * np.sum(rhoRR, axis=0)\r\n rhoLL /= dw * np.sum(rhoLL, axis=0)\r\n\r\n # %%\r\n\r\n\r\n # perform self-consistency equation\r\n for i in np.arange(250):\r\n nRR = dw * fft_(rhoRR * nf)\r\n nLL = dw * fft_(rhoLL * nf)\r\n nLR = dw * fft_(rhoLR * nf)\r\n\r\n nRR_ = dw * fft_(rhoRR * nf_)\r\n nLL_ = dw * fft_(rhoLL * nf_)\r\n nLR_ = dw * fft_(rhoLR * nf_)\r\n\r\n SRR = 2 * J**2 * (1 - eta)**2 * (-1j) * dt * fft(exp_eta_t * (nRR**3 + nRR_**3))\r\n SLL = 2 * J**2 * (1 + eta)**2 * (-1j) * dt * fft(exp_eta_t * (nLL**3 + nLL_**3))\r\n SLR = 2 * J**2 * (1 - eta**2) * (-1j) * dt * fft(exp_eta_t * (nLR**3 + nLR_**3))\r\n\r\n D = (w - SRR) * ( w - SLL) + (1j*mu - SLR)**2\r\n\r\n GLL = (w - SRR) / D\r\n GRR = (w - SLL) / D\r\n GLR = - (1j*mu - SLR) / D\r\n\r\n rhoRR_ = -1 / np.pi * GRR.imag\r\n rhoLL_ = -1 / np.pi * GLL.imag\r\n rhoLR_ = 1j / np.pi * GLR.real\r\n\r\n if i % 10 == 0:\r\n x = (np.sum(np.abs(rhoRR_-rhoRR)**2) + np.sum(np.abs(rhoLL_-rhoLL)**2) + np.sum(np.abs(rhoLR_-rhoLR)**2)) / N\r\n xx.append(x)\r\n print(i, x)#, 'sum_rule:', dw * np.sum(rhoRR), dw * np.sum(rhoLL))\r\n\r\n rhoRR = rhoRR * (1-alpha) + alpha * rhoRR_\r\n rhoLL = rhoLL * (1-alpha) + alpha * rhoLL_\r\n rhoLR = rhoLR * (1-alpha) + alpha * rhoLR_\r\n\r\n # symmetrize\r\n rhoRR[1:] = 0.5 * (rhoRR[1:] + np.flip(rhoRR[1:]))\r\n rhoLL[1:] = 0.5 * (rhoLL[1:] + np.flip(rhoLL[1:]))\r\n rhoLR[1:] = 0.5 * (rhoLR[1:] - np.flip(rhoLR[1:]))\r\n\r\n # normalize\r\n rhoRR /= dw * np.sum(rhoRR, axis=0)\r\n rhoLL /= dw * np.sum(rhoLL, axis=0)\r\n\r\n if i % 10 == 0 and x < 1e-8:\r\n break\r\n\r\n del rhoRR_\r\n del rhoLL_\r\n del rhoLR_\r\n\r\n w_re = 4*mu**(2/3)/(2*np.pi)\r\n sel2 = np.logical_and(t>=0, t*w_re<10*np.pi)\r\n if mu == 0:\r\n sel2 = np.logical_and(t>=0, t=0, w<100*mu**(2/3))\r\n if mu == 0:\r\n sel = np.logical_and(w>=0, w<10)\r\n res = {\r\n 'N': N,\r\n 'wmax': wmax,\r\n 'w': w[sel],\r\n 'T': T,\r\n 'eta_conv': eta_conv,\r\n 'temp': temp,\r\n 'beta': beta,\r\n 'J': J,\r\n 'mu': mu,\r\n 'eta': eta,\r\n 'rhoRR': rhoRR[sel],\r\n 'rhoLL': rhoLL[sel],\r\n 'rhoLR': rhoLR[sel],\r\n 'iterations': i,\r\n 'convergence_x': x,\r\n 'convergence_xx': xx,\r\n 'alpha': alpha,\r\n 't': t,\r\n 'GRRg_t': GRRg_t[::15],\r\n 'GLLg_t': GLLg_t[::15],\r\n 'GLRg_t': GLRg_t[::15]\r\n }\r\n\r\n f1 = open(f'{task_ID}.pickle', 'ab')\r\n pickle.dump(res, f1)\r\n f1.close()\r\n\r\n","sub_path":"real_time/3/real_time_sd2.py","file_name":"real_time_sd2.py","file_ext":"py","file_size_in_byte":4650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"27365722","text":"from tool.file import getAllImg\nfrom django.test import TestCase\nfrom unittest import mock\n\nclass GetAllImgTestCase(TestCase):\n\n @mock.patch(\"tool.file.listdir\")\n @mock.patch(\"imghdr.what\")\n def testGetPicList(self, mock_what, mock_listdir):\n mock_listdir.return_value = [\n \"test1.jpg\",\n \"test2\",\n \"test3.txt\",\n \"test4.png\"\n ]\n mock_what.side_effect = [\n True,\n False,\n False,\n True\n ]\n self.assertEqual(getAllImg(\"testPath\\\\\"), [\n \"testPath\\\\test1.jpg\",\n \"testPath\\\\test4.png\"\n ])\n","sub_path":"bookmark/tool/testFile.py","file_name":"testFile.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"621395263","text":"from numpy import loadtxt\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.utils 
import plot_model\nimport datetime as dt\n\n# Train the model and save it to models folder #\n\ndef main():\n # values required to save model\n name = 'diabetes_recognition'\n date = dt.date.today()\n\n # load the dataset\n dataset = loadtxt('pima-indians-diabetes.csv', delimiter=',')\n # split into input (X) and output (Y) variables\n X = dataset[:, 0:8]\n Y = dataset[:, 8]\n\n # models in keras are defined as a sequence of layers\n # we add layers on at a time until we have our network architecture\n # define the keras model:\n model = Sequential()\n # the first hidden layer has 12 nodes and uses the relu activation function\n # in the same line we define the input layer with 8 inputs\n model.add(Dense(12, input_dim=8, activation='relu'))\n # the second hidden layer has 8 nodes and uses the relu\n model.add(Dense(8, activation='relu'))\n # the output layer has one node and uses the sigmoid function\n model.add(Dense(1, activation='sigmoid'))\n # we use cross entropy as the loss argument\n # adam algorithm as the optimizer\n # collect and report the classification accuracy defined with metrics arg\n model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n # training occurs over epochs splitted into batches\n # epoch: one pass through all of the rows in the training dataset\n # batch: one or more samples considered by the model\n # within an epoch before weights are updated\n # these configurations can be chosen experimentally by trial-error\n\n # fit the keras model on the dataset\n # verbose set to 0 to hide outputs from each epoch\n model.fit(X, Y, epochs=400, batch_size=10)\n\n # model evaluation with training set\n _, accuracy = model.evaluate(X, Y)\n print('Accuracy: %.2f' % (accuracy*100))\n\n # plot the image of the network\n filename = \"./models/\" + name + \"_\" + date.strftime(\"%Y-%m-%d\") + \".png\"\n plot_model(model, to_file=filename)\n\n # # making probability predictions with the model\n # predictions = model.predict(X)\n # # we use sigmoid as output, which is why we will round the outcoming value\n # rounded = [round(x[0]) for x in predictions]\n\n # we can do the same as in commented code without rounding\n predictions = model.predict_classes(X)\n # summarize for the first 5 classes:\n for i in range(5):\n print('%s => %d (expected %d)' % (X[i].tolist(), predictions[i], Y[i]))\n\n # we can also save the trained model for future use:\n filename = \"./models/\" + name + \"_\" + date.strftime(\"%Y-%m-%d\") + \".h5\"\n model.save(filename)\n print(\"Model saved\")\n\n\n\nif __name__ == '__main__':\n main()","sub_path":"Diabetes_Recognition.py","file_name":"Diabetes_Recognition.py","file_ext":"py","file_size_in_byte":2776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"19188457","text":"#!/usr/bin/env python3\n# -*- config: utf-8 -*-\n\nfrom tkinter import *\nfrom random import random\n\n\ndef rand_spawn():\n new_x = random()\n new_y = random()\n btn_1.place(relx=new_x, rely=new_y)\n\nroot = Tk()\nroot['bg'] = 'white'\nroot.title('AMOGUS')\nroot.geometry('1000x1000')\nimg = PhotoImage(file='amogus.png')\n\nbtn_1 = Button(image=img, bg='white', borderwidth=0,\n activebackground='white', command=rand_spawn)\nbtn_1.place(relx=0.5, rely=0.5, anchor=CENTER)\n\nroot.mainloop()\n","sub_path":"task5.py","file_name":"task5.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"128397397","text":"import 
pygame\nfrom players.Player import Player\nfrom src.Cooldown import Cooldown\n\n\nclass Kemul(Player):\n\n def __init__(self, x, y, handler):\n health = 1100\n damage = 50\n win_quote = \"Chewbacca\"\n lose_quote = \"Chewbacca\"\n name = \"Kemul\"\n defense = .7\n movespeed = 5\n\n super().__init__(health, damage, win_quote, lose_quote, name, x, y, movespeed, handler.getPlatformArray(), handler.getAttackList(), handler, defense)\n\n self.special_active = False\n self.special_cooldown = Cooldown(3)\n self.special_duration = Cooldown(2)\n self.caravan_sprite = pygame.image.load(\"media/Players/Kemul/Kemul 2.png\").convert_alpha()\n self.caravan_x = 1100\n self.caravan_y = 0\n self.caravan_loaded = True\n\n def special(self):\n if self.special_cooldown.isDone():\n self.special_active = True\n self.caravan_y = self.handler.getOtherPlayer(self).rect.y\n\n def update(self, screen):\n super().update(screen)\n\n if not self.special_cooldown.isDone():\n self.special_cooldown.update()\n\n if self.special_active and not self.sleeping:\n self.special_duration.update()\n if not self.special_duration.isDone():\n self.caravan_x -= 20\n screen.blit(self.caravan_sprite, [self.caravan_x, self.caravan_y])\n if -15 < (self.handler.getOtherPlayer(self).rect.x - self.caravan_x) < 15 and -15 < (self.handler.getOtherPlayer(self).rect.y - self.caravan_y) < 15 and self.caravan_loaded:\n self.handler.getOtherPlayer(self).takeTrueDamage(100)\n self.caravan_loaded = False\n else:\n self.special_active = False\n self.caravan_x = 1100\n self.caravan_loaded = True\n self.special_cooldown.update()\n","sub_path":"players/Kemul.py","file_name":"Kemul.py","file_ext":"py","file_size_in_byte":1893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"596918324","text":"import time\nfrom core.scheduling import algorithm\nfrom core import flags\nfrom core import util\nfrom core.network import network_service\n\nFLAGS = flags.FLAGS\n\n\nclass Scheduler(object):\n \"\"\"\n Scheduler object to manage jobs and schedule placements\n \"\"\"\n\n def __init__(self, infrastructure, jobs_manager):\n self.infrastructure = infrastructure\n # jobs manager maintains running jobs / finished jobs\n # NOTE: the queues are in jobs_manager\n self.jobs_manager = jobs_manager\n self.placement = infrastructure.flags.scheme\n self.schedule = infrastructure.flags.schedule\n self.pending_time = 0.0\n # TODO: RL agent\n self.agent = None\n\n def add_rack(self, rack):\n self.infrastructure.racks.append(rack)\n\n def collate_all_nodes(self):\n result = self.infrastructure.nodes\n return result\n\n def num_free_nodes(self):\n all_nodes = self.collate_all_nodes()\n return sum([n.is_free() for n in iter(all_nodes.values())])\n\n def _schedule(self, delta):\n if self.num_free_nodes() < 1:\n return\n jobs_all = self.jobs_manager.total_jobs(delta)\n scheduling_algo = algorithm.scheduling_algorithms[self.schedule]\n placement_algo = algorithm.placement_algorithms[self.placement]\n nodes, job, success = scheduling_algo(placement_algo, self.infrastructure, self.jobs_manager,delta)\n if success:\n if self.infrastructure.enable_network_costs:\n extras = network_service.calculate_network_costs(self.infrastructure, job)\n orginal_duration = job.duration\n job.add_network_costs(extras)\n util.print_fn(\"Job %s : Original duration %f , New duration %f\" %\n (job.job_id, orginal_duration, job.duration))\n self.add_to_running(nodes, job.job_id)\n else:\n assert (jobs_all == self.jobs_manager.total_jobs(delta))\n\n def 
_gen_jobs(self, delta_time):\n self.jobs_manager.gen_jobs(delta_time)\n\n def unfinished_node_count(self):\n nodes = self.collate_all_nodes()\n count = sum([not node.is_finished for node in iter(nodes.values())])\n return count\n\n def release_finished_jobs(self, current_time):\n jobs_to_finish = self.jobs_manager.prepare_finish_tasks(current_time)\n for jtf in jobs_to_finish:\n success = False\n for task_id, node_id in iter(jtf.tasks_running_on.items()):\n running_task = self.infrastructure.nodes[node_id].running_tasks.pop(task_id)\n assert not running_task.finished\n jtf.task_finished(task_id)\n self.infrastructure.nodes[node_id].release_allocated_resources(running_task)\n success = jtf.try_finished()\n if success:\n self.jobs_manager.running_jobs.pop(jtf.job_id)\n if jtf.job_id not in self.jobs_manager.finished_jobs:\n self.jobs_manager.finished_jobs[jtf.job_id] = jtf\n if len(self.infrastructure.nodes[node_id].running_tasks) == 0:\n self.jobs_manager.busy_nodes.remove(node_id)\n assert success\n\n def add_to_running(self, nodes, job_id):\n for k, v in iter(nodes.items()):\n self.jobs_manager.start_job(v, job_id)\n assert (k in self.jobs_manager.busy_nodes)\n\n def _clear_nodes(self):\n for k, v in iter(self.infrastructure.nodes.items()):\n if len(v.running_tasks) == 0:\n v.reset_resource()\n else:\n sum_workers = sum([1 for w in iter(v.running_tasks.values()) if not w.is_ps])\n if sum_workers != v.gpu_used:\n v.reset_resource(sum_workers)\n\n def start(self):\n start_time = time.time()\n delta_time = 0\n current_remaining = self.jobs_manager.total_jobs(delta_time)\n running_jobs = len(self.jobs_manager.running_jobs)\n steps = 0\n while current_remaining + running_jobs > 0:\n # NOTE: Make decision on whether to:\n # 1. Done: schedule new jobs \n # 2. TODO: preempt running jobs \n # 3. TODO: migrate running jobs\n # 4. 
TODO: stochastic job arrival process\n self._gen_jobs(delta_time)\n time.sleep(1)\n if current_remaining > 0:\n # TODO: this will likely to be changed\n self._schedule(delta_time)\n new_current_remaining = self.jobs_manager.total_jobs(delta_time)\n time.sleep(1)\n end_time = time.time()\n self.release_finished_jobs(end_time)\n delta_time = end_time - start_time\n current_remaining = new_current_remaining\n running_jobs = len(self.jobs_manager.running_jobs)\n self.pending_time = self.jobs_manager.average_pending_time()\n steps += 1\n util.print_fn(\"Remaining jobs: %d, Running Jobs: %d Finished Jobs %d\" %\n (new_current_remaining, running_jobs, len(self.jobs_manager.finished_jobs)))\n util.print_fn(self.jobs_manager.running_jobs.keys())\n for k, v in iter(self.infrastructure.nodes.items()):\n util.print_fn(\"Node %s is %s, GPU used %d, each node has tasks %s, gpu_utilizations %s\" %\n (k,\n 'busy' if len(v.running_tasks) > 0 else 'free',\n v.gpu_used,\n str(v.running_tasks.keys()),\n str(v.gpu_mem_utilizations)))\n\n finished_time = time.time()\n total_time_taken = finished_time - start_time\n util.print_fn(\"Total Time Taken in seconds: %d\" % total_time_taken)\n\n def sort_job_trace(self):\n self.jobs_manager.sort_job_trace()\n","sub_path":"core/scheduling/schedule.py","file_name":"schedule.py","file_ext":"py","file_size_in_byte":5902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"117742366","text":"import dash\nimport dash_bootstrap_components as dbc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output, State\n\nexternal_stylesheets = [dbc.themes.BOOTSTRAP]\napp = dash.Dash(\"SeattleADU\", external_stylesheets=external_stylesheets)\n\nmodal_address = html.Div(\n [\n dbc.Button(\"Open modal\", id=\"open\"),\n dbc.Modal(\n [\n dbc.ModalHeader(\"Header\"),\n dbc.ModalBody(\"This is the content of the modal\"),\n dbc.ModalFooter(\n dbc.Button(\"Close\", id=\"close\", className=\"ml-auto\")\n ),\n ],\n id=\"modal\",\n ),\n ]\n)\n\napp.layout = html.Div([\n modal_address,\n ])\n\n\n@app.callback(\n Output(\"modal\", \"is_open\"),\n [Input(\"open\", \"n_clicks\"), Input(\"close\", \"n_clicks\")],\n [State(\"modal\", \"is_open\")],\n)\ndef toggle_modal(n1, n2, is_open):\n if n1 or n2:\n return not is_open\n return is_open\n\n# # hide/show modal\n# @app.callback(Output('modal', 'style'),\n# [Input('instructions-button', 'n_clicks')])\n# def show_modal(n):\n# if n > 0:\n# return {\"display\": \"block\"}\n# return {\"display\": \"none\"}\n\n# # Close modal by resetting info_button click to 0\n# @app.callback(Output('instructions-button', 'n_clicks'),\n# [Input('modal-close-button', 'n_clicks')])\n# def close_modal(n):\n# return 0\n\n\n\nif __name__ == \"__main__\":\n app.run_server(debug=True, port=8888)\n\n","sub_path":"ADUniverse/modal.py","file_name":"modal.py","file_ext":"py","file_size_in_byte":1469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"381015115","text":"#! 
/usr/bin/python\r\n# coding=utf-8\r\nebp0x8=0xfac0f685\r\nebp0xc=0xe0911505\r\nebp0x10=0xaee1f319\r\n\r\n# mov eax,0x27
\r\n# xor al,al
\r\neax=0x27
\r\nal=eax%256
\r\nal=al^al
\r\n\r\n# mov ah,BYTE PTR [ebp+0xb]
\r\n# sal ax,0x10
\r\nah=0x85
\r\nax=(ah<<8)+al  # parentheses matter: << binds less tightly than + in Python
\r\nax=(ax<<0x10)%(2**16)
\r\n\r\n# sub al,BYTE PTR [ebp+0xc]
\r\n# add ah,BYTE PTR [ebp+0xf]
\r\nal=-0x05+2**8
\r\nah=0xe0
\r\nax+=(ah<<8)+al
\r\n\r\n# xor ax,WORD PTR [ebp+0x12]
\r\nax=ax^0xaee1
\r\nprint(hex(ax))","sub_path":"REVERSE/picoCTF/assembly-3/reassemble.py","file_name":"reassemble.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"210223197","text":"\"\"\"
\n# 699. Falling Squares
\n\n# On an infinite number line (x-axis), we drop given squares in the order they are given.
\n\n# The i-th square dropped (positions[i] = (left, side_length)) is a square with the left-most point
\n# being positions[i][0] and sidelength positions[i][1].
\n\n# The square is dropped with the bottom edge parallel to the number line, and from a higher height than
\n# all currently landed squares. We wait for each square to stick before dropping the next.
\n\n# The squares are infinitely sticky on their bottom edge, and will remain fixed to any positive length surface
\n# they touch (either the number line or another square).
\n# Squares dropped adjacent to each other will not stick together prematurely.
\n\n\n# Return a list ans of heights. Each height ans[i] represents the current highest height of any square we have dropped,
\n# after dropping squares represented by positions[0], positions[1], ..., positions[i].
\n\n# Example 1:
\n# Input: [[1, 2], [2, 3], [6, 1]]
\n# Output: [2, 5, 5]
\n# Explanation:
\n\n# After the first drop of positions[0] = [1, 2]:
\n# _aa
\n# _aa
\n# -------
\n# The maximum height of any square is 2.
\n\n\n# After the second drop of positions[1] = [2, 3]:
\n# __aaa
\n# __aaa
\n# __aaa
\n# _aa__
\n# _aa__
\n# --------------
\n# The maximum height of any square is 5. 
\n# The larger square stays on top of the smaller square despite where its center
\n# of gravity is, because squares are infinitely sticky on their bottom edge.
\n\n\n# After the third drop of positions[1] = [6, 1]:
\n# __aaa
\n# __aaa
\n# __aaa
\n# _aa
\n# _aa___a
\n# --------------
\n# The maximum height of any square is still 5.
\n\n# Thus, we return an answer of [2, 5, 5].
\n\n\n# Example 2:
\n# Input: [[100, 100], [200, 100]]
\n# Output: [100, 100]
\n# Explanation: Adjacent squares don't get stuck prematurely - only their bottom edge can stick to surfaces.
\n\n# Note:
\n\n# 1 <= positions.length <= 1000.
\n# 1 <= positions[i][0] <= 10^8.
\n# 1 <= positions[i][1] <= 10^6.
\n\n\"\"\"
\nimport bisect
\n\n\nclass FallingSquares:
\n\n    def fallingSquares(self, positions):
\n        # axis holds the breakpoint coordinates in sorted order; heights[i] is the current
\n        # pile height immediately to the right of axis[i]
\n        heights = [0, 0]
\n        axis = [0, float('inf')]
\n        res = [0]
\n\n        for start, high in positions:
\n\n            # locate the breakpoints covered by the new square [start, start + high)
\n            pos1 = bisect.bisect_right(axis, start)
\n            pos2 = bisect.bisect_left(axis, start + high)
\n\n            # the square lands on the tallest pile it overlaps
\n            new_h = max(heights[pos1-1:pos2]) + high
\n            axis[pos1: pos2] = [start, start + high]
\n\n            # heights[pos2 - 1] survives as the height just right of the new square
\n            heights[pos1: pos2] = [new_h, heights[pos2 - 1]]
\n            res.append(max(res[-1], new_h))
\n\n        return res[1:]
\n\n\n    def doit_segmenttree(self, positions):
\n        \"\"\"
\n        :type positions: List[List[int]]
\n        :rtype: List[int]
\n        \"\"\"
\n        # compress positions into consecutive indices
\n        all_pos = set()
\n        for p in positions:
\n            all_pos.add(p[0])
\n            all_pos.add(p[0] + p[1])
\n        
\n        pos_ind = {}
\n        for i, pos in enumerate(sorted(all_pos)):
\n            pos_ind[pos] = i
\n        
\n        n = len(pos_ind)
\n        segtree = [0] * (4 * n)
\n        lazy = [0] * (4 * n)
\n        
\n        def _insert(left, right, h, root, start, end):
\n            if lazy[root] != 0:
\n                if start != end:
\n                    lazy[root*2] = max(lazy[root*2], lazy[root])
\n                    lazy[root*2 + 1] = max(lazy[root*2+1], lazy[root])
\n                segtree[root] = max(segtree[root], lazy[root])
\n                lazy[root] = 0 
\n            
\n            if left > end or right < start:
\n                return
\n            
\n            if start == end:
\n                segtree[root] = max(segtree[root], h)
\n                return 
\n            
\n            if left <= start and right >= end:
\n                segtree[root] = max(segtree[root], h)
\n                lazy[root*2] = max(lazy[root*2], h)
\n                lazy[root*2 + 1] = max(lazy[root*2+1], h)
\n                return
\n            
\n            mid = (start + end) // 2
\n            _insert(left, right, h, root*2, start, mid)
\n            _insert(left, right, h, root*2 + 1, mid + 1, end)
\n            segtree[root] = max(segtree[2*root], segtree[2*root + 1], h)
\n            return
\n        
\n        def _query(left, right, root, start, end):
\n            if lazy[root] != 0:
\n                if start != end:
\n                    lazy[root*2] = max(lazy[root*2], lazy[root])
\n                    lazy[root*2 + 1] = max(lazy[root*2+1], lazy[root])
\n                segtree[root] = max(segtree[root], lazy[root])
\n                lazy[root] = 0
\n            
\n            if left > end or right < start:
\n                return 0
\n            
\n            if start == end: 
\n                return segtree[root]
\n            
\n            if left <= start and right >= end:
\n                return segtree[root]
\n            
\n            mid = (start + end) // 2
\n            return max(_query(left, right, root*2, start, mid), _query(left, right, root*2+1, mid + 1, end))
\n        
\n        def insert(left, right, h):
\n            _insert(left, right, h, 1, 0, n-1)
\n        
\n        def query(left, right):
\n            return _query(left, right, 1, 0, n-1)
\n        
\n        res = []
\n        accu_max = 0
\n        for p in positions:
\n            left_raw, h = p
\n            right_raw = left_raw + h
\n            left = pos_ind[left_raw]
\n            right = pos_ind[right_raw] - 1
\n            cur_max = query(left, right)
\n            new_max = cur_max + h
\n            accu_max = max(accu_max, new_max)
\n            res.append(accu_max)
\n            insert(left, right, new_max)
\n        return res
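\n\n    # Quick check against Example 1 above: FallingSquares().fallingSquares([[1, 2], [2, 3], [6, 1]])
\n    # returns [2, 5, 5].
\n\n    \"\"\"
\n    Approach 4: Segment Tree with Lazy Propagation
\n    Intuition
\n\n    If we were familiar with the idea of a segment tree (which supports queries and updates on intervals), we can immediately crack the problem.
\n\n    Algorithm
\n\n    Segment trees 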
work by breaking intervals into a disjoint sum of component intervals, whose number is at most log(width).
\n    The motivation is that when we change an element, we only need to change log(width) many intervals that aggregate on an interval containing that element.
\n\n    When we want to update an interval all at once, we need to use lazy propagation to ensure good run-time complexity. This topic is covered in more depth in the original article.
\n\n    With such an implementation in hand, the problem falls out immediately.
\n\n    Complexity Analysis
\n\n    Time Complexity: O(N log N), where N is the length of positions. This is the run-time complexity of using a segment tree.
\n\n    Space Complexity: O(N), the space used by our tree.
\n    \"\"\"
\n    class SegmentTree(object):
\n        def __init__(self, N, update_fn, query_fn):
\n            self.N = N
\n            self.H = 1
\n            while 1 << self.H < N:
\n                self.H += 1
\n\n            self.update_fn = update_fn
\n            self.query_fn = query_fn
\n            self.tree = [0] * (2 * N)
\n            self.lazy = [0] * N
\n\n        def _apply(self, x, val):
\n            self.tree[x] = self.update_fn(self.tree[x], val)
\n            if x < self.N:
\n                self.lazy[x] = self.update_fn(self.lazy[x], val)
\n\n        def _pull(self, x):
\n            while x > 1:
\n                x //= 2
\n                self.tree[x] = self.query_fn(self.tree[x * 2], self.tree[x * 2 + 1])
\n                self.tree[x] = self.update_fn(self.tree[x], self.lazy[x])
\n\n        def _push(self, x):
\n            for h in range(self.H, 0, -1):
\n                y = x >> h
\n                if self.lazy[y]:
\n                    self._apply(y * 2, self.lazy[y])
\n                    self._apply(y * 2 + 1, self.lazy[y])
\n                    self.lazy[y] = 0
\n\n        def update(self, L, R, h):
\n            L += self.N
\n            R += self.N
\n            L0, R0 = L, R
\n            while L <= R:
\n                if L & 1:
\n                    self._apply(L, h)
\n                    L += 1
\n                if R & 1 == 0:
\n                    self._apply(R, h)
\n                    R -= 1
\n                L //= 2
\n                R //= 2
\n            self._pull(L0)
\n            self._pull(R0)
\n\n        def query(self, L, R):
\n            L += self.N
\n            R += self.N
\n            self._push(L)
\n            self._push(R)
\n            ans = 0
\n            while L <= R:
\n                if L & 1:
\n                    ans = self.query_fn(ans, self.tree[L])
\n                    L += 1
\n                if R & 1 == 0:
\n                    ans = self.query_fn(ans, self.tree[R])
\n                    R -= 1
\n                L //= 2
\n                R //= 2
\n            return ans
\n\n    def doit_segment_tree(self, positions):
\n        # Coordinate compression
\n        # index = ...
\n\n        tree = FallingSquares.SegmentTree(len(index), max, max)
\n        best = 0
\n        ans = []
\n        for left, size in positions:
\n            L, R = index[left], index[left + size - 1]
\n            h = tree.query(L, R) + size
\n            tree.update(L, R, h)
\n            best = max(best, h)
\n            ans.append(best)
\n\n        return ans
\n\n    \"\"\"
\n    Approach 3: Block (Square Root) Decomposition
\n    Intuition
\n\n    Whenever we perform operations (like update and query) on some interval in a domain, we could segment that domain with size W into blocks of size sqrt(W).
\n\n    Then, instead of a typical brute force where we update our array heights representing the board, we will also hold another array blocks, where blocks[i] represents the B = floor(sqrt(W)) elements heights[B*i], heights[B*i + 1], ..., heights[B*i + B-1]. This allows us to write to the array in O(B) operations.
\n\n    Algorithm
\n\n    Let's get into the details. We actually need another array, blocks_read. When we update some element i in block b = i / B, we'll also update blocks_read[b]. If later we want to read the entire block, we can read from here (and stuff written to the whole block in blocks[b].)
\n\n    When we write to a block, we'll write in blocks[b]. Later, when we want to read from an element i in block b = i / B, we'll read from heights[i] and blocks[b].
\n\n    Our process for managing query and update will be similar. While left isn't a multiple of B, we'll proceed with a brute-force-like approach, and similarly for right. 
At the end, [left, right+1) will represent a series of contiguous blocks: the interval will have length which is a multiple of B, and left will also be a multiple of B.
\n\n    Complexity Analysis
\n\n    Time Complexity: O(N * sqrt(N)), where N is the length of positions. Each query and update has complexity O(sqrt(N)).
\n\n    Space Complexity: O(N), the space used by heights.
\n\n    \"\"\"
\n    def doit_1(self, positions):
\n        #Coordinate compression
\n        #index = ...
\n\n        W = len(index)
\n        B = int(W**.5)
\n        heights = [0] * W
\n        blocks = [0] * (B+2)
\n        blocks_read = [0] * (B+2)
\n\n        def query(left, right):
\n            ans = 0
\n            while left % B and left <= right:
\n                ans = max(ans, heights[left], blocks[left // B])
\n                left += 1
\n            while right % B != B-1 and left <= right:
\n                ans = max(ans, heights[right], blocks[right // B])
\n                right -= 1
\n            while left <= right:
\n                ans = max(ans, blocks[left // B], blocks_read[left // B])
\n                left += B
\n            return ans
\n\n        def update(left, right, h):
\n            while left % B and left <= right:
\n                heights[left] = max(heights[left], h)
\n                blocks_read[left // B] = max(blocks_read[left // B], h)
\n                left += 1
\n            while right % B != B-1 and left <= right:
\n                heights[right] = max(heights[right], h)
\n                blocks_read[right // B] = max(blocks_read[right // B], h)
\n                right -= 1
\n            while left <= right:
\n                blocks[left // B] = max(blocks[left // B], h)
\n                left += B
\n\n        best = 0
\n        ans = []
\n        for left, size in positions:
\n            L = index[left]
\n            R = index[left + size - 1]
\n            h = query(L, R) + size
\n            update(L, R, h)
\n            best = max(best, h)
\n            ans.append(best)
\n\n        return ans
\n\n\n    def doit1(self, positions):
\n        \"\"\"
\n        :type positions: List[List[int]]
\n        :rtype: List[int]
\n        \"\"\"
\n        heights = {}
\n        ans = []
\n\n        for left, side in positions:
\n\n            right = left + side
\n\n            nearby = [h for h in heights.keys() if not (h[0] >= right or h[1] <= left)]
\n\n            if len(nearby) > 0:
\n                h = max(heights[s] for s in nearby) + side
\n            else:
\n                h = side
\n\n            heights[(left, right)] = h
\n\n            if len(ans) == 0:
\n                ans.append(h)
\n            else:
\n                ans.append(max(ans[-1], h))
\n\n        return ans
\n\n    def doit(self, positions):
\n        \"\"\"
\n        :type positions: List[List[int]]
\n        :rtype: List[int]
\n        \"\"\"
\n        import bisect
\n\n        pos = [0]
\n        heights = [0]
\n        res = []
\n        max_h = 0
\n\n        for left, side in positions:
\n\n            i = bisect.bisect_right(pos, left)
\n            j = bisect.bisect_left(pos, left + side)
\n\n            h = max(heights[i - 1:j] or [0]) + side
\n\n            pos[i:j] = [left, left + side]
\n            heights[i:j] = [h, heights[j - 1]]
\n\n            max_h = max(max_h, h)
\n            res.append(max_h)
\n\n        return res
\n\n\nif __name__ == \"__main__\":
\n\n    res = FallingSquares().doit([[4,9],[8,8],[6,8],[8,2],[1,2]])
\n\n    res = FallingSquares().doit([[1, 2], [2, 3], [6, 1]])
\n\n    res = FallingSquares().doit([[100, 100], [200, 100]])
\n\n    pass
\n\n\n","sub_path":"PythonLeetcode/Leetcode/699_FallingSquares.py","file_name":"699_FallingSquares.py","file_ext":"py","file_size_in_byte":13576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"565717081","text":"import numpy as np 
\nimport os 
\nimport json
\nimport cv2 
\n\npath_mask = \"../../mask_roi\"
\nPATH_ROI = \"../../zones-movement_paths\"
\n\ndef gen_mask():
\n    if os.path.isdir(path_mask) == False:
\n        os.mkdir(path_mask)
\n\n    for annot_file in os.listdir(PATH_ROI):
\n        if annot_file.endswith(\".json\"):
\n\n            with open(os.path.join(PATH_ROI, annot_file)) as f_p:
\n                data = json.load(f_p)
\n\n            file_name = annot_file[:-5]
\n            polygon = data[\"shapes\"]
\n            width = data[\"imageWidth\"]
\n            height = data[\"imageHeight\"]
\n            for each in polygon:
\n                if each[\"label\"] == 
\"zone\":\n empty_mask = np.zeros((height, width),dtype='int32') \n\n list_points = each[\"points\"]\n label = each[\"label\"]\n new_mask = cv2.fillConvexPoly(empty_mask, np.array(list_points, dtype=\"int32\"), 255)\n \n cv2.imwrite(os.path.join(path_mask, file_name+\".jpg\"), new_mask)\n np.save(os.path.join(path_mask, file_name), new_mask)\n\nif __name__ == \"__main__\":\n gen_mask()","sub_path":"utils/gen_mask_from_roi.py","file_name":"gen_mask_from_roi.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"328431717","text":"import os\nfrom datetime import datetime\nfrom pathlib import Path\n\nimport cv2\nfrom imutils.io import TempFile\n\nfrom constants import SEND_EMAIL, ENTER_LOG_FILE_NAME, EXIT_LOG_FILE_NAME, Direction\nfrom logger import Logger\nfrom send_receive_messages import SendReceiveMessages\n\n\nclass HumanValidator:\n enter_log_file = None\n exit_log_file = None\n weekly_log_file = None\n monthly_log_file = None\n\n @classmethod\n def close_log_file(cls):\n # check if the log file object exists, if it does, then close it\n if not cls.enter_log_file:\n cls.enter_log_file.close()\n\n if not cls.exit_log_file:\n cls.exit_log_file.close()\n\n @classmethod\n def initialize_log_file(cls):\n if not cls.enter_log_file:\n cls.enter_log_file = open(os.path.join(Path(__file__).parent, ENTER_LOG_FILE_NAME), mode=\"a\")\n # set the file pointer to end of the file\n if cls.enter_log_file.seek(0, os.SEEK_END) == 0:\n cls.enter_log_file.write(\"Year,Month,Day,Time,Direction\\n\")\n\n if not cls.exit_log_file:\n cls.exit_log_file = open(os.path.join(Path(__file__).parent, EXIT_LOG_FILE_NAME), mode=\"a\")\n # set the file pointer to end of the file\n if cls.exit_log_file.seek(0, os.SEEK_END) == 0:\n cls.exit_log_file.write(\"Year,Month,Day,Time,Direction\\n\")\n\n @classmethod\n def validate_column_movement(cls, trackable_object, time_stamp, frame, objectID):\n # Initialize log file.\n if not cls.enter_log_file or not cls.exit_log_file or not cls.weekly_log_file or not cls.monthly_log_file:\n cls.initialize_log_file()\n\n # check if the object has not been logged\n if not trackable_object.logged and trackable_object.estimated and trackable_object.direction:\n Logger.logger().info(\"For objectID={}, direction ={}\".format(\n objectID,\n repr(trackable_object.direction)))\n\n # set the current year, month, day, and time\n year = time_stamp.strftime(\"%Y\")\n month = time_stamp.strftime(\"%m\")\n day = time_stamp.strftime(\"%d\")\n time = time_stamp.strftime(\"%H:%M:%S\")\n\n if SEND_EMAIL and frame is not None:\n # initialize the image id, and the temporary file\n imageID = time_stamp.strftime(\"%H%M%S%f\")\n tempFile = TempFile()\n\n # write the date and speed on the image.\n cv2.putText(frame, datetime.now().strftime(\"%A %d %B %Y %I:%M:%S%p\"),\n (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 255, 0), 1)\n # write the speed: first get the size of the text\n size, base = cv2.getTextSize(\"%s \" % repr(trackable_object.direction), cv2.FONT_HERSHEY_SIMPLEX, 2,\n 3)\n # then center it horizontally on the image\n cntr_x = int((frame.shape[1] - size[0]) / 2)\n cv2.putText(frame, \"%s \" % repr(trackable_object.direction),\n (cntr_x, int(frame.shape[0] * 0.2)), cv2.FONT_HERSHEY_SIMPLEX, 2.00, (0, 255, 0), 3)\n cv2.imwrite(tempFile.path, frame)\n\n # log the event in the log file\n info = \"{},{},{},{},{},{}\\n\".format(year, month,\n day, time, repr(trackable_object.direction), 
imageID)\n else:\n # log the event in the log file\n info = \"{},{},{},{},{}\\n\".format(year, month,\n day, time, repr(trackable_object.direction))\n cls.initialize_log_file()\n if trackable_object.direction == Direction.ENTER:\n cls.enter_log_file.write(info)\n cls.enter_log_file.flush()\n SendReceiveMessages().increment_face_detected_locally()\n elif trackable_object.direction == Direction.EXIT:\n cls.exit_log_file.write(info)\n cls.exit_log_file.flush()\n SendReceiveMessages().decrement_face_detected_locally()\n\n # set the object has logged\n trackable_object.logged = True\n","sub_path":"Occupancy_Tracker/human_validator.py","file_name":"human_validator.py","file_ext":"py","file_size_in_byte":4256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"35193349","text":"from logistic_model import *\nimport matplotlib\nfrom matplotlib.backends.backend_pdf import PdfPages\n\n# Stochastic nodes in the model (prior specifications)\nclass par:\n '''Prior parameters (ranges for uniform distributions)'''\n r_min,r_max=0.0,10.0\n K_min,K_max=0.0,1.0\n x0_min,x0_max=0.0,0.1\n tau_min,tau_max=0,1500000\n\niters=25000\nburnin=1000\nthinning=10\n\n# Comparing performance of ode and vectorised logistic using simulated, fairly typical data\ndata=sim(n_pred=50)\ninocVal=data.x0_true\n\nstart=time.time()\nM=inference(data,par,iter=iters,burn=burnin,thin=thinning,fixInoc=False,logfun=logistic)\nprint(\"Time taken for vectorised logistic model: \"+sinceStart(start))\n\n#start=time.time()\n#M_ode=inference(data,par,iter=iters,burn=burnin,thin=thinning,fixInoc=False,logfun=logisticode)\n#print(\"Time taken for numerical solution of competition ode: \"+sinceStart(start))\n#posteriorPriorPlots(M_ode,data,par,50)\n\n# Diagnostic plots demonstrating convergance (without posterior predictive points)\ndiag=sim(n_pred=0)\nM_diag=inference(data,par,iter=iters,burn=burnin,thin=thinning,fixInoc=False,logfun=logistic)\n\n# Comparing fixed and varying inoculum density using simulated, fairly typical data\nM_fix=inference(data,par,iter=iters,burn=burnin,thin=thinning,fixInoc=True,inocVal=inocVal,logfun=logistic)\n\n# Demonstrating identifiability problems for dead/missing strains\ndead=sim(r=0,K=0.001,n_pred=50)\nM_dead=inference(dead,par,iter=iters,burn=burnin,thin=thinning,fixInoc=False,logfun=logistic)\n\nmc.Matplot.plot(M_diag,format=\"pdf\")\nwith PdfPages(\"MCMCReport.pdf\") as pdf:\n posteriorPriorPlots(M,data,par,50,show=False,main=\"Healthy strain, inferring x0, simulated data\")\n pdf.savefig()\n plt.close()\n posteriorPriorPlots(M_diag,data,par,50,show=False,main=\"Healthy strain, inferring x0, simulated data\")\n pdf.savefig()\n plt.close()\n posteriorPriorPlots(M_fix,data,par,50,show=False,main=\"Healthy strain, fixing x0, simulated data\")\n pdf.savefig()\n plt.close()\n posteriorPriorPlots(M_dead,dead,par,50,show=False,main=\"Dead/missing strain, inferring x0, simulated data\")\n pdf.savefig()\n plt.close()\n plotCorrs(M,\"Healthy strain (simulated data)\",show=False)\n pdf.savefig()\n plt.close()\n plotCorrs(M_dead,\"Dead or missing strain (simulated data)\",show=False)\n pdf.savefig()\n plt.close()\n comparePosteriors(M,M_fix,data,main=\"Effect of fixing initial condition\",lab1=\"IC varies\",lab2=\"IC fixed\")\n pdf.savefig()\n 
plt.close()
\n","sub_path":"python/pyMC/logistic_inference_simulated.py","file_name":"logistic_inference_simulated.py","file_ext":"py","file_size_in_byte":2494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"564202042","text":"\"\"\"
\nModule EM_MoE.py
\n================
\n\tDefinition of the following ML models useful for developing a full MoE model:
\n\t\tMixture of Experts model
\n\t\t\tclass MoE_model: implements a MoE model with methods for fitting and making predictions
\n\t\tSoftmax regression
\n\t\t\tclass softmax_regression: implements a softmax regression with methods for fitting and making predictions. This is the standard classifier used by MoE model.
\n\"\"\"
\n#################
\n\nimport scipy.stats
\nimport scipy.optimize
\nimport numpy as np
\nimport sys
\nimport os
\n\n################# MoE_model class
\nclass MoE_model(object):
\n\t\"\"\"
\nMoE_model
\n=========
\n\tIt represents and fits a MoE model of the form:
\n\t\tp(y_i | x_i, params) = sum_{k=1}^K S(x_i)_k * N(y_i| <w_k, x_i> + b_k, sigma_k)
\n\twhere S(x_i) is any model. A default for S(x_i) is the softmax model S(x_i) = softmax(V*x_i).
\n\tThe model is fitted through the EM algorithm.
\n\t\"\"\"
\n\tdef __init__(self, D, K, gating_function = None, bias = True):
\n\t\t\"\"\"
\n\t\tInitialize the model.
\n\t\tThe gating function must be specified through an object with methods:
\n\t\t\tfit(x_train (N,D), labels_train (N,K))
\n\t\t\tpredict(x_test (N,D))\treturning labels_predicted (N,K)
\n\t\t\tsave\t\t\t\t\tfor saving to file the entire model
\n\t\t\tload\t\t\t\t\tto load from file the entire model
\n\t\tGating function must use cross entropy loss function.
\n\t\tIf None, a default softmax regression model built from class softmax_regression is used.
\n\t\tThe gating function object must be already initialized properly.
\n\t\tInput:
\n\t\t\tD\t\t\t\t\t\tdimensionality of input space
\n\t\t\tK\t\t\t\t\t\tnumber of experts for the model
\n\t\t\tgating_function (obj)\tan object which represents the gating function
\n\t\t\tbias\t\t\t\t\twhether to use a bias in the expert model
\n\t\t\"\"\"
\n\t\tself.D = D 
\n\t\tself.K = K 
\n\t\tif gating_function is None:
\n\t\t\tself.gating = softmax_regression(D,K)
\n\t\telse:
\n\t\t\tself.gating = gating_function
\n\t\tself.W = np.zeros((self.D, self.K)) #[w_1, ..., w_K]
\n\t\tself.sigma = np.ones((self.K,))
\n\t\tself.bias = bias
\n\t\tself.b = np.zeros((self.K,))
\n\t\tself.initialized = False
\n\n\tdef get_iperparams(self):
\n\t\t\"\"\"
\n\tget_iperparams
\n\t==============
\n\t\tReturns values of D and K.
\n\t\tOutput:
\n\t\t\t(D,K)\t(dimensionality of input space, number of experts)
\n\t\t\"\"\"
\n\t\treturn (self.D, self.K)
\n\n\tdef save(self, exp_file, gat_file):
\n\t\t\"\"\"
\n\tsave
\n\t====
\n\t\tSaves the model to file.
\n\t\tInput:
\n\t\t\texp_file\tfile to save the expert model to
\n\t\t\tgat_file\tfile to save the model for gating function
\n\t\t\"\"\"
\n\t\tto_save = np.stack((self.b, self.sigma)) #(2, K)
\n\t\tto_save = np.concatenate((self.W,to_save) , axis = 0) #(D+2,K)
\n\t\tnp.savetxt(exp_file, to_save)
\n\t\tself.gating.save(gat_file)
\n\t\treturn
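\n\t
\n\t# Hypothetical save/load round trip (file names are placeholders):
\n\t#   model.save('experts.txt', 'gating.txt')
\n\t#   restored = MoE_model(1, 1)
\n\t#   restored.load('experts.txt', 'gating.txt')  # load() resets D and K from the stored shapes
\n\t
\n\tdef load(self, exp_file, gat_file, load_function = None):
\n\t\t\"\"\"
\n\tload
\n\t====
\n\t\tLoad the model from file. 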
It changes parameters D and K if required.\n\t\tInput:\n\t\t\texp_file\t\tfile to load the expert model from\n\t\t\tgat_file\t\tfile to load the model for gating function from (using function load_function)\n\t\t\tgating_function\tfunction for loading the gating function model from file\n\t\t\"\"\"\n\t\tweights = np.loadtxt(exp_file)\n\t\tself.W = weights[:weights.shape[0]-2,:]\n\t\tself.b = weights[weights.shape[0]-2,:]\n\t\tself.sigma = weights[weights.shape[0]-1,:]\n\t\tif np.all(self.b==0):\n\t\t\tself.bias = False\n\t\telse:\n\t\t\tself.bias = True\n\t\tself.D = self.W.shape[0]\n\t\tself.K = self.W.shape[1]\n\t\t\n\t\tif load_function is None:\n\t\t\ttemp_load = softmax_regression(1,1)\n\t\t\tload_function = temp_load.load\n\n\t\tdel self.gating\n\t\tself.gating = load_function(gat_file)\n\t\tself.initialized = True\n\t\treturn\n\n\tdef experts_predictions(self, X):\n\t\t\"\"\"\n\texperts_predictions\n\t===================\n\t\tReturns the predictions of the experts.\n\t\tInput:\n\t\t\tX_test (N,D)\ttest points\n\t\tOutput:\n\t\t\ty_test (N,K)\texperts predictions\n\t\t\"\"\"\n\t\tif X.ndim ==1:\n\t\t\tX = X[:,np.newaxis]\n\t\treturn np.matmul(X,self.W) + self.b\n\n\tdef get_gating_probs(self,X):\n\t\t\"\"\"\n\tget_gating_probs\n\t================\n\t\tReturns the probability p(z=k|x, params)\n\t\tInput:\n\t\t\tX (N,D)\tdata points\n\t\tOutput:\n\t\t\tp_gating (N,K)\tprobabilities of each gating function\n\t\t\"\"\"\n\t\tpi = self.gating.predict(X) #p(z_i = k|x_i) (N,K)\n\t\t#pi = np.divide(pi.T, np.sum(pi, axis = 1)).T\n\t\treturn pi\n\n\tdef predict(self, X):\n\t\t\"\"\"\n\tpredict\n\t=======\n\t\tReturn the predictions of the model\n\t\tInput:\n\t\t\tX (N,D)\ttest points\n\t\tOutput:\n\t\t\ty (N,)\tmodel value at test points\n\t\t\"\"\"\n\t\tif X.ndim ==1:\n\t\t\tX = X[:,np.newaxis]\n\t\t\n\t\tpi = self.gating.predict(X) #p(z_i = k|x_i) (N,K)\n\n\t\t#indices = np.argmax(pi, axis =1)\n\t\t#for i in range(pi.shape[0]):\n\t\t#\tpi[i,indices[i]] = 1.\n\n\t\tpi = np.divide(pi.T, np.sum(pi, axis = 1)).T\n\t\tres = np.multiply(pi, self.experts_predictions(X))\n\t\treturn np.sum(res, axis = 1)\n\n\tdef expert_likelihood(self, X, y): #give to it a proper name!!!\n\t\t\"\"\"\n\texpert_likelihood\n\t=================\n\t\tComputes the quantity p(y_i|x_i, z_i=k) = N(y_i| ). 
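The expert_likelihood method in this record evaluates each expert's Gaussian by standardizing the residual and then dividing the standard-normal pdf by sigma. That is the identity N(y; mu, sigma) = phi((y - mu)/sigma) / sigma, which a two-line check confirms (standalone sketch, arbitrary numbers):

```python
from scipy.stats import norm

y, mu, sigma = 1.3, 0.2, 0.7
# Direct Gaussian pdf vs. standardized-residual form used by expert_likelihood.
assert abs(norm.pdf(y, loc=mu, scale=sigma) - norm.pdf((y - mu) / sigma) / sigma) < 1e-12
```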
This corresponds to the likelihood of each expert for having generated the data.\n\t\tInput:\n\t\t\tX (N,D)\tdata\n\t\t\ty (N,)\ttargets\n\t\tOutput:\n\t\t\tpi_k (N,K)\tN(y_i| )\n\t\t\"\"\"\n\t\tgaussians_mean = self.experts_predictions(X) #(N,K) X*W + b\n\t\ty = np.repeat( np.reshape(y, (len(y),1)), self.K, axis = 1) #(N,K)\n\n\t\t#print('sigma: ', self.sigma)\n\t\tres = scipy.stats.norm.pdf( np.divide((y - gaussians_mean), self.sigma) ) #(N,K)\n\t\treturn np.divide(res, self.sigma) #normalizing result\n\n\tdef log_likelihood(self, X, y):\n\t\t\"\"\"\n\tlog_likelihood\n\t==============\n\t\tComputes the log_likelihood for the data given with formula: (Are you sure of the formula??)\n\t\t\tLL \t= sum_{i=1}^N log sum_{k=1}^K p(y_i|x_i, z_i=k) * p(z_i=k |x_i) =\n\t\t\t\t= sum_{i=1}^N log sum_{k=1}^K N(y_i | ) S(x_i)_k\n\t\tInput:\n\t\t\tX (N,D)\tdata\n\t\t\ty (N,)\ttargets for regression\n\t\tOutput:\n\t\t\tLL\tlog-likelihood for the model\n\t\t\"\"\"\n\t\tif X.ndim == 1:\n\t\t\tX = np.reshape(X, (X.shape[0],1))\n\t\texp_likelihood = self.expert_likelihood(X, y)\n\t\tres = np.multiply(self.gating.predict(X), exp_likelihood) #(N,K)\n\t\tres[np.where(np.abs(res)<1e-30)] = 1e-30 #small regularizer for LL\n\t\tres = np.log(np.sum(res,axis=1)) #(N,)\n\t\treturn np.sum(res) / X.shape[0]\n\n\tdef __initialise_smart(self, X, args):\n\t\t\"\"\"\n\t__initialise_smart\n\t==================\n\t\tHaving seen the data makes a smart first guess (with farhtest point clustering) for responsibilities and fit gating function model with those responbility.\n\t\tInput:\n\t\t\tX (N,D)\t\ttrain data\n\t\t\targs\t\targuments to be given to fit method of gating function\n\t\t\"\"\"\n\t\tcentroids = np.zeros((self.K,self.D))\n\t\tif X.shape[0] > 10*self.K:\n\t\t\tdata = X[:10*self.K,:]\n\t\telse:\n\t\t\tdata = X\n\t\tN = data.shape[0]\n\n\t\t\t#choosing centroids\n\t\t\t#points are chosen from dataset with farhtest point clustering\n\t\tran_index = np.random.choice(N)\n\t\tcentroids[0,:] = data[ran_index]\n\n\t\tfor k in range(1,self.K):\n\t\t\tdistances = np.zeros((N,k)) #(N,K)\n\t\t\tfor k_prime in range(k):\n\t\t\t\tdistances[:,k_prime] = np.sum(np.square(data - centroids[k_prime,:]), axis =1) #(N,K')\n\t\t\tdistances = np.min(distances, axis = 1) #(N,)\n\t\t\tdistances /= np.sum(distances) #normalizing distances to make it a prob vector\n\t\t\tnext_cl_arg = np.random.choice(range(data.shape[0]), p = distances) #chosen argument for the next cluster center\n\t\t\tcentroids[k,:] = data[next_cl_arg,:]\n\n\t\tvar = np.var(X, axis = 0) #(D,)\n\n\t\t\t#computing initial responsibilities\n\t\tr_0 = np.zeros((X.shape[0],self.K))\n\t\tfor k in range(self.K):\n\t\t\tr_0[:,k] = np.sum(np.divide(np.square(X - centroids[k,:]), var), axis = 1) + 1e-5\n\t\tr_0 = np.divide(r_0.T, np.sum(r_0,axis=1)).T\n\n\t\tself.gating.fit(X,r_0, *args)\n\n\t\treturn r_0\n\n\n\n\tdef __initialise(self, X, args):\n\t\t\"\"\"\n\t__initialise\n\t============\n\t\tHaving seen the data makes a first guess for responsibilities and fit gating function model with those responbility.\n\t\tInput:\n\t\t\tX (N,D)\t\ttrain data\n\t\t\targs\t\targuments to be given to fit method of gating function\n\t\t\"\"\"\n\t\t\t#getting centroids\n\t\tindices = np.random.choice(range(X.shape[0]), size = self.K, replace = False)\n\t\tcentroids = X[indices,:] #(K,D) #K centroids are chosen\n\t\t\t#getting variances\n\t\tvar = np.var(X, axis = 0) #(D,)\n\n\t\t\t#computing initial responsibilities\n\t\tr_0 = np.zeros((X.shape[0],self.K))\n\t\tfor k in 
range(self.K):\n\t\t\tr_0[:,k] = np.divide(np.square(X - centroids[k,:]), var)[:,0] + 1e-10\n\t\tr_0 = np.divide(r_0.T, np.sum(r_0,axis=1)).T\n\n\t\tself.gating.fit(X,r_0, *args)\n\n\t\treturn r_0\n\n\tdef fit(self, X_train, y_train, N_iter=None, threshold = 1e-2, args= [], verbose = False, val_set = None, pick_best = False):\n\t\t\"\"\"\n\tfit\n\t===\n\t\tFit the model using EM algorithm.\n\t\tInput:\n\t\t\tX_train (N,D)\ttrain data\n\t\t\ty_train (N,)\ttrain targets for regression\n\t\t\tN_iter\t\t\tMaximum number of iteration (if None only threshold is applied)\n\t\t\tthreshold\t\tMinimum change in LL below which algorithm is terminated\n\t\t\targs\t\t\targuments to be given to fit method of gating function\n\t\t\tverbose \t\twhether to print values during fit\n\t\t\tval_set\t\t\ttuple (X_val, y_val) with a validation set to test performances\n\t\t\tpick_best\t\tif True the model with best validation mse is chosen as best model (doesn't apply if val_set is None)\n\t\tOutput:\n\t\t\thistory\t\tlist of value for the LL of the model at every epoch\n\t\t\"\"\"\n\t\tif X_train.ndim == 1:\n\t\t\tX_train = np.reshape(X_train, (X_train.shape[0],1))\n\t\tif X_train.shape[1] != self.D:\n\t\t\traise TypeError(\"Wrong shape for X_train matrix \"+str(X_train.shape)+\". Second dimension should have lenght \"+str(self.D))\n\t\tif y_train.ndim > 1:\n\t\t\ty_train = y_train[:,0]\n\n\t\tif threshold is None:\n\t\t\tthreshold = 0\n\n\t\t\t#initialization\n\t\tif not self.initialized:\n\t\t\tr_0 = self.__initialise_smart(X_train,args)\n\t\t\tself.EM_step(X_train,y_train, r_0, args)\n\n\t\ti = 0\n\t\told_LL = (-1e5,)\n\t\tLL = (-1e4,)\n\t\tif pick_best: best_mse = 1e6 #best mse so far (only if val_set is not None)\n\t\thistory=[]\n\t\twhile(LL[0] - old_LL[0] > threshold ): #exit condition is decided by train LL (even if val_set is not None): don't know why...\n\t\t\t\t#do batch update!!!\n\t\t\told_LL = LL\n\t\t\tgat_history = self.EM_step(X_train,y_train, args = args)\n\t\t\tLL=(self.log_likelihood(X_train, y_train),)\n\t\t\tif isinstance(val_set,tuple) :\n\t\t\t\tLL = (LL[0], self.log_likelihood(val_set[0], val_set[1]))\n\t\t\thistory.append(LL)\n\t\t\ti += 1\n\t\t\tif verbose:\n\t\t\t\tprint(\"LL at iter \"+str(i)+\"= \",LL)\n\t\t\t\ttry:\n\t\t\t\t\tprint(\" Gating loss: \", gat_history[0], gat_history[-1])\n\t\t\t\texcept TypeError:\n\t\t\t\t\tpass\n\t\t\t\tif isinstance(val_set,tuple) :\n\t\t\t\t\tmse = np.sum(np.square( self.predict(val_set[0])-val_set[1]))/val_set[0].shape[0]\n\t\t\t\t\tprint(\" Val loss: \", mse)\n\t\t\t\t\tif pick_best:\n\t\t\t\t\t\tif mse < best_mse:\n\t\t\t\t\t\t\tbest_mse = mse\n\t\t\t\t\t\t\tprint(\"Chosen the best!\")\n\t\t\t\t\t\t\ttry: #saving best model so far\n\t\t\t\t\t\t\t\tself.save(\"temp_exp\", \"temp_gat\") \n\t\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\t\tos.system(\"rm -f temp_exp temp_gat\")\n\t\t\tif N_iter is not None:\n\t\t\t\tif i>= N_iter:\n\t\t\t\t\tbreak\n\t\t\ttry:\n\t\t\t\tassert LL[0] - old_LL[0] >=0 #train LL must always increase in EM algorithm. 
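The fit loop below stops when the gain in training log-likelihood falls under a threshold and asserts the EM monotonicity guarantee: the LL can never decrease after a full E+M update (note the inline comment after the assert has it backwards — "increase" should read "decrease"). Stripped of the model specifics, the driver pattern looks like the following sketch, where e_step, m_step and log_likelihood are hypothetical callables standing in for the record's methods:

```python
def run_em(e_step, m_step, log_likelihood, threshold=1e-2, max_iter=100):
    """Generic EM driver: iterate until the LL improvement drops below threshold."""
    old_ll, history = float("-inf"), []
    for _ in range(max_iter):
        responsibilities = e_step()    # E: posterior over latent expert assignments
        m_step(responsibilities)       # M: re-fit experts and gating function
        ll = log_likelihood()
        assert ll >= old_ll - 1e-8, "EM log-likelihood must be non-decreasing"
        history.append(ll)
        if ll - old_ll < threshold:
            break
        old_ll = ll
    return history
```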
Useful check\n\t\t\texcept:\n\t\t\t\tbreak #if LL increase (and it shouldn't) EM should terminate\n\n\t\tself.initialized = True\n\n\t\tif isinstance(val_set,tuple) and pick_best: #loading best model so far\n\t\t\tfiles = os.listdir(\".\")\n\t\t\tif \"temp_exp\" in files and \"temp_gat\" in files:\n\t\t\t\tprint(\"loaded the best\")\n\t\t\t\tself.load = (\"temp_exp\", \"temp_gat\")\n\t\t\t\tos.system(\"rm -f temp_exp temp_gat\")\n\n\t\treturn history\n\n\tdef EM_step(self, X, y, r = None, args = []):\n\t\t\"\"\"\n\tEM_step\n\t=======\n\t\tDoes one EM update.\n\t\tInput:\n\t\t\tX (N,D)\ttrain data\n\t\t\ty (N,)\ttrain targets for regression\n\t\t\tr (N,K)\tresponsibilities for the E step (if None they are computed with method get_responsibilities)\n\t\t\targs\tsome arguments to pass to fit method of gating function\n\t\tOutput:\n\t\t\tgat_history\t\thistory for the gating function fit\n\t\t\"\"\"\n\t\t\t#E step\n\t\tif r is None:\n\t\t\tr= self.get_responsibilities(X,y) #(N,K)\n\n\t\t\t#M step\n\t\t\t#M step for experts\n\t\t\t\t#weights is updated by solving a linear fit with weights r_{ik} in loss function\n\t\tif self.bias:\n\t\t\tX_temp = np.concatenate((np.ones((X.shape[0],1)),X), axis = 1)\n\t\telse:\n\t\t\tX_temp = X\n\t\tfor k in range(self.K):\n\t\t\tR = np.diag(r[:,k]) #(N,N)\n\t\t\ttemp = np.linalg.inv(np.matmul(X_temp.T,np.matmul(R,X_temp))) #(D,D)/(D+1,D+1)\n\t\t\ttemp = np.matmul(np.matmul(temp, X_temp.T),R) #(D,N)/(D+1,N)\n\t\t\ttemp = np.matmul(temp, y) #(D,)/(D+1,)\n\t\t\tif self.bias:\n\t\t\t\tself.b[k] = temp[0] #()\n\t\t\t\tself.W[:,k] = temp[1:] #(D,)\n\t\t\telse:\n\t\t\t\tself.W[:,k] = temp #(D,)\n\t\t\tsigma_square = np.sum(np.multiply(r[:,k], np.square(y-self.experts_predictions(X)[:,k])) ) / np.sum(r[:,k])\n\t\t\tself.sigma[k] = np.sqrt(sigma_square)\n\n\t\t\t#M step for gating functions\n\t\tgat_history = self.gating.fit(X,r, *args)\n\t\treturn gat_history\n\n\tdef get_responsibilities(self,X,y):\n\t\t\"\"\"\n\tget_responsibilities\n\t====================\n\t\tComputes responsibilities for the given input data:\n\t\t\tr_k = p(y=k|x)\n\t\tInput:\n\t\t\tX (N,D)\tdata\n\t\t\ty (N,)\tdata labels\n\t\t\"\"\"\n\t\tpi = self.gating.predict(X) #p(z_i = k|x_i) (N,K)\n\t\tpi = np.divide(pi.T, np.sum(pi, axis = 1)).T\n\t\texp_term = self.expert_likelihood(X, y)\n\n\t\tr = np.multiply(pi, exp_term) + 1e-10 # (N,K) responsibilities matrix\n\t\tr = np.divide(r.T, np.sum(r, axis =1)).T\n\t\tassert np.all(r>=0) \t\t\t\t\t\t\t\t#checking that all r_{ik} are more than zero\n\t\tassert np.all(np.abs(np.sum(r,axis=1) - 1)<1e-5) \t#checking proper normalization\n\n\t\t#r_new = np.zeros(r.shape)\n\t\t#print(\"r\",r)\n\t\t#indices = np.argmax(r, axis =1)\n\t\t#print(indices)\n\t\t#for i in range(r.shape[0]):\n\t\t#\tr_new[i,indices[i]] = 1.\n\t\t#print(\"r_new\", r_new)\n\t\treturn r\n\n\tdef get_gradient(self,X):\n\t\t\"\"\"\n\tget_gradient\n\t============\n\t\tReturns the gradient of the prediction y:\n\t\t\tgrad_ni = D_i y_n\n\t\twhere grad has shape (D,) and D_i denotes the partial derivative w.r.t. 
x_i.\n\t\tInput:\n\t\t\tX (N,D)\n\t\tOutput:\n\t\t\tgrad (N,D)\n\t\t\"\"\"\n\t\tassert X.shape[1] == self.D\n\t\tjac_S = self.gating.get_jacobian(X) #(N,K,D)\n\t\tS = self.gating.predict(X) #(N,K)\n\t\t\t#self.W (D,K)\n\t\tpred = np.matmul(X, self.W)+ self.b #(N,K)\n\t\tgrad = np.multiply(pred[:,:,None],jac_S) #(N,K,D)\n\t\tgrad = np.sum(grad, axis = 1)\n\t\tgrad = grad + np.matmul(S, self.W.T) #(N,D)\n\t\t#print(X.shape,S.shape,jac_S.shape, grad.shape) #DEBUG\n\t\treturn grad\n\n################# softmax_regression class\nclass softmax_regression(object):\n\t\"\"\"\nsoftmax_regression\n==================\n\tImplements a class for softmax regression with K labels. It has the form:\n\t\tp(y= k|x,V) = exp( V_k*x ) / sum_{k=1}^K exp( V_k*x + b_k )\n\tIt has methods for getting predictions from the model and to fit the model.\n\t\"\"\"\n\tdef __init__(self, D, K):\n\t\t\"\"\"\n\t__init__\n\t========\n\t\tInitialize the model with K classes for regressions.\n\t\tInput:\n\t\t\tD \tdimensionality of input space\n\t\t\tK\tnumber or classes for regression\n\t\t\"\"\"\n\t\tself.D = D \n\t\tself.K = K \n\t\tself.V = np.zeros((D+1,K))\n\t\treturn\n\n\tdef save(self, filename):\n\t\t\"\"\"\n\tsave\n\t====\n\t\tSave softmax model to file.\n\t\tInput:\n\t\t\tfilename\tname of the file to save the model to\n\t\t\"\"\"\n\t\tnp.savetxt(filename, self.V)\n\t\treturn\n\n\tdef load(self, filename):\n\t\t\"\"\"\n\tload\n\t====\n\t\tLoad the model from file.\n\t\tInput:\n\t\t\tfilename\tname of the file to load the model from\n\t\t\"\"\"\n\t\tself.V = np.loadtxt(filename)\n\t\tself.D = self.V.shape[0]-1\n\t\tself.K = self.V.shape[1]\n\t\treturn self\n\n\tdef predict(self, X_test, V = None):\n\t\t\"\"\"\n\tpredict\n\t=======\n\t\tMakes predictions for the softmax regression model. Weights can be freely specified by the user.\n\t\tInput:\n\t\t\tX_test (N,D)/(N,)\ttest points\n\t\t\tV (D+1,K)\t\t\tweight of the model that gives predictions (if None, internal weights are used)\n\t\tOutput:\n\t\t\ty_test (N,K)\tmodel prediction\n\t\t\"\"\"\n\t\tif X_test.ndim == 1:\n\t\t\tX_test = X_test[:,np.newaxis]\n\t\tif X_test.shape[1] == self.D:\n\t\t\tX_test = np.concatenate((np.ones((X_test.shape[0],1)), X_test), axis = 1) #adding dummy variable for the bias\n\t\tif V is None:\n\t\t\tV = self.V\n\t\tres = np.matmul(X_test,V) #(N,K)\n\t\tres = np.exp(res) + 1e-5 #(N,K)\n\t\tres = np.divide(res.T, np.sum(res, axis = 1)).T #(N,K) normalizing\n\n\t\treturn res\n\n\tdef get_jacobian(self,X):\n\t\t\"\"\"\n\tget_jacobian\n\t============\n\t\tReturns the jacobian of the softmax function:\n\t\t\tJ_ki = D_i S(V^T x)_k\n\t\twhere J has shape (K,D) and D_i denotes the partial derivative w.r.t. x_i.\n\t\tInput:\n\t\t\tX (N,D)\n\t\tOutput:\n\t\t\tgrad (N,K,D)\n\t\t\"\"\"\n\t\tassert X.shape[1] == self.D\n\t\tV = self.V[1:,:].T #(K,D)\n\t\tsoftmax = self.predict(X) #(N,K)\n\t\tjac1 = np.multiply(softmax[:,:,None], V[None,:,:]) #(N,K,D)\n\t\tjac2 = np.matmul(softmax,V) #(N,D)\n\t\tjac2 = np.multiply(jac2[:,None,:], softmax[:,:,None]) #(N,K,D)\n\t\tjac = jac1 -jac2 #(N,K,D)\n\t\t#print(\"ciao\",softmax.shape,jac1.shape,jac2.shape, jac.shape) #DEBUG\n\t\treturn jac #(N,K,D)\n\t\t\n\n\tdef fit_single_loop(self, X_train, y_train):\n\t\t\"\"\"\n\tfit_single_loop\n\t===============\n\t\t######## DOES NOT WORK YET!! 
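get_jacobian above implements d softmax(V^T x)_k / d x_i = s_k * (V[i,k] - sum_j s_j V[i,j]). A finite-difference spot check of that formula (a standalone sketch; the small random shapes are arbitrary, not taken from the record):

```python
import numpy as np

def softmax(z):
    e = np.exp(z - z.max())
    return e / e.sum()

def softmax_jacobian_wrt_x(x, V):
    """J[k, i] = s_k * (V[i, k] - sum_j s_j * V[i, j])."""
    s = softmax(V.T @ x)                  # (K,)
    return s[:, None] * (V.T - s @ V.T)   # (K, D)

rng = np.random.default_rng(0)
x, V = rng.normal(size=3), rng.normal(size=(3, 4))
analytic = softmax_jacobian_wrt_x(x, V)

# Central finite differences, one input coordinate at a time.
eps = 1e-6
numeric = np.stack(
    [(softmax(V.T @ (x + eps * np.eye(3)[i])) - softmax(V.T @ (x - eps * np.eye(3)[i]))) / (2 * eps)
     for i in range(3)],
    axis=1)
assert np.allclose(analytic, numeric, atol=1e-5)
```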
WE ARE SORRY FOR THE INCONVENIENT ######\n\t\tFit the model using the closed form of LL of the problem.\n\t\tSee: https://link.springer.com/content/pdf/10.1007%2F978-3-642-01510-6_109.pdf \n\t\tInput:\n\t\t\tX_train (N,D)\ttrain data\n\t\t\ty_train (N,)\ttrain targets for regression\n\t\t\"\"\"\n\t\tif X_train.shape[1] == self.D:\n\t\t\tX_train = np.concatenate((np.ones((X_train.shape[0],1)), X_train), axis = 1) #adding dummy variable for the bias\n\n\t\tfor k in range(self.K-1):\n\t\t\t#print(np.where(y_train[:,-1]+2e-5 ==0))\n\t\t\tdiv = np.divide(y_train[:,k],y_train[:,-1]+2e-10)\n\t\t\tdiv[np.where(div ==0)] = 1e-20\n\t\t\tH = np.log(div) # (N,) \"new targets for linear regression\"\n\t\t\tfitted_V = np.matmul(np.linalg.inv(np.matmul(X_train.T,X_train)), X_train.T) #(D+1,N)\n\t\t\tfitted_V = np.matmul(fitted_V, H)\n\t\t\tself.V[:,k] = fitted_V\n\n\t\tself.V[:,-1] = np.zeros((self.D+1,)) #last is zero by default! Remember it!\n\n\t\tnon_zero = np.where(self.predict(X_train)<0)\n\t\tprint(\"<0: \",non_zero)\n\t\treturn \n\n\tdef accuracy(self, X_test, y_test):\n\t\t\"\"\"\n\taccuracy\n\t========\n\t\tComputes the accuracy of the model (i.e. the fraction of misclassified points).\n\t\tThis measure is meaningful only in the case of hard clustering where there is only one label for each data point.\n\t\tInput:\n\t\t\tX_test (N,D)\ttest points\n\t\t\ty_test (N,K)\ttrue labels of test points\n\t\tOutput:\n\t\t\taccuracy\taccuracy of the predictions made at test points\n\t\t\"\"\"\n\t\tif X_test.ndim == 1:\n\t\t\tX_test = np.reshape(X_test, (X_test.shape[0],1))\n\t\ty_pred = self.predict(X_test)\n\t\treturn np.sum(np.argmax(y_pred,axis=1)==np.argmax(y_test,axis=1))/float(y_test.shape[0])\n\n\n\tdef LL(self, X_test, y_test):\n\t\t\"\"\"\n\tLL\n\t==\n\t\tEvaluate the log-likelihood for the model given X and their labels.\n\t\tInput:\n\t\t\tX_test (N,D)\ttest points\n\t\t\ty_test (N,K)\tlabels of test points\n\t\tOutput:\n\t\t\tLL\tlog-likelihood for the model\n\t\t\"\"\"\n\t\tif X_test.ndim == 1:\n\t\t\tX_test = np.reshape(X_test, (X_test.shape[0],1))\n\t\treturn self.loss(self.V, [X_test, y_test, 0.]) * X_test.shape[0]\n\t\t\n\n\tdef loss(self, V, data):\n\t\t\"\"\"\n\tloss\n\t====\n\t\tLoss function to minimize wrt. V. It is the function:\n\t\t\tNLL(V) = - log[p(D|V)]\n\t\tInput:\n\t\t\tV ((D+1)*K,)\tweights of logreg\n\t\t\tdata \t\t\tlist [X_train (N,D), y_train (N,K), lambda ()]\n\t\tOutput:\n\t\t\tloss\tvalue for the loss function evaluated at V\t\n\t\t\"\"\"\n\t\tV = np.reshape(V, (self.D+1,self.K))\n\t\tX = data[0]\n\t\ty = data[1]\n\t\treg_constant = data[2]\n\n\t\tmu = self.predict(X, V) #(N,K)\n\t\t\t#mu must be regularized in logarithm. Otherwise it might give Nan if a label prob is 0\n\t\tLL = -(np.sum(np.multiply(y, np.log(mu+1e-40))) / X.shape[0]) + 0.5* reg_constant * np.sum(np.square(V))\n\t\treturn LL\n\n\tdef grad(self, V, data):\n\t\t\"\"\"\n\tgrad\n\t====\n\t\tGradient of the loss function to minimize wrt. 
V (see function loss())\n\t\tInput:\n\t\t\tV ((D+1)*K,)\tweights of logreg\n\t\t\tdata \t\t\tlist [X_train (N,D), y_train (N,K), lambda ()]\n\t\tOutput:\n\t\t\tgrad\tvalue for the gradient of loss function evaluated at V\t\n\t\t\"\"\"\n\t\tto_reshape = False\n\t\tif V.ndim == 1:\n\t\t\tV = np.reshape(V, (self.D+1,self.K))\n\t\t\tto_reshape = True\n\t\tX = data[0]\n\t\ty = data[1]\n\t\treg_constant = data[2] #regularizer\n\t\t\n\t\tmu = self.predict(X, V) #(N,K)\n\t\tdelta = (mu - y) + 1e-4\n\t\tgrad = np.matmul(X.T,delta) / X.shape[0] + reg_constant*V #(N,D).T (N,K) = (D,K)\n\t\tif to_reshape:\n\t\t\treturn np.reshape(grad, ((self.D+1)*self.K,))\n\t\telse:\n\t\t\treturn grad\n\t\n\tdef get_weights(self):\n\t\t\"\"\"\n\tget_weights\n\t===========\n\t\tReturns the weights of the model.\n\t\tInput:\n\t\tOutput:\n\t\t\tV (D+1,K)\tweights for the model\n\t\t\"\"\"\n\t\treturn self.V\n\n\tdef fit(self, X_train, y_train, opt = \"adam\", val_set = None, reg_constant = 1e-4, verbose = False, threshold = 1e-2, N_iter = 30, learning_rate = 1e-3):\n\t\t\"\"\"\n\tfit\n\t===\n\t\tFit the model using gradient descent.\n\t\tCan use adam for adaptive step: https://arxiv.org/abs/1412.6980v8\n\t\tCan use bfgs method provided by scipy: https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.fmin_bfgs.html\n\t\tUsing a small regularizer can help convergence (especially for bfgs).\n\t\tInput:\n\t\t\tX_train (N,D)\ttrain data\n\t\t\ty_train (N,)\ttrain targets for regression\n\t\t\topt\t\t\t\twhich optimizer to use (\"adam\" or \"bsfg\")\n\t\t\tval_set\t\t\ttuple (X_val, y_val) with a validation set to test performances\n\t\t\treg_constant\tregularization constants\n\t\t\tverbose\t\t\twhether to print values of loss function at every train step\n\t\t\tthreshold\t\tminimun improvement of validation erorr on 10 iteration before stopping (train error if val_set =None)\n\t\t\tN_iter\t\t\tnumber of iteration to be performed (doesn't apply to bfgs or if threshold is not None)\n\t\t\tlearning_rate\tlearning rate used for gradient update (doesn't apply to bfgs)\n\t\tOutput:\n\t\t\thistory\t\tlist of value for the loss function\n\t\t\"\"\"\n\t\tif X_train.ndim == 1:\n\t\t\tX_train = np.reshape(X_train, (X_train.shape[0],1))\n\t\tif X_train.shape[1] == self.D:\n\t\t\tX_train = np.concatenate((np.ones((X_train.shape[0],1)), X_train), axis = 1) #not necessary but might be useful to speed up the code\n\n\t\targs = (X_train, y_train, reg_constant) #arguments for loss and gradients\n\t\t\n\t\tif opt == \"adam\":\n\t\t\treturn self.__optimize_adam(args, threshold, N_iter, learning_rate, verbose, val_set)\n\t\tif opt == \"bfgs\":\n\t\t\treturn self.__optimise_bfgs(args, verbose, val_set)\n\n\tdef __optimise_bfgs(self, args, verbose, val_set = None):\n\t\t\"\"\"\n\t__optimise_bfgs\n\t===============\n\t\tWrapper to scipy.optimize.fmin_bfgs (https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.fmin_bfgs.html) to minimise loss function.\n\t\tInput:\n\t\t\targs\t\ttuple of arguments to be passed to loss and gradient [X_train (N,D), y_train (N,K), lambda ()]\n\t\t\tverbose\t\twhether to print scipy convergence message\n\t\t\tval_set\t\ttuple (X_val, y_val) with a validation set to test performances\n\t\tOutput:\n\t\t\thistory\t\tinitial and final value of loss function\n\t\t\"\"\"\n\t\tloss_0 = (self.loss(self.V, args),)\n\t\tif isinstance(val_set,tuple):\n\t\t\targs_val = (val_set[0], val_set[1], args[2])\n\t\t\tloss_val = self.loss(self.V, args_val)\n\t\t\tloss_0 = (loss_0, loss_val)\n\n\t\t\t#wrapper to 
self.loss and self.grad to make them suitable for scipy\n\t\tloss = lambda V, a,b,c: self.loss(V,(a,b,c))\n\t\tgrad = lambda V, a,b,c: self.grad(V,(a,b,c))\n\n\t\tres = scipy.optimize.fmin_bfgs(loss, self.V.reshape(((self.D+1)*self.K,)), grad, args , disp = verbose)\n\t\tself.V = res.reshape((self.D+1,self.K))\n\n\t\tif isinstance(val_set,tuple):\n\t\t\tloss_fin = (self.loss(self.V, args), self.loss(self.V, args_val))\n\t\telse:\n\t\t\tloss_fin = (self.loss(self.V, args), )\n\t\treturn [loss_0, loss_fin]\n\n\n\tdef __optimize_adam(self, args, threshold, N_iter, learning_rate, verbose, val_set = None):\n\t\t\"\"\"\n\t__optimise_adam\n\t===============\n\t\tImplements optimizer with to perform adaptive step gradient descent.\n\t\tThe implementation follows: https://arxiv.org/abs/1412.6980v8\n\t\tInput:\n\t\t\targs\t\t\ttuple of arguments to be passed to loss and gradient [X_train (N,D), y_train (N,K), lambda ()]\n\t\t\tthreshold\t\tminimun improvement of train erorr before stopping fitting procedure\n\t\t\tN_iter\t\t\tnumber of iteration to be performed\n\t\t\tlearning_rate\tlearning rate to be set for adam\n\t\t\tverbose\t\t\twhether to print loss at each step\n\t\t\tval_set\t\t\ttuple (X_val, y_val) with a validation set to test performances\n\t\tOutput:\n\t\t\thistory\t\tlist of loss function value (train, )/(train,val) at each iteration step\n\t\t\"\"\"\n\t\t\t#setting parameters for learning rate\n\t\tbeta1 = .9\t\t#forgetting factor for first moment\n\t\tbeta2 = .999\t#forgetting factor for second moment\n\t\tepsilon = 1e-8\n\t\tm = np.zeros(self.V.shape) #first moment\n\t\tv = np.zeros(self.V.shape) #second moment\n\t\thistory = []\n\t\tif threshold is not None:\n\t\t\tN_iter = 1000000000 # if threshold, no maximum iteration should be used\n\t\tfor i in range(0,N_iter):\n\t\t\tg = self.grad(self.V, args)\n\t\t\tm = beta1*m + (1-beta1)*g\n\t\t\tv = beta2*v + (1-beta2)*np.square(g)\n\t\t\tm_corr = m / (1-beta1)\n\t\t\tv_corr = v / (1-beta2)\n\n\t\t\tupdate = np.divide(m_corr, np.sqrt(v_corr)+epsilon)\n\t\t\tif np.any(np.isnan(update)): #debug\n\t\t\t\tquit()\n\t\t\tself.V = self.V - learning_rate * update\n\t\t\tself.V[:,-1] = np.zeros((self.D+1,))\n\n\t\t\tif isinstance(val_set,tuple):\n\t\t\t\targs_val = (val_set[0], val_set[1], args[2])\n\t\t\t\thistory.append((self.loss(self.V, args), self.loss(self.V, args_val)) ) #(train_err, val_err)\n\t\t\telse:\n\t\t\t\thistory.append((self.loss(self.V, args),))\n\n\t\t\tif verbose:\n\t\t\t\tprint(\"Loss at iter= \",i, history[i])\n\n\t\t\tif threshold is not None and i>10:\n\t\t\t\tif history[-10][-1] - history[-1][-1] < threshold:\n\t\t\t\t\tbreak\n\t\treturn history\n\n\n\n\n","sub_path":"dev/mlgw_v1/EM_MoE.py","file_name":"EM_MoE.py","file_ext":"py","file_size_in_byte":24651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"229214787","text":"import numpy\nimport read_data\nimport data_analysis\nfrom threading import Thread\nfrom data_storage import dt\nimport data_storage\n\n\ndebug = True\nif debug:\n # flags = ['-t', '1', '-o', '-', '/home/ivakin/replay.bin.gz']\n flags = ['-t', '1', '-o', '-', '/home/lsrp/Experiments/2017-05-14/beam3/replay.bin.gz.2017-05-15_08:00:05']\nelse:\n flags = ['-t', '1', '-o', '-', '/home/lsrp/Polarimeter/replay.bin.gz']\npath_to_sniffer = '/home/lsrp/Polarimeter/extract_hitdump'\n\n\nclass AddPoint(Thread):\n \"\"\"\n Подбирает данные по их появлению, записывает их в чанки, обновляет графики\n \"\"\"\n\n def __init__(self, experimental_plot, hist_plot):\n 
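__optimize_adam above follows the cited Kingma & Ba paper, though its bias correction appears to divide by the constants (1 - beta1) and (1 - beta2) rather than the paper's time-dependent (1 - beta1**t) terms. For reference, a self-contained sketch of the textbook update on a toy quadratic:

```python
import numpy as np

def adam_step(theta, grad, m, v, t, lr=1e-3, beta1=0.9, beta2=0.999, eps=1e-8):
    """One Adam update (Kingma & Ba 2015) with time-dependent bias correction."""
    m = beta1 * m + (1 - beta1) * grad
    v = beta2 * v + (1 - beta2) * grad ** 2
    m_hat = m / (1 - beta1 ** t)   # 1 - beta1**t, not the constant 1 - beta1
    v_hat = v / (1 - beta2 ** t)
    return theta - lr * m_hat / (np.sqrt(v_hat) + eps), m, v

# Toy use: minimize f(theta) = theta**2 from theta = 5 (grad = 2*theta).
theta, m, v = 5.0, 0.0, 0.0
for t in range(1, 5001):
    theta, m, v = adam_step(theta, 2 * theta, m, v, t, lr=1e-2)
assert abs(theta) < 0.1
```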
super(AddPoint, self).__init__()\n self.experimental_plot = experimental_plot\n self.hist_plot = hist_plot\n self._is_active = False\n self.last_created_point_time = None\n self.chunk_pointer = 0 # указатель на номер чанка для обработки в усредненную точку\n\n def run(self):\n self._is_active = True\n self.experimental_plot.plot_signal.emit('clear_variables', [])\n self.experimental_plot.plot_signal.emit('clear_plot', [])\n self.add_point()\n\n def add_point(self):\n \"\"\"\n Создание chunk\n \"\"\"\n start_chunk_time = None\n experimental_data = []\n last_point_time_step = data_storage.point_time_step\n while self._is_active:\n\n data = data_storage.experiment_data_queue.get(block=True)\n\n data_time = data[0]\n if start_chunk_time is None:\n start_chunk_time = data_time\n\n if self.last_created_point_time is None:\n self.last_created_point_time = data_time\n\n if last_point_time_step != data_storage.point_time_step:\n # Обновление графика по обновлению шага времени\n self.plot_points()\n self.last_created_point_time = data_time\n last_point_time_step = data_storage.point_time_step\n\n if data_time - start_chunk_time > data_storage.chunk_time_step:\n numpy_data = numpy.array([tuple(i) for i in experimental_data], dtype=dt)\n experimental_data.clear()\n data_analysis.chunk_list.add_chunk(numpy_data)\n self.hist_plot.plot_signal.emit()\n if data_time - self.last_created_point_time > data_storage.point_time_step:\n # Обновление графика с шагом {point_time_step}\n self.plot_points()\n self.last_created_point_time = data_time\n start_chunk_time = data_time\n else:\n experimental_data.append(data)\n else:\n return\n\n def plot_points(self):\n \"\"\"Отрисовка точек\"\"\"\n # TODO: выбор режима отрисовки: асимметрия или разница средних\n # time, y_data, error = data_analysis.chunk_list.create_asym_points()\n time, y_data, error = data_analysis.chunk_list.create_dif_points()\n self.experimental_plot.plot_signal.emit('set_data', [time, y_data, error])\n self.experimental_plot.plot_signal.emit('plot', [])\n\n def stop(self):\n self._is_active = False\n\n def __del__(self):\n pass\n\n\nclass Experiment:\n\n def start(self, experimental_plot, hist_plot):\n \"\"\"\n Организация чтения и обработки данных.\n \"\"\"\n self.data_update_thread = AddPoint(experimental_plot, hist_plot)\n self.pipe_read_thread = read_data.PipeRead(data_storage.experiment_data_queue, path_to_sniffer, flags)\n self.pipe_read_thread.daemon = True\n self.data_update_thread.daemon = True\n self.pipe_read_thread.start()\n self.data_update_thread.start()\n print('Experiment is started!')\n\n def stop(self):\n print('Experiment is stopped')\n self.data_update_thread.stop()\n self.pipe_read_thread.stop()\n del self.data_update_thread\n del self.pipe_read_thread\n data_storage.experiment_data_queue.queue.clear()\n data_analysis.chunk_list.clear()\n data_storage.big_hist = None\n\n def pause(self):\n self.pipe_read_thread.pause()\n\n def exp_continue(self):\n self.pipe_read_thread.resume()\n\n\nexperiment = Experiment()\n","sub_path":"experiment.py","file_name":"experiment.py","file_ext":"py","file_size_in_byte":4530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"505722812","text":"import os\nimport numpy as np\nfrom PIL import Image\n\n\ndef read_batch(root_path, name, size, NimBatch):\n\n root_path = os.path.join(root_path, name)\n\n Nim = 4000\n\n batch = np.zeros((NimBatch,) + size + (3,), dtype=np.float32)\n list_im = np.random.permutation(Nim)[0:NimBatch]\n for i in 
range(NimBatch):\n infile = os.path.join(root_path, name+\".{:0}.jpg\".format(list_im[i]))\n file, ext = os.path.splitext(infile)\n pointer_im = Image.open(infile)\n pointer_im = pointer_im.resize(size, Image.ANTIALIAS)\n batch[i, :, :, :] = np.asarray(pointer_im, dtype=float)\n\n return batch\n\n\ndef read_batch_mixed(root_path, size, NimBatch):\n batch_x = np.zeros((NimBatch,) + size + (3,), np.uint8)\n\n vect_alea = np.random.randint(0, high=2, size=NimBatch)\n\n Ndogs = np.sum(vect_alea)\n Ncats = NimBatch - Ndogs\n\n vect_alea = vect_alea.astype(np.float32)\n\n batch_dog = read_batch(root_path, 'dog', size, Ndogs)\n batch_cat = read_batch(root_path, 'cat', size, Ncats)\n\n batch_x[vect_alea == 1, :, :, :] = batch_dog\n batch_x[vect_alea == 0, :, :, :] = batch_cat\n\n batch_x = batch_x / np.float32(255)\n batch_y = np.transpose(np.array([vect_alea, 1 - vect_alea]))\n\n return batch_x, batch_y\n","sub_path":"lecture.py","file_name":"lecture.py","file_ext":"py","file_size_in_byte":1251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"70244123","text":"\n\ndef print_info(case_data, response_data_str, res):\n \"\"\"打印详细数据\"\"\"\n result_data = \"\"\"\n
\n 请求方式:%s ,请求URL:%s\n 请求params:%s\n 预期:%s\n 用例测试结果:%s\n 实际返回结果:
\n %s\"\"\" % (case_data['method'], case_data['basic_url'], case_data['parameter'], case_data['expected'],\n res, response_data_str)\n return result_data","sub_path":"Extend/printInfo.py","file_name":"printInfo.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"446322490","text":"# importing Telegram API\nimport networkx as nx\nimport pickle\nimport matplotlib.pyplot as plt\nfrom telegram import ParseMode\nfrom telegram import ReplyKeyboardMarkup\nfrom telegram.ext import Updater\nfrom telegram.ext import CommandHandler\nfrom telegram.ext import MessageHandler, Filters\nfrom telegram.ext import ConversationHandler\n#t.me/LPQuizbot\n\n# defining callback function for the /start command\ndef start(bot, update, user_data):\n username = update.message.chat.username\n missatge = \"Hola %s\" % (username)\n pickle_in = open('enq.pickle','rb')\n enquestes = pickle.load(pickle_in)\n pickle_in.close()\n if 'report' not in user_data:\n user_data['report'] = {}\n user_data['report'] = enquestes\n bot.send_message(chat_id=update.message.chat_id, text=\"Benvingut al QuizBot %s\"%username)\n\ndef help(bot, update):\n info = '''\n Comandes del bot:\n /start\n Inicia la conversa amb el Bot.\n\n /help\n Llista totes les comandes possibles del bot\n amb una breu descripcio\n\n /author\n Mostra el nom complet de l'autor del projecte \n i el seu correu electrònic de la facultat.\n\n /quiz \n Inicia la enquesta seleccionada per ID.\n \n /bar \n Diagrama de barres de les respostes a la pregunta donada.\n \n /pie \n Grafica de formatgets amb el percentatge de les respostes a la pregunta donada.\n \n /report\n Taula amb el nombre de preguntes respostes per cada valor de cada pregunta.\n\n '''\n bot.send_message(chat_id=update.message.chat_id, text=info)\n\ndef author(bot, update):\n missatge = \"\"\"\n David Carballo Montalbán\n (david.carballo@est.fib.upc.edu)\"\"\"\n bot.send_message(chat_id=update.message.chat_id, text=missatge)\n\ndef quiz(bot, update, args, user_data):\n if 'graph' not in user_data:\n user_data['graph'] = {}\n # Llegim el nostre graph\n graph = nx.read_gpickle(\"graph.gpickle\")\n user_data['graph'] = graph\n enquestes = user_data['report']\n if 'enquesta' not in user_data:\n user_data['enquesta'] = {}\n if 'act_enquesta' not in user_data:\n user_data['act_enquesta'] = \"\"\n if 'act_question' not in user_data:\n user_data['act_question'] = \"\" \n\n # Creació del pickle per la solucio\n if 'respostes' not in user_data:\n user_data['respostes'] = {} \n\n # Obtenir l'enquesta solicitada\n\n id_enq = args[0]\n bot.send_message(chat_id=update.message.chat_id, text=\"Enquesta: %s\" % (id_enq))\n \n succ = []\n for u,v,color in graph.edges.data('color'):\n if color == 'black':\n succ.append((u,v))\n\n sub = graph.edge_subgraph(succ)\n succ = nx.shortest_path(sub,id_enq,'END')\n succ.remove(id_enq)\n \n user_data['act_enquesta'] = id_enq\n user_data['enquesta'][id_enq] = succ\n \n # Fem la primera pregunta de l'enquesta\n quest = succ[0]\n if enquestes.get(quest) == None:\n enquestes[quest] = {}\n\n pregunta = \" \".join(graph.nodes[quest]['content'])\n bot.send_message(chat_id=update.message.chat.id, text=\"E> %s\" % (pregunta))\n user_data['act_question'] = quest\n\n for u,v in graph.edges(quest):\n if(graph[quest][v]['color'] == 'blue'):\n respostes = graph.nodes[v]['content']\n for cont,x in enumerate(respostes):\n bot.send_message(chat_id=update.message.chat.id, text=\"%s: %s\" % (cont,\" 
\".join(x)))\n\ndef answer(bot, update, user_data):\n enquestes = user_data['report']\n\n quest = user_data['act_question']\n if enquestes.get(quest) == None:\n enquestes[quest] = {}\n next_quest = quest\n id_enq = user_data['act_enquesta']\n graph = user_data['graph']\n\n # Actualitzar dades\n msg = update.message.text\n if enquestes[quest].get(msg) == None:\n enquestes[quest][msg] = 1\n else:\n enquestes[quest][msg] += 1\n #print(enquestes) \n\n # Seguent pregunta\n succ = user_data['enquesta'][id_enq]\n #print(succ)\n succ.remove(quest)\n for u,v in graph.edges(quest):\n #print(\"%s,%s\"%(u,v))\n if(graph[u][v]['color'] == 'green' and graph[u][v]['tag'] == msg):\n #print(graph[u][v]['tag'])\n succ.insert(0,v)\n next_quest = v\n\n if next_quest == quest:\n next_quest = quest = succ[0]\n\n if succ[0] == 'END':\n bot.send_message(chat_id=update.message.chat.id, text=\"Gràcies pel teu temps!\")\n #print(enquestes)\n pickle_out = open('enq.pickle','wb')\n pickle.dump(enquestes,pickle_out)\n pickle_out.close()\n\n else:\n user_data['act_question'] = next_quest\n user_data['enquesta'][id_enq] = succ\n pregunta = \" \".join(graph.nodes[next_quest]['content'])\n bot.send_message(chat_id=update.message.chat.id, text=\"E> %s\" % (pregunta))\n\n for u,v in graph.edges(next_quest):\n if(graph[next_quest][v]['color'] == 'blue'):\n respostes = graph.nodes[v]['content']\n for cont,x in enumerate(respostes):\n bot.send_message(chat_id=update.message.chat.id, text=\"%s: %s\" % (cont,\" \".join(x)))\n\ndef bar(bot, update, args):\n try:\n pickle_in = open('enq.pickle','rb')\n enq = pickle.load(pickle_in)\n pickle_in.close()\n\n fitxer = \"bar%s.png\" % (args[0])\n fig = plt.figure()\n ax = fig.add_axes([0,0,1,1])\n x = enq[args[0]].keys()\n y = enq[args[0]].values()\n ax.bar(x,y)\n plt.savefig(fitxer, bbox_inches='tight')\n bot.send_photo(chat_id=update.message.chat_id, photo=open(fitxer, 'rb'))\n except Exception as e:\n print(e)\n bot.send_message(chat_id=update.message.chat_id, text='Error al crear la gràfica')\n\ndef pie(bot, update, args):\n try:\n pickle_in = open('enq.pickle','rb')\n enq = pickle.load(pickle_in)\n pickle_in.close()\n fitxer = \"pie%s.png\" % (args[0])\n\n total = sum(enq[args[0]].values())\n\n perc = [x/total for x in enq[args[0]].values()]\n labels = enq[args[0]].keys()\n explode = [0.05] * len(labels)\n fig,ax = plt.subplots()\n ax.pie(perc, labels=labels,explode=explode,shadow=True, autopct='%1.1f%%',startangle=90)\n ax.axis('equal')\n plt.savefig(fitxer, bbox_inches='tight')\n bot.send_photo(chat_id=update.message.chat_id, photo=open(fitxer, 'rb'))\n except Exception as e:\n print(e)\n bot.send_message(chat_id=update.message.chat_id, text='Error al crear la gràfica')\n\ndef report(bot, update):\n try:\n pickle_in = open('enq.pickle','rb')\n enq = pickle.load(pickle_in)\n pickle_in.close()\n\n table = \"*Pregunta*\\t*Valor*\\t*Respostes*\\n\"\n for question in enq:\n for u in enq[question]:\n table += \"%s\\t%s\\t%s\\n\"%(question,u,enq[question][u])\n bot.send_message(chat_id=update.message.chat_id, text=table, parse_mode=ParseMode.MARKDOWN)\n\n except Exception as e:\n print(e)\n bot.send_message(chat_id=update.message.chat_id, text='Error al carregar el report')\n\n# loading the access token from token.txt\nTOKEN = open('token.txt').read().strip()\n\n# call main Telegram objects\nupdater = Updater(token=TOKEN)\ndispatcher = updater.dispatcher\n\n# handling callbacks functions to the commands\ndispatcher.add_handler(CommandHandler('start', start, 
pass_user_data=True))\ndispatcher.add_handler(CommandHandler('help', help))\ndispatcher.add_handler(CommandHandler('author', author))\ndispatcher.add_handler(CommandHandler('quiz', quiz, pass_args=True, pass_user_data=True))\ndispatcher.add_handler(CommandHandler('bar', bar, pass_args=True))\ndispatcher.add_handler(CommandHandler('pie', pie, pass_args=True))\ndispatcher.add_handler(CommandHandler('report', report))\n\ndispatcher.add_handler(MessageHandler(Filters.text, answer, pass_user_data=True))\n\n# starting the bot\nupdater.start_polling()","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":7982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"469930121","text":"'''\r\nCreated on 21 May 2018\r\n\r\n@author: neeraj.mahajan\r\n'''\r\n\r\nfrom delivery_db_api.database.config import db\r\nfrom delivery_db_api.models.abstract_model import AbstractModel\r\nfrom delivery_db_api.utils import ExceptionUtils\r\n\r\n\r\nclass HostSubscriptionModel(db.Model, AbstractModel):\r\n '''\r\n This model class defines the database mapping for host_subscription table and\\\r\n methods for retrieving and saving the records into host_subscription table.\r\n\r\n '''\r\n __tablename__ = 'host_subscription'\r\n\r\n host_subscription_id = db.Column(db.Integer, primary_key=True)\r\n host_subscription_key = db.Column(db.String(255))\r\n host_subscription_description = db.Column(db.String(255))\r\n host_region_id = db.Column(db.Integer, db.ForeignKey(\r\n 'host_region.host_region_id'))\r\n system_network_set_id = db.Column(db.Integer, db.ForeignKey(\r\n 'system_network_set.system_network_set_id'))\r\n environment_subscription_type_id = db.Column(db.Integer, db.ForeignKey(\r\n 'environment_subscription_type.environment_subscription_type_id'))\r\n environment_type_id = db.Column(db.Integer, db.ForeignKey(\r\n 'environment_type.environment_type_id'))\r\n network_sets = db.relationship(\r\n 'NetworkSetModel',\r\n backref='host_subscription',\r\n lazy=True)\r\n\r\n @classmethod\r\n def find_by_id(cls, _id):\r\n '''\r\n This method is used to lookup host_subscription from database, based on provided host_subscription_id\r\n '''\r\n host_subscription = cls.query.filter_by(\r\n host_subscription_id=_id).first()\r\n ExceptionUtils.raise_exception_if_object_not_found(\r\n host_subscription, \"HostSubscription\", _id)\r\n return host_subscription\r\n\r\n @classmethod\r\n def find_by_name(cls, name):\r\n '''\r\n This method is used to lookup host_subscription from database, based on provided host_subscription\r\n '''\r\n host_subscription = cls.query.filter_by(\r\n host_subscription_key=name).first()\r\n return host_subscription\r\n\r\n def save_to_db(self):\r\n '''\r\n This method save HostSubscription record to the database\r\n '''\r\n db.session.add(self)\r\n db.session.commit()\r\n return self\r\n\r\n @staticmethod\r\n def get_hsv_host_subscriptions():\r\n '''\r\n get list of host subscriptions for list of host subscriptions screen\r\n '''\r\n data = []\r\n sql_connection = db.engine.connect()\r\n result = sql_connection.execute(\r\n \"select hs.host_subscription_id, hs.host_subscription_key, hs.host_subscription_description, hs.host_region_id, hr.host_region_description, hs.system_network_set_id, sns.system_network_set_name, hs.environment_subscription_type_id, est.environment_subscription_type_name, hs.environment_type_id, et.environment_type_name from host_subscription hs join host_region hr on hs.host_region_id = hr.host_region_id join 
system_network_set sns on hs.system_network_set_id = sns.system_network_set_id join environment_subscription_type est on hs.environment_subscription_type_id = est.environment_subscription_type_id join environment_type et on hs.environment_type_id = et.environment_type_id;\")\r\n for row in result:\r\n data.append(row)\r\n return data\r\n\r\n def json(self, detail_required=True, host_region=True):\r\n '''\r\n Convenience method to retrieve Json representation of this model class\r\n '''\r\n host_subscription_json = {\r\n \"host_subscription_id\": self.host_subscription_id,\r\n \"host_subscription_key\": self.host_subscription_key,\r\n \"host_subscription_description\": self.host_subscription_description,\r\n }\r\n\r\n if detail_required:\r\n if host_region:\r\n host_subscription_json[\"host_region\"] = self.host_region.json(\r\n detail_required=False)\r\n if self.system_network_set is not None:\r\n host_subscription_json[\"system_network_set\"] = self.system_network_set.json(\r\n detail_required=False)\r\n if self.environment_subscription_type is not None:\r\n host_subscription_json[\"environment_subscription_type\"] = self.environment_subscription_type.json(\r\n detail_required=False)\r\n if self.environment_type is not None:\r\n host_subscription_json[\"environment_type\"] = self.environment_type.json(\r\n detail_required=False)\r\n host_subscription_json[\"network_sets\"] = list(map(lambda x: x.json(\r\n detail_required=True, host_subscription=False), self.network_sets))\r\n return host_subscription_json\r\n","sub_path":"flask/delivery_db_api/models/host_subscription.py","file_name":"host_subscription.py","file_ext":"py","file_size_in_byte":4738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"196334529","text":"#Using names.txt, a 46k text file containing over five thousand first names, begin by sorting it into alphabetical order. Then working out the alphabetical value for each name, multiple this value by its alphabetical position in the list to obtain a name score.\n#For example, when the list is sorted into alphabetical order, COLIN, which is worth 3 + 15 + 12 + 9 + 14 = 53, is the 938th name in the list. 
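The Project Euler solution that follows is written for Python 2 (xrange, str.translate(None, ...)). A Python 3 rendering of the same scoring rule, checked against the COLIN value worked out in the comment, would be:

```python
def word_val(word):
    return sum(ord(c) - ord('A') + 1 for c in word)   # A=1 ... Z=26

def names_score(names):
    return sum((i + 1) * word_val(name) for i, name in enumerate(sorted(names)))

assert word_val("COLIN") == 53   # 3 + 15 + 12 + 9 + 14, as in the comment
```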
So, COLIN would obtain a score of 938*53 = 49714.\n#What is the total of all the name scores in the file?\nimport time\n\n\nFILENAME = 'names.txt'\n\ndef names_score():\n\tfileRaw = open(FILENAME, 'r').read()\n\tnoQuotes = fileRaw.translate(None, '\"')\n\tourList = []\n\twhile len(noQuotes):\n\t\tif ',' in noQuotes:\n\t\t\tname = noQuotes[0:noQuotes.index(',')]\n\t\t\tourList.append(name)\n\t\t\tnoQuotes = noQuotes[noQuotes.index(',')+1:]\n\t\telse:\n\t\t\tourList.append(noQuotes)\n\t\t\tnoQuotes = ''\n\tourList = sorted(ourList)\n\treturn sum([(i+1)*word_val(ourList[i]) for i in xrange(len(ourList))])\n\ndef word_val(word):\n\treturn sum([' ABCDEFGHIJKLMNOPQRSTUVWXYZ'.index(word[b]) for b in xrange(len(word))])\n\ndef ans(times):\n l = []\n for k in xrange(times):\n start_t = time.time()\n z = names_score()\n end_t = time.time()\n time_taken = end_t - start_t\n l.append(time_taken)\n return sorted(l)\n","sub_path":"solved/und1sec/p22/p22.py","file_name":"p22.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"456386479","text":"import math\r\n\r\nfrom django import template\r\nfrom django.template.defaultfilters import stringfilter\r\nfrom django.utils import timezone\r\nfrom django.utils.safestring import mark_safe\r\nfrom django.utils.translation import ngettext, gettext_lazy as _\r\n\r\nfrom precise_bbcode.bbcode import get_parser\r\n\r\nfrom ..helper import calculate_days_interval\r\n\r\nregister = template.Library()\r\n\r\n\r\n@register.filter\r\n@stringfilter\r\ndef first_char(chars):\r\n return chars[0].lower()\r\n\r\n\r\n@register.filter(expects_localtime=True)\r\ndef time_ago(param):\r\n now = timezone.now()\r\n diff = now - param\r\n currently = _('just now')\r\n if diff.days == 0 and 30 < diff.seconds < 60:\r\n currently = _('%s seconds ago') % diff.seconds\r\n elif diff.days == 0 and 60 <= diff.seconds < 3600:\r\n minutes = math.floor(diff.seconds / 60)\r\n currently = ngettext(_('%(minutes)d minute ago'),\r\n _('%(minutes)d minutes ago'), minutes) % {\r\n 'minutes': minutes\r\n }\r\n elif diff.days == 0 and 3600 <= diff.seconds < 86400:\r\n hours = math.floor(diff.seconds / 3600)\r\n currently = ngettext(_('%(hours)d hour ago'),\r\n _('%(hours)d hours ago'), hours) % {\r\n 'hours': hours\r\n }\r\n elif 1 <= diff.days <= 7:\r\n days = diff.days\r\n currently = ngettext(_('%(days)d day ago'),\r\n _('%(days)d days ago'), days) % {\r\n 'days': days\r\n }\r\n elif 7 <= diff.days <= 30:\r\n weeks = math.floor(diff.days / 7)\r\n currently = ngettext(_('%(weeks)d week ago'),\r\n _('%(weeks)d weeks ago'), weeks) % {\r\n 'weeks': weeks\r\n }\r\n elif 30 <= diff.days <= 365:\r\n months = math.floor(diff.days / 30)\r\n currently = ngettext(_('%(months)d month ago'),\r\n _('%(months)d months ago'), months) % {\r\n 'months': months\r\n }\r\n elif diff.days >= 365:\r\n years = math.floor(diff.days / 365)\r\n currently = ngettext(_('%(years)d year ago'),\r\n _('%(years)d years ago'), years) % {\r\n 'years': years\r\n }\r\n return currently\r\n\r\n\r\n@register.filter\r\ndef pretty_count(value, decimal_place=1):\r\n if isinstance(value, int):\r\n views = str(value)\r\n if 3 <= len(views) <= 6:\r\n views = round(int(views) / 1000, decimal_place)\r\n return '%(views)sk' % {'views': views}\r\n elif 6 <= len(views) <= 9:\r\n views = round(int(views) / 1000000, decimal_place)\r\n return '%(views)sM' % {'views': views}\r\n else:\r\n return views\r\n\r\n\r\n@register.filter\r\ndef 
unit_to_tens(value):\r\n \"Convert pagination in unit digit to tens.\"\r\n value = str(value)\r\n if len(value) < 2:\r\n value = '0' + value\r\n return value\r\n\r\n\r\n@register.filter\r\ndef get_dictionary_value(result, key):\r\n try:\r\n return result[key]\r\n except KeyError:\r\n return None\r\n\r\n\r\n@register.simple_tag(name='days_past_interval')\r\ndef interval_calculator(date_obj):\r\n return calculate_days_interval(date_obj)\r\n\r\n\r\n@register.filter(need_autoescape=False, is_safe=True)\r\n@stringfilter\r\ndef dont_escape(text):\r\n parser = get_parser()\r\n return mark_safe(parser.render(text))\r\n","sub_path":"src/sleekapps/cores/templatetags/cores_util_tags.py","file_name":"cores_util_tags.py","file_ext":"py","file_size_in_byte":3470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"361065938","text":"import pickle\nimport random\nimport re\nimport urllib\nimport urllib2\nfrom modules import filename\n\nMAX_LOGS = 30\nMAX_QUOTES = 100\nQVERSION = 2\n\n\ndef upgrade(self, cur_version):\n if cur_version == 1:\n self.quotes = {\n user: [(msg, None) for msg in msgs]\n for user, msgs in self.quotes.items()\n }\n cur_version += 1\n else:\n return\n save_quotes(self)\n\n\ndef setup(self):\n self.logs = []\n self.quotes = {}\n try:\n f = open(filename(self, \"quotes\"), \"r\")\n num, self.quotes = pickle.load(f)\n f.close()\n except IOError:\n pass\n upgrade(self, num)\n\n\ndef save_quotes(self):\n try:\n f = open(filename(self, \"quotes\"), \"w\")\n pickle.dump((QVERSION, self.quotes), f)\n f.close()\n except IOError:\n pass\n\n\ndef log(phenny, input):\n if MAX_LOGS is not None:\n phenny.logs.append((input.nick.lower(), input.group(1).replace(\"\\n\", \"\").lstrip(\" \")))\n phenny.logs = phenny.logs[-MAX_LOGS:]\nlog.rule = r\"(.*)\"\n\n\ndef quote_me(phenny, input):\n if input.group(2) is None or input.group(3) is None:\n return phenny.say(\"I'm not convinced you're even trying to quote someone???\")\n user, msg = input.group(2), input.group(3)\n user = re.sub(r\"[\\[\\]<>: +@]\", \"\", user.lower())\n if (user, msg) in phenny.logs:\n try:\n phenny.logs.remove((user, msg))\n except ValueError: # well it's gone now anyway (threads amirite)\n pass\n phenny.quotes.setdefault(user, []).append((msg, input.nick))\n phenny.quotes[user] = phenny.quotes[user][-MAX_QUOTES:]\n save_quotes(phenny)\n phenny.say(\"Quote added\")\n else:\n phenny.say(\"I'm not convinced %s ever said that.\" % user)\nquote_me.rule = ('$nick', ['quote'], r'\\[?(?:\\d\\d?:?\\s?)*\\]?(<[\\[\\]@+ ]?\\S+>|\\S+:?)\\s+(.*)')\n\n\ndef get_quote(phenny, input):\n if input.group(2) is None:\n if not phenny.quotes:\n return phenny.say(\"You guys don't even have any quotes.\")\n nick = random.choice(phenny.quotes.keys())\n else:\n nick = input.group(2).lower()\n if nick in phenny.quotes:\n return phenny.say(u\"<{}> {}\".format(nick, random.choice(phenny.quotes[nick])[0]))\n return phenny.say(\"%s has never said anything noteworthy.\" % input.group(2))\nget_quote.rule = ([\"quote\"], r\"(\\S+)\", r\"? 
*$\")\n\ndef get_quotes(phenny, input):\n if input.group(2) is None:\n quotes_string = u\"\\n\".join(u\"<{}> {}\".format(nick, quote) for nick, quotes in phenny.quotes.items() for quote, submitter in quotes)\n else:\n nick = input.group(2).lower()\n quotes_string = u\"\\n\".join(u\"<{}> {}\".format(nick, quote) for quote, submitter in phenny.quotes.get(nick, []))\n if quotes_string:\n try:\n data = urllib.urlencode({\"content\": quotes_string.encode(\"utf-8\")})\n request = urllib2.Request(\"http://dpaste.com/api/v2\",data)\n response = urllib2.urlopen(request)\n except urllib2.HTTPError as e:\n return phenny.say(u\"Could not create quotes file: error code {}, reason: {}\".format(\n e.code, e.reason))\n else:\n return phenny.say(response.geturl())\n else:\n return phenny.say(\"No quotes were found.\")\nget_quotes.rule = ([\"quotes\"], r\"(\\S+)\", r\"? *$\")\n\ndef qnuke(phenny, input):\n if input.group(2) is None:\n return\n if input.nick not in phenny.ident_admin: return phenny.notice(input.nick, 'Requires authorization. Use .auth to identify')\n nick = input.group(2).lower()\n if nick in phenny.quotes:\n del phenny.quotes[nick]\n save_quotes(phenny)\n return phenny.say(\"All of %s's memorable quotes erased.\" % nick)\n return phenny.say(\"Yeah whatever.\")\nqnuke.rule = ([\"qnuke\"], r\"(\\S+)\")\n\n\ndef debug_log(phenny, input):\n if input.nick not in phenny.ident_admin: return phenny.notice(input.nick, 'Requires authorization. Use .auth to identify')\n tor = \"[\"\n for log in phenny.logs:\n if len(tor) + len(log) >= 490:\n phenny.notice(tor)\n tor = \"\"\n tor += log + \", \"\n return phenny.notice(input.nick, tor + \"]\")\ndebug_log.rule = ([\"debuglog\"], )\n","sub_path":"modules/user_quote.py","file_name":"user_quote.py","file_ext":"py","file_size_in_byte":4185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"65921819","text":"from floodsystem.stationdata import build_station_list\nfrom floodsystem.geo import stations_within_radius\n\n\ndef run():\n \"\"\"Requirements for Task 1C\"\"\"\n\n # Build list of stations\n stations = build_station_list()\n centre=(52.2053, 0.1218)\n r=10\n x=stations_within_radius(stations, centre, r)\n print (x)\n\n\n \n\n\nif __name__ == \"__main__\":\n print(\"*** Task 1C: CUED Part IA Flood Warning System ***\")\n run()\n\n","sub_path":"partia-flood-warning-system-master/Task1C.py","file_name":"Task1C.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"403105973","text":"from tkinter import *\n\ndef draw_haouse_at(x,y,w,h,roof_color,wall_color):\n rtop_x = x+w/2\n wtop_y = y+h/2\n bottom_x = x+w\n bottom_y = y+h\n canvas.create_polygon(rtop_x,y,\n x,wtop_y,\n x+w,wtop_y,\n outline=roof_color,fill=roof_color)\n canvas.create_rectangle(x,wtop_y,bottom_x,bottom_y,outline=wall_color,fill=wall_color)\ntk = Tk()\ncanvas = Canvas(tk,width = 500,height=400,bd=0)\ncanvas.pack()\n\ndraw_haouse_at(0,100,200,200,\"red\",\"gray\")\ntk.mainloop()\n\n","sub_path":"Block_Breaking/01-house12.py","file_name":"01-house12.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"467158375","text":"import argparse\nimport logging\nimport sys\n\nimport pandas as pd\n\nfrom datasets.chemprot_abstract_dataset_factory import ChemprotAbstractDatasetFactory\n\n\nclass StaticMarkerChemprotAbstract:\n \"\"\"\n Writes static file with the 
protein markers\n \"\"\"\n\n def create(self, abstract_file_or_df, output_file, additional_cols=None):\n dataset_factory = ChemprotAbstractDatasetFactory()\n dataset = dataset_factory.get_dataset(abstract_file_or_df)\n label_mapper = dataset.label_transformer\n dataset.label_transformer = None\n\n data = [{\"x\": x, \"y\": label_mapper.map(y), \"y_raw\": y} for x, y in dataset]\n\n df = pd.DataFrame(data)\n\n if additional_cols:\n cols_to_copy = [c.strip(\" \") for c in additional_cols.split(\",\")]\n raw_df = pd.read_json(abstract_file_or_df) if isinstance(abstract_file_or_df,\n str) else abstract_file_or_df\n for c in cols_to_copy:\n df[c] = raw_df[c].tolist()\n\n df.to_json(output_file, orient=\"records\")\n\n\ndef run_main():\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"--inputfile\",\n help=\"The input ppi multiclass file\", required=True)\n\n parser.add_argument(\"--outputfile\",\n help=\"The output json file\", required=True)\n\n parser.add_argument(\"--additionalcols\",\n help=\"The additional columns, comma separated, to copy to output\", required=False, default=None)\n\n parser.add_argument(\"--log-level\", help=\"Log level\", default=\"INFO\", choices={\"INFO\", \"WARN\", \"DEBUG\", \"ERROR\"})\n\n args = parser.parse_args()\n # Set up logging\n logging.basicConfig(level=logging.getLevelName(args.log_level), handlers=[logging.StreamHandler(sys.stdout)],\n format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n\n print(args.__dict__)\n\n StaticMarkerChemprotAbstract().create(args.inputfile, args.outputfile, args.additionalcols)\n\n\nif __name__ == \"__main__\":\n run_main()\n","sub_path":"src/utils/static_markers_chemprot_abstract.py","file_name":"static_markers_chemprot_abstract.py","file_ext":"py","file_size_in_byte":2078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"646888344","text":"from setuptools import setup, find_packages\n\nversion = '0.9.3'\n\nsetup(name='rt.zps',\n version=version,\n description=\"A zope processes inspector\",\n long_description=(file('README.rst').read() +\n '\\n\\n' +\n file('docs/HISTORY.txt').read()\n ),\n classifiers=[],\n keywords='',\n author='alert',\n author_email='alessandro.pisa@redturtle.it',\n url='http://www.redturtle.it',\n license='GPL',\n packages=find_packages(exclude=['ez_setup', 'examples']),\n include_package_data=True,\n scripts=['rt/zps/zps'],\n zip_safe=False,\n install_requires=[\n # -*- Extra requirements: -*-\n 'psutil'\n ],\n test_suite=\"rt.zps.tests\",\n entry_points=\"\"\"\n # -*- Entry points: -*-\n \"\"\",\n )\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"580180745","text":"import enum\n\n\nclass TokenType(enum.Enum):\n # Single-character tokens.\n LEFT_PAREN, RIGHT_PAREN, LEFT_BRACE, RIGHT_BRACE, COMMA, DOT, MINUS, PLUS, SEMICOLON, SLASH, STAR, = range(11)\n\n # One or two character tokens.\n BANG, BANG_EQUAL, EQUAL, EQUAL_EQUAL, GREATER, GREATER_EQUAL, LESS, LESS_EQUAL = range(12, 20)\n # Literals.\n IDENTIFIER, STRING, NUMBER = range(20, 23)\n\n # Keywords.\n AND, BREAK, CLASS, ELSE, FALSE, FUN, FOR, IF, NIL, OR, PRINT, RETURN, SUPER, THIS, TRUE, VAR, WHILE = range(23, 40)\n\n EOF = 40","sub_path":"lox/token_type.py","file_name":"token_type.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} 
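The TokenType record above builds a scanner's token vocabulary (for a Lox-style interpreter) by tuple-unpacking range() inside an Enum body. A quick standalone check of how such members come out — the small Color enum here is illustrative, not from the record:

```python
import enum

class Color(enum.Enum):
    # Tuple unpacking in an Enum body assigns consecutive int values,
    # the same pattern the TokenType record uses with range().
    RED, GREEN, BLUE = range(3)

assert Color.GREEN.value == 1
assert Color(2) is Color.BLUE
assert [c.name for c in Color] == ["RED", "GREEN", "BLUE"]
```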
+{"seq_id":"123598362","text":"# !/usr/bin/env/ python3\n#\n# Copyright 2019 Open Source Robotics Foundation, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the 'License');\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an 'AS IS' BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Also Referenced ROS Documents\n# https://docs.ros.org/en/foxy/Tutorials/Actions/Writing-a-Py-Action-Server-Client.html#id4\n\nimport time\n\nfrom custom_interfaces.action import Fibonacci\n\nimport rclpy\nfrom rclpy.action import ActionServer, CancelResponse, GoalResponse\nfrom rclpy.callback_groups import ReentrantCallbackGroup\nfrom rclpy.executors import MultiThreadedExecutor\nfrom rclpy.node import Node\n\n\nclass FibonacciActionServer(Node):\n\n def __init__(self):\n super().__init__('fibonacci_action_server')\n self.action_server = ActionServer(\n self,\n Fibonacci,\n 'fibonacci',\n callback_group=ReentrantCallbackGroup(),\n execute_callback=self.execute_callback,\n goal_callback=self.goal_callback,\n cancel_callback=self.cancel_callback,\n )\n\n self.get_logger().info('=== Fibonacci Action Server Started ====')\n\n async def execute_callback(self, goal_handle):\n self.get_logger().info('Executing goal...')\n\n feedback_msg = Fibonacci.Feedback()\n feedback_msg.partial_sequence = [0, 1]\n\n for i in range(1, goal_handle.request.order):\n\n if goal_handle.is_cancel_requested:\n goal_handle.canceled()\n self.get_logger().info('Goal canceled')\n return Fibonacci.Result()\n\n feedback_msg.partial_sequence.append(\n feedback_msg.partial_sequence[i] + feedback_msg.partial_sequence[i - 1]\n )\n\n self.get_logger().info(f'Feedback: {feedback_msg.partial_sequence}')\n goal_handle.publish_feedback(feedback_msg)\n time.sleep(1)\n\n goal_handle.succeed()\n self.get_logger().warn('==== Succeed ====')\n\n result = Fibonacci.Result()\n result.sequence = feedback_msg.partial_sequence\n return result\n\n def goal_callback(self, goal_request):\n \"\"\"Accept or reject a client request to begin an action.\"\"\"\n # This server allows multiple goals in parallel\n self.get_logger().info('Received goal request')\n return GoalResponse.ACCEPT\n\n def cancel_callback(self, goal_handle):\n \"\"\"Accept or reject a client request to cancel an action.\"\"\"\n self.get_logger().info('Received cancel request')\n return CancelResponse.ACCEPT\n\n\ndef main(args=None):\n rclpy.init(args=args)\n\n fibonacci_action_server = FibonacciActionServer()\n\n executor = MultiThreadedExecutor()\n rclpy.spin(fibonacci_action_server, executor=executor)\n\n fibonacci_action_server.destroy()\n rclpy.shutdown()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"py_action_pkg/py_action_pkg/fibonacci_action_server_cancel.py","file_name":"fibonacci_action_server_cancel.py","file_ext":"py","file_size_in_byte":3263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"641086267","text":"import pandas as pd\nimport codecs\nfrom app.commons.paths import *\nfrom app.models.usuario import Usuario\nfrom app.models.permiso_usuario_horario import Permiso_usuario_horario\nfrom app.models.semestre import Semestre\nfrom 
app.models.curso import Curso\nfrom app.models.horario import Horario\nfrom app.models.especialidad import Especialidad\nfrom app.models.permiso_usuario_horario import Permiso_usuario_horario\ndef SplitNombres( nombre ):\n    u\"\"\"\n    Original author, in PHP code: eduardoromero.\n    https://gist.github.com/eduardoromero/8495437\n    \n    Splits the given names and the surnames and returns a tuple of three\n    (string) elements formatted with the first character of each name\n    in upper case. This assumes that in the string the given names and \n    surnames are ordered in the ideal way:\n    \n    1- given name or names.\n    2- first surname.\n    3- second surname.\n    \n    SplitNombres( '' )\n    >>> ('Given Names', 'First Surname', 'Second Surname')\n    \"\"\"\n    a1,a2 = nombre.split(',')\n    a2 = a2[1:]\n    nombre = a2 +' ' + a1\n    # Split the full name on spaces.\n    tokens = nombre.split(\" \")\n    \n    # List where the words of the name are stored.\n    names = []\n    \n    # Particles of compound surnames and given names.\n    especial_tokens = ['da', 'de', 'di', 'do', 'del', 'la', 'las', \n    'le', 'los', 'mac', 'mc', 'van', 'von', 'y', 'i', 'san', 'santa']\n    \n    prev = \"\"\n    for token in tokens:\n        _token = token.lower()\n        \n        if _token in especial_tokens:\n            prev += token + \" \"\n        \n        else:\n            names.append(prev + token)\n            prev = \"\"\n        \n    num_nombres = len(names)\n    nombres, apellido1, apellido2 = \"\", \"\", \"\"\n    \n    # When there is no name.\n    if num_nombres == 0:\n        nombres = \"\"\n    \n    # When the name consists of a single element.\n    elif num_nombres == 1:\n        nombres = names[0]\n    \n    # When the name consists of two elements.\n    elif num_nombres == 2:\n        nombres = names[0]\n        apellido1 = names[1]\n    \n    # When the name consists of three elements.\n    elif num_nombres == 3:\n        nombres = names[0]\n        apellido1 = names[1]\n        apellido2 = names[2]\n    \n    # When the name consists of more than three elements.\n    else:\n        nombres = names[0] + \" \" + names[1]\n        apellido1 = names[2]\n        apellido2 = names[3]\n    \n    # Set the strings with the first character in upper case.\n    nombres = nombres.title()\n    apellido1 = apellido1.title()\n    apellido2 = apellido2.title()\n    \n    return (nombres, apellido1, apellido2)\n\ndef getCorreoPucp(correos):\n    if ',' in correos:\n        cpucp,_ = correos.split(',')\n        return cpucp\n    else:\n        return correos\n\n\ndef cargaMasivaHorarios(datos,idCurso,idEspecialidad):\n    semestre=Semestre().getOne()\n    idSemestre=semestre.id_semestre\n    print(\"=\"*20)\n    print(idSemestre)\n    name = pathCargaMasivaAlumnoHorario+datos.filename\n    data = datos.read()\n    with open(name,'wb') as file:\n        file.write(data)\n    doc= codecs.open(name,'rU','latin1')\n    for i in range(6):\n        doc.readline()\n    df = pd.read_csv(doc ,sep ='\\t',encoding = 'latin1')\n    #print(df)\n    df['E-mail'] = df['E-mail'].apply( lambda x: getCorreoPucp(x))\n    df['nombres'] = df['Nombre'].apply(lambda x : SplitNombres(x)[0])\n    df['apellido_paterno']= df['Nombre'].apply(lambda x : SplitNombres(x)[1]) \n    df['apellido_materno'] = df['Nombre'].apply(lambda x : SplitNombres(x)[2]) \n    longitud = len(df)\n    \n    \n    \n    for i in range(longitud):\n        idHorario = Horario().addOne(str(df.iat[i,2]),idCurso,idSemestre) \n        codigoPucp = str(df.iat[i,0])\n        nombre = str(df.iat[i,5])\n        email = str(df.iat[i,4])\n        apellidoPaterno = str(df.iat[i,6])\n        apellidoMaterno = str(df.iat[i,7])\n        objUsuario = Usuario(nombre = nombre,email = email,apellido_paterno = apellidoPaterno , \n        apellido_materno = apellidoMaterno, flg_admin =0 ,codigo_pucp = codigoPucp, clave = codigoPucp)\n        idUsuario = Usuario().addOne(objUsuario) \n        
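# Link the newly created user to this schedule slot for the current semester (permission id 2).\n        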
objAlumnoHorario = Permiso_usuario_horario(id_horario = idHorario,id_usuario = idUsuario, id_permiso = 2,id_semestre = idSemestre)\n        Permiso_usuario_horario().addOne(objAlumnoHorario)\n    \n    return {'message' : 'read successfully'}\n\ndef cargaMasivaCursos(datos,idEspecialidad):\n    semestre = Semestre().getOne()\n    idSemestre = semestre.id_semestre\n    print(datos)\n    print(idEspecialidad)\n    name = pathCargaMasivaCursoHorario + datos.filename\n    data = datos.read()\n    with open(name,'wb') as file:\n        file.write(data)\n    df = pd.read_excel(name)\n    longitud = len(df)\n\n    for i in range(longitud):\n        nombreCurso = df.iat[i,0]\n        codigoCurso = df.iat[i,1]\n        horarios = []\n        horarios = str(df.iat[i,2]).split(',')\n        objCurso = Curso(id_especialidad = idEspecialidad,id_semestre =idSemestre,nombre = nombreCurso,codigo = codigoCurso)\n        idCurso = Curso().addOne(objCurso)\n        for horario in horarios:\n            horario = horario.replace(' ','')\n            Horario().addOne(horario,idCurso,idSemestre)\n    return {'message' : 'read successfully'}\n\ndef cargaMasivaProfesorJP(datos,idEspecialidad):\n    semestre=Semestre().getOne()\n    idSemestre=semestre.id_semestre\n    name = pathCargaMasivaCursoHorario + datos.filename\n    data = datos.read()\n    with open(name,'wb') as file:\n        file.write(data)\n    df = pd.read_excel(name)\n    \n    longitud = len(df)\n\n    for i in range(longitud):\n        codigoCurso= str(df.iat[i,0])\n        codigoPucp = str(df.iat[i,1])\n        nombreCompleto = str(df.iat[i,2])\n        aux = SplitNombres(nombreCompleto)\n        nombres = aux[0]\n        apellidoPaterno = aux[1]\n        apellidoMaterno = aux[2]\n        email = str(df.iat[i,3])\n        objUsuario = Usuario(nombre = nombres,codigo_pucp = codigoPucp,email= email,clave = codigoPucp, apellido_paterno = apellidoPaterno, apellido_materno = apellidoMaterno,flg_admin =0)\n        idUsuario = Usuario().addOne(objUsuario)\n        idCurso = Curso().getOneClave(codigoCurso,idSemestre)\n        tipo = str(df.iat[i,4])\n        if tipo == \"1\":\n            horarios = str( df.iat[i,5]).split(',')\n            for horario in horarios: \n                horario = horario.replace(' ','')\n                print(idCurso,horario,idSemestre)\n                idHorario = Horario().getOneClave(idCurso,idSemestre,horario)\n                objUsuaHorario = Permiso_usuario_horario(id_horario = idHorario,id_usuario =idUsuario, id_permiso = 1,id_semestre = idSemestre)\n                Permiso_usuario_horario().addOne(objUsuaHorario)\n            \n        else:\n            horarios = Horario().getAll(idCurso,idSemestre)\n            for horario in horarios:\n                \n                idHorario = horario.id_horario\n                objUsuaHorario = Permiso_usuario_horario(id_horario = idHorario,id_usuario =idUsuario, id_permiso = 3,id_semestre = idSemestre)\n                Permiso_usuario_horario().addOne(objUsuaHorario)\n        \n\n\n    return {'message' : 'read successfully'}","sub_path":"backend/app/controller/CTR_Carga_Masiva.py","file_name":"CTR_Carga_Masiva.py","file_ext":"py","file_size_in_byte":7100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"288748833","text":"#!/usr/bin/env python3\n\nimport sys\nimport requests\nimport subprocess\nimport time\nimport os\nimport random\nimport string\nimport json\nimport base64\nimport argparse\nimport shutil\nimport hashlib\nimport glob\nfrom datetime import datetime,timezone\nfrom libnmap.parser import NmapParser, NmapParserException\nimport ipaddress\nfrom requests.packages.urllib3.exceptions import InsecureRequestWarning\n\nimport threading\nimport queue\n\n\n# my script for headshotting servers\nfrom getheadshot import getheadshot\nfrom config import Config\n\n
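# Fatal per-target error codes: scan() returns these when a target cannot be processed.\n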
ERR = {\"INVALIDTARGET\":1,\"SCANTIMEOUT\":2, \"DATANOTFOUND\":3, \"INVALIDDATA\": 4}\n\nconfig = Config()\nMAX_QUEUE_SIZE = int(config.max_threads) # only queue enough work for each of our active threads\n\nRTTVAR_MSG = \"RTTVAR has grown to over\"\n\n\ndef print_err(message):\n    threadname = threading.current_thread().name\n    print(\"[!] %s: %s\" % (threadname, message))\n\ndef print_info(message):\n    threadname = threading.current_thread().name\n    print(\"[+] %s: %s\" % (threadname, message))\n\nif config.ignore_ssl_warn:\n    requests.packages.urllib3.disable_warnings(InsecureRequestWarning)\ndef make_request(endpoint, reqType=\"GET\", postData=None, contentType=\"application/json\", statusCode=200):\n    headers = {'user-agent': 'natlas-agent/{}'.format(config.NATLAS_VERSION)}\n    if config.agent_id and config.auth_token:\n        authheader = config.agent_id + \":\" + config.auth_token\n        headers['Authorization'] = 'Bearer {}'.format(authheader)\n    try:\n        if reqType == \"GET\":\n            req = requests.get(config.server+endpoint, timeout=config.request_timeout, headers=headers, verify=(not config.ignore_ssl_warn))\n            if req.status_code == 200:\n                if 'message' in req.json():\n                    print_info(\"[Server] \" + req.json()['message'])\n                return req\n            if req.status_code == 403:\n                if 'message' in req.json():\n                    print_err(\"[Server] \" + req.json()['message'])\n                if 'retry' in req.json() and not req.json()['retry']:\n                    os._exit(403)\n            if req.status_code == 400:\n                if 'message' in req.json():\n                    print_info(\"[Server] \" + req.json()['message'])\n                return req\n            if req.status_code != statusCode:\n                print_err(\"Expected %s, received %s\" % (statusCode, req.status_code))\n                if 'message' in req.json():\n                    print_err(\"[Server] \" + req.json()['message'])\n                return req\n            if req.headers['content-type'] != contentType:\n                print_err(\"Expected %s, received %s\" % (contentType, req.headers['content-type']))\n                return False\n        elif reqType == \"POST\" and postData:\n            req = requests.post(config.server+endpoint, json=postData, timeout=config.request_timeout, headers=headers, verify=(not config.ignore_ssl_warn))\n            if req.status_code == 200:\n                if 'message' in req.json():\n                    print_info(\"[Server] \" + req.json()['message'])\n                return req\n            if req.status_code == 403:\n                if 'message' in req.json():\n                    print_err(\"[Server] \" + req.json()['message'])\n                if 'retry' in req.json() and not req.json()['retry']:\n                    os._exit(403)\n            if req.status_code == 400:\n                if 'message' in req.json():\n                    print_info(\"[Server] \" + req.json()['message'])\n                return req\n            if req.status_code != statusCode:\n                print_err(\"Expected %s, received %s\" % (statusCode, req.status_code))\n                if 'message' in req.json():\n                    print_err(\"[Server] \" + req.json()['message'])\n                return req\n    except requests.ConnectionError as e:\n        print_err(\"Connection Error connecting to %s\" % config.server)\n        return False\n    except requests.Timeout as e:\n        print_err(\"Request timed out after %s seconds.\" % config.request_timeout)\n        return False\n    except ValueError as e:\n        print_err(\"Error: %s\" % e)\n        return False\n\n    return req\n\ndef backoff_request(giveup=False, *args, **kwargs):\n    attempt = 0\n    result = None\n    while not result:\n        result = make_request(*args, **kwargs)\n        RETRY=False\n\n        if not result:\n            RETRY=True\n        elif 'retry' in result.json() and result.json()['retry']:\n            RETRY=True\n        elif not 'retry' in result.json() or not result.json()['retry']:\n            return result\n\n        if RETRY:\n            attempt += 1\n            if giveup and attempt == config.max_retries:\n                print_err(\"Request to %s failed %s times. 
Giving up\" % (config.server, config.max_retries))\n return None\n jitter = random.randint(0,1000) / 1000 # jitter to reduce chance of locking\n current_sleep = min(config.backoff_max, config.backoff_base * 2 ** attempt) + jitter\n print_err(\"Request to %s failed. Waiting %s seconds before retrying.\" % (config.server, current_sleep))\n time.sleep(current_sleep)\n return result\n\ndef get_services_file():\n print_info(\"Fetching natlas-services file from %s\" % config.server)\n response = backoff_request(endpoint=\"/api/natlas-services\")\n if response:\n serviceData = response.json()\n if serviceData[\"id\"] == \"None\":\n print_err(\"%s doesn't have a service file for us\" % config.server)\n return False\n if not hashlib.sha256(serviceData[\"services\"].encode()).hexdigest() == serviceData[\"sha256\"]:\n print_err(\"hash provided by %s doesn't match locally computed hash of services\" % config.server)\n return False\n with open(\"natlas-services\", \"w\") as f:\n f.write(serviceData[\"services\"])\n with open(\"natlas-services\", \"r\") as f:\n if not hashlib.sha256(f.read().rstrip('\\r\\n').encode()).hexdigest() == serviceData[\"sha256\"]:\n print_err(\"hash of local file doesn't match hash provided by server\")\n return False\n else:\n return False # return false if we were unable to get a response from the server\n return serviceData[\"sha256\"] # return True if we got a response and everything checks out\n\n\ndef fetch_target():\n print_info(\"Fetching Target from %s\" % config.server)\n response = backoff_request(endpoint=\"/api/getwork\")\n if response:\n target_data = response.json()\n else:\n return False # failed to fetch target from server\n return target_data\n\ndef validate_target(target):\n try:\n iptarget = ipaddress.ip_address(target)\n if iptarget.is_private and not config.scan_local:\n print_err(\"We're not configured to scan local addresses!\")\n return False\n except ipaddress.AddressValueError:\n print_err(\"%s is not a valid IP Address\" % target)\n return False\n return True\n\ndef generate_scan_id():\n return ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(10))\n\ndef cleanup_files(scan_id):\n print_info(\"Cleaning up files for %s\" % scan_id)\n for file in glob.glob(\"data/*.\"+scan_id+\".*\"):\n os.remove(file)\n\ndef scan(target_data=None):\n\n if not validate_target(target_data[\"target\"]):\n return ERR[\"INVALIDTARGET\"]\n\n result = {}\n\n # If agent authentication is required, this agent id has to match a server side agent id\n # If it's not required and an agent_id is set, we'll use that in scan data\n # If it's not required and an agent_id is not set, we'll consider it an anonymous scan.\n if config.agent_id:\n result['agent'] = config.agent_id\n else:\n result['agent'] = \"anonymous\"\n result[\"agent_version\"] = config.NATLAS_VERSION\n\n target = target_data[\"target\"]\n result['ip'] = target\n result['scan_reason'] = target_data['scan_reason']\n result['tags'] = target_data['tags']\n scan_id = target_data[\"scan_id\"]\n result['scan_id'] = scan_id\n agentConfig = target_data[\"agent_config\"]\n result['scan_start'] = datetime.now(timezone.utc).isoformat()\n\n command = [\"nmap\", \"-oA\", \"data/natlas.\"+scan_id, \"--servicedb\", \"./natlas-services\"]\n if agentConfig[\"versionDetection\"]:\n command.append(\"-sV\")\n if agentConfig[\"osDetection\"]:\n command.append(\"-O\")\n if agentConfig[\"enableScripts\"] and agentConfig[\"scripts\"]:\n command.append(\"--script\")\n command.append(agentConfig[\"scripts\"])\n if 
agentConfig[\"scriptTimeout\"]:\n command.append(\"--script-timeout\")\n command.append(str(agentConfig[\"scriptTimeout\"]))\n if agentConfig[\"hostTimeout\"]:\n command.append(\"--host-timeout\")\n command.append(str(agentConfig[\"hostTimeout\"]))\n if agentConfig[\"osScanLimit\"]:\n command.append(\"--osscan-limit\")\n if agentConfig[\"noPing\"]:\n command.append(\"-Pn\")\n if agentConfig[\"onlyOpens\"]:\n command.append(\"--open\")\n\n command.append(target_data[\"target\"])\n\n TIMEDOUT = False\n process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)\n try:\n out, err = process.communicate(timeout=int(agentConfig[\"scanTimeout\"]))\n except:\n try:\n print_err(\"Scan %s timed out\" % scan_id)\n process.kill()\n TIMEDOUT = True\n except:\n pass\n\n if TIMEDOUT:\n result['is_up'] = False\n result['port_count'] = 0\n result['scan_stop'] = datetime.now(timezone.utc).isoformat()\n result['timed_out'] = True\n cleanup_files(scan_id)\n print_info(\"Submitting scan timeout notice for %s\" % result['ip'])\n response = backoff_request(giveup=True, endpoint=\"/api/submit\", reqType=\"POST\", postData=json.dumps(result))\n return\n else:\n print_info(\"Scan %s Complete\" % scan_id)\n\n for ext in 'nmap', 'gnmap', 'xml':\n try:\n result[ext+\"_data\"] = open(\"data/natlas.\"+scan_id+\".\"+ext).read()\n except:\n print_err(\"Couldn't read natlas.%s.%s\" % (scan_id, ext))\n return ERR[\"DATANOTFOUND\"]\n\n nmap_report = NmapParser.parse(result['xml_data'])\n\n if nmap_report.hosts_total < 1:\n print_err(\"No hosts found in scan data\")\n return \"[!] No hosts found in scan data\"\n elif nmap_report.hosts_total > 1:\n print_err(\"Too many hosts found in scan data\")\n return \"[!] Too many hosts found in scan data\"\n elif nmap_report.hosts_down == 1:\n # host is down\n result['is_up'] = False\n result['port_count'] = 0\n result['scan_stop'] = datetime.now(timezone.utc).isoformat()\n cleanup_files(scan_id)\n print_info(\"Submitting host down notice for %s\" % (result['ip']))\n response = backoff_request(giveup=True, endpoint=\"/api/submit\", reqType=\"POST\", postData=json.dumps(result))\n return\n elif nmap_report.hosts_up == 1 and len(nmap_report.hosts) == 0:\n # host is up but no reportable ports were found\n result['is_up'] = True\n result['port_count'] = 0\n result['scan_stop'] = datetime.now(timezone.utc).isoformat()\n cleanup_files(scan_id)\n print_info(\"Submitting %s ports for %s\" % (result['port_count'], result['ip']))\n response = backoff_request(giveup=True, endpoint=\"/api/submit\", reqType=\"POST\", postData=json.dumps(result))\n return\n else:\n # host is up and reportable ports were found\n result['is_up'] = nmap_report.hosts[0].is_up()\n result['port_count'] = len(nmap_report.hosts[0].get_ports())\n\n if target_data[\"agent_config\"][\"webScreenshots\"] and shutil.which(\"aquatone\") is not None:\n if \"80/tcp\" in result['nmap_data']:\n if getheadshot(target, scan_id, 'http') is True:\n print_info(\"Attempting to take HTTP screenshot for %s\" % result['ip'])\n screenshotPath = \"data/aquatone.\" + scan_id + \".http/screenshots/http__\" +target.replace('.','_') + \".png\"\n if not os.path.isfile(screenshotPath):\n shutil.rmtree(\"data/aquatone.\" + scan_id + \".http/\")\n else:\n result['httpheadshot'] = str(base64.b64encode(\n open(screenshotPath, 'rb').read()))[2:-1]\n shutil.rmtree(\"data/aquatone.\" + scan_id + \".http/\")\n print_info(\"HTTP screenshot acquired for %s\" % result['ip'])\n else:\n print_err(\"Failed to acquire HTTP screenshot for 
%s\" % result['ip'])\n\n if \"443/tcp\" in result['nmap_data']:\n if getheadshot(target, scan_id, 'https') is True:\n print_info(\"Attempting to take HTTPS screenshot for %s\" % result['ip'])\n screenshotPath = \"data/aquatone.\" + scan_id + \".https/screenshots/https__\" +target.replace('.','_') + \".png\"\n if not os.path.isfile(screenshotPath):\n shutil.rmtree(\"data/aquatone.\" + scan_id + \".https/\")\n else:\n result['httpsheadshot'] = str(base64.b64encode(\n open(screenshotPath, 'rb').read()))[2:-1]\n shutil.rmtree(\"data/aquatone.\" + scan_id + \".https/\")\n print_info(\"HTTPS screenshot acquired for %s\" % result['ip'])\n else:\n print_err(\"Failed to acquire HTTPS screenshot for %s\" % result['ip'])\n\n if target_data[\"agent_config\"][\"vncScreenshots\"] and shutil.which(\"vncsnapshot\") is not None:\n if \"5900/tcp\" in result['nmap_data']:\n print_info(\"Attempting to take vnc screenshot for %s\" % result['ip'])\n if getheadshot(target, scan_id, 'vnc') is True:\n result['vncsheadshot'] = str(base64.b64encode(\n open(\"data/natlas.\"+scan_id+\".vnc.headshot.jpg\", 'rb').read()))[2:-1]\n os.remove(\"data/natlas.\"+scan_id+\".vnc.headshot.jpg\")\n print_info(\"VNC screenshot acquired for %s\" % result['ip'])\n else:\n print_err(\"Failed to acquire screenshot for %s\" % result['ip'])\n\n # submit result\n result['scan_stop'] = datetime.now(timezone.utc).isoformat()\n cleanup_files(scan_id)\n print_info(\"Submitting %s ports for %s\" % (result['port_count'], result['ip']))\n response = backoff_request(giveup=True, endpoint=\"/api/submit\", reqType=\"POST\", postData=json.dumps(result))\n\nclass ThreadScan(threading.Thread):\n def __init__(self, queue, auto=False, servicesSha=''):\n threading.Thread.__init__(self)\n self.queue = queue\n self.auto = auto\n self.servicesSha = servicesSha\n\n def run(self):\n # If we're in auto mode, the threads handle getting work from the server\n if self.auto:\n while True:\n target_data = fetch_target()\n # We hit this if we hit an error that we shouldn't recover from.\n # Primarily version mismatch, at this point.\n if not target_data:\n os._exit(400)\n if target_data and target_data[\"services_hash\"] != self.servicesSha:\n self.servicesSha = get_services_file()\n if not self.servicesSha:\n print_err(\"Failed to get updated services from %s\" % config.server)\n result = scan(target_data)\n\n else:\n #If we're not in auto mode, then the queue is populated with work from local data\n while True:\n print_info(\"Fetching work from queue\")\n target_data = self.queue.get()\n if target_data is None:\n break\n if target_data.get(\"services_hash\") != self.servicesSha:\n self.servicesSha = get_services_file()\n if not self.servicesSha:\n print_err(\"Failed to get updated services from %s\" % config.server)\n print_info(\"Manual Target: %s\" % target_data[\"target\"])\n result = scan(target_data)\n self.queue.task_done()\n\ndef main():\n PARSER_DESC = \"Scan hosts and report data to a configured server. The server will reject your findings if they are deemed not in scope.\"\n PARSER_EPILOG = \"Report problems to https://github.com/natlas/natlas\"\n parser = argparse.ArgumentParser(description=PARSER_DESC, epilog=PARSER_EPILOG, prog='natlas-agent')\n parser.add_argument('--version', action='version', version='%(prog)s {}'.format(config.NATLAS_VERSION))\n mutually_exclusive = parser.add_mutually_exclusive_group()\n mutually_exclusive.add_argument('--target', metavar='IPADDR', help=\"An IPv4 address or CIDR range to scan. e.g. 
192.168.0.1, 192.168.0.1/24\", dest='target')\n mutually_exclusive.add_argument('--target-file', metavar='FILENAME', help=\"A file of line separated target IPv4 addresses or CIDR ranges\", dest='tfile')\n args = parser.parse_args()\n\n\n if not os.geteuid() == 0:\n raise SystemExit(\"Please run as a privileged user in order to use nmap's features.\")\n if not os.path.isdir(\"data\"):\n os.mkdir(\"data\")\n\n\n autoScan = True\n if args.target or args.tfile:\n autoScan = False\n\n q = queue.Queue(maxsize=MAX_QUEUE_SIZE)\n\n servicesSha = \"\"\n BASEDIR = os.path.abspath(os.path.dirname(__file__))\n SERVICESPATH = os.path.join(BASEDIR, \"natlas-services\")\n if os.path.isfile(SERVICESPATH):\n servicesSha = hashlib.sha256(open(SERVICESPATH, \"r\").read().rstrip('\\r\\n').encode()).hexdigest()\n else:\n servicesSha = get_services_file()\n if not servicesSha:\n raise SystemExit(\"[!] Failed to get valid services file from %s\" % config.server)\n\n # Start threads that will wait for items in queue and then scan them\n for i in range(int(config.max_threads)):\n t = ThreadScan(q, autoScan, servicesSha)\n t.setDaemon(True)\n t.start()\n\n # Use a default agent config of all options enabled if we are in standalone mode\n defaultAgentConfig = {\n \"id\": 0,\n \"versionDetection\": True,\n \"osDetection\": True,\n \"enableScripts\": True,\n \"onlyOpens\": True,\n \"scanTimeout\": 660,\n \"webScreenshots\": True,\n \"vncScreenshots\": True,\n \"scriptTimeout\": 60,\n \"hostTimeout\": 600,\n \"osScanLimit\": True,\n \"noPing\": False,\n \"scripts\": \"default\"\n }\n target_data_template = {\"agent_config\": defaultAgentConfig, \"scan_reason\":\"manual\", \"tags\":[]}\n if args.target:\n print_info(\"Scanning: %s\" % args.target)\n\n targetNetwork = ipaddress.ip_interface(args.target)\n if targetNetwork.with_prefixlen.endswith('/32'):\n target_data = target_data_template.copy()\n target_data[\"target\"] = str(targetNetwork.ip)\n target_data[\"scan_id\"] = generate_scan_id()\n q.put(target_data)\n else:\n # Iterate over usable hosts in target, queue.put will block until a queue slot is available\n for t in targetNetwork.network.hosts():\n target_data = target_data_template.copy()\n target_data[\"target\"] = str(t)\n target_data[\"scan_id\"] = generate_scan_id()\n q.put(target_data)\n\n q.join()\n print_info(\"Finished scanning: %s\" % args.target)\n return\n\n elif args.tfile:\n print_info(\"Reading scope from file: %s\" % args.tfile)\n\n for target in open(args.tfile, \"r\"):\n targetNetwork = ipaddress.ip_interface(target.strip())\n if targetNetwork.with_prefixlen.endswith('/32'):\n target_data = target_data_template.copy()\n target_data[\"target\"] = str(targetNetwork.ip)\n target_data[\"scan_id\"] = generate_scan_id()\n q.put(target_data)\n else:\n for t in targetNetwork.network.hosts():\n target_data = target_data_template.copy()\n target_data[\"target\"] = str(t)\n target_data[\"scan_id\"] = generate_scan_id()\n q.put(target_data)\n q.join()\n print_info(\"Finished scanning the target file %s\" % args.tfile)\n return\n\n # This is the default behavior of fetching work from the server\n else:\n while True:\n time.sleep(60)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"natlas-agent/natlas-agent.py","file_name":"natlas-agent.py","file_ext":"py","file_size_in_byte":20400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"422177421","text":"\nfrom bs4 import BeautifulSoup\nfrom urllib.request import urlopen\nfrom datetime import 
datetime\n\nfrom event import Event\nfrom util import determine_year\n\n# Parser superclass and factory\nclass Parser:\n def __init__(self, venue, url):\n self.venue = venue\n self.url = url\n\n\n def retrieve_soup(self):\n return BeautifulSoup(urlopen(self.url).read().decode('utf-8'), 'lxml')\n\n\n def parse_events(self, event_html):\n # parse out event title, date, and time from chunk of html\n raise Exception('Undefined parse_event function in Parser subclass')\n\n\n def band_names_from_title(self, title):\n # determine individual band names from an event title\n # search over database of band names?\n # generate list of band names from songkick?\n return None\n\n\n def retrieve_sample(self, band_name):\n # retrieve a soundcloud or youtube sample from the band(s)\n # will require parsing out the band names from the event title\n # --find giant list of band names to search over?\n return None\n\n\n @staticmethod\n def factory(url):\n if 'flourcitystation.com' in url:\n return FlourCityStationParser('Flour City Station', url)\n elif 'bugjar.com' in url:\n return BugJarParser('Bug Jar', url)\n elif 'dinosaurbarbque.com' in url:\n return DinosaurBBQParser('Dinosaur BBQ', url)\n elif 'rochester.edu/Eastman' in url:\n return EastmanParser('Eastman School of Music', url)\n elif 'bouldercoffee.info' in url:\n return BoulderParser('Boulder Coffee Co.', url)\n else:\n raise Exception('Missing parser for: {}'.format(url))\n\n\n# Venue subclasses\nclass FlourCityStationParser(Parser):\n def parse_events(self, soup):\n events = []\n for li in soup.find_all('li', 'twistercontent'):\n title = li.find('h5', 'cro_cust_font').string\n details_page = li.find('div', 'clarlabel').a['href']\n\n day = li.find('span', 'first').string\n month = li.find('span', 'second').string\n time = li.find('span', 'cro_foodprice').string\n dt = datetime.strptime('{} {} {}'.format(month, day, time), '%b %d %I:%M %p')\n dt = dt.replace(year=determine_year(dt))\n\n events.append(Event(self.venue, title, [], dt, details_page)) \n return events\n\n\nclass BugJarParser(Parser):\n def parse_events(self, soup):\n events = []\n cal = soup.find('div', 'gigs-calendar')\n for tr in cal.find_all('tr'):\n a = tr.find('a')\n title = a.string\n details_page = a['href']\n\n datestr = tr.find('td', 'date').string\n time = tr.find('td', 'time').string \n dt = datetime.strptime('{} {}'.format(datestr, time), '%m/%d/%y %I:%M%p')\n\n events.append(Event(self.venue, title, [], dt, details_page)) \n return events\n\n\nclass DinosaurBBQParser(Parser):\n def parse_events(self, soup):\n events = []\n for div in soup.find_all('div', 'seven columns'):\n title = div.find('h1').string\n\n datestr = div.find_all('span')[1].string\n dt = datetime.strptime(datestr, '%B %d, %Y | %I%p')\n\n events.append(Event(self.venue, title, [], dt, self.url)) \n return events\n\n\nclass EastmanParser(Parser):\n def parse_events(self, soup):\n events = []\n for chunk in soup.find_all('div', 'Event'):\n a = chunk.find('a')\n title = a.string\n\n datestr = a['href'].split('from=')[1].split('&')[0]\n timestr = chunk.find('div', 'EventTime').string\n try:\n dt = datetime.strptime('{} {}'.format(datestr, timestr), '%d%B%Y %I:%M %p')\n except ValueError:\n #for empty time fields\n dt = datetime.strptime(datestr, '%d%B%Y')\n \n events.append(Event(self.venue, title, [], dt, self.url)) \n return events\n\n\nclass BoulderParser(Parser):\n def parse_events(self, soup):\n events = []\n for table in soup.find_all('table', 'normalfont'):\n title, datestr = text_split = 
table.find('div', 'searchtitlefont').li.string.rsplit(' (', 1)\n timestr = table.find('div', 'searchtextfont').contents[3].split('\\n')[0]\n dt = datetime.strptime('{} {}'.format(datestr, timestr), '%A, %d %B %Y) %I:%M%p')\n\n events.append(Event(self.venue, title, [], dt, self.url)) \n return events\n\n","sub_path":"parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":4536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"203525252","text":"from helpers import alphabet_position, rotate_character\r\n\r\ndef encrypt(text,rot):\r\n '''This function takes in a text message and a number and rotates the letters by the amount of the number given.'''\r\n\r\n encrypt_str = ''\r\n\r\n for i in text:\r\n if i.isalpha() == False:\r\n encrypt_str += i\r\n continue\r\n new_char = rotate_character(i,rot)\r\n encrypt_str += new_char\r\n return encrypt_str\r\n\r\ndef main():\r\n '''This function calls the encrypt function and prints out the result.'''\r\n\r\n from sys import argv,exit\r\n\r\n try:\r\n rotate = int(argv[1])\r\n except IndexError:\r\n print('Please enter a valid integer: (python caesar.py *number*)\\nA valid integer must contain no decimals,special or alphabetic characters.')\r\n exit()\r\n except ValueError:\r\n print('Please enter a valid integer: (python caesar.py *number*)\\nA valid integer must contain no decimals,special or alphabetic characters.')\r\n exit()\r\n\r\n message = input('Type a message you would like to encript:')\r\n print(encrypt(message,rotate))\r\n\r\nif __name__ == '__main__':\r\n main()","sub_path":"crypto/caesar.py","file_name":"caesar.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"168188012","text":"import numpy as np\nfrom .OFCPokerLogic import OFCEvaluator\n\n\nclass RandomPlayer():\n def __init__(self, game):\n self.game = game\n\n def play(self, board):\n a = np.random.randint(self.game.getActionSize())\n valids = self.game.getValidMoves(board, 1)\n while valids[a]!=1:\n a = np.random.randint(self.game.getActionSize())\n return a\n\n\nclass HumanOFCPokerPlayer():\n def __init__(self, game):\n self.game = game\n\n def play(self, board):\n # display(board)\n valid = self.game.getValidMoves(board, 1)\n valid_moves = [i for i, v in enumerate(valid) if v]\n print(\"Valid moves:\", valid_moves)\n\n while True:\n a = input()\n a = int(a)\n if a in (0, 1, 2) and valid[a]:\n break\n else:\n print('Invalid move')\n\n return a\n\n\n# class GreedyOFCPokerPlayer():\n# def __init__(self, game):\n# self.game = game\n# self.evaluator = OFCEvaluator()\n\n# def play(self, board):\n# valids = self.game.getValidMoves(board, 1)\n# candidates = [x for x]\n# for a in range(self.game.getActionSize()):\n# if valids[a]==0:\n# continue\n# nextBoard, _ = self.game.getNextState(board, 1, a)\n# score = self.game.getScore(nextBoard, 1)\n# candidates += [(-score, a)]\n# candidates.sort()\n# return candidates[0][1]\n","sub_path":"ofcpoker/OFCPokerPlayers.py","file_name":"OFCPokerPlayers.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"636934394","text":"class disk:\r\n\tdef __init__(self, size):\r\n\t\tself.size = size\r\n\t\r\n\tdef __str__(self):\r\n\t\treturn str(self.size)\r\n\t\t\r\nclass tower:\r\n\tdef __init__(self, index):\r\n\t\tself.index = index\r\n\t\tself.disks = []\r\n\t\t\r\n\tdef move_disks(self, n, 
dest, other):\r\n\t\tif(n > 0):\r\n\t\t\tself.move_disks(n-1, other, dest)\r\n\t\t\tdest.disks.append(self.disks.pop())\r\n\t\t\tother.move_disks(n-1, dest, self)\r\n\t\r\n\tdef __str__(self):\r\n\t\tresult = \"Tower \" + str(self.index) + \": \"\r\n\t\tfor d in self.disks:\r\n\t\t\tresult += str(d)\r\n\t\treturn result\r\n\r\ndef towers_of_hanoi(towers, n):\r\n\ttowers[0].move_disks(n, towers[2], towers[1])\r\n\r\ntowers = [tower(0),tower(1),tower(2)]\r\nn = 10\r\nfor i in range(1,n+1):\r\n\ttowers[0].disks.append(disk(i))\r\n\r\nfor t in towers:\r\n\tprint(str(t))\r\ntowers_of_hanoi(towers, n)\r\nfor t in towers:\r\n\tprint(str(t))","sub_path":"Chapter8/p6.py","file_name":"p6.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"284811825","text":"\r\n'''\r\nTextBased Adventure Game\r\n\r\nThe Goal: Remember Adventure? Well, we’re going to build a more basic version of that.\r\nA complete text game, the program will let users move through rooms based on user input and get descriptions of each room.\r\nTo create this, you’ll need to establish the directions in which the user can move, a way to track how far the user has moved (and therefore which room he/she is in), and to print out a description.\r\nYou’ll also need to set limits for how far the user can move.\r\nIn other words, create “walls” around the rooms that tell the user, “You can’t move further in this direction.”\r\n\r\nConcepts to keep in mind:\r\n\r\nStrings\r\nVariables\r\nInput/Output\r\nIf/Else Statements\r\nPrint\r\nList\r\nIntegers\r\n\r\n\r\nThe tricky parts here will involve setting up the directions and keeping track of just how far the user has “walked” in the game.\r\nI suggest sticking to just a few basic descriptions or rooms, perhaps 6 at most. This project also continues to build on using userinputted data.\r\nIt can be a relatively basic game, but if you want to build this into a vast, complex word, the coding will get substantially harder, especially if you want your user to start interacting with actual objects within the game.\r\nThat complexity could be great, if you’d like to make this into a longterm project. *Hint hint.\r\n'''\r\n\r\n# variables\r\npath = 0\r\n\r\ndirection = str\r\n\r\n# restart loop\r\n\r\nwhile path != 3:\r\n\r\n\r\n # restart loop\r\n\r\n while path != 3:\r\n\r\n # game loop\r\n while path != 5:\r\n # start screen loop\r\n while path == 0:\r\n begin = str(input(\"Welcome to the new dungeon crawler adventure game! 
Type \\\"start\\\" to begin!\\n\"))\r\n\r\n if begin == \"start\":\r\n print(\"As you walk through the dreary night a shady figure follows you home from work.\")\r\n path += 1\r\n else:\r\n print(\"Invalid response!\")\r\n\r\n print(\"You've had a long day and don't notice the tall dark shadow distancing himself from you.\")\r\n\r\n direction = str(input(\"Suddenly you come to an intersection do you want to go left, right, or straight?\"))\r\n\r\n # direction if statement 1\r\n if direction == \"left\":\r\n path = 10\r\n print(\"You chose to take the left path, still the shady character persists but he begins to speed up!\")\r\n elif direction == \"right\":\r\n path = 15\r\n print(\"You chose to take the right path, the shady character begins to fall behind as you outpace him!\")\r\n else:\r\n path = 20\r\n print(\"You finally notice the shady character and begin to pick up the pace going down the straight path.\")\r\n\r\n # response if statement 1\r\n if path == 10:\r\n print(\"Once again you come to an intersection and finally you notice the character\")\r\n direction = str(input(\"will you go left, right, or straight?\"))\r\n elif path == 15:\r\n print(\"Once again you come to an intersection and finally you notice the character\")\r\n direction = str(input(\"will you go left, right, or straight?\"))\r\n else:\r\n print(\"As you continue running you come across another intersection,\")\r\n direction = str(input(\"will you go left, right or straight?\"))\r\n\r\n # direction if statement 2\r\n if direction == \"left\":\r\n path = 25\r\n print(\"You chose to take the left path, the tall, dark man is on your heels! you begin sprinting.\")\r\n elif direction == \"right\":\r\n path = 30\r\n print(\"You chose to take the right path, the man is beginning to catch up!\")\r\n else:\r\n path = 35\r\n print(\"You chose to go straight and continue running as fast as you can!\")\r\n\r\n # response if statement 2\r\n if path == 25:\r\n print(\"The man unrelentingly hits you over the head with a club and knocks you out on site\")\r\n path = 5\r\n elif path == 30:\r\n print(\"The man unrelentingly hits you over the head with a club and knocks you out on site\")\r\n path = 5\r\n else:\r\n print(\"You look back after running 3 blocks at full speed and no longer see the strange man\")\r\n path = 7\r\n break\r\n\r\n # Game over statement\r\n if path == 5:\r\n direction = str(input(\"\\nGame over! The shady character murdered you! \\nWould you like to try again? (y/n)\"))\r\n else:\r\n direction = str(input(\"\\nYou win! The shady character didn't catch you this time! \\nWould you like to try again? 
(y/n)\"))\r\n if direction == \"y\":\r\n path = 0\r\n else:\r\n path = 3\r\n\r\nprint(\"Thanks for playing!\")\r\n\r\n","sub_path":"text_adventure_game.py","file_name":"text_adventure_game.py","file_ext":"py","file_size_in_byte":4960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"438050005","text":"import os\r\nfrom tqdm import tqdm\r\nimport json\r\nimport torch\r\nimport numpy as np\r\nimport operator\r\nimport pickle\r\nimport time\r\nfrom collections import defaultdict\r\nfrom copy import deepcopy\r\nimport plotly.graph_objects as go\r\n\r\nclass PostProcessRunner:\r\n def __init__(self, raw_results):\r\n self.raw_results = raw_results if isinstance(raw_results, dict) else json.load(open(raw_results, 'r'))\r\n self.word2id = json.load(open(\"./data/dataset/Charades/Charades_word2id.json\", 'r'))\r\n\r\n def _postprocess_raw_results(self, update_score=False, score_weight=1):\r\n process_results_dict = {}\r\n for vid_name, vid_results in tqdm(self.raw_results.items()):\r\n for query_result in vid_results:\r\n query = query_result['query']\r\n gt = query_result['gt']\r\n node_predictions = torch.from_numpy(np.array(query_result['node_predictions']))\r\n edge_predictions = torch.from_numpy(np.array(query_result['edge_predictions']))\r\n num_props = edge_predictions.size(0)\r\n update_props_list = []\r\n for i in range(num_props):\r\n # (start, end, match_scores)\r\n prop = node_predictions[i]\r\n related_props = []\r\n # select neighbor proposal(iou > 0)\r\n for j in range(num_props):\r\n if i != j:\r\n neighbor_prop = node_predictions[j]\r\n iou = self.temporal_iou(prop, neighbor_prop)\r\n if iou > 0:\r\n neighbor_prop = neighbor_prop.numpy().tolist()\r\n edge_score = edge_predictions[i, j, 0].item()\r\n # (start, end, match_score, edge_score)\r\n neighbor_prop += [edge_score]\r\n related_props.append(neighbor_prop)\r\n related_props = sorted(related_props, key=lambda x: x[-1], reverse=True)\r\n # merge props\r\n most_related_props = related_props[0]\r\n updated_prop = self.merge_two_proposals(prop, most_related_props, update_score, score_weight)\r\n update_props_list.append(updated_prop)\r\n update_props_list = sorted(update_props_list, key=lambda x: x[-1], reverse=True)\r\n temp_dict = {\r\n 'query': query,\r\n 'gt': gt,\r\n 'node_predictions': update_props_list\r\n }\r\n try:\r\n process_results_dict[vid_name].append(temp_dict)\r\n except KeyError:\r\n process_results_dict[vid_name] = []\r\n process_results_dict[vid_name].append(temp_dict)\r\n\r\n # only save specific 100 items\r\n visual_data_index = pickle.load(open('/home/xuhaoming/Projects/CVPR2020/utils/visual_data_index.pkl', 'rb'))\r\n visual_data = {}\r\n for idx in visual_data_index:\r\n visual_data[idx] = process_results_dict[idx]\r\n\r\n results_folder = './results/Evaluate/Raw_results'\r\n os.makedirs(results_folder, exist_ok=True)\r\n date = time.strftime('%Y-%m-%d-%H-%M', time.localtime(time.time()))\r\n json.dump(visual_data, open(os.path.join(results_folder, f'processed_results_{date}.json'), 'w'),\r\n indent=4)\r\n\r\n self.processed_results = process_results_dict\r\n # is_update_score = \"no_update_scores\" if not self.update_score else f\"update_scores_{self.score_weight}\"\r\n # os.makedirs(self.processed_results_output_path, exist_ok=True)\r\n # save_name = os.path.join(f'processed_{input_file_name}_{is_update_score}.json')\r\n # json.dump(process_results_dict, open(save_name, 'w'), indent=4)\r\n\r\n def merge_two_proposals(self, prop_a, prop_b, 
update_score, score_weight):\r\n start = min(prop_a[0].item(), prop_b[0])\r\n end = max(prop_a[1].item(), prop_b[1])\r\n if update_score:\r\n # update score of prop_a\r\n match_score = score_weight * prop_a[-1].item() + (1 - score_weight) * prop_b[-1]\r\n else:\r\n match_score = prop_a[-1].item()\r\n merged_props = [start, end, match_score]\r\n\r\n return merged_props\r\n\r\n def _postprocess_raw_results_no_merge(self, save_processed_res):\r\n process_results_dict = {}\r\n for vid_name, vid_results in tqdm(self.raw_results.items()):\r\n for query_result in vid_results:\r\n query = query_result['query']\r\n gt = query_result['gt']\r\n node_predictions = list(sorted(query_result['node_predictions'], key=lambda x: x[-1], reverse=True))\r\n temp_dict = {\r\n 'query': query,\r\n 'gt': gt,\r\n 'node_predictions': node_predictions,\r\n 'level': query_result['level']\r\n }\r\n try:\r\n process_results_dict[vid_name].append(temp_dict)\r\n except KeyError:\r\n process_results_dict[vid_name] = []\r\n process_results_dict[vid_name].append(temp_dict)\r\n\r\n # only save specific 100 items\r\n visual_data_index = pickle.load(open('/home/xuhaoming/Projects/CVPR2020/utils/visual_data_index.pkl', 'rb'))\r\n visual_data = {}\r\n for idx in visual_data_index:\r\n visual_data[idx] = process_results_dict[idx]\r\n\r\n # results_folder = './results/Evaluate/Raw_results'\r\n results_folder = save_processed_res\r\n baseline_setting = os.path.basename(results_folder).strip().rsplit('_', 2)[0]\r\n os.makedirs(results_folder, exist_ok=True)\r\n date = time.strftime('%Y-%m-%d-%H-%M', time.localtime(time.time()))\r\n json.dump(visual_data, open(os.path.join(results_folder, f'processed_results_{baseline_setting}.json'), 'w'), indent=4)\r\n\r\n self.processed_results = process_results_dict\r\n # input_file_name = os.path.basename(raw_results_path).split('.')[0]\r\n # save_name = os.path.join(os.path.dirname(results_file_path), f'processed_{input_file_name}_no_merge.json')\r\n # json.dump(process_results_dict, open(save_name, 'w'), indent=4)\r\n\r\n # return process_results_dict\r\n\r\n def compute_IoU_recall_top_n_ours(self, top_n, iou_thresh, nms=False):\r\n correct_num = 0.0\r\n total_num = 0.0\r\n nms_pick_itmes = defaultdict(list)\r\n # sclips: gt (start, end); iclips: props (start, end); sim_v: match_scores.\r\n for vid_name, vid_results in self.processed_results.items():\r\n for query_result in vid_results:\r\n total_num += 1\r\n gt = query_result['gt']\r\n gt_start = gt[0]\r\n gt_end = gt[1]\r\n props_predictions = query_result['node_predictions']\r\n # print gt +\" \"+str(gt_start)+\" \"+str(gt_end)\r\n sim_v = [v[-1] for v in props_predictions]\r\n # sim_v = [v for v in sentence_image_mat[k]]\r\n starts = [v[0] for v in props_predictions]\r\n ends = [v[1] for v in props_predictions]\r\n # starts = [float(iclip.split(\"_\")[1]) for iclip in iclips]\r\n # ends = [float(iclip.split(\"_\")[2]) for iclip in iclips]\r\n if nms:\r\n picks = self.nms_temporal(starts, ends, sim_v, iou_thresh - 0.05)\r\n raw_levels = {f\"level_{i}\": item for i, item in enumerate(query_result['level'])}\r\n merge_level = []\r\n for item in query_result['level']:\r\n merge_level.extend(item)\r\n merge_level = np.array(merge_level)\r\n picks_level = merge_level[picks]\r\n temp_new_dict = deepcopy(query_result)\r\n temp_new_dict['node_predictions'] = [query_result['node_predictions'][i] for i in picks]\r\n temp_new_dict['level'] = picks_level.tolist()\r\n nms_pick_itmes[vid_name].append(temp_new_dict)\r\n else:\r\n picks = 
list(range(len(sim_v)))\r\n merge_level = []\r\n for item in query_result['level']:\r\n merge_level.extend(item)\r\n merge_level = np.array(merge_level)\r\n picks_level = merge_level[picks]\r\n temp_new_dict = deepcopy(query_result)\r\n # temp_new_dict['node_predictions'] = [query_result['node_predictions'][i] for i in picks]\r\n temp_new_dict['level'] = picks_level.tolist()\r\n nms_pick_itmes[vid_name].append(temp_new_dict)\r\n # sim_argsort=np.argsort(sim_v)[::-1][0:top_n]\r\n if top_n < len(picks): picks = picks[0:top_n]\r\n for index in picks:\r\n # pred_start = float(iclips[index].split(\"_\")[1])\r\n pred_start = props_predictions[index][0]\r\n # pred_end = float(iclips[index].split(\"_\")[2])\r\n pred_end = props_predictions[index][1]\r\n iou = self.calculate_IoU((gt_start, gt_end), (pred_start, pred_end))\r\n if iou >= iou_thresh:\r\n correct_num += 1\r\n break\r\n # if self.viz_nms:\r\n self.viz_processed_results = nms_pick_itmes\r\n\r\n # else:\r\n # self.viz_processed_results = self.processed_results\r\n return correct_num, total_num, correct_num / total_num\r\n\r\n def nms_temporal(self, x1, x2, s, overlap):\r\n pick = []\r\n assert len(x1) == len(s)\r\n assert len(x2) == len(s)\r\n if len(x1) == 0:\r\n return pick\r\n\r\n union = list(map(operator.sub, x2, x1)) # union = x2-x1\r\n I = [i[0] for i in sorted(enumerate(s), key=lambda x: x[1])] # sort and get index\r\n\r\n while len(I) > 0:\r\n i = I[-1]\r\n pick.append(i)\r\n\r\n xx1 = [max(x1[i], x1[j]) for j in I[:-1]]\r\n xx2 = [min(x2[i], x2[j]) for j in I[:-1]]\r\n inter = [max(0.0, k2 - k1) for k1, k2 in zip(xx1, xx2)]\r\n o = [inter[u] / (union[i] + union[I[u]] - inter[u]) for u in range(len(I) - 1)]\r\n I_new = []\r\n for j in range(len(o)):\r\n if o[j] <= overlap:\r\n I_new.append(I[j])\r\n I = I_new\r\n return pick\r\n\r\n def temporal_iou(self, span_A, span_B):\r\n \"\"\"\r\n Calculates the intersection over union of two temporal \"bounding boxes\"\r\n\r\n span_A: (start, end)\r\n span_B: (start, end)\r\n \"\"\"\r\n union = min(span_A[0], span_B[0]), max(span_A[1], span_B[1])\r\n inter = max(span_A[0], span_B[0]), min(span_A[1], span_B[1])\r\n\r\n if inter[0] >= inter[1]:\r\n return 0\r\n else:\r\n return float(inter[1] - inter[0]) / float(union[1] - union[0])\r\n\r\n def calculate_IoU(self, i0, i1):\r\n union = (min(i0[0], i1[0]), max(i0[1], i1[1]))\r\n inter = (max(i0[0], i1[0]), min(i0[1], i1[1]))\r\n iou = 1.0 * (inter[1] - inter[0]) / (union[1] - union[0])\r\n return iou\r\n\r\n def visualize(self, save_folder):\r\n\r\n data_file = self.viz_processed_results\r\n\r\n frames_dict = json.load(open(\"/home/xuhaoming/Projects/CVPR2020/data/Charades_frames_info.json\", 'r'))\r\n fps_dict = json.load(open(\"/home/xuhaoming/Projects/CVPR2020/data/Charades_fps_dict.json\", 'r'))\r\n duration_info = json.load(open(\"/home/xuhaoming/Projects/cvpr_baseline/One-stage-moment-retrieval/data/Charades/Charades_duration.json\", 'r'))\r\n visual_data_index = pickle.load(open('/home/xuhaoming/Projects/CVPR2020/utils/visual_data_index.pkl', 'rb'))\r\n visual_data = {}\r\n for idx in visual_data_index:\r\n visual_data[idx] = data_file[idx]\r\n # vid_name_list = list(data_file.keys())[:100]\r\n # visual_data = {vid: data_file[vid] for vid in vid_name_list}\r\n # input_file_name = os.path.basename(json_file_path).split('.')[0]\r\n # json.dump(data_file[:100], open('test_data.json', 'w'))\r\n for vid_name, vid_info in tqdm(visual_data.items()):\r\n # fps = float(frames_dict[vid_name]) / float(fps_dict[vid_name])\r\n fps = 
float(duration_info[vid_name])\r\n for query_results in vid_info:\r\n query = query_results['query'] + '.'\r\n gt = query_results['gt']\r\n gt_start, gt_end = gt[0], gt[1]\r\n node_predictions = query_results['node_predictions']\r\n levels = query_results['level']\r\n assert len(levels) == len(node_predictions)\r\n tags = []\r\n for i, lev in enumerate(levels):\r\n if lev == 0:\r\n tags.append(f'First {i}')\r\n elif lev == 1:\r\n tags.append(f\"Second {i}\")\r\n elif lev == 2:\r\n tags.append(f'Third {i}')\r\n head = list(reversed(tags)) + ['GT']\r\n props_start = list(reversed(list(map(lambda x: x[0], node_predictions)))) + [gt_start]\r\n props_end = list(reversed(list(map(lambda x: x[1] - x[0], node_predictions)))) + [gt_end - gt_start]\r\n scores = list(reversed(list(map(lambda x: \"{:.2f}\".format(x[2]), node_predictions)))) + [1]\r\n iou = list(reversed([self.temporal_iou(gt, props) for props in node_predictions])) + [1]\r\n text = list(map(lambda x: f\"s: {float(x[0]):.2f}, iou: {float(x[1]):.2f}\", zip(scores, iou)))\r\n # annotations = [dict(text=f\"{x}\") for x in scores]\r\n # assert list(reversed(np.array(node_predictions)[:, 0].tolist())) == props_start[:-1]\r\n # assert list(reversed(np.array(node_predictions)[:, 1].tolist())) == props_end[:-1]\r\n fig = go.Figure()\r\n # draw gt\r\n fig.add_trace(go.Bar(\r\n y=head,\r\n x=props_start,\r\n text=[\"\"] * len(head),\r\n textposition='inside',\r\n textfont=dict(size=30),\r\n name='props_start',\r\n orientation='h',\r\n marker=dict(\r\n color='rgba(255, 255, 255, 0.6)',\r\n line=dict(color='rgba(255, 255, 255, 1.0)', width=3),\r\n ),\r\n\r\n ))\r\n fig.add_trace(go.Bar(\r\n y=head,\r\n x=props_end,\r\n name='props_end',\r\n orientation='h',\r\n text=text,\r\n textposition='auto',\r\n textfont=dict(size=30),\r\n\r\n marker=dict(\r\n color='rgba(66, 218, 202, 0.6)',\r\n line=dict(color='rgba(66, 218, 202 , 1.0)', width=3),\r\n\r\n ),\r\n ))\r\n\r\n fig.update_layout(barmode='stack',\r\n paper_bgcolor='rgb(255, 255, 255)',\r\n plot_bgcolor='rgb(248, 248, 255)',\r\n showlegend=False,\r\n width=1200, height= 1500,\r\n title=dict(\r\n text=f\"vid: {vid_name}, {query} \\n\" + \"GT: {:.2f} {:.2f}\".format(gt_start * fps,\r\n gt_end * fps),\r\n font=dict(size=30, color=\"rgb(61, 198, 232)\"),\r\n )\r\n ) \r\n gt_start = \"{:.2f}\".format(gt_start * fps)\r\n gt_end = \"{:.2f}\".format(gt_end * fps)\r\n query_tokens = \"\".join([str(self.word2id[word]) for word in query.strip().replace(\".\", \"\").split(' ')])\r\n fig.write_image(os.path.join(save_folder, f'{vid_name}_{gt_start}_{gt_end}_{query_tokens}.jpg'))\r\n\r\n def run_evaluate(self, iou_topk_dict:dict, do_merge=False, update_score=False, score_weight=1.0, temporal_nms=False, viz_nms=True, do_viz=\"\"):\r\n assert isinstance(iou_topk_dict, dict)\r\n self.viz_nms = viz_nms\r\n ious = iou_topk_dict['iou']\r\n topks = iou_topk_dict['topk']\r\n # print('Post processing raw results...')\r\n if do_merge:\r\n self._postprocess_raw_results(update_score=update_score, score_weight=score_weight, save_processed_res=do_viz)\r\n else:\r\n self._postprocess_raw_results_no_merge(save_processed_res=do_viz)\r\n # print('Done')\r\n accuracy_topks = []\r\n for iou_thresh in ious:\r\n for topk in topks:\r\n # print(f'Setting: topk: {topk}, iou: {iou_thresh}, update_score: {update_score}, NMS: {temporal_nms}')\r\n correct_num_topk, total_num_topk, accuracy_topk = self.compute_IoU_recall_top_n_ours(topk, iou_thresh, temporal_nms)\r\n # print(f\"IoU thresh: {iou_thresh}, NMS: {temporal_nms}, \\nR@{topk}: 
{accuracy_topk}\\ncorrect: {correct_num_topk},\"\r\n # f\" total_num: {total_num_topk}\")\r\n # print(f\"\\nR@{topk}: {accuracy_topk}\\n\")\r\n accuracy_topks.append(accuracy_topk)\r\n if do_viz:\r\n print(f'Creating save folder {do_viz}')\r\n os.makedirs(do_viz, exist_ok=True)\r\n print(f'Conducting visualization...')\r\n self.visualize(do_viz)\r\n print('Done')\r\n return topks, accuracy_topks\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n # raw_results_path = \"/home/xuhaoming/Projects/CVPR2020/results/Evaluate/Raw_results/raw_results_2019-07-27-00-33.json\"\r\n# # raw_data = json.load(open(raw_results_path, 'r'))\r\n# # viz_images_folder = \"/home/xuhaoming/Projects/CVPR2020/utils/viz_images/test/\"\r\n# # process_runner = PostProcessRunner(raw_data)\r\n# # iou_topk_dict = {\"iou\": [0.5], 'topk': [1, 5]}\r\n# # process_runner.run_evaluate(do_merge=False, iou_topk_dict=iou_topk_dict)\r\n results_dict = json.load(open(\"/home/xuhaoming/Projects/cvpr_baseline/\"\r\n \"projects/SEED666_baseline6_6_5.6_11_prop32/raw_results.json\", 'r'))\r\n iou_topk_dict = {\"iou\": [0.5], 'topk': [1, 5]}\r\n postprocess_runner = PostProcessRunner(results_dict)\r\n topks, accuracy_topks = postprocess_runner.run_evaluate(iou_topk_dict=iou_topk_dict, temporal_nms=False,\r\n viz_nms=False,\r\n do_viz=os.path.join(os.getcwd(),\r\n f'{os.path.basename(os.getcwd())}_viz_images'))\r\n\r\n","sub_path":"utils/evaluate_utils_raw.py","file_name":"evaluate_utils_raw.py","file_ext":"py","file_size_in_byte":19019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"558770245","text":"\"\"\"Add hash sizes\n\nRevision ID: bd31b068164e\nRevises: cd7c4df10683\nCreate Date: 2018-01-26 15:40:18.590393\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'bd31b068164e'\ndown_revision = 'cd7c4df10683'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('s3_blob', sa.Column('size', sa.BigInteger(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n    op.drop_column('s3_blob', 'size')\n    # ### end Alembic commands ###\n","sub_path":"registry/migrations/versions/bd31b068164e_add_hash_sizes.py","file_name":"bd31b068164e_add_hash_sizes.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"596433000","text":"import numpy as np\nimport math\nimport cv2 as cv\nfrom PIL import Image\n\n#################################### INNER FUNCTIONS ########################################\n'''\nImplementation of the code proposed in: 'Digital Mammographic Computer Aided Diagnosis (CAD)\nusing Adaptive Level Set Segmentation' from John E.Ball and Lori Mann Bruce\n'''\ndef __normalize(img):\n    tmp = img-np.amin(img)\n    image = tmp/np.amax(tmp)\n    return image\n\n
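# Regional mean: box-blur the image, then resize back to the source size (note cv.resize expects (width, height)).\n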
def __regional_mean(img, k_size):\n    blurred_img = cv.blur(img,(k_size[0],k_size[1]))\n    return cv.resize(blurred_img, (img.shape[1], img.shape[0]), interpolation=cv.INTER_LINEAR)\n\ndef __enhancing_structures(src_gray):\n    zeroToOne_img = __normalize(src_gray)\n\n    clahe = cv.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))\n    img_clahe = clahe.apply(src_gray)\n\n    mu = np.average(src_gray)\n    tmp = __normalize(img_clahe)\n    img3 = np.zeros((tmp.shape[0], tmp.shape[1]))\n    for i in range(img_clahe.shape[0]):\n        for j in range(img_clahe.shape[1]):\n            img3[i, j] = tmp[i, j] * (1 - math.exp(-(zeroToOne_img[i, j] / mu)))\n    img3 = __normalize(img3)\n    blur_img = __regional_mean(img3, [16, 16])\n    img5 = __normalize(img3 + __regional_mean(blur_img, [16, 16]))\n\n    return img5\n##########################################################################################\n\ndef preprocessing(predicted_mass):\n    '''\n    The function preprocesses all the mammogram images predicted as masses by the SVM classifier\n    by using the Adaptive Level Set Segmentation (ALSS). The aim is to enhance internal structures.\n    :param predicted_mass: the list of mammogram images predicted as masses.\n    :return: the list of enhanced mammogram images.\n    '''\n    print(\"--------- [STATUS] Preprocessing images ---------\")\n    i = 1\n    enhanced_mass = []\n    for mass in predicted_mass:\n        print(\"Processing image n.\" + str(i) + \" ...\")\n        mass = cv.cvtColor(mass, cv.COLOR_BGR2GRAY)\n        prep_img = __enhancing_structures(mass)\n        enhanced_mass.append(prep_img)\n        i +=1\n    print(\"-------------------- [NOTIFY] Images preprocessed ---------------------\")\n    return enhanced_mass\n\n##########################################################################################\ndef build_true_path(path):\n    path = path.split(\"_\")\n    path.pop(0)\n    true_path = \"\"\n    for p in path:\n        true_path += p + \"_\"\n\n    return true_path[:-4] + \"mask.png\"\n##########################################################################################\n\ndef cropping(mask_path, mass_images, path_predicted_mass):\n    '''\n    Since our CNN (U-Net) works with cropped images, in order to remove background and avoid\n    learning useless details, the function crops all the predicted images so that they can be fed\n    to the U-Net.\n    :param mask_path: the list of masks from which the bounding boxes have been extracted.\n    :param mass_images: the list of mammogram images to crop.\n    :param path_predicted_mass: the list of paths of the mass_images list.\n    :return: the list of cropped images.\n    '''\n    print(\"-------------------- [STATUS] Cropping images for U-Net --------------\")\n    cropped_images = []\n    i = 0\n    for p in path_predicted_mass:\n        # Find rectangles\n        path = build_true_path(p)\n        mask = cv.imread(mask_path + \"\\\\\" + path, cv.IMREAD_GRAYSCALE)\n        retval, labels = cv.connectedComponents(mask, ltype=cv.CV_16U)\n        labels = np.asarray(labels, np.uint8)\n        contours, hierarchy = cv.findContours(labels, cv.RETR_LIST, cv.CHAIN_APPROX_NONE)\n        x, y, width, height = cv.boundingRect(contours[0])\n\n        # Cropping images\n        print(\"Cropping image n.\", i+1)\n        image = Image.fromarray(mass_images[i])\n        img_cropped = image.crop(box=(x, y, x+width, y+height))\n        cropped_images.append(img_cropped)\n\n        # Saving images\n        print(\"Saving image n.\", i+1)\n        print(\"------------------------------\")\n        img_cropped.save(\"dataset\\\\unet_input\\\\\" + path_predicted_mass[i])\n        i += 1\n\n    print(\"-------------------- [NOTIFY] All images have been cropped -----------\")\n    return cropped_images\n","sub_path":"utils/data_preprocessing.py","file_name":"data_preprocessing.py","file_ext":"py","file_size_in_byte":4151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"320722452","text":"import csv\nimport json\nimport pymysql\nimport sys, getopt\nfrom pymysql.cursors import SSDictCursor, DictCursor\n\ndef binaryFieldsToUTF8(row):\n    for l in row:\n        if isinstance(row[l], bytes):\n            row[l] = row[l].decode(\"utf-8\")\n    return row\n\ndef writeToCSV(conn, sql, path, amount=None, msg=None):\n    cursor = conn.cursor(cursor=SSDictCursor)\n    cursor.execute(sql)\n    header = [ele[0] for ele in cursor.description]\n    file = open(path, \"w\", newline='', encoding=\"UTF-8\")\n    writer = csv.DictWriter(file, fieldnames=header, dialect=csv.unix_dialect)\n    writer.writeheader()\n\n    count = 0\n    while True:\n        row = cursor.fetchone()\n        if row:\n            binaryFieldsToUTF8(row)\n            writer.writerow(row)\n            count += 1\n            if amount:\n                updateProgress(count, amount, msg)\n        else:\n            break\n    if amount:\n        print()\n    cursor.close()\n    file.close()\n    return 
count\n\ndef writeSubcategoriesAndGetCategories(conn, path, maxDepth, categorySet, categoryBlackset):\n visitedCategories = set(categorySet)\n if (maxDepth < 1):\n return visitedCategories\n \n newCategories = set(categorySet)\n depth = 0\n\n file = open(path, \"w\", newline='', encoding=\"utf-8\")\n writer = csv.DictWriter(file, fieldnames=[\"category\", \"subcategory\"], dialect=csv.unix_dialect)\n writer.writeheader()\n \n sql = \"\"\"SELECT cl.cl_to AS \"category\", p.page_title AS \"subcategory\" FROM page AS p\n INNER JOIN categorylinks AS cl ON p.page_id = cl.cl_from\n WHERE cl_type = \"subcat\"\n AND NOT p.page_id IN (SELECT pp_page FROM page_props WHERE pp_propname = \"hiddencat\")\n AND cl.cl_to IN (%s);\"\"\"\n with conn.cursor(cursor=SSDictCursor) as cursor:\n updateProgress(0, maxDepth, \"depths analized.\")\n while depth < maxDepth and newCategories:\n cursor.execute(sql.replace(\"%s\", '\", \"'.join(newCategories).join(['\"', '\"'])))\n newCategories = set()\n while True:\n row = cursor.fetchone()\n if row:\n row = binaryFieldsToUTF8(row)\n if row[\"subcategory\"] in categoryBlackset:\n continue\n writer.writerow(row)\n if row[\"subcategory\"] not in visitedCategories:\n visitedCategories.add(row[\"subcategory\"])\n newCategories.add(row[\"subcategory\"])\n else:\n break\n depth += 1\n updateProgress(depth, maxDepth, \"depths analized.\")\n print()\n\n file.close()\n return visitedCategories\n\n\ndef writePageCategoryMappingAndGetPageids(conn, path, uniqueCategories):\n file = open(path, \"w\", newline='', encoding=\"UTF-8\")\n writer = csv.DictWriter(file, fieldnames=[\"category\", \"page_title\"], dialect=csv.unix_dialect, extrasaction=\"ignore\")\n writer.writeheader()\n\n pageIds = set()\n\n sql = \"\"\"SELECT cl.cl_to AS \"category\", p.page_id, p.page_title FROM categorylinks AS cl\n INNER JOIN page AS p ON p.page_id = cl.cl_from\n WHERE p.page_namespace = 0 AND p.page_is_redirect = 0 AND cl.cl_type = \"page\" AND cl.cl_to IN (%s);\n \"\"\".replace(\"%s\", '\", \"'.join(uniqueCategories).join(['\"', '\"']))\n with conn.cursor(cursor=SSDictCursor) as cursor:\n cursor.execute(sql)\n while True:\n row = cursor.fetchone()\n if row:\n row = binaryFieldsToUTF8(row)\n writer.writerow(row)\n pageIds.add(str(row[\"page_id\"]))\n else:\n break\n\n file.close()\n return pageIds\n\ndef updateProgress(current, max, msg):\n print(\"\\r(%i/%i) %.2f%% %s\" % (current, max, current/max*100, msg), end=\"\")\n\nif __name__ == \"__main__\":\n try:\n opts, args = getopt.getopt(sys.argv[1:], \"c:\", [\"connection=\"])\n if len(args) < 5:\n raise getopt.GetoptError(\"Missing args.\")\n except getopt.GetoptError:\n print(sys.argv[0] + \" [-c CONNECTION-FILE | --connection=CONNECTION-FILE] CATEGORY-FILE CATEGORY-PAGE-MAPPING-FILE PAGE-FILE MAXDEPTH CATEGORY...\")\n sys.exit()\n\n if opts:\n print(\"file found\")\n with open(opts[0][1], \"r\", encoding=\"UTF-8\") as f:\n connectionDetails = json.load(f)\n else:\n connectionDetails = {\"host\":\"localhost\", \"port\":3306, \"database\":\"enwiki\", \"username\":\"root\", \"password\":None}\n\n conn = pymysql.Connect(host=connectionDetails[\"host\"], port=connectionDetails[\"port\"], db=connectionDetails[\"database\"], user=connectionDetails[\"username\"], passwd=connectionDetails[\"password\"])\n categorySet = set(filter(lambda x: not x.startswith(\"-\"), args[4:]))\n categoryBlackset = set(map(lambda x: x[1:], filter(lambda x: x.startswith(\"-\"), args[4:])))\n\n print(\"Searching for subcategories...\")\n uniqueCategories = 
writeSubcategoriesAndGetCategories(conn, args[0], int(args[3]), categorySet, categoryBlackset)\n del categorySet\n del categoryBlackset\n print(\"%i unique categories found.\" % len(uniqueCategories))\n print(\"Searching for pages in selected categories...\")\n\n pageIds = writePageCategoryMappingAndGetPageids(conn, args[1], uniqueCategories)\n del uniqueCategories\n amount = len(pageIds)\n print(\"%i pages found.\" % amount)\n updateProgress(0, amount, \"pages written.\")\n\n sql=\"\"\"SELECT p.page_title, t.old_text AS \"text\" FROM page AS p\n INNER JOIN revision AS r ON r.rev_page = p.page_id\n INNER JOIN text AS t ON t.old_id = r.rev_text_id\n WHERE p.page_id IN (%s);\n \"\"\".replace(\"%s\", \",\".join(pageIds))\n \n count = writeToCSV(conn, sql, args[2], amount=amount, msg=\"pages written.\")\n if count != amount:\n print(\"Missmatch between unique pages (%i) and written pages (%i).\" % (amount, count))\n conn.close()\n","sub_path":"export/export.py","file_name":"export.py","file_ext":"py","file_size_in_byte":5810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"219053417","text":"# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution:\n def addTwoNumbers(self, l1, l2):\n head, p1, p2 = ListNode(0), l1, l2\n tail = head\n carry = 0\n while p1 and p2:\n num = p1.val + p2.val + carry\n if num > 9:\n num = num - 10\n carry = 1\n else:\n carry = 0\n\n tail.next = ListNode(num)\n tail = tail.next\n p1 = p1.next\n p2 = p2.next\n\n if p2:\n p1 = p2\n while p1:\n num = p1.val + carry\n if num > 9:\n num = num - 10\n carry = 1\n else:\n carry = 0\n tail.next = ListNode(num)\n tail = tail.next\n p1 = p1.next\n\n if carry:\n tail.next = ListNode(carry)\n tail = tail.next\n tail.next = None\n return head.next\n\nif __name__ == \"__main__\":\n l1 = ListNode(2)\n l1.next = ListNode(4)\n l1.next.next = ListNode(3)\n\n l2 = ListNode(5)\n l2.next = ListNode(6)\n l2.next.next = ListNode(4)\n\n\n\n","sub_path":"leecode/algorithm/addtwosum.py","file_name":"addtwosum.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"529760223","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom .models import Article, Person\n\ndef index(request):\n # article = Article()\n # article.save()\n # article = Article.objects.get(pk=2)\n # article.title = '背影'\n # article.removed = False\n # article.save()\n from datetime import datetime\n article = Article.objects.get(pk=2)\n create_time = article.create_time\n print(create_time)\n print('==='*30)\n # print(localtime(create_time))\n return render(request, 'index.html', context={'create_time': create_time})\n return HttpResponse('Success')\n\ndef email_view(request):\n p = Person(email='qq.com')\n p.save()\n return HttpResponse('Success')","sub_path":"django-tutorial/chapter04/orm_field_demo/article/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"581698403","text":"import shutil\n\n#define the filename to use as output\nmotd=\"/etc/motd\"\n# motd=\"motd.test\"\n\n# The different colours as variables\nW=\"\\033[01;37m\" # WHITE\nB=\"\\033[01;34m\" # BLUE\nG=\"\\033[01;32m\" # GREEN\nR=\"\\033[01;31m\" # RED\nRESET=\"\\033[0m\"\n\ntib = 2 ** 40 # tib == tibibyte\ngb = 10 ** 9 # GB == 
gigabyte\n\n# get disk space\ntotals, useds, frees = shutil.disk_usage(\"/nfs/scratch\")\ntotals2, useds2, frees2 = shutil.disk_usage(\"/nfs/scratch2\")\n\n#Write file\nwith open(motd,'w') as out:\n out.writelines([\n \" \\n\", \n G ,\n \"______ _ \\n\",\n \"| ___ \\ (_) \\n\",\n \"| |_/ /__ _ __ _ _ __ ___ _ \\n\",\n \"| // _` |/ _` | '_ \\ / _ \\| | \\n\",\n \"| |\\ \\ (_| | (_| | |_) | (_) | | \\n\",\n \"\\_| \\_\\__,_|\\__,_| .__/ \\___/|_| \\n\",\n \" | | \\n\",\n \" |_| \\n\",\n RESET,\n \" * Documentation: https://vuw-research-computing.github.io/raapoi-docs/ \\n\",\n \" * Slack: https://uwrc.slack.com/ \\n\",\n \" * Cluster Reports: http://raapoi.vuw.ac.nz/ \\n\",\n \" \\n\",\n \" * Scratch : \"+R+\"{:3.0f}%\".format(useds*100/totals)+W+ \" full with \"+R+\"{:6.2f} TiB\".format(frees/tib)+W +\" remaining\\n\",\n \" * Scratch2: \"+R+\"{:3.0f}%\".format(useds2*100/totals2)+W+ \" full with \"+R+\"{:6.2f} TiB\".format(frees2/tib)+W +\" remaining\",\n RESET +\"\\n\\n\\n\"\n ])","sub_path":"motd-scripts/motd.py","file_name":"motd.py","file_ext":"py","file_size_in_byte":1475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"336339652","text":"import threading\nimport time\nimport random\ncounter = threading.local()\n\n\nclass TestThread(threading.Thread):\n\n def __init__(self):\n threading.Thread.__init__(self)\n\n def run(self):\n # TestThread.counter = threading.local()\n counter.count = 0\n\n for i in range(100):\n time.sleep(random.randint(1, 5))\n counter.count = random.randint(1, 1000)\n print(\"Thread %d counter: %d\\n\" % (self.ident, counter.count))\n\n\nif __name__ == \"__main__\":\n for i in range(3):\n th = TestThread()\n th.start()\n","sub_path":"test1/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"337912538","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n__author__ = 'Michael Liao'\n\n'''\nBuild release package.\n'''\n\nfrom datetime import datetime\nfrom fabric.api import *\n\nenv.user = 'ubuntu'\nenv.hosts = ['aws.itranswarp.com']\n\n_TAR_FILE = 'brightercv.tar.gz'\n_REMOTE_TMP_TAR = '/tmp/%s' % _TAR_FILE\n\n_REMOTE_DIST_LINK = '/srv/brightercv.com/www'\n_REMOTE_DIST_DIR = '/srv/brightercv.com/www-%s' % datetime.now().strftime('%y-%m-%d_%H.%M.%S')\n\ndef build(*files):\n includes = ['static', 'templates', 'transwarp', 'api.py', 'auth.py', 'conf_prod.py', 'favicon.ico', 'loader.py', 'memcache.py', 'resume.py', 'wsgi.py', 'wsgiapp.py']\n includes.extend(files)\n excludes = ['.*', '*.pyc', '*.pyo', '*.psd', 'static/css/less/*']\n local('rm -f %s' % _TAR_FILE)\n cmd = ['tar', '--dereference', '-czvf', _TAR_FILE]\n cmd.extend(['--exclude=\\'%s\\'' % ex for ex in excludes])\n cmd.extend(includes)\n local(' '.join(cmd))\n\ndef scp():\n local('ssh-add /Users/michael/.ssh/michaelonamazon.pem')\n run('rm -f %s' % _REMOTE_TMP_TAR)\n put(_TAR_FILE, _REMOTE_TMP_TAR)\n run('sudo mkdir %s' % _REMOTE_DIST_DIR)\n with cd(_REMOTE_DIST_DIR):\n run('sudo tar -xzvf %s' % _REMOTE_TMP_TAR)\n run('sudo chown -R www-data:www-data %s' % _REMOTE_DIST_DIR)\n run('sudo rm -f %s' % _REMOTE_DIST_LINK)\n run('sudo ln -s %s %s' % (_REMOTE_DIST_DIR, _REMOTE_DIST_LINK))\n run('sudo chown www-data:www-data %s' % _REMOTE_DIST_LINK)\n with settings(warn_only=True):\n run('sudo supervisorctl stop brightercv')\n run('sudo supervisorctl start brightercv')\n\ndef build_gunicorn():\n build()\n 
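# scp() below uploads the tarball built by build(), relinks the release directory on the remote host and restarts the app under supervisor\n    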
scp()\n","sub_path":"fabfile.py","file_name":"fabfile.py","file_ext":"py","file_size_in_byte":1616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"361625077","text":"# Найти максимальный элемент среди минимальных элементов столбцов матрицы.\n\nimport random\n\nn = int(input('Количество строк матрицы: '))\nm = int(input('Количество столбцов матрицы: '))\na = []\n\nfor i in range(n):\n b = []\n for j in range(m):\n b.append(random.randint(0, 100))\n a.append(b)\n\nfor i in range(n):\n for j in range(m):\n print(a[i][j], end=' ')\n print()\n\nmins = []\n\nfor j in range(m):\n min = max(a[0])\n for i in range(n):\n if a[i][j] < min:\n min = a[i][j]\n mins.append(min)\n\nprint(mins)\nprint(f'Максимальный элемент среди минимальных элементов столбцов: {max(mins)}')","sub_path":"dz3_task9.py","file_name":"dz3_task9.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"577731856","text":"#!/usr/bin/env python\n\nimport os\nimport sys\nimport json\nfrom combatant import load_combatant\nfrom schema import deserialize_state\n\nALLOWED_ACTIONS = {\n \"PANTHER\": [\n \"1\", # Up\n \"2\", # Up right\n \"3\", # Down right\n \"4\", # Down\n \"5\", # Down left\n \"6\", # Up left\n \"end\",\n ],\n \"PELICAN\": [\n \"1\", # Up\n \"2\", # Up right\n \"3\", # Down right\n \"4\", # Down\n \"5\", # Down left\n \"6\", # Up left\n \"drop_buoy\",\n \"drop_torpedo\",\n \"end\",\n ],\n}\n\nAGENT_NAME = \"\"\n\nTEST_PATH = os.path.join(\"/plark_ai_public\", \"Combatant\", \"tests\", \"states\")\n\nagent_type = sys.argv[1]\n\nbasic_agents_path = os.path.join(\n \"/plark_ai_public\",\n \"Components\",\n \"plark-game\",\n \"plark_game\",\n \"agents\",\n \"basic\",\n)\n\nagent_path = os.path.join(\n \"/plark_ai_public\", \"data\", \"agents\", \"models\", \"latest\"\n)\n\n\nif agent_type == \"PELICAN\":\n subdirs = os.listdir(os.path.join(agent_path, \"pelican\"))\n for subdir in subdirs:\n agent_path = os.path.join(agent_path, \"pelican\", subdir)\n break\n\n state = deserialize_state(\n json.load(open(os.path.join(TEST_PATH, \"state_10x10_pelican.json\")))\n )\nelif agent_type == \"PANTHER\":\n\n subdirs = os.listdir(os.path.join(agent_path, \"panther\"))\n for subdir in subdirs:\n agent_path = os.path.join(agent_path, \"panther\", subdir)\n break\n\n state = deserialize_state(\n json.load(open(os.path.join(TEST_PATH, \"state_10x10_panther.json\")))\n )\nelse:\n raise RuntimeError(\"Unknown agent_type - must be 'PELICAN' or 'PANTHER'\")\n\nprint(\"agent_path: \", agent_path)\n\nagent = load_combatant(agent_path, AGENT_NAME, basic_agents_path)\n\naction = agent.getAction(state)\n\nif action not in ALLOWED_ACTIONS[agent_type]:\n raise RuntimeError(\"NO!\")\n","sub_path":"Combatant/tests/test_combatant.py","file_name":"test_combatant.py","file_ext":"py","file_size_in_byte":1837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"30339025","text":"import pandas as pd\n\ndff = pd.read_csv(\"data/maindata.csv\")\ndata = input(\"Enter Location:\")\ndata1 = input(\"Enter Soil:\")\ndata2 = int(input(\"Enter Area:\"))\n\ndf1 = dff[dff['Location'].str.contains(data)]\ndf2 = df1[df1['Soil'].str.contains(data1)]\n# print(\"df2:\",df2)\n\narea = (df2['Area'])\nyeilds = (df2['yeilds'])\nprice = (df2['price'])\n\nres2 = price / yeilds\nprint(\"res2\" ,res2)\n\narea_input = data2\nres3 = res2 * area_input\nprint(\"res3:\" ,res3)\n\nres = 
yeilds / area\n# print(res)\n\nres4 = res * area_input\nprint(\"res4:\" ,res4)\n\ndf2.insert(11, \"calculation\", res3)\ndf2.to_csv('data/file.csv', index=False)\n\ndf2.insert(12, \"res4\", res4)\ndf2.to_csv('data/file.csv', index=False)\n\ndata = pd.read_csv(\"data/file.csv\", usecols=range(13))\nType_new = pd.Series([])\n\nfor i in range(len(data)):\n if data[\"Crops\"][i] == \"Coconut\":\n Type_new[i] = \"Coconut\"\n\n elif data[\"Crops\"][i] == \"Cocoa\":\n Type_new[i] = \"Cocoa\"\n\n elif data[\"Crops\"][i] == \"Coffee\":\n Type_new[i] = \"Coffee\"\n\n elif data[\"Crops\"][i] == \"Cardamum\":\n Type_new[i] = \"Cardamum\"\n\n elif data[\"Crops\"][i] == \"Pepper\":\n Type_new[i] = \"Pepper\"\n\n elif data[\"Crops\"][i] == \"Arecanut\":\n Type_new[i] = \"Arecanut\"\n\n elif data[\"Crops\"][i] == \"Ginger\":\n Type_new[i] = \"Ginger\"\n\n elif data[\"Crops\"][i] == \"Tea\":\n Type_new[i] = \"Tea\"\n\n else:\n Type_new[i] = data[\"Crops\"][i]\n\ndata.insert(13, \"Crop val\", Type_new)\ndata.drop([\"Year\", \"Location\", \"Soil\", \"Irrigation\", \"Crops\", \"yeilds\", \"calculation\", \"price\"], axis=1,\n inplace=True)\ndata.to_csv(\"data/train.csv\", header=False, index=False)\ndata.head()\n\navg1 = data['Rainfall'].mean()\nprint('Rainfall avg:', avg1)\navg2 = data['Temperature'].mean()\nprint('Temperature avg:', avg2)\navg3 = data['Humidity'].mean()\nprint('Humidity:', avg3)\n\ntestdata = {'Area': area_input,\n 'Rainfall': avg1,\n 'Temperature': avg2,\n 'Humidity': avg3}\n\ndf7 = pd.DataFrame([testdata])\ndf7.to_csv('data/test.csv',mode=\"a\", header=False, index=False)\n\n\nimport csv\nimport math\nimport operator\n\n\ndef euclideanDistance(instance1, instance2, length):\n distance = 0\n for x in range(length):\n distance += (pow((float(instance1[x]) - float(instance2[x])), 2))\n return math.sqrt(distance)\n\n\ndef getNeighbors(trainingSet, testInstance, k):\n distances = []\n length = len(testInstance) - 1\n\n for x in range(len(trainingSet)):\n dist = euclideanDistance(testInstance, trainingSet[x], length)\n distances.append((trainingSet[x], dist))\n distances.sort(key=operator.itemgetter(1))\n neighbors = []\n for x in range(k):\n neighbors.append(distances[x][0])\n return neighbors\n\n\ndef getResponse(neighbors):\n classVotes = {}\n for x in range(len(neighbors)):\n response = neighbors[x][-1]\n if response in classVotes:\n classVotes[response] += 1\n else:\n classVotes[response] = 1\n sortedVotes = sorted(classVotes.items(), key=operator.itemgetter(1), reverse=True)\n return sortedVotes[0][0]\n\ndef getAccuracy(testSet, predictions):\n correct = 0\n for x in range(len(testSet)):\n if testSet[x][-1] == predictions[x]:\n correct += 1\n return (correct / float(len(testSet))) * 100.0\n\n\ntrainingSet = []\ntestSet = []\nwith open('data/train.csv', 'r') as csvfile:\n lines = csv.reader(csvfile)\n dataset = list(lines)\n # print(dataset)\n\n\n\n for x in range(len(dataset) - 1):\n for y in range(5):\n dataset[x][y] = float(dataset[x][y])\n trainingSet.append(dataset[x])\n\nwith open('data/test.csv', 'r') as csvfile1:\n lines1 = csv.reader(csvfile1)\n # print(lines1)\n dataset1 = list(lines1)\n # print(dataset1)\n\n for p in range(len(dataset1)):\n for q in range(5):\n dataset[p][q] = float(dataset[p][q])\n testSet.append(dataset1[p])\n\nprint(\"trainingset:\", trainingSet)\nprint(\"testingset:\", testSet)\n# print(\"1:\",len(trainingSet))\n# print(\"2:\",len(testSet))\nk = 1\npredictions = []\nfor x in range(len(testSet)):\n neighbors = getNeighbors(trainingSet, testSet[x], k)\n 
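# majority vote among the k nearest training rows decides the predicted crop\n    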
response = getResponse(neighbors)\n print(\"\\nNeighbors:\", neighbors)\n print('\\nResponse:', response)\n\n predictions.append(response)\n\naccuracy = getAccuracy(testSet, predictions)\nprint('Accuracy: ' + repr(accuracy) + '%')\n\n\nimport matplotlib.pyplot as plt\n\nx = [0, 1, 2]\ny = [accuracy, 0, 0]\nplt.title('Accuracy')\nplt.bar(x, y)\nplt.show()","sub_path":"maya_main/knnagriculture/knn.py","file_name":"knn.py","file_ext":"py","file_size_in_byte":4468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"106514428","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport wave\nfrom get import get\ndef naive_encode(num:int):\n if num > 127:\n return 127\n elif num < -128:\n return -128\n else:\n return num\ndef factor_encode(num:int,a:float=5,bits=4):\n x = 2 ** (bits-1)\n if num > (x-1)*a:\n return (x-1)\n elif num < -x * a:\n return -x\n else:\n return num//a + 1\ndef naive_DPCM(wave_data:np.ndarray):\n x = wave_data\n x_hat = np.zeros(wave_data.shape)\n d = np.zeros(wave_data.shape)\n d[0] = x[0]\n x_hat[0] = wave_data[0]\n c = np.zeros(wave_data.shape)\n c[0] = naive_encode(d[0])\n for i in range(1,wave_data.shape[0]):\n d[i] = x[i] - x_hat[i-1]\n c[i] = naive_encode(d[i])\n x_hat[i] = x_hat[i-1] + c[i]\n return c\ndef factor_DPCM(wave_data:np.ndarray,a:float,bits:int=4):\n x = wave_data\n x_hat = np.zeros(wave_data.shape)\n d = np.zeros(wave_data.shape)\n d[0] = x[0]\n x_hat[0] = wave_data[0]\n c = np.zeros(wave_data.shape)\n c[0] = factor_encode(d[0],a,bits)\n for i in range(1, wave_data.shape[0]):\n d[i] = x[i] - x_hat[i - 1]\n c[i] = factor_encode(d[i],a,bits)\n x_hat[i] = x_hat[i - 1] + (c[i]-1)*a\n return c\ndef naive_decode_DPCM(c:np.ndarray):\n x_hat = np.zeros(c.shape)\n x_hat[0] = c[0]\n for i in range(1,c.shape[0]):\n x_hat[i] = x_hat[i-1] + c[i]\n return x_hat\n\ndef factor_decode_DPCM(c:np.ndarray,a:int,bits:int=4):\n x = 2 ** (bits - 1)\n x_hat = np.zeros(c.shape)\n x_hat[0] = c[0]\n for i in range(1, c.shape[0]):\n x_hat[i] = x_hat[i - 1] + (c[i]-1)*a\n return x_hat\ndef save_data(filename:str,code:np.ndarray,bits:int):\n mask ='0b'+ bits * '1'\n mask = int(mask,2)\n l = []\n for item in code:\n item = int(item)\n item = item & mask\n tmp = item\n for i in range(0,bits):\n l.append(tmp & 0b1)\n tmp = tmp >> 1\n while len(l) % 8 != 0:\n l.append(0)\n with open(filename,'wb') as f:\n for i in range(0,len(l)//8):\n x = '0b'\n for j in range(0,8):\n x = x + str((l[i*8+j]))\n x = int(x,2)\n x = [x]\n x = bytes(x)\n f.write(x)\n #print(l)\ndef load_data(filename,bits):\n l = []\n with open(filename,'rb') as f:\n while True:\n s = f.read(1)\n if not s:\n break\n #print(s)\n s = int.from_bytes(s, byteorder='little', signed=True)\n for i in range(0,8):\n l.append((s>>7)&1)\n s = s << 1\n l = np.array(l)\n l = l.reshape((-1,bits))\n ans = []\n def calculate(item):\n ans = 0\n for i in range(0,len(item)-1):\n ans = ans + item[i] * (2 ** i)\n ans = ans - item[len(item)-1] *(2 ** (len(item)-1))\n return ans\n for item in l:\n a = calculate(item)\n ans.append(a)\n return np.array(ans)\n\ndef SNR(orig:np.ndarray,new:np.ndarray):\n size = len(orig)\n up = 0\n down = 0\n for i in range(0,size):\n up += orig[i] ** 2\n down += (orig[i] - new[i]) ** 2\n if down == 0:\n return np.inf\n return 10 * np.log10(up/down)\ndef wav_to_pcm(filename,pcmfilename):\n f = open(filename,'rb')\n f.seek(0)\n f.read(44)\n data = np.fromfile(f,dtype=np.int16)\n data.tofile(pcmfilename)\ndef writeaspcm(filename:str,wave_data:np.ndarray):\n 
\"\"\"\n filename should be a string without suffix\n :param filename:\n :param wave_data:\n :return:\n \"\"\"\n with wave.open(filename+'.wav', 'wb') as f:\n data_to_write = wave_data.flatten()\n data_to_write = np.array(data_to_write, dtype=np.dtype(np.int16))\n f.setnchannels(1)\n f.setsampwidth(2)\n f.setframerate(16000)\n f.setcomptype('NONE', 'Uncompressed')\n f.writeframes(data_to_write)\n wav_to_pcm(filename+'.wav',filename+'.pcm')\ndef implement1():\n wave_data = get(1,False)\n y = naive_DPCM(wave_data)\n save_data('1_8bit.dpc',y,8)\n data = load_data('1_8bit.dpc',8)\n z = naive_decode_DPCM(data)\n print('SNR: '+str(SNR(wave_data,z)))\n writeaspcm('1_8bit',z)\ndef implement2():\n wave_data = get(1,False)\n y = naive_DPCM(wave_data)\n save_data('1_4bit.dpc',y,4)\n data = load_data('1_4bit.dpc',4)\n z = naive_decode_DPCM(data)\n print('SNR: '+str(SNR(wave_data,z)))\n writeaspcm('1_4bit',z)\ndef findarguments():\n wave_data = get(1, False)\n wave_data = np.array(wave_data,dtype=np.int16)\n SNRmax = 0\n amax = 1\n for a in range(570,600):\n y = factor_DPCM(wave_data,a)\n z = factor_decode_DPCM(y,a)\n snr = SNR(wave_data,z)\n if snr > SNRmax:\n SNRmax = snr\n amax = a\n print(\"The best a :\" + str(amax))\n y = factor_DPCM(wave_data,amax,4)\n save_data('1_4bit.dpc', y, 4)\n data = load_data('1_4bit.dpc', 4)\n z = factor_decode_DPCM(data,amax,4)\n print('SNR: ' + str(SNR(wave_data, z)))\n writeaspcm('1_4bit', z)\nfindarguments()","sub_path":"audiolab2/lab2/DPCM.py","file_name":"DPCM.py","file_ext":"py","file_size_in_byte":5041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"573298407","text":"#-*- coding: utf-8 -*-\n\n# Copyright 2008-2010 Calculate Ltd. http://www.calculate-linux.org\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport re\nimport os\nimport types\nimport pwd, grp\nimport cl_overriding\nimport socket\n\nfrom math import sqrt\nfrom cl_vars_share import varsShare, clLocale\nfrom os.path import exists as pathexists\nfrom os import path,readlink\nfrom cl_utils import isMount, genpassword, \\\n getAvailableVideo, process, \\\n listDirectory,isPkgInstalled,lspci, readLinesFile, \\\n getUdevDeviceInfo,getPkgInstalled\nfrom utils import ip\nfrom encrypt import getHash\n\nclass fillVars(varsShare):\n \"\"\"Auxilary object for creating variables\n \n Contains filling methods\"\"\"\n addDn = lambda x,*y: \",\".join(y)\n genDn = lambda x,*y: \"=\".join(y)\n\n def get_cl_env_path(self):\n \"\"\"Path to env files\"\"\"\n envData = self.Get(\"cl_env_data\")\n if envData:\n return map(lambda x: x[1], envData)\n else:\n cl_overriding.printERROR(_(\"Error:\") + \" \" +\\\n _(\"Template variable cl_env_data is empty\"))\n cl_overriding.exit(1)\n\n def get_cl_env_location(self):\n \"\"\"Aliases to env files\"\"\"\n envData = self.Get(\"cl_env_data\")\n if envData:\n return map(lambda x: x[0], envData)\n else:\n cl_overriding.printERROR(_(\"Error:\") + \" \" +\\\n _(\"Template variable cl_env_data is empty\"))\n 
cl_overriding.exit(1)\n\n def get_cl_env_server_path(self):\n \"\"\"Paths to clt-template files\"\"\"\n return '/var/calculate/remote/server.env'\n\n def get_cl_template_path(self):\n \"\"\"Paths to template files\"\"\"\n return [\"/var/lib/layman/calculate/profiles/templates\",\n \"/var/calculate/templates\",\n \"/var/calculate/remote/templates\"]\n\n def get_cl_template_clt_path(self):\n \"\"\"Path to 'clt' files\"\"\"\n if \"CONFIG_PROTECT\" in os.environ:\n protectPaths = [\"/etc\"] + filter(lambda x: x.strip(),\n os.environ[\"CONFIG_PROTECT\"].split(\" \"))\n else:\n protectPaths = [\"/etc\", \"/usr/share/X11/xkb\", \"var/lib/hsqldb\",\n \"/usr/share/config\"]\n return filter(path.exists, protectPaths)\n\n def get_os_net_domain(self):\n \"\"\"Get net domain name\"\"\"\n if path.exists('/proc/self/fd/1') and \\\n readlink('/proc/self/fd/1') == '/dev/console' and \\\n self.Get('os_root_dev') == '/dev/nfs':\n return \"local\"\n textLines = self._runos(\"hostname -d 2>&1\")\n if textLines is False:\n cl_overriding.printERROR(_(\"Error executing 'hostname -d'\"))\n return cl_overriding.exit(1)\n domain = \"\"\n if textLines:\n domain = textLines[0]\n if not domain:\n cl_overriding.printERROR(_(\"Error:\") + \" \" +\\\n _(\"Domain name not found\"))\n cl_overriding.printERROR(\\\n _(\"Command 'hostname -d' returns an empty value\"))\n return cl_overriding.exit(1)\n elif re.search(\"^hostname: \",domain):\n return \"local\"\n else:\n return domain\n\n def get_os_linux_shortname(self):\n \"\"\"Get short system name\"\"\"\n systemRoot = \"/\"\n return self.getShortnameByMakeprofile(systemRoot) or \\\n self.getShortnameByIni(systemRoot) or \\\n self.detectOtherShortname(systemRoot) or \\\n \"Linux\"\n\n def get_os_linux_name(self):\n \"\"\"Get full system name\"\"\"\n linuxShortName = self.Get(\"os_linux_shortname\")\n return self.dictLinuxName.get(linuxShortName,\"Linux\")\n\n def get_os_linux_subname(self):\n \"\"\"Get posfix name of system\"\"\"\n linuxShortName = self.Get(\"os_linux_shortname\")\n return self.dictLinuxSubName.get(linuxShortName,\"\")\n\n def get_os_linux_ver(self):\n \"\"\"Get system version\"\"\"\n linuxShortName = self.Get(\"os_linux_shortname\")\n return self.getVersionFromMetapackage('/',linuxShortName) or \\\n self.getVersionFromCalculateIni('/') or \\\n self.getVersionFromGentooFiles('/') or \\\n self.getVersionFromUname() or \"0\"\n\n def get_os_net_hostname(self):\n \"\"\"Get hostname of computer\"\"\"\n if path.exists('/proc/self/fd/1') and \\\n readlink('/proc/self/fd/1') == '/dev/console' and \\\n self.Get('os_root_dev') == '/dev/nfs':\n return socket.gethostname()\n textLines = self._runos(\"hostname -s 2>&1\")\n hostname = \"\"\n if textLines:\n hostname = textLines[0]\n if not hostname:\n return \"\"\n if re.search(\"^hostname: \",hostname):\n textLines = self._runos(\"hostname 2>&1\")\n if not textLines:\n return \"\"\n hostname = textLines[0]\n if re.search(\"^hostname: \",hostname):\n return self.Get('os_linux_shortname')\n else:\n if hostname=='livecd':\n return self.Get('os_linux_shortname')\n return hostname\n\n def get_os_net_ip(self):\n \"\"\"All computer ip addresses, comma delimeter\"\"\"\n IPs = []\n netInterfaces=self.Get(\"os_net_interfaces\")\n for i in netInterfaces:\n ipaddr, mask = ip.receiveIpAndMask(i)\n if ipaddr:\n IPs.append(ipaddr)\n return \",\".join(IPs)\n\n def get_os_net_interfaces(self):\n \"\"\"All net interfaces\"\"\"\n return filter(lambda x: x!=\"lo\", self.getDirList(\"/sys/class/net\"))\n\n def get_os_net_allow(self):\n 
\"\"\"Allowed networks\"\"\"\n networks=[]\n netInterfaces=self.Get(\"os_net_interfaces\")\n for i in netInterfaces:\n ipaddr, mask = ip.receiveIpAndMask(i)\n if ipaddr and mask:\n networks.append(ip.getIpNet(ipaddr, mask))\n else:\n networks.append(\"\")\n return \",\".join(filter(lambda x:x,networks))\n\n def get_os_arch_machine(self):\n \"\"\"Processor architecture\"\"\"\n textLines = self._runos(\"uname -m\")\n if not textLines:\n return \"\"\n march = textLines[0]\n return march\n\n def get_os_root_dev(self):\n \"\"\"Root filesystem device\"\"\"\n record = open('/proc/cmdline','rb').read().strip()\n re_resRealRoot=re.search('(?:^|\\s)real_root=(\\S+)(\\s|$)',record)\n re_resFakeRoot=re.search('(?:^|\\s)root=(\\S+)(\\s|$)',record)\n # param real_root priority that root\n re_res = re_resRealRoot or re_resFakeRoot\n if re_res:\n rootparam=re_res.group(1)\n # check root for /dev/sd view\n if re.match(\"^\\/dev\\/[a-z]+.*$\", rootparam):\n return getUdevDeviceInfo(\n name=rootparam.strip()).get('DEVNAME',rootparam)\n # check root set by uuid\n if re.match(\"^UUID=.*$\",rootparam):\n uuid = rootparam[5:].strip(\"\\\"'\")\n blkidProcess = process('/sbin/blkid','-c','/dev/null','-U',\n uuid)\n if blkidProcess.success():\n return getUdevDeviceInfo(\n name=blkidProcess.read().strip()).get('DEVNAME','')\n # check root set by label\n if re.match(\"^LABEL=.*$\",rootparam):\n uuid = rootparam[6:].strip(\"\\\"'\")\n blkidProcess = process('/sbin/blkid','-c','/dev/null','-L',\n uuid)\n if blkidProcess.success():\n return getUdevDeviceInfo(\n name=blkidProcess.read().strip()).get('DEVNAME','')\n # get device mounted to root\n dfLines = self._runos(\"LANG=C df /\")\n if not dfLines:\n return \"\"\n if type(dfLines) == types.ListType and len(dfLines)>1:\n root_dev = dfLines[1].split(\" \")[0].strip()\n if root_dev:\n return {'none':'/dev/ram0'}.get(root_dev,root_dev)\n return \"\"\n\n def get_os_root_type(self):\n \"\"\"Root device type (ram, hdd, livecd)\"\"\"\n def link2pair(linkfile):\n \"\"\"Return pair (target,link) from link\"\"\"\n basedir = os.path.dirname(linkfile)\n targetfile = os.readlink(linkfile)\n return (path.normpath(os.path.join(basedir,targetfile)),linkfile)\n rootDev = self.Get(\"os_root_dev\")\n if rootDev:\n if \"/dev/ram\" in rootDev or \"/dev/nfs\" in rootDev:\n return \"livecd\"\n idDict = dict(map(link2pair,\n filter(lambda x:path.islink(x),\n map(lambda x:path.join('/dev/disk/by-id',x),\n listDirectory('/dev/disk/by-id')))))\n if \"usb-\" in idDict.get(rootDev,\"\"):\n return \"usb-hdd\"\n return \"hdd\"\n\n def get_hr_cdrom_set(self):\n \"\"\"Cdrom variable\"\"\"\n if os.path.exists('/sys/block/sr0'):\n textLines = self._runos(\n \"udevadm info --query=all --name=/dev/dvdrw\")\n if not textLines is False:\n for line in textLines:\n if \"ID_CDROM=1\" in line:\n return \"on\"\n return \"off\"\n\n def get_hr_virtual(self):\n \"\"\"Virtual machine name (virtualbox, vmware, qemu)\"\"\"\n pciLines = self._runos(\"/usr/sbin/lspci\")\n if not pciLines:\n return False\n virtSysDict = {'VirtualBox':'virtualbox',\n 'VMware':'vmware',\n 'Qumranet':'qemu'}\n virtName = ''\n for vName in virtSysDict.keys():\n if filter(lambda x: vName in x, pciLines):\n virtName = virtSysDict[vName]\n break\n return virtName\n\n def get_hr_board_model(self):\n \"\"\"Get motherboard model\"\"\"\n modelFile = \"/sys/class/dmi/id/board_name\"\n try:\n return open(modelFile,\"r\").read().strip()\n except:\n return \"\"\n\n def get_hr_board_vendor(self):\n \"\"\"Get motherboard vendor\"\"\"\n vendorFile = 
\"/sys/class/dmi/id/board_vendor\"\n try:\n return open(vendorFile,\"r\").read().strip()\n except:\n return \"\"\n\n def get_hr_cpu_num(self):\n \"\"\"Get processors count\"\"\"\n cpuinfoFile = \"/proc/cpuinfo\"\n try:\n return len([\"\" for line in open(cpuinfoFile,\"r\").readlines()\n if line.startswith(\"processor\")])\n except:\n return 1\n\n def get_os_locale_locale(self):\n \"\"\"locale (example: ru_RU.UTF-8)\"\"\"\n locale = clLocale()\n # get locale from boot calculate param\n localeVal = self.getValueFromCmdLine(\"calculate\",0)\n if locale.isLangExists(localeVal):\n return locale.getFieldByLang('locale',localeVal)\n else:\n localeVal = self.getValueFromConfig('/etc/env.d/02locale','LANG')\n if locale.isValueInFieldExists('locale',localeVal):\n return localeVal\n if os.environ.has_key(\"LANG\") and os.environ[\"LANG\"] != \"C\":\n return os.environ[\"LANG\"]\n return locale.getFieldByLang(\"locale\",\"default\")\n\n def get_os_locale_lang(self):\n \"\"\"lang (example: ru_RU)\"\"\"\n locale = clLocale()\n return locale.getLangByField(\"locale\",self.Get('os_locale_locale'))\n\n def get_os_locale_language(self):\n \"\"\"language (example: ru)\"\"\"\n locale = clLocale()\n return locale.getFieldByLang(\"language\",self.Get('os_locale_lang'))\n\n def get_os_locale_xkb(self):\n \"\"\"xkb layouts (example: en,ru)\"\"\"\n locale = clLocale()\n return locale.getFieldByLang(\"xkblayout\",\n self.Get('os_locale_lang'))\n\n def get_os_locale_xkbname(self):\n \"\"\"названия используемых раскладок клавиатуры для X\"\"\"\n localeXkb = self.Get(\"os_locale_xkb\")\n if localeXkb:\n return localeXkb.split(\"(\")[0]\n return \"\"\n\n def get_ur_login(self):\n \"\"\"User login\"\"\"\n uid = os.getuid()\n try:\n userName = pwd.getpwuid(uid).pw_name\n except:\n return \"\"\n return userName\n\n def get_ur_group(self):\n \"\"\"User group\"\"\"\n userName = self.Get('ur_login')\n groupName = \"\"\n if userName:\n try:\n gid = pwd.getpwnam(userName).pw_gid\n groupName = grp.getgrgid(gid).gr_name\n except:\n return \"\"\n return groupName\n\n def get_ur_fullname(self):\n \"\"\"Full user name\"\"\"\n userName = self.Get('ur_login')\n fullName = \"\"\n if userName:\n try:\n fullName = pwd.getpwnam(userName).pw_gecos\n except:\n return \"\"\n return fullName\n\n def get_ur_jid(self):\n \"\"\"Get user Jabber id\"\"\"\n userInfo = self.getUserInfo()\n userJID = \"\"\n if userInfo:\n userJID = userInfo[\"jid\"] \n return userJID\n\n def get_ur_mail(self):\n \"\"\"Get user email\"\"\"\n userInfo = self.getUserInfo()\n userMail = \"\"\n if userInfo:\n userMail = userInfo[\"mail\"] \n return userMail\n\n def get_ur_home_path(self):\n \"\"\"Get user home directory\"\"\"\n userName = self.Get('ur_login')\n homeDir = \"\"\n if userName:\n try:\n homeDir = pwd.getpwnam(userName).pw_dir\n except:\n return \"\"\n return homeDir\n\n def get_os_linux_system(self):\n \"\"\"Get linux system (server or desktop)\"\"\"\n shortName = self.Get('os_linux_shortname')\n return self.dictNameSystem.get(shortName,\"\")\n\n def get_os_x11_video_drv(self):\n \"\"\"Get video driver used by xorg\"\"\"\n xorg_conf = '/etc/X11/xorg.conf'\n # Try analize Xorg.{DISPLAY}.log\n display = os.environ.get('DISPLAY')\n list_available_drivers = \\\n getAvailableVideo(prefix=self.Get('cl_chroot_path'))\n if display and list_available_drivers:\n reDriver = re.compile('|'.join(map(lambda x: \"%s_drv.so\"%x,\n list_available_drivers)))\n display_number = re.search(r':(\\d+)(\\..*)?', display)\n reDriverName = re.compile(r'([^/]+)_drv.so')\n if 
display_number:\n xorg_log_file = '/var/log/Xorg.%s.log' % \\\n display_number.group(1)\n if path.exists(xorg_log_file):\n matchStrs = \\\n map(lambda x:x.group(1),\n filter(lambda x:x,\n map(reDriverName.search,\n filter(lambda x:\"drv\" in x and reDriver.search(x),\n readLinesFile(xorg_log_file)))))\n if matchStrs:\n return matchStrs[-1]\n\n # analize /etc/X11/xorg.conf\n if path.exists(xorg_conf):\n matchSect = re.search(r'Section \"Device\".*?EndSection',\n open('/etc/X11/xorg.conf').read(),re.S)\n if matchSect:\n resDriver = re.search(r'^\\s*Driver\\s*\"([^\"]+)\"',\n matchSect.group(0),re.M)\n if resDriver and resDriver.group(1) in list_available_drivers:\n return resDriver.group(1)\n\n videoVal = self.getValueFromCmdLine(\"calculate\",\"video\")\n videoVal = {'i915':'intel'}.get(videoVal,videoVal)\n if not isPkgInstalled('xorg-server') or \\\n videoVal in list_available_drivers:\n return videoVal\n return self.getVideoByDefault(list_available_drivers)\n\n def getResByXDpyInfo(self):\n \"\"\"Get resolution by xdpyinfo utility\"\"\"\n lines=self._runos(\"xdpyinfo\")\n if not lines:\n return \"\"\n reRes = re.compile(\"dimensions:\\s+(\\d+)x(\\d+)\\s+pixels\")\n searchRes=False\n for line in lines:\n searchRes = reRes.search(line)\n if searchRes:\n break\n if searchRes:\n return \"%sx%s\"%(searchRes.group(1), searchRes.group(2))\n return \"\"\n\n def get_os_x11_resolution(self):\n \"\"\"\n Return current screen resolution (width, height).\n Try detect by xdpyinfo, then Xorg.log, xorg.conf\n \"\"\"\n resolution = self.getResByXDpyInfo() \n if resolution:\n return resolution\n if self.Get('os_root_type') != 'usb-hdd':\n xlog = \"/var/log/Xorg.0.log\"\n if os.access(xlog,os.R_OK):\n reXorgLogParser = re.compile(\"\"\"\n Virtual\\ screen\\ size\\ determined\\ to\\ be\n \\ ([0-9]+)\\s*x\\s*([0-9]+)|\n Setting\\ mode\\ \"(\\d+)x(\\d+)[0-9\\@]\"|\n Output\\ [\\S]+\\ using\\ initial\\ mode\\ (\\d+)x(\\d+)|\n Virtual\\ size\\ is\\ (\\d+)x(\\d+)\"\"\", re.X | re.S)\n resXorgLogParser = reXorgLogParser.search(open(xlog,'r').read())\n if resXorgLogParser:\n return \"%sx%s\"%filter(lambda x:x,\n resXorgLogParser.groups())[:2]\n\n # get resolution from xorg.conf\n xorgconf = \"/etc/X11/xorg.conf\"\n reScreenSections = re.compile('Section \"Screen\"(.*?)EndSection',\n re.S)\n reModes = re.compile('Modes\\s+\"(\\d+x\\d+)')\n if os.access(xorgconf,os.R_OK):\n sectionsScreen = filter(lambda x:\"Modes\" in x,\n reScreenSections.findall(open('/etc/X11/xorg.conf',\n 'r').read()))\n modes = map(lambda x:x.groups()[0],\n filter(lambda x:x,\n map(reModes.search, sectionsScreen)))\n if modes:\n return max(modes,key=lambda x:int(x.partition('x')[0]))\n\n # get resolution from command line\n reRightResolution = re.compile(\"^(\\d+x\\d+|auto)$\",re.S)\n kernelResolution = self.getValueFromCmdLine(\"calculate\",3)\n if kernelResolution and reRightResolution.match(kernelResolution):\n return {'auto':''}.get(kernelResolution,kernelResolution)\n else:\n return \"\"\n\n def get_os_x11_height(self):\n \"\"\"Get screen height in pixeles\"\"\"\n return self.Get('os_x11_resolution').partition('x')[2] or \"768\"\n\n def get_os_x11_width(self):\n \"\"\"Get screen width in pixeles\"\"\"\n return self.Get('os_x11_resolution').partition('x')[0] or \"1024\"\n\n def get_os_x11_standart(self):\n \"\"\"Get the nearest standard size of image relative current\n screen resolution\"\"\"\n #Стандартные разрешения\n widthVal = self.Get('os_x11_width')\n heightVal = self.Get('os_x11_height')\n if not widthVal or not heightVal:\n return 
\"\"\n width = int(widthVal)\n height = int(heightVal)\n gep = sqrt(height**2+width**2)\n k = float(width)/float(height)\n for themePkg in ['media-gfx/cldx-themes',\n 'media-gfx/cld-themes',\n 'media-gfx/cldg-themes']:\n installed = getPkgInstalled(themePkg,\n prefix=self.Get('cl_chroot_path'))\n if installed and installed[0]['PV'].startswith('12'):\n res = [(1024,576), (1024,600),\n (1024,768), (1200,800), (1280,1024),\n (1280,720), (1280,768), (1280,800),\n (1360,768), (1366,768), (1368,768),\n (1400,1050), (1440,900), (1600,1200),\n (1600,768), (1600,900), (1680,1050),\n (1680,945), (1920,1080), (1920,1200),\n (2048,1152), (2560,1440), (2560,1600),\n (640,480), (800,480) ]\n break\n else:\n res = [(1024,600), (1024,768),\n (1280,1024), (1280,800),\n (1366,768), (1440,900),\n (1600,1200), (1680,1050),\n (1920,1200)] \n bestRes = min(res,\n key=lambda x:(abs(x[0]/float(x[1])-k),\n abs(gep-sqrt(x[0]**2+x[1]**2))))\n return \"%sx%s\"%bestRes\n\n def get_os_x11_composite(self):\n \"\"\"On or off composite mode\"\"\"\n state = self.get_composite_from_xorgconf()\n return state or \"off\"\n\n def get_hr_laptop(self):\n \"\"\"Laptop vendor\"\"\"\n chassisType = '/sys/class/dmi/id/chassis_type'\n boardVendor = '/sys/class/dmi/id/board_vendor'\n if os.access(chassisType,os.R_OK) and \\\n os.access(boardVendor,os.R_OK):\n chassis = open(chassisType,'r').read().strip()\n notebookChassis = ['1','8','10']\n if chassis in notebookChassis:\n valBoardVendor = open(boardVendor,'r').read().strip()\n return valBoardVendor.split(\" \")[0].lower() or \\\n \"unknown\"\n return \"\"\n\n def get_hr_laptop_model(self):\n \"\"\"Laptop name\"\"\"\n boardName = '/sys/class/dmi/id/board_name'\n if self.Get('hr_laptop') and os.access(boardName,os.R_OK):\n valBoardName = open(boardName,'r').read().strip()\n return valBoardName or \"unknown\"\n return \"\"\n\n def get_hr_video_name(self):\n \"\"\"Get video name\"\"\"\n pciVideo = list(sorted(lspci(\"VGA compatible\").items()))\n if pciVideo:\n pciVideo = pciVideo[0][1]\n vendor=pciVideo.get(\"vendor\",\"\").split(\" \")[0]\n name=pciVideo.get(\"name\",\"\")\n if \"[\" in name and \"]\" in name:\n name = name.partition(\"[\")[2].partition(\"]\")[0]\n return \"{vendor} {name}\".format(vendor=vendor,name=name)\n return \"\"\n\n def get_hr_video(self):\n \"\"\"Videocard vendor\"\"\"\n line = self.Get('hr_video_name').lower()\n if any(x in line for x in (\"nvidia\",\"geforce\")):\n return \"nvidia\"\n if any(x in line for x in (\"ati\",\"radeon\")):\n return \"ati\"\n elif \"intel\" in line:\n return \"intel\"\n elif \"via\" in line:\n return \"via\"\n elif \"vmware\" in line:\n return \"vmware\"\n else:\n return \"other\"\n\n def get_cl_kernel_uid(self):\n \"\"\"Get UID of symlink kernel, initramfs and System.map\"\"\"\n return self.getKernelUid(self.Get('os_root_dev'))\n\n def get_cl_chroot_status(self):\n \"\"\"Detect chroot mode by mtab content\"\"\"\n try:\n return \"on\" if self.isChroot(os.getpid()) else \"off\"\n except:\n return \"off\"\n\n def get_os_scratch(self):\n \"\"\"Current system is scratch\"\"\"\n if self.Get('os_root_type') == 'livecd':\n return \"on\" if isMount('/mnt/scratch/workspace') else \"off\"\n else:\n return \"on\" if isMount('/mnt/scratch') else \"off\"\n\n def get_cl_root_path(self):\n \"\"\"Path to directory relative which perform joining templates to \n\n system files (sandbox)\"\"\"\n return '/'\n\n def get_cl_chroot_path(self):\n \"\"\"Path to directory which contain other system\"\"\"\n return '/'\n\n def get_cl_autoupdate_set(self):\n 
\"\"\"(on or off) autoupdate config from install program\"\"\"\n return 'off'\n\n def get_cl_api(self):\n \"\"\"The path to the module api,\n\n and additional parameters caluclate packages\"\"\"\n return {}\n\n def get_ld_encrypt(self):\n \"\"\"hash crypto algoritm\"\"\"\n return 'ssha'\n\n def get_ld_bind_login(self):\n \"\"\"bind login\"\"\"\n return 'proxyuser'\n\n def get_ld_base_root(self):\n \"\"\"base name LDAP\"\"\"\n return 'calculate'\n\n def get_ld_base_dn(self):\n \"\"\"base DN LDAP\"\"\"\n return self.genDn(\"dc\", self.Get('ld_base_root'))\n\n def get_ld_bind_dn(self):\n \"\"\"bind DN LDAP\"\"\"\n return self.addDn(self.genDn(\"cn\", self.Get('ld_bind_login')),\n self.Get('ld_base_dn'))\n\n def get_ld_bind_pw(self):\n \"\"\"bind password\"\"\"\n return 'calculate'\n\n def get_ld_bind_hash(self):\n \"\"\"hash bind\"\"\"\n return getHash(self.Get('ld_bind_pw'), self.Get('ld_encrypt'))\n\n def get_ld_admin_login(self):\n \"\"\"administrator name\"\"\"\n return 'ldapadmin'\n\n def get_ld_admin_dn(self):\n \"\"\"root DN\"\"\"\n return self.addDn(self.genDn(\"cn\", self.Get('ld_admin_login')), \n self.Get('ld_base_dn'))\n\n def get_ld_admin_hash(self):\n \"\"\"root hash\"\"\"\n return getHash(self.Get('ld_admin_pw'), self.Get('ld_encrypt'))\n\n def get_ld_admin_pw(self):\n \"\"\"password root\"\"\"\n return genpassword()\n\n def get_ld_services(self):\n \"\"\"Name from all services\"\"\"\n return 'Services'\n\n def get_ld_services_dn(self):\n \"\"\"DN from all services\"\"\"\n return self.addDn(self.genDn(\"ou\", self.Get('ld_services')), \n self.Get('ld_base_dn'))\n\n def get_cl_ca_cert(self):\n \"\"\"CA certificate\"\"\"\n return 'CA.crt'\n\n def get_cl_ca_key(self):\n \"\"\"CA key\"\"\"\n return 'CA.key'\n\n def get_cl_ca_path(self):\n \"\"\"CA path\"\"\"\n return '/var/calculate/ssl/main'\n\n def get_os_clock_timezone(self):\n \"\"\"Current clock timezone\"\"\"\n zoneinfodir = \"/usr/share/zoneinfo/\"\n localtimefile = \"/etc/localtime\"\n timezonefile = \"/etc/timezone\"\n # try get timezone from kernel calculate param\n timezone = self.getValueFromCmdLine(\"calculate\",2)\n if timezone and \\\n path.exists(path.join(zoneinfodir,timezone)):\n return timezone\n # get timezone from /etc/timezone\n if path.exists(timezonefile):\n return open(timezonefile,\"r\").read().strip()\n return \"UTC\"\n\n def get_os_lang(self):\n \"\"\"Supported languages\"\"\"\n return list(sorted(list(set(clLocale().getLangs()) &\n set([\"en_US\",\"de_DE\",\"es_ES\",\"fr_FR\",\"it_IT\",\"pl_PL\",\"pt_BR\",\n \"uk_UA\",\"bg_BG\",\"ru_RU\",\"ro_RO\",\"pt_PT\"]))))\n","sub_path":"pym/cl_fill.py","file_name":"cl_fill.py","file_ext":"py","file_size_in_byte":26852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"317877094","text":"# -*- coding: utf-8 -*-\n\"\"\"\n Miscellaneous Functions for Regression File.\n\"\"\"\n\nfrom __future__ import print_function\nimport numpy as np\nimport pandas as pd\nfrom sklearn import preprocessing\nfrom sklearn.metrics import mean_squared_error, r2_score\nfrom sklearn.ensemble import RandomForestRegressor\nimport matplotlib.pyplot as plt\nfrom sklearn.ensemble import BaggingRegressor\nfrom sklearn.ensemble import AdaBoostRegressor\nfrom sklearn.ensemble import GradientBoostingRegressor\nfrom sklearn.neighbors import KNeighborsRegressor\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn import neighbors\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.ensemble import 
GradientBoostingClassifier\n#from sklearn.svm import SVR\nfrom sklearn.feature_selection import SelectKBest, chi2\nfrom sklearn.svm import SVC\nfrom sklearn.qda import QDA\nimport os\n\n#NN\nfrom keras.models import Sequential\nfrom keras.layers.core import Activation, Dropout\nfrom keras.layers import Dense, Activation, LSTM\nfrom keras.wrappers.scikit_learn import KerasRegressor\nfrom keras.utils import plot_model\n\n#SVM\nfrom sklearn.svm import SVR\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.model_selection import learning_curve\nfrom sklearn.kernel_ridge import KernelRidge\n\nfrom sklearn.preprocessing import MinMaxScaler\n\nimport time\n\nfrom sklearn import tree\n\ndef load_dataset(path_directory, symbol): \n \"\"\"\n Import DataFrame from Dataset.\n \"\"\"\n\n path = os.path.join(path_directory, symbol)\n\n out = pd.read_csv(path, index_col=2, parse_dates=[2])\n out.drop(out.columns[0], axis=1, inplace=True)\n\n #name = path_directory + '/sp.csv'\n #sp = pd.read_csv(name, index_col=0, parse_dates=[1])\n \n #name = path_directory + '/GOOGL.csv'\n #nasdaq = pd.read_csv(name, index_col=1, parse_dates=[1])\n \n #name = path_directory + '/treasury.csv'\n #treasury = pd.read_csv(name, index_col=0, parse_dates=[1])\n \n #return [sp, nasdaq, djia, treasury, hkong, frankfurt, paris, nikkei, london, australia]\n #return [out, nasdaq, djia, frankfurt, hkong, nikkei, australia]\n return [out] \n\ndef count_missing(dataframe):\n \"\"\"\n count number of NaN in dataframe\n \"\"\"\n return (dataframe.shape[0] * dataframe.shape[1]) - dataframe.count().sum()\n\n \ndef addFeatures(dataframe, adjclose, returns, n):\n \"\"\"\n operates on two columns of dataframe:\n - n >= 2\n - given Return_* computes the return of day i respect to day i-n. 
\n    - given AdjClose_* computes its moving average on n days\n\n    \"\"\"\n    \n    return_n = adjclose[9:] + \"Time\" + str(n)\n    dataframe[return_n] = dataframe[adjclose].pct_change(n)\n    \n    roll_n = returns[7:] + \"RolMean\" + str(n)\n    dataframe[roll_n] = dataframe[returns].rolling(window=n,center=False).mean()\n\n    exp_ma = returns[7:] + \"ExponentMovingAvg\" + str(n)\n    dataframe[exp_ma] = dataframe[returns].ewm(halflife=30,ignore_na=False,min_periods=0,adjust=True).mean()\n    \ndef mergeDataframes(datasets):\n    \"\"\"\n    Merge Datasets into Dataframe.\n    \"\"\"\n    return pd.concat(datasets)\n\n    \ndef applyTimeLag(dataset, lags, delta):\n    \"\"\"\n    apply time lag to return columns selected according to delta.\n    Days to lag are contained in the lags list passed as argument.\n    Returns a NaN free dataset obtained cutting the lagged dataset\n    at head and tail\n    \"\"\"\n    maxLag = max(lags)\n\n    columns = dataset.columns[::(2*max(delta)-1)]\n    for column in columns:\n        newcolumn = column + str(maxLag)\n        dataset[newcolumn] = dataset[column].shift(maxLag)\n\n    return dataset.iloc[maxLag:-1, :]\n\n# CLASSIFICATION \ndef prepareDataForClassification(dataset, start_test):\n    \"\"\"\n    generates the categorical column to be predicted, attaches it to the dataframe\n    and labels the categories\n    \"\"\"\n    le = preprocessing.LabelEncoder()\n    \n    dataset['UpDown'] = dataset['Return_Out']\n    dataset.UpDown[dataset.UpDown >= 0] = 'Up'\n    dataset.UpDown[dataset.UpDown < 0] = 'Down'\n    dataset.UpDown = le.fit(dataset.UpDown).transform(dataset.UpDown)\n    \n    features = dataset.columns[1:-1]\n    X = dataset[features]\n    y = dataset.UpDown\n    \n    X_train = X[X.index < start_test]\n    y_train = y[y.index < start_test]\n    \n    X_test = X[X.index >= start_test]\n    y_test = y[y.index >= start_test]\n    \n    return X_train, y_train, X_test, y_test\n\ndef prepareDataForModelSelection(X_train, y_train, start_validation):\n    \"\"\"\n    gets train set and generates a validation set splitting the train.\n    The validation set is mandatory for feature and model selection.\n    \"\"\"\n    X = X_train[X_train.index < start_validation]\n    y = y_train[y_train.index < start_validation]\n    \n    X_val = X_train[X_train.index >= start_validation]\n    y_val = y_train[y_train.index >= start_validation]\n    \n    return X, y, X_val, y_val\n\n    \ndef performClassification(X_train, y_train, X_test, y_test, method, parameters={}):\n    \"\"\"\n    Perform Classification with the help of several Algorithms.\n    \"\"\"\n\n    print('Performing ' + method + ' Classification...')\n    print('Size of train set: ', X_train.shape)\n    print('Size of test set: ', X_test.shape)\n    print('Size of train set: ', y_train.shape)\n    print('Size of test set: ', y_test.shape)\n    \n\n    classifiers = [\n        RandomForestClassifier(n_estimators=100, n_jobs=-1),\n        neighbors.KNeighborsClassifier(),\n        SVC(C=10000, degree=100),  # note: SVC takes no 'epsilon' parameter (that belongs to SVR)\n        AdaBoostRegressor(),\n        AdaBoostClassifier(**parameters),  # was AdaBoostClassifier(**parameters)(), which mistakenly called the instance\n        GradientBoostingClassifier(n_estimators=100),\n        QDA(),\n        ]\n\n    scores = []\n\n    for classifier in classifiers:\n        scores.append(benchmark_classifier(classifier, \\\n            X_train, y_train, X_test, y_test))\n\n    print(scores)\n\ndef benchmark_classifier(clf, X_train, y_train, X_test, y_test):\n    clf.fit(X_train, y_train)\n    accuracy = clf.score(X_test, y_test)\n    #auc = roc_auc_score(y_test, clf.predict(X_test))\n    return accuracy\n\n# REGRESSION\n    \ndef getFeatures(X_train, y_train, X_test, num_features):\n    ch2 = SelectKBest(chi2, k=num_features)  # use the requested number of features instead of a hard-coded 5\n    X_train = ch2.fit_transform(X_train, y_train)\n    X_test = ch2.transform(X_test)\n    return X_train, X_test\n\n#### Works well for 
KNN only:begin\n\ndef discretize(value, min_val, max_val, range_touple=(0, 100)):\n unit = (max_val - min_val) / range_touple[1];\n return (value - min_val) * unit;\n\ndef analogize(value, min_val, max_val, range_touple=(0, 100)):\n unit = (max_val - min_val) / range_touple[1];\n return (value/unit) + min_val;\n \n### Works well for KNN only:end\n\n\n#LSTM \ndef create_dataset(dataset, features, output, look_back=1):\n dataX, dataY = [], []\n for i in range(len(dataset)-look_back-1):\n a = dataset.iloc[i:(i+look_back)][features].as_matrix()\n dataX.append(a)\n b = dataset.iloc[i + look_back][output];\n dataY.append(b)\n #print('__________a:', a)\n #print('__________b:', b)\n return np.array(dataX), np.array([dataY]);\n\ndef performRegression(dataset, split, symbol, output_dir):\n \"\"\"\n Performing Regression on \n Various algorithms\n \"\"\"\n \n dataset_cp = dataset.copy();\n minMaxScalerMap = {}; \n for i in dataset:\n scaler = MinMaxScaler(feature_range=(0, 1))\n minMaxScalerMap[i] = scaler\n dataset[i] = scaler.fit_transform(dataset[i].reshape(-1, 1))\n \n #touple = (4,5);\n \n# dataset['close'] = 0#dataset['close']/2\n# dataset['low'] = 0#dataset['low']/2\n# dataset['high'] = 0#dataset['high']/2\n# dataset['adj_close'] = 0#dataset['adj_close']/2\n\n features = dataset.columns[1:]\n \n #print(\"features::::::::::\", features)\n #features = features[touple[0]:touple[1]]\n #print(\"features::::::::::\", features)\n \n index = int(np.floor(dataset.shape[0]*split))\n train, test = dataset[:index], dataset[index:]\n\n print('*'*80)\n train_cp, test_cp = dataset_cp[:index], dataset_cp[index:]\n print('Size of train set: ', train.shape)\n print('Size of test set: ', test.shape)\n \n #train, test = getFeatures(train[features], \\\n # train[output], test[features], 16)\n \n\n # discretization\n # minMaxMap = {};\n # for i in features:\n # minMaxMap[i] = {\n # 'min': train[i].min(),\n # 'max': train[i].max()\n # };\n\n # for i in features:\n # train[i] = train[i].apply(lambda col: discretize(col, minMaxMap[i]['min'], minMaxMap[i]['max']))\n # for i in features:\n # test1[i] = test[i].apply(lambda col: discretize(col, minMaxMap[i]['min'], minMaxMap[i]['max']))\n \n output = dataset.columns[0]\n\n out_params = (symbol, output_dir);\n predicted_values = []\n \n svr = GridSearchCV(SVR(kernel='rbf'), cv=5,\n param_grid={\"C\": [1e0, 1e1, 1e2, 1e3], \"epsilon\": [0.0001, 0.00001, 0.000001, 0.0000001]})\n \n kr = GridSearchCV(KernelRidge(kernel='rbf'), cv=5,\n param_grid={\"alpha\": [1e0, 0.1, 1e-2, 1e-3]})\n \n \n \n rand_forest_r = RandomForestRegressor(n_estimators=10, n_jobs=-1);\n \n\n classifiers = [\n rand_forest_r,\n SVR(C=100000, kernel='rbf', epsilon=0.1, gamma=1, degree=2),#original: learnes fast workes not well\n SVR(C=1, kernel='rbf', epsilon=0.0000001, tol=0.00000001),#: learnes slow workes well, only common features\n #svr,#GridSearchCV, workes not well\n #kr,#GridSearchCV, KernelRidge svm works better\n BaggingRegressor(),\n AdaBoostRegressor(),\n KNeighborsRegressor(),\n GradientBoostingRegressor(),\n ]\n \n classifiers = []\n\n for classifier in classifiers:\n pred = benchmark_model(classifier, \\\n train, test, features, output, out_params, False, minMaxScalerMap, test_cp)\n\n predicted_values.append(pred)\n s = score(pred, test_cp, output)\n print(s)\n time.sleep(4) \n \n #return print_feature_importance(rand_forest_r, features)\n \n \n epochs=1\n batch_size=1\n\n # fix random seed for reproducibility\n seed = 7\n np.random.seed(seed)\n estimator = 
KerasRegressor(build_fn=baseline_model, epochs=epochs, batch_size=batch_size, verbose=1, shuffle=False) \n classifier1 = estimator #seems working better than simple baseline_model()\n \n classifier2 = lstm(look_back=1)\n \n print('begin: classifier1-classifier1'*5)\n \n \n# predicted_values.append(benchmark_model(classifier1, \\\n# train, test, features, output, out_params, True, minMaxScalerMap, test_cp))\n \n print('end: classifier1-classifier1'*5)\n \n print('begin: classifier2-classifier2'*5)\n \n predicted_values.append(benchmark_model(classifier2, \\\n train, test, features, output, out_params, 'LSTM', minMaxScalerMap, test_cp, epochs=epochs, batch_size=batch_size, verbose=1, shuffle=False))\n \n print('end: classifier2-classifier2'*5)\n\n print('-'*80)\n\n mean_squared_errors = []\n\n r2_scores = []\n\n for pred in predicted_values:\n s = score(pred, test_cp, output)\n mean_squared_errors.append(s[0])\n r2_scores.append(s[1])\n\n print(mean_squared_errors, r2_scores)\n\n return mean_squared_errors, r2_scores\n\ndef score(pred, test_cp, output):\n mean_squared_errors = (mean_squared_error(test_cp[output].as_matrix(), \\\n pred))\n r2_scores = r2_score(test_cp[output], pred)\n return mean_squared_errors, r2_scores\n\ndef print_feature_importance(rand_forest_r, features):\n print('00'*80)\n \n print('rand_forest_r:', rand_forest_r)\n \n print('len feature_importances_:', len(rand_forest_r.feature_importances_))\n \n print('feature_importances_:', rand_forest_r.feature_importances_)\n \n f_i_list = []; \n \n for i,f in enumerate(features):\n f_i_list.append((rand_forest_r.feature_importances_[i], f))\n \n f_i_list.sort(key=lambda x: x[0], reverse=True)\n \n for i in f_i_list:\n print(i)\n \n\n \n #my_file = '../../playground/output/tree.png';\n #for r_tree in rand_forest_r.estimators_:\n #r_tree = rand_forest_r.estimators_[0]\n #tree.export_graphviz(r_tree, out_file = my_file)\n \n \n print('010'*80)\n \n\ndef baseline_model():\n\t# create model\n model = Sequential()\n feature_count = 82\n model.add(Dense(feature_count, input_dim=feature_count, kernel_initializer='normal', activation='relu'))\n model.add(Dense(feature_count*2 +1, input_dim=feature_count, kernel_initializer='normal', activation='relu'))\n model.add(Dense(feature_count, input_dim=feature_count*2 +1, kernel_initializer='normal', activation='relu'))\n \n model.add(Dense(1, input_dim=feature_count, kernel_initializer='normal'))\n\t# Compile model\n model.compile(loss='mean_squared_error', optimizer='adam')\n \n model_img_output = '../../playground/output/mode_nn.png';\n plot_model(model, to_file=model_img_output, show_shapes=True)\n print('output model to:', model_img_output)\n return model\n\ndef lstm(look_back=1):\n feature_count = 82\n units = 500\n model = Sequential()\n model.add(LSTM(units, batch_input_shape=(None, look_back , feature_count ), return_sequences=True, activation='tanh', recurrent_activation='hard_sigmoid' )) #input_dim=feature_count\n \n model.add(LSTM(units, return_sequences=True, activation='tanh', recurrent_activation='hard_sigmoid'))\n \n model.add(LSTM(units, activation='tanh', recurrent_activation='hard_sigmoid'))\n\n model.add(Dropout(.2))\n model.add(Dense(1))\n model.add(Activation('linear'))\n model.compile(loss='mean_squared_error', optimizer='adam')\n \n model_img_output = '../../playground/output/mode_lstm.png';\n plot_model(model, to_file=model_img_output, show_shapes=True)\n print('output model to:', model_img_output)\n return model\n\ndef benchmark_model(model, train, test, features, 
output,\\\n output_params, isNN, minMaxScalerMap, test_cp, *args, **kwargs):\n '''\n Performs Training and Testing of the Data on the Model.\n '''\n\n print('-'*80)\n model_name = model.__str__().split('(')[0].replace('Regressor', ' Regressor')\n print(model_name)\n\n '''\n if 'SVR' in model.__str__():\n tuned_parameters = [{'kernel': ['rbf', 'polynomial'], 'gamma': [1e-3, 1e-4],\n 'C': [1, 10, 100, 1000]},\n {'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]\n model = GridSearchCV(SVC(C=1), tuned_parameters, cv=5,\n scoring='%s_weighted' % 'recall')\n '''\n\n symbol, output_dir = output_params\n \n if (not isNN):\n print('begin: fit')\n model.fit(train[features].as_matrix(), train[output].as_matrix(), *args, **kwargs)\n print('end: fit')\n print('begin: predict')\n predicted_value = model.predict(test[features].as_matrix())\n #predicted_value = analogize(predicted_value, minMaxMap[output]['min'], minMaxMap[output]['max'])\n print('end: predict')\n plt.plot(test_cp[output].as_matrix(), color='r', ls='-', label='Original Value')\n #test_cp.plot(y=output, color='r', ls='-', label='Original Value')\n plt.plot(minMaxScalerMap[output].inverse_transform(predicted_value), color='b', ls='-', label='predicted_value Value')\n# test_cp_2 = test_cp.copy()\n# test_cp_2['x'] = minMaxScalerMap[output].inverse_transform(predicted_value);\n# plt.plot(test_cp_2['x'], color='b', ls='-', label='predicted_value Value')\n elif isNN == True:\n print('begin: fit')\n model.fit(train[features].as_matrix(), train[output].as_matrix(), *args, **kwargs)\n print('end: fit')\n print('begin: predict')\n predicted_value = model.predict(test[features].as_matrix(), batch_size=5, verbose=1)\n print('predicted_value:', predicted_value)\n print('end: predict')\n plt.plot(test_cp[output].as_matrix(), color='r', ls='-', label='Original Value')\n #test_cp.plot(y=output, color='r', ls='-', label='Original Value')\n plt.plot(minMaxScalerMap[output].inverse_transform(predicted_value), color='b', ls='-', label='predicted_value Value')\n elif isNN == 'LSTM':\n train_cp = train.copy()\n train_cp['one'] = 1\n trainX = []\n epochs=120\n batch_size=1\n look_back = 50\n# for index, item in enumerate(train_cp['one']):\n# trainX.append((train[features].as_matrix()[index], 1, train[output].as_matrix()[index]))\n# print('trainX:')\n# \n# trainY = []\n# for index, item in enumerate(train_cp['one']):\n# trainY.append((train[features].as_matrix()[index], 1, train[output].as_matrix()[index]))\n# print('trainY:')\n\n trainX, trainY = create_dataset(train, features, output, look_back=look_back)\n \n train_fm = train[features].as_matrix()\n\n trainX = np.reshape(train_fm, (train_fm.shape[0], 1, train_fm.shape[1]))\n #trainX = np.reshape(trainX, (trainX.shape[0], look_back, trainX.shape[1]))\n\n print('begin: fit')\n model.fit(trainX, train[output].as_matrix(), epochs=epochs, batch_size=batch_size, verbose=1)\n #model.fit(trainX, trainY, epochs=1, batch_size=1, verbose=1)\n print('end: fit')\n print('begin: predict')\n \n test_fm = test[features].as_matrix()\n\n testX = np.reshape(test_fm, (test_fm.shape[0], 1, test_fm.shape[1]))\n\n predicted_value = model.predict(testX)\n print('predicted_value:', predicted_value)\n print('end: predict')\n plt.plot(test_cp[output].as_matrix(), color='r', ls='-', label='Original Value')\n #test_cp.plot(y=output, color='r', ls='-', label='Original Value')\n plt.plot(minMaxScalerMap[output].inverse_transform(predicted_value), color='b', ls='-', label='predicted_value Value')\n \n\n plt.xlabel('Number of Set')\n 
plt.ylabel('Output Value')\n\n plt.title(model_name)\n plt.legend(loc='best')\n plt.tight_layout()\n if output_dir:\n plt.savefig(os.path.join(output_dir, str(symbol) + '_' \\\n + model_name + '.png'), dpi=100)\n else:\n plt.show()\n plt.clf()\n\n return predicted_value\n","sub_path":"scripts/Algorithms/regression_helpers.py","file_name":"regression_helpers.py","file_ext":"py","file_size_in_byte":18205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"185717911","text":"import os\n\nfrom weppy import App, request\n\napp = App(__name__)\n\n\n\n\nif __name__ == \"__main__\":\n # important: bind to Heroku's PORT env variable, defaulting to 5000\n port = os.getenv('PORT', 5000)\n app.run(host='0.0.0.0', port=int(port), debug=True)\n","sub_path":"manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"417042845","text":"import matplotlib\nmatplotlib.use('agg')\nimport matplotlib.pyplot as plt\nimport matplotlib.lines as mat_line\nimport os.path\n\n\nclass Plotter():\n '''\n Plot prepared data as .svg and .png files using\n matplotlib.\n\n :param repo: Reference to the central Repo object\n '''\n\n def __init__(self, repo):\n self.repo = repo\n\n def plot_to_file(self):\n r'''Plots all the variations of the chosen Result as .svg and .png.\n\n Files are named using :attr:`repo.input_file_name` and a\n suffix which describes the plot.\n\n This includes plots:\n * :attr:`repo.input_file_name`.svg/.png as model curve only\n * :attr:`repo.input_file_name`\\_input_points.svg/.png as model curve\n with input points.\n * :attr:`repo.input_file_name`\\_input_curve.svg/.png as model curve\n with input points plotted as a curve.\n * :attr:`repo.input_file_name`\\_input_curve\\_difference.svg/.png as\n model curve, input points as a curve, and difference curve.\n * :attr:`repo.input_file_name`\\_difference\\_fft.svg/.png as the\n difference curve and its FFT spectrum.\n '''\n\n fig = plt.figure()\n self.ax = fig.add_subplot(111)\n\n # just the model curve\n model_curve = mat_line.Line2D(\n self.repo.plot_data.model['t'],\n self.repo.plot_data.model['y'],\n zorder=2,\n color='red'\n )\n\n self.ax.add_line(model_curve)\n\n for exponential in self.repo.plot_data.exponentials:\n exponential_curve = mat_line.Line2D(\n exponential['t'],\n exponential['y'],\n zorder=3,\n color='black'\n )\n self.ax.add_line(exponential_curve)\n\n if self.repo.plot_data.constant['y'].all():\n\n constant_curve = mat_line.Line2D(\n self.repo.plot_data.constant['t'],\n self.repo.plot_data.constant['y'],\n zorder=3,\n color='black'\n )\n self.ax.add_line(constant_curve)\n self.ax.autoscale_view()\n\n self.ax.autoscale_view()\n self.ax.set_xlabel('Time t')\n self.ax.set_ylabel('y')\n self.ax.set_title(\"Model curve only\")\n\n plot_path = os.path.join(self.repo.plot_folder,\n self.repo.input_file_name)\n\n fig.savefig(plot_path + \".svg\")\n\n # fitted curve + input data as points\n input_points = mat_line.Line2D(\n self.repo.plot_data.input_points['t'],\n self.repo.plot_data.input_points['y'],\n marker='o',\n markerfacecolor='blue',\n zorder=1,\n linewidth=0\n )\n\n self.ax.add_line(input_points)\n self.ax.set_title(\"Data points + model curve\")\n self.ax.autoscale_view()\n\n fig.savefig(plot_path + \"_input_points\" + \".svg\")\n\n # fitted curve + input data as curve\n self.ax.cla()\n self.ax.add_line(model_curve)\n input_curve = mat_line.Line2D(\n self.repo.plot_data.input_points['t'],\n
self.repo.plot_data.input_points['y'],\n zorder=1,\n color='blue'\n )\n self.ax.add_line(input_curve)\n self.ax.set_title(\"Data curve + model curve\")\n self.ax.autoscale_view()\n\n fig.savefig(plot_path + \"_input_curve\" + \".svg\")\n\n # fitted curve + input data as curve + difference curve\n difference_line = mat_line.Line2D(\n self.repo.plot_data.difference['t'],\n self.repo.plot_data.difference['y'],\n zorder=1,\n color='black',\n )\n self.ax.add_line(difference_line)\n self.ax.set_title(\"Data curve + model curve + difference curve\")\n self.ax.autoscale_view()\n\n fig.savefig(plot_path + \"_input_curve_difference\" + \".svg\")\n\n difference_fft = mat_line.Line2D(\n self.repo.plot_data.difference_fft['t'],\n self.repo.plot_data.difference_fft['y'],\n zorder=1,\n color='red',\n )\n\n difference_line = mat_line.Line2D(\n self.repo.plot_data.difference['t'],\n self.repo.plot_data.difference['y'],\n zorder=1,\n color='black',\n )\n\n fig_2 = plt.figure()\n self.bx = fig_2.add_subplot(2, 1, 1)\n self.bx.yaxis.major.formatter.set_powerlimits((-2, 2))\n self.bx.ticklabel_format(style='sci', axis='y')\n self.bx.add_line(difference_line)\n self.bx.set_xlabel('Time')\n self.bx.set_ylabel('Amplitude')\n self.bx.autoscale_view()\n self.bx.set_title('Difference curve + Fourier transform spectrum')\n\n self.cx = fig_2.add_subplot(2, 1, 2)\n self.cx.yaxis.major.formatter.set_powerlimits((-2, 2))\n self.cx.ticklabel_format(style='sci', axis='y')\n self.cx.add_line(difference_fft)\n self.cx.set_xlabel('Freq (Hz)')\n self.cx.set_ylabel('|Y(freq)|')\n self.cx.autoscale_view()\n\n fig_2.savefig(plot_path + \"_difference_fft\" + \".svg\")\n","sub_path":"py/expofit_web/apps/expofit/plotter.py","file_name":"plotter.py","file_ext":"py","file_size_in_byte":5128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"291054833","text":"from django import template\nfrom django.core.urlresolvers import reverse_lazy\n\nfrom mysite.models import Wallpaper\n\nregister = template.Library()\n\n\nclass TemplateWallpaper(template.Node):\n def render(self, context):\n context['wallpaper'] = Wallpaper.get_random_wallpaper()\n return ''\n\n\n@register.tag\ndef get_wallpaper(parser, token):\n return TemplateWallpaper()\n\n\n@register.simple_tag(takes_context=True)\ndef url_qs(context, *args, **kwargs):\n \"\"\" takes the current url and query string from the request context,\n accepts named arguments, and constructs a complete url string with\n the matching query string parameters swapped out for the named\n arguments.\n\n NOTE: needs 'django.core.context_processors.request' under the settings\n variable TEMPLATE_CONTEXT_PROCESSORS\n\n e.g.) request.path -> http://www.slackerparadise.com/ideas\n request.META.QUERY_STRING -> idea=miscellaneous&p=4\n kwargs -> { 'p' : 23 }\n\n returns ->\n http://www.slackerparadise.com/ideas?idea=miscellaneous&p=23\n \"\"\"\n request = context['request']\n\n # get query string parameters\n params = {}\n for pair in request.META['QUERY_STRING'].split(\"&\"):\n parts = pair.split(\"=\")\n if not parts:\n continue\n\n if len(parts) >= 2:\n key, value = parts[0], parts[1]\n else:\n key, value = parts[0], ''\n\n if not key:\n continue\n\n params[str(key)] = str(value)\n\n # add/replace query string parameters in kwargs\n for kwarg, value in kwargs.items():\n params[str(kwarg)] = str(value)\n\n query_string = \"&\".join([k + \"=\" + v for k, v in params.items()])\n if query_string:\n query_string = '?' 
+ query_string\n\n return reverse_lazy(args[0]) + query_string","sub_path":"mysite/templatetags/mysite_tags.py","file_name":"mysite_tags.py","file_ext":"py","file_size_in_byte":1837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"432754317","text":"\"\"\"Test the write database.\n\"\"\"\nimport json\nimport pytest\nimport os\nimport time\nimport numpy as np\nfrom vectorai.models.deployed import ViText2Vec\nfrom vectorai.write import ViWriteClient\nfrom vectorai.errors import APIError, MissingFieldError, MissingFieldWarning, CollectionNameError\nfrom vectorai.client import ViClient\nfrom .utils import TempClientWithDocs\n\nclass TestCollectionBasics:\n @pytest.mark.use_client\n def test_create_collection(self, test_client, test_collection_name, test_vector_field):\n collection_name = test_collection_name\n if collection_name in test_client.list_collections():\n test_client.delete_collection(collection_name)\n response = test_client.create_collection(\n collection_name=collection_name, collection_schema={test_vector_field: 512}\n )\n assert response is None\n\n @pytest.mark.use_client\n def test_prevent_collection_overwrite(self, test_client, test_collection_name):\n \"\"\"\n Test prevention of the overwriting of the collections.\n \"\"\"\n if test_collection_name not in test_client.list_collections():\n test_client.create_collection(test_collection_name)\n with pytest.raises(APIError):\n response = test_client.create_collection(collection_name=test_collection_name)\n\n @pytest.mark.use_client\n def test_list_collections(self, test_collection_name, test_client):\n response = test_client.list_collections()\n assert response.count(test_collection_name) == 1\n\n @pytest.mark.use_client\n def test_delete_collection(self, test_client, test_collection_name):\n response = test_client.delete_collection(collection_name=test_collection_name)\n assert response['status'] == 'complete'\n\ndef test_dummy_vector(test_client):\n \"\"\"\n Test the dummy vector\n \"\"\"\n assert len(test_client.dummy_vector(512)) == 512\n\ndef test_set_field_on_new_field(test_client):\n \"\"\"\n Assert when set on new field.\n \"\"\"\n doc = {}\n test_client.set_field('balls', doc, 3)\n assert doc['balls'] == 3\n\ndef test_set_field_on_new_dict(test_client):\n doc = {}\n test_client.set_field('check.balls', doc, 3)\n assert test_client.get_field('check.balls', doc) == 3\n\ndef test_vector_name(test_client):\n text_encoder = ViText2Vec(os.environ['VI_USERNAME'], os.environ['VI_API_KEY'])\n test_client.set_name(text_encoder, 'vectorai_text')\n vector_name = test_client._get_vector_name_for_encoding(\"color\", text_encoder, model_list=[text_encoder])\n assert vector_name == \"color_vectorai_text_vector_\"\n\ndef test_vector_name_2(test_client):\n text_encoder = ViText2Vec(os.environ['VI_USERNAME'], os.environ['VI_API_KEY'])\n text_encoder_2 = ViText2Vec(os.environ['VI_USERNAME'], os.environ['VI_API_KEY'])\n test_client.set_name(text_encoder, \"vectorai\")\n test_client.set_name(text_encoder_2, \"vectorai_2\")\n vector_name = test_client._get_vector_name_for_encoding(\"color\", text_encoder, model_list=[text_encoder, text_encoder_2])\n assert vector_name == \"color_vectorai_vector_\"\n vector_name = test_client._get_vector_name_for_encoding(\"color\", text_encoder_2, model_list=[text_encoder, text_encoder_2])\n assert vector_name == 'color_vectorai_2_vector_'\n\ndef test_vector_name_same_name(test_client):\n text_encoder = ViText2Vec(os.environ['VI_USERNAME'], 
os.environ['VI_API_KEY'])\n with pytest.raises(ValueError):\n vector_name = test_client._check_if_multiple_models_have_same_name(models={'color':[text_encoder, text_encoder]})\n\ndef test_encode_documents_With_models_using_encode(test_client):\n docs = test_client.create_sample_documents(5)\n text_encoder = ViText2Vec(os.environ['VI_USERNAME'], os.environ['VI_API_KEY'])\n test_client.set_name(text_encoder, \"vectorai_text\")\n test_client.encode_documents_with_models_using_encode(docs, models={'color': [text_encoder]})\n assert 'color_vectorai_text_vector_' in docs[0].keys()\n\n@pytest.mark.use_client\ndef test_raises_warning_if_no_id(test_client, test_collection_name):\n docs = test_client.create_sample_documents(10)\n {x.pop('_id') for x in docs}\n with pytest.warns(MissingFieldWarning) as record:\n test_client.insert_documents(test_collection_name, docs)\n assert len(record) > 1\n assert record[1].message.args[0] == test_client.NO_ID_WARNING_MESSAGE\n\n@pytest.mark.use_client\ndef test_raises_warning_if_only_one_id_is_present(test_client, test_collection_name):\n docs = test_client.create_sample_documents(10)\n {x.pop('_id') for x in docs[1:]}\n with pytest.warns(MissingFieldWarning) as record:\n test_client.insert_documents(test_collection_name, docs)\n assert record[0].message.args[0] == test_client.NO_ID_WARNING_MESSAGE\n\n@pytest.mark.use_client\ndef test_retrieve_and_encode_simple(test_client, test_collection_name):\n \"\"\"Test retrieving documents and encoding them with vectors.\n \"\"\"\n VECTOR_LENGTH = 100\n def fake_encode(x):\n return test_client.generate_vector(VECTOR_LENGTH)\n with TempClientWithDocs(test_client, test_collection_name, 100) as client:\n results = client.retrieve_and_encode(test_collection_name,\n models={'country': fake_encode})\n assert list(client.collection_schema(test_collection_name)['country_vector_'].keys())[0] == 'vector'\n assert len(results['failed_document_ids']) == 0\n assert 'country_vector_' in client.collection_schema(test_collection_name)\n docs = client.retrieve_documents(test_collection_name)['documents']\n assert len(docs[0]['country_vector_']) == VECTOR_LENGTH\n\n@pytest.mark.parametrize('collection_name',['HIUFE', 'HUIF_;', 'fheuwiHF'])\ndef test_collection_name_error(test_client, collection_name):\n with pytest.raises(CollectionNameError):\n test_client._typecheck_collection_name(collection_name)\n\n@pytest.mark.parametrize('collection_name', ['fehwu'])\ndef test_collection_name_not_error(test_client, collection_name):\n test_client._typecheck_collection_name(collection_name)\n assert True\n","sub_path":"tests/test_write_collection_basics.py","file_name":"test_write_collection_basics.py","file_ext":"py","file_size_in_byte":6056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"74875770","text":"# -*- coding: utf-8 -*-\n\"\"\"\nActions module.\n\nActions are each one of the steps that form a \"Campaign\".\nActions glue together a Technique, a Worker and a Target.\n\"\"\"\n\nimport random\nimport socket\nimport logging\nimport time\nimport uuid\n\nfrom .errors import ActionExecutionError\n\n\nclass Action():\n def __init__(\n self,\n phase,\n name,\n technique,\n goals,\n targets=None,\n targets_query=None,\n max_targets=10,\n wait=True,\n timeout=None\n ):\n self.uid = f'a-{str(uuid.uuid4())[-4:]}'\n self.phase = phase\n self.name = name\n self.technique = technique\n self.goals = goals\n self.targets = set(targets) if targets else set()\n self.targets_query = targets_query\n 
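# Hypothetical usage sketch (added for illustration; the query shape is an\n # assumption inferred from find_targets below, not a documented API):\n # Action(phase='exploit', name='probe', technique=tech, goals=goals,\n # targets_query={'session': 'meterpreter'})\n 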
self.max_targets = max_targets\n self.wait = wait\n self.timeout = timeout\n self.attempts = 0\n self.succeeded = False\n self._session_id = None # XXX remove\n if not targets and not targets_query:\n raise ValueError(\n 'An Action requires either a list of targets or a target query expression'\n )\n\n def find_targets(self, worker):\n # XXX implement in workers\n # either use resource files (rc/ruby/python)\n # or use simple commands like vulns, then parse in this python\n if 'session' in self.targets_query:\n self.targets.add('172.19.0.7')\n session_id, _ = worker.find_last_session(\n self.targets_query['session'],\n {}\n )\n if session_id:\n print(f'Found session={session_id}')\n self._session_id = session_id\n return session_id\n if not self.targets:\n self.targets = set([\n '172.19.0.3',\n '172.19.0.4',\n '172.19.0.5',\n '172.19.0.6',\n '172.19.0.7',\n '172.19.0.8',\n ])\n return self.targets\n\n def execute(self, worker):\n if not self.targets:\n self.find_targets(worker)\n execution_targets = random.sample(\n self.targets,\n min(len(self.targets), self.max_targets)\n )\n parameters = {\n 'RHOSTS': ','.join(execution_targets),\n }\n if self._session_id:\n parameters['SESSION'] = self._session_id\n kwargs = {}\n if self.wait:\n kwargs['wait'] = self.wait\n if self.timeout:\n kwargs['timeout'] = self.timeout\n self.technique.execute(worker, parameters, **kwargs)\n\n def verify_goals(self, worker, refresh=False):\n if self.succeeded and not refresh:\n return True\n for target in self.targets:\n if worker.verify_goals(self.goals, target):\n logging.info(f'Goal achieved for target {target}')\n self.succeeded = True\n return True\n self.succeeded = False\n return False\n\n def __str__(self):\n return f'{self.uid} ({self.phase}/{self.name})'\n","sub_path":"src/director/actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":3098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"387122477","text":"# -*- coding:utf-8 -*-\n\n\n# Given an integer n, count the total number of digit 1 appearing in all non-negative integers less than or equal to n.\r\n#\n# Example:\r\n#\n#\n# Input: 13\r\n# Output: 6 \r\n# Explanation: Digit 1 occurred in the following numbers: 1, 10, 11, 12, 13.\r\n#\n#\n\n\nclass Solution(object):\n def countDigitOne(self, n):\n \"\"\"\n :type n: int\n :rtype: int\n \"\"\"\n '''\n https://blog.csdn.net/dd523762588/article/details/46816133\n For a number abcd:\nwhen d > 1, the count of 1s in the ones place is (abc + 1)*1\nwhen d = 1, the count of 1s in the ones place is abc * 1 + 0 + 1, where 0 stands for the digits after d. For 11, the numbers with a 1 in the ones place are 11 and 1\nwhen d = 0, the count of 1s in the ones place is abc * 1\nFor the other digit positions, the corresponding 1 becomes 10, 100, 1000..\n\n\n123123~12312\n12312~1231\n1231~123\n123~12\n12~1\n\n1231 23\n---------------------\n\nFrom dinglin0xff's CSDN blog, full post at: https://blog.csdn.net/dd523762588/article/details/46816133?utm_source=copy \n '''\n \n if n <=0:\n return 0\n \n count = 0\n base = 1\n last = 0\n while n:\n unit = n%10\n n = n//10\n \n if unit ==0:\n count = n * base + count\n elif unit == 1:\n count = n * base + (1 + last) +count # the extra 1 is for the base itself, last is for the trailing digits\n else:\n count = (n + 1) * base + count # must exceed 1\n \n last = base * unit + last\n base = base * 10\n return count\n\n \n","sub_path":"233-number-of-digit-one/number-of-digit-one.py","file_name":"number-of-digit-one.py","file_ext":"py","file_size_in_byte":1613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"621987984","text":"import random, sys, time, math, pygame\r\nfrom pygame.locals import *\r\n\r\nFPS = 30 # 
frames per second to update the screen\r\n#not used yet, should be\r\nWINWIDTH = 640 # width of the program's window, in pixels\r\nWINHEIGHT = 480 # height in pixels\r\n\r\nWHITE = (255, 255, 255)\r\nRED = (255, 0, 0)\r\n\r\n\r\npygame.init()\r\n\r\nDISPLAYSURF = pygame.display.set_mode((WINWIDTH, WINHEIGHT))\r\nBASICFONT = pygame.font.Font('freesansbold.ttf', 32)\r\n\r\n\r\n\r\nquit=False\r\n\r\nup=False\r\nleft=False\r\nright=False\r\ndown=False\r\n\r\n\r\nFPSCLOCK = pygame.time.Clock()\r\n\r\nship=pygame.image.load(\"ship.png\")\r\n\r\npx=200\r\npy=200\r\n\r\nwhile quit==False:\r\n #treating events\r\n for event in pygame.event.get(): # event handling loop\r\n if event.type == QUIT:\r\n quit=True\r\n #terminate()\r\n elif event.type == KEYDOWN:\r\n if event.key == (K_UP):\r\n up=True\r\n elif event.key == (K_DOWN):\r\n down=True \r\n elif event.key == (K_LEFT):\r\n left=True\r\n elif event.key == (K_RIGHT):\r\n right=True\r\n elif event.type == KEYUP:\r\n if event.key ==K_LEFT:\r\n left=False\r\n elif event.key == (K_RIGHT):\r\n right=False\r\n elif event.key == (K_UP):\r\n up=False\r\n elif event.key == (K_DOWN):\r\n down=False\r\n elif event.key == K_ESCAPE:\r\n quit=True\r\n #updating model\r\n if up ==True:\r\n py=py-1\r\n \r\n\r\n #display\r\n DISPLAYSURF.fill(WHITE)\r\n DISPLAYSURF.blit(ship,(px,py,64,64))\r\n \r\n\r\n pygame.display.update()\r\n FPSCLOCK.tick(FPS)\r\n\r\npygame.quit()\r\n\r\n\r\n\r\n\r\n\r\n##def titleScreen(ctx):\r\n## titlepic=pygame.image.load(\"title.png\")\r\n## ctx.DISPLAYSURF.blit(titlepic,\r\n## pygame.Rect( (0 ,\r\n## 0,\r\n## WINWIDTH,\r\n## WINHEIGHT )) \r\n## )\r\n## pygame.display.update()\r\n##\r\n## waitForPlayer=True\r\n## while waitForPlayer :\r\n## for event in pygame.event.get(): # event handling loop\r\n## if event.type == KEYDOWN:\r\n## if event.key == ( K_j):\r\n## waitForPlayer=False \r\n## elif event.type == JOYBUTTONDOWN:\r\n## print(\"Joystick button pressed.\")\r\n## waitForPlayer=False \r\n## elif event.type == pygame.JOYBUTTONUP:\r\n## print(\"Joystick button released.\")\r\n##\r\n##\r\n##def main():\r\n## \r\n## pygame.init()\r\n## # Initialize the joysticks\r\n## pygame.joystick.init()\r\n## # Get count of joysticks\r\n## joystick_count = pygame.joystick.get_count()\r\n## potJoy1=None\r\n## potJoy2=None\r\n##\r\n## print(\"Number of joysticks: \"+str(joystick_count) )\r\n## for i in range(joystick_count):\r\n## joystick = pygame.joystick.Joystick(i)\r\n## joystick.init()\r\n## if i==0:\r\n## potJoy1=joystick\r\n## elif i==1:\r\n## potJoy2=joystick\r\n##\r\n## poller = InputPoller(potJoy1,potJoy2)\r\n## ctx=RdrContext()\r\n### DISPLAYSURF = pygame.display.set_mode((WINWIDTH, WINHEIGHT),pygame.FULLSCREEN)\r\n## ctx.DISPLAYSURF = pygame.display.set_mode((WINWIDTH, WINHEIGHT))\r\n## ctx.BASICFONT = pygame.font.Font('freesansbold.ttf', 32)\r\n## #lvlFolder=\"level2\"\r\n## #lvlFolder=\"level1\"\r\n##\r\n## stageList=[\"level1\",\"level2\"]\r\n## current=-1\r\n## #not to refactor too many things, let's return an outcome for the level\r\n## # VICTORY , or GAME OVER )\r\n## # VICTORY goes to next level\r\n## outcome=\"next\"\r\n### while True:\r\n## while outcome!=\"quit\":\r\n## if outcome ==\"next\":\r\n## #TODO check if current in range, otherwise victory screen\r\n## current+=1\r\n## if current <= ( len(stageList)-1 ): \r\n## outcome=runGame(ctx,poller,stageList[current])\r\n## else:\r\n## print(' last level finished, game won ')\r\n## outcome = \"quit\"\r\n##\r\n##\r\n##\r\n##def runGame(ctx,poller,lvlFolder):\r\n##\r\n## ctx.FPSCLOCK 
= pygame.time.Clock()\r\n## \r\n## pygame.display.set_icon(pygame.image.load('gameicon.png'))\r\n## pygame.display.set_caption('PAPER POCKY')\r\n##\r\n## # load the image files\r\n## L_POCK_IMG = pygame.image.load('pocky.png')\r\n## R_POCK_IMG = pygame.transform.flip(L_POCK_IMG, True, False)\r\n## L_ROCK_IMG = pygame.image.load('rocky.png')\r\n## R_ROCK_IMG = pygame.transform.flip(L_ROCK_IMG, True, False)\r\n## two_p_folder='kids/'\r\n## one_p_folder='kids/'\r\n### two_p_folder='./'\r\n## one_p_down_img = pygame.image.load(one_p_folder+'1pdown.png')\r\n## two_p_down_img = pygame.image.load(two_p_folder+'2pdown.png')\r\n## one_p_left_img = pygame.image.load(one_p_folder+'1pleft.png')\r\n## two_p_left_img = pygame.image.load(two_p_folder+'2pleft.png')\r\n## one_p_right_img = pygame.image.load(one_p_folder+'1pright.png')\r\n## two_p_right_img = pygame.image.load(two_p_folder+'2pright.png')\r\n## one_p_up_img = pygame.image.load(one_p_folder+'1pup.png')\r\n## two_p_up_img = pygame.image.load(two_p_folder+'2pup.png')\r\n##\r\n## BULLET_IMG= pygame.image.load('bullet.png')\r\n## BADDYBULLET_IMG = pygame.image.load('baddybullet.png')\r\n## RACKET_IMG= pygame.image.load('racket.png')\r\n##\r\n## img_bd_pool={}\r\n##\r\n## #wip refactor\r\n## tstImgName='redknight.png'\r\n## img_bd_pool[tstImgName]=pygame.image.load(tstImgName)\r\n## tstImgName='funkyspider.png'\r\n## img_bd_pool[tstImgName]=pygame.image.load(tstImgName)\r\n## tstImgName='straxus.png'\r\n## img_bd_pool[tstImgName]=pygame.image.load(tstImgName)\r\n## tstImgName='miniskel.png'\r\n## img_bd_pool[tstImgName]=pygame.image.load(tstImgName)\r\n## tstImgName='exit.png'\r\n## img_bd_pool[tstImgName]=pygame.image.load(tstImgName)\r\n## #TST_ENNEMY_IMG=pygame.image.load(tstImgName)\r\n## \r\n##\r\n## #after pygame init, pygame stuff inside\r\n##\r\n##\r\n## bgRect=pygame.Rect(0,0,640,480);\r\n##\r\n##\r\n## # create the surfaces to hold game text\r\n### gameOverSurf = BASICFONT.render('Game Over', True, WHITE)\r\n### gameOverRect = gameOverSurf.get_rect()\r\n### gameOverRect.center = (HALF_WINWIDTH, HALF_WINHEIGHT)\r\n##\r\n##\r\n## #firstLvlFolder=\"level1\"\r\n##\r\n## # stores the player object:\r\n## \r\n## lvl = LvlRun(lvlFolder)\r\n#### lvl.hello()\r\n##\r\n##\r\n## ply1=Player(L_POCK_IMG,R_POCK_IMG,RACKET_IMG,one_p_down_img,one_p_up_img,one_p_left_img,one_p_right_img,LEFT,lvl) \r\n## ply2=Player(L_ROCK_IMG,R_ROCK_IMG,RACKET_IMG,two_p_down_img,two_p_up_img,two_p_left_img,two_p_right_img,LEFT,lvl) \r\n## ply1.set_other_ply(ply2)\r\n## ply2.set_other_ply(ply1)\r\n##\r\n## lvl.players.append(ply1)\r\n## lvl.players.append(ply2)\r\n## \r\n## \r\n## pygame.mixer.music.load(lvlFolder+'/bgmusic.wav')\r\n## pygame.mixer.music.play(-1)\r\n##\r\n##\r\n## titleScreen(ctx)\r\n##\r\n### poller = InputPoller()\r\n##\r\n## \r\n## while True: # main game loop\r\n## # draw the green background\r\n## ctx.DISPLAYSURF.fill(WHITE)\r\n##\r\n## ctx.DISPLAYSURF.blit(lvl.bgDict[ 'x'+str(lvl.xScreen)+'y'+str(lvl.yScreen) ],bgRect)\r\n##\r\n##\r\n## # draw the player squirrel\r\n## #if True :\r\n## ply1.rect = pygame.Rect( (ply1.x ,\r\n## ply1.y - getBounceAmount(ply1.bounce, ply1.BOUNCERATE, ply1.BOUNCEHEIGHT),\r\n## ply1.surface.get_width(),\r\n## ply1.surface.get_height()) )\r\n## ctx.DISPLAYSURF.blit(ply1.surface, ply1.rect)\r\n##\r\n## #TODO blit head\r\n## ply1.rect = pygame.Rect( (ply1.x ,\r\n## ply1.y - getBounceAmount(ply1.bounce, ply1.BOUNCERATE, ply1.BOUNCEHEIGHT)-32,\r\n## ply1.surface.get_width(),\r\n## ply1.surface.get_height()) )\r\n## 
ctx.DISPLAYSURF.blit(ply1.head_img, ply1.rect)\r\n##\r\n## #display wipe if active\r\n## if ply1.currentlyWiping:\r\n## #get x y then blit\r\n## ply1.wipeRect = pygame.Rect( (ply1.xWipe ,\r\n## ply1.yWipe,\r\n## ply1.wipe_img.get_width(),\r\n## ply1.wipe_img.get_height() ))\r\n## ctx.DISPLAYSURF.blit(ply1.wipe_img, ply1.wipeRect)\r\n##\r\n##\r\n## ply2.rect = pygame.Rect( (ply2.x ,\r\n## ply2.y - getBounceAmount(ply2.bounce, ply2.BOUNCERATE, ply2.BOUNCEHEIGHT),\r\n## ply2.surface.get_width(),\r\n## ply2.surface.get_height()) )\r\n## ctx.DISPLAYSURF.blit(ply2.surface, ply2.rect)\r\n## #TODO blit head\r\n## ply2.rect = pygame.Rect( (ply2.x ,\r\n## ply2.y - getBounceAmount(ply2.bounce, ply2.BOUNCERATE, ply2.BOUNCEHEIGHT)-32,\r\n## ply2.surface.get_width(),\r\n## ply2.surface.get_height()) )\r\n## ctx.DISPLAYSURF.blit(ply2.head_img, ply2.rect)\r\n##\r\n## #draw the bullets of the players\r\n## for bul in ply1.bullets:\r\n## ctx.DISPLAYSURF.blit(BULLET_IMG,pygame.Rect(bul['x'],bul['y'],BULLET_IMG.get_width(),BULLET_IMG.get_height()))\r\n##\r\n## for bul in ply2.bullets:\r\n## ctx.DISPLAYSURF.blit(BULLET_IMG,pygame.Rect(bul['x'],bul['y'],BULLET_IMG.get_width(),BULLET_IMG.get_height()))\r\n##\r\n## try:\r\n###TODO replace with \"current\" maintained list in lvlrun\r\n## tmpScrDat = lvl.screenDataDict['x'+str(lvl.xScreen)+'y'+str(lvl.yScreen)]\r\n## #displaying ennemies\r\n## ennemies=tmpScrDat['ennemies']\r\n## for ennemy in ennemies:\r\n## tmpic=img_bd_pool[ennemy['pic']]\r\n### ctx.DISPLAYSURF.blit(TST_ENNEMY_IMG,pygame.Rect(ennemy['x'],ennemy['y'],TST_ENNEMY_IMG.get_width(),TST_ENNEMY_IMG.get_height()))\r\n## ctx.DISPLAYSURF.blit(tmpic,pygame.Rect(ennemy['x'],ennemy['y'],tmpic.get_width(),tmpic.get_height()))\r\n## except KeyError:\r\n## pass\r\n## for gen in lvl.genericEnnemies:\r\n## ctx.DISPLAYSURF.blit(BADDYBULLET_IMG,pygame.Rect(gen.x,gen.y,BADDYBULLET_IMG.get_width(),BADDYBULLET_IMG.get_height()))\r\n##\r\n## poller.consumeEvents()\r\n##\r\n##\r\n## #ply1\r\n## if poller.p1Left:\r\n## ply1.notif_left()\r\n## else:\r\n## ply1.moveLeft=False\r\n## \r\n## if poller.p1Right:\r\n## ply1.notif_right()\r\n## else:\r\n## ply1.moveRight=False\r\n## \r\n## if poller.p1Down:\r\n## ply1.notif_down()\r\n## else:\r\n## ply1.moveDown=False\r\n## \r\n## if poller.p1Up:\r\n## ply1.notif_up()\r\n## else:\r\n## ply1.moveUp=False\r\n## \r\n## if poller.p1Fire:\r\n## ply1.fire=True\r\n## else:\r\n## ply1.fire=False\r\n##\r\n## if poller.p1Swipe:\r\n## ply1.wipe=True \r\n##\r\n## #ply2\r\n## if poller.p2Left:\r\n## ply2.notif_left()\r\n## else:\r\n## ply2.moveLeft=False\r\n## \r\n## if poller.p2Right:\r\n## ply2.notif_right()\r\n## else:\r\n## ply2.moveRight=False\r\n## \r\n## if poller.p2Down:\r\n## ply2.notif_down()\r\n## else:\r\n## ply2.moveDown=False\r\n## \r\n## if poller.p2Up:\r\n## ply2.notif_up()\r\n## else:\r\n## ply2.moveUp=False\r\n## \r\n## if poller.p2Fire:\r\n## ply2.fire=True\r\n## else:\r\n## ply2.fire=False\r\n##\r\n## if poller.p2Swipe:\r\n## ply2.wipe=True \r\n##\r\n## #next level check\r\n##\r\n##\r\n##\r\n## lvl.update_model()\r\n## if lvl.triggerNextLevel:\r\n## return \"next\"\r\n## if poller.quit:\r\n## terminate()\r\n##\r\n## pygame.display.update()\r\n## ctx.FPSCLOCK.tick(FPS)\r\n##\r\n##\r\n##\r\n##\r\n##\r\n##\r\n##def terminate():\r\n## pygame.quit()\r\n## sys.exit()\r\n##\r\n##\r\n##def getBounceAmount(currentBounce, bounceRate, bounceHeight):\r\n## # Returns the number of pixels to offset based on the bounce.\r\n## # Larger bounceRate means a slower bounce.\r\n## # Larger 
bounceHeight means a higher bounce.\r\n## # currentBounce will always be less than bounceRate\r\n## return int(math.sin( (math.pi / float(bounceRate)) * currentBounce ) * bounceHeight)\r\n##\r\n##\r\n##\r\n##\r\n##\r\n##\r\n##if __name__ == '__main__':\r\n## main()\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":13064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"465251620","text":"\"\"\"Convert hits to an undirected graph.\"\"\"\n\nimport multiprocessing as mp\nimport os\nimport pandas as pd\nfrom itertools import permutations\n\n\ndef load_hit(qspid, sspid):\n df = pd.read_table(f'../../ortho_search/hsps2hits/out/{qspid}/{sspid}.tsv',\n usecols=dtypes.keys(), dtype=dtypes, memory_map=True)\n r = pd.read_table(f'../../ortho_search/hits2reciprocal/out/{qspid}/{sspid}.tsv',\n usecols=['reciprocal'], memory_map=True)\n\n return df[r['reciprocal']]\n\n\ndtypes = {'qppid': 'string', 'sppid': 'string',\n 'bitscore': float}\nnum_processes = 2\n\nif __name__ == '__main__':\n # Load genomes\n spids = []\n with open('../config/genomes.tsv') as file:\n field_names = file.readline().rstrip('\\n').split('\\t')\n for line in file:\n fields = {key: value for key, value in zip(field_names, line.rstrip('\\n').split('\\t'))}\n spids.append(fields['spid'])\n\n # Load data\n with mp.Pool(processes=num_processes) as pool:\n hits = pd.concat(pool.starmap(load_hit, permutations(spids, 2)))\n\n graph = {}\n for row in hits.itertuples():\n qppid, sppid, bitscore = row.qppid, row.sppid, row.bitscore\n try:\n graph[qppid].append((sppid, float(bitscore)))\n except KeyError:\n graph[qppid] = [(sppid, float(bitscore))]\n\n # Write to file\n if not os.path.exists('out/'):\n os.mkdir('out/')\n\n with open('out/hit_graph.tsv', 'w') as file:\n for qppid, edges in graph.items():\n file.write(qppid + '\\t' + ','.join([sppid + ':' + str(bitscore) for sppid, bitscore in edges]) + '\\n')\n\n\"\"\"\nDEPENDENCIES\n../../ortho_search/hsps2hits/hsps2hits.py\n ../../ortho_search/hsps2hits/out/*/*.tsv\n../../ortho_search/hits2reciprocal/hits2reciprocal.py\n ../../ortho_search/hits2reciprocal/out/*/*.tsv\n../config/genomes.tsv\n\"\"\"","sub_path":"analysis/ortho_tree/hits2graph/hits2graph.py","file_name":"hits2graph.py","file_ext":"py","file_size_in_byte":1875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"78258114","text":"def common():\n list1 = [1, 342, 75, 23, 98]\n list2 = [75, 23, 98, 12, 78, 10, 1]\n i=0\n new=[]\n while i640 or bullet[2]<-64 or bullet[2]>480:\r\n arrows.pop(index)\r\n index+=1\r\n for projectile in arrows:\r\n arrow1 = pygame.transform.rotate(arrow,360-projectile[0]*57.29)\r\n screen.blit(arrow1,(projectile[1],projectile[2]))\r\n #badguys\r\n if badtimer ==0:\r\n badguys.append([640,random.randint(50,430)])\r\n badtimer=100-badtimer1\r\n if badtimer1>=35:\r\n badtimer1=35\r\n else:\r\n badtimer1+=5\r\n index=0\r\n for badguy in badguys:\r\n if badguy[0]<-64:\r\n badguys.pop(index)\r\n badguy[0]-=7\r\n badrect= pygame.Rect(badguyimg.get_rect())\r\n badrect.top = badguy[1]\r\n badrect.left= badguy[0]\r\n if badrect.left <64:\r\n heathvalue -= random.randint(5,20)\r\n badguys.pop(index)\r\n index1=0\r\n #collision\r\n for bullet in arrows:\r\n bullrect=pygame.Rect(arrow.get_rect())\r\n bullrect.left=bullet[1]\r\n bullrect.top=bullet[2]\r\n if badrect.colliderect(bullrect):\r\n acc[0]+=1\r\n badguys.pop(index)\r\n arrows.pop(index1)\r\n index1+=1\r\n index+=1\r\n 
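#NOTE (added, not in the original): popping from badguys/arrows while iterating\r\n #over them can skip elements; iterating over a copy (e.g. badguys[:]) is the\r\n #usual fix.\r\n 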
for badguy in badguys:\r\n screen.blit(badguyimg,badguy)\r\n screen.blit(demon,badguy)\r\n\r\n #HUD\r\n font = pygame.font.Font(None,24)\r\n survivedtext = font.render(str((90000-pygame.time.get_ticks())//60000)+\":\"\r\n + str((90000-pygame.time.get_ticks())//1000%60).zfill(2),True,(0,0,0))\r\n textRect = survivedtext.get_rect()\r\n textRect.topright = [635,5]\r\n screen.blit(survivedtext, textRect)\r\n screen.blit(healthbar,(5,5))\r\n for health1 in range(heathvalue):\r\n screen.blit(health,(health1+8,8))\r\n pygame.display.flip()\r\n for event in pygame.event.get():\r\n if event.type==pygame.QUIT:\r\n pygame.quit()\r\n exit(0)\r\n if event.type ==pygame.KEYDOWN:\r\n if event.key == K_w:\r\n keys[0] = True\r\n elif event.key == K_a:\r\n keys[1] = True\r\n elif event.key ==K_s:\r\n keys[2] = True\r\n elif event.key == K_d:\r\n keys[3] = True\r\n if event.type ==pygame.KEYUP:\r\n if event.key == K_w:\r\n keys[0] = False\r\n elif event.key == K_a:\r\n keys[1] = False\r\n elif event.key ==K_s:\r\n keys[2] = False\r\n elif event.key == K_d:\r\n keys[3] = False\r\n if event.type == pygame.MOUSEBUTTONDOWN:\r\n position = pygame.mouse.get_pos()\r\n acc[1]+=1\r\n arrows.append([math.atan2(position[1]-(playerpos1[1]+32),position[0]-(playerpos1[0]+26)),\r\n playerpos1[0]+32,playerpos1[1]+32])\r\n if keys[0]:\r\n playerpos[1]-=5\r\n elif keys[2]:\r\n playerpos[1]+=5\r\n if keys[1]:\r\n playerpos[0]-=5\r\n elif keys[3]:\r\n playerpos[0]+=5\r\n if pygame.time.get_ticks()>=90000:\r\n running = 0\r\n exitcode = 1\r\n if heathvalue <= 0:\r\n running = 0\r\n exitcode = 0\r\n if acc[1]!=0:\r\n accuracy = acc[0]*1.0/acc[1]*100\r\n else:\r\n accuracy = 0\r\nif exitcode==0:\r\n pygame.font.init()\r\n font = pygame.font.Font(None, 24)\r\n text = font.render(\"Accuracy: \"+str(accuracy)+\"%\", True, (255,0,0))\r\n textRect = text.get_rect()\r\n textRect.centerx = screen.get_rect().centerx\r\n textRect.centery = screen.get_rect().centery+24\r\n screen.blit(gameover, (0,0))\r\n screen.blit(text, textRect)\r\nelse:\r\n pygame.font.init()\r\n font = pygame.font.Font(None, 24)\r\n text = font.render(\"Accuracy: \"+str(accuracy)+\"%\", True, (0,255,0))\r\n textRect = text.get_rect()\r\n textRect.centerx = screen.get_rect().centerx\r\n textRect.centery = screen.get_rect().centery+24\r\n screen.blit(youwin, (0,0))\r\n screen.blit(text, textRect)\r\nwhile 1:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n exit(0)\r\n pygame.display.flip()\r\n","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":6011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"559004399","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html\n\nimport json\n\nclass ScrapycrawlerPipeline(object):\n def process_item(self, item, spider):\n # print(\"*****\")\n # print(item)\n # print(\"*****\")\n return item\n\n# class QsbkPipeline(object):\n# def __init__(self):\n# self.fp = open(\"qsbk.json\", \"w\", encoding=\"utf-8\")\n#\n# def open_spider(self,spider):\n# print(\"Spider opened\")\n#\n# def process_item(self, item, spider):\n# # print(type(item))\n# # item_json = json.dumps(item,ensure_ascii=False)\n# item_json = json.dumps(dict(item),ensure_ascii=False) # the item must be converted to a plain dict first\n# self.fp.write(item_json + \"\\n\")\n# print(\"Data written...\")\n# return item\n#\n# def 
close_spider(self,spider):\n# self.fp.close()\n# print(\"Spider closed\")\n\nfrom scrapy.exporters import JsonItemExporter\n# class QsbkPipeline(object):\n# def __init__(self):\n# self.fp = open(\"qsbk.json\", \"wb\")\n# self.exporters = JsonItemExporter(self.fp,ensure_ascii=False,encoding=\"utf-8\")\n# self.exporters.start_exporting()\n#\n# def open_spider(self,spider):\n# print(\"Spider opened\")\n#\n# def process_item(self, item, spider):\n# self.exporters.export_item(item)\n# print(\"Data written...\")\n# return item\n#\n# def close_spider(self,spider):\n# self.exporters.finish_exporting()\n# self.fp.close()\n# print(\"Spider closed\")\n\nfrom scrapy.exporters import JsonLinesItemExporter\nclass QsbkPipeline(object):\n def __init__(self):\n self.fp = open(\"qsbk.json\", \"wb\")\n self.exporters = JsonLinesItemExporter(self.fp,ensure_ascii=False,encoding=\"utf-8\")\n\n def open_spider(self,spider):\n print(\"Spider opened\")\n\n def process_item(self, item, spider):\n # print(type(item))\n self.exporters.export_item(item)\n print(\"Data written...\")\n return item\n\n def close_spider(self,spider):\n self.fp.close()\n print(\"Spider closed\")\n\nfrom scrapy.exporters import JsonLinesItemExporter\nclass WxappPipeline(object):\n def __init__(self):\n self.fp = open(\"wxapp.json\", \"wb\")\n self.exporters = JsonLinesItemExporter(self.fp,ensure_ascii=False,encoding=\"utf-8\")\n\n def process_item(self, item, spider):\n # print(type(item))\n self.exporters.export_item(item)\n print(\"Data written...\")\n return item # return the item so later pipelines can use it\n\n def close_spider(self,spider):\n self.fp.close()\n print(\"Spider closed\")\n\n\nimport os\nfrom urllib import request\nclass Bmw1Pipeline(object):\n def __init__(self):\n # root directory for all images, i.e. E:\\workspace_PyCharm\\WebCrawler\\ScrapyCrawler\\ScrapyCrawler\\images\n self.path = os.path.join(os.path.dirname(__file__),\"images\")\n # create the folder if it does not exist\n if not os.path.exists(self.path):\n os.mkdir(self.path)\n\n def process_item(self, item, spider):\n title = item['title']\n urls = item['urls']\n title_path = os.path.join(self.path,title)\n # create the per-category directory under the image root if it does not exist\n if not os.path.exists(title_path):\n os.mkdir(title_path)\n # download the images into their category directory\n for url in urls: # images are downloaded one at a time here\n # split the download URL and use the last segment as the image name\n imagename = url.split('__')[-1]\n # print(imagename)\n # download the image and save it into the matching directory\n request.urlretrieve(url,os.path.join(title_path,imagename))\n return item\n\n\n# Custom pipeline: store thumbnail images grouped by category\nfrom scrapy.pipelines.images import ImagesPipeline\nclass Bmw2Pipeline(ImagesPipeline):\n\n def get_media_requests(self, item, info):\n \"\"\"Called before the download request is sent; it is also what issues the download request.\"\"\"\n request_objs = super(Bmw2Pipeline, self).get_media_requests(item,info)\n for request_obj in request_objs:\n request_obj.item = item\n return request_objs\n\n def file_path(self, request, response=None, info=None):\n \"\"\"Called when the image is about to be stored; returns the storage path for the image.\"\"\"\n path = super(Bmw2Pipeline,self).file_path(request,response,info)\n title = request.item.get(\"title\")\n from . 
import settings # from ScrapyCrawler import settings\n images_store = settings.IMAGES_STORE # read the image storage path from settings.py\n title_path = os.path.join(images_store,title) # build the per-category directory path\n # create the category directory if it does not exist\n if not os.path.exists(title_path):\n os.mkdir(title_path)\n image_name = path.replace(\"full/\",\"\")\n image_path = os.path.join(title_path,image_name)\n return image_path\n\n# Custom pipeline: store high-resolution images grouped by category\nfrom scrapy.pipelines.images import ImagesPipeline\nclass Bmw3Pipeline(ImagesPipeline):\n\n def get_media_requests(self, item, info):\n \"\"\"Called before the download request is sent; it is also what issues the download request.\"\"\"\n request_objs = super(Bmw3Pipeline, self).get_media_requests(item,info)\n for request_obj in request_objs:\n request_obj.item = item\n return request_objs\n\n def file_path(self, request, response=None, info=None):\n \"\"\"Called when the image is about to be stored; returns the storage path for the image.\"\"\"\n path = super(Bmw3Pipeline,self).file_path(request,response,info)\n category = request.item.get(\"category\")\n from . import settings # from ScrapyCrawler import settings\n images_store = settings.IMAGES_STORE # read the image storage path from settings.py\n category_path = os.path.join(images_store,category) # build the per-category directory path\n # create the category directory if it does not exist\n if not os.path.exists(category_path):\n os.mkdir(category_path)\n image_name = path.replace(\"full/\",\"\")\n image_path = os.path.join(category_path,image_name)\n return image_path\n\n","sub_path":"04.ScrapyDoc/ScrapyCrawler/ScrapyCrawler/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":6445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"112704610","text":"import numpy as np\n\n\ndef HMMViterbi(a, b, o, pi):\n # Implements HMM Viterbi algorithm\n # a: transition matrix, b: emission matrix, o: observation sequence, pi: initial distribution\n N = np.shape(b)[0]\n T = np.shape(o)[0]\n\n path = np.zeros(T)\n delta = np.zeros((N, T))\n phi = np.zeros((N, T))\n\n phi[:, 0] = np.zeros(N)\n delta[:, 0] = pi * b[:, o[0]]\n for i in range(1, T):\n delta[:, i] = np.max(((delta[:, i - 1] * a.T).T * b[:, o[i]]).T, 1)\n phi[:, i] = np.argmax(((delta[:, i - 1] * a.T).T * b[:, o[i]]).T, 1)\n\n path[T - 1] = np.argmax(delta[:, T - 1], 0)\n # backtrack through the stored pointers, following the path found so far\n for i in range(1, T):\n path[T - i - 1] = phi[int(path[T - i]), T - i]\n return path\n","sub_path":"lab_7/viterbi3.py","file_name":"viterbi3.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"543620618","text":"class Solution(object):\n def numTilings(self, N):\n \"\"\"\n :type N: int\n :rtype: int\n \"\"\"\n dp = [0 for _ in range(max(N+1, 4))]\n dp[0] = 1\n dp[1] = 1\n dp[2] = 2\n dp[3] = 5\n for i in range(4, N+1):\n dp[i] += dp[i-1] + dp[i-2]\n for j in range(i-2):\n dp[i] += 2*dp[j]\n return dp[N] % (10 ** 9 + 7)\n","sub_path":"normal/790_domino_and_tromino_tiling.py","file_name":"790_domino_and_tromino_tiling.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"263459836","text":"#!/usr/bin/env kpython3\n\n# inherent python libraries\nfrom time import sleep\nfrom configparser import ConfigParser\nimport os, sys\n\n# installs\nimport numpy as np\nimport astropy.io.fits as fits\nimport scipy.interpolate as sinterp\n\n# nfiuserver libraries\nimport ktl\nfrom KPIC_shmlib import Shm\nfrom Star_Tracker_cmds import Tracking_cmds\n\n\"\"\"\nThis script distorts astrophysical sep/pa for the CRED2 tracking camera\n\"\"\"\n\n# instantiate star_tracker_cmds to get goal\ntracking = Tracking_cmds()\n\n# if tracking script isn't alive, this script 
shouldn't be alive\n\n# get RELDIR location which has all kroot made files\nRELDIR = os.environ.get(\"RELDIR\")\nif RELDIR[-1] == \"/\": RELDIR = RELDIR[:-1]\n\nconfig = ConfigParser()\nconfig.read(RELDIR+\"/data/Star_Tracker.ini\")\n\nsep = config.get(\"Shm Info\", \"SEP\").split(\",\")\npa = config.get(\"Shm Info\", \"PA\").split(\",\")\ndist_sol = config.get(\"Shm Info\", \"Dist_sol\").split(\",\")\n\nif not os.path.isdir(\"/tmp/Star_Tracker\"):\n os.mkdir(\"/tmp/Star_Tracker\")\n\ntype_ = {\"int8\":np.int8, \"int16\":np.int16, \"int32\":np.int32, \"int64\":np.int64,\n \"uint8\":np.uint8, \"uint16\":np.uint16, \"uint32\":np.uint32,\n \"uint64\":np.uint64, \"intp\":np.intp, \"uintp\":np.uintp, \"float16\":np.float16,\n \"float32\":np.float32, \"float64\":np.float64, \"complex64\":np.complex64,\n \"complex128\":np.complex128, \"U\":np.string_}\n\n# open the sep and pa shms\nsep = Shm(sep[0], np.array([0,0], type_[sep[1]]),\n mmap = sep[2] == \"1\")\npa = Shm(pa[0], np.array([0,0], type_[pa[1]]),\n mmap = pa[2] == \"1\")\n# open dist_sol shm\ndist_sol = Shm(dist_sol[0], data=\"/nfiudata/sol/distortion_solution.fits\",\n mmap = dist_sol[2] == \"1\")\n\n# TODO:\n# subscribe to ktl service in charge of rotator\n# monitor the rotator keyword so we always have the most recent value\n\n# subscribe to ktl services we need\nserv = ktl.Service(\"dcs2\", populate=True)\nserv[\"rotposn\"].monitor()\nserv[\"instangl\"].monitor()\n\norig_coords = None\nundistor_coords = None\nplatescale = None\nnorthangle = None\ndistort_x = None\ndistort_y = None\n\ndef read_dist():\n \"\"\"A function to read the distortion solution at the file location\n in the shared memory\"\"\"\n\n fname = dist_sol.get_data(reform=True)\n\n if not os.path.isfile(fname):\n raise FileNotFoundError(\"Distortion solution file not found\")\n\n global orig_coords, undistor_coords, platescale, northangle, distort_x, distort_y\n\n with fits.open(fname) as hdulist:\n orig_coords = hdulist[0].data[0]\n undistor_coords = hdulist[0].data[1]\n platescale = hdulist[0].header['PS']\n northangle = hdulist[0].header['TN']\n\n # create an interpolant to be used for solution\n distort_x = sinterp.LinearNDInterpolator(undistor_coords.T, orig_coords[0])\n distort_y = sinterp.LinearNDInterpolator(undistor_coords.T, orig_coords[1])\n\n# try to start with distortion solution\ntry: read_dist()\n# ignore if we don't have a distortion solution since this is default\nexcept: pass\n\n# create a fake \"old\" variable so that the first time through the loop, values\n# get updated.\nold = [-5000, -5000, -5000, -5000, -5000]\n\n#### Begin conversions #####\nwhile os.getppid() != 1:\n try:\n # check for new dist_sol file\n if dist_sol.mtdata[\"cnt0\"] != dist_sol.get_counter():\n read_dist()\n\n # The location of the science fiber, which we are assuming is where we want\n # the companion to be\n # NOTE: for now, x and y goal are inverted.\n _, _, _, (comp_x, comp_y) = tracking.get_goal()\n\n #### Astrometry of the companion\n # location of the rotator\n try:\n rot_posang = float(serv[\"rotposn\"]) - float(serv[\"instangl\"])\n # If there's not keyword available, wait and restart loop\n except ValueError:\n sleep(20)\n continue\n # undistorted sep\n comp_sep = sep.get_data()[0]\n # undistorted pa\n comp_pa = pa.get_data()[0]\n\n comp_pa += rot_posang\n\n # check to see if any values have changed\n new = [comp_x, comp_y, comp_sep, comp_pa, rot_posang]\n if new == old:\n # if there are no new values, we don't have to recalculate anything.\n # store old values so that 
update time gets changed\n sep.set_data(sep.get_data())\n pa.set_data(pa.get_data())\n # rest\n sleep(30)\n # restart loop\n continue\n \n # convert to pixels and add PA offset to CRED2\n comp_r_undist = comp_sep / platescale # pix\n comp_pa_off = comp_pa - northangle\n \n # convert to offset of star from companion (which is at the location of the\n # fiber) in undistorted detector frame\n # NOTE: that the CRED2 has x = -RA, so we need to multiply by negative 1\n star_x_undist = comp_x + -(comp_r_undist * np.sin(np.radians(comp_pa_off)))\n star_y_undist = comp_y -(comp_r_undist * np.cos(np.radians(comp_pa_off)))\n \n # distort the star x/y to get the x/y on the detector it should be at\n star_x = distort_x((star_x_undist, star_y_undist))\n star_y = distort_y((star_x_undist, star_y_undist))\n \n # convert to comp separation and PA, but after distortion\n comp_sep_distor = np.sqrt((star_x - comp_x)**2 + (star_y - comp_y)**2)\n comp_pa_distor = np.degrees(np.arctan2((comp_x - star_x),\\\n (comp_y - star_y))) + northangle\n comp_sep_distor *= platescale\n comp_pa_distor %= 360\n \n comp_pa_distor -= rot_posang\n \n # store values in shared memory\n if sep.get_data()[0] != -1:\n _ = sep.get_data()\n _[1] = comp_sep_distor\n sep.set_data(_)\n \n if pa.get_data()[0] != -1:\n _ = pa.get_data()\n _[1] = comp_pa_distor\n pa.set_data(_)\n\n # store the values to calculate these values\n old = new\n\n # constant updates aren't necessary, so sleep to avoid hogging cpu\n sleep(20)\n # if there were any errors\n except Exception as e:\n print(e)\n # wait 1 second\n sleep(1)\n # then try again\n continue","sub_path":"nsfiu/tracking/distort.py","file_name":"distort.py","file_ext":"py","file_size_in_byte":6127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"324293189","text":"# coling18-multimodalSurvey\n# code to reproduce results reported in paper\n\nfrom gensim.models.keyedvectors import KeyedVectors\nfrom sklearn.metrics.pairwise import cosine_similarity\nimport numpy as np\nfrom scipy.stats import spearmanr\nimport matplotlib.pyplot as plt\n\n# adapt your paths here, to the data and embeddings you downloaded\nstr_path_simVerb = \"SimVerb-3500.txt\"\nstr_path_embodimentVerb = \"kwan_et_al_verb_embodiment_ratings_VERB.txt\"\nstr_path_embodimentScoreMean = \"kwan_et_al_verb_embodiment_ratings_SCOREMEAN.txt\"\n\n# textual: glove.6B.300d.w2vt\nstr_path_embeddings_textual = \"glove.6B.300d.w2vt\"\n\n# visualGoogle: out_google-googlenet.w2vt\nstr_path_embeddings_visualGoogle = \"out_google-googlenet.w2vt\"\n\n# imaginedGoogle: weights_imagined_vanilia_glove_googlenet.w2vt\nstr_path_embeddings_imaginedGoogle = \"weights_imagined_vanilia_glove_googlenet.w2vt\"\n\n# visualImSitu: imSitu_verbs_averagedEmbeddings.w2vt\nstr_path_embeddings_visualImSitu = \"imSitu_verbs_averagedEmbeddings.w2vt\"\n\n# imaginedImSitu: weights_imagined_vanilia_glove_imSitu.w2vt\nstr_path_embeddings_imaginedImSitu = \"weights_imagined_vanilia_glove_imSitu.w2vt\"\n\nstr_path_out = \"coling18-multimodalSurvey_experiments_out\"\nstr_path_embeddings_visual_textual_concat = str_path_out\n\ndef load_word2VecEmbedding(path):\n word2VecEmbeddings = KeyedVectors.load_word2vec_format(path, binary=False, unicode_errors='ignore')\n return word2VecEmbeddings\n\ndef word_to_vec(word2VecEmbeddings, word):\n return word2VecEmbeddings.word_vec(word) \n\ndef contains_word(word2VecEmbeddings, word):\n return word in word2VecEmbeddings.vocab \n\nembeddings_textual = 
load_word2VecEmbedding(str_path_embeddings_textual)\nprint(\"loaded embeddings_textual\")\nembeddings_visualGoogle = load_word2VecEmbedding(str_path_embeddings_visualGoogle)\nprint(\"loaded embeddings_visualGoogle\")\nembeddings_imaginedGoogle = load_word2VecEmbedding(str_path_embeddings_imaginedGoogle)\nprint(\"loaded embeddings_imaginedGoogle\")\nembeddings_visualImSitu = load_word2VecEmbedding(str_path_embeddings_visualImSitu)\nprint(\"loaded embeddings_visualImSitu\")\nembeddings_imaginedImSitu = load_word2VecEmbedding(str_path_embeddings_imaginedImSitu)\nprint(\"loaded embeddings_imaginedImSitu\")\n\ndef concat_vis_text_embs(embs_vis, embs_text, str_path_embeddings_vis_text_concat_filename):\n embs_vis.init_sims(replace=True) # Precompute L2-normalized vectors. \n embs_text.init_sims(replace=True) # Precompute L2-normalized vectors. \n embeddings_visual_textual_concat_prep = {}\n for w in embs_vis.index2word:\n vec_vis = word_to_vec(embs_vis, w)\n if contains_word(embs_text, w):\n vec_text = word_to_vec(embs_text, w)\n vec_vis_text = np.concatenate((vec_vis, vec_text))\n embeddings_visual_textual_concat_prep[w] = vec_vis_text\n with open(str_path_embeddings_vis_text_concat_filename, \"w\") as text_file_out:\n num = len(embeddings_visual_textual_concat_prep.keys())\n dim = len(embeddings_visual_textual_concat_prep[list(embeddings_visual_textual_concat_prep.keys())[0]])\n text_file_out.write(str(num) + \" \" + str(dim) + \"\\n\")\n for key,val in embeddings_visual_textual_concat_prep.items(): \n text_file_out.write(str(key))\n for v in val:\n text_file_out.write(\" \" + str(v))\n text_file_out.write(\"\\n\")\n embeddings_vis_text_concat = load_word2VecEmbedding(str_path_embeddings_vis_text_concat_filename)\n return embeddings_vis_text_concat\n\nembs_text = embeddings_textual\n\nembs_vis = embeddings_visualGoogle\nstr_filename = \"embeddings_visualGoogle_textual_concat\"\nstr_path_embeddings_vis_text_concat_filename = str_path_embeddings_visual_textual_concat + \"/\" + str_filename + \".w2vt\"\nembeddings_visualGoogle_textual_concat = concat_vis_text_embs(embs_vis, embs_text, str_path_embeddings_vis_text_concat_filename)\nprint(\"num of words in original visual space: {}\".format(len(embs_vis.vocab)))\nprint(\"num of words in space of concatenation: {}\".format(len(embeddings_visualGoogle_textual_concat.vocab)))\nprint(\"len of embs in space of concatenation: {}\".format(len(embeddings_visualGoogle_textual_concat.word_vec(embeddings_visualGoogle_textual_concat.index2word[0]))))\n\nembs_vis = embeddings_visualImSitu\nstr_filename = \"embeddings_visualImSitu_textual_concat\"\nstr_path_embeddings_vis_text_concat_filename = str_path_embeddings_visual_textual_concat + \"/\" + str_filename + \".w2vt\"\nembeddings_visualImSitu_textual_concat = concat_vis_text_embs(embs_vis, embs_text, str_path_embeddings_vis_text_concat_filename)\nprint(\"num of words in original visual space: {}\".format(len(embs_vis.vocab)))\nprint(\"num of words in space of concatenation: {}\".format(len(embeddings_visualImSitu_textual_concat.vocab)))\nprint(\"len of embs in space of concatenation: {}\".format(len(embeddings_visualImSitu_textual_concat.word_vec(embeddings_visualImSitu_textual_concat.index2word[0]))))\n\ndef fuse_vis_text_embs(embeddings_vis_text_concat, str_path_embeddings_vis_text_concat_filename):\n num_w = len(embeddings_vis_text_concat.vocab)\n len_w = len(embeddings_vis_text_concat.word_vec(embeddings_vis_text_concat.index2word[0]))\n matrix_embeddings_vis_text_concat = np.ones((num_w, len_w))\n 
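# Clarifying note (added; not part of the original): each row of this matrix is\n # one word's concatenated embedding (both halves were L2-normalized earlier in\n # concat_vis_text_embs); truncating U to its first 300 columns below is the SVD\n # step that fuses the two modalities back to 300 dimensions.\n 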
print(np.shape(matrix_embeddings_vis_text_concat))\n counter_i = 0\n for word in embeddings_vis_text_concat.index2word:\n matrix_embeddings_vis_text_concat[counter_i, :] = word_to_vec(embeddings_vis_text_concat, word)\n counter_i += 1\n print(np.shape(matrix_embeddings_vis_text_concat))\n U, s, V = np.linalg.svd(matrix_embeddings_vis_text_concat, full_matrices=False)\n print(np.shape(U))\n embeddings_visual_textual_concat_svd_prep = {}\n counter_j = 0\n for w in embeddings_vis_text_concat.index2word:\n vec_vis_text_svd = U[counter_j, 0:300]\n embeddings_visual_textual_concat_svd_prep[w] = vec_vis_text_svd\n counter_j += 1\n with open(str_path_embeddings_vis_text_concat_filename, \"w\") as text_file_out:\n num = len(embeddings_visual_textual_concat_svd_prep.keys())\n dim = len(embeddings_visual_textual_concat_svd_prep[list(embeddings_visual_textual_concat_svd_prep.keys())[0]])\n text_file_out.write(str(num) + \" \" + str(dim) + \"\\n\")\n for key,val in embeddings_visual_textual_concat_svd_prep.items(): \n text_file_out.write(str(key))\n for v in val:\n text_file_out.write(\" \" + str(v))\n text_file_out.write(\"\\n\")\n embeddings_vis_text_concat_svd = load_word2VecEmbedding(str_path_embeddings_vis_text_concat_filename)\n return embeddings_vis_text_concat_svd\n\nembeddings_vis_text_concat = embeddings_visualGoogle_textual_concat\nstr_filename = \"embeddings_visualGoogle_textual_concat_svd\"\nstr_path_embeddings_vis_text_concat_filename = str_path_embeddings_visual_textual_concat + \"/\" + str_filename + \".w2vt\"\nembeddings_visualGoogle_textual_concat_svd = fuse_vis_text_embs(embeddings_vis_text_concat, str_path_embeddings_vis_text_concat_filename)\nprint(\"num of words in original visual space: {}\".format(len(embeddings_visualGoogle.vocab)))\nprint(\"num of words in space of concatenation: {}\".format(len(embeddings_visualGoogle_textual_concat_svd.vocab)))\nprint(\"len of embs in space of concatenation: {}\".format(len(embeddings_visualGoogle_textual_concat_svd.word_vec(embeddings_visualGoogle_textual_concat_svd.index2word[0]))))\n\nembeddings_vis_text_concat = embeddings_visualImSitu_textual_concat\nstr_filename = \"embeddings_visualImSitu_textual_concat_svd\"\nstr_path_embeddings_vis_text_concat_filename = str_path_embeddings_visual_textual_concat + \"/\" + str_filename + \".w2vt\"\nembeddings_visualImSitu_textual_concat_svd = fuse_vis_text_embs(embeddings_vis_text_concat, str_path_embeddings_vis_text_concat_filename)\nprint(\"num of words in original visual space: {}\".format(len(embeddings_visualImSitu.vocab)))\nprint(\"num of words in space of concatenation: {}\".format(len(embeddings_visualImSitu_textual_concat_svd.vocab)))\nprint(\"len of embs in space of concatenation: {}\".format(len(embeddings_visualImSitu_textual_concat_svd.word_vec(embeddings_visualImSitu_textual_concat_svd.index2word[0]))))\n\nprint(\"reading: {} and {}\".format(str_path_embodimentVerb, str_path_embodimentScoreMean))\ndict_embodimentVerb_ScoreMean = {}\nlist_embodimentScoreMean = []\nwith open(str_path_embodimentVerb, \"r\") as file_embodimentVerb, open(str_path_embodimentScoreMean, \"r\") as file_embodimentScoreMean:\n for line_embodimentVerb, line_embodimentScoreMean in zip(file_embodimentVerb, file_embodimentScoreMean):\n dict_embodimentVerb_ScoreMean[line_embodimentVerb[:-1]] = line_embodimentScoreMean[:-1]\n list_embodimentScoreMean.append(float(line_embodimentScoreMean[:-1]))\nprint(len(dict_embodimentVerb_ScoreMean))\nprint(\"dict_embodimentVerb_ScoreMean['absorb']: 
{}\".format(dict_embodimentVerb_ScoreMean['absorb'])) \nprint(len(list_embodimentScoreMean))\n\nembodimentScoreMean_firstQuartile = np.percentile(np.array(list_embodimentScoreMean), 25)\nembodimentScoreMean_fourthQuartile = np.percentile(np.array(list_embodimentScoreMean), 75)\nprint(\"embodimentScoreMean_firstQuartile: {}\".format(embodimentScoreMean_firstQuartile))\nprint(\"embodimentScoreMean_fourthQuartile: {}\".format(embodimentScoreMean_fourthQuartile))\n\nprint(\"reading: {}\".format(str_path_simVerb))\nsimVerb_data_all = []\nwith open(str_path_simVerb, \"r\") as in_text_file:\n for line in in_text_file:\n l = line.split(\"\\t\")\n word1 = l[0]\n word2 = l[1]\n POS = l[2]\n score = l[3]\n relation = l[4][0:-1]\n l_tuple = (word1, word2, POS, score, relation)\n simVerb_data_all.append(l_tuple)\nprint(len(simVerb_data_all))\nprint(len(simVerb_data_all[0]))\nprint(simVerb_data_all[0])\nprint(simVerb_data_all[0:2])\n\nsimVerb_data_embodimentScores = []\nsimVerb_data_embodimentScores_bothScored = []\nsimVerb_data_embodimentScores_oneScored = []\nsimVerb_data_embodimentScores_notScored = []\nsimVerb_data_embodimentScores_bothScored_firstQuartile = [] # both low embodied\nsimVerb_data_embodimentScores_bothScored_fourthQuartile = [] # both highly embodied\nsimVerb_data_embodimentScores_bothScored_mixed = [] # anything else\n\nprint(\"embodimentScoreMean_firstQuartile: {}\".format(embodimentScoreMean_firstQuartile))\nprint(\"embodimentScoreMean_fourthQuartile: {}\".format(embodimentScoreMean_fourthQuartile))\nquart_1 = embodimentScoreMean_firstQuartile\nquart_4 = embodimentScoreMean_fourthQuartile\n\nfor l_tuple in simVerb_data_all:\n (word1, word2, POS, score, relation) = l_tuple\n \n if word1 in dict_embodimentVerb_ScoreMean.keys():\n word1_embodimentScore = float(dict_embodimentVerb_ScoreMean[word1])\n else: # -1 if no score available\n word1_embodimentScore = -1\n \n if word2 in dict_embodimentVerb_ScoreMean.keys():\n word2_embodimentScore = float(dict_embodimentVerb_ScoreMean[word2])\n else: # -1 if no score available\n word2_embodimentScore = -1\n \n l_tuple_embodiment = (word1, word2, POS, score, relation, word1_embodimentScore, word2_embodimentScore)\n simVerb_data_embodimentScores.append(l_tuple_embodiment)\n \n if word1_embodimentScore == -1 and word2_embodimentScore == -1: # notEmbodied\n simVerb_data_embodimentScores_notScored.append(l_tuple_embodiment)\n elif word1_embodimentScore == -1 or word2_embodimentScore == -1: # oneEmbodied\n simVerb_data_embodimentScores_oneScored.append(l_tuple_embodiment)\n else: # bothEmbodied\n simVerb_data_embodimentScores_bothScored.append(l_tuple_embodiment)\n \n if word1_embodimentScore <= quart_1 and word2_embodimentScore <= quart_1: # both low embodied\n simVerb_data_embodimentScores_bothScored_firstQuartile.append(l_tuple_embodiment)\n elif word1_embodimentScore >= quart_4 and word2_embodimentScore >= quart_4: # both highly embodied\n simVerb_data_embodimentScores_bothScored_fourthQuartile.append(l_tuple_embodiment)\n else: # anything else\n simVerb_data_embodimentScores_bothScored_mixed.append(l_tuple_embodiment)\n\nprint(\"{} simVerb_data_embodimentScores\".format(len(simVerb_data_embodimentScores)))\nprint(simVerb_data_embodimentScores[0:2])\n\nprint(\"---------------------\")\nprint(\"{} simVerb_data_embodimentScores_bothScored\".format(len(simVerb_data_embodimentScores_bothScored)))\nprint(simVerb_data_embodimentScores_bothScored[0:2])\n\nprint(\"{} 
simVerb_data_embodimentScores_oneScored\".format(len(simVerb_data_embodimentScores_oneScored)))\nprint(simVerb_data_embodimentScores_oneScored[0:2])\n\nprint(\"{} simVerb_data_embodimentScores_notScored\".format(len(simVerb_data_embodimentScores_notScored)))\nprint(simVerb_data_embodimentScores_notScored[0:2])\n\nprint(\"---------------------\")\nprint(\"{} simVerb_data_embodimentScores_bothScored_firstQuartile\".format(len(simVerb_data_embodimentScores_bothScored_firstQuartile)))\nprint(simVerb_data_embodimentScores_bothScored_firstQuartile[0:2])\n\nprint(\"{} simVerb_data_embodimentScores_bothScored_fourthQuartile\".format(len(simVerb_data_embodimentScores_bothScored_fourthQuartile)))\nprint(simVerb_data_embodimentScores_bothScored_fourthQuartile[0:2])\n\nprint(\"{} simVerb_data_embodimentScores_bothScored_mixed\".format(len(simVerb_data_embodimentScores_bothScored_mixed)))\nprint(simVerb_data_embodimentScores_bothScored_mixed[0:2])\n\ndef get_simVerbScore_and_cosSim(simVerb_data, word2VecEmbeddings):\n list_simVerbScore = []\n list_cosSim = []\n simVerb_data_extended = []\n counter_wordPair_Embedding = 0\n counter_wordPair_noEmbedding = 0\n for l_tuple in simVerb_data:\n word1 = l_tuple[0]\n word2 = l_tuple[1]\n if contains_word(word2VecEmbeddings, word1) and contains_word(word2VecEmbeddings, word2):\n word1_emb = word_to_vec(word2VecEmbeddings, word1)\n word2_emb = word_to_vec(word2VecEmbeddings, word2)\n sim = cosine_similarity(word1_emb.reshape(1, -1), word2_emb.reshape(1, -1))[0][0]\n list_cosSim.append(sim)\n score = l_tuple[3]\n list_simVerbScore.append(float(score))\n counter_wordPair_Embedding += 1\n simVerb_data_extended.append(l_tuple + (sim,))\n else:\n counter_wordPair_noEmbedding += 1\n print(\"counter_wordPair_noEmbedding: {} / counter_wordPair_Embedding: {}\".format(counter_wordPair_noEmbedding, counter_wordPair_Embedding))\n return list_simVerbScore, list_cosSim, simVerb_data_extended, counter_wordPair_noEmbedding, counter_wordPair_Embedding\n\n_, _, simVerb_data_extended_all_visualGoogle, _, _ = get_simVerbScore_and_cosSim(simVerb_data_all, embeddings_visualGoogle)\nprint(len(simVerb_data_extended_all_visualGoogle))\n_, _, simVerb_data_extended_firstQuartile_visualGoogle, _, _ = get_simVerbScore_and_cosSim(simVerb_data_embodimentScores_bothScored_firstQuartile, embeddings_visualGoogle)\nprint(len(simVerb_data_extended_firstQuartile_visualGoogle))\n_, _, simVerb_data_extended_fourthQuartile_visualGoogle, _, _ = get_simVerbScore_and_cosSim(simVerb_data_embodimentScores_bothScored_fourthQuartile, embeddings_visualGoogle)\nprint(len(simVerb_data_extended_fourthQuartile_visualGoogle))\n\n_, _, simVerb_data_extended_all_visualImSitu, _, _ = get_simVerbScore_and_cosSim(simVerb_data_all, embeddings_visualImSitu)\nprint(len(simVerb_data_extended_all_visualImSitu))\n_, _, simVerb_data_extended_firstQuartile_visualImSitu, _, _ = get_simVerbScore_and_cosSim(simVerb_data_embodimentScores_bothScored_firstQuartile, embeddings_visualImSitu)\nprint(len(simVerb_data_extended_firstQuartile_visualImSitu))\n_, _, simVerb_data_extended_fourthQuartile_visualImSitu, _, _ = get_simVerbScore_and_cosSim(simVerb_data_embodimentScores_bothScored_fourthQuartile, embeddings_visualImSitu)\nprint(len(simVerb_data_extended_fourthQuartile_visualImSitu))\n\nlist_word2VecEmbeddings = [embeddings_textual, embeddings_visualGoogle, embeddings_visualGoogle_textual_concat, embeddings_visualGoogle_textual_concat_svd, embeddings_imaginedGoogle]\nlist_str_word2VecEmbeddings = ['embeddings_textual', 
'embeddings_visualGoogle', 'embeddings_visualGoogle_textual_concat', 'embeddings_visualGoogle_textual_concat_svd', 'embeddings_imaginedGoogle']\nlist_simVerb_data = [simVerb_data_extended_all_visualGoogle, simVerb_data_extended_fourthQuartile_visualGoogle]\nlist_str_simVerb_data = ['simVerb_data_extended_all_visualGoogle', 'simVerb_data_extended_fourthQuartile_visualGoogle']\n\n#list_word2VecEmbeddings = [embeddings_textual, embeddings_visualImSitu, embeddings_visualImSitu_textual_concat, embeddings_visualImSitu_textual_concat_svd, embeddings_imaginedImSitu]\n#list_str_word2VecEmbeddings = ['embeddings_textual', 'embeddings_visualImSitu', 'embeddings_visualImSitu_textual_concat', 'embeddings_visualImSitu_textual_concat_svd', 'embeddings_imaginedImSitu']\n#list_simVerb_data = [simVerb_data_extended_all_visualImSitu, simVerb_data_extended_fourthQuartile_visualImSitu]\n#list_str_simVerb_data = ['simVerb_data_extended_all_visualImSitu', 'simVerb_data_extended_fourthQuartile_visualImSitu']\n\nfor simVerb_data, str_simVerb_data, in zip(list_simVerb_data, list_str_simVerb_data):\n for word2VecEmbeddings, str_word2VecEmbeddings, in zip(list_word2VecEmbeddings, list_str_word2VecEmbeddings):\n print(\"simVerb_data: {}, embeddings: {}\".format(str_simVerb_data, str_word2VecEmbeddings))\n list_simVerbScore, list_cosSim, simVerb_data_extended, counter_wordPair_noEmbedding, counter_wordPair_Embedding = get_simVerbScore_and_cosSim(simVerb_data, word2VecEmbeddings)\n spearman_rank_correlation = spearmanr(list_simVerbScore, list_cosSim)\n print(spearman_rank_correlation)\n\n\n\n\n","sub_path":"coling18-multimodalSurvey_experiments.py","file_name":"coling18-multimodalSurvey_experiments.py","file_ext":"py","file_size_in_byte":17624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"386500007","text":"# -*- coding: utf-8 -*-\n\nfrom bson.objectid import ObjectId\nimport datetime\n\nfrom girder import events\nfrom girder.models.model_base import AccessControlledModel\nfrom girder.models.item import Item\nfrom girder.models.folder import Folder\nfrom girder.models.token import Token\nfrom girder.constants import AccessType\nfrom girder.exceptions import AccessException\nfrom girder.plugins.jobs.constants import JobStatus\n\nfrom ..constants import WORKSPACE_NAME, DATADIRS_NAME, SCRIPTDIRS_NAME, TaleStatus\nfrom ..utils import getOrCreateRootFolder, init_progress\nfrom ..lib.license import WholeTaleLicense\n\nfrom gwvolman.tasks import build_tale_image, BUILD_TALE_IMAGE_STEP_TOTAL\n\n\n# Whenever the Tale object schema is modified (e.g. 
fields are added or\n# removed) increase `_currentTaleFormat` to retroactively apply those\n# changes to existing Tales.\n_currentTaleFormat = 7\n\n\nclass Tale(AccessControlledModel):\n\n def initialize(self):\n self.name = 'tale'\n self.ensureIndices(('imageId', ([('imageId', 1)], {})))\n self.ensureTextIndex({\n 'title': 10,\n 'description': 1\n })\n self.modifiableFields = {\n 'title', 'description', 'public', 'config', 'updated', 'authors',\n 'category', 'icon', 'iframe', 'illustration', 'dataSet', 'licenseSPDX',\n 'workspaceModified', 'publishInfo', 'imageId'\n }\n self.exposeFields(\n level=AccessType.READ,\n fields=({'_id', 'folderId', 'imageId', 'creatorId', 'created',\n 'format', 'dataSet', 'narrative', 'narrativeId', 'licenseSPDX',\n 'imageInfo', 'publishInfo', 'workspaceId',\n 'workspaceModified', 'dataSetCitation', 'copyOfTale',\n 'status'} | self.modifiableFields))\n events.bind('jobs.job.update.after', 'wholetale', self.updateTaleStatus)\n\n def validate(self, tale):\n if 'status' not in tale:\n tale['status'] = TaleStatus.READY\n\n if 'iframe' not in tale:\n tale['iframe'] = False\n\n if '_id' not in tale:\n return tale\n\n if 'publishInfo' not in tale:\n tale['publishInfo'] = []\n\n if 'dataSet' not in tale:\n tale['dataSet'] = []\n\n if 'licenseSPDX' not in tale:\n tale['licenseSPDX'] = WholeTaleLicense.default_spdx()\n tale_licenses = WholeTaleLicense()\n if tale['licenseSPDX'] not in tale_licenses.supported_spdxes():\n tale['licenseSPDX'] = WholeTaleLicense.default_spdx()\n\n if tale.get('config') is None:\n tale['config'] = {}\n\n if tale.get('dataSetCitation') is None:\n tale['dataSetCitation'] = []\n\n if 'copyOfTale' not in tale:\n tale['copyOfTale'] = None\n\n tale['format'] = _currentTaleFormat\n\n if not isinstance(tale['authors'], list):\n tale['authors'] = []\n return tale\n\n def list(self, user=None, data=None, image=None, limit=0, offset=0,\n sort=None, currentUser=None, level=AccessType.READ):\n \"\"\"\n List a page of jobs for a given user.\n\n :param user: The user who created the tale.\n :type user: dict or None\n :param data: The object array that's being used by the tale.\n :type data: dict or None\n :param image: The Image that's being used by the tale.\n :type image: dict or None\n :param limit: The page limit.\n :param offset: The page offset\n :param sort: The sort field.\n :param currentUser: User for access filtering.\n \"\"\"\n cursor_def = {}\n if user is not None:\n cursor_def['creatorId'] = user['_id']\n if data is not None:\n cursor_def['dataSet'] = data\n if image is not None:\n cursor_def['imageId'] = image['_id']\n\n cursor = self.find(cursor_def, sort=sort)\n for r in self.filterResultsByPermission(\n cursor=cursor, user=currentUser, level=level,\n limit=limit, offset=offset):\n yield r\n\n def createTale(self, image, data, creator=None, save=True, title=None,\n description=None, public=None, config=None, authors=None,\n icon=None, category=None, illustration=None, narrative=None,\n licenseSPDX=WholeTaleLicense.default_spdx()):\n\n if creator is None:\n creatorId = None\n else:\n creatorId = creator.get('_id', None)\n\n if title is None:\n title = '{} with {}'.format(image['name'], DATADIRS_NAME)\n # if illustration is None:\n # Get image from SILS\n\n now = datetime.datetime.utcnow()\n tale = {\n 'authors': authors,\n 'category': category,\n 'config': config or {},\n 'copyOfTale': None,\n 'creatorId': creatorId,\n 'dataSet': data or [],\n 'description': description,\n 'format': _currentTaleFormat,\n 'created': now,\n 'icon': icon,\n 
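# the tale inherits its iframe capability from the image it is built on\n            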
'iframe': image.get('iframe', False),\n 'imageId': ObjectId(image['_id']),\n 'illustration': illustration,\n 'narrative': narrative or [],\n 'title': title,\n 'public': public,\n 'updated': now,\n 'licenseSPDX': licenseSPDX\n }\n if public is not None and isinstance(public, bool):\n self.setPublic(tale, public, save=False)\n else:\n public = False\n\n if creator is not None:\n self.setUserAccess(tale, user=creator, level=AccessType.ADMIN,\n save=False)\n if tale['dataSet']:\n eventParams = {'tale': tale, 'user': creator}\n event = events.trigger('tale.update_citation', eventParams)\n if len(event.responses):\n tale = event.responses[-1]\n\n if save:\n tale = self.save(tale)\n workspace = self.createWorkspace(tale, creator=creator)\n data_folder = self.createDataMountpoint(tale, creator=creator)\n tale['folderId'] = data_folder['_id']\n tale['workspaceId'] = workspace['_id']\n narrative_folder = self.createNarrativeFolder(\n tale, creator=creator, default=not bool(tale['narrative']))\n for obj_id in tale['narrative']:\n item = Item().load(obj_id, user=creator)\n Item().copyItem(item, creator, folder=narrative_folder)\n tale['narrativeId'] = narrative_folder['_id']\n tale = self.save(tale)\n\n return tale\n\n def createNarrativeFolder(self, tale, creator=None, default=False):\n if default:\n rootFolder = getOrCreateRootFolder(SCRIPTDIRS_NAME)\n auxFolder = self.model('folder').createFolder(\n rootFolder, 'default', parentType='folder',\n public=True, reuseExisting=True)\n else:\n auxFolder = self._createAuxFolder(\n tale, SCRIPTDIRS_NAME, creator=creator)\n return auxFolder\n\n def createDataMountpoint(self, tale, creator=None):\n return self._createAuxFolder(tale, DATADIRS_NAME, creator=creator)\n\n def createWorkspace(self, tale, creator=None):\n return self._createAuxFolder(tale, WORKSPACE_NAME, creator=creator)\n\n def _createAuxFolder(self, tale, rootFolderName, creator=None):\n if creator is None:\n creator = self.model('user').load(tale['creatorId'], force=True)\n\n if tale['public'] is not None and isinstance(tale['public'], bool):\n public = tale['public']\n else:\n public = False\n\n rootFolder = getOrCreateRootFolder(rootFolderName)\n auxFolder = self.model('folder').createFolder(\n rootFolder, str(tale['_id']), parentType='folder',\n public=public, reuseExisting=True)\n self.setUserAccess(\n auxFolder, user=creator, level=AccessType.ADMIN,\n save=True)\n auxFolder = self.model('folder').setMetadata(\n auxFolder, {'taleId': str(tale['_id'])})\n return auxFolder\n\n def updateTale(self, tale):\n \"\"\"\n Updates a tale.\n\n :param tale: The tale document to update.\n :type tale: dict\n :returns: The tale document that was edited.\n \"\"\"\n tale['updated'] = datetime.datetime.utcnow()\n return self.save(tale)\n\n def setAccessList(self, doc, access, save=False, user=None, force=False,\n setPublic=None, publicFlags=None):\n \"\"\"\n Overrides AccessControlledModel.setAccessList to encapsulate ACL\n functionality for a tale.\n\n :param doc: the tale to set access settings on\n :type doc: girder.models.tale\n :param access: The access control list\n :type access: dict\n :param save: Whether the changes should be saved to the database\n :type save: bool\n :param user: The current user\n :param force: Set this to True to set the flags regardless of the passed in\n user's permissions.\n :type force: bool\n :param setPublic: Pass this if you wish to set the public flag on the\n resources being updated.\n :type setPublic: bool or None\n :param publicFlags: Pass this if you wish to set the 
public flag list on\n resources being updated.\n :type publicFlags: flag identifier str, or list/set/tuple of them,\n or None\n \"\"\"\n if setPublic is not None:\n self.setPublic(doc, setPublic, save=False)\n\n if publicFlags is not None:\n doc = self.setPublicFlags(doc, publicFlags, user=user, save=False,\n force=force)\n\n doc = super().setAccessList(\n doc, access, user=user, save=save, force=force)\n\n for id_key in ('folderId', 'workspaceId', 'narrativeId'):\n try:\n folder = Folder().load(doc[id_key], user=user, level=AccessType.ADMIN)\n except AccessException:\n _folder = Folder().load(doc[id_key], force=True)\n if id_key != 'narrativeId' or _folder['name'] != 'default':\n raise\n folder = None\n\n if folder:\n Folder().setAccessList(\n folder, access, user=user, save=save, force=force, recurse=True,\n setPublic=setPublic, publicFlags=publicFlags)\n\n return doc\n\n def buildImage(self, tale, user, force=False):\n \"\"\"\n Build the image for the tale\n \"\"\"\n\n resource = {\n 'type': 'wt_build_image',\n 'tale_id': tale['_id']\n }\n\n token = Token().createToken(user=user, days=0.5)\n\n notification = init_progress(\n resource, user, 'Building image',\n 'Initializing', BUILD_TALE_IMAGE_STEP_TOTAL)\n\n buildTask = build_tale_image.signature(\n args=[str(tale['_id']), force],\n girder_job_other_fields={\n 'wt_notification_id': str(notification['_id']),\n },\n girder_client_token=str(token['_id']),\n ).apply_async()\n\n return buildTask.job\n\n @staticmethod\n def updateTaleStatus(event):\n job = event.info['job']\n if job['type'] == 'wholetale.copy_workspace' and job.get('status') is not None:\n status = int(job['status'])\n workspace = Folder().load(job['args'][1], force=True)\n tale = Tale().load(workspace['meta']['taleId'], force=True)\n if status == JobStatus.SUCCESS:\n tale['status'] = TaleStatus.READY\n elif status == JobStatus.ERROR:\n tale['status'] = TaleStatus.ERROR\n Tale().updateTale(tale)\n","sub_path":"server/models/tale.py","file_name":"tale.py","file_ext":"py","file_size_in_byte":11835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"325218247","text":"from abc import *\nfrom bangtal import *\n\n\nclass Formatter:\n @staticmethod\n def get_image(name, idx):\n prefix = 'images/'\n postfix = '.png'\n return prefix + name + str(idx) + postfix\n\n @staticmethod\n def get_effect(name, idx):\n effect = '_effect'\n return Formatter.get_image(name+effect, idx)\n\n\nclass Creature(Object, metaclass=ABCMeta):\n def __init__(self, x, y, scene):\n super().__init__('')\n self.x = x\n self.y = y\n self.img = None\n self.handler = None\n self.scene = scene\n\n def add_handler(self, handler):\n self.handler = handler\n\n def onMouseAction(self, x, y, action):\n self.handler.handle_action(self.x, self.y)\n\n @abstractmethod\n def action(self, x, y, damage):\n pass\n\n\nclass Monster(Creature):\n def __init__(self, x, y, scene):\n super().__init__(x, y, scene)\n self.hp = 0\n self.status = 1\n self.dist = 15\n self.delay = 0\n self.damage = 0\n self.movable = True\n self.movement = None\n\n def start(self):\n self.movement = Movement(self, self.delay)\n self.movement.start()\n\n def hide(self):\n self.stop()\n super().hide()\n\n def stop(self):\n self.movable = False\n self.movement.stop()\n\n def action(self, x, y, damage):\n if self.x == x and self.y == y:\n self.hp -= damage\n if self.hp <= 0:\n self.handler.remove_creature(self)\n\n def move(self):\n if self.movable:\n self.x -= self.dist\n self.status = 3-self.status\n 
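# toggling status between 1 and 2 alternates the creature's two walking sprites\n            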
super().locate(self.scene, self.x, self.y)\n super().setImage(Formatter.get_image(self.img, self.status))\n self.handler.decrease_life(self)\n\n\nclass Movement(Timer):\n def __init__(self, monster, delay):\n self.delay = delay\n self.monster = monster\n super().__init__(self.delay)\n\n def onTimeout(self):\n self.monster.move()\n self.set(self.delay)\n self.start()\n\n\nclass Boss(Monster):\n def __init__(self, x, y, scene):\n super().__init__(x, y, scene)\n self.hp = 8\n self.img = 'boss'\n self.delay = 0.15\n self.damage = self.hp\n super().setImage(Formatter.get_image(self.img, 1))\n super().locate(scene, self.x, self.y)\n super().show()\n super().start()\n\n\nclass Warrior(Monster):\n def __init__(self, x, y, scene):\n super().__init__(x, y, scene)\n self.hp = 4\n self.img = 'warrior'\n self.delay = 0.125\n self.damage = self.hp\n super().setImage(Formatter.get_image(self.img, 1))\n super().locate(scene, self.x, self.y)\n super().show()\n super().start()\n\n\nclass Zombie(Monster):\n def __init__(self, x, y, scene):\n super().__init__(x, y, scene)\n self.hp = 2\n self.img = 'zombie'\n self.delay = 0.2\n self.damage = self.hp\n super().setImage(Formatter.get_image(self.img, 1))\n super().locate(scene, self.x, self.y)\n super().show()\n super().start()\n\n\nclass Tower(Creature):\n def __init__(self, x, y, scene):\n super().__init__(x, y, scene)\n self.img = 'cannon'\n self.status = 3\n self.damage = 1\n self.size = 0.7\n self.diff = -20\n self.upgraded = False\n\n super().setImage(Formatter.get_image(self.img, self.status))\n super().locate(scene, self.x, self.y)\n super().show()\n\n def onMouseAction(self, x, y, action):\n pass\n\n def action(self, x, y, damage):\n self.status = int(y/140)+1\n super().setImage(Formatter.get_image(self.img, self.status))\n Bomb(x+self.diff, y, self.size, self.scene).start()\n\n def upgrade(self):\n if self.upgraded is False:\n self.img = 'tank'\n self.damage = 2\n self.size = 1.2\n self.diff = -60\n self.upgraded = True\n super().setImage(Formatter.get_image(self.img, self.status))\n\n\nclass Bomb(Timer):\n def __init__(self, x, y, size, scene):\n self.delay = 0.09\n super().__init__(self.delay)\n self.count = 1\n self.img = 'tower'\n self.object = Object(Formatter.get_effect(self.img, self.count))\n self.object.setScale(size)\n self.object.locate(scene, x, y)\n self.object.show()\n\n def onTimeout(self):\n self.count += 1\n self.object.setImage(Formatter.get_effect(self.img, self.count))\n if self.count < 4:\n self.set(self.delay)\n self.start()\n else:\n self.object.hide()\n","sub_path":"project/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"147482799","text":"import logging\n\nimport sublime\nfrom . 
import util\nfrom jsonschema import validate, FormatChecker, ValidationError\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass Settings:\n \"\"\"This class provides global access to and management of plugin settings.\"\"\"\n\n def __init__(self):\n self._storage = {}\n\n def load(self):\n \"\"\"Load the plugin settings.\"\"\"\n self.observe()\n self.on_update()\n\n @property\n def settings(self):\n return sublime.load_settings(\"SublimeLinter.sublime-settings\")\n\n def has(self, name):\n \"\"\"Return whether the given setting exists.\"\"\"\n return self.settings.has(name)\n\n def get(self, name, default=None):\n \"\"\"Return a plugin setting, defaulting to default if not found.\"\"\"\n return self.settings.get(name, default)\n\n def has_changed(self, name):\n current_value = self.get(name)\n try:\n old_value = self._storage[name]\n except KeyError:\n return False\n else:\n return (old_value != current_value)\n finally:\n self._storage[name] = current_value\n\n def observe(self):\n \"\"\"Observe changes.\"\"\"\n settings = sublime.load_settings(\"SublimeLinter.sublime-settings\")\n settings.clear_on_change('sublimelinter-persist-settings')\n settings.add_on_change('sublimelinter-persist-settings', self.on_update)\n\n def unobserve(self):\n settings = sublime.load_settings(\"SublimeLinter.sublime-settings\")\n settings.clear_on_change('sublimelinter-persist-settings')\n\n def on_update(self):\n \"\"\"\n Update state when the user settings change.\n\n The settings before the change are compared with the new settings.\n Depending on what changes, views will either be redrawn or relinted.\n\n \"\"\"\n if not validate_global_settings():\n return\n\n if self.has_changed('gutter_theme'):\n from . import style\n style.read_gutter_theme()\n\n sublime.run_command('sublime_linter_config_changed')\n\n\ndef get_settings_objects():\n for name in sublime.find_resources(\"SublimeLinter.sublime-settings\"):\n try:\n yield name, util.load_json(name, from_sl_dir=False)\n except (IOError, ValueError):\n pass\n\n\ndef validate_global_settings():\n return validate_settings(get_settings_objects())\n\n\ndef validate_settings(filename_settings_pairs):\n status_msg = \"SublimeLinter - Settings invalid!\"\n schema_file = \"resources/settings-schema.json\"\n schema = util.load_json(schema_file, from_sl_dir=True)\n window = sublime.active_window()\n good = True\n\n for name, settings in filename_settings_pairs:\n if settings:\n try:\n validate(settings, schema, format_checker=FormatChecker())\n except ValidationError as error:\n good = False\n path_to_err = (' > '.join(\n repr(part)\n for part in error.path\n if not isinstance(part, int) # drop array indices\n ) + ': ') if error.path else ''\n\n logger.warning(\"Invalid settings in '{}'\".format(name))\n util.show_message(\n \"Invalid settings in '{}':\\n\"\n '{}{}'.format(name, path_to_err, error.message)\n )\n window.status_message(status_msg)\n\n if good:\n util.clear_message()\n\n return good\n\n\ndef validate_project_settings(filename):\n try:\n with open(filename, 'r') as fh:\n contents = fh.read()\n except IOError:\n return True # Very optimistic\n\n try:\n obj = sublime.decode_value(contents)\n except ValueError:\n return False\n\n settings = obj.get('SublimeLinter', {})\n if len(settings.keys()) >= 2:\n logger.error(\n \"Invalid settings in '{}':\\n\"\n \"Only the key 'linters' is allowed. 
\"\n \"Got {}.\".format(filename, ', '.join(map(repr, settings.keys())))\n )\n return False\n\n return validate_settings([(filename, settings)])\n","sub_path":"lint/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":4113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"49381646","text":"#!/usr/bin/env python\nimport os\nimport math\n\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nsns.set_style('darkgrid')\n\nclass SeriePlots(object):\n \"\"\"Create QC plot per serie.\"\"\"\n\n def __init__(self, capture, serie, pdf):\n \"\"\"Set capture, serie and PDF.\"\"\"\n self.capture = capture\n self.serie = serie\n self.pdf = pdf\n\n def plot_qc_serie(self, df_new, df_arch, nrarchive=None):\n \"\"\"Plot mean serie vs mean archive and stddev serie and archive.\"\"\"\n fig = plt.figure(figsize=(12, 9))\n ax = plt.subplot(211)\n plt.title('{} QC {}'.format(self.serie, self.capture),\n size=20)\n\n ax.scatter(df_arch['Mean'], df_new['Mean'], label=self.serie)\n ax.set_xlim([0, 3.5])\n ax.set_ylim([0, 3.5])\n ax.set_title('Gemiddelde genormaliseerde coverage per target',\n size=20)\n if nrarchive is not None:\n ax.set_xlabel('Archief: {} samples'.format(nrarchive),\n size=15)\n ax.set_ylabel('{}'.format(self.serie), size=15)\n ax.legend()\n\n ax2 = plt.subplot(223)\n ax2.hist(df_arch['Std'].dropna().values, 150)\n ax2.set_xlim([0, 0.5])\n ax2.set_title('Standaarddeviatie archief')\n\n ax3 = plt.subplot(224)\n ax3.hist(df_new['Std'].dropna().values, 150)\n ax3.set_xlim([0, 0.5])\n ax3.set_title('Standaarddeviatie nieuwe serie')\n\n fig.tight_layout()\n self.pdf.savefig()\n plt.close()\n\n\nclass SamplePlots(object):\n\n def __init__(self, sample, outdir=os.getcwd(), badsamples=[]):\n\n self.sample = sample\n self.outdir = outdir\n if self.sample in badsamples:\n self.badsample = True\n if self.sample not in badsamples:\n self.badsample = False\n\n def sample_qc(self, df, serie):\n axmax = list()\n axmin = list()\n\n genes = df.gen.unique()\n\n fig = plt.figure(figsize=(12, 9))\n ax = plt.subplot(111)\n\n colorlist = sns.color_palette(\"husl\", len(genes))\n\n for i, g in enumerate(genes):\n intervalstoplot = df[df['gen'] == g]\n if intervalstoplot.empty:\n continue\n x = list(intervalstoplot['Mean'].values)\n y = list(intervalstoplot[self.sample].values)\n axmax.append(max(x+y))\n axmin.append(min(x+y))\n\n try:\n ax.scatter(x, y, label=g, color=colorlist[i])\n except ValueError as e:\n ax.scatter(x, y, label=g, color='grey')\n print(e, i)\n\n if len(genes) > 100:\n ax.legend(ncol=2, loc='center left', fontsize=2)\n else:\n ax.legend(ncol=2, loc='center left')\n axmax = math.ceil(max(axmax))\n axmin = math.floor(min(axmin))\n\n if axmax < 2:\n axmax = 2\n if axmin > 0:\n axmin = 0\n\n ax.set_xlim([axmin, axmax])\n ax.set_ylim([axmin, axmax])\n a = [_ for _ in range(axmin, axmax+1, 1)]\n b = [_ for _ in range(axmin, axmax+1, 1)]\n ax.plot(a, b)\n if self.badsample:\n plt.title('{} nonarchive'.format(self.sample), size=15)\n elif not self.badsample:\n plt.title(self.sample, size=15)\n fig.tight_layout()\n plt.savefig('{}/QC/{}.png'.format(self.outdir, self.sample),\n dpi=80)\n plt.close()\n\n def plot_cnv_calls(self, data, gene, pdf, targetinfo, serie, poscons=None):\n if poscons is None:\n poscons = dict()\n fig = plt.figure(figsize=(12, 9))\n ax = plt.subplot2grid((8, 1), (0, 0), rowspan=4)\n ax2 = plt.subplot2grid((8, 1), (4, 0), rowspan=2, sharex=ax)\n ax3 = plt.subplot2grid((8, 1), (6, 0), 
rowspan=2, sharex=ax)\n\n        for pat in data:\n            y = data[pat].values\n            x = np.arange(1, len(y) + 1, 1.0)\n\n            if pat == self.sample:\n                if self.badsample:\n                    ax.plot(x, y, 'ro', markersize=8,\n                            label='{}\\nnonarchive'.format(pat))\n                elif not self.badsample:\n                    ax.plot(x, y, 'ro', markersize=8, label=pat)\n            elif pat in serie:\n                ax.plot(x, y, 'bo', markersize=4)\n            # Pos control AND correct gene => label = PosConDnr + Gene\n            elif pat in poscons and gene in poscons[pat]:\n                ax.plot(x, y, '--', markersize=4, label=('PosCon'))\n\n            elif pat in poscons and gene not in poscons[pat]:\n                continue\n            # Archive = black label\n            else:\n                ax.plot(x, y, 'ko', markersize=4)\n\n        if max(data.max().sort_values()) > 10:\n            maxax = max(data.max().sort_values())\n            maxax += 0.5\n        else:\n            maxax = 10\n        if min(data.min().sort_values()) < -10:\n            minax = min(data.min().sort_values())\n            minax -= 0.5\n        else:\n            minax = -10\n\n        ax.set_ylim([minax, maxax])\n        ax.set_ylabel('Z-score', size=15)\n        ax.set_title(gene, size=20)\n        ax.axhline(y=3)\n        ax.axhline(y=-3)\n\n        z = targetinfo['Std'].values\n        x = range(1, len(z) + 1)\n        ax2.plot(x, z, 'ko', markersize=6)\n        ax2.axhline(y=0.15, c='k')\n        ax2.set_ylim([0, 0.5])\n        ax2.set_ylabel('Stdev. genorm. coverage', size=8)\n        ax2.set_title('Variatie')\n\n        z = targetinfo['Mean'].values\n        x = range(1, len(z) + 1)\n        ax3.plot(x, z, 'bo', markersize=6)\n        ax3.axhline(y=0.2, c='b')\n        ax3.set_ylim([0, 2])\n        ax3.set_ylabel('Genorm. coverage', size=8)\n        ax3.set_title('Gemiddelde per regio')\n\n        xticks = np.arange(0, len(data) + 2, 1.0)\n\n        ax.xaxis.set_ticks(xticks)\n        ax2.xaxis.set_ticks(xticks)\n        ax3.xaxis.set_ticks(xticks)\n\n        ax.legend(fontsize=8, ncol=1, loc='upper right',\n                  bbox_to_anchor=(1.05, 1))\n\n        fig.tight_layout(pad=4)\n        pdf.savefig()\n\n        plt.close()\n","sub_path":"pycnv/plots.py","file_name":"plots.py","file_ext":"py","file_size_in_byte":6067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"110211730","text":"import asyncio\r\nimport functools\r\nimport logging\r\nimport os\r\nimport pathlib\r\nimport discord\r\nimport discord.ext.commands as commands\r\nimport youtube_dl\r\nfrom discord import opus\r\n\r\nOPUS_LIBS = ['libopus-0.x86.dll', 'libopus-0.x64.dll', 'libopus-0.dll', 'libopus.so.0', 'libopus.0.dylib']\r\ndef load_opus_lib(opus_libs=OPUS_LIBS):\r\n    if opus.is_loaded():\r\n        return True\r\n    for opus_lib in opus_libs:\r\n        try:\r\n            opus.load_opus(opus_lib)\r\n            return\r\n        except OSError:\r\n            pass\r\n    raise RuntimeError('Could not load an opus lib. 
Tried %s' % (', '.join(opus_libs)))\r\nload_opus_lib()\r\n\r\ndef duration_to_str(seconds):\r\n    hours = seconds // 3600\r\n    seconds %= 3600\r\n    minutes = seconds // 60\r\n    seconds %= 60\r\n    if hours > 0:\r\n        return f"{hours} hours {minutes} minutes {seconds} seconds"\r\n    elif minutes > 0:\r\n        return f"{minutes} minutes {seconds} seconds"\r\n    else:\r\n        return f"{seconds} seconds"\r\n\r\nclass MusicError(commands.UserInputError):\r\n    pass\r\n\r\nclass Song(discord.PCMVolumeTransformer):\r\n    def __init__(self, song_info):\r\n        self.info = song_info.info\r\n        self.requester = song_info.requester\r\n        self.channel = song_info.channel\r\n        self.filename = song_info.filename\r\n        super().__init__(discord.FFmpegPCMAudio(self.filename, before_options='-nostdin', options='-vn'))\r\n\r\nclass SongInfo:\r\n    ytdl_opts = {\r\n        'default_search': 'auto',\r\n        'format': 'bestaudio/best',\r\n        'ignoreerrors': True,\r\n        'source_address': '0.0.0.0', # Make all connections via IPv4\r\n        'nocheckcertificate': True,\r\n        'restrictfilenames': True,\r\n        'logger': logging.getLogger(__name__),\r\n        'logtostderr': False,\r\n        'no_warnings': True,\r\n        'quiet': True,\r\n        'outtmpl': '%(extractor)s-%(id)s-%(title)s.%(ext)s',\r\n        'noplaylist': True\r\n    }\r\n    ytdl = youtube_dl.YoutubeDL(ytdl_opts)\r\n\r\n    def __init__(self, info, requester, channel):\r\n        self.info = info\r\n        self.requester = requester\r\n        self.channel = channel\r\n        self.filename = info.get('_filename', self.ytdl.prepare_filename(self.info))\r\n        self.downloaded = asyncio.Event()\r\n        self.local_file = '_filename' in info\r\n    def __str__(self):\r\n        title = f"**{self.info['title']}**"\r\n        creator = f"**{self.info.get('creator') or self.info['uploader']}**"\r\n        duration = f" (duration: {duration_to_str(self.info['duration'])})" if 'duration' in self.info else ''\r\n        return f'{title} by {creator}{duration}'\r\n\r\n    @classmethod\r\n    async def create(cls, query, requester, channel, loop=None):\r\n        try:\r\n            # Path.is_file() can throw an OSError on syntactically incorrect paths, like urls.\r\n            if pathlib.Path(query).is_file():\r\n                return cls.from_file(query, requester, channel)\r\n        except OSError:\r\n            pass\r\n\r\n        return await cls.from_ytdl(query, requester, channel, loop=loop)\r\n\r\n    @classmethod\r\n    def from_file(cls, file, requester, channel):\r\n        path = pathlib.Path(file)\r\n        if not path.exists():\r\n            raise MusicError(f'File {file} not found.')\r\n\r\n        info = {\r\n            '_filename': file,\r\n            'title': path.stem,\r\n            'creator': 'local file',\r\n        }\r\n        return cls(info, requester, channel)\r\n\r\n    @classmethod\r\n    async def from_ytdl(cls, request, requester, channel, loop=None):\r\n        loop = loop or asyncio.get_event_loop()\r\n\r\n        # Get sparse info about our query\r\n        partial = functools.partial(cls.ytdl.extract_info, request, download=False, process=False)\r\n        sparse_info = await loop.run_in_executor(None, partial)\r\n\r\n        if sparse_info is None:\r\n            raise MusicError(f'Could not retrieve info from input : {request}')\r\n\r\n        # If we get a playlist, select its first valid entry\r\n        if "entries" not in sparse_info:\r\n            info_to_process = sparse_info\r\n        else:\r\n            info_to_process = None\r\n            for entry in sparse_info['entries']:\r\n                if entry is not None:\r\n                    info_to_process = entry\r\n                    break\r\n        if info_to_process is None:\r\n            raise MusicError(f'Could not retrieve info from input : {request}')\r\n\r\n        # Process full video info \r\n        url = info_to_process.get('url', info_to_process.get('webpage_url', info_to_process.get('id')))\r\n        partial = 
functools.partial(cls.ytdl.extract_info, url, download=False)\r\n processed_info = await loop.run_in_executor(None, partial)\r\n\r\n if processed_info is None:\r\n raise MusicError(f'Could not retrieve info from input : {request}')\r\n\r\n # Select the first search result if any\r\n if \"entries\" not in processed_info:\r\n info = processed_info\r\n else:\r\n info = None\r\n while info is None:\r\n try:\r\n info = processed_info['entries'].pop(0)\r\n except IndexError:\r\n raise MusicError(f'Could not retrieve info from url : {info_to_process[\"url\"]}')\r\n\r\n return cls(info, requester, channel)\r\n\r\n async def download(self, loop):\r\n if not pathlib.Path(self.filename).exists():\r\n partial = functools.partial(self.ytdl.extract_info, self.info['webpage_url'], download=True)\r\n self.info = await loop.run_in_executor(None, partial)\r\n self.downloaded.set()\r\n async def wait_until_downloaded(self):\r\n await self.downloaded.wait()\r\n\r\nclass Playlist(asyncio.Queue):\r\n def __iter__(self):\r\n return self._queue.__iter__()\r\n def __str__(self):\r\n info = 'Current playlist:\\n'\r\n for line_num, song in enumerate(self):\r\n if line_num == 0:\r\n info += f'Next - {song}\\n'\r\n elif line_num >= 1:\r\n info += f'#{line_num} - {song}\\n'\r\n return info\r\n\r\n def clear(self):\r\n for song in self._queue:\r\n try:\r\n os.remove(song.filename)\r\n except:\r\n pass\r\n self._queue.clear()\r\n def get_song(self):\r\n return self.get_nowait()\r\n def add_song(self, song):\r\n self.put_nowait(song)\r\n\r\nclass ServerMusicState:\r\n def __init__(self, loop):\r\n self.playlist = Playlist(maxsize=50)\r\n self.voice_client = None\r\n self.loop = loop\r\n self.player_volume = 0.5\r\n self.now_playing_embed, self.now_playing_message, self.thumbnail_message = None, None, None\r\n self.skips = set()\r\n self.min_skips = 5\r\n\r\n @property\r\n def current_song(self):\r\n return self.voice_client.source\r\n\r\n @property\r\n def volume(self):\r\n return self.player_volume\r\n\r\n @volume.setter\r\n def volume(self, value):\r\n self.player_volume = value\r\n if self.voice_client:\r\n self.voice_client.source.volume = value\r\n\r\n def is_playing(self):\r\n return self.voice_client and self.voice_client.is_playing()\r\n\r\n async def stop(self):\r\n self.playlist.clear()\r\n if self.voice_client:\r\n await self.voice_client.disconnect()\r\n self.voice_client = None\r\n\r\n async def cool(self, ctx):\r\n EMOJI_NAME = 'cooldoge'\r\n emoji = discord.utils.get(ctx.bot.emojis, name=EMOJI_NAME)\r\n return(str(emoji))\r\n\r\n async def play_next_song(self, requester, song=None, error=None):\r\n \r\n if error:\r\n await self.current_song.channel.send(f'An error has occurred while playing {self.current_song}: {error}')\r\n\r\n if song and not song.local_file and song.filename not in [s.filename for s in self.playlist]:\r\n os.remove(song.filename)\r\n\r\n if self.playlist.empty():\r\n await self.stop()\r\n else:\r\n next_song_info = self.playlist.get_song()\r\n await next_song_info.wait_until_downloaded()\r\n source = Song(next_song_info)\r\n source.volume = self.player_volume\r\n self.voice_client.play(source, after=lambda e: asyncio.run_coroutine_threadsafe(self.play_next_song(next_song_info, e), self.loop).result())\r\n self.now_playing_embed = discord.Embed(color=0x4B0082)\r\n self.now_playing_embed.set_thumbnail(url=next_song_info.info['thumbnail'])\r\n self.now_playing_embed.set_author(name=\"Requested\", icon_url=requester.avatar_url)\r\n self.now_playing_embed.add_field(name=\"now Playing\", 
value=next_song_info.info['title'], inline=False)\r\n self.now_playing_embed.add_field(name=\"🎤 Artist\", value=next_song_info.info['uploader'], inline=True)\r\n self.now_playing_embed.add_field(name=\":spy: Requested By\", value=requester.mention, inline=True)\r\n self.now_playing_embed.add_field(name=\":thumbsup: likes\", value=next_song_info.info['like_count'], inline=True)\r\n self.now_playing_embed.add_field(name=\"⏱ Duration\", value=duration_to_str(next_song_info.info['duration']), inline=True)\r\n self.now_playing_embed.add_field(name=\"🔗 URL\", value=next_song_info.info['webpage_url'], inline=True)\r\n self.now_playing_message = await next_song_info.channel.send(embed=self.now_playing_embed)\r\n reactions = ['▶', '⏸', '⏹', '⏭', '🔉', '🔊']\r\n for reaction in reactions:\r\n await self.now_playing_message.add_reaction(reaction)\r\n\r\nclass Music:\r\n def __init__(self, bot):\r\n self.bot = bot\r\n self.playlist = Playlist(maxsize=50)\r\n self.music_states = {}\r\n self.thumbnail_message = None\r\n\r\n def __unload(self):\r\n for state in self.music_states.values():\r\n self.bot.loop.create_task(state.stop())\r\n\r\n def __local_check(self, ctx):\r\n if not ctx.guild:\r\n raise commands.NoPrivateMessage('This command cannot be used in a private message.')\r\n return True\r\n\r\n async def __before_invoke(self, ctx):\r\n ctx.music_state = self.get_music_state(ctx.guild.id)\r\n\r\n async def __error(self, ctx, error):\r\n if not isinstance(error, commands.UserInputError):\r\n raise error\r\n\r\n try:\r\n await ctx.send(error)\r\n except discord.Forbidden:\r\n pass # /shrug\r\n\r\n def get_music_state(self, guild_id):\r\n return self.music_states.setdefault(guild_id, ServerMusicState(self.bot.loop))\r\n\r\n @commands.command()\r\n async def status(self, ctx):\r\n \"\"\"Displays the currently played song.\"\"\"\r\n if ctx.music_state.is_playing():\r\n song = ctx.music_state.current_song\r\n await ctx.send(f'Playing {song}. 
Volume at {song.volume * 100}% in {ctx.voice_client.channel.mention}')\r\n        else:\r\n            await ctx.send('Not playing.')\r\n\r\n    @commands.command()\r\n    @commands.has_permissions(manage_guild=True)\r\n    async def join(self, ctx, *, channel: discord.VoiceChannel = None):\r\n        """Summons the bot to a voice channel.\r\n        If no channel is given, summons it to your current voice channel.\r\n        """\r\n        if channel is None and not ctx.author.voice:\r\n            raise MusicError('You are not in a voice channel nor specified a voice channel for me to join.')\r\n\r\n        destination = channel or ctx.author.voice.channel\r\n\r\n        if ctx.voice_client:\r\n            await ctx.voice_client.move_to(destination)\r\n        else:\r\n            ctx.music_state.voice_client = await destination.connect()\r\n\r\n    @commands.command(aliases=['p', 'music'])\r\n    async def play(self, ctx, *, song_name: str):\r\n\r\n        await ctx.message.add_reaction('⏳')\r\n\r\n        if not ctx.voice_client:\r\n            if not ctx.author.voice:\r\n                await ctx.message.delete()\r\n                embed = discord.Embed(title="❌ | You are not connected to a Voice Channel.", color=16711680)\r\n                await ctx.send(embed=embed)\r\n                return\r\n            ctx.music_state.voice_client = await ctx.author.voice.channel.connect()\r\n\r\n        # Create the SongInfo\r\n        song = await SongInfo.from_ytdl(song_name, ctx.author, ctx.channel, loop=ctx.bot.loop)\r\n\r\n        # Add the info to the playlist\r\n        try:\r\n            ctx.music_state.playlist.add_song(song)\r\n        except asyncio.QueueFull:\r\n            await ctx.message.delete()\r\n            embed = discord.Embed(title="❌ | Sorry, the queue is full, try again later...", color=16711680)\r\n            await ctx.send(embed=embed)\r\n            return\r\n\r\n        if not ctx.music_state.is_playing():\r\n            # Download the song and play it\r\n            await song.download(ctx.bot.loop)\r\n            await ctx.music_state.play_next_song(ctx.author)\r\n        else:\r\n            # Schedule the song's download\r\n            ctx.bot.loop.create_task(song.download(ctx.bot.loop))\r\n            embed = discord.Embed(color=0x4B0082)\r\n            embed.set_author(name="Added to queue", icon_url=ctx.author.avatar_url)\r\n            embed.set_thumbnail(url=song.info['thumbnail'])\r\n            embed.add_field(name="🎺 Queued", value=song.info['title'], inline=False)\r\n            embed.add_field(name="🎤 Artist", value=song.info['uploader'], inline=True)\r\n            embed.add_field(name="🥇 Position", value=f"#**{ctx.music_state.playlist.qsize()}**", inline=True)\r\n            embed.add_field(name=":spy: Added to Queue By", value=ctx.author.mention, inline=True)\r\n            embed.add_field(name="⏱ Duration", value=duration_to_str(song.info['duration']), inline=True)\r\n            embed.add_field(name="🔗 URL", value=song.info['webpage_url'], inline=True)\r\n            await ctx.send(embed=embed)\r\n        await ctx.message.remove_reaction('⏳', ctx.me)\r\n        await ctx.message.add_reaction('✅')\r\n\r\n    @commands.command()\r\n    async def pause(self, ctx):\r\n        """Pauses the player."""\r\n        if not ctx.music_state.is_playing():\r\n            embed = discord.Embed(title="❌ I am not playing anything to pause right now.", color=16711680)\r\n            await ctx.send(embed=embed)\r\n            return\r\n        if ctx.voice_client:\r\n            ctx.voice_client.pause()\r\n            await ctx.send('Music paused :thumbsup: ')\r\n\r\n    @commands.command()\r\n    async def resume(self, ctx):\r\n        """Resumes the player."""\r\n        if not ctx.voice_client or not ctx.voice_client.is_paused():\r\n            embed = discord.Embed(title="❌ I am not playing anything to resume right now.", color=16711680)\r\n            await ctx.send(embed=embed)\r\n            return\r\n        ctx.voice_client.resume()\r\n        await ctx.send('Music resumed :thumbsup: ')\r\n\r\n    @commands.command()\r\n    async def stop(self, ctx):\r\n        """Stops the player, 
clears the playlist and leaves the voice channel."""\r\n        if not ctx.music_state.is_playing():\r\n            embed = discord.Embed(title="❌ I'm currently not playing music!", color=16711680)\r\n            await ctx.send(embed=embed)\r\n        else:\r\n            await ctx.music_state.stop()\r\n            await ctx.send('Music stopped :thumbsup: ')\r\n\r\n    @commands.command(aliases=['vol'])\r\n    async def volume(self, ctx, volume: int = None):\r\n        if volume is None or volume < 0 or volume > 100:\r\n            embed = discord.Embed(title="ℹ | The volume has to be between 0 and 100.", color=16711680)\r\n            await ctx.send(embed=embed)\r\n            return\r\n        ctx.music_state.volume = volume / 100\r\n        await ctx.send(f'Volume set to {volume}')\r\n\r\n    @commands.command()\r\n    async def empty(self, ctx):\r\n        """Clears the playlist."""\r\n        ctx.music_state.playlist.clear()\r\n        await ctx.send('Playlist cleared :thumbsup: .')\r\n\r\n    @commands.command()\r\n    async def skip(self, ctx):\r\n        if not ctx.music_state.is_playing():\r\n            await ctx.message.delete()\r\n            embed = discord.Embed(title="❌ | I am not playing anything to skip right now.", color=16711680)\r\n            await ctx.send(embed=embed)\r\n            return\r\n        ctx.voice_client.stop()\r\n\r\n    @commands.command(aliases=['playlist'])\r\n    async def pl(self, ctx):\r\n        try:\r\n            embed = discord.Embed(color=0x4B0082)\r\n            embed.add_field(name="💽 | Current Playlist", value=ctx.music_state.playlist)\r\n            await ctx.send(embed=embed)\r\n        except:\r\n            embed = discord.Embed(title="ℹ | The playlist is empty.", color=14733520)\r\n            await ctx.send(embed=embed)\r\n\r\n    @commands.command()\r\n    async def vskip(self, ctx):\r\n        """Votes to skip the current song.\r\n        To configure the minimum number of votes needed, use `minskips`\r\n        """\r\n\r\n        if not ctx.music_state.is_playing():\r\n            raise MusicError('Not playing anything to skip.')\r\n        k = await self.bot.db.music.find_one({"gid" : ctx.guild.id})\r\n        minskips = k['minskips']\r\n        if ctx.author.id in ctx.music_state.skips:\r\n            raise MusicError(f'{ctx.author.mention} You already voted to skip that song')\r\n\r\n        # Count the vote\r\n        ctx.music_state.skips.add(ctx.author.id)\r\n        await ctx.message.add_reaction('\N{WHITE HEAVY CHECK MARK}')\r\n\r\n        # Check if the song has to be skipped\r\n        if len(ctx.music_state.skips) > minskips or ctx.author == ctx.music_state.current_song.requester:\r\n            ctx.music_state.skips.clear()\r\n            ctx.voice_client.stop()\r\n\r\n    @commands.command()\r\n    @commands.has_permissions(manage_guild=True)\r\n    async def minskips(self, ctx, number: int):\r\n        """Sets the minimum number of votes to skip a song.\r\n        Requires the `Manage Guild` permission.\r\n        """\r\n        ctx.music_state.min_skips = number\r\n        await self.bot.db.music.update_one({"gid" : ctx.guild.id}, {"$set" : {"minskips" : number}}, upsert = True)\r\n\r\n    @commands.command(aliases=['cp'])\r\n    async def control_panel(self, ctx):\r\n        embed = discord.Embed(title='🎵 | Music Control Panel', color=0x4B0082)\r\n        panel = await ctx.send(embed=embed)\r\n        panel_objects = ['▶', '⏸', '⏹', '⏭', '🔉', '🔊']\r\n        for reaction in panel_objects:\r\n            await panel.add_reaction(reaction)\r\n\r\n    async def on_reaction_add(self, reaction, user):\r\n        if user != self.bot.user:\r\n            if reaction.emoji == '⏸':\r\n                reaction.message.guild.voice_client.pause()\r\n                await reaction.message.remove_reaction(reaction, user)\r\n            if reaction.emoji == '▶':\r\n                reaction.message.guild.voice_client.resume()\r\n                await reaction.message.remove_reaction(reaction, user)\r\n            if reaction.emoji == '⏹':\r\n                try:\r\n                    reaction.message.guild.voice_client.stop()\r\n                    
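# teardown order: stop the current song, drop any queued songs, then leave the voice channel\r\n                    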
self.get_music_state(reaction.message.guild.id).playlist.clear()\r\n                    await reaction.message.guild.voice_client.disconnect()\r\n                except AttributeError:\r\n                    return\r\n                await reaction.message.clear_reactions()\r\n            if reaction.emoji == '⏭':\r\n                reaction.message.guild.voice_client.stop()\r\n                await reaction.message.remove_reaction(reaction, user)\r\n            if reaction.emoji == '🔉':\r\n                self.get_music_state(reaction.message.guild.id).volume -= 10 / 100\r\n                await reaction.message.remove_reaction(reaction, user)\r\n            if reaction.emoji == '🔊':\r\n                self.get_music_state(reaction.message.guild.id).volume += 10 / 100\r\n                await reaction.message.remove_reaction(reaction, user)\r\n\r\ndef setup(bot):\r\n    bot.add_cog(Music(bot))","sub_path":"cogs/music.py","file_name":"music.py","file_ext":"py","file_size_in_byte":19684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"164272179","text":"from threading import Event\nfrom threading import Thread\nfrom time import sleep\n\nimport Player\nimport Map_\nimport Enemy\n\nfrom tools import dg_decorators\nfrom tools.IO import game_save\nfrom tools.IO import game_load\nfrom tools.dg_exсeptios import LoadGameError\nfrom tools.dg_exсeptios import OutOfMapRangeError\nfrom tools.dg_logging import DG_loger as log\n\n\nclass Game:\n\n    _player_move_comand = { 1:'Up', 2:'Dawn', 3:'Right', 4:'Left', 5:'Save game', 6:'Quite'}\n\n\n    @dg_decorators.decorator_start_end_logging\n    def __init__(self):\n        self.player = Player.Player()\n        self.map_ = Map_.Map_(10)\n        self.enemy = Enemy.Enemy()\n        self.__game_end_event = Event()\n\n        self.player.position = self.map_.get_free_cell()\n        self.enemy.position = self.map_.get_free_cell()\n\n\n    @dg_decorators.decorator_start_end_logging\n    def loop(self):\n\n        self._print_the_spawn_location()\n\n        enemy_updater = Thread(target= self.__enemy_update)\n        player_updater = Thread(target=self.__player_update)\n\n        player_updater.start()\n        enemy_updater.start()\n\n        player_updater.join()\n        self.__game_end_event.set()\n        enemy_updater.join()\n\n\n    @dg_decorators.decorator_start_end_logging\n    def save(self):\n        save_data = [self.map_, self.player]\n        game_save(save_data)\n\n\n    @dg_decorators.decorator_start_end_logging\n    def load(self):\n        save_data = game_load()\n\n        if len(save_data) == 2 and isinstance(save_data[0], Map_.Map_) and isinstance(save_data[1], Player.Player):\n            self.map_ = save_data[0]\n            self.player = save_data[1]\n        else:\n            raise LoadGameError("Incorrect data in save file")\n\n\n    @dg_decorators.decorator_start_end_logging\n    def _player_input(self):\n        print(f"Your action : {[f'{key} - {val},' for key, val in self._player_move_comand.items()]}")\n        action_type = int(input('-> '))\n\n        result = self._player_move_comand[action_type]\n        return result\n\n\n    @dg_decorators.decorator_start_end_logging\n    def _proscess_player_move(self):\n\n        sell_type = self.map_.get_cell_type(self.player.position)\n        if sell_type == self.map_.cell_treasure:\n            self.map_.set_cell_type(self.player.position, self.map_.cell_free)\n            self.player.score += 1\n        elif sell_type == self.map_.cell_trap:\n            self.map_.set_cell_type(self.player.position, self.map_.cell_free)\n            self.player.HP -= 1\n        elif sell_type == self.map_.cell_border:\n            print("Wow look like wall. 
Try another way\")\n self.player.undo_move()\n\n\n @dg_decorators.decorator_start_end_logging\n def _print_the_spawn_location(self):\n top_left_cell = [(cord - 1) for cord in self.player.position]\n location = []\n for i in range(3):\n y = top_left_cell[1] + i\n location.append([self.map_.get_cell_type([(top_left_cell[0] + j), y]) for j in range(3)])\n \n location[1][1] = self.map_.cell_player\n print(*[f'\\n{line}' for line in location[::-1]], '\\n----------')\n\n\n @dg_decorators.decorator_start_end_logging\n def _spell_game_state(self):\n\n for val in self.player.move_types.values():\n y = self.player.position[1] + val[1]\n x = self.player.position[0] + val[0]\n cell_type = self.map_.field[y][x]\n\n if cell_type == self.map_.cell_trap:\n print('Trap is near')\n elif cell_type == self.map_.cell_treasure:\n print('Treasure is near')\n\n if self.enemy.position == [x, y]:\n print('Enemy is near')\n\n\n @dg_decorators.decorator_start_end_logging\n def __enemy_update(self):\n \"\"\"\n Moving enemy while game_end event does not occur\n \"\"\"\n while self.__game_end_event.is_set() == False and self.player.HP > 0:\n self.enemy.move()\n self.__proscess_enemy_move()\n sleep(0.1)\n\n\n @dg_decorators.decorator_start_end_logging\n def __player_update(self):\n while self.player.HP > 0 and self.player.score < 3:\n self._spell_game_state()\n try:\n players_action = self._player_input()\n except Exception:\n log.error('Incorect input')\n else:\n if players_action in self.player.move_types:\n self.player.move(players_action)\n self._proscess_player_move()\n elif players_action == 'Save game':\n try:\n self.save()\n except Ellipsis as error:\n log.error('Can not save game')\n log.error(error)\n elif players_action == 'Quite':\n return\n\n else:\n print(f'You {\"Won\" if self.player.score == 3 else \"Lose\"}')\n self.map_.set_cell_type(self.player.position, self.map_.cell_player)\n self.map_.set_cell_type(self.enemy.position, self.map_.cell_enemy)\n print(self)\n\n\n @dg_decorators.decorator_start_end_logging\n def __proscess_enemy_move(self):\n try:\n sell_type = self.map_.get_cell_type(self.enemy.position)\n except OutOfMapRangeError:\n self.enemy.position = self.map_.get_free_cell()\n\n else:\n if self.enemy.position == self.player.position:\n self.player.HP -= 1\n print(\"Enemy found you!!!!\")\n self.enemy.position = self.map_.get_free_cell()\n\n if sell_type == self.map_.cell_border:\n self.enemy.undo_move()\n\n\n @dg_decorators.decorator_start_end_logging\n def __str__(self):\n return f'Player:\\n{self.player}\\n\\nMap:\\n{self.map_}\\n'","sub_path":"Andrii_Fokin/10/build/lib/dungeon_game_afokin/Game_.py","file_name":"Game_.py","file_ext":"py","file_size_in_byte":5791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"180329937","text":"# -*- coding: utf-8 -*-\nfrom ludaweb import app\nfrom ludaweb.models.applications import Application\nfrom ludaweb.models.models import db\nfrom flask import Flask, request, session, url_for, redirect, render_template, abort, g, flash, _app_ctx_stack\n\nfrom ludaweb.models.params import Param\n\n\n@app.endpoint('applications')\ndef index():\n return render_template('applications.html')\n\n\n@app.endpoint('addapplications')\ndef add():\n return render_template('applications_add.html')\n\n\n@app.endpoint('saveapplications')\ndef save():\n appId = request.form['appId']\n appName = request.form['appName']\n appDes = request.form['appDes']\n appUrl = request.form['appUrl']\n\n argNames = 
request.values.getlist('argName')\n\n argNames = map(lambda x: Param(appId=appId, argName=x), argNames)\n\n argNames = list(argNames)\n\n application = Application(appId=appId, appName=appName, appDes=appDes, url=appUrl, xybz=1, args=argNames)\n\n db.session.add(application)\n try:\n #db.session.commit()\n app.logger.debug('db.session.commit()')\n except Exception as err:\n app.logger.error(err)\n db.session.rollback()\n return \"fail\"\n return \"success\"\n","sub_path":"ludaweb/controllers/applications.py","file_name":"applications.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"485567805","text":"'''\n BiLSTM-CRF for NER\n'''\nimport tensorflow as tf\nimport numpy as np\nimport pickle\nimport os\nimport sys\nimport matplotlib.pylab as plt\nfrom tensorflow.contrib.data import AUTOTUNE\n\ntf.flags.DEFINE_integer('batch_size', 100, 'batch size for training')\ntf.flags.DEFINE_integer('epochs', 200000, 'number of iterations')\ntf.flags.DEFINE_integer('embedding_size', 50, 'embedding size for word embedding')\ntf.flags.DEFINE_integer('rnn_size', 128, 'units of rnn')\ntf.flags.DEFINE_string('model_save_path', 'model/nercrf1/', 'directory of model file saved')\ntf.flags.DEFINE_float('lr', 0.001, 'learning rate for training')\ntf.flags.DEFINE_float('keep_prob', 0.5, 'rate for dropout')\ntf.flags.DEFINE_integer('per_save', 50, 'save model once every per_save iterations')\ntf.flags.DEFINE_string('mode', 'train0', 'The mode of train or predict as follows: '\n 'train0: train first time or retrain'\n 'train1: continue train'\n 'predict: predict')\n\nCONFIG = tf.flags.FLAGS\n\n\ndef single_example_parser(serialized_example):\n context_features = {\n 'length': tf.FixedLenFeature([], tf.int64)\n }\n sequence_features = {\n 'sen': tf.FixedLenSequenceFeature([],\n tf.int64),\n 'ner': tf.FixedLenSequenceFeature([],\n tf.int64)}\n\n context_parsed, sequence_parsed = tf.parse_single_sequence_example(\n serialized=serialized_example,\n context_features=context_features,\n sequence_features=sequence_features\n )\n\n length = context_parsed['length']\n\n sen = sequence_parsed['sen']\n ner = sequence_parsed['ner']\n return sen, ner, length\n\n\ndef batched_data(tfrecord_filename, single_example_parser, batch_size, padded_shapes, buffer_size=1000,\n shuffle=True):\n dataset = tf.data.TFRecordDataset(tfrecord_filename) \\\n .map(single_example_parser) \\\n .padded_batch(batch_size, padded_shapes=padded_shapes) \\\n .repeat() \\\n .prefetch(buffer_size=AUTOTUNE)\n if shuffle:\n dataset = dataset.shuffle(buffer_size)\n\n return dataset.make_one_shot_iterator().get_next()\n\n\nclass NER():\n def __init__(self, config, word_dict_len, ner_dict_len):\n self.config = config\n self.word_dict_len = word_dict_len\n self.ner_dict_len = ner_dict_len\n\n def build_model(self):\n with tf.name_scope('input'):\n sen = tf.placeholder(tf.int32, [None, None], name='sentences')\n ner = tf.placeholder(tf.int32, [None, None], name='ners')\n length = tf.placeholder(tf.int32, [None], name='length')\n with tf.name_scope('embedding'):\n transition_params = tf.Variable(tf.random_uniform([self.ner_dict_len, self.ner_dict_len], 0.0, 1.0),\n dtype=tf.float32)\n\n embedding_matrx = tf.Variable(\n tf.random_uniform([self.word_dict_len, self.config.embedding_size], -1.0, 1.0),\n dtype=tf.float32)\n embedded_sen = tf.nn.embedding_lookup(embedding_matrx, sen)\n\n with tf.name_scope('birnn'):\n rnn_fw = tf.nn.rnn_cell.GRUCell(self.config.rnn_size, 
name='fw')\n rnn_bw = tf.nn.rnn_cell.GRUCell(self.config.rnn_size, name='bw')\n\n birnn_outputs, _ = tf.nn.bidirectional_dynamic_rnn(rnn_fw,\n rnn_bw,\n embedded_sen,\n sequence_length=length,\n dtype=tf.float32)\n fb_outputs = tf.concat(birnn_outputs, 2)\n\n logits = tf.reshape(\n tf.layers.dense(tf.reshape(fb_outputs, [-1, 2 * self.config.rnn_size]), self.ner_dict_len),\n [tf.shape(sen)[0], -1, self.ner_dict_len])\n\n with tf.name_scope('loss'):\n log_likelihood, _ = tf.contrib.crf.crf_log_likelihood(logits, ner, length, transition_params)\n viterbi_sequence, _ = tf.contrib.crf.crf_decode(logits, transition_params, length)\n\n prediction = tf.identity(viterbi_sequence, name='prediction')\n loss = tf.reduce_mean(-log_likelihood, name='loss')\n\n masks = tf.sequence_mask(length, tf.reduce_max(length))\n accuracy = tf.cast(tf.equal(viterbi_sequence, ner), tf.float32)\n accuracyf = tf.zeros_like(accuracy)\n accuracy = tf.div(tf.reduce_sum(tf.where(masks, accuracy, accuracyf)),\n tf.cast(tf.reduce_sum(length), tf.float32), name='accuracy')\n\n optimizer = tf.train.AdamOptimizer(learning_rate=self.config.lr, name='optimizer')\n gradients = optimizer.compute_gradients(loss)\n clipped_gradients = [(tf.clip_by_value(grad, -5., 5.), var) for grad, var in gradients if grad is not None]\n train_op = optimizer.apply_gradients(clipped_gradients, name='train_op')\n\n writer = tf.summary.FileWriter(self.config.model_save_path, graph=tf.get_default_graph())\n writer.flush()\n writer.close()\n print('Graph saved successfully!')\n\n sess = tf.Session()\n sess.run(tf.global_variables_initializer())\n\n number_trainable_variables = 0\n variable_names = [v.name for v in tf.trainable_variables()]\n values = sess.run(variable_names)\n for k, v in zip(variable_names, values):\n print(\"Variable: \", k)\n print(\"Shape: \", v.shape)\n number_trainable_variables += np.prod([s for s in v.shape])\n print('Number of parameters: %d' % number_trainable_variables)\n\n saver = tf.train.Saver(max_to_keep=1)\n saver.save(sess, self.config.model_save_path)\n sess.close()\n print('Model saved successfully!')\n\n def train(self):\n train_file = ['data/train.tfrecord']\n valid_file = ['data/valid.tfrecord']\n\n train_batch = batched_data(train_file, single_example_parser, self.config.batch_size,\n padded_shapes=([-1], [-1], []))\n valid_batch = batched_data(valid_file, single_example_parser, 110,\n padded_shapes=([-1], [-1], []),\n shuffle=False)\n sess = tf.Session()\n\n newsaver = tf.train.import_meta_graph(self.config.model_save_path + '.meta')\n newsaver.restore(sess, self.config.model_save_path)\n\n graph = tf.get_default_graph()\n sen = graph.get_operation_by_name('input/sentences').outputs[0]\n ner = graph.get_operation_by_name('input/ners').outputs[0]\n length = graph.get_operation_by_name('input/length').outputs[0]\n\n accuracy = graph.get_tensor_by_name('loss/accuracy:0')\n loss = graph.get_tensor_by_name('loss/loss:0')\n train_op = graph.get_operation_by_name('loss/train_op')\n\n loss_ = []\n acc_ = []\n for epoch in range(1, self.config.epochs + 1):\n train_batch_ = sess.run(train_batch)\n\n feed_dict = {sen: train_batch_[0],\n ner: train_batch_[1],\n length: train_batch_[2]\n }\n loss_batch, _, acc_batch = sess.run([loss, train_op, accuracy], feed_dict=feed_dict)\n loss_.append(loss_batch)\n acc_.append(acc_batch)\n\n sys.stdout.write('\\r>> %d/%d | loss_batch: %f acc_batch:%.3f' % (\n epoch, self.config.epochs, loss_batch, acc_batch))\n sys.stdout.flush()\n\n if epoch % self.config.per_save == 0:\n valid_batch_ = 
sess.run(valid_batch)\n\n valid_feed_dict = {sen: valid_batch_[0],\n ner: valid_batch_[1],\n length: valid_batch_[2]\n }\n valid_acc = sess.run(accuracy, feed_dict=valid_feed_dict)\n\n sys.stdout.write(' train_loss: %f train_acc: %.3f | valid_acc:%.3f\\n'\n % (np.mean(loss_[-self.config.batch_size:]),\n np.mean(acc_[-self.config.batch_size:]),\n valid_acc))\n sys.stdout.flush()\n\n newsaver.save(sess, self.config.model_save_path)\n print('model saved successfully!')\n\n sess.close()\n\n fig = plt.figure(figsize=(10, 8))\n plt.plot(loss_)\n plt.savefig(self.config.model_save_path + 'loss.png')\n plt.close(fig)\n\n def predict(self, word_dict, ner_dict):\n ner_reverse_dict = {v: k for k, v in ner_dict.items()}\n\n sentences = [\n '第二十二届 国际 检察官 联合会 年会 暨 会员 代表大会 11 日 上午 在 北京 开幕 。 国家 主席 习近平 发来 贺信 , 对 会议 召开 表示祝贺 。',\n '重庆市 江边 未建 投放 垃圾 的 设施 , 居民 任意 向 江边 倒 脏物 。',\n '伪造 、 买卖 、 非法 提供 、 非法 使用 武装部队 专用 标志 罪'\n ]\n\n m_samples = len(sentences)\n\n sent = []\n leng = []\n for sentence in sentences:\n sen2id = [word_dict[word] if word in word_dict.keys() else word_dict[''] for word in\n sentence.split(' ')]\n sent.append(sen2id)\n leng.append(len(sen2id))\n\n max_len = np.max(leng)\n for i in range(m_samples):\n if leng[i] < max_len:\n sent[i] += [word_dict['']] * (max_len - leng[i])\n\n sess = tf.Session()\n\n newsaver = tf.train.import_meta_graph(self.config.model_save_path + '.meta')\n newsaver.restore(sess, self.config.model_save_path)\n\n graph = tf.get_default_graph()\n sen = graph.get_operation_by_name('input/sentences').outputs[0]\n length = graph.get_operation_by_name('input/length').outputs[0]\n\n prediction = graph.get_tensor_by_name('loss/prediction:0')\n\n feed_dict = {sen: sent,\n length: leng\n }\n prediction_ = sess.run(prediction, feed_dict=feed_dict)\n\n for i in range(m_samples):\n tmp = []\n for idx in prediction_[i]:\n tmp.append(ner_reverse_dict[idx])\n sys.stdout.write('SEN: %s\\n' % (sentences[i]))\n sys.stdout.write('NER: %s\\n\\n' % (' '.join(tmp[:leng[i]])))\n sys.stdout.flush()\n\n sess.close()\n\n\ndef main(unused_argv):\n with open('data/word_dict.txt', 'rb') as f:\n word_dict = pickle.load(f)\n with open('data/ner_dict.txt', 'rb') as f:\n ner_dict = pickle.load(f)\n\n ner = NER(CONFIG, len(word_dict), len(ner_dict))\n if CONFIG.mode == 'train0':\n if not os.path.exists(CONFIG.model_save_path):\n os.makedirs(CONFIG.model_save_path)\n ner.build_model()\n ner.train()\n elif CONFIG.mode == 'train1':\n ner.train()\n elif CONFIG.mode == 'predict':\n ner.predict(word_dict, ner_dict)\n\n\nif __name__ == '__main__':\n tf.app.run()\n","sub_path":"NER/myner_crf1.py","file_name":"myner_crf1.py","file_ext":"py","file_size_in_byte":11266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"499577000","text":"# -*- coding:utf-8 -*-\nclass Solution:\n def reOrderArray(self, array):\n # write code here\n array.sort() #从小到大排序\n oushu = []; jishu = []\n for i in range(len(array)):\n if array[i]%2 == 0:\n oushu.append(array[i])\n else:\n jishu.append(array[i])\n return jishu+oushu\n \nS = Solution()\ntest = [1,2,3,4,5,6,7]\nprint(S.reOrderArray(test))","sub_path":"13.py","file_name":"13.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"82738844","text":"import math\nimport collections\nimport heapq\nimport bisect\nimport functools\n\n\ndef solve(a, r, n):\n for i in range(n):\n if a[i] > i:\n return -1\n if r[i] > i:\n return -1\n b = [0] * n\n s 
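# Sketch of the same odds-before-evens reordering without the up-front sort: a
# stable partition that keeps each group's original relative order (the version in
# the record above sorts the whole array first, which changes that order).
def reorder_stable(values):
    odds = [v for v in values if v % 2 == 1]
    evens = [v for v in values if v % 2 == 0]
    return odds + evens

print(reorder_stable([1, 2, 3, 4, 5, 6, 7]))   # [1, 3, 5, 7, 2, 4, 6]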
= set()\n for i in range(n - 1, -1, -1):\n v = n - (a[i] + r[n - 1 - i])\n if v in s or v <= 0:\n return -1\n b[i] = v\n s.add(v)\n return \" \".join(str(x) for x in b)\n\n\nif __name__ == '__main__':\n t = int(input())\n for _ in range(t):\n n = int(input())\n ia = list(map(int, input().split()))\n ir = list(map(int, input().split()))\n res = solve(ia, ir, n)\n print(res)\n","sub_path":"hackerearth/2022/ds/1d_arr/double-inversions.py","file_name":"double-inversions.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"398365321","text":"# vim:ts=4:sw=4:et:\n# Copyright 2012-present Facebook, Inc.\n# Licensed under the Apache License, Version 2.0\nimport WatchmanTestCase\nimport tempfile\nimport os\nimport os.path\nimport json\nfrom pywatchman import bser\nimport subprocess\nimport WatchmanInstance\nimport unittest\n\n\nclass TestDashJCliOption(unittest.TestCase):\n\n def getSockPath(self):\n return WatchmanInstance.getSharedInstance().getSockPath()\n\n def doJson(self, addNewLine):\n sockname = self.getSockPath()\n watchman_cmd = json.dumps(['get-sockname'])\n if addNewLine:\n watchman_cmd = watchman_cmd + \"\\n\"\n\n cli_cmd = [\n 'watchman',\n '--sockname={}'.format(sockname),\n '--no-spawn',\n '--no-local',\n '-j',\n ]\n proc = subprocess.Popen(cli_cmd,\n stdin=subprocess.PIPE,\n stderr=subprocess.PIPE,\n stdout=subprocess.PIPE)\n\n stdout, stderr = proc.communicate(input=watchman_cmd)\n self.assertEqual(proc.poll(), 0, stderr)\n # the response should be json because that is the default\n result = json.loads(stdout)\n self.assertEqual(result['sockname'], sockname)\n\n def test_jsonInputNoNewLine(self):\n self.doJson(False)\n\n def test_jsonInputNewLine(self):\n self.doJson(True)\n\n def test_bserInput(self):\n sockname = self.getSockPath()\n watchman_cmd = bser.dumps(['get-sockname'])\n cli_cmd = [\n 'watchman',\n '--sockname={}'.format(sockname),\n '--no-spawn',\n '--no-local',\n '-j',\n ]\n proc = subprocess.Popen(cli_cmd,\n stdin=subprocess.PIPE,\n stderr=subprocess.PIPE,\n stdout=subprocess.PIPE)\n\n stdout, stderr = proc.communicate(input=watchman_cmd)\n self.assertEqual(proc.poll(), 0, stderr)\n # the response should be bser to match our input\n result = bser.loads(stdout)\n self.assertEqual(result['sockname'], sockname, stdout.encode('hex'))\n","sub_path":"tests/integration/test_bser_cli.py","file_name":"test_bser_cli.py","file_ext":"py","file_size_in_byte":2168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"107492337","text":"'''\n\nThis module has three purpose:\n1. change the user input to a list\n2. change a dictionary to a list of strings\n3. 
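# Minimal sketch of the round-trip idea in the watchman CLI test above: feed a JSON
# command to a subprocess on stdin and decode its stdout. Python itself stands in
# for the watchman binary here so the example stays self-contained.
import json
import subprocess
import sys

cmd = [sys.executable, '-c',
       'import json,sys; req=json.load(sys.stdin); print(json.dumps({"echo": req}))']
proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, text=True)
out, _ = proc.communicate(input=json.dumps(['get-sockname']))
assert proc.returncode == 0
print(json.loads(out))   # {'echo': ['get-sockname']}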
create another way for keyboard button, facilitate the process of calling a keyboard.\n\n'''\nimport telepot\nfrom telepot.namedtuple import InlineKeyboardMarkup, InlineKeyboardButton, ForceReply\n\nclass Tools:\n\t@staticmethod\n\tdef msg_processor(raw_info):\n\t\tfield_component = raw_info.split(\",\")\n\t\tfield = []\n\t\tfor item in field_component:\n\t\t\tif item != '':\n\t\t\t\tfield.append(item)\n\t\treturn field\n\n\t@staticmethod\n\tdef dict_processor(dict):\n\t\tstring = str(dict)\n\t\tlist = string.strip('{}').replace(':',' : ').split(',')\n\t\treturn list\n\n\n\n\t# this is the normal keyboard, parameters are multiple lists which will have both text and callback data\t\n\t@staticmethod\n\tdef keyboard(*args):\n\t\tkb_list=[]\n\t\tfor item in args:\n\t\t\tif item != []:\n\t\t\t\tkb_list.append([InlineKeyboardButton(text=item[0],callback_data=item[1])])\n\t\treturn InlineKeyboardMarkup(inline_keyboard=kb_list)\n\n\t# this is a special keyboard with one column of keyboard button, taking a list of values of text.\n\t# text will be set as callback data\n\t@staticmethod\n\tdef dynamic_keyboard_1(list):\n\t\tkb_list=[]\n\t\tfor item in list:\n\t\t\tkb_list.append([InlineKeyboardButton(text=str(item),callback_data=str(item))])\n\t\treturn InlineKeyboardMarkup(inline_keyboard=kb_list)\n\n\t# function is the same as the dynamic_keyboard_1\n\t# but will show two columns of keyboard butotn\n\t@staticmethod\n\tdef dynamic_keyboard_2(list):\n\t\tkb_list=[]\n\t\tfor i in range(0,len(list),2):\n\t\t\ttry:\n\t\t\t\ta=[InlineKeyboardButton(text=list[i],callback_data=list[i])]\n\t\t\t\ta.extend([InlineKeyboardButton(text=list[i+1],callback_data=list[i+1])])\n\t\t\t\tkb_list.append(a)\n\t\t\texcept Exception:\n\t\t\t\tkb_list.append(a)\n\t\t# kb_list.append(InlineKeyboardButton(text='Exit',callback_data='exit'))\n\t\treturn InlineKeyboardMarkup(inline_keyboard=kb_list)\n","sub_path":"_Project_Example/Python_Project_Example/Telegram_chatbot/project_tools.py","file_name":"project_tools.py","file_ext":"py","file_size_in_byte":1872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"287274481","text":"\nimport cv2\nimport numpy as np\nfrom Return_Contour import Return_Contour\n\nimg_1_full = cv2.imread('W_1.jpg')\ncv2.imwrite('W_1_compressed.jpg', img_1_full, [cv2.IMWRITE_JPEG_QUALITY, 9])\nimg_2_full = cv2.imread('W_use.jpg')\ncv2.imwrite('W_use_compressed.jpg', img_2_full, [cv2.IMWRITE_JPEG_QUALITY, 9])\n\nimg_1 = cv2.imread('W_1_compressed.jpg')\nimg_2 = cv2.imread('W_use_compressed.jpg')\ncnt_1, sp_1 = Return_Contour(img_1)\ncnt_2, sp_2 = Return_Contour(img_2, True)\n\nif sp_1 is True:\n cnt_1 = cnt_1[0]\nif sp_2 is True:\n cnt_2 = cnt_2[0]\n\n# print(type(cnt_1))\n# print(type(np.array(cnt_2)))\n\nhull_1 = cv2.convexHull(cnt_1)\ncv2.drawContours(img_1, [hull_1], -1, (0, 255, 0), 3)\n\nhull_2 = cv2.convexHull(cnt_2)\ncv2.drawContours(img_2, [hull_2], -1, (0, 255, 0), 3)\n\nnp.save('./octagon_standard_contour.npy', hull_2)\nhull_standard = np.load('octagon_standard_contour.npy')\n\nret = cv2.matchShapes(hull_1, hull_standard, 1, 0.0)\nprint(ret)\n\ncv2.imshow(\"Image_1\", img_1)\ncv2.imshow('Image_2', img_2)\ncv2.waitKey(0)\n\n","sub_path":"Test_For_Contour_Matching.py","file_name":"Test_For_Contour_Matching.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"578509132","text":"import pipeline, pandas, json\nfrom sklearn.naive_bayes import 
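# Hedged sketch of the contour-matching step above using synthetic shapes instead of
# the W_*.jpg files, so it runs without any input images. Assumes opencv-python 4.x
# (findContours returns two values there; 3.x returns three).
import cv2
import numpy as np

img_a = np.zeros((200, 200), dtype=np.uint8)
cv2.rectangle(img_a, (50, 50), (150, 150), 255, -1)      # filled square
contours_a, _ = cv2.findContours(img_a, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
hull_a = cv2.convexHull(contours_a[0])

img_b = np.zeros((200, 200), dtype=np.uint8)
cv2.circle(img_b, (100, 100), 60, 255, -1)               # filled circle
contours_b, _ = cv2.findContours(img_b, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
hull_b = cv2.convexHull(contours_b[0])

# 0.0 means identical shapes; larger values mean less similar (Hu-moment method I1)
print(cv2.matchShapes(hull_a, hull_b, cv2.CONTOURS_MATCH_I1, 0.0))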
MultinomialNB as mod\nfrom sklearn.ensemble import RandomForestClassifier as mod2\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\n\ndef choiceClassifier(i):\n    if (i == 1):\n        classifier = mod\n    else:\n        classifier = mod2\n    return classifier\n\n\ndef constructModel(cc, frequencies):\n    classifieur = choiceClassifier(1)  # change the argument to switch the classifier type\n    result=[]\n    df_train = pandas.read_csv('forClassification.csv')\n    final = pandas.DataFrame(data=df_train)\n\n    vecteurClasseTrain = final[\"Classe\"][:cc]\n    vecteurQuestion = final[\"Texte\"][:cc]\n\n    classifier = classifieur()\n\n    targetsClasse = vecteurClasseTrain.values\n\n    vecteurClasseTest = final[\"Classe\"][cc:].values\n\n    if not frequencies:\n        count_vectorizer = CountVectorizer()\n    else:\n        count_vectorizer = TfidfVectorizer(min_df=6)  # TF-IDF, dropping terms that appear in fewer than 6 documents\n\n    counts = count_vectorizer.fit_transform(vecteurQuestion.values)\n\n    classifier.fit(counts, targetsClasse)\n    examples = final[\"Texte\"][cc:len(final)]\n\n    example_counts = count_vectorizer.transform(examples)\n    predictions = classifier.predict(example_counts)\n\n\n    result.append(predictions)\n    result.append(vecteurClasseTest)\n    result.append(examples)\n    return result\n\ndef construcTableRP(predictions, trueclass, Model):\n    result = {}\n    predictions=Model[0]\n    trueclass=Model[1]\n    for i in range(0, len(Model[0])):\n        if (predictions[i] == trueclass[i]):\n\n            result[str(i)] = ({\n                \"class\": predictions[i],\n                \"bool\": True\n            })\n        else:\n            result[str(i)] = ({\n                \"class\": predictions[i],\n                \"bool\": False\n            })\n    return result\n\ndef truePositive(classe, Model):\n    data = construcTableRP(Model[0],\n                           Model[1],Model)\n    result = 0\n    for i in range(0, len(data)):\n        if ((classe == data[str(i)][\"class\"]) & (data[str(i)][\"bool\"])):\n            result += 1\n    return result\n\n\ndef falsePositive(classe, Model):\n    data = construcTableRP(Model[0],\n                           Model[1], Model)\n    result = 0\n    for i in range(0, len(data)):\n        if ((classe == data[str(i)][\"class\"]) & (data[str(i)][\"bool\"] == False)):\n            result += 1\n    return result\n\n\ndef trueNegative(classeOption,Model):\n    # counts every sample whose true class differs from classeOption (i.e. TN + FP)\n    data = Model[1]\n    data.sort()\n    result = 0\n    for classe in data:\n        if (classe != classeOption):\n            result += 1\n    return result\n\n\ndef falseNegative(classeOption, Model):\n    # counts every sample whose true class equals classeOption (i.e. TP + FN)\n    data = Model[1]\n    data.sort()\n    result = 0\n    for classe in data:\n        if (classe == classeOption):\n            result += 1\n    return result\n\n\ndef precision(classe, Model):\n    return truePositive(classe,Model) / (\n            truePositive(classe, Model) + falsePositive(classe, Model))\n\n\ndef recall(classe, Model):\n    # falseNegative() returns TP + FN here, so this ratio is the usual TP / (TP + FN)\n    return truePositive(classe, Model) / (falseNegative(classe, Model))\n\n\nwith open('dicoClass.json') as json_data:\n    dico = json.load(json_data)\n\n\npipeline.createliste(dico)\n\nprint(\"Prediction without the frequency preprocessing\")\n\nModel=constructModel(1500, False)\nprint(precision(\"Positif\",Model))\nprint(precision(\"Negatif\", Model))\nprint(\"\")\nprint(\"=========================\")\nprint(\"\")\nprint(\"Prediction with the frequency preprocessing\")\n\nModel=constructModel(1500, True)\nprint(precision(\"Positif\", Model))\nprint(precision(\"Negatif\", Model))\n","sub_path":"3_SentimentAnalysis/classification.py","file_name":"classification.py","file_ext":"py","file_size_in_byte":3561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"402968814","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.6 
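# Sketch replacing the hand-rolled metric helpers above with sklearn's, which avoids
# the confusing counting (the record's falseNegative() actually returns TP + FN, so
# recall() computes the right value under a misleading name). Labels are made up.
from sklearn.metrics import precision_score, recall_score

y_true = ['Positif', 'Negatif', 'Positif', 'Positif', 'Negatif']
y_pred = ['Positif', 'Positif', 'Positif', 'Negatif', 'Negatif']

print(precision_score(y_true, y_pred, pos_label='Positif'))  # TP / (TP + FP) = 2/3
print(recall_score(y_true, y_pred, pos_label='Positif'))     # TP / (TP + FN) = 2/3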
(62161)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-i686/egg/coils/protocol/dav/files/csvobject.py\n# Compiled at: 2012-10-12 07:02:39\nimport hashlib, gc\nfrom StringIO import StringIO\nfrom coils.core import BLOBManager, Contact\nfrom coils.net import DAVObject, CachedData\n\ndef none_to_empty(value):\n if value is None:\n return ''\n else:\n return value\n\n\nclass CSVObject(DAVObject):\n\n def __init__(self, parent, name, **params):\n self.contents = None\n self._cached_object = None\n DAVObject.__init__(self, parent, name, **params)\n return\n\n @property\n def cached_object(self):\n return self._cached_object\n\n def get_property_webdav_getcontenttype(self):\n return 'text/plain'\n\n def get_property_getetag(self):\n return self.ctag\n\n def get_property_webdav_getcontentlength(self):\n self.load_contents()\n return str(self.cached_object.size)\n\n def _load_contents(self):\n if self._cached_object is None:\n self._cached_object = CachedData(self.context.account_id, self.webdav_url, self.ctag)\n if self.cached_object.not_current:\n self.log.debug('Cached representation of object is not current, loading contents')\n if self.contents is None:\n self.contents = self.context.run_command(self.command, properties=[Contact])\n stream = self._render_contact_list()\n self.cached_object.write_from_stream(stream)\n BLOBManager.Close(stream)\n else:\n self.log.debug('Cached representation of object is current.')\n return True\n\n def _render_contact_list(self):\n handle = BLOBManager.ScratchFile()\n self.log.debug(('Generating CSV content of {0} entities').format(len(self.contents)))\n start = self.context.get_timestamp()\n handle.write('objectId|firstName|lastName|department|name1|name2|name3|street|postalCode|city|province|country|homePhone|workPhone|officePhone|mobilePhone|faxPhone|email|url|title|position|displayName\\r\\n')\n for contact in self.contents:\n mailing = contact.addresses['mailing']\n tel_home = none_to_empty(contact.telephones['05_tel_private'])\n tel_work = none_to_empty(contact.telephones['02_tel'])\n tel_off = none_to_empty(contact.telephones['01_tel'])\n tel_cell = none_to_empty(contact.telephones['03_tel_funk'])\n tel_fax = none_to_empty(contact.telephones['10_fax'])\n title = none_to_empty(contact.companyvalues['job_title'].string_value)\n position = none_to_empty(contact.companyvalues['job_title1'].string_value)\n handle.write(('{0}|{1}|{2}|{3}|').format(contact.object_id, none_to_empty(contact.first_name), none_to_empty(contact.last_name), none_to_empty(contact.department)))\n if mailing is not None:\n handle.write(('{0}|{1}|{2}|{3}|{4}|{5}|{6}|{7}|').format(none_to_empty(mailing.name1), none_to_empty(mailing.name2), none_to_empty(mailing.name3), none_to_empty(mailing.street), none_to_empty(mailing.postal_code), none_to_empty(mailing.city), none_to_empty(mailing.province), none_to_empty(mailing.country)))\n else:\n handle.write('|||||')\n handle.write(('{0}|{1}|{2}|{3}|{4}|').format(none_to_empty(tel_home), none_to_empty(tel_work), none_to_empty(tel_off), none_to_empty(tel_cell), none_to_empty(tel_fax)))\n email = contact.get_company_value_text('email1')\n if email is None:\n email = contact.get_company_value_text('email2')\n if email is None:\n email = contact.get_company_value_text('email3')\n handle.write(('{0}|').format(none_to_empty(email)))\n handle.write(('{0}|').format(none_to_empty(contact.URL)))\n handle.write(('{0}|').format(none_to_empty(title)))\n 
handle.write(('{0}|').format(none_to_empty(position)))\n handle.write(('{0}|').format(none_to_empty(contact.display_name)))\n handle.write('\\r\\n')\n\n end = self.context.get_timestamp()\n self.log.debug(('Generation of CSV content consumed {0}s ({1}s per entry)').format(end - start, (end - start) / len(self.contents)))\n self.contents = None\n handle.seek(0)\n return handle\n\n def do_GET(self):\n if self.load_contents():\n self.request.stream_response(200, stream=self.cached_object.get_stream(), mimetype='text/plain', headers={'etag': self.ctag})\n self.cached_object.close_cache()\n self.context.commit()\n self.context.db_session().expunge_all()\n print ('GC:{0}').format(gc.collect())\n from coils.foundation.api.objgraph import get_most_common_types, by_type\n import pprint\n pprint.pprint(get_most_common_types())","sub_path":"pycfiles/OpenGroupware-0.1.48-py2.6/csvobject.py","file_name":"csvobject.py","file_ext":"py","file_size_in_byte":5018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"175295600","text":"\r\nimport requests\r\nimport zipfile,os.path\r\n\r\ndef download_file(url):\r\n local_filename = url.split('/')[-1]\r\n # NOTE the stream=True parameter\r\n r = requests.get(url, stream=True)\r\n with open(local_filename, 'wb') as f:\r\n for chunk in r.iter_content(chunk_size=1024): \r\n if chunk: # filter out keep-alive new chunks\r\n f.write(chunk)\r\n f.flush()\r\n return local_filename\r\n\r\ndef unzip(source_filename, dest_dir):\r\n with zipfile.ZipFile(source_filename) as zf:\r\n for member in zf.infolist():\r\n # Path traversal defense copied from\r\n # http://hg.python.org/cpython/file/tip/Lib/http/server.py#l789\r\n words = member.filename.split('/')\r\n path = dest_dir\r\n for word in words[:-1]:\r\n drive, word = os.path.splitdrive(word)\r\n head, word = os.path.split(word)\r\n if word in (os.curdir, os.pardir, ''): continue\r\n path = os.path.join(path, word)\r\n zf.extract(member, path)\r\n\r\n# example:\r\n# download_file(\"https://github.com/dhg/Skeleton/releases/download/2.0.4/Skeleton-2.0.4.zip\")\r\n# unzip(\"Skeleton-2.0.4.zip\", \".\")\r\n\r\n\r\n\r\n","sub_path":"envtools/envdownl.py","file_name":"envdownl.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"112397272","text":"# -- coding:utf8 --\n\n\"\"\"\nUsage:\tdocker [OPTIONS] COMMAND\n\nA self-sufficient runtime for containers\n\nOptions:\n --config string Location of client config files (default \"/root/.docker\")\n -c, --context string Name of the context to use to connect to the daemon (overrides DOCKER_HOST env var\n and default context set with \"docker context use\")\n -D, --debug Enable debug mode\n -H, --host list Daemon socket(s) to connect to\n -l, --log-level string Set the logging level (\"debug\"|\"info\"|\"warn\"|\"error\"|\"fatal\") (default \"info\")\n --tls Use TLS; implied by --tlsverify\n --tlscacert string Trust certs signed only by this CA (default \"/root/.docker/ca.pem\")\n --tlscert string Path to TLS certificate file (default \"/root/.docker/cert.pem\")\n --tlskey string Path to TLS key file (default \"/root/.docker/key.pem\")\n --tlsverify Use TLS and verify the remote\n -v, --version Print version information and quit\n\nManagement Commands:\n builder Manage builds\n config Manage Docker configs\n container Manage containers\n context Manage contexts\n engine Manage the docker engine\n image Manage images\n network Manage 
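# Hedged sketch of the two steps in envdownl.py with current stdlib APIs: stream a
# download to disk, then extract while refusing any entry that would escape the
# destination directory (the path-traversal case the record's unzip() guards against).
import os
import zipfile
import requests

def download(url, dest):
    with requests.get(url, stream=True) as r:
        r.raise_for_status()
        with open(dest, 'wb') as f:
            for chunk in r.iter_content(chunk_size=8192):
                f.write(chunk)
    return dest

def safe_unzip(archive, dest_dir):
    dest_dir = os.path.abspath(dest_dir)
    with zipfile.ZipFile(archive) as zf:
        for member in zf.infolist():
            target = os.path.abspath(os.path.join(dest_dir, member.filename))
            if not target.startswith(dest_dir + os.sep):
                raise ValueError(f'blocked path traversal: {member.filename}')
        zf.extractall(dest_dir)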
networks\n node Manage Swarm nodes\n plugin Manage plugins\n secret Manage Docker secrets\n service Manage services\n stack Manage Docker stacks\n swarm Manage Swarm\n system Manage Docker\n trust Manage trust on Docker images\n volume Manage volumes\n\nCommands:\n attach Attach local standard input, output, and error streams to a running container\n build Build an image from a Dockerfile\n commit Create a new image from a container's changes\n cp Copy files/folders between a container and the local filesystem\n create Create a new container\n diff Inspect changes to files or directories on a container's filesystem\n events Get real time events from the server\n exec Run a command in a running container\n export Export a container's filesystem as a tar archive\n history Show the history of an image\n images List images\n import Import the contents from a tarball to create a filesystem image\n info Display system-wide information\n inspect Return low-level information on Docker objects\n kill Kill one or more running containers\n load Load an image from a tar archive or STDIN\n login Log in to a Docker registry\n logout Log out from a Docker registry\n logs Fetch the logs of a container\n pause Pause all processes within one or more containers\n port List port mappings or a specific mapping for the container\n ps List containers\n pull Pull an image or a repository from a registry\n push Push an image or a repository to a registry\n rename Rename a container\n restart Restart one or more containers\n rm Remove one or more containers\n rmi Remove one or more images\n run Run a command in a new container\n save Save one or more images to a tar archive (streamed to STDOUT by default)\n search Search the Docker Hub for images\n start Start one or more stopped containers\n stats Display a live stream of container(s) resource usage statistics\n stop Stop one or more running containers\n tag Create a tag TARGET_IMAGE that refers to SOURCE_IMAGE\n top Display the running processes of a container\n unpause Unpause all processes within one or more containers\n update Update configuration of one or more containers\n version Show the Docker version information\n wait Block until one or more containers stop, then print their exit codes\n\nRun 'docker COMMAND --help' for more information on a command.\n\ndocker volume\n\nUsage:\tdocker volume COMMAND\n\nManage volumes\n\nCommands:\n create Create a volume\n inspect Display detailed information on one or more volumes\n ls List volumes\n prune Remove all unused local volumes\n rm Remove one or more volumes\n\nRun 'docker volume COMMAND --help' for more information on a command.\n\n\"\"\"\nfrom . 
import image as image_util\nimport argparse\nimport os\nimport sys\nimport subprocess\n\n\ndef check_dir():\n    dc_file = 'docker-compose.yml'\n    if not os.path.exists(dc_file):\n        print(\"Wrong working directory: no docker-compose.yml found in the current directory!\")\n        exit(-1)\n    \n    # check for the back directories and create them if missing\n    def check_back_dir(back_path):\n        if not os.path.exists(back_path):\n            run_cmd(\"mkdir -p \"+back_path)\n    check_back_dir('./back/image')\n    check_back_dir('./back/version')\n\n\ndef run_cmd(cmd):\n    print('running shell: '+cmd)\n    p = subprocess.call(cmd, shell=True)\n\n\ndef file_pack(dir_name):\n    # TODO check that the directory exists first\n    tar_file = './back/version/'+dir_name + '.tar.gz'\n    cmd = \"tar -cvzf \" + tar_file + \" \"+dir_name\n    run_cmd(cmd)\n\n\ndef file_unpack(dir_name):\n    # TODO check that the file exists first\n    tar_file = './back/version/'+dir_name + '.tar.gz'\n    cmd = \"tar -xvzf \" + tar_file\n    run_cmd(cmd)\n\n\ndef image(args):\n    # image subcommand\n    if args.pack:\n        image_util.do_image_pack()\n    if args.unpack:\n        image_util.do_image_unpack()\n    if args.clear:\n        image_util.do_image_clear()\n    if args.upgrade:\n        image_util.do_image_upgrade()\n\n\ndef init_data(args):\n    # init-data subcommand\n    if args.pack:\n        file_pack('init-data')\n    if args.unpack:\n        file_unpack('init-data')\n\n\ndef run_data(args):\n    # run-data subcommand\n    if args.pack:\n        # TODO 'docker-compose down' stops the stack first; record the current state and decide whether to restore it\n        run_cmd('docker-compose down')\n        file_pack('run-data')\n        run_cmd('docker-compose up -d ')\n    if args.unpack:\n        # TODO 'docker-compose down' stops the stack first; record the current state and decide whether to restore it\n        run_cmd('docker-compose down')\n        file_unpack('run-data')\n        run_cmd('docker-compose up -d ')\n\n\ndef main_cli():\n    # create the argument parser\n    parser = argparse.ArgumentParser(\n        usage=\"dc-help COMMAND\", description=\"docker-compose helper tool for managing images and version files\")\n    # get the first-level subcommand dispatcher\n    sub_parsers = parser.add_subparsers(title=\"COMMAND\",)\n    # create the subcommands\n    p1 = sub_parsers.add_parser(\"image\",\n                                usage='dc-help COMMAND image [-h] (--pack | --unpack | --clear | --upgrade)',\n                                help=\"manage the images in docker-compose.yml: pack, load, clean up, upgrade\")\n    p2 = sub_parsers.add_parser(\"init-data\",\n                                usage=\"dc-help init-data [-h] (--pack | --unpack)\",\n                                help=\"compress and decompress init-data\")\n    p3 = sub_parsers.add_parser(\"run-data\",\n                                usage=\"dc-help run-data [-h] (--pack | --unpack)\",\n                                help=\"compress and decompress run-data\", add_help=True)\n    # mutually exclusive, and at least one option is required\n    group = p1.add_mutually_exclusive_group(required=True)\n    group.add_argument('--pack', action='store_true', help=\"automatically pack the images\")\n    group.add_argument('--unpack', action='store_true', help=\"automatically load the images\")\n    group.add_argument('--clear', action='store_true', help=\"clean up the image files\")\n    group.add_argument('--upgrade', action='store_true',\n                       help=\"automatically load the image files, then upgrade\")\n    p1.set_defaults(func=image)  # bind the handler function to the subparser\n\n    group = p2.add_mutually_exclusive_group(required=True)\n    group.add_argument('--pack', action='store_true', help=\"automatically pack init-data\")\n    group.add_argument('--unpack', action='store_true',\n                       help=\"automatically unpack init-data\")\n    p2.set_defaults(func=init_data)  # bind the handler function to the subparser\n\n    group = p3.add_mutually_exclusive_group(required=True)\n    group.add_argument('--pack', action='store_true', help=\"automatically pack run-data\")\n    group.add_argument('--unpack', action='store_true', help=\"automatically unpack run-data\")\n    p3.set_defaults(func=run_data)  # bind the handler function to the subparser\n\n    # check the working directory first\n    check_dir()\n    args = parser.parse_args(sys.argv[1:])\n    args.func(args)\n\n\nif __name__ == \"__main__\":\n    main_cli()\n","sub_path":"dchelp/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":8526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"441459860","text":"'''\nCreated on 
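# Stripped-down sketch of the argparse layout main_cli() builds above: one
# subcommand with a required mutually exclusive option pair, and the handler bound
# via set_defaults(func=...). Requires Python 3.7+ for add_subparsers(required=True).
import argparse

def image(args):
    print('pack' if args.pack else 'unpack')

parser = argparse.ArgumentParser(prog='dc-help')
subparsers = parser.add_subparsers(title='COMMAND', required=True, dest='command')
p_image = subparsers.add_parser('image', help='manage images')
group = p_image.add_mutually_exclusive_group(required=True)
group.add_argument('--pack', action='store_true')
group.add_argument('--unpack', action='store_true')
p_image.set_defaults(func=image)

args = parser.parse_args(['image', '--pack'])
args.func(args)   # prints: pack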
Feb 13, 2014\n\n@author: Otrebor45\n'''\nimport pygame\nimport Vector2\nimport gameEngine.util.LineTest as LineTest\n\nclass Node:\n def __init__(self,value = None):\n self.next = None\n self.value = value\n def eq(self,val):\n return self.value == val\n \nclass Graph:\n def __init__(self, points = None):\n self.Debug = False\n self.edges = {}\n if points:\n for i in range(-1, len(points)-1):\n self.edges[ points[i]] = [ points[i+1]]\n self.first = None\n \n def connect(self, v1, v2):\n self.edges[v1] = self.edges[v1] if v1 in self.edges.keys() else []\n if not v2 in self.edges[v1]:\n self.edges[v1].append(v2)\n \n def addEdge(self,v1,v2,strict = False):\n self.edges[v1] = self.edges[v1] if v1 in self.edges.keys() else []\n if strict:\n self.edges[v1] = [v2]\n else:\n self.edges[v1].append(v2)\n \n if v1 in (self.edges[v2] if v2 in self.edges.keys() else []):\n self.edges[v2].remove(v1)\n if v2 in self.edges[v1]:\n self.edges[v1].remove(v2)\n if not len(self.edges[v1]):\n self.edges.pop(v1)\n if v2 in self.edges and not len(self.edges[v2]):\n self.edges.pop(v2)\n \n def Segment(self, v1, v2, segments):\n segment = []\n for (c1,c2) in segments:\n if (c1[0] < v1[0] or c1[1] < v1[1]) and (c2[0] > v2[0] or c2[1] > v2[1]):\n i = segment.index( (c1,c2))\n segment.pop( (c1,c2))\n segment.insert(i, (c1,v1))\n segment.insert(i+1, (c2,v2))\n break\n else:\n segment.append((c1,c2))\n return segment\n \n def AddEdge(self, v1,v2):\n segments = [(v1,v2)]\n \n _in = self.MakeVector(v1, v2).normalize()\n edges =self.getIntersectionEdges((v1,v2))\n for e in edges:\n out = self.MakeVector(e[0], e[1])\n if _in.calAngle(out) == 180:\n segments = self.Segment(v1, v2, segments)\n \n for (c1,c2) in segments:\n self.addEdge(c1, c2)\n \n def intersects(self, edge): \n for e in self.getEdges():\n if (edge[0] == e[1]) or (edge[1] == e[0]) or (edge[0] == e[0]) or ( edge[1] == e[1]):\n #if self.isPointOnLine(edge, e[0]) and self.isPointOnLine(edge, e[1]) or self.isPointOnLine(e, edge[0]) and self.isPointOnLine(e, edge[1]):\n # return True\n continue\n if self.intersect(e, edge):\n if self.Debug:\n self.drawLine(e[0], e[1], (200,200,200))\n self.drawLine(edge[0], edge[1], (0,200,200))\n pygame.time.wait(500)\n \n return True\n return False\n \n def getIntersectionEdges(self, edge):\n edges = []\n for e in self.getEdges():\n if self.intersect(e, edge):\n edges.append(e)\n return edges\n \n '''\n LineB2 = edge2[1]\n LineB1 = edge2[0]\n \n LineA2 = edge1[1]\n LineA1 = edge1[0]\n denom = ((LineB2[1] - LineB1[1]) * (LineA2[0] - LineA1[0])) - ((LineB2[0] - LineB1[0]) * (LineA2[1] - LineA1[1]))\n \n return denom != 0\n '''\n \n def getFirstVertex(self):\n current = None\n for k in self.edges.keys():\n if current:\n if k[0] < current[0] and k[1] < current[1]:\n current = k\n else:\n current = k\n return current\n \n def getFirst(self):\n for k in self.edges.keys():\n return (k,self.edges[k])\n return (None,None)\n \n def getPolygon(self,v):\n circle = []\n circle.append(v)\n #circle.first = v\n if not self.edges.has_key(v):\n return None\n _next = self.edges[v]\n if not len(_next):\n return None\n _next = _next[0]\n close = False\n while _next and not close:\n if _next in circle:\n close = True\n continue\n circle.append(_next)\n _next = self.edges[_next]\n if not len(_next):\n return None\n _next = _next[0]\n return circle\n \n def cross(self, graph):\n for edge in graph.edges:\n v1 = edge\n en = graph.edges[edge]\n if len(en):\n v2 = en[0]\n self.addEdge(v2, v1)\n\n def getBest(self, _in, v1, v2):\n best = v2[0]\n b = 360\n 
for v in v2:\n out = self.MakeVector(v1, v).normalize()\n ang = _in.calAngle(out)\n if ang < b:\n b = ang\n best = v\n return best\n \n def getCicles(self):\n if self.Debug:\n self.cls()\n self.print_graph( (0,255,0))\n circles = []\n \n _r = 0\n _g = 0\n _b = 50\n v1 = None\n while len(self.edges):\n circle = Graph()\n if not v1 or not self.edges.has_key(v1):\n v1 = self.getFirstVertex()\n #if v1 is self.edges.has_key(v1):\n v2 = self.edges[v1]\n if len(v2) <= 0:\n continue\n v2 = v2[0]\n circle.addEdge(v1, v2)\n circle.first = v1\n finish = False\n while not finish:\n v3 = v1\n v1 = v2\n if not self.edges.has_key(v2):\n finish = True\n continue\n _in = self.MakeVector(v1, v3).normalize()\n v2 = self.edges[v2]\n if len(v2) <= 0:\n continue\n if len(v2) > 1:\n v2 = self.getBest(_in, v1, v2)\n else:\n v2 = v2[0]\n out = self.MakeVector(v1, v2).normalize()\n if not self.TestAngle(_in, out):# or self.intersects( (v1,v2)):\n done = False\n v2 = v1\n while not done:\n parents = self.getParents(v2)\n v2 = self.getBest(_in, v2, parents)\n out = self.MakeVector(v1, v2)\n ang = _in.calAngle(out)\n if (ang == 90 or ang == 180) and not self.intersects( (v1,v2)):\n parents = self.getParents(v2)\n v3 = v2\n v3 = self.getBest(_in, v3, parents)\n _out = self.MakeVector(v1, v3)\n _nang = _in.calAngle(_out)\n while _nang == ang == 180:\n v2 = v3\n parents = self.getParents(v3)\n v3 = self.getBest(_in, v3, parents)\n _out = self.MakeVector(v1, v3)\n _nang = _in.calAngle(_out)\n self.drawLine( v2, v3)\n \n if v2 in circle.edges.keys():\n finish = True\n done = True\n elif v1 in circle.edges.keys():\n finish = True\n \n circle.addEdge(v1,v2,True)\n if self.Debug:\n \n self.print_graph((0,255,0))\n circle.print_graph()\n \n pygame.time.wait(200)\n \n poly = circle.getPolygon(v2)\n g = Graph( poly)\n self.cross(g)\n if self.Debug:\n self.cls()\n self.print_graph( (0,255,0))\n circles.append(poly)\n return circles \n \n def getEdges(self):\n edges = []\n for k in self.edges.keys():\n for v in self.edges[k]:\n edges.append( (k,v))\n return edges\n \n def getParents(self,v):\n keys = []\n for key in self.edges:\n if v in self.edges[key]:\n keys.append(key)\n return keys\n \n def TestAngle(self, v1, v2 ,_range=[0,180]):\n ang = v1.calAngle(v2) \n return (ang > _range[0] and ang <= _range[1])\n \n def MakeVector(self,v1,v2):\n return Vector2.Vector2(v2[0],-v2[1]).sub( Vector2.Vector2(v1[0],-v1[1]))\n \n \n def drawLine(self, v1, v2, color = (0,255,0),screen=None):\n flip = False\n if not screen:\n screen = pygame.display.get_surface()\n flip = True\n pygame.draw.line( screen,color, v1, v2 )\n if flip:\n pygame.display.flip()\n \n def print_graph(self, color = (255,0,0,100),screen=None):\n flip = False\n if not screen:\n screen = pygame.display.get_surface()\n flip = True\n for key in self.edges:\n for end in self.edges[key]:\n pygame.draw.line( screen,color, key, end )\n if flip: \n pygame.display.flip()\n \n def drawPolygon(self, points, color = (250,250,250,10)):\n screen = pygame.display.get_surface()\n pygame.draw.polygon(screen, color, points )\n #pygame.display.flip()\n \n def cls(self):\n screen = pygame.display.get_surface()\n screen.fill( (0,0,0))\n \n \n '''\n def intersect(self,line1, line2):\n \n self.drawLine(line1[0], line1[1], (200,0,0))\n self.drawLine(line2[0], line2[1], (0,0,200))\n pygame.time.wait(100)\n p1 = line1[0] \n q1 = line1[1]\n p2 = line2[0]\n q2 = line2[1]\n o1 = self.orientation(p1, q1, p2);\n o2 = self.orientation(p1, q1, q2);\n o3 = self.orientation(p2, q2, p1);\n o4 = 
self.orientation(p2, q2, q1);\n \n if (o1 != o2 and o3 != o4):\n return True;\n \n if (o1 == 0 and self.onSegment(p1, p2, q1)):\n return True\n \n if (o2 == 0 and self.onSegment(p1, q2, q1)):\n return True\n \n if (o3 == 0 and self.onSegment(p2, p1, q2)):\n return True\n if (o4 == 0 and self.onSegment(p2, q1, q2)):\n return True\n \n return False\n '''\n def orientation(self, p, q, r):\n val = (q[1] - p[1]) * (r[1] - q[1]) -(q[1] - p[1]) * (r[1] - q[1]);\n if (val == 0):\n return 0\n return 1 if val > 0 else 2\n\n \n def onSegment(self, p, q, r):\n if (q[1] <= max(p[1], r[1]) and q[1] >= min(p[1], r[1]) and q[1] <= max(p[1], r[1]) and q[1] >= min(p[1], r[1])):\n return True\n return False;\n\n\n def lineSegmentTouchesOrCrossesLine(self, seg1, seg2):\n return self.isPointOnLine(seg1, seg2[0]) or self.isPointOnLine(seg1, seg2[1]) or self.isPointRightOfLien(seg1, seg2[0]) and self.isPointRightOfLien(seg1, seg2[1]) \n \n \n \n def isPointOnLine(self, linea, p):\n aTmp = Vector2.Vector2( linea[0][0] - linea[1][0], linea[0][1]-linea[1][1] )\n pTmp = Vector2.Vector2( p[0] - linea[1][0], p[1]-linea[1][1] )\n r = aTmp.x * pTmp.y - pTmp.x * aTmp.y;\n return abs(r) < 0.000001\n \n def isPointRightOfLien(self, linea, p):\n aTmp = Vector2.Vector2( linea[0][0] - linea[1][0], linea[0][1]-linea[1][1] )\n pTmp = Vector2.Vector2( p[0] - linea[1][0], p[1]-linea[1][1] )\n \n return aTmp.cross(pTmp) < 0\n \n def intersect(self,line1, line2):\n if self.Debug:\n self.drawLine(line1[0],line1[1], (200,0,0))\n self.drawLine(line2[0], line2[1], (0,0,200))\n return LineTest.calculateIntersectPoint(line1[0], line1[1], line2[0], line2[1])\n #return self.lineSegmentTouchesOrCrossesLine(line1, line2) and self.lineSegmentTouchesOrCrossesLine(line2, line1)\n \n \n \n \n \n","sub_path":"GameEngine/gameEngine/util/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":12077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"124052962","text":"import rhinoscriptsyntax as rs\r\nfrom compas_rhino.geometry import RhinoMesh\r\nfrom compas_pattern.datastructures.mesh_quad_pseudo.mesh_quad_pseudo import PseudoQuadMesh\r\nfrom compas_pattern.cad.rhino.draw import draw_graph\r\n\r\nguids = rs.GetObjects('get quad meshes', filter = 32)\r\npoles = rs.GetObjects('get pole points', filter = 1)\r\nif poles is None:\r\n poles = []\r\nelse:\r\n poles = [rs.PointCoordinates(pole) for pole in poles]\r\nrs.EnableRedraw(False)\r\nfor guid in guids:\r\n vertices, faces = RhinoMesh.from_guid(guid).get_vertices_and_faces()\r\n mesh = PseudoQuadMesh.from_vertices_and_faces_with_poles(vertices, faces, poles)\r\n #mesh = QuadMesh.from_vertices_and_faces(*RhinoMesh.from_guid(guid).get_vertices_and_faces())\r\n #print('euler', mesh.euler())\r\n #print('nb_boundaries', len(mesh.boundaries()))\r\n #mesh.collect_strips()\r\n #mesh.collect_polyedges()\r\n #polylines = [rs.AddPolyline(mesh.strip_edge_midpoint_polyline(skey)) for skey in mesh.strips()]\r\n #for i, polyline in enumerate(polylines):\r\n # \r\n #polylines = [rs.AddPolyline(polyline) for polyline in mesh.singularity_polylines()]\r\n #polylines = [rs.AddPolyline(polyline) for polyline in mesh.polylines()]\r\n #for polyline in polylines:\r\n # rs.CurveArrows(polyline, 3)\r\n #for i, vkey in enumerate(mesh.vertices()):\r\n # rs.AddCircle(mesh.vertex_coordinates(vkey), 2)\r\n # rs.AddText(str(i), mesh.vertex_coordinates(vkey), 2)\r\n #circles = [rs.AddCircle(mesh.vertex_coordinates(vkey), 1) for vkey in mesh.singularities()]\r\n 
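# Corrected sketch of the classic orientation test Graph.orientation() above
# intends: the record's expression repeats the same term twice, so val is always 0
# and every point triple looks collinear. The standard cross-product form:
def orientation(p, q, r):
    val = (q[1] - p[1]) * (r[0] - q[0]) - (q[0] - p[0]) * (r[1] - q[1])
    if val == 0:
        return 0                 # collinear
    return 1 if val > 0 else 2   # 1 = clockwise, 2 = counter-clockwise

print(orientation((0, 0), (1, 1), (2, 2)))   # 0: collinear
print(orientation((0, 0), (1, 1), (2, 0)))   # 1: a right (clockwise) turn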
circles = [rs.AddCircle(mesh.vertex_coordinates(vkey), .25) for vkey in mesh.singularities()]\r\n #vertices, edges = mesh.strip_graph()\r\n #circles = [rs.AddCircle(xyz, .125) for xyz in vertices.values()]\r\n #draw_graph(vertices, edges, spindle_size=5, node_radius=.125)\r\n #rs.AddText(mesh.number_of_strips(), mesh.centroid())\r\n #rs.AddCircle(mesh.centroid(), 2)","sub_path":"examples/rhino_draw_quad_mesh_strips.py","file_name":"rhino_draw_quad_mesh_strips.py","file_ext":"py","file_size_in_byte":1928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"212460362","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Oct 15 13:54:00 2018\r\n@author: jp15101\r\nhttps://github.com/kushalbhabra/pyMidi/blob/master/src/test.py\r\n\"\"\"\r\n\r\nimport pygame\r\n\r\nimport pygame.midi\r\n\r\nimport time\r\n\r\ndef remove_note(note_table,input_note):\r\n 'Remove a midi note from the notetable' \r\n #Remove note from the noteTable \r\n for temp_midi_loc in range(len(note_table)):\r\n #Find OffNote message in table\r\n if input_note == note_table[temp_midi_loc][1]:\r\n #Shift every following note back up one\r\n for k in range(temp_midi_loc,len(note_table)-1):\r\n if k < len(note_table)-1:\r\n note_table[k] = note_table[k+1]\r\n #Replace final note with empty values \r\n note_table[-1] = empty_midi_byte\r\n return note_table\r\n\r\n\r\n \r\ndef add_note(note_table,new_note):\r\n 'Add a midi note to the top of the notetable.'\r\n #Add note to the top of the noteTable and shift everything down one line\r\n for temp_midi_loc in range(len(note_table)-2,-1,-1): # -2 as final datum not important and code runs to +1\r\n if note_table[temp_midi_loc][2] != 0:\r\n note_table[temp_midi_loc+1] = note_table[temp_midi_loc] \r\n note_table[temp_midi_loc] = new_note\r\n note_table[0] = new_note\r\n return note_table\r\n\r\n\r\ndef arpeggiate_note_table(input_note_table,arpeggiate_type):\r\n '''Arpeggiate the received note table'''\r\n empty_midi_byte = [0,0,0,0]\r\n output_note_table = input_note_table[:]\r\n \r\n if str(arpeggiate_type) == 'up':\r\n step_count = 0\r\n step_increase = 2\r\n elif str(arpeggiate_type) == 'down':\r\n step_count = 1\r\n step_increase = 2\r\n elif str(arpeggiate_type) == 'downup':\r\n step_count = 1\r\n step_increase = 1\r\n elif str(arpeggiate_type) == 'updown': \r\n step_count = 0\r\n step_increase = 1 \r\n else:\r\n print(\"That is not a valid arpeggiator function\") \r\n\r\n current_note = empty_midi_byte\r\n \r\n '''This runs indefinitely until a new message has been received. Currently, \r\n if new note on messages are received then everything is fine and dandy. \r\n If new note off messages are received then the output only changes after a \r\n complete loop. This means there is significant delay. I need to look into\r\n how the poll messages look for NoteOff and see how they differ from note on\r\n to see if that can give the necessary answers.'''\r\n while in_midi_device.poll() == False:\r\n if step_count % 2 == 0:\r\n output_note_table.sort(key=lambda x: x[1]) #upArp\r\n elif step_count % 2 == 1:\r\n output_note_table.sort(key=lambda x: x[1],reverse=True) #downArp \r\n #loop through note table and play the notes\r\n for note_loc in range(len(output_note_table)):\r\n if output_note_table[note_loc][0] == 144:\r\n #Don't play a note if it's the same as the last note played and there are other notes available.\r\n #Removes repeats of high notes. 
Causes single notes to be played on repeat.\r\n if output_note_table[note_loc] == current_note and input_note_table[1] != empty_midi_byte:\r\n continue \r\n else: \r\n #play notes from output table\r\n out_midi_device.write_short(output_note_table[note_loc][0],output_note_table[note_loc][1],127)\r\n #evaluate code runtime\r\n elapsed = time.time()-t\r\n #IT would be better to wait for the remaining amount of time. If code has run for 0.1 secs\r\n #already then we want it to only wait 0.1 secs. for example\r\n all_times.append(elapsed)\r\n time.sleep(0.2)\r\n current_note = output_note_table[note_loc]\r\n #counter used to control Up/Down functionalities \r\n step_count+=step_increase\r\n\r\n\r\n#Needs initialising for some reason, and declaring that events need to be got\r\npygame.init()\r\npygame.fastevent.init()\r\nevent_get = pygame.fastevent.get\r\nevent_post = pygame.fastevent.post\r\n\r\n#Separate midi functions, controllers, outputs need to be defined\r\npygame.midi.init()\r\ninput_id = pygame.midi.get_default_input_id()\r\noutput_id = pygame.midi.get_default_output_id()\r\n\r\nin_midi_device = pygame.midi.Input( input_id )\r\nout_midi_device = pygame.midi.Output(output_id)\r\n\r\n#Creates an empty note table \r\nempty_midi_byte = [0,0,0,0]\r\ninput_note_table = [empty_midi_byte] * 10\r\n\r\n#Little code test speed nugget\r\nall_times = []\r\n\r\n#Run forever\r\nwhile True:\r\n# if a message exists\r\n if in_midi_device.poll():\r\n t = time.time()\r\n# store the first 10 new midi messages\r\n midi_events = in_midi_device.read(10)\r\n \r\n #Little code for turning off the programme from the midi controller.\r\n if midi_events[0][0] == [144, 72, 1, 0]:\r\n break\r\n \r\n \r\n #print received midi messages and \r\n for new_midi in range(len(midi_events)):\r\n if midi_events[new_midi][0][0] == 144: #if note on\r\n new_note = midi_events[new_midi][0]\r\n input_note_table = add_note(input_note_table,new_note)\r\n elif midi_events[new_midi][0][0] == 128: #if note off\r\n removal_note = midi_events[new_midi][0][1]\r\n input_note_table = remove_note(input_note_table,removal_note)\r\n\r\n #print input noteTable for viewing reference\r\n for byte in input_note_table:\r\n print(*byte) \r\n print('\\n') \r\n \r\n arpeggiate_note_table(input_note_table,'updown')\r\n \r\n\r\n#Close off all of the opened channels and exit the initialisations.\r\nin_midi_device.close()\r\n\r\npygame.midi.quit()\r\n\r\npygame.quit()","sub_path":"Data Processing/midi_processor.py","file_name":"midi_processor.py","file_ext":"py","file_size_in_byte":5892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"140625065","text":"#!/usr/bin/python\n\nimport json\n\nimport os\nimport sys\nfrom os.path import join\n\nimport requests as requests\nfrom datetime import datetime\n\n\ndef get_tracked_time():\n harvest_api_account_id = os.environ.get('HARVEST_API_ID')\n harvest_api_token = os.environ.get(\"HARVEST_API_BEARER\")\n\n if not harvest_api_account_id or not harvest_api_token:\n print('You need to provide valid harvest credentials in the .env file!', file=sys.stderr)\n exit(1)\n\n if os.environ.get('ENV', 'prod') == 'prod':\n headers = {\n 'Harvest-Account-ID': harvest_api_account_id,\n 'Authorization': f'Bearer {harvest_api_token}',\n 'User-Agent': 'TimeChecker'\n }\n data = requests.get(url='https://api.harvestapp.com/api/v2/time_entries', headers=headers)\n return json.loads(data.content).get('time_entries', [])\n return json.loads(open(join('..', 'data', 
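# Sketch of the fix the arpeggiator's own comment asks for ("wait for the remaining
# amount of time"): subtract the work already done from the step interval so the
# tempo stays steady regardless of how long each iteration takes.
import time

step = 0.2                      # seconds per arpeggiator step
next_deadline = time.monotonic() + step
for note in [60, 64, 67, 72]:   # illustrative MIDI note numbers
    print('play', note)         # stand-in for out_midi_device.write_short(...)
    remaining = next_deadline - time.monotonic()
    if remaining > 0:
        time.sleep(remaining)   # sleep only whatever time is left in this step
    next_deadline += step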
'time_entries.json'), 'r+').read())\n\n\ndef parse_work_quota_dates(work_quota_dates):\n if not work_quota_dates:\n print('You must add the date you started to work and you work quota to .env!', file=sys.stderr)\n print('Example: WORK_QUOTA_DATES=\"2018-09-01:70%;2019-02-01:80%\"', file=sys.stderr)\n exit(1)\n\n try:\n parsed_work_quota_dates = {\n parse_iso_date(date_quota.split(':')[0].strip()): float(date_quota.split(':')[1].strip())\n for date_quota\n in work_quota_dates.strip().strip(';').split(';')\n }\n return parsed_work_quota_dates\n except TypeError:\n print(f'Invalid work quota format: \"{work_quota_dates}\"')\n exit(1)\n\n\ndef calculate(time_entries, work_quota_dates):\n i = 0\n work_week_hours = float(os.environ.get('WORK_WEEK_HOURS', 42))\n quota_change_dates = sorted(work_quota_dates.keys())\n current_quota_start_date = quota_change_dates[i]\n current_quota = work_quota_dates[current_quota_start_date]\n\n check_work_quota_exists(current_quota_start_date, time_entries[0])\n\n weekly_hours_total = {}\n weekly_hours_delta = {}\n\n for entry in time_entries:\n work_date = parse_iso_date(entry['spent_date'])\n\n while i < len(quota_change_dates) - 1 and work_date >= quota_change_dates[i + 1]:\n i += 1\n current_quota_start_date = quota_change_dates[i]\n current_quota = work_quota_dates[current_quota_start_date]\n\n calendar_week = work_date.isocalendar()[1]\n calendar_year = work_date.isocalendar()[0]\n week_id = f'Calendarweek[{calendar_week}].Year[{calendar_year}]'\n weekly_hours_total.update({week_id: weekly_hours_total.get(week_id, 0) + entry['hours']})\n weekly_hours_delta[week_id] = work_week_hours * current_quota - weekly_hours_total.get(week_id)\n\n total_hours_worked = sum(weekly_hours_total.values())\n total_hours_average = round(total_hours_worked / len(weekly_hours_total), 2)\n\n delta_hours = round(sum(weekly_hours_delta.values()), 2)\n\n compensation_in_days = round(delta_hours / float(os.environ.get('WORK_DAY_HOURS', 8.4)), 2)\n print(f'⏱ Contract: {work_week_hours * current_quota}h / week ({current_quota*100}%)')\n print(f'💰 You sold {int(round(total_hours_worked, 0))}h of your time working 🤔')\n print(f'💻 On average you work {total_hours_average}h / week')\n compensation_type = '🛑 Undertime' if delta_hours > 0 else '✅ Overtime'\n print(f'{compensation_type}: {abs(delta_hours)}h ({abs(compensation_in_days)} working days)')\n\n\ndef check_work_quota_exists(quota_date, first_work_day_entry):\n first_work_day = parse_iso_date(first_work_day_entry['spent_date'])\n if first_work_day < quota_date:\n print(f'You worked on the {to_human_date(first_work_day)}', file=sys.stderr)\n print(f'But your earliest provided work quota date is: {to_human_date(quota_date)}', file=sys.stderr)\n exit(1)\n\n\ndef parse_iso_date(date):\n return datetime.strptime(date, '%Y-%m-%d')\n\n\ndef to_human_date(date):\n return datetime.strftime(date, '%d.%m.%Y')\n\n\nif __name__ == '__main__':\n time_entries = sorted(get_tracked_time(), key=lambda e: e['id'])\n calculate(time_entries, parse_work_quota_dates(os.environ.get('WORK_QUOTA_DATES', None)))\n","sub_path":"src/timetracker.py","file_name":"timetracker.py","file_ext":"py","file_size_in_byte":4214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"104779909","text":"#Mapas de bias en precipitacion periodo historico del modelo CanESM2, CanESM5 con respecto a observaciones\n\nimport numpy as np\nimport pandas as pd\nimport xarray as xr\nimport glob\nimport matplotlib.pyplot as plt\nfrom 
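# Small sketch of the weekly bucketing calculate() does above: group hours by ISO
# (year, week) so entries near year boundaries land in the right week. Data made up.
from datetime import datetime
from collections import defaultdict

entries = [('2018-12-31', 8.0), ('2019-01-02', 4.0), ('2019-01-07', 8.4)]
weekly = defaultdict(float)
for spent_date, hours in entries:
    iso = datetime.strptime(spent_date, '%Y-%m-%d').isocalendar()
    weekly[(iso[0], iso[1])] += hours   # key by (ISO year, ISO week)

print(dict(weekly))   # 2018-12-31 and 2019-01-02 share ISO week (2019, 1)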
datetime import datetime, timedelta\nimport cartopy.crs as ccrs\nimport cartopy.feature\nfrom cartopy.util import add_cyclic_point\nimport matplotlib as mpl\nmpl.rcParams['hatch.linewidth'] = 0.5 # previous pdf hatch linewidth\n\n#Defino funciones \ndef anomaly(dato,obs):\n climatologia = obs.mean(dim='time')\n anomalia = dato.mean(dim='time') - climatologia\n return climatologia, anomalia\n\n# Abrir datos-----------------------------------------------------------------\npath = '/home/tabu/Escritorio/Doctorado/ClimDinam/TP2/'\npath2 = '/media/tabu/JULIAexterno/climatologia_dinamica/'\n\n#Abro CanESM2\nCanESM2_ens = xr.open_dataset(path2+'CanESM2/pr_Amon_CanESM2_historical_ensmean_197601-200512_2.5_mes.nc')\n\n#Abro CanESM5\nCanESM5_ens = xr.open_dataset(path2+'CanESM5/pr_Amon_CanESM5_historical_ensmean1p1f1_2.5_mes.nc')\n\n#Summer monsoon obs HS\ndef annual_range_HS(dato):\n summer = dato.sel(time=dato['time.season']=='DJF').mean(dim='time').pr\n winter = dato.sel(time=dato['time.season']=='JJA').mean(dim='time').pr\n rango = (summer-winter)/30\n return rango\n\ndef annual_range_HN(dato):\n summer = dato.sel(time=dato['time.season']=='JJA').mean(dim='time').pr\n winter = dato.sel(time=dato['time.season']=='DJF').mean(dim='time').pr\n rango = (summer-winter)/30\n return rango\n\n\ndef mapa(dato1,dato2,titulo):\n #America del sur -------Observaciones-----------------------------------------\n fig = plt.figure(figsize=(10, 18),dpi=300,constrained_layout=True)\n fig_size = plt.rcParams[\"figure.figsize\"]\n data_crs = ccrs.PlateCarree(central_longitude=0)\n projection = ccrs.PlateCarree()\n ax1 = plt.subplot(1,1,1,projection=projection)\n #ax1.set_extent([275,335, 10, -60], crs=data_crs)\n clevels = np.arange(np.min(dato1),np.max(dato1),(np.max(dato1)-np.min(dato1))/11)\n levels1 = [dato1.min(),2,dato1.max()]\n ax1.contourf(cyclic_lons, lat, dato1,levels1, transform=data_crs,levels=levels1, hatches=[\"\", \"...\"], alpha=0.01)\n levels2 = [dato2.min(),2,dato2.max()]\n ax1.contourf(cyclic_lons, lat, dato2,levels2, transform=data_crs,levels=levels2, hatches=[\"\", \"...\"], alpha=.01)\n ax1.add_feature(cartopy.feature.COASTLINE)\n ax1.add_feature(cartopy.feature.BORDERS, linestyle='-', alpha=.5)\n ax1.gridlines(crs=data_crs, linewidth=0.3, linestyle='-')\n #Saco las coordenadas de la figura hasta ahora\n plt0_ax = plt.gca()\n left, bottom1, width, height = plt0_ax.get_position().bounds\n first_plot_left = plt0_ax.get_position().bounds[0]\n #Utilizo las coordenadas para definir la posición de la colorbar 1\n #colorbar_axes = fig.add_axes([first_plot_left + .9, bottom1, 0.02, 1.2*height])\n fig_size[0] = width*4 + 10\n fig_size[1] = height*2 + 3\n plt.rcParams[\"figure.figsize\"] = fig_size\n fig.suptitle(str(titulo), y=0.68, x=0.5,fontsize=20)\n return fig\n\ntitulo = 'Dominio monzón CanESM2'\nlon = np.arange(0, 362, 362/144)\nmon_range_CanESM2_HS = annual_range_HS(CanESM2_ens)\nlat = mon_range_CanESM2_HS.lat\ncyclic_data_HS, cyclic_lons = add_cyclic_point(mon_range_CanESM2_HS, coord=lon)\nmon_range_CanESM2_HN = annual_range_HN(CanESM2_ens)\ncyclic_data_HN, cyclic_lons = add_cyclic_point(mon_range_CanESM2_HN, coord=lon)\nmapa(cyclic_data_HS,cyclic_data_HN,titulo)\nplt.savefig(path+'dominio_monzon_CanESM2.png',bbox_inches='tight')\nplt.clf\n\ntitulo = 'Dominio monzón CanESM5'\nlon = np.arange(0, 362, 362/144)\nmon_range_HS = annual_range_HS(CanESM5_ens)\nlat = mon_range_CanESM2_HS.lat\ncyclic_data_HS, cyclic_lons = add_cyclic_point(mon_range_HS, coord=lon)\nmon_range_HN = 
annual_range_HN(CanESM5_ens)\ncyclic_data_HN, cyclic_lons = add_cyclic_point(mon_range_HN, coord=lon)\nmapa(cyclic_data_HS,cyclic_data_HN,titulo)\nplt.savefig(path+'dominio_monzon_CanESM5.png',bbox_inches='tight')\nplt.clf\n\n#Genero máscara para el área del monzón global\nmonzon_mask_HS = mon_range_CanESM2_HS.where(mon_range_CanESM2_HS > 2)/mon_range_CanESM2_HS.where(mon_range_CanESM2_HS > 2)\nmonzon_mask_HN = mon_range_CanESM2_HN.where(mon_range_CanESM2_HN > 2)/mon_range_CanESM2_HN.where(mon_range_CanESM2_HN > 2)\n\n#Genero un mapa con colores\ndef mapa_colores(dato1,dato2,titulo):\n #America del sur -------Observaciones-----------------------------------------\n fig = plt.figure(figsize=(10, 18),dpi=300,constrained_layout=True)\n fig_size = plt.rcParams[\"figure.figsize\"]\n data_crs = ccrs.PlateCarree(central_longitude=0)\n projection = ccrs.PlateCarree()\n ax1 = plt.subplot(1,1,1,projection=projection)\n #ax1.set_extent([275,335, 10, -60], crs=data_crs)\n clevels = np.arange(0,600,50)\n im1=ax1.contourf(cyclic_lons, lat, dato1,clevels,transform=data_crs,cmap='Blues',extend='both')\n im2=ax1.contourf(cyclic_lons, lat, dato2,clevels,transform=data_crs,cmap='Blues',extend='both')\n ax1.add_feature(cartopy.feature.COASTLINE)\n ax1.add_feature(cartopy.feature.BORDERS, linestyle='-', alpha=.5)\n ax1.gridlines(crs=data_crs, linewidth=0.3, linestyle='-')\n #ax1.set_title('Annual range')\n #Saco las coordenadas de la figura hasta ahora\n plt0_ax = plt.gca()\n left, bottom1, width, height = plt0_ax.get_position().bounds\n first_plot_left = plt0_ax.get_position().bounds[0]\n #Utilizo las coordenadas para definir la posición de la colorbar 1\n colorbar_axes = fig.add_axes([first_plot_left + .9, bottom1, 0.02, 1.2*height])\n fig_size[0] = width*4 + 10\n fig_size[1] = height*2 + 3\n plt.rcParams[\"figure.figsize\"] = fig_size\n fig.suptitle(str(titulo), y=0.68, x=0.5,fontsize=20)\n cbar = plt.colorbar(im1, colorbar_axes, fraction=0.05, pad=0.04,aspect=18, orientation='vertical')\n ticklabs = cbar.ax.get_yticklabels()\n cbar.ax.set_yticklabels(ticklabs, fontsize=16)\n cbar.set_label('verano-invierno [mm/day] ',fontsize=20)\n return fig\n\n\nHS = CanESM2_ens.pr*monzon_mask_HS\nHS = HS.sel(time=HS['time.season']=='DJF').mean(dim='time')\ncyclic_data_HS, cyclic_lons = add_cyclic_point(HS, coord=lon)\nHN = CanESM2_ens.pr*monzon_mask_HN\nHN = HN.sel(time=HN['time.season']=='JJA').mean(dim='time')\ncyclic_data_HN, cyclic_lons = add_cyclic_point(HN, coord=lon)\nmapa_colores(cyclic_data_HS,cyclic_data_HN,titulo)\nplt.savefig(path+'precip_en_mask_monzon_CanESM2.png',bbox_inches='tight')\nplt.clf\n\n\n","sub_path":"TP2/scripts/precipitation_bias.py","file_name":"precipitation_bias.py","file_ext":"py","file_size_in_byte":6402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"183666975","text":"n = 100000\ndp = [0] * (n + 1)\n\ndef main():\n array = [0] * (n + 1)\n\n array[0] = 1\n array[1] = 1\n\n for i in range(2, int(n**(1/2) + 1)):\n if array[i] == 0:\n for j in range(i * i, n + 1, i):\n array[j] = 1\n curr_sum = 0\n\n for i in range(1, n + 1):\n if array[i] == 0:\n curr_sum += i\n dp[i] = curr_sum\n\nnum = int(input('Enter the number: '))\nmain()\nprint(dp[num])\n","sub_path":"Day 18 - Sum of Primes.py","file_name":"Day 18 - Sum of Primes.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"562777387","text":"#!/usr/bin/env python\nfrom __future__ import 
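# Sketch of the sieve-plus-prefix-sum idea in the prime-sum record above, written as
# a function: sums[i] holds the sum of all primes <= i, so each query is O(1) after
# one O(n log log n) sieve.
def prime_prefix_sums(n):
    composite = [False] * (n + 1)
    composite[0] = composite[1] = True
    for i in range(2, int(n ** 0.5) + 1):
        if not composite[i]:
            for j in range(i * i, n + 1, i):
                composite[j] = True
    sums, running = [0] * (n + 1), 0
    for i in range(1, n + 1):
        if not composite[i]:
            running += i
        sums[i] = running
    return sums

print(prime_prefix_sums(10)[10])   # 2 + 3 + 5 + 7 = 17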
print_function\n\nimport sys\nfrom os import environ\nfrom os.path import dirname, join, pardir, abspath, exists\nimport subprocess\n\nimport nose\n\n\ndef fetch_es_repo():\n    # user is manually setting YAML dir, don't tamper with it\n    if \"TEST_ES_YAML_DIR\" in environ:\n        return\n\n    repo_path = environ.get(\n        \"TEST_ES_REPO\",\n        abspath(join(dirname(__file__), pardir, pardir, \"elasticsearch\")),\n    )\n\n    # no repo\n    if not exists(repo_path) or not exists(join(repo_path, \".git\")):\n        print(\"No elasticsearch repo found...\")\n        # set YAML DIR to empty to skip yaml tests\n        environ[\"TEST_ES_YAML_DIR\"] = \"\"\n        return\n\n    # set YAML test dir\n    environ[\"TEST_ES_YAML_DIR\"] = join(\n        repo_path, \"rest-api-spec\", \"src\", \"main\", \"resources\", \"rest-api-spec\", \"test\"\n    )\n\n    # fetching of yaml tests disabled, we'll run with what's there\n    if environ.get(\"TEST_ES_NOFETCH\", False):\n        return\n\n    from test_elasticsearch.test_server import get_client\n    from test_elasticsearch.test_cases import SkipTest\n\n    # find out the sha of the running es\n    try:\n        es = get_client()\n        sha = es.info()[\"version\"][\"build_hash\"]\n    except (SkipTest, KeyError):\n        print(\"No running elasticsearch >1.X server...\")\n        return\n\n    # fetch new commits to be sure...\n    print(\"Fetching elasticsearch repo...\")\n    subprocess.check_call(\n        \"cd %s && git fetch https://github.com/elasticsearch/elasticsearch.git\"\n        % repo_path,\n        shell=True,\n    )\n    # reset to the version from info()\n    subprocess.check_call(\"cd %s && git fetch\" % repo_path, shell=True)\n    subprocess.check_call(\"cd %s && git reset --hard %s\" % (repo_path, sha), shell=True)\n\n\ndef run_all(argv=None):\n    # NOTE: sys.exitfunc only exists on Python 2; on Python 3 this assignment has no effect\n    sys.exitfunc = lambda: sys.stderr.write(\"Shutting down....\\n\")\n\n    # fetch yaml tests\n    fetch_es_repo()\n\n    # always insert coverage when running tests\n    if argv is None:\n        argv = [\n            \"nosetests\",\n            \"--with-xunit\",\n            \"--with-xcoverage\",\n            \"--cover-package=elasticsearch\",\n            \"--cover-erase\",\n            \"--logging-filter=elasticsearch\",\n            \"--logging-level=DEBUG\",\n            \"--verbose\",\n            \"--with-id\",\n        ]\n\n    nose.run_exit(argv=argv, defaultTest=abspath(dirname(__file__)))\n\n\nif __name__ == \"__main__\":\n    run_all(sys.argv)\n","sub_path":"test_elasticsearch/run_tests.py","file_name":"run_tests.py","file_ext":"py","file_size_in_byte":2414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"416816356","text":"from django.core.management import BaseCommand\n\nfrom t.models import *\n\n\nclass Command(BaseCommand):\n    # Populate a small fixture hierarchy: 2 H rows, each with 2 M rows, each with 3 L rows\n    def handle(self, *args, **options):\n        for h_i in range(2):\n            h = H.objects.create(v=h_i)\n            for m_i in range(2):\n                m = M.objects.create(v=m_i, h=h)\n                for l_i in range(3):\n                    L.objects.create(v=l_i, m=m)\n","sub_path":"t/management/commands/fill_db.py","file_name":"fill_db.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"312618008","text":"import time\n\nwith open('names_1.txt', 'r') as f:\n    names_1 = f.read().split(\"\\n\") # List containing 10000 names\nwith open('names_2.txt', 'r') as f:\n    names_2 = f.read().split(\"\\n\") # List containing 10000 names\n\nstart_time = time.time()\nduplicates = [] # Return the list of duplicates in this data structure\n# Replace the nested for loops below with your improvements\nfor name_1 in names_1:\n    for name_2 in names_2:\n        if name_1 == name_2:\n            duplicates.append(name_1)\nend_time = time.time()\nprint(f\"\\n{len(duplicates)} duplicates:\\n{', 
'.join(duplicates)}\")\nprint(f\"Two for-loops runtime: {end_time - start_time} seconds\\n\")\n# Two for-loops runtime: 5.52326512336731 seconds\n\nstart_time = time.time()\nduplicates = []\nnames_3 = sorted(names_2) # sorted copy; the old names_3 = names_2 + .sort() mutated names_2 in place\nlength = len(names_2)\nfor name_1 in names_1:\n    # Binary search for name_1 in the sorted copy\n    i, j = 0, length-1\n    while True:\n        k = i + int((j-i)/2)\n        name_2 = names_3[k]\n        if name_1 < name_2:\n            j = k\n        elif name_1 > name_2:\n            i = k\n        else:\n            duplicates.append(name_2)\n            break\n        if i+1 >= j:\n            # interval has shrunk to two candidates; check the upper one and stop\n            name_2 = names_3[j]\n            if name_1 == name_2:\n                duplicates.append(name_2)\n            break\nend_time = time.time()\nprint(f\"{len(duplicates)} duplicates\")\nprint(f\"Binary search runtime: {end_time - start_time} seconds\\n\")\n# Binary search runtime: 0.08876609802246094 seconds\n\n# ---------- Stretch Goal -----------\n# Python has built-in tools that allow for a very efficient approach to this problem\n# What's the best time you can accomplish? There are no restrictions on techniques or data\n# structures, but you may not import any additional libraries that you did not write yourself.\n\nstart_time = time.time()\nduplicates = []\n# Hashing gives O(1) membership tests; a plain set(names_2) would serve equally well as this dict\nnames_3 = {k: v for v, k in enumerate(names_2)}\nfor name_1 in names_1:\n    if name_1 in names_3:\n        duplicates.append(name_1)\nend_time = time.time()\nprint(f\"{len(duplicates)} duplicates\")\nprint(f\"Dictionary method runtime: {end_time - start_time} seconds\\n\")\n# Dictionary method runtime: 0.0020265579223632812 seconds\n\nstart_time = time.time()\n# duplicates = []\n# for n in names_1:\n#     if n in names_2:\n#         duplicates.append(n)\nduplicates = [n for n in names_1 if n in names_2] # still O(n*m): the membership test scans a list\nend_time = time.time()\nprint(f\"{len(duplicates)} duplicates\")\nprint(f\"List comprehension method runtime: {end_time - start_time} seconds\\n\")\n# List comprehension method runtime: 0.8597457408905029 seconds\n\n# ---------- Other Methods -----------\nstart_time = time.time()\nl = list(set(names_1)) + list(set(names_2))\nduplicates = [l[i] for i in range(len(l)) if l[i] in l[:i]]\nend_time = time.time()\nprint(f\"{len(duplicates)} duplicates\")\nprint(f\"Alternative method runtime: {end_time - start_time} seconds\\n\")\n# Alternative method runtime: 2.8357491493225098 seconds\n\n'''\n64 duplicates:\nHallie Vazquez, Peyton Lloyd, Daphne Hamilton, Jaden Hawkins, Dulce Hines, Piper Hamilton, Marisol Morris, Josie Dawson, Giancarlo Warren, Amiah Hobbs, Jaydin Sawyer, Franklin Cooper, Diego Chaney, Carley Gallegos, Ahmad Watts, Malcolm Nelson, Malcolm Tucker, Grace Bridges, Luciana Ford, Davion Arias, Pablo Berg, Jadyn Mays, Marley Rivers, Abel Newman, Sanai Harrison, Cloe Norris, Clay Wilkinson, Salma Meza, Addison Clarke, Nelson Acevedo, Devyn Aguirre, Winston Austin, Carsen Tyler, Hayley Morgan, Aleah Valentine, Camryn Doyle, Josie Cole, Nathalie Little, Leia Foley, Jordin Schneider, Justine Soto, Lennon Hunt, Zara Suarez, Kale Sawyer, William Maldonado, Irvin Krause, Maliyah Serrano, Selah Hansen, Kameron Osborne, Alvaro Robbins, Leon Cochran, Andre Carrillo, Dashawn Green, Eden Howe, Logan Morrow, Ralph Roth, Trace Gates, Megan Porter, Aydan Calderon, Raven Christensen, Ashlee Randall, Victoria Roach, River Johnson, Ali Collier\nTwo for-loops runtime: 4.23667049407959 seconds\n\n64 duplicates\nBinary search runtime: 0.05987548828125 seconds\n\n64 duplicates\nDictionary method runtime: 0.003004789352416992 seconds\n\n64 duplicates\nList comprehension method runtime: 1.0831055641174316 seconds\n\n64 duplicates\nAlternative method runtime: 2.853367328643799 
seconds\n'''","sub_path":"names/names.py","file_name":"names.py","file_ext":"py","file_size_in_byte":4112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"337801132","text":"\"\"\"\nN叉树的层序遍历\n\"\"\"\nfrom typing import List\n\n\nclass Node:\n def __init__(self, val=None, children=None):\n self.val = val\n self.children = children\n\n\nclass Solution:\n def levelOrder(self, root: 'Node') -> List[List[int]]:\n if root is None:\n return []\n level = [root]\n ret = []\n while level:\n ret.append([node.val for node in level])\n level = [child for node in level for child in node.children]\n return ret\n","sub_path":"Week_02/429.N叉树的层序遍历.py","file_name":"429.N叉树的层序遍历.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}