diff --git "a/5242.jsonl" "b/5242.jsonl" new file mode 100644--- /dev/null +++ "b/5242.jsonl" @@ -0,0 +1,750 @@ +{"seq_id":"388365648","text":"# Ingresar 3 números enteros y determinar:\n# si la suma del primero y el segundo es igual al tercero\n# Si se cumple: la suma es igual al tercero, si no, la suma es distinta\n\nprimernum=int(input(\"Ingrese el primer número entero: \"))\nsegundonum=int(input(\"Ingrese el segundo número entero: \"))\ntercernum=int(input(\"Ingrese el tercer número entero: \"))\n\nif int(primernum)+int(segundonum)==int(tercernum):\n print(\"La suma de los dos primeros números enteros es igual al tercero ingresado\")\nelse:\n print(\"La suma de los dos primeros números enteros no es igual al tercero ingresado\")","sub_path":"Actividad 1- Decisión simple/ACTIVITY 1- EX 5.py","file_name":"ACTIVITY 1- EX 5.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"370596240","text":"\"\"\"\nOMDbRequest.py\nUser input movie name and year\nThe OMDB database will return the detail of movie\n\n$ sudo pip3 install requests\n\"\"\"\nimport requests\nimport re\nimport sqlite3 as lite\nimport sys\n\ndef getMovieInfo(movie_name, movie_year):\n\n\t#To get movie data, need apikey: a5395c65\n\turl = 'http://www.omdbapi.com/?apikey=a5395c65&t={}&y={}'.format(movie_name, movie_year)\n\n\tres = requests.post(url)\n\tresJson = res.json()\n\t\n\tcon = lite.connect('movieDetail.db')\n\tcur = con.cursor()\n\n\ttry:\t\n\t\tTitle = resJson['Title']\n\t\tYear = resJson['Year']\n\t\tProduction = resJson['Production']\n\t\tDirector = resJson['Director']\n\t\timdbID = resJson['imdbID']\n\t\tMetaScore = resJson['Metascore']\n\t\tImdbRating = resJson['imdbRating']\n\t\tRottenRating = resJson['Ratings'][1]['Value']\n\t\tPlot = resJson['Plot']\n\t\tlength = resJson['Runtime']\n\t\tgenre = resJson['Genre']\n\t\tcountry = resJson['Country']\n\t\trated = resJson['Rated']\n\t\tlanguage = resJson['Language']\n\n\t\t#Print out the information of movie, for test\n\t\t#print(\"\\n\\n\")\n\t\t#print(\"Title: {}\\nYear: {}\\nProduction: {}\\nDirector: {}\\nimdbID: {}\\nScore: Rotten Tomatoes:{}, MetaScore:{}, IMDB:{}\\nPlot: {}\".format(Title, Year, Production, Director, imdbID, RottenRating, MetaScore, ImdbRating, Plot))\n\t\t#print(\"\\n\\n\")\n\n\t\tfpID = input(\"fpID: \")\n\t\tif int(fpID) < 10:\n\t\t\tfpID = \"fp00\" + fpID\n\t\telif int(fpID) > 10 and int(fpID) < 100:\n\t\t\tfpID = \"fp0\" + fpID\n\t\telse:\n\t\t\tfpID = \"fp\" + fpID\n\n\t\t#Insert into table movie\n\t\tcur.execute('INSERT INTO movie VALUES (null, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)', (fpID, Title, Year, Production, Director, imdbID, MetaScore, ImdbRating, RottenRating, Plot, length, genre, country, rated, language))\n\t\tcon.commit()\n\n\texcept:\n\t\tprint(\"Error occurs\")\n\n\t#Close database\n\tcon.close()\n\tprint(\"----------Inserting done----------\")\n\ndef main():\n\n\t#用户端输入 电影名/年份\n\tname = input(\"enter a movie name: \")\n\tyear = input(\"enter the year of the movie: \")\n\tgetMovieInfo(name, year)\n\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"Fresh Potatoes/OMDbRequest.py","file_name":"OMDbRequest.py","file_ext":"py","file_size_in_byte":1989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"138608459","text":"import pandas as pd\r\nimport coint_models as cm\r\nimport data_preprocessing as dp\r\n\r\n# Parameters\r\n\r\nraw_data_path = 
'C:/Users/user/Desktop/ASDASD' # Random folder\r\ntickers_list = ['BTC', 'LTC', 'XRP', 'LUNA', 'ADA', 'SOL', 'DOT', 'DOGE', 'TRX', '1000SHIB', 'ETH', 'BNB', 'LINK', 'UNI',\r\n 'ATOM', 'FTM', 'XMR', 'AAVE', 'NEAR', 'THETA', 'MATIC', 'EOS', 'BAT', 'ENJ']\r\ntf = '15m' # '1h', '1d'\r\n\r\nwindow_size = 672 # randomly choosen\r\nmax_n_comp = 2 # maxmium components in pairs trading eg. BTC - ETH = 2 components / BTC - ETH - LTC = 3 components\r\ninit = 0 # initial start point ( 0 means 2020-01-01 08:00)\r\nalpha = 0.05 # 0.01 / 0.1\r\n\r\n\r\n# Data Preprocessing\r\nDP = dp.get_data(tickers_list, tf, raw_data_path)\r\nDP.get_raw()\r\nDP.get_file('close')\r\nDP.get_file('volume')\r\n\r\n# Load processed data\r\ndata = pd.read_csv('crypto_' + tf + '_close' + '.csv', index_col=0)\r\n# Initialize the model\r\ncoint_models = cm.get_coint_pairs(data, 3, alpha)\r\n# Get stationary components and hedge ratio\r\ntick_param = coint_models.get_pairs(init, window_size)\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"99210803","text":"# -*- encoding: utf-8 -*-\n\"\"\"\nA list of words commonly used to prefix animal names. This class is used in the\nliving thing class to create random, but realistic, common animal names.\n\n**Usage:**\n\n::\n\n from imakedata.imakedata import IMakeData\n from imakedata.ijusthelp import rewrite_dict\n from imakedata.model.raw import raw\n\n animal_prefix_raw = rewrite_dict(raw, {\n 'name': 'animal_prefix_raw',\n 'data': 'animal_prefix',\n })\n\n from imakedata.model.raw import animal_prefix_raw\n number_records = 10\n maker = IMakeData(number_records)\n\n data_set = maker.get_data([animal_prefix_raw])\n\n for row in data_set:\n ineeddata.animal_prefix = row['animal_prefix_raw']\n\n\"\"\"\n\nanimal_prefix = ['Long', 'Black', 'Fruit', 'White', 'Tree', 'Gray', 'Flying', 'Spiny', 'Giant',\n 'Pygmy', 'Yellow', 'Red', 'Rice', 'Lesser', 'Large', 'Dwarf', 'Striped',\n 'Mountain', 'Grass', 'Forest', 'Water', 'Northern', 'Horseshoe', 'Southern',\n 'Pocket', 'Ground', 'Rock', 'Brown', 'Golden', 'Black', 'Eastern', 'Scaly',\n 'Red', 'Naked', 'Broad', 'Western', 'Tube', 'Eastern', 'Common', 'Maple',\n 'Brush', 'Woolly', 'Mexican', 'Cone', 'White', 'Hairy', 'Furred', 'Winged',\n 'Climbing', 'Spotted', 'Western', 'Green', 'Field', 'Fat', 'Desert', 'Amazon',\n 'Seed', 'Cape', 'Harvest', 'Mosaic', 'Northern', 'Striped', 'Island', 'Least',\n 'Black', 'Skipper', 'Dark', 'European', 'Collared', 'Slender', 'False', 'Grain',\n 'Lady', 'Swamp', 'Dusky', 'Narrow', 'Desert', 'Brown', 'Lesser', 'Dagger',\n 'Banded', 'Green', 'Star', 'Eastern', 'Twig', 'Plant', 'Underwing', 'Palm',\n 'Tussock', 'Spotted', 'House', 'Jack', 'Philippine', 'Clawed', 'Ringtail',\n 'South', 'Rufous', 'Soft', 'Cuban', 'Scaly', 'California', 'Wheat', 'Root',\n 'Budworm', 'Grape', 'Flour', 'Blue', 'Pale', 'Bolivian', 'Thicket', 'Large',\n 'Chinese', 'Tomb', 'Peruvian', 'Marsh', 'Central', 'Siberian', 'California',\n 'Southern', 'Spotted', 'Woolly', 'Blueberry', 'Plum', 'Little', 'North',\n 'River', 'Sunda', 'Sumatran', 'Sea', 'Flat', 'Brown', 'Western', 'Purple',\n 'Scarlet', 'Mountain', 'Pacific', 'Sierra', 'Common', 'Monkey', 'Golden',\n 'Alpine', 'Meadow', 'Valley', 'Evening', 'Sand', 'Tongue', 'Blue', 'Oregon',\n 'Pink', 'Shooting', 'Chinese', 'Death', 'Giant', 'Black', 'Fairy', 'Indian',\n 'Blazing', 'Field', 'Yellow', 'Wild', 'Mule', 'Broad', 'Columbine', 'Pride',\n 'Prairie', 
'Gold', 'Elegant', 'Milk', 'Foothill', 'Lotus', 'Texas', 'Butter',\n 'Baby', 'Tree', 'Brush', 'Coastal', 'Cliff', 'Bicolored', 'Rock', 'Square',\n 'Wine', 'Birds', 'Foam', 'Face', 'Bishops', 'Beaked', 'Annual', 'Globe', 'Gray',\n 'Grand', 'Fringe', 'Fringed', 'Autumn', 'Frying', 'Aspen', 'Charming',\n 'Clustered', 'Columbia', 'Orb',\n]\n","sub_path":"imakedata/raw/animal_prefix.py","file_name":"animal_prefix.py","file_ext":"py","file_size_in_byte":2777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"71661817","text":"import os\nimport pytest\nfrom nereval import (\n correct_text, correct_type, count_correct, has_overlap, Entity, precision, recall, evaluate,\n _parse_json, evaluate_json, sign_test\n)\n\ndef test_has_overlap():\n a = Entity('CILINDRISCHE PLUG', 'Productname', 0)\n b = Entity('PLUG', 'Productname', 13)\n assert has_overlap(a, b) is True\n assert has_overlap(b, a) is True\n\n b = Entity('PLUG', 'Productname', 18)\n assert has_overlap(a, b) is False\n\ndef test_has_overlap_open_interval():\n a = Entity('PLUG', 'Productname', 0)\n b = Entity('AB', 'Productname', 4)\n assert has_overlap(a, b) is False\n assert has_overlap(b, a) is False\n\ndef test_entity():\n e = Entity('CILINDRISCHE PLUG', 'Productname', 0)\n assert e.text == 'CILINDRISCHE PLUG'\n assert e.type == 'Productname'\n assert e.start == 0\n\ndef test_correct_text_symmetry():\n true = Entity('CILINDRISCHE PLUG', 'Productname', 0)\n pred = Entity('CILINDRISCHE', 'Productname', 0)\n assert correct_text(true, pred) is False\n assert correct_text(pred, true) is False\n assert correct_text(true, true) is True\n assert correct_text(pred, pred) is True\n\ndef test_correct_text_without_overlap():\n true = Entity('CILINDRISCHE PLUG', 'Productname', 0)\n pred = Entity('CILINDRISCHE PLUG', 'Productname', 11)\n assert correct_text(true, pred) is False\n\ndef test_correct_text_type_mismatch():\n true = Entity('a', 'Productname', 0)\n pred = Entity('a', 'Material', 0)\n assert correct_text(true, pred) is True\n\ndef test_correct_type_symmetry():\n true = Entity('CILINDRISCHE PLUG', 'Productname', 0)\n pred = Entity('PLUG', 'Productname', 13)\n assert correct_type(true, pred) is True\n assert correct_type(pred, true) is True\n assert correct_type(true, true) is True\n assert correct_type(pred, pred) is True\n\ndef test_correct_type_with_overlap():\n true = Entity('CILINDRISCHE', 'Productname', 0)\n pred = Entity('CILINDRISCHE PLUG', 'Productname', 0)\n assert correct_type(true, pred) is True\n\ndef test_correct_type_without_overlap():\n true = Entity('PLUG', 'Productname', 0)\n pred = Entity('CILINDRISCHE PLUG', 'Productname', 21)\n assert correct_type(true, pred) is False\n\ndef test_correct_type_with_mismatch():\n true = Entity('PLUG', 'Productname', 0)\n pred = Entity('PLUG', 'Material', 0)\n assert correct_type(true, pred) is False\n\ndef test_count_correct():\n # CILINDRISCHE PLUG DIN908 M10X1 foo\n # B_PROD I_PROD B_PROD B_DIM O\n x = [\n Entity('CILINDRISCHE PLUG', 'Productname', 0),\n Entity('DIN908', 'Productname', 18),\n Entity('M10X1', 'Dimension', 25)\n ]\n\n # CILINDRISCHE PLUG DIN908 M10X1 foo\n # B_PROD B_PROD B_PROD B_PROD B_PROD\n y = [\n # correct type, wrong text\n Entity('CILINDRISCHE', 'Productname', 0),\n # correct type, wrong text\n Entity('PLUG', 'Productname', 13),\n # correct type, correct text\n Entity('DIN908', 'Productname', 18),\n # wrong type, correct text\n Entity('M10X1', 'Productname', 25),\n # wrong type, wrong text (no entity)\n 
Entity('foo', 'Productname', 35)\n ]\n\n count_correct_text, count_correct_type = count_correct(x, y)\n assert count_correct_text == 2\n assert count_correct_type == 2\n\n # is not necessarily symmetric!\n count_correct_text, count_correct_type = count_correct(y, x)\n assert count_correct_text == 2\n assert count_correct_type == 3\n\n count_correct_text, count_correct_type = count_correct([], [])\n assert count_correct_text == 0\n assert count_correct_type == 0\n\ndef test_precision():\n assert precision(0, 10) == 0\n assert precision(0, 0) == 0\n assert precision(10, 10) == 1\n assert precision(5, 10) == 0.5\n\ndef test_recall():\n assert recall(0, 0) == 0\n assert recall(0, 10) == 0\n assert recall(10, 10) == 1\n assert precision(5, 10) == 0.5\n\n\ndef test_evaluate():\n # CILINDRISCHE PLUG DIN908 M10X1 foo\n # B_PROD I_PROD B_PROD B_DIM O\n x = [\n Entity('CILINDRISCHE PLUG', 'Productname', 0),\n Entity('DIN908', 'Productname', 18),\n Entity('M10X1', 'Dimension', 25)\n ]\n\n # CILINDRISCHE PLUG DIN908 M10X1 foo\n # B_PROD B_PROD B_PROD B_PROD B_PROD\n y = [\n # correct type, wrong text\n Entity('CILINDRISCHE', 'Productname', 0),\n # correct type, wrong text\n Entity('PLUG', 'Productname', 13),\n # correct type, correct text\n Entity('DIN908', 'Productname', 18),\n # wrong type, correct text\n Entity('M10X1', 'Productname', 25),\n # wrong type, wrong text (no entity)\n Entity('foo', 'Productname', 35)\n ]\n\n # dataset containing a single description\n assert evaluate([x], [y]) == 0.5\n assert evaluate([y], [x]) == 0.625\n # multiple descriptions\n assert evaluate([x, y], [x, y]) == 1\n assert evaluate([x, y], [y, x]) == 0.5625\n # edge cases\n assert evaluate([x], [[]]) == 0\n assert evaluate([[]], [x]) == 0\n\ndef test_evaluate_different_shapes():\n x = [[], []]\n y = [[], [], []]\n\n with pytest.raises(ValueError):\n evaluate(x, y)\n\ndef test_sign_test():\n x = [Entity('CILINDRISCHE PLUG', 'Productname', 0)]\n y = [Entity('CILINDRISCHE', 'Productname', 0)]\n\n assert sign_test([x], [x], [y]) == (0, 1)\n assert sign_test([x], [y], [x]) == (1, 0)\n assert sign_test([x], [x], [x]) == (0, 0)\n assert sign_test([x, y], [[], []], [x, y]) == (2, 0)\n assert sign_test([x, y], [x, y], [[], []]) == (0, 2)\n\ndef test_parse_json():\n file_name = os.path.join(os.path.dirname(__file__), 'input.json')\n predictions = _parse_json(file_name)\n assert len(predictions) == 1\n instance = predictions[0]\n assert instance['text'] == 'CILINDRISCHE PLUG'\n assert instance['true'][0] == Entity('CILINDRISCHE PLUG', 'Productname', 0)\n assert instance['predicted'][0] == Entity('CILINDRISCHE', 'Productname', 0)\n assert instance['predicted'][1] == Entity('PLUG', 'Productname', 13)\n\ndef test_evaluate_json():\n file_name = os.path.join(os.path.dirname(__file__), 'input.json')\n assert isinstance(evaluate_json(file_name), float)\n","sub_path":"test_nereval.py","file_name":"test_nereval.py","file_ext":"py","file_size_in_byte":6216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"447811920","text":"\"\"\"Add 1 to 1 Show <> Venue\n\nRevision ID: 77cf86ea78fb\nRevises: cc85516f99e6\nCreate Date: 2020-04-22 07:47:43.661111\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '77cf86ea78fb'\ndown_revision = 'cc85516f99e6'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.add_column('Show', sa.Column('venue_id', sa.Integer(), nullable=False))\n op.create_foreign_key(None, 'Show', 'Venue', ['venue_id'], ['id'])\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_constraint(None, 'Show', type_='foreignkey')\n op.drop_column('Show', 'venue_id')\n # ### end Alembic commands ###\n","sub_path":"projects/01_fyyur/starter_code/migrations/versions/77cf86ea78fb_add_1_to_1_show_venue.py","file_name":"77cf86ea78fb_add_1_to_1_show_venue.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"546466467","text":"import os\n\nimport numpy as np\nimport stft\nfrom scipy.io import wavfile\n\n# import hickle as hkl\n\nINPUT_NOISE_DIR = '../../data/raw_noise/'\nINPUT_CLEAN_DIR = '../../data/sliced_clean/'\nOUTPUT_DIR = '../../data/processed/'\n\n\ndef pad_data(data):\n num_samples = len(data)\n max_rows_in_sample = max(map(len, data))\n num_cols_in_row = data[0][0].size\n new_data = np.zeros((num_samples, max_rows_in_sample, num_cols_in_row))\n for i, sample in enumerate(data):\n num_rows = len(sample)\n for j, row in enumerate(sample):\n idx = max_rows_in_sample - num_rows + j\n for k, c in enumerate(row):\n new_data[i][idx][k] = c\n return new_data\n\n\nif __name__ == '__main__':\n processed_data = []\n # import pdb;pdb.set_trace()\n\n noise_data = [wavfile.read(INPUT_NOISE_DIR + noise)[1] for noise in os.listdir(INPUT_NOISE_DIR)[:5] if\n noise[-4:] == '.wav']\n\n batch_size = 2\n curr = 0\n curr_batch = 0\n\n for i, clean in enumerate(os.listdir(INPUT_CLEAN_DIR)[:10]):\n if clean[-4:] == '.wav':\n rate_clean, data_clean = wavfile.read(INPUT_CLEAN_DIR + clean)\n for noise in noise_data:\n data_noise = noise[:]\n\n length = len(data_clean)\n data_noise = data_noise[:length][:]\n m = min(len(data_clean), len(data_noise))\n data_combined = np.array(np.average(np.array([data_clean[:m], data_noise[:m]]), axis=0))\n\n # data_combined = np.array([(s1/2 + s2/2) for (s1, s2) in zip(data_clean, data_noise)])\n\n Sx_clean = stft.spectrogram(data_clean).transpose() / 100000\n Sx_noise = stft.spectrogram(data_noise).transpose() / 100000\n Sx_combined = stft.spectrogram(data_combined).transpose() / 100000\n\n # Sx_clean = pretty_spectrogram(data_clean.astype('float64'), fft_size=fft_size, step_size=step_size, thresh=spec_thresh)\n # Sx_noise = pretty_spectrogram(data_noise.astype('float64'), fft_size=fft_size, step_size=step_size, thresh=spec_thresh)\n # Sx_combined = pretty_spectrogram(data_combined.astype('float64'), fft_size=fft_size, step_size=step_size, thresh=spec_thresh)\n\n # Sx_target = np.concatenate((Sx_clean, Sx_noise), axis=0)\n # print(clean)\n # print (Sx_clean.shape)\n\n processed_data.append([Sx_combined, Sx_clean, Sx_noise])\n\n curr_batch += 1\n if curr_batch == batch_size:\n combined, clean, noise = zip(*processed_data)\n # max_row_in_sample = max(max(map(len,clean)),max(map(len,noise)),max(map(len,combined)))\n\n noise_padded = pad_data(noise)\n combined_padded = pad_data(combined)\n clean_padded = pad_data(clean)\n\n processed_data = np.array([combined_padded, clean_padded, noise_padded])\n\n np.savez_compressed('%sdata%d' % (OUTPUT_DIR, curr), processed_data)\n # f = h5py.File('%sdata%d' % (OUTPUT_DIR, curr), 'w')\n # f.create_dataset('data', data=processed_data, compression=\"gzip\", compression_opts=9)\n print('Saved batch curr %d' % (curr))\n processed_data = []\n curr += 1\n curr_batch 
= 0\n print('Finished processing %d clean slice files' % (i + 1))\n\n# np.savez_compressed('%sdata' % (OUTPUT_DIR), processed_data)\n# print('Created npz')\n# hkl.dump(processed_data, OUTPUT_DIR + 'data.hkl')\n# print('Created hkl')\n","sub_path":"data_scripts/preprocessing/process_audio.py","file_name":"process_audio.py","file_ext":"py","file_size_in_byte":3586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"482560307","text":"#!/usr/bin/env python\n\nimport unittest\nimport subprocess\nimport warnings\n\n# python environmental variable\npython_env = 'python'\n\n\ndef ignore_resource_warning(func):\n '''Ignore any resource warnings produced by leaving echo_server running.'''\n\n def without_warn(self, *args, **kwargs):\n warnings.simplefilter(\"ignore\", ResourceWarning)\n return func(self, *args, **kwargs)\n return without_warn\n\n\nclass EndToEnd(unittest.TestCase):\n def test_without_echo_server(self):\n '''The benchmark should error without the presense of an echo_server on `host` arg.'''\n\n result = subprocess.run(\n [python_env, 'bench.py'], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n assert(result.returncode == 1)\n\n @ignore_resource_warning\n def test_with_echo_server(self):\n '''The benchmark should run without errors.'''\n\n echo_server = subprocess.Popen([python_env, 'echo_server.py', '--n', '64'])\n result = subprocess.run(\n [python_env, 'bench.py'], stdout=subprocess.DEVNULL)\n echo_server.kill()\n assert(result.returncode == 0)\n\n @ignore_resource_warning\n def test_stat_report(self):\n '''Statistics of the benchmark should be printed to stdout.'''\n\n echo_server = subprocess.Popen([python_env, 'echo_server.py', '--n', '64'])\n result = subprocess.Popen([python_env, 'bench.py'], stdout=subprocess.PIPE, shell=True)\n result.wait()\n output = str(result.stdout.read())\n echo_server.kill()\n assert('Min' in output)\n assert('Mean' in output)\n assert('Max' in output)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"test_bench.py","file_name":"test_bench.py","file_ext":"py","file_size_in_byte":1675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"156909055","text":"from discord.ext import commands\nimport random\nfrom ..core.cog_config import CogExtension\nfrom ..core.db.jsonstorage import JsonApi\n\n\nclass Picture(CogExtension):\n\n @commands.group()\n async def pic(self, ctx):\n pass\n\n @pic.command()\n async def list(self, ctx):\n \"\"\"cmd\n 查詢資料庫中所有的圖片。\n \"\"\"\n pic_json = JsonApi.get('DynamicSetting')\n\n pic_str = ''\n for i, pic in enumerate(pic_json['picture_link']):\n pic_str += f'{i}: {pic}\\n'\n\n if len(pic_str) > 1600:\n await ctx.send(pic_str)\n\n if len(pic_str) > 0:\n await ctx.send(pic_str)\n\n @pic.command(aliases=['insert'])\n async def add(self, ctx, link: str):\n \"\"\"cmd\n 將一張圖片加入到資料庫中。\n\n .link: 圖片的超連結\n \"\"\"\n pic_json = JsonApi.get('DynamicSetting')\n pic_json['picture_link'].append(link)\n JsonApi.put('DynamicSetting', pic_json)\n\n await ctx.send(f':white_check_mark: 圖片 {link} 已新增!')\n\n @pic.command(aliases=['delete'])\n async def remove(self, ctx, index: int):\n \"\"\"cmd\n 將一張圖片從資料庫中移除。\n\n .index: 圖片的位置(可利用list進行查詢)\n \"\"\"\n pic_json = JsonApi.get('DynamicSetting')\n\n storage_size = len(pic_json['picture_link'])\n if index >= storage_size:\n return await ctx.send(\n f':x: 位置的數字要介於 [0 ~ {storage_size - 1}] 之間!\\n'\n )\n\n del_object = pic_json['picture_link'][index]\n del 
pic_json['picture_link'][index]\n\n JsonApi.put('DynamicSetting', pic_json)\n\n await ctx.send(f':white_check_mark: 圖片 {del_object} 已刪除!')\n\n @pic.command(aliases=['get'])\n async def random(self, ctx):\n \"\"\"cmd\n 發送一張隨機的圖片。\n \"\"\"\n pic_json = JsonApi.get('DynamicSetting')\n random_picture = random.choice(pic_json['picture_link'])\n await ctx.send(random_picture)\n\n\ndef setup(bot):\n bot.add_cog(Picture(bot))\n","sub_path":"bot/cogs/picture.py","file_name":"picture.py","file_ext":"py","file_size_in_byte":2125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"51126996","text":"# coding=utf-8\n\nimport cv2\nimport os\nimport numpy as np\nPATH = 'C:/Users/CSGrandeur/Desktop/123'\n\nfile_list = os.listdir(PATH)\nfile_list.sort()\nimg_list = []\ni = 1\nfor file in file_list:\n # print(file)\n # print(file.find('PNG') )\n if file.find('PNG') < 0 and file.find('png') < 0:\n continue\n print(file.find('PNG'))\n if file.find('PNG') >= 0:\n os.rename(PATH + '/' + file, PATH + '/' + ('%02d.png' % i))\n img = cv2.imread(PATH + '/' + ('%02d.png' % i))\n img_list.append(img)\n i += 1\nres_img = np.vstack(img_list)\ntry:\n os.makedirs(PATH + '/res')\nexcept:\n None\ncv2.imwrite(PATH + '/res/res.png', res_img)\n","sub_path":"image/image_mosaic.py","file_name":"image_mosaic.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"367840490","text":"# -*- coding: cp936 -*-\nimport copy\nimport time\nfrom functools import reduce\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\nimport random\n\n\n# Rastrigr 函数\nfrom Petri_test import sigmoid, Funvtion\n\n\ndef object_function(x_):\n\n\n return Funvtion(x_)\n '''\n \n :param x_: \n :return: \n '''\n \"\"\"\n result = 0.\n D= 5\n\n for i in range(D):\n w = [0., 0.2, 0.5, 0.3, 0.4, 0.6]\n u = [0., 0.7, 0.9, 0.6, 0.8, 0.7]\n t = [0., 0.3, 0.4, 0.2, 0.5, 0.4]\n w[i + 1] = x_[i]\n # t[4] = x_[0]\n ww = copy.deepcopy(w)\n uu = copy.deepcopy(u)\n tt = copy.deepcopy(t)\n p1 = 0.9\n p4 = 0.9\n p5 = 0.9\n p7 = 0.9\n b = 10\n p9 = p1 * u[1] * sigmoid(p1, b, t[1])\n p2 = p1 * u[2] * sigmoid(p1, b, t[2])\n x1 = p9\n x2 = p2 * u[3] * sigmoid(p2, b, t[3])\n # p3 = max(x1, x2)\n p3 = x1 * sigmoid(x1, b, x2) + x2 * sigmoid(x1, b, x2)\n x3 = p4 * w[1] + p3 * w[2] + p5 * w[3]\n p6 = x3 * u[4] * sigmoid(x3, b, t[4])\n x4 = p6 * w[4] + p7 * w[5]\n p8 = x4 * u[5] * sigmoid(x4, b, t[5])\n result += (p8 - 0.568015203115994) ** 2\n # print(\"result\",result)\n # print(i,\"i\")\n # print(\"x\",x_)\n # print(x_)\n\n return result\n \"\"\"\n\n\n\n\n # D = 30\n # X[1,:]是取第1维中下标为1的元素的所有数据,第1行(从0开始)\n # return np.linalg.norm(x_) ** 2 # np.linalg.norm(求范数) **乘方\n # return np.linalg.norm(x_, ord=1) + abs(np.prod(x_)) #F2 搞不得\n # return np.linalg.norm(x_, ord=np.Inf) #F4\n '''result = 0.\n for n in range(D-1):\n result += 100*(x_[n+1]-x_[n]**2)**2+(x_[n]-1)**2\n return result # F5'''\n\n '''result =0.\n for n in range(D):\n result+= np.abs(x_[n]+0.5)**2\n\n return result#F6 # F6'''\n '''result = 0.\n for n in range(D):\n result += (n+1)*x_[n]**4\n\n return result+random.random() # F7'''\n '''sqrt_x = np.sqrt(np.abs(x_))\n x_new = x_*np.sin(sqrt_x)\n # print(sqrt_x,\"x_new\")\n # print(418.9828*self.D)\n # print(reduce(lambda x, y: x + y, x_new),\"reduce \")\n return 418.9828*D - reduce(lambda x, y: x + y, x_new )# F8=cannot'''\n\n '''result = 0.\n for n in range(D):\n\n result += x_[n]**2 - 10*np.cos(2*np.pi*x_[n])+10\n 
return result # F9'''\n\n \"\"\"x_ = np.array(x_)\n x_new1 = x_**2\n return -20*np.exp(-0.2*np.sqrt((1/D)*reduce(lambda x, y: x + y,x_new1)))-\\\n np.exp((1/D)*reduce(lambda x, y: x + y,np.cos(2*np.pi*x_)))+20+np.e # F10\"\"\"\n # x_ = np.array(x_)\n # x_new1 = x_ ** 2\n # result = 1\n # for n in range(D ):\n # result*=np.cos(x_[n]/np.sqrt(n+1))\n #\n # return 1/4000*reduce(lambda x, y: x + y,x_new1)-result+1\n\n ''' A = np.zeros((2, 25))\n a = [-32, -16, 0, 16, 32]\n A[0, :] = np.tile(a, (1, 5))\n A[1, :] = np.repeat(a, 5)\n result = 0.\n for j in range(25):\n zx1 = (x_[0]-A[0,j])**6+(x_[1]-A[1,j])**6+j+1\n result+=1/zx1\n return (0.002+result)**(-1) # F14'''\n\n # return np.linalg.norm(x_,ord=np.Inf)\n # return (x_[1]-5.1/(4*(math.pi**2))*x_[0]**2+5/math.pi*x_[0]-6)**2+10*(1-1/(8*math.pi))*math.cos(x_[0])+10 #\n\n # x_new = (np.abs(x_ + 0.5)) ** 2\n #\n # return reduce(lambda x, y: x + y, x_new)+\n\n \"\"\"x_new = (-1)*x_ * np.sin(np.sqrt(abs(x_)))\n return reduce(lambda x, y: x + y, x_new)\"\"\"\n\n '''x_new = x_*np.sin(10*np.pi*x_)\n return (-1)*reduce(lambda x, y: x + y, x_new)'''\n\n\n# 参数\ndef initpara():\n NP = 50 # 种群数量\n F = 0.6 # 缩放因子\n CR = 0.7 # 交叉概率\n generation = 200 # 遗传代数\n len_x = 5 #维度\n value_up_range = 1\n value_down_range = -value_up_range\n return NP, F, CR, generation, len_x, value_up_range, value_down_range\n\n\n# 种群初始化\ndef initialtion(NP,len_x,value_down_range,value_up_range):\n np_list = [] # 种群,染色体\n for i in range(0, NP):\n x_list = [] # 个体,基因\n for j in range(0, len_x):\n x_list.append(value_down_range + random.random() * (value_up_range - value_down_range))\n np_list.append(x_list)\n return np_list\n\n\n# 列表相减\ndef substract(a_list, b_list):\n a = len(a_list)\n new_list = []\n for i in range(0, a):\n new_list.append(a_list[i] - b_list[i])\n return new_list\n\n\n# 列表相加\ndef add(a_list, b_list):\n a = len(a_list)\n new_list = []\n for i in range(0, a):\n new_list.append(a_list[i] + b_list[i])\n return new_list\n\n\n# 列表的数乘\ndef multiply(a, b_list):\n b = len(b_list)\n new_list = []\n for i in range(0, b):\n new_list.append(a * b_list[i])\n return new_list\n\n\n# 变异\ndef mutation(np_list,NP,F):\n v_list = []\n for i in range(0, NP):\n r1 = random.randint(0, NP - 1)\n while r1 == i:\n r1 = random.randint(0, NP - 1)\n r2 = random.randint(0, NP - 1)\n while r2 == r1 | r2 == i:\n r2 = random.randint(0, NP - 1)\n r3 = random.randint(0, NP - 1)\n while r3 == r2 | r3 == r1 | r3 == i:\n r3 = random.randint(0, NP - 1)\n\n v_list.append(add(np_list[r1], multiply(F, substract(np_list[r2], np_list[r3]))))\n return v_list\n\n\n# 交叉\ndef crossover(np_list, v_list,NP,len_x,CR):\n u_list = []\n for i in range(0, NP):\n vv_list = []\n for j in range(0, len_x):\n if (random.random() <= CR) | (j == random.randint(0, len_x - 1)):\n vv_list.append(v_list[i][j])\n else:\n vv_list.append(np_list[i][j])\n u_list.append(vv_list)\n return u_list\n\n\n# 选择\ndef selection(u_list, np_list,NP):\n for i in range(0, NP):\n if object_function(u_list[i]) <= object_function(np_list[i]):\n np_list[i] = u_list[i]\n else:\n np_list[i] = np_list[i]\n return np_list\n\n\ndef DE(NP, F, CR, generation, len_x, value_up_range, value_down_range):\n # 主函数\n # NP, F, CR, generation, len_x, value_up_range, value_down_range = initpara()\n np_list = initialtion(NP,len_x,value_down_range,value_up_range)\n min_x = []\n min_f = []\n\n # np_list = [] # 种群,染色体\n for i in range(0, NP):\n xx = []\n xx.append(object_function(np_list[i]))\n min_f.append(min(xx))\n min_x.append(np_list[xx.index(min(xx))])\n for i in 
range(0, generation):\n v_list = mutation(np_list,NP,F)\n u_list = crossover(np_list, v_list,NP,len_x,CR)\n np_list = selection(u_list, np_list,NP)\n for i in range(0, NP):\n xx = []\n xx.append(object_function(np_list[i]))\n min_f.append(min(xx))\n min_x.append(np_list[xx.index(min(xx))])\n print(\"DE---i==\",i)\n # 输出\n\n min_ff = min(min_f) #最小适应度值\n min_xx = min_x[min_f.index(min_ff)]\n print('the minimum point is x ')\n print(min_xx)\n print('the minimum value is y ')\n print(min_ff)\n\n # min_f 最小适应度\n # min_x 最小值\n # print(min_x)\n return min_f,min_x\n\n # 画图\n '''x_label = np.arange(0, generation + 1, 1)\n plt.plot(x_label, min_f, color='blue')\n plt.xlabel('iteration')\n plt.ylabel('fx')\n plt.savefig('./iteration-f.png')\n plt.show()'''\n\nif __name__==\"__main__\":\n T = 1\n t = np.zeros(T)\n value = np.zeros(T)\n for i in range(T):\n timestart = time.time()\n\n\n NP = 50 # 种群数量\n F = 0.6 # 缩放因子\n CR = 0.7 # 交叉概率\n generation = 200 # 遗传代数\n len_x = 5 # 维度\n value_up_range = 1\n value_down_range = 0\n\n value[i] ,position= DE(NP, F, CR, generation, len_x, value_up_range, value_down_range)\n print(i, \"i\", \" \", value[i], )\n timeend = time.time()\n print(timeend-timestart,\"时间\")\n\n\n\n print(\"平均值:\", np.average(value))\n print(\"最优值:\", np.min(value))\n print(\"最差值:\", np.max(value))","sub_path":"FA-base/DE.py","file_name":"DE.py","file_ext":"py","file_size_in_byte":7957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"111469227","text":"from PyQt4 import uic\nimport os, pwd\n\nclass ClassView(object):\n def __init__(self):\n pass\n\nclass ViewLoader(object):\n def __init__(self):\n for view in self.viewFolders(os.listdir(\"views\")):\n self.addViewClass(view)\n \n def viewFolders(self, views):\n for view in views:\n if not \".\" in view:\n yield view\n \n def addViewClass(self,viewFolder):\n path = \"views/%s\" % viewFolder\n classView = ClassView()\n for view in os.listdir(path):\n viewName,viewExt = view.split(\".\")\n if viewExt == \"ui\":\n base,form = uic.loadUiType(path+\"/\"+view)\n viewName = view.split(\".\")[0]\n classView.__dict__[viewName+\"Base\"] = base\n classView.__dict__[viewName+\"Form\"] = form\n self.__dict__[viewFolder] = classView","sub_path":"view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"6016526","text":"# 读写json\n# json.dumps, json.loads都不涉及操作文件, 只针对字符串, 而文件本身可以通过f.read(), f.write()来完成.\n\nimport json\nif __name__ == '__main__':\n a = {'a': 'a1', 'b': 'b2'}\n a_str = json.dumps(a)\n # print(a_str)\n with open('json.txt', 'w') as f:\n f.write(a_str)\n\n with open('json.txt') as f:\n t = json.loads(f.read())\n print(t)","sub_path":"week1_basic/JSONRWTest.py","file_name":"JSONRWTest.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"371234852","text":"\"\"\"The module includes an integration and a regression test for the simulation and the\nestiomation process.\n \"\"\"\nimport json\n\nimport numpy as np\nimport pytest\n\nfrom grmpy.check.auxiliary import check_special_conf\nfrom grmpy.check.check import (\n check_sim_distribution,\n check_sim_init_dict,\n check_start_values,\n)\nfrom grmpy.check.custom_exceptions import UserError\nfrom grmpy.estimate.estimate import fit\nfrom grmpy.estimate.estimate_par import calculate_criteria, process_data, start_values\nfrom 
grmpy.estimate.estimate_output import simulate_estimation\nfrom grmpy.grmpy_config import TEST_RESOURCES_DIR\nfrom grmpy.read.read import read\nfrom grmpy.simulate.simulate import simulate\nfrom grmpy.test.auxiliary import cleanup, dict_transformation, read_desc\nfrom grmpy.test.random_init import generate_random_dict, print_dict\n\n\ndef test1():\n \"\"\"The test runs a loop to check the consistency of the random init file generating\n process and the following simulation.\n \"\"\"\n for _ in range(10):\n dict_ = generate_random_dict()\n print_dict(dict_)\n simulate(\"test.grmpy.yml\")\n\n\ndef test2():\n \"\"\"This test runs a random selection of five regression tests from the our old\n regression test battery.\n \"\"\"\n fname = TEST_RESOURCES_DIR + \"/old_regression_vault.grmpy.json\"\n tests = json.load(open(fname))\n random_choice = np.random.choice(range(len(tests)), 5)\n tests = [tests[i] for i in random_choice]\n\n for test in tests:\n stat, dict_, criteria = test\n print_dict(dict_transformation(dict_))\n df = simulate(\"test.grmpy.yml\")\n init_dict = read(\"test.grmpy.yml\")\n start = start_values(init_dict, df, \"init\")\n _, X1, X0, Z1, Z0, Y1, Y0 = process_data(df, init_dict)\n\n criteria_ = calculate_criteria(init_dict, X1, X0, Z1, Z0, Y1, Y0, start)\n np.testing.assert_almost_equal(np.sum(df.sum()), stat)\n np.testing.assert_array_almost_equal(criteria, criteria_)\n\n\ndef test3():\n \"\"\"The test checks if the criteria function value of the simulated and the\n 'estimated' sample is equal if both samples include an identical number of\n individuals.\n \"\"\"\n for _ in range(5):\n constr = dict()\n constr[\"DETERMINISTIC\"], constr[\"AGENTS\"], constr[\"START\"] = False, 1000, \"init\"\n constr[\"OPTIMIZER\"], constr[\"SAME_SIZE\"] = \"SCIPY-BFGS\", True\n generate_random_dict(constr)\n df1 = simulate(\"test.grmpy.yml\")\n rslt = fit(\"test.grmpy.yml\")\n init_dict = read(\"test.grmpy.yml\")\n _, df2 = simulate_estimation(rslt)\n start = start_values(init_dict, df1, \"init\")\n\n criteria = []\n for data in [df1, df2]:\n _, X1, X0, Z1, Z0, Y1, Y0 = process_data(data, init_dict)\n criteria += [calculate_criteria(init_dict, X1, X0, Z1, Z0, Y1, Y0, start)]\n np.testing.assert_allclose(criteria[1], criteria[0], rtol=0.1)\n\n\ndef test4():\n \"\"\"The test checks if the estimation process works if the Powell algorithm is\n specified as the optimizer option.\n \"\"\"\n for _ in range(5):\n constr = dict()\n constr[\"DETERMINISTIC\"], constr[\"AGENTS\"], constr[\"start\"] = (\n False,\n 10000,\n \"init\",\n )\n constr[\"optimizer\"] = \"SCIPY-Powell\"\n generate_random_dict(constr)\n\n simulate(\"test.grmpy.yml\")\n fit(\"test.grmpy.yml\")\n\n\ndef test5():\n \"\"\"The test checks if the estimation process works properly when maxiter is set to\n zero.\n \"\"\"\n for _ in range(5):\n constr = dict()\n constr[\"DETERMINISTIC\"], constr[\"MAXITER\"] = False, 0\n generate_random_dict(constr)\n simulate(\"test.grmpy.yml\")\n fit(\"test.grmpy.yml\")\n\n\ndef test6():\n \"\"\"Additionally to test5 this test checks if the comparison file provides the\n expected output when maxiter is set to zero and the estimation process uses the\n initialization file values as start values.\n \"\"\"\n for _ in range(5):\n constr = dict()\n constr[\"DETERMINISTIC\"], constr[\"MAXITER\"], constr[\"AGENTS\"] = False, 0, 15000\n constr[\"START\"], constr[\"SAME_SIZE\"] = \"init\", True\n dict_ = generate_random_dict(constr)\n dict_[\"DIST\"][\"params\"][1], dict_[\"DIST\"][\"params\"][5] = 0.0, 1.0\n 
print_dict(dict_)\n simulate(\"test.grmpy.yml\")\n fit(\"test.grmpy.yml\")\n dict_ = read_desc(\"comparison.grmpy.info\")\n for section in [\"ALL\", \"TREATED\", \"UNTREATED\"]:\n np.testing.assert_equal(len(set(dict_[section][\"Number\"])), 1)\n np.testing.assert_almost_equal(\n dict_[section][\"Observed Sample\"],\n dict_[section][\"Simulated Sample (finish)\"],\n 0.001,\n )\n np.testing.assert_array_almost_equal(\n dict_[section][\"Simulated Sample (finish)\"],\n dict_[section][\"Simulated Sample (start)\"],\n 0.001,\n )\n\n\ndef test7():\n \"\"\"This test ensures that the estimation process returns an UserError if one tries\n to execute an estimation process with initialization file values as start values for\n an deterministic setting.\n \"\"\"\n fname_falsespec1 = TEST_RESOURCES_DIR + \"/test_falsespec1.grmpy.yml\"\n fname_falsespec2 = TEST_RESOURCES_DIR + \"/test_falsespec2.grmpy.yml\"\n fname_noparams = TEST_RESOURCES_DIR + \"/test_noparams.grmpy.yml\"\n fname_binary = TEST_RESOURCES_DIR + \"/test_binary.grmpy.yml\"\n fname_vzero = TEST_RESOURCES_DIR + \"/test_vzero.grmpy.yml\"\n fname_possd = TEST_RESOURCES_DIR + \"/test_npsd.grmpy.yml\"\n fname_zero = TEST_RESOURCES_DIR + \"/test_zero.grmpy.yml\"\n\n for _ in range(5):\n constr = dict()\n constr[\"AGENTS\"], constr[\"DETERMINISTIC\"] = 1000, True\n generate_random_dict(constr)\n dict_ = read(\"test.grmpy.yml\")\n pytest.raises(UserError, check_sim_distribution, dict_)\n pytest.raises(UserError, fit, \"test.grmpy.yml\")\n\n generate_random_dict(constr)\n dict_ = read(\"test.grmpy.yml\")\n if len(dict_[\"CHOICE\"][\"order\"]) == 1:\n dict_[\"CHOICE\"][\"params\"] = list(dict_[\"CHOICE\"][\"params\"])\n dict_[\"CHOICE\"][\"params\"] += [1.000]\n dict_[\"CHOICE\"][\"order\"] += [2]\n\n dict_[\"CHOICE\"][\"order\"][1] = \"X1\"\n print_dict(dict_)\n pytest.raises(UserError, check_sim_init_dict, dict_)\n pytest.raises(UserError, simulate, \"test.grmpy.yml\")\n pytest.raises(UserError, fit, \"test.grmpy.yml\")\n\n constr[\"AGENTS\"] = 0\n generate_random_dict(constr)\n dict_ = read(\"test.grmpy.yml\")\n pytest.raises(UserError, check_sim_init_dict, dict_)\n pytest.raises(UserError, simulate, \"test.grmpy.yml\")\n\n length = np.random.randint(2, 100)\n array = np.random.rand(length, 1)\n subsitute = np.random.randint(0, len(array) - 1)\n array[subsitute] = np.inf\n pytest.raises(UserError, check_start_values, array)\n\n dict_ = read(fname_possd)\n pytest.raises(UserError, check_sim_init_dict, dict_)\n pytest.raises(UserError, simulate, fname_possd)\n\n dict_ = read(fname_zero)\n pytest.raises(UserError, check_sim_distribution, dict_)\n pytest.raises(UserError, fit, fname_zero)\n\n dict_ = read(fname_vzero)\n pytest.raises(UserError, check_sim_distribution, dict_)\n pytest.raises(UserError, fit, fname_vzero)\n\n dict_ = read(fname_noparams)\n pytest.raises(UserError, check_sim_distribution, dict_)\n pytest.raises(UserError, fit, fname_noparams)\n\n dict_ = read(fname_falsespec1)\n pytest.raises(UserError, check_sim_init_dict, dict_)\n pytest.raises(UserError, fit, fname_noparams)\n\n dict_ = read(fname_falsespec2)\n pytest.raises(UserError, check_sim_init_dict, dict_)\n pytest.raises(UserError, fit, fname_noparams)\n\n dict_ = read(fname_binary)\n status, _ = check_special_conf(dict_)\n np.testing.assert_equal(status, True)\n pytest.raises(UserError, check_sim_init_dict, dict_)\n pytest.raises(UserError, fit, fname_noparams)\n\n\ndef test8():\n \"\"\"The test checks if an UserError occurs if wrong inputs are specified for a\n different 
functions/methods.\n \"\"\"\n constr = dict()\n constr[\"DETERMINISTIC\"], constr[\"AGENTS\"] = False, 1000\n generate_random_dict(constr)\n df = simulate(\"test.grmpy.yml\")\n dict_ = read(\"test.grmpy.yml\")\n a = list()\n dict_[\"ESTIMATION\"][\"file\"] = \"data.grmpy.yml\"\n print_dict(dict_, \"false_data\")\n pytest.raises(UserError, fit, \"tast.grmpy.yml\")\n pytest.raises(UserError, fit, \"false_data.grmpy.yml\")\n pytest.raises(UserError, simulate, \"tast.grmpy.yml\")\n pytest.raises(UserError, read, \"tast.grmpy.yml\")\n pytest.raises(UserError, start_values, a, df, \"init\")\n pytest.raises(UserError, generate_random_dict, a)\n\n\ndef test9():\n \"\"\"This test ensures that the random initialization file generating process, the\n read in process and the simulation process works if the constraints function allows\n for different number of covariates for each treatment state and the occurence of\n cost-benefit shifters.\"\"\"\n for _ in range(5):\n constr = dict()\n constr[\"DETERMINISTIC\"], constr[\"AGENT\"], constr[\"STATE_DIFF\"] = (\n False,\n 1000,\n True,\n )\n constr[\"OVERLAP\"] = True\n generate_random_dict(constr)\n read(\"test.grmpy.yml\")\n simulate(\"test.grmpy.yml\")\n fit(\"test.grmpy.yml\")\n\n cleanup()\n","sub_path":"grmpy/test/test_integration.py","file_name":"test_integration.py","file_ext":"py","file_size_in_byte":9445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"577818414","text":"# How to add a new endpoint:\n# 1. Add the route in the __init__ method. See\n# http://girder.readthedocs.io/en/latest/plugin-development.html?highlight=route\n# for details.\n# 2. Add a new method to the DSAEndpointsResource class. If it has a\n# @describeRoute decorator, it will show up in the Swagger API.\n\nfrom girder import logger\nfrom girder.api import access\nfrom girder.api.describe import describeRoute, Description\nfrom girder.api.rest import Resource, RestException\nfrom girder.constants import AccessType\n\nfrom .system import allChildFolders, allChildItems\n\n\nclass DSAEndpointsResource(Resource):\n\n def __init__(self):\n super(DSAEndpointsResource, self).__init__()\n\n self.resourceName = 'dsa_endpoints'\n self.route('GET', ('child_metadata', ':id'), self.getChildMetadata)\n\n @describeRoute(\n Description('Get all metadata for a resource and all folders and '\n 'items that are children of a resource.')\n .param('id', 'The ID of the resource.', paramType='path')\n .param('type', 'The type of the resource (folder, collection, or '\n 'user).')\n .errorResponse('ID was invalid.')\n .errorResponse('Access was denied for the resource.', 403)\n )\n @access.public\n def getChildMetadata(self, id, params):\n user = self.getCurrentUser()\n modelType = params['type']\n model = self.model(modelType)\n doc = model.load(id=id, user=user, level=AccessType.READ)\n if not doc:\n raise RestException('Resource not found.')\n results = {}\n if doc.get('meta'):\n results[str(doc['_id'])] = doc['meta']\n logger.info('Getting child metadata')\n for folder in allChildFolders(parentType=modelType, parent=doc,\n user=user, limit=0, offset=0):\n if folder.get('meta'):\n results[str(folder['_id'])] = folder['meta']\n for item in allChildItems(parentType=modelType, parent=doc,\n user=user, limit=0, offset=0):\n if item.get('meta'):\n results[str(item['_id'])] = item['meta']\n return 
results\n","sub_path":"server/rest/dsa.py","file_name":"dsa.py","file_ext":"py","file_size_in_byte":2194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"615887150","text":"import sys\nimport sqlite3\n\nfrom PyQt5.QtWidgets import QMainWindow, QApplication, QTableWidget, QTableWidgetItem, QHeaderView, QPushButton\nfrom PyQt5.uic import loadUi\nfrom PyQt5.QtCore import pyqtSlot, QPersistentModelIndex\n\nclass ToDo(QMainWindow):\n def __init__(self):\n super().__init__()\n\n # init connection\n self.conn = sqlite3.connect('todo.db')\n self.createDb()\n\n # load UI\n self.ui = loadUi('main.ui', self)\n\n # init widgets\n self.lineEdit = self.ui.lineEdit\n self.submitButton = self.ui.submitButton\n self.table = self.ui.tableWidget\n\n self.table.horizontalHeader().setSectionResizeMode(0, QHeaderView.Stretch)\n\n # connect slots\n self.submitButton.clicked.connect(self.addRecord)\n self.lineEdit.returnPressed.connect(self.submitButton.click)\n self.table.itemChanged.connect(self.updateRecord)\n\n self.loadRecords()\n\n def loadData(self):\n loadRecords\n\n def addRecord(self):\n text = self.lineEdit.text().strip()\n\n if bool(text):\n self.lineEdit.setText(None)\n\n rowCount = self.table.rowCount()\n self.table.insertRow(rowCount)\n\n cellItem = QTableWidgetItem(text)\n self.table.setItem(rowCount, 0, cellItem)\n\n cellButton = QPushButton(\"X\")\n cellButton.setStyleSheet('''\n background-color: red;\n color: white;\n ''')\n cellButton.setMaximumWidth(50)\n \n self.table.setColumnWidth(1, 50)\n self.table.setCellWidget(rowCount, 1, cellButton)\n\n index = QPersistentModelIndex(self.table.model().index(rowCount, 1))\n cellButton.clicked.connect(lambda: self.removeRecord(index, cellItem))\n\n def loadRecords(self):\n records = self.conn.execute(\"SELECT * FROM tasks ORDER BY ID DESC\")\n\n for record in records:\n rowCount = self.table.rowCount()\n self.table.insertRow(rowCount)\n\n cellItem = QTableWidgetItem(record[1])\n cellItem.setData(31, record[0])\n self.table.setItem(rowCount, 0, cellItem)\n\n cellButton = QPushButton(\"X\")\n cellButton.setStyleSheet('''\n background-color: red;\n color: white;\n ''')\n cellButton.setMaximumWidth(50)\n \n self.table.setColumnWidth(1, 50)\n self.table.setCellWidget(rowCount, 1, cellButton)\n\n index = QPersistentModelIndex(self.table.model().index(rowCount, 1))\n cellButton.clicked.connect(lambda: self.removeRecord(index, cellItem))\n\n def removeRecord(self, index, cellItem):\n #self.conn.execute(\"DELETE tasks WHERE id = {0}\".format(item.data(31)))\n #self.conn.commit()\n\n print(cellItem)\n \n self.conn.execute(\"DELETE FROM tasks WHERE id = {0}\".format(cellItem.data(31)))\n self.conn.commit()\n\n self.table.removeRow(index.row())\n\n def updateRecord(self, item):\n if item.data(31):\n self.conn.execute(\"UPDATE tasks SET task = '{0}' WHERE id = {1}\".format(item.text(), item.data(31)))\n self.conn.commit()\n else:\n self.conn.execute(\"INSERT INTO tasks(task) VALUES('{0}')\".format(item.text()))\n self.conn.commit()\n\n def createDb(self):\n self.conn.execute(\"CREATE TABLE IF NOT EXISTS tasks(id INTEGER PRIMARY KEY AUTOINCREMENT, task VARCHAR(255))\")\n \n\n\napp = QApplication(sys.argv)\n\ntoDo = ToDo()\ntoDo.show()\n#toDo.showFullScreen()\n\nsys.exit(app.exec_())","sub_path":"ToDo.py","file_name":"ToDo.py","file_ext":"py","file_size_in_byte":3620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"295827273","text":"import bpy, mathutils, 
math, json\n\nclass SetBoneOperator(bpy.types.Operator):\n \"\"\"Set Bone\"\"\"\n bl_idname = \"wm.mocap_set_bone_operator\"\n bl_label = \"Map Bone to IMU Node\"\n sensor: bpy.props.IntProperty(name=\"Sensor\")\n sensor_str: bpy.props.StringProperty(name=\"SensorStr\")\n sensor_x: bpy.props.IntProperty(name=\"SensorX\")\n sensor_y: bpy.props.IntProperty(name=\"SensorY\")\n \n def execute(self, context):\n #context.scene.mc_settings.bones.append(bpy.context.active_pose_bone)\n context.scene.mc_settings.mapping[self.sensor_str]['imu'][self.sensor_x][self.sensor_y]['bone_name'] = '%s' % context.active_pose_bone.name\n context.scene.mc_settings.save(context.scene.mc_settings.mapping)\n return {'FINISHED'}\n\nclass SetTPoseOperator(bpy.types.Operator):\n \"\"\"Set T-Pose\"\"\"\n bl_idname = \"wm.mocap_set_tpose_operator\"\n bl_label = \"Set T-Pose\"\n clear: bpy.props.BoolProperty(name=\"Clear\", default=False)\n \n def initBonePos(self, bone):\n if bone.name in self.settings.bone_names:\n # Keep reference to this bone\n self.settings.bones.append(bone)\n index = self.settings.bone_names.index(bone.name) \n if self.clear:\n self.settings.offset[index] = mathutils.Quaternion()\n else:\n bone.rotation_mode = 'QUATERNION'\n bone.rotation_quaternion = mathutils.Quaternion()\n bpy.context.view_layer.update()\n self.settings.matrix[index] = bone.matrix.to_quaternion()\n self.settings.offset[index] = self.settings.position[index]\n return True\n return False\n\n def setInit(self, bone):\n bone.rotation_mode = 'QUATERNION'\n bone.rotation_quaternion = mathutils.Quaternion()\n\n def updateBones(self, bone, func):\n if func(bone):\n bpy.context.view_layer.update()\n for b in bone.children:\n self.updateBones(b, func)\n \n def execute(self, context):\n for bone in context.scene.mc_settings.obj.pose.bones:\n # Find parent bone and iterate through children\n if bone.parent == None:\n self.updateBones(bone, self.setInit)\n bpy.context.view_layer.update()\n context.scene.mc_settings.iterateSensors(context.scene.mc_settings.initPoseFunc)\n\n\n return {'FINISHED'} \n\nregister, unregister = bpy.utils.register_classes_factory([SetBoneOperator, SetTPoseOperator])\n\nif __name__ == \"__main__\":\n register() ","sub_path":"additional_operators.py","file_name":"additional_operators.py","file_ext":"py","file_size_in_byte":2553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"75372809","text":"from pyrobot.brain import Brain\n\n\nclass WB(Brain):\n __slots__ = ['counter', 'inverse', 'oriented_action', 'actions', 'last_action']\n\n def setup(self):\n self.counter = 0\n self.inverse = {'right': 'left', 'left': 'right', 'up': 'down', 'down': 'up'}\n self.oriented_action = {'right': {'right': 'down', 'left': 'up', 'up': 'right', 'down': 'left'},\n 'left': {'right': 'up', 'left': 'down', 'up': 'left', 'down': 'right'},\n 'up': {'right': 'right', 'left': 'left', 'up': 'up', 'down': 'down'},\n 'down': {'right': 'left', 'left': 'right', 'up': 'down', 'down': 'up'}}\n\n # Direction priority for the DFS, from left to right\n self.direction_priority = ['left', 'up', 'right', 'down']\n\n # List of all the actions taken to get to the last gold of the map\n self.actions = ['right']\n self.last_action = 'right'\n self.robot.move('reset')\n\n def step(self):\n if not self.robot.getItem('win'):\n next_step = None\n\n if self.robot.getItem('golds') > 0:\n sonar = self.robot.getItem('sonar')\n\n for dir in self.direction_priority:\n if 
sonar[self.oriented_action[self.last_action][dir]]:\n next_step = self.oriented_action[self.last_action][dir]\n break\n\n # We eat whatever actions are opposite so that we get a direct path from the\n # last gold object we find\n if next_step != self.inverse[self.actions[len(self.actions) - 1]]:\n self.actions.append(next_step)\n else:\n self.actions.pop()\n\n self.last_action = next_step\n\n else:\n next_step = self.inverse[self.actions.pop()]\n\n object = self.robot.move(next_step)\n\n if object == 'gold':\n self.robot.move('grab')\n\n\ndef INIT(engine):\n return WB('WB', engine)","sub_path":"brain_1_extra.py","file_name":"brain_1_extra.py","file_ext":"py","file_size_in_byte":2047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"509592938","text":"import subprocess\nimport os\nimport sys\nimport re\nsys.path.insert(0, os.path.join(\"tools\", \"families\"))\nimport fam\nimport run_all_species\nfrom run_all_species import SpeciesRunFilter\nimport plot_speciesrax\nimport simulations_common\nimport plot_simulations\n\ndo_run = False\ndo_plot = not do_run\ndatasets = []\ncores = 30\nsubst_model = \"GTR+G\"\ngene_trees = [\"raxml-ng\"]\nlaunch_mode = \"normald\"\nreplicates = range(3000, 3100)\nvarying_params = []\n\n\n#if (do_run):\n #varying_params.append((None, [\"none\"]))\nvarying_params.append((\"sites\", [\"sites50\", \"sites200\", \"sites300\"]))\n#varying_params.append((\"families\", [\"f50\", \"f200\", \"f500\", \"f1000\"]))\nvarying_params.append((\"bl\", [\"bl0.01\", \"bl0.1\", \"bl10.0\", \"bl100.0\", \"bl1000.0\", \"bl10000.0\", \"bl100000.0\"]))\n#varying_params.append((\"dup_rate\", [\"d0.5_l0.5_t0.5\", \"d2.0_l2.0_t2.0\", \"d3.0_l3.0_t3.0\",\"d5.0_l5.0_t5.0\"]))\n#varying_params.append((\"transfer_rate\", [\"t0.5\", \"t2.0\", \"t3.0\", \"t5.0\", \"t10.0\"]))\n#varying_params.append((\"dt_ratio\", [\"d0.0_l0.0\", \"d0.5_l0.5\", \"d2.0_l2.0\", \"d5.0_l5.0\"]))\n#varying_params.append((\"population\", [\"pop10000000\", \"pop100000000\", \"pop1000000000\"]))\n#varying_params.append((\"gene_conversion_rate\", [\"gc0.5\", \"gc1.0\", \"gc2.5\", \"gc10.0\"]))\n#varying_params.append((\"species\", [\"s15\", \"s35\", \"s50\", \"s75\"]))\n\ntag = \"dtlsim\"\nfixed_point = \"ssim_\" + tag + \"_s25_f100_sites100_GTR_bl1.0_d1.0_l1.0_t1.0_gc0.0_p0.0_pop10_mu1.0_theta0.0_seed20\"\n\n# metric to plot\nmetric_names = [\"average_rrf\"]\n\n# methods to plot\nmethods_tuples = []\n#methods_tuples.append((\"minibme-mininj_raxml-ng\", \"MiniBME\"))\n#methods_tuples.append((\"minibmepruned-mininj_raxml-ng\", \"MiniBMEPruned\"))\n#methods_tuples.append((\"generax-mininj-fam_raxml-ng\", \"SpeciesRax\"))\n#methods_tuples.append((\"generax-mininj-fam-fixed_raxml-ng\", \"SpeciesRaxFixed\"))\n#methods_tuples.append((\"njrax-mininj_raxml-ng\", \"MiniNJ\"))\n#methods_tuples.append((\"astralpro_raxml-ng\", \"Astral-Pro\"))\n#methods_tuples.append((\"fastmulrfs-single_raxml-ng\", \"FastMulRFS\"))\n#methods_tuples.append((\"duptree_raxml-ng\", \"DupTree\"))\nmethods_tuples.append((\"true.true - raxml-ng\", \"RAxML-NG\"))\n\n\nmethods = []\nmethods_dict = {}\nfor t in methods_tuples:\n methods.append(t[0])\n methods_dict[t[0]] = (t[1], None)\n\n\n\n# run run_filter on all datasets in dataset\ndef run_species_methods(datasets, subst_model, cores, run_filter, launch_mode):\n for dataset in datasets:\n dataset_dir = fam.get_datadir(dataset)\n run_filter.run_reference_methods(dataset_dir, subst_model, cores, launch_mode)\n\ndef run_varying_experiment():\n 
run_filter = SpeciesRunFilter()\n run_filter.disable_all()\n run_filter.generate = True\n # mrbayes!!\n if (False):\n run_filter.mrbayes = True\n run_filter.mb_runs = 2\n run_filter.mb_chains = 4 \n run_filter.mb_frequencies = 5000\n run_filter.mb_generations = 500000\n mb_trees = run_filter.mb_generations * run_filter.mb_runs / (run_filter.mb_frequencies)\n run_filter.mb_burnin = mb_trees / 10\n run_filter.pargenes = True\n run_filter.pargenes_starting_trees = 1\n run_filter.pargenes_bootstrap_trees = 0\n run_filter.starting_gene_trees = gene_trees\n run_filter.duptree = True\n run_filter.njrax = True\n run_filter.astralpro = True\n run_filter.njst = True\n run_filter.fastmulrfs = True \n run_filter.cleanup = True\n run_filter.speciesraxbench = True\n #run_filter.minibme = True\n #run_filter.minibmepruned = True\n run_filter.analyse = True \n \n \n for entry in varying_params:\n datasets = simulations_common.get_dataset_list(fixed_point, entry[1], replicates)\n run_species_methods(datasets, subst_model, cores, run_filter, launch_mode)\n\ndef plot_varying_experiment():\n for entry in varying_params:\n datasets = simulations_common.get_dataset_list(fixed_point, entry[1], replicates, True)\n print(\"Plotting parameter \" + entry[0])\n for metric in metric_names:\n param = entry[0]\n output = simulations_common.get_plot_name(\"varydtl\", param, subst_model, metric) \n plot_simulations.plot_varying_params(datasets, param, metric, methods_tuples, subst_model, output)\n\n\nif (do_run):\n run_varying_experiment()\nif (do_plot):\n plot_varying_experiment()\n\n","sub_path":"old_experiments/varying_dtl.py","file_name":"varying_dtl.py","file_ext":"py","file_size_in_byte":4128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"178164177","text":"\nfrom flask_login import LoginManager, login_user\n\nfrom modules.FlaskModule.FlaskModule import flask_app\nfrom opentera.modules.BaseModule import BaseModule, ModuleNames\nfrom opentera.redis.RedisVars import RedisVars\n\nfrom opentera.db.models.TeraUser import TeraUser\nfrom opentera.db.models.TeraParticipant import TeraParticipant\nfrom opentera.db.models.TeraDevice import TeraDevice\nfrom opentera.db.models.TeraService import TeraService\n\nfrom opentera.config.ConfigManager import ConfigManager\nimport datetime\nimport redis\n\nfrom flask import request, _request_ctx_stack\nfrom flask_babel import gettext\nfrom werkzeug.local import LocalProxy\nfrom flask_restx import reqparse\nfrom functools import wraps\n\nfrom flask_httpauth import HTTPBasicAuth, HTTPTokenAuth, MultiAuth\n\nfrom twisted.internet import task\n\n# Current participant identity, stacked\ncurrent_participant = LocalProxy(lambda: getattr(_request_ctx_stack.top, 'current_participant', None))\n\n# Current device identity, stacked\ncurrent_device = LocalProxy(lambda: getattr(_request_ctx_stack.top, 'current_device', None))\n\n# Current user identity, stacked\ncurrent_user = LocalProxy(lambda: getattr(_request_ctx_stack.top, 'current_user', None))\n\n# Current service identity, stacked\ncurrent_service = LocalProxy(lambda: getattr(_request_ctx_stack.top, 'current_service', None))\n\n# Authentication schemes for users\nuser_http_auth = HTTPBasicAuth(realm='user')\nuser_token_auth = HTTPTokenAuth(\"OpenTera\")\nuser_multi_auth = MultiAuth(user_http_auth, user_token_auth)\n\n# Authentication schemes for participant\nparticipant_http_auth = HTTPBasicAuth(realm='participant')\nparticipant_token_auth = 
HTTPTokenAuth(\"OpenTera\")\nparticipant_multi_auth = MultiAuth(participant_http_auth, participant_token_auth)\n\n\nclass DisabledTokenStorage:\n def __init__(self):\n self.disabled_tokens = []\n\n def push_disabled_token(self, token):\n if token not in self.disabled_tokens:\n self.disabled_tokens.append(token)\n\n def get_disabled_tokens(self):\n return self.disabled_tokens\n\n def is_disabled_token(self, token):\n return token in self.disabled_tokens\n\n def clear_all_disabled_tokens(self):\n self.disabled_tokens.clear()\n\n def remove_disabled_token(self, token):\n if token in self.disabled_tokens:\n self.disabled_tokens.remove(token)\n\n def remove_all_expired_tokens(self, key):\n to_be_removed = []\n for token in self.disabled_tokens:\n import jwt\n try:\n token_dict = jwt.decode(token, key, algorithms='HS256')\n # Expired tokens will throw exception.\n # If we continue here, tokens have a valid expiration time.\n # We should stop looking for expired tokens since they are added chronologically\n break\n except jwt.exceptions.ExpiredSignature as e:\n to_be_removed.append(token)\n except jwt.exceptions.PyJWTError as e:\n print(e)\n continue\n\n # Remove expired tokens\n for expired_token in to_be_removed:\n self.disabled_tokens.remove(expired_token)\n\n return to_be_removed\n\n\nclass LoginModule(BaseModule):\n\n # This client is required for static functions\n redis_client = None\n\n # Only user & participant tokens expire (for now)\n __user_disabled_token_storage = DisabledTokenStorage()\n __participant_disabled_token_storage = DisabledTokenStorage()\n\n def __init__(self, config: ConfigManager):\n\n # Update Global Redis Client\n LoginModule.redis_client = redis.Redis(host=config.redis_config['hostname'],\n port=config.redis_config['port'],\n username=config.redis_config['username'],\n password=config.redis_config['password'],\n db=config.redis_config['db'])\n\n BaseModule.__init__(self, ModuleNames.LOGIN_MODULE_NAME.value, config)\n\n self.login_manager = LoginManager()\n\n # Setup login manager\n self.setup_login_manager()\n\n # Setup cleanup task for disabled tokens\n self.cleanup_disabled_tokens_loop_task = task.LoopingCall(self.cleanup_disabled_tokens)\n\n def cleanup_disabled_tokens(self):\n print('LoginModule.cleanup_disabled_tokens task')\n # Remove expired tokens from user tokens disabled storage\n LoginModule.__user_disabled_token_storage.remove_all_expired_tokens(\n self.redisGet(RedisVars.RedisVar_UserTokenAPIKey)\n )\n # Remove expired tokens from participant tokens disabled storage\n LoginModule.__participant_disabled_token_storage.remove_all_expired_tokens(\n self.redisGet(RedisVars.RedisVar_ParticipantTokenAPIKey)\n )\n\n def setup_module_pubsub(self):\n # Additional subscribe here\n\n # We wait until we are connected to redis\n # Every 30 minutes?\n loopDeferred = self.cleanup_disabled_tokens_loop_task.start(60.0 * 30)\n\n def notify_module_messages(self, pattern, channel, message):\n \"\"\"\n We have received a published message from redis\n \"\"\"\n print('LoginModule - Received message ', pattern, channel, message)\n pass\n\n def setup_login_manager(self):\n self.login_manager.init_app(flask_app)\n self.login_manager.session_protection = \"strong\"\n\n # Cookie based configuration\n flask_app.config.update({'REMEMBER_COOKIE_NAME': 'OpenTera',\n 'REMEMBER_COOKIE_DURATION': 14,\n 'REMEMBER_COOKIE_SECURE': True,\n 'PERMANENT_SESSION_LIFETIME': datetime.timedelta(minutes=1),\n 'REMEMBER_COOKIE_REFRESH_EACH_REQUEST': True})\n\n # Setup user loader function\n 
self.login_manager.user_loader(self.load_user)\n\n # Setup verify password function for users\n user_http_auth.verify_password(self.user_verify_password)\n user_token_auth.verify_token(self.user_verify_token)\n\n # Setup verify password function for participants\n participant_http_auth.verify_password(self.participant_verify_password)\n participant_token_auth.verify_token(self.participant_verify_token)\n participant_http_auth.get_user_roles(self.participant_get_user_roles_http)\n participant_token_auth.get_user_roles(self.participant_get_user_roles_token)\n\n def load_user(self, user_id):\n # print('LoginModule - load_user', self, user_id)\n # Depending if we have a user or a participant online, return the right object.\n # Here current_user or current_participant are already invalid\n # Need to fetch them from database\n\n user = TeraUser.get_user_by_uuid(user_id)\n participant = TeraParticipant.get_participant_by_uuid(user_id)\n\n if participant and user:\n print('ERROR uuid exists for user and participant!')\n # TODO throw exception?\n return None\n\n if user:\n return user\n\n if participant:\n return participant\n\n return None\n\n def user_verify_password(self, username, password):\n # print('LoginModule - user_verify_password ', username)\n tentative_user = TeraUser.get_user_by_username(username)\n if not tentative_user:\n self.logger.log_warning(self.module_name, 'Invalid username', username)\n return False\n\n attempts_key = RedisVars.RedisVar_UserLoginAttemptKey + tentative_user.user_uuid\n # Count login attempts\n current_attempts = self.redisGet(attempts_key)\n if not current_attempts:\n current_attempts = 0\n else:\n current_attempts = int(current_attempts)\n\n if current_attempts >= 5:\n return False # Too many attempts in a short period of time will result in temporary disabling (see below)\n\n logged_user = TeraUser.verify_password(username=username, password=password, user=tentative_user)\n\n if logged_user:\n _request_ctx_stack.top.current_user = logged_user\n\n # print('user_verify_password, found user: ', current_user)\n # current_user.update_last_online()\n\n # Clear attempts counter\n self.redisDelete(attempts_key)\n\n login_user(current_user, remember=True)\n # print('Setting key with expiration in 60s', session['_id'], session['_user_id'])\n # self.redisSet(session['_id'], session['_user_id'], ex=60)\n return True\n\n # Update login attempt count\n current_attempts += 1\n self.redisSet(attempts_key, current_attempts, 120)\n\n self.logger.log_warning(self.module_name, 'Invalid password for user', username)\n return False\n\n @staticmethod\n def user_push_disabled_token(token):\n LoginModule.__user_disabled_token_storage.push_disabled_token(token)\n\n @staticmethod\n def is_user_token_disabled(token):\n return LoginModule.__user_disabled_token_storage.is_disabled_token(token)\n\n def user_verify_token(self, token_value):\n \"\"\"\n Tokens key is dynamic and stored in a redis variable for users.\n \"\"\"\n # Disabled tokens should never be used\n if LoginModule.is_user_token_disabled(token_value):\n return False\n\n import jwt\n try:\n token_dict = jwt.decode(token_value, self.redisGet(RedisVars.RedisVar_UserTokenAPIKey),\n algorithms='HS256')\n except jwt.exceptions.PyJWTError as e:\n print(e)\n self.logger.log_error(self.module_name, 'User Token exception occurred')\n return False\n\n if token_dict['user_uuid'] and token_dict['exp']:\n # First verify expiration date\n expiration_date = datetime.datetime.fromtimestamp(token_dict['exp'])\n\n # Expiration date in the 
past?\n if expiration_date < datetime.datetime.now():\n self.logger.log_warning(self.module_name, 'Token expired for user', token_dict['user_uuid'])\n return False\n\n _request_ctx_stack.top.current_user = TeraUser.get_user_by_uuid(token_dict['user_uuid'])\n # TODO: Validate if user is also online?\n if current_user:\n # current_user.update_last_online()\n login_user(current_user, remember=True)\n return True\n\n return False\n\n def participant_verify_password(self, username, password):\n # print('LoginModule - participant_verify_password for ', username)\n\n tentative_participant = TeraParticipant.get_participant_by_username(username)\n if not tentative_participant:\n self.logger.log_warning(self.module_name, 'Invalid username', username)\n return False\n\n attempts_key = RedisVars.RedisVar_ParticipantLoginAttemptKey + tentative_participant.participant_uuid\n # Count login attempts\n current_attempts = self.redisGet(attempts_key)\n if not current_attempts:\n current_attempts = 0\n else:\n current_attempts = int(current_attempts)\n\n if current_attempts >= 5:\n return False # Too many attempts in a short period of time will result in temporary disabling (see below)\n\n logged_participant = TeraParticipant.verify_password(username=username, password=password,\n participant=tentative_participant)\n if logged_participant:\n\n _request_ctx_stack.top.current_participant = TeraParticipant.get_participant_by_username(username)\n\n # print('participant_verify_password, found participant: ', current_participant)\n # current_participant.update_last_online()\n\n login_user(current_participant, remember=True)\n\n # Flag that participant has full API access\n current_participant.fullAccess = True\n\n # Clear attempts counter\n self.redisDelete(attempts_key)\n\n # print('Setting key with expiration in 60s', session['_id'], session['_user_id'])\n # self.redisSet(session['_id'], session['_user_id'], ex=60)\n return True\n\n # Update login attempt count\n current_attempts += 1\n self.redisSet(attempts_key, current_attempts, 120)\n\n self.logger.log_warning(self.module_name, 'Invalid password for participant', username)\n return False\n\n @staticmethod\n def participant_push_disabled_token(token):\n LoginModule.__participant_disabled_token_storage.push_disabled_token(token)\n\n @staticmethod\n def is_participant_token_disabled(token):\n return LoginModule.__participant_disabled_token_storage.is_disabled_token(token)\n\n def participant_verify_token(self, token_value):\n \"\"\"\n Tokens for participants are stored in the DB.\n \"\"\"\n # print('LoginModule - participant_verify_token for ', token_value, self)\n\n # TeraParticipant verifies if the participant is active and login is enabled\n _request_ctx_stack.top.current_participant = TeraParticipant.get_participant_by_token(token_value)\n\n if current_participant:\n # current_participant.update_last_online()\n login_user(current_participant, remember=True)\n return True\n\n # Second attempt, validate dynamic token\n\n # Disabled tokens should never be used\n if LoginModule.is_participant_token_disabled(token_value):\n return False\n\n \"\"\"\n Tokens key is dynamic and stored in a redis variable for participants.\n \"\"\"\n import jwt\n try:\n token_dict = jwt.decode(token_value, self.redisGet(RedisVars.RedisVar_ParticipantTokenAPIKey),\n algorithms='HS256')\n except jwt.exceptions.PyJWTError as e:\n print(e)\n self.logger.log_error(self.module_name, 'Participant Token exception occurred')\n return False\n\n if token_dict['participant_uuid'] and 
token_dict['exp']:\n\n # First verify expiration date\n expiration_date = datetime.datetime.fromtimestamp(token_dict['exp'])\n\n # Expiration date in the past?\n if expiration_date < datetime.datetime.now():\n self.logger.log_warning(self.module_name, 'Token expired for participant',\n token_dict['participant_uuid'])\n return False\n\n _request_ctx_stack.top.current_participant = \\\n TeraParticipant.get_participant_by_uuid(token_dict['participant_uuid'])\n\n if current_participant:\n # Flag that participant has full API access\n current_participant.fullAccess = True\n # current_participant.update_last_online()\n login_user(current_participant, remember=True)\n return True\n\n return False\n\n def participant_get_user_roles_http(self, user):\n # login with username and password will give full access\n if 'username' in user and 'password' in user and current_participant:\n return ['full', 'limited']\n\n # This should not happen, return no role\n return []\n\n def participant_get_user_roles_token(self, user):\n # Verify if we have a token auth\n if 'token' in user and current_participant:\n if user['token'] == current_participant.participant_token:\n # Using only \"access\" token, will give limited access\n return ['limited']\n else:\n # Dynamic token used, need an http login first\n # Token verification is done previously\n return ['full', 'limited']\n\n # This should not happen, return no role\n return []\n\n @staticmethod\n def device_token_or_certificate_required(f):\n \"\"\"\n Use this decorator if UUID is stored in a client certificate or token in url params.\n Acceptable for devices and participants.\n \"\"\"\n @wraps(f)\n def decorated(*args, **kwargs):\n\n # Since certificates are more secure than tokens, we will test for them first\n\n # Headers are modified in TwistedModule to add certificate information if available.\n # We are interested in the content of two fields : X-Device-Uuid, X-Participant-Uuid\n if request.headers.__contains__('X-Device-Uuid'):\n # Load device from DB\n _request_ctx_stack.top.current_device = TeraDevice.get_device_by_uuid(\n request.headers['X-Device-Uuid'])\n\n # Device must be found and enabled\n if current_device and current_device.device_enabled:\n login_user(current_device, remember=True)\n return f(*args, **kwargs)\n\n # Then verify tokens...\n # Verify token in auth headers (priority over token in params)\n if 'Authorization' in request.headers:\n try:\n # Default whitespace as separator, 1 split max\n scheme, token = request.headers['Authorization'].split(None, 1)\n except ValueError:\n # malformed Authorization header\n return gettext('Invalid token'), 401\n\n # Verify scheme and token\n if scheme == 'OpenTera':\n # Load device from DB\n _request_ctx_stack.top.current_device = TeraDevice.get_device_by_token(token)\n\n # Device must be found and enabled\n if current_device and current_device.device_enabled:\n # Returns the function if authenticated with token\n login_user(current_device, remember=True)\n return f(*args, **kwargs)\n\n # Parse args\n parser = reqparse.RequestParser()\n parser.add_argument('token', type=str, help='Token', required=False)\n token_args = parser.parse_args(strict=False)\n\n # Verify token in params\n if 'token' in token_args:\n # Load device from DB\n _request_ctx_stack.top.current_device = TeraDevice.get_device_by_token(token_args['token'])\n\n # Device must be found and enabled\n if current_device and current_device.device_enabled:\n # Returns the function if authenticated with token\n login_user(current_device, 
remember=True)\n return f(*args, **kwargs)\n\n # Any other case, do not call function since no valid auth found.\n return gettext('Unauthorized'), 401\n\n return decorated\n\n @staticmethod\n def service_token_or_certificate_required(f):\n \"\"\"\n Use this decorator if UUID is stored in a client certificate or token in url params.\n Acceptable for services\n \"\"\"\n @wraps(f)\n def decorated(*args, **kwargs):\n import jwt\n # Since certificates are more secure than tokens, we will test for them first\n # Headers are modified in TwistedModule to add certificate information if available.\n # We are interested in the content of field : X-Service-Uuid,\n # if request.headers.__contains__('X-Service-Uuid'):\n # # Validate service from database\n # return f(*args, **kwargs)\n\n # Then verify tokens...\n service_uuid = None\n # Verify token in auth headers (priority over token in params)\n if 'Authorization' in request.headers:\n try:\n # Default whitespace as separator, 1 split max\n scheme, token = request.headers['Authorization'].split(None, 1)\n except ValueError:\n # malformed Authorization header\n return gettext('Invalid Token'), 401\n\n # Verify scheme and token\n if scheme == 'OpenTera':\n\n try:\n token_dict = jwt.decode(token,\n LoginModule.redis_client.get(\n RedisVars.RedisVar_ServiceTokenAPIKey),\n algorithms='HS256')\n if 'service_uuid' in token_dict:\n service_uuid = token_dict['service_uuid']\n except jwt.exceptions.PyJWTError as e:\n return gettext('Unauthorized'), 401\n\n # Parse args\n if not service_uuid:\n parser = reqparse.RequestParser()\n parser.add_argument('token', type=str, help='Token', required=False)\n token_args = parser.parse_args(strict=False)\n\n # Verify token in params\n if token_args['token']:\n try:\n token_dict = jwt.decode(token_args['token'],\n LoginModule.redis_client.get(\n RedisVars.RedisVar_ServiceTokenAPIKey),\n algorithms='HS256')\n if 'service_uuid' in token_dict:\n service_uuid = token_dict['service_uuid']\n except jwt.exceptions.PyJWTError as e:\n return gettext('Unauthorized'), 401\n\n if service_uuid:\n # Check if service is allowed to connect\n service = TeraService.get_service_by_uuid(service_uuid)\n if service and service.service_enabled:\n _request_ctx_stack.top.current_service = service\n return f(*args, **kwargs)\n\n # Any other case, do not call function since no valid auth found.\n return gettext('Unauthorized'), 401\n\n return decorated\n\n\n# if __name__ == '__main__':\n# storage = DisabledTokenStorage()\n# import uuid\n#\n# def create_user():\n# new_user = TeraUser()\n# new_user.user_enabled = True\n# new_user.user_firstname = \"No Access\"\n# new_user.user_lastname = \"User!\"\n# new_user.user_profile = \"\"\n# new_user.user_password = TeraUser.encrypt_password(\"user4\")\n# new_user.user_superadmin = False\n# new_user.user_username = \"test_user\"\n# new_user.user_uuid = str(uuid.uuid4())\n# return new_user\n#\n# key = 'testkey'\n# user = create_user()\n# token1 = user.get_token(key, expiration=1)\n# token2 = user.get_token(key, expiration=3600)\n#\n# storage.push_disabled_token(token1)\n# storage.push_disabled_token(token2)\n# disabled = storage.is_disabled_token(token1)\n# disabled = storage.is_disabled_token(token2)\n#\n# storage.remove_expired_tokens(key)\n# print(storage)\n\n","sub_path":"teraserver/python/modules/LoginModule/LoginModule.py","file_name":"LoginModule.py","file_ext":"py","file_size_in_byte":23179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} 
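The LoginModule record above centers on HS256 JWT handling: `jwt.decode` with a redis-held key plus an explicit expiry check. A minimal standalone sketch of that round trip (not part of the dataset), assuming only PyJWT; the hard-coded `API_KEY` is a hypothetical stand-in for the value the module reads from redis:

```python
# Minimal sketch of the token round trip that user_verify_token above relies on.
# Assumption: PyJWT is installed; API_KEY stands in for the redis-stored key.
import datetime
import jwt

API_KEY = "secret-from-redis"  # hypothetical stand-in for RedisVar_UserTokenAPIKey

def make_user_token(user_uuid, lifetime_s=3600):
    payload = {
        "user_uuid": user_uuid,
        # PyJWT accepts a datetime for "exp" and serializes it to a timestamp
        "exp": datetime.datetime.now(tz=datetime.timezone.utc)
        + datetime.timedelta(seconds=lifetime_s),
    }
    return jwt.encode(payload, API_KEY, algorithm="HS256")

def verify_user_token(token):
    try:
        # jwt.decode already rejects expired tokens (ExpiredSignatureError is a
        # PyJWTError), so the module's explicit datetime comparison is a second guard.
        token_dict = jwt.decode(token, API_KEY, algorithms=["HS256"])
    except jwt.exceptions.PyJWTError:
        return None
    return token_dict.get("user_uuid")

token = make_user_token("some-user-uuid")
assert verify_user_token(token) == "some-user-uuid"
```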
+{"seq_id":"231402101","text":"import onnx.utils\nimport onnx\nimport numpy as np\nimport logging\n\nfrom . import helper\nfrom .general_graph import Graph, Node\nfrom .other import topological_sort\nfrom .replacing import replace_shape_with_constant\n\n\ndef constant_folding(g):\n # Before constant folding, duplicate the constant nodes.\n duplicate_constant_node(g)\n keep_folding = True\n folded = False\n while keep_folding:\n keep_folding = False\n for node in g.node:\n # Check if the node is foldable\n if node.op_type not in constant_folding_nodes.keys():\n continue\n # Check if the parents of the node are all single follower constant node.\n all_single_constant_input = True\n for input_name in node.input:\n input_node = helper.find_node_by_output_name(g, input_name)\n relative_outputs = helper.find_nodes_by_input_name(\n g, input_name)\n if not input_node or input_node.op_type != 'Constant':\n all_single_constant_input = False\n break\n if len(relative_outputs) > 1:\n all_single_constant_input = False\n break\n if not all_single_constant_input:\n continue\n # Constant folding for the specific node\n logging.debug(\"Folding Constant nodes and %s %s.\",\n node.op_type, node.name)\n constant_folding_nodes[node.op_type](g)\n keep_folding = True\n folded = True\n return folded\n\n\ndef duplicate_constant_node(g):\n \"\"\" Duplicate the constant node if its following nodes contain constant folding\n nodes. Create and link the new constant nodes to the constant folding nodes.\n \"\"\"\n for node in g.node:\n # Find a valid constant node\n if node.op_type != 'Constant':\n continue\n output_val_info = helper.find_value_by_name(g, node.output[0])\n data_shape = helper.get_shape_from_value_info(output_val_info)\n output_nodes = helper.find_nodes_by_input_name(g, node.output[0])\n\n # For constant that has only one following node, no need to duplicate\n if len(output_nodes) < 2:\n continue\n\n # Check if its following nodes are foldable\n foldable_output_nodes = list(filter(lambda n: n.op_type in\n constant_folding_nodes.keys(), output_nodes))\n if not foldable_output_nodes:\n continue\n\n # Duplicate the node needed by foldable nodes\n for i in range(len(foldable_output_nodes)):\n logging.debug(\"Found constant %s and %s %s are availble for folding. Duplicate constant.\",\n node.name, foldable_output_nodes[i].op_type, foldable_output_nodes[i].name)\n output_name = node.output[0] + '_dup_' + str(i)\n new_constant_node = onnx.helper.make_node(\n 'Constant',\n [],\n [output_name],\n name=output_name,\n value=node.attribute[0].t\n )\n new_val_info = onnx.helper.make_tensor_value_info(\n output_name,\n node.attribute[0].t.data_type,\n data_shape\n )\n input_ind = list(foldable_output_nodes[i].input).index(\n node.output[0])\n foldable_output_nodes[i].input[input_ind] = output_name\n\n g.node.extend([new_constant_node])\n g.value_info.extend([new_val_info])\n\n # If all following nodes are foldable node, delete the original node.\n if len(foldable_output_nodes) == len(output_nodes):\n g.node.remove(node)\n g.value_info.remove(output_val_info)\n\n topological_sort(g)\n\n return\n\n\ndef reshape_constant_folding_once(g):\n \"\"\"Do constant folding for the onnx model once.\\\\\n Mainly for Shape node and its following nodes. 
This function only can only\\\\\n deal with the currently available constant nodes and their folloing nodes.\n\n :param g: the input graph\n :return: if anything modified, return true.\n \"\"\"\n # Get the graph and BFS\n FOLD_TYPES = set(['Gather', 'Unsqueeze', 'Concat', 'Transpose'])\n graph = Graph(g)\n todo = graph.get_sorted_node_list()\n node_to_remove = []\n for node in todo:\n if node.proto is None:\n continue\n if node.proto.op_type not in FOLD_TYPES:\n continue\n if not helper.all_constant_input(node):\n continue\n # This node can be folded\n if node.proto.op_type == 'Gather':\n prev_data_node = node.parents[0]\n prev_indice_node = node.parents[1]\n _, prev_data = helper.constant_to_list(prev_data_node.proto)\n _, indices = helper.constant_to_list(prev_indice_node.proto)\n data = [prev_data[indices[0]]]\n # Construct new node\n new_const = helper.list_to_constant(\n node.proto.output[0], None, data)\n # Modify graph proto\n g.node.extend([new_const])\n if node.proto not in node_to_remove:\n node_to_remove.append(node.proto)\n # Modify Graph structure\n const_node = Node(new_const)\n for next_node in node.children:\n next_node.parents = [\n prev if prev != node else const_node for prev in next_node.parents]\n const_node.children.append(next_node)\n elif node.proto.op_type == 'Unsqueeze':\n prev_data_node = node.parents[0]\n prev_shape, prev_data = helper.constant_to_list(\n prev_data_node.proto)\n if prev_shape != 1:\n continue\n data = [prev_data[0]]\n # Construct new node\n new_const = helper.list_to_constant(\n node.proto.output[0], [1], data)\n # Modify graph proto\n g.node.extend([new_const])\n if node.proto not in node_to_remove:\n node_to_remove.append(node.proto)\n # Modify Graph structure\n const_node = Node(new_const)\n for next_node in node.children:\n next_node.parents = [\n prev if prev != node else const_node for prev in next_node.parents]\n const_node.children.append(next_node)\n elif node.proto.op_type == 'Concat':\n new_shape = []\n foldable = True\n for parent in node.parents:\n shape, data = helper.constant_to_list(parent.proto)\n if len(shape) != 1:\n foldable = False\n break\n new_shape.append(data[0])\n if not foldable:\n continue\n # Construct new node\n new_const = helper.list_to_constant(\n node.proto.output[0], [len(new_shape)], new_shape)\n # Modify graph proto\n g.node.extend([new_const])\n if node.proto not in node_to_remove:\n node_to_remove.append(node.proto)\n # Modify Graph structure\n const_node = Node(new_const)\n for next_node in node.children:\n next_node.parents = [\n prev if prev != node else const_node for prev in next_node.parents]\n const_node.children.append(next_node)\n elif node.proto.op_type == 'Transpose':\n # Transpose the constant according to the attribute\n prev_data_node = node.parents[0]\n prev_data_np = helper.constant_to_numpy(prev_data_node.proto)\n attr_proto = helper.get_attribute_by_name(node.proto, \"perm\")\n new_data_np = np.transpose(prev_data_np, attr_proto.ints)\n # Construct new node\n new_const = helper.numpy_to_constant(\n node.proto.output[0], new_data_np)\n # Modify graph proto\n g.node.extend([new_const])\n if node.proto not in node_to_remove:\n node_to_remove.append(node.proto)\n # Modify Graph structure\n const_node = Node(new_const)\n for next_node in node.children:\n next_node.parents = [\n prev if prev != node else const_node for prev in next_node.parents]\n const_node.children.append(next_node)\n for node in node_to_remove:\n g.node.remove(node)\n if len(node_to_remove) > 0:\n return True\n else:\n return 
False\n\n\ndef reshape_constant_folding(model):\n \"\"\"Do constant folding for the onnx model.\\\\\n Mainly for Shape node and its following nodes. This function will do Shape\\\\\n replacement and constant folding repeatly until nothing more can be done.\n\n :param model: the input model\n :return: the new model\n \"\"\"\n model = onnx.utils.polish_model(model)\n reshaped = reshape_constant_folding_once(model.graph)\n topological_sort(model.graph)\n while replace_shape_with_constant(model.graph) or reshaped:\n topological_sort(model.graph)\n reshaped = reshape_constant_folding_once(model.graph)\n topological_sort(model.graph)\n model = onnx.utils.polish_model(model)\n return model\n\n\ndef unsqueeze_constant_folding(g):\n \"\"\"Do Unsqueeze layer constant folding for the pytorch model.\n\n :param g: the input graph\n :return: None\n \"\"\"\n graph = Graph(g)\n todo = graph.get_sorted_node_list()\n node_to_remove = []\n for node in todo:\n if node.proto is None:\n continue\n if node.proto.op_type != 'Unsqueeze':\n continue\n if not helper.all_constant_input(node):\n continue\n # Now we have an unsqueeze to fold.\n # Find the previous constant node.\n prev_data_node = node.parents[0]\n prev_data = helper.constant_to_numpy(prev_data_node.proto)\n new_dims = helper.get_attribute_by_name(node.proto, 'axes')\n new_dims = new_dims.ints\n data = prev_data\n for d in new_dims:\n data = np.expand_dims(data, d)\n # Construct new node\n new_const = helper.numpy_to_constant(\n node.proto.output[0], data)\n # Modify graph proto\n g.node.extend([new_const])\n if node.proto not in node_to_remove:\n node_to_remove.append(node.proto)\n # Modify Graph structure\n const_node = Node(new_const)\n for next_node in node.children:\n next_node.parents = [\n prev if prev != node else const_node for prev in next_node.parents]\n const_node.children.append(next_node)\n for node in node_to_remove:\n g.node.remove(node)\n\n\ndef slice_constant_folding(g):\n \"\"\" Fold constant and slice nodes to a single constant node.\n \"\"\"\n node_to_delete = []\n for node in g.node:\n if node.op_type != 'Slice':\n continue\n pre_node = helper.find_node_by_output_name(g, node.input[0])\n if pre_node.op_type != 'Constant':\n continue\n pre_shape, data_list = helper.constant_to_list(pre_node)\n\n data_list = np.reshape(data_list, pre_shape)\n axes = helper.get_attribute_by_name(node, 'axes')\n ends = list(helper.get_attribute_by_name(node, 'ends').ints)\n starts = list(helper.get_attribute_by_name(node, 'starts').ints)\n\n if not axes:\n axes = list(range(len(helper.get_shape(data_list))))\n else:\n axes = list(axes.ints)\n\n new_data = helper.slice_data(data_list, starts, ends, axes)\n new_node = helper.list_to_constant(node.output[0], helper.get_shape(\n new_data), helper.flatten_to_list(new_data))\n g.node.extend([new_node])\n node_to_delete.append(node)\n node_to_delete.append(pre_node)\n value_info = helper.find_value_by_name(g, pre_node.output[0])\n g.value_info.remove(value_info)\n\n while node_to_delete:\n node = node_to_delete.pop()\n g.node.remove(node)\n\n topological_sort(g)\n\n\ndef cast_constant_folding(g):\n \"\"\" Fold constant and cast node to a single constant node.\n \"\"\"\n node_to_delete = []\n for node in g.node:\n if node.op_type != 'Cast':\n continue\n pre_node = helper.find_node_by_output_name(g, node.input[0])\n if not pre_node:\n continue\n if pre_node.op_type != 'Constant':\n continue\n\n shape, data = helper.constant_to_list(pre_node)\n data_type = node.attribute[0].i\n if data_type in (6, 7):\n data = 
list(map(int, data))\n elif data_type == onnx.helper.TensorProto.FLOAT:\n data = list(map(float, data))\n else:\n raise RuntimeError('data type not supported')\n\n tensor = onnx.helper.make_tensor(\n name=pre_node.attribute[0].name,\n data_type=data_type,\n dims=shape,\n vals=helper.flatten_to_list(data)\n )\n new_node = onnx.helper.make_node(\n 'Constant',\n [],\n [node.output[0]],\n name=node.output[0],\n value=tensor\n )\n g.node.extend([new_node])\n node_to_delete.append(pre_node)\n node_to_delete.append(node)\n\n value_info = helper.find_value_by_name(g, pre_node.output[0])\n g.value_info.remove(value_info)\n\n while node_to_delete:\n node = node_to_delete.pop()\n g.node.remove(node)\n\n topological_sort(g)\n\n\ndef reduceprod_constant_folding(g):\n \"\"\" Fold constant and reduceprod nodes to a single constant node.\n \"\"\"\n node_to_delete = []\n for node in g.node:\n if node.op_type != 'ReduceProd':\n continue\n pre_node = helper.find_node_by_output_name(g, node.input[0])\n if pre_node.op_type != 'Constant':\n continue\n\n shape, data_set = helper.constant_to_list(pre_node)\n tensor = pre_node.attribute[0].t\n\n data_set = np.reshape(data_set, shape)\n for att in node.attribute:\n if att.name == 'axes':\n axes = list(att.ints)\n else:\n keepdims = int(att.i)\n\n new_data = np.prod(data_set, axis=tuple(axes), keepdims=keepdims == 1)\n new_shape = helper.get_shape(new_data)\n new_flat_data = helper.flatten_to_list(new_data)\n new_tensor = onnx.helper.make_tensor(\n name=node.output[0],\n data_type=tensor.data_type,\n dims=new_shape,\n vals=new_flat_data\n )\n new_node = onnx.helper.make_node(\n 'Constant',\n [],\n [node.output[0]],\n name=node.output[0],\n value=new_tensor\n )\n\n node_to_delete.extend([pre_node, node])\n g.node.extend([new_node])\n value_info = None\n for item in g.value_info:\n if item.name == pre_node.output[0]:\n value_info = item\n if value_info is not None:\n g.value_info.remove(value_info)\n\n while node_to_delete:\n node = node_to_delete.pop()\n g.node.remove(node)\n\n topological_sort(g)\n\n\ndef reshape_constant_input_folding(g):\n \"\"\" Fold constant and reshape nodes to a single constant node.\n \"\"\"\n node_to_delete = []\n for node in g.node:\n if node.op_type != 'Reshape':\n continue\n pre_data_node = helper.find_node_by_output_name(g, node.input[0])\n pre_shape_node = helper.find_node_by_output_name(g, node.input[1])\n if pre_data_node.op_type != 'Constant' or \\\n pre_shape_node.op_type != 'Constant':\n continue\n if len(helper.find_nodes_by_input_name(g, pre_data_node.output[0])) > 1:\n continue\n if len(helper.find_nodes_by_input_name(g, pre_shape_node.output[0])) > 1:\n continue\n\n data = helper.constant_to_numpy(pre_data_node)\n _, shape = helper.constant_to_list(pre_shape_node)\n new_data = np.reshape(data, shape)\n\n new_tensor = onnx.helper.make_tensor(\n name=node.output[0],\n data_type=pre_data_node.attribute[0].t.data_type,\n dims=shape,\n vals=helper.flatten_to_list(new_data)\n )\n new_node = onnx.helper.make_node(\n 'Constant',\n [],\n [node.output[0]],\n name=node.output[0],\n value=new_tensor\n )\n g.node.extend([new_node])\n\n node_to_delete.extend([node, pre_data_node, pre_shape_node])\n\n data_val_info = helper.find_value_by_name(g, pre_data_node.output[0])\n shape_val_info = helper.find_value_by_name(g, pre_shape_node.output[0])\n\n g.value_info.remove(data_val_info)\n g.value_info.remove(shape_val_info)\n\n while node_to_delete:\n node = node_to_delete.pop()\n g.node.remove(node)\n\n topological_sort(g)\n\n\ndef 
concat_constant_folding(g):\n \"\"\" Fold constant and concat nodes to a single constant node.\n \"\"\"\n node_to_del = []\n for node in g.node:\n if node.op_type != 'Concat':\n continue\n\n valid_inputs = True\n for input_name in node.input:\n input_node = helper.find_node_by_output_name(g, input_name)\n input_node_output = helper.find_nodes_by_input_name(g, input_name)\n if len(input_node_output) > 1:\n valid_inputs = False\n break\n if input_node.op_type != 'Constant':\n valid_inputs = False\n break\n\n if not valid_inputs:\n continue\n\n input_data = []\n input_shapes = []\n for input_name in node.input:\n input_node = helper.find_node_by_output_name(g, input_name)\n s, d = helper.constant_to_list(input_node)\n d = np.reshape(d, s)\n input_data.append(d)\n input_shapes.append(s)\n node_to_del.append(input_node)\n\n concat_data = np.concatenate(input_data, axis=node.attribute[0].i)\n new_node = helper.list_to_constant(\n node.output[0],\n helper.get_shape(concat_data),\n helper.flatten_to_list(concat_data),\n data_type=input_node.attribute[0].t.data_type\n )\n g.node.extend([new_node])\n node_to_del.append(node)\n\n for input_name in node.input:\n val_info = helper.find_value_by_name(g, input_name)\n g.value_info.remove(val_info)\n\n while node_to_del:\n node = node_to_del.pop()\n g.node.remove(node)\n\n topological_sort(g)\n\n\ndef transpose_constant_folding(g):\n \"\"\"Fold constant and transpose nodes to a single constant node.\n \"\"\"\n node_to_del = []\n for node in g.node:\n if node.op_type != 'Transpose':\n continue\n pre_node = helper.find_node_by_output_name(g, node.input[0])\n if pre_node.op_type != 'Constant':\n continue\n shape, data = helper.constant_to_list(pre_node)\n np_data = np.reshape(data, shape)\n permutation = list(node.attribute[0].ints)\n\n new_data = np.transpose(np_data, permutation)\n new_shape = new_data.shape\n new_node = helper.list_to_constant(\n node.output[0],\n new_shape,\n new_data.flatten().tolist(),\n data_type=pre_node.attribute[0].t.data_type\n )\n\n g.node.extend([new_node])\n node_to_del.extend([node, pre_node])\n\n pre_val_info = helper.find_value_by_name(g, node.input[0])\n g.value_info.remove(pre_val_info)\n\n next_val_info = helper.find_value_by_name(g, node.output[0])\n g.value_info.remove(next_val_info)\n\n new_val_info = onnx.helper.make_tensor_value_info(\n node.output[0],\n pre_node.attribute[0].t.data_type,\n new_shape\n )\n g.value_info.extend([new_val_info])\n\n while node_to_del:\n node = node_to_del.pop()\n g.node.remove(node)\n\n topological_sort(g)\n\n\ndef unsqueeze_constant_folding1(g):\n \"\"\"Fold constant and unsqueeze nodes to a single constant node.\n \"\"\"\n node_to_del = []\n for node in g.node:\n if node.op_type != 'Unsqueeze':\n continue\n pre_node = helper.find_node_by_output_name(g, node.input[0])\n if pre_node.op_type != 'Constant':\n continue\n shape, data = helper.constant_to_list(pre_node)\n if type(shape) == int:\n np_data = data[0]\n else:\n np_data = np.reshape(data, shape)\n axes = list(node.attribute[0].ints)\n axes.sort()\n\n for dim in axes:\n np_data = np.expand_dims(np_data, axis=dim)\n new_shape = np_data.shape\n new_node = helper.list_to_constant(\n node.output[0],\n new_shape,\n np_data.flatten().tolist(),\n data_type=pre_node.attribute[0].t.data_type\n )\n g.node.extend([new_node])\n node_to_del.extend([node, pre_node])\n\n pre_val_info = helper.find_value_by_name(g, node.input[0])\n next_val_info = helper.find_value_by_name(g, node.output[0])\n g.value_info.remove(pre_val_info)\n 
g.value_info.remove(next_val_info)\n\n new_val_info = onnx.helper.make_tensor_value_info(\n node.output[0],\n pre_node.attribute[0].t.data_type,\n new_shape\n )\n g.value_info.extend([new_val_info])\n\n while node_to_del:\n node = node_to_del.pop()\n g.node.remove(node)\n\n topological_sort(g)\n\n\ndef gather_constant_folding(g):\n \"\"\"Fold constant and gather nodes to a single constant node.\n \"\"\"\n node_to_del = []\n for node in g.node:\n if node.op_type != 'Gather':\n continue\n pre_data_node = helper.find_node_by_output_name(g, node.input[0])\n pre_indices_node = helper.find_node_by_output_name(g, node.input[1])\n if pre_data_node.op_type != 'Constant' or\\\n pre_indices_node.op_type != 'Constant':\n continue\n\n shape, data = helper.constant_to_list(pre_data_node)\n indice_shape, indices = helper.constant_to_list(pre_indices_node)\n if type(indice_shape) == int:\n indices = indices[0]\n\n np_data = np.reshape(data, shape)\n axis = node.attribute[0].i\n\n new_data = np.take(np_data, indices, axis=axis)\n new_shape = new_data.shape\n new_node = helper.list_to_constant(\n node.output[0],\n new_shape,\n new_data.flatten().tolist(),\n data_type=pre_data_node.attribute[0].t.data_type\n )\n\n node_to_del.extend([node, pre_data_node, pre_indices_node])\n g.node.extend([new_node])\n\n val_info_1 = helper.find_value_by_name(g, node.input[0])\n val_info_2 = helper.find_value_by_name(g, node.input[1])\n val_info_3 = helper.find_value_by_name(g, node.output[0])\n new_val_info = onnx.helper.make_tensor_value_info(\n new_node.output[0],\n pre_data_node.attribute[0].t.data_type,\n new_shape\n )\n\n g.value_info.remove(val_info_1)\n g.value_info.remove(val_info_2)\n g.value_info.remove(val_info_3)\n g.value_info.extend([new_val_info])\n\n while node_to_del:\n node = node_to_del.pop()\n g.node.remove(node)\n\n topological_sort(g)\n\n\ndef add_constant_folding(g):\n \"\"\"Fold constant and add nodes to a single constant node.\n \"\"\"\n node_to_del = []\n for node in g.node:\n if node.op_type != 'Add':\n continue\n pre_node_1 = helper.find_node_by_output_name(g, node.input[0])\n pre_node_2 = helper.find_node_by_output_name(g, node.input[1])\n if not pre_node_1 or not pre_node_2:\n continue\n if pre_node_1.op_type != 'Constant' or \\\n pre_node_2.op_type != 'Constant':\n continue\n if len(helper.find_nodes_by_input_name(g, pre_node_1.output[0])) != 1:\n continue\n if len(helper.find_nodes_by_input_name(g, pre_node_2.output[0])) != 1:\n continue\n\n shape1, data1 = helper.constant_to_list(pre_node_1)\n shape2, data2 = helper.constant_to_list(pre_node_2)\n np_data1 = np.reshape(data1, shape1)\n np_data2 = np.reshape(data2, shape2)\n try:\n new_data = np.add(np_data1, np_data2)\n except:\n raise RuntimeError('can\\'t broadcast and add two data sets')\n\n new_node = helper.list_to_constant(\n node.output[0],\n new_data.shape,\n new_data.flatten().tolist(),\n data_type=pre_node_1.attribute[0].t.data_type\n )\n\n g.node.extend([new_node])\n node_to_del.extend([node, pre_node_1, pre_node_2])\n g.value_info.remove(helper.find_value_by_name(g, pre_node_1.output[0]))\n g.value_info.remove(helper.find_value_by_name(g, pre_node_2.output[0]))\n\n while node_to_del:\n node = node_to_del.pop()\n g.node.remove(node)\n\n topological_sort(g)\n\n\ndef sqrt_constant_folding(g):\n \"\"\" Fold constant and sqrt nodes to a single node.\n \"\"\"\n node_to_del = []\n for node in g.node:\n if node.op_type != 'Sqrt':\n continue\n pre_node = helper.find_node_by_output_name(g, node.input[0])\n if pre_node.op_type != 
'Constant':\n continue\n\n shape, data = helper.constant_to_list(pre_node)\n np_data = np.sqrt(np.reshape(data, shape))\n output_val_info = helper.find_value_by_name(g, node.output[0])\n input_val_info = helper.find_value_by_name(g, node.input[0])\n data_type = output_val_info.type.tensor_type.elem_type\n\n new_tensor = onnx.helper.make_tensor(\n name=node.output[0]+'_data',\n data_type=data_type,\n dims=shape,\n vals=np_data.flatten().tolist()\n )\n new_node = onnx.helper.make_node(\n 'Constant',\n [],\n [node.output[0]],\n name=node.output[0],\n value=new_tensor\n )\n\n g.value_info.remove(input_val_info)\n node_to_del.extend([pre_node, node])\n g.node.extend([new_node])\n\n while node_to_del:\n node = node_to_del.pop()\n g.node.remove(node)\n\n topological_sort(g)\n\n\ndef reciprocal_constant_folding(g):\n \"\"\" Fold constant and reciprocal nodes to a single constant node.\n \"\"\"\n node_to_del = []\n for node in g.node:\n if node.op_type != 'Reciprocal':\n continue\n pre_node = helper.find_node_by_output_name(g, node.input[0])\n if pre_node.op_type != 'Constant':\n continue\n shape, data = helper.constant_to_list(pre_node)\n data = list(map(lambda x: x if abs(x) > 1.e-8 else 1.e-8, data))\n np_data = np.reshape(data, shape)\n np_data = np.reciprocal(np_data)\n\n input_val_info = helper.find_value_by_name(g, node.input[0])\n output_val_info = helper.find_value_by_name(g, node.output[0])\n data_type = output_val_info.type.tensor_type.elem_type\n\n new_tensor = onnx.helper.make_tensor(\n name=node.output[0]+'_data',\n data_type=data_type,\n dims=shape,\n vals=np_data.flatten().tolist()\n )\n new_node = onnx.helper.make_node(\n 'Constant',\n [],\n [node.output[0]],\n name=node.output[0],\n value=new_tensor\n )\n\n node_to_del.extend([node, pre_node])\n g.node.extend([new_node])\n\n g.value_info.remove(input_val_info)\n\n while node_to_del:\n node = node_to_del.pop()\n g.node.remove(node)\n\n topological_sort(g)\n\n\ndef mul_constant_folding(g):\n \"\"\" Fold constant and mul nodes to a single constant node.\n \"\"\"\n node_to_del = []\n for node in g.node:\n if node.op_type != 'Mul':\n continue\n pre_node_1 = helper.find_node_by_output_name(g, node.input[0])\n pre_node_2 = helper.find_node_by_output_name(g, node.input[1])\n if pre_node_1.op_type != 'Constant' or \\\n pre_node_2.op_type != 'Constant':\n continue\n\n pre_value_info1 = helper.find_value_by_name(g, node.input[0])\n pre_value_info2 = helper.find_value_by_name(g, node.input[1])\n\n if len(helper.find_nodes_by_input_name(g, pre_value_info1.name)) > 1:\n continue\n if len(helper.find_nodes_by_input_name(g, pre_value_info2.name)) > 1:\n continue\n\n shape1, data1 = helper.constant_to_list(pre_node_1)\n shape2, data2 = helper.constant_to_list(pre_node_2)\n np_data1 = np.reshape(data1, shape1)\n np_data2 = np.reshape(data2, shape2)\n\n try:\n new_data = np.multiply(np_data1, np_data2)\n except:\n raise RuntimeError('can not broadcast and multiply two data sets')\n\n new_shape = new_data.shape\n new_tensor = onnx.helper.make_tensor(\n name=node.output[0]+'_data',\n data_type=pre_node_1.attribute[0].t.data_type,\n dims=new_shape,\n vals=new_data.flatten().tolist()\n )\n new_node = onnx.helper.make_node(\n 'Constant',\n [],\n [node.output[0]],\n name=node.output[0],\n value=new_tensor\n )\n\n node_to_del.extend([node, pre_node_1, pre_node_2])\n g.node.extend([new_node])\n\n g.value_info.remove(pre_value_info1)\n g.value_info.remove(pre_value_info2)\n\n while node_to_del:\n node = node_to_del.pop()\n g.node.remove(node)\n\n 
topological_sort(g)\n\n\ndef sub_constant_folding(g):\n \"\"\" Fold constant and sub nodes to a single node.\n \"\"\"\n node_to_del = []\n for node in g.node:\n if node.op_type != 'Sub':\n continue\n pre_node_1 = helper.find_node_by_output_name(g, node.input[0])\n pre_node_2 = helper.find_node_by_output_name(g, node.input[1])\n if pre_node_1.op_type != 'Constant':\n continue\n if pre_node_2.op_type != 'Constant':\n continue\n pre_val_info_1 = helper.find_value_by_name(g, node.input[0])\n pre_val_info_2 = helper.find_value_by_name(g, node.input[1])\n if len(helper.find_nodes_by_input_name(g, node.input[0])) > 1:\n continue\n if len(helper.find_nodes_by_input_name(g, node.input[1])) > 1:\n continue\n\n _, data1 = helper.constant_to_list(pre_node_1)\n _, data2 = helper.constant_to_list(pre_node_2)\n\n new_data = np.subtract(data1, data2)\n new_shape = helper.get_shape(new_data)\n new_tensor = onnx.helper.make_tensor(\n name=node.output[0]+'_data',\n data_type=pre_node_1.attribute[0].t.data_type,\n dims=new_shape,\n vals=helper.flatten_to_list(new_data)\n )\n new_node = onnx.helper.make_node(\n 'Constant',\n [],\n [node.output[0]],\n name=node.output[0],\n value=new_tensor\n )\n\n g.node.extend([new_node])\n node_to_del.extend([node, pre_node_1, pre_node_2])\n\n g.value_info.remove(pre_val_info_1)\n g.value_info.remove(pre_val_info_2)\n\n while node_to_del:\n node = node_to_del.pop()\n g.node.remove(node)\n\n topological_sort(g)\n\n\ndef neg_constant_folding(g):\n node_to_del = []\n for node in g.node:\n if node.op_type != 'Neg':\n continue\n pre_node = helper.find_node_by_output_name(g, node.input[0])\n if not pre_node or pre_node.op_type != 'Constant':\n continue\n\n shape, data_list = helper.constant_to_list(pre_node)\n new_data_list = [-num for num in data_list]\n\n new_tensor = onnx.helper.make_tensor(\n name=pre_node.name+'_neg_tensor',\n data_type=pre_node.attribute[0].t.data_type,\n dims=shape,\n vals=new_data_list\n )\n new_node = onnx.helper.make_node(\n 'Constant',\n [],\n [node.output[0]],\n name=node.output[0],\n value=new_tensor\n )\n\n g.node.extend([new_node])\n node_to_del.extend([pre_node, node])\n g.value_info.remove(helper.find_value_by_name(g, node.input[0]))\n\n while node_to_del:\n g.node.remove(node_to_del.pop())\n\n topological_sort(g)\n\n\n# Available constant folding names to function map.\nconstant_folding_nodes = {\n 'Add': add_constant_folding,\n 'Cast': cast_constant_folding,\n 'Concat': concat_constant_folding,\n 'Gather': gather_constant_folding,\n 'Mul': mul_constant_folding,\n 'Reciprocal': reciprocal_constant_folding,\n 'ReduceProd': reduceprod_constant_folding,\n 'Reshape': reshape_constant_input_folding,\n 'Slice': slice_constant_folding,\n 'Sqrt': sqrt_constant_folding,\n 'Transpose': transpose_constant_folding,\n 'Unsqueeze': unsqueeze_constant_folding1,\n 'Sub': sub_constant_folding,\n 'Neg': neg_constant_folding\n}\n","sub_path":"optimizer_scripts/tools/constant_folding.py","file_name":"constant_folding.py","file_ext":"py","file_size_in_byte":32967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"86551719","text":"\"\"\":arg\nThis file is to be used for function for simple rocket\n\"\"\"\nimport tkinter as tk\nfrom random import random, uniform\n\nfrom sklearn.utils import Bunch\n\nfrom ga_simple_rocket.config import MAX_ROCKET_HEIGHT, WIN_ADJUST, WINDOW_SIZE, REFRESH_RATE, ROCKET_SIZE\nfrom ga_simple_rocket.individual_class import Individual\nfrom ga_simple_rocket.simple_rocket_class import 
SimpleRocket\n# Assumed dependency: the drawing primitives used below (GraphWin, Point, Line,\n# Polygon, Text) come from Zelle's graphics.py module.\nfrom graphics import GraphWin, Line, Point, Polygon, Text\nimport time\n\n\ndef get_rocket_shape(point) -> Polygon:\n    \"\"\"\n    Returns a rocket polygon shape based on a rocket's point position.\n    Note that screen position (0, 0) is the top-left corner; the max position is the bottom-right\n    :param point:\n    :return: Polygon shape\n    \"\"\"\n    point.move(0, -ROCKET_SIZE)\n    tip = point\n    bottom_left = Point(point.x + ROCKET_SIZE//2, point.y + ROCKET_SIZE)\n    bottom_right = Point(point.x - ROCKET_SIZE//2, point.y + ROCKET_SIZE)\n    vertices = [tip, bottom_left, bottom_right]\n    shape = Polygon(vertices)\n    shape.setFill(\"black\")\n    return shape\n\n\ndef set_rocket_color(rocket: SimpleRocket, shape):\n    \"\"\"\n    Changes the color to represent the rocket's state\n    :param rocket:\n    :param shape:\n    :return:\n    \"\"\"\n    if rocket.has_failed:\n        shape.setFill('red')\n    elif rocket.has_landed:\n        shape.setFill('green')\n    elif rocket.engine_on:\n        shape.setFill('gray')\n    else:\n        shape.setFill('black')\n\n\ndef get_ground_and_sky_limit(win):\n    \"\"\"\n    Draws simple lines to delineate the ground and the (red) max-height limit\n    :param win:\n    :return:\n    \"\"\"\n    y = win.getHeight() - WIN_ADJUST\n    line = Line(Point(0, y), Point(win.getWidth(), y))\n    line.draw(win)\n\n    y = win.getHeight() - MAX_ROCKET_HEIGHT - WIN_ADJUST\n    line = Line(Point(0, y), Point(win.getWidth(), y))\n    line.setFill('red')\n    line.draw(win)\n\n\ndef end_windows(win):\n    \"\"\"\n    Boilerplate code to end the window\n    :param win:\n    :return:\n    \"\"\"\n    message = Text(Point(win.getWidth() / 2, 20), 'Click anywhere to quit.')\n    message.draw(win)\n    win.getMouse()\n    win.close()\n\n\ndef reply_single_rocket(b: Bunch, x: int = 100) -> object:\n    \"\"\"\n    Animates the data of a single rocket falling\n    :param b: Bunch object holding data information\n    :param x: position horizontally along the screen, default 100\n    :return:\n    \"\"\"\n    win = GraphWin('Face', WINDOW_SIZE[0], WINDOW_SIZE[1])\n    get_ground_and_sky_limit(win)\n    # create rocket shape from the initial rocket position\n    y = win.getHeight() - WIN_ADJUST - b.data[0]['pos']\n    shape = get_rocket_shape(Point(x, y))\n    shape.draw(win)\n    # create time counter\n    time_message = Text(Point(win.getWidth() / 4, win.getHeight() - 20), 'time:0')\n    time_message.draw(win)\n    # animate using the data\n    for i, d in enumerate(b.data):\n        time.sleep(REFRESH_RATE)\n        shape.move(0, -d['ds'])\n        color_shape(shape, d['failed'], d['landed'])\n        time_message.setText(f'time:{i+1}')\n    end_windows(win)\n\n\ndef reply_multiple_rocket(b: Bunch):\n    \"\"\"\n    Draws multiple rockets evenly spaced and replays their movement\n    :param b: Bunch object holding data information\n    \"\"\"\n    win = GraphWin('Face', WINDOW_SIZE[0], WINDOW_SIZE[1])\n    get_ground_and_sky_limit(win)\n    # create time counter\n    time_message = Text(Point(win.getWidth() / 4, win.getHeight() - 20), 'time:0')\n    time_message.draw(win)\n    shapes = []\n    n = len(b.data)\n    for i in range(n):\n        print(f'create shape{i}')\n        # create rocket shape from the initial rocket position\n        y = win.getHeight() - WIN_ADJUST - b.data[0][0]['pos']\n        x = win.getWidth()/(n+1)\n        shapes.append(get_rocket_shape(Point(x + x*i, y)))\n        shapes[i].draw(win)\n\n    for t in range(100):\n        time.sleep(REFRESH_RATE)\n        for i in range(n):\n            ds = b.data[i][t]['ds']\n            shapes[i].move(0, -ds)\n            color_shape(shapes[i], b.data[i][t]['failed'], b.data[i][t]['landed'])\n        time_message.setText(f'time:{t+1}')\n\n    end_windows(win)\n\n\ndef color_shape(shape, has_failed, has_landed):\n    if has_failed:\n        shape.setFill('red')\n    elif has_landed:\n        shape.setFill('green')\n\n\ndef select_roulette(individuals, k):\n    
sorted_inds = sorted(individuals, reverse=True)\n    sum_fits = sum(ind.fitness[0] for ind in individuals)\n    chosen = []\n    for i in range(k):\n        r = uniform(0, sum_fits)\n        sum_ = 0\n        for ind in sorted_inds:\n            sum_ += ind.fitness[0]\n            if sum_ > r:\n                chosen.append(ind)\n                break\n\n    return chosen\n\n\ndef mutate_individual(individual: Individual, indpb: float):\n    for i in range(len(individual.commands)):\n        if random() < indpb:\n            individual.commands[i] = type(individual.commands[i])(not individual.commands[i])\n    return individual,\n\n\n\n","sub_path":"ga_simple_rocket/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":4783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"435144491","text":"from datetime import date, datetime\n\nfrom telegram.ext import CommandHandler\n\nfrom pycamp_bot.commands.auth import admin_needed\n\ndate_start_pycamp = None\n\n\ndef is_pycamp_started(update):\n    global date_start_pycamp\n    if date_start_pycamp:\n        return True\n    else:\n        update.message.reply_text(text=\"PyCamp no ha comenzado\")\n        return False\n\n\n@admin_needed\ndef start_pycamp(bot, update):\n    global date_start_pycamp\n\n    if date_start_pycamp is not None:\n        bot.send_message(\n            chat_id=update.message.chat_id,\n            text=\"PyCamp Ya habia empezado ! {}\".format(str(date_start_pycamp))\n        )\n        return\n\n    date_start_pycamp = date.today()\n    bot.send_message(\n        chat_id=update.message.chat_id,\n        text=\"Empezó Pycamp :) ! {}\".format(str(date_start_pycamp))\n    )\n\n\n@admin_needed\ndef end_pycamp(bot, update):\n    bot.send_message(\n        chat_id=update.message.chat_id,\n        text=\"Terminó Pycamp :( !\"\n    )\n\n\ndef set_handlers(updater):\n    updater.dispatcher.add_handler(\n        CommandHandler('empezar_pycamp', start_pycamp))\n    updater.dispatcher.add_handler(\n        CommandHandler('terminar_pycamp', end_pycamp))\n","sub_path":"src/pycamp_bot/commands/manage_pycamp.py","file_name":"manage_pycamp.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"354236245","text":"import numpy as np\nimport torch\n\nclass PadChromatogramsForLSTM(object):\n    \"\"\"Pad chromatograms in LSTM batch to same length.\n    Chromatograms shaped (N, D), where N is the length of the chromatogram,\n    and D is the number of traces.\n    Labels shaped (N, 1).\n    \"\"\"\n\n    def __call__(self, batch):\n        batch_size = len(batch)\n        chromatograms = [item[0] for item in batch]\n        labels = [item[1] for item in batch]\n        lengths = [len(chromatogram) for chromatogram in chromatograms]\n        max_len = max(lengths)\n        trailing_dims = chromatograms[0].size()[1:]\n        out_dims = (max_len, batch_size) + trailing_dims\n        padded_chromatograms = torch.zeros(*out_dims)\n        padded_labels = torch.zeros(batch_size, max_len)\n\n        for i, chromatogram in enumerate(chromatograms):\n            length = chromatogram.size(0)\n            padded_chromatograms[max_len - length:, i, ...] 
= chromatogram\n\n for i, label in enumerate(labels):\n length = label.size(0)\n padded_labels[i, max_len - length:] = label\n\n return [padded_chromatograms, padded_labels]\n\nclass PadChromatogramsFor1DCNN(object):\n \"\"\"Pad whole chromatograms in 1DCNN batch to same size.\n Subsections shaped (D, N).\n Labels shaped (D, *), where * represents the number of labelled\n subsections.\n \"\"\"\n\n def __call__(self, batch):\n batch_size = len(batch)\n chromatograms = [item[0] for item in batch]\n lengths = [chromatogram.size()[1] for chromatogram in chromatograms]\n max_len = max(lengths)\n channel_dim = chromatograms[0].size()[0]\n out_dims = (batch_size, channel_dim, max_len)\n padded_chromatograms = torch.zeros(*out_dims)\n\n for i, chromatogram in enumerate(chromatograms):\n length = chromatogram.size(1)\n padded_chromatograms[i, 0:channel_dim, 0:length] = chromatogram\n\n labels = [item[1] for item in batch]\n label_lengths = [label.size()[0] for label in labels]\n max_label_len = max(label_lengths)\n label_out_dims = (batch_size, max_label_len)\n padded_labels = torch.zeros(batch_size, max_label_len)\n\n for i, label in enumerate(labels):\n length = label.size(0)\n padded_labels[i, 0:length] = label\n\n return [padded_chromatograms, padded_labels]\n\nclass PadChromatogramsOnlyFor1DCNN(object):\n \"\"\"Pad whole chromatograms in 1DCNN batch to same size.\n Subsections shaped (D, N).\n Assumes second item in batch is a pair of bounding box boundaries.\n \"\"\"\n\n def __call__(self, batch):\n batch_size = len(batch)\n \n assert batch_size == 1, 'batch_size=1 support only currently'\n\n chromatograms = [item[0] for item in batch]\n bboxes = [item[1] for item in batch]\n lengths = [chromatogram.size()[1] for chromatogram in chromatograms]\n max_len = max(lengths)\n channel_dim = chromatograms[0].size()[0]\n out_dims = (batch_size, channel_dim, max_len)\n padded_chromatograms = torch.zeros(*out_dims)\n\n for i, chromatogram in enumerate(chromatograms):\n length = chromatogram.size(1)\n padded_chromatograms[i, 0:channel_dim, 0:length] = chromatogram\n\n bboxes = bboxes[0].tolist()\n\n return [padded_chromatograms, bboxes]\n","sub_path":"train/collate_fns.py","file_name":"collate_fns.py","file_ext":"py","file_size_in_byte":3330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"337780960","text":"# stdlib\nfrom typing import List\n\n# third party\nfrom result import Err\nfrom result import Ok\nfrom result import Result\n\n# relative\nfrom ....telemetry import instrument\nfrom ...common.serde.serializable import serializable\nfrom ...common.uid import UID\nfrom .credentials import SyftVerifyKey\nfrom .document_store import BaseUIDStoreStash\nfrom .document_store import PartitionKey\nfrom .document_store import PartitionSettings\nfrom .document_store import QueryKeys\nfrom .messages import Message\nfrom .messages import MessageStatus\n\nFromUserVerifyKeyPartitionKey = PartitionKey(\n key=\"from_user_verify_key\", type_=SyftVerifyKey\n)\nToUserVerifyKeyPartitionKey = PartitionKey(\n key=\"to_user_verify_key\", type_=SyftVerifyKey\n)\nStatusPartitionKey = PartitionKey(key=\"status\", type_=MessageStatus)\n\n\n@instrument\n@serializable(recursive_serde=True)\nclass MessageStash(BaseUIDStoreStash):\n object_type = Message\n settings: PartitionSettings = PartitionSettings(\n name=Message.__canonical_name__,\n object_type=Message,\n )\n\n def get_all_inbox_for_verify_key(\n self, verify_key: SyftVerifyKey\n ) -> Result[List[Message], str]:\n qks 
= QueryKeys(\n qks=[\n ToUserVerifyKeyPartitionKey.with_obj(verify_key),\n ]\n )\n return self.get_all_for_verify_key(verify_key=verify_key, qks=qks)\n\n def get_all_sent_for_verify_key(\n self, verify_key: SyftVerifyKey\n ) -> Result[List[Message], str]:\n qks = QueryKeys(\n qks=[\n FromUserVerifyKeyPartitionKey.with_obj(verify_key),\n ]\n )\n return self.get_all_for_verify_key(verify_key=verify_key, qks=qks)\n\n def get_all_for_verify_key(\n self, verify_key: SyftVerifyKey, qks: QueryKeys\n ) -> Result[List[Message], str]:\n if isinstance(verify_key, str):\n verify_key = SyftVerifyKey.from_string(verify_key)\n return self.query_all(qks=qks)\n\n def get_all_by_verify_key_for_status(\n self, verify_key: SyftVerifyKey, status: MessageStatus\n ) -> Result[List[Message], str]:\n qks = QueryKeys(\n qks=[\n FromUserVerifyKeyPartitionKey.with_obj(verify_key),\n ToUserVerifyKeyPartitionKey.with_obj(verify_key),\n StatusPartitionKey.with_obj(status),\n ]\n )\n return self.query_all(qks=qks)\n\n def update_message_status(\n self, uid: UID, status: MessageStatus\n ) -> Result[Message, str]:\n result = self.get_by_uid(uid=uid)\n if result.is_err():\n return result.err()\n\n message = result.ok()\n if message is None:\n return Err(f\"No message exists for id: {uid}\")\n message.status = status\n return self.update(obj=message)\n\n def delete_all_for_verify_key(self, verify_key: SyftVerifyKey) -> Result[bool, str]:\n messages = self.get_all_inbox_for_verify_key(verify_key=verify_key)\n for message in messages:\n result = self.delete_by_uid(uid=message.id)\n if result.is_err():\n return result\n return Ok(True)\n","sub_path":"packages/syft/src/syft/core/node/new/message_stash.py","file_name":"message_stash.py","file_ext":"py","file_size_in_byte":3133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"227407574","text":"\"\"\"\n917. 
Reverse Only Letters\nGiven a string S, return the \"reversed\" string where all characters that are not a letter stay in the same place, and all letters reverse their positions.\n\nExample 1:\n\nInput: \"ab-cd\"\nOutput: \"dc-ba\"\nExample 2:\n\nInput: \"a-bC-dEf-ghIj\"\nOutput: \"j-Ih-gfE-dCba\"\nExample 3:\n\nInput: \"Test1ng-Leet=code-Q!\"\nOutput: \"Qedo1ct-eeLg=ntse-T!\"\n\nNote:\n\nS.length <= 100\n33 <= S[i].ASCIIcode <= 122\nS does not contain \\ or \"\n\n\"\"\"\n\nclass Solution:\n    def reverseOnlyLetters(self, S: str) -> str:\n        n = len(S)\n        ans = [''] * n\n        i = -1\n        # Keep non-letter characters in their original slots\n        for j in range(n):\n            if not S[j].isalpha():\n                ans[j] = S[j]\n        # Fill the letters from the end into the remaining empty slots\n        for k in range(n):\n            if S[k].isalpha():\n                while i >= -n:\n                    if ans[i] == '':\n                        ans[i] = S[k]\n                        break\n                    else:\n                        i -= 1\n        return ''.join(ans)","sub_path":"easy/917-reverseOnlyLetters.py","file_name":"917-reverseOnlyLetters.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"358802322","text":"from heap import MaxHeap\n\nclass PriorityQueue(object):\n    def __init__(self, maxsize=None):\n        self.maxsize = maxsize\n        self._maxheap = MaxHeap(maxsize)\n\n    def push(self, priority, value):\n        entry = (priority, value)\n        self._maxheap.add(entry)\n\n    def pop(self, with_priority=False):\n        entry = self._maxheap.extract()\n        if with_priority:\n            return entry\n        else:\n            return entry[1]\n\n    def is_empty(self):\n        return len(self._maxheap) == 0\n\ndef test_priority_queue():\n    size = 5\n    pq = PriorityQueue(size)\n    pq.push(5, 'purple')\n    pq.push(0, 'white')\n    pq.push(3, 'orange')\n    pq.push(1, 'black')\n\n    res = []\n    while not pq.is_empty():\n        res.append(pq.pop())\n    assert res == ['purple', 'orange', 'black', 'white']\n\ntest_priority_queue()","sub_path":"python数据结构/priority_queue.py","file_name":"priority_queue.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"119913283","text":"# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom api_v2.config.serializers import ConfigLogListSerializer, ConfigLogSerializer\nfrom api_v2.config.utils import get_config_schema\nfrom api_v2.views import CamelCaseGenericViewSet\nfrom cm.api import update_obj_config\nfrom cm.errors import AdcmEx\nfrom cm.models import ConfigLog\nfrom django.contrib.contenttypes.models import ContentType\nfrom guardian.mixins import PermissionListMixin\nfrom rest_framework.decorators import action\nfrom rest_framework.exceptions import NotFound\nfrom rest_framework.mixins import CreateModelMixin, ListModelMixin, RetrieveModelMixin\nfrom rest_framework.response import Response\nfrom rest_framework.status import HTTP_200_OK, HTTP_201_CREATED\n\nfrom adcm.mixins import GetParentObjectMixin\nfrom adcm.permissions import VIEW_CONFIG_PERM, check_config_perm\n\n\nclass ConfigLogViewSet(\n    PermissionListMixin,\n    ListModelMixin,\n    CreateModelMixin,\n    RetrieveModelMixin,\n    GetParentObjectMixin,\n    CamelCaseGenericViewSet,\n):  # pylint: disable=too-many-ancestors\n    queryset = ConfigLog.objects.select_related(\"obj_ref\").order_by(\"-pk\")\n    serializer_class = ConfigLogSerializer\n    permission_required = [VIEW_CONFIG_PERM]\n    
filter_backends = []\n\n def get_queryset(self, *args, **kwargs):\n parent_object = self.get_parent_object()\n if parent_object is None:\n raise NotFound\n\n if not parent_object.config:\n return self.queryset.none()\n\n return super().get_queryset(*args, **kwargs).filter(obj_ref=parent_object.config)\n\n def get_serializer_class(self):\n if self.action == \"list\":\n return ConfigLogListSerializer\n\n return self.serializer_class\n\n def create(self, request, *args, **kwargs):\n parent_object = self.get_parent_object()\n\n if parent_object is None:\n raise NotFound(\"Can't find config's parent object\")\n\n if parent_object.config is None:\n raise AdcmEx(code=\"CONFIG_NOT_FOUND\", msg=\"This object has no config\")\n\n check_config_perm(\n user=request.user,\n action_type=\"change\",\n model=ContentType.objects.get_for_model(model=parent_object).model,\n obj=parent_object,\n )\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n initial_data = serializer.initial_data\n config_log = update_obj_config(\n obj_conf=parent_object.config,\n config=initial_data[\"config\"],\n attr=initial_data[\"attr\"],\n description=initial_data.get(\"description\", \"\"),\n )\n\n return Response(data=self.get_serializer(config_log).data, status=HTTP_201_CREATED)\n\n @action(methods=[\"get\"], detail=True, url_path=\"schema\", url_name=\"schema\")\n def config_schema(self, request, *args, **kwargs) -> Response: # pylint: disable=unused-argument\n schema = get_config_schema(parent_object=self.get_parent_object())\n\n return Response(data=schema, status=HTTP_200_OK)\n","sub_path":"python/api_v2/config/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"462581826","text":"from statistics import mean\nimport numpy as np\nfrom client.data import Logged_Data\nfrom routes import server\nimport struct\n\ndht11_data = []\nhall_effect_data = []\ntokens = []\nlogs = []\npackets = []\n\nauthenticated_tokens = {}\n\nclass WeightCalculator:\n def __init__(self):\n self.hx711_data = []\n self.x = []\n self.y = []\n self.m = 1\n self.b = 0\n\n def add_data(self, data):\n self.hx711_data.append(data)\n \n def get_raw_data(self):\n return self.hx711_data\n \n def calibrate_value(self, x):\n y = self.m*x + self.b\n return y\n\n def add_point(self, weight, samples=20):\n x = self.hx711_data[-samples:]\n total_samples = len(x)\n average_reading = sum((_x.adc_value for _x in x)) / total_samples\n self.x.append(average_reading)\n self.y.append(weight)\n self.calibrate()\n\n def calibrate(self):\n self.calculate_calibration()\n self.upload_calibration()\n \n def calculate_calibration(self):\n if len(self.x) == 0:\n return\n \n if len(self.x) == 1:\n weight = self.y[0]\n raw = self.x[0]\n if weight == 0:\n offset = -raw\n self.b = offset\n self.m = 1\n return\n \n m, b = best_fit_slope_and_intercept(np.array(self.x), np.array(self.y))\n self.m = m\n self.b = b\n \n def upload_calibration(self):\n packet = struct.pack(\" 0) & (epoch % self.epochs_to_cycle == 0):\n self.myImageDataGenerator.cycle_KFold_train_val_sets()\n\n\n\n\nclass K_folds_iterator(Iterator):\n\n def __init__(self, x, batch_size=32):\n self.x = x\n super(K_folds_iterator, self).__init__(x.shape[0], batch_size, shuffle=False, seed=None)\n\n\n def next(self):\n with self.lock:\n if (keras.__version__.startswith('2.1') | keras.__version__.startswith('2.2')):\n index_array = 
next(self.index_generator)\n elif (keras.__version__ == ('2.0.8')):\n index_array, current_index, current_batch_size = next(self.index_generator)\n batch_x = self.x[index_array]\n return batch_x\n","sub_path":"KFoldCycleCallback.py","file_name":"KFoldCycleCallback.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"187326844","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('ella_attachments', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='attachment',\n name='publishables',\n field=models.ManyToManyField(to='core.Publishable', verbose_name='Publishables', blank=True),\n ),\n ]\n","sub_path":"ella_attachments/migrations/0002_auto_20150506_2200.py","file_name":"0002_auto_20150506_2200.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"568478116","text":"\"\"\"\nThis is a rewritten version of the backup dashboards script at https://github.com/nathangiusti/Sisense/tree/master/BackupDashboards that uses the PySense Library. \n\"\"\"\n\nfrom PIL import Image\nimport yaml\nimport sys\nimport PySense\n\nRETRY = 3\nERROR_DASHES = []\n\n\ndef build_path(folder, dashboard_id, file_format, file_num=None):\n \"\"\"\n Builds the path to save the export to\n\n :param folder: Folder to save dashboard to\n :param dashboard_id: Id of dashboard\n :param file_format: Format to save dashboard to\n :param file_num: A number to place after the file name in case multiple images from the same dash are taken\n :return: A string in the format /dashboard_id.file_format\n \"\"\"\n if file_num:\n return \"{}\\\\{}-{}.{}\".format(folder, dashboard_id, file_num, file_format)\n else:\n return \"{}\\\\{}.{}\".format(folder, dashboard_id, file_format)\n\n\ndef build_query_string(query_parameters):\n \"\"\"\n Turns a dictionary of query parameters into a query string\n :param query_parameters: a dictionary of query parameters\n :return: The query_parameters dictionary flattened into a string\n \"\"\"\n\n query_string = ''\n if not query_parameters:\n return query_string\n for param in query_parameters:\n if isinstance(query_parameters[param], bool):\n val = str(query_parameters[param]).lower()\n else:\n val = query_parameters[param]\n query_string += '{}={}&'.format(param, val)\n if not query_string:\n return ''\n else:\n return query_string[:-1]\n\n\ndef create_cropping_list(cropping_string):\n coord_list = []\n for coord_str in cropping_string:\n if len(coord_str.split(',')) != 5:\n print(\"Invalid coordinate string {}. 
Requires 5 integers.\".format(cropping_string))\n continue\n for coord in coord_str.split(','):\n coord_list.append(int(coord))\n return coord_list\n\n\ndef export_png(format_vars, dashboard, file_folder, cropping):\n \"\"\"\n Exports dashboard to png\n\n :param format_vars: Dictionary from YAML containing format variables\n :param dashboard: The dashboard id\n :param file_folder: Folder to export dashboard to\n :param cropping: The cropping yaml section\n :return: Nothing\n \"\"\"\n if cropping and dashboard in cropping:\n i = 0\n for coord_str in cropping[dashboard]:\n i += 1\n coord_arr = coord_str.split(',')\n if len(coord_arr) == 5:\n file_path = build_path(file_folder, dashboard, format_vars['file_type'], i)\n png_file = PySense.get_dashboard_export_png(dashboard, file_path, format_vars['query_params'])\n image_obj = Image.open(png_file)\n x1 = int(coord_arr[0])\n y1 = int(coord_arr[1])\n x2 = int(coord_arr[2])\n y2 = int(coord_arr[3])\n width = int(coord_arr[4])\n cropped_image = image_obj.crop((x1, y1, x2, y2))\n scaling_ratio = width / (x2 - x1)\n y_coord = scaling_ratio * (y2 - y1)\n resized_image = cropped_image.resize((width, int(y_coord)))\n file_path = build_path(file_folder, dashboard, format_vars['file_type'], i)\n print('Cropped image to {} by {}'.format(width, int(y_coord)))\n resized_image.save(file_path)\n print(\"Image saved to {}\".format(file_path))\n elif len(coord_arr) == 3:\n width = int(coord_arr[1])\n height = int(coord_arr[2])\n widget_format = {'file_type': 'png', 'query_params': {'width': width, 'height': height}}\n PySense.post_dashboard_widget_export_png(dashboard,\n coord_arr[0],\n build_path(file_folder, dashboard, format_vars['file_type']),\n widget_format['query_params'])\n else:\n return PySense.get_dashboard_export_png(dashboard,\n build_path(file_folder, dashboard, format_vars['file_type']),\n param_dict=format_vars['query_params'])\n\n\ndef main():\n if sys.argv[1] is None:\n print(\"No config file supplied\")\n exit()\n\n config = sys.argv[1]\n\n with open(config, 'r') as stream:\n data_loaded = yaml.safe_load(stream)\n\n host = data_loaded['host']\n PySense.authenticate(host, data_loaded['authentication']['username'], data_loaded['authentication']['password'])\n global_vars = data_loaded['globals']\n format_vars = global_vars['format']\n file_folder = global_vars['folder']\n cropping = data_loaded['cropping'] if 'cropping' in data_loaded else None\n\n dashboard_id_list = []\n if 'query_params' in data_loaded['dashboards']:\n dashboard_id_list = PySense.get_dashboards(param_dict=data_loaded['dashboards']['query_params'])\n\n if 'ids' in data_loaded['dashboards']:\n for dashboard in data_loaded['dashboards']['ids']:\n if dashboard not in dashboard_id_list:\n dashboard_id_list.append(dashboard)\n\n print('Backing up {} dashboards'.format(len(dashboard_id_list)))\n for dashboard in dashboard_id_list:\n if format_vars['file_type'] == 'png':\n export_png(format_vars, dashboard, file_folder, cropping)\n elif format_vars['file_type'] == 'pdf':\n query_params = format_vars['query_params']\n PySense.get_dashboard_export_pdf(dashboard, file_folder + dashboard + '.pdf', query_params['paperFormat'],\n query_params['paperOrientation'], query_params['layout'])\n elif format_vars['file_type'] == 'dash':\n PySense.get_dashboard_export_dash(dashboard, file_folder + dashboard + '.dash')\n\n print('Backups complete')\n if len(ERROR_DASHES) > 0:\n print(\"Following calls failed:\")\n for dash in ERROR_DASHES:\n 
print(dash)\n\n\nmain()\n","sub_path":"PySense/Examples/backup_dashboards.py","file_name":"backup_dashboards.py","file_ext":"py","file_size_in_byte":6097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"426639543","text":"\n\nfrom tkinter import *\nimport tkinter.messagebox as msgbox\n\n\nclass Win(Frame):\n def __init__(self, tl, master=None):\n super().__init__(master)\n self.pack()\n self.master.title(tl)\n self.createWidget()\n\n def createWidget(self):\n self.label = Label(self, text='世界和平')\n self.label.pack()\n self.nameInput = Entry(self)\n self.nameInput.pack()\n self.button_al = Button(self, text='Hello', command=self.hello)\n self.button_al.pack()\n self.button_q = Button(self, text='Quit', command=self.quit)\n self.button_q.pack()\n\n def hello(self):\n name = self.nameInput.get() or 'World'\n msgbox.showinfo('message', 'Hello %s' % name)\n\n\napp = Win('World')\napp.mainloop()\n","sub_path":"my_exec/mytk/mytk.py","file_name":"mytk.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"123808957","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\n'''\nCreated on 2019-12-21\n\n@author: mingo\n@module: experiment_mama.elbow_method\n'''\n\n# clustering dataset\n# determine k using elbow method\n\nimport matplotlib\nmatplotlib.use('AGG') \nfrom sklearn.cluster import KMeans\nfrom scipy.spatial.distance import cdist\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pickle\nimport csv\nimport time\nimport joblib\n \n# x1 = np.array([3, 1, 1, 2, 1, 6, 6, 6, 5, 6, 7, 8, 9, 8, 9, 9, 8, 8, 9, 9, 8, 9, 8, 7, 6, 5, 6, 6, 6, 1, 2, 1, 1, 3])\n# x2 = np.array([5, 4, 5, 6, 5, 8, 6, 7, 6, 7, 1, 2, 1, 2, 3, 2, 3, 5, 4, 5, 6, 5, 8, 6, 7, 6, 7, 1, 2, 1, 2, 3, 2, 3])\n# \n# # plt.plot()\n# # plt.xlim([0, 10])\n# # plt.ylim([0, 10])\n# # plt.title('Dataset')\n# # plt.scatter(x1, x2)\n# # plt.show()\n# # \n# # # create new plot and data\n# # plt.plot()\n# X = np.array(list(zip(x1, x2))).reshape(len(x1), 2)\n# print(X)\n# colors = ['b', 'g', 'r']\n# markers = ['o', 'v', 's']\n# \n# # k means determine k\n# distortions = []\n# K = range(1, 10)\n# for k in K:\n# kmeanModel = KMeans(n_clusters=k).fit(X)\n# kmeanModel.fit(X)\n# # print(kmeanModel.inertia_/X.shape[0])\n# distortions.append(sum(np.min(cdist(X, kmeanModel.cluster_centers_, 'euclidean'), axis=1)) / X.shape[0])\n# # print('%d: %s' % (k, str(kmeanModel.cluster_centers_)))\n# \n# # Plot the elbow\n# plt.plot(K, distortions, 'bx-')\n# plt.xlabel('k')\n# plt.ylabel('Distortion')\n# plt.title('The Elbow Method showing the optimal k')\n# plt.show()\n\ndef elbow_method_TransE_embedding():\n pkl_path = 'entity_embedding_TransE.pkl'\n with open(pkl_path, 'rb') as f:\n entity_embedding = pickle.load(f)\n print(type(entity_embedding))\n print(entity_embedding.shape)\n \n embeddings_id_entity_mapping = {}\n embeddings = []\n with open('entity_method.txt', 'r') as f:\n reader = csv.reader(f) \n idx = 0\n for row in reader:\n entity = row[0]\n entity_id = int(row[1])\n embeddings.append(entity_embedding[entity_id, ])\n embeddings_id_entity_mapping[idx] = entity\n idx += 1\n embeddings = np.array(embeddings)\n# y_pred = KMeans(n_clusters=1000, random_state=0).fit_predict(embeddings)\n distortions = []\n K = range(100,2001, 100)\n for k in K:\n start = time.time()\n kmeanModel = KMeans(n_clusters=k)\n kmeanModel.fit(embeddings)\n inertia = kmeanModel.inertia_\n distortions.append(inertia)\n 
print('fit k=%d spend %fs inertia: %f' % (k, time.time() - start, inertia))\n with open('distortions.txt', 'w') as f:\n f.write('\\n'.join([str(_) for _ in distortions]))\n f.write('\\n') \n plt.figure(figsize=(16,8))\n plt.plot(K, distortions, 'bx-')\n plt.xlabel('k')\n plt.ylabel('Distortion')\n plt.title('The Elbow Method showing the optimal k')\n plt.savefig('elbow_method_distortion.png')\n print('finish')\n\ndef elbow_method_evedroid_embedding():\n pkl_path = 'evedroid_api2vec_new.pkl'\n with open(pkl_path, 'rb') as f:\n evedroid_embedding = joblib.load(f)\n print(type(evedroid_embedding))\n print(len(evedroid_embedding))\n embeddings = []\n idx = 0\n for api in evedroid_embedding:\n# if idx < 5:\n# print(api)\n# print(evedroid_embedding[api])\n embeddings.append(evedroid_embedding[api])\n idx += 1\n embeddings = np.array(embeddings)\n# y_pred = KMeans(n_clusters=1000, random_state=0).fit_predict(embeddings)\n distortions = []\n K = range(100,2001, 100)\n for k in K:\n start = time.time()\n kmeanModel = KMeans(n_clusters=k)\n kmeanModel.fit(embeddings)\n inertia = kmeanModel.inertia_\n distortions.append(inertia)\n print('fit k=%d spend %fs inertia: %f' % (k, time.time() - start, inertia))\n with open('evedroid_distortions.txt', 'w') as f:\n f.write('\\n'.join([str(_) for _ in distortions]))\n f.write('\\n') \n plt.figure(figsize=(16,8))\n plt.plot(K, distortions, 'bx-')\n plt.xlabel('k')\n plt.ylabel('Distortion')\n plt.title('The Elbow Method showing the optimal k')\n plt.savefig('elbow_method_evedorid_distortion.png')\n print('finish')\n\nif __name__ == \"__main__\":\n# elbow_method_TransE_embedding()\n elbow_method_evedroid_embedding()","sub_path":"elbow_method.py","file_name":"elbow_method.py","file_ext":"py","file_size_in_byte":4311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"68203994","text":"from enum import Enum, unique\n\n\"\"\"\nEnumerated type describing non-volatile parameters\n\"\"\"\n\n\n@unique\nclass Parameter(Enum):\n SheetLength = 0\n LapSize = 1\n HeatSetPoint = 2\n LaminatorSpeed = 3\n SuctionRelease = 4\n AirBlastDelay = 5\n VacuumDelay = 6\n HoldPosition = 7\n GoToHoldSpeed = 8\n InFeedDiameter = 9\n OutFeedDiameter = 10\n KnifeRatio = 11\n CutterDistance = 12\n ReturnHomeSpeed = 13\n GapLoadLength = 14\n\n\nparam_data = {\n Parameter.SheetLength: {\"name\": \"Sheet Length\", \"unit\": \"in\", \"vals\": (8.0, 48.0, 0.1, 16.0)},\n Parameter.LapSize: {\"name\": \"Lap Size\", \"unit\": \"in\", \"vals\": (0.0, 2.0, 0.1, 0.5)},\n Parameter.HeatSetPoint: {\"name\": \"Temperature Set\", \"unit\": \"\\u00b0F\", \"vals\": (150.0, 350.0, 1.0, 275.0)},\n Parameter.LaminatorSpeed: {\"name\": \"Laminator Speed\", \"unit\": \"ft/s\", \"vals\": (0.0, 60.0, 1.0, 20.0)},\n Parameter.SuctionRelease: {\"name\": \"Suction Release\", \"unit\": \"ms\", \"vals\": (200.0, 1000.0, 1.0, 600.0)},\n Parameter.AirBlastDelay: {\"name\": \"Air Blast Delay\", \"unit\": \"ms\", \"vals\": (100.0, 500.0, 1.0, 300.0)},\n Parameter.VacuumDelay: {\"name\": \"Vacuum Delay\", \"unit\": \"ms\", \"vals\": (100.0, 500.0, 1.0, 400.0)},\n Parameter.HoldPosition: {\"name\": \"Hold Position\", \"unit\": \"\", \"vals\": (2000.0, 3500.0, 1.0, 2743.0)},\n Parameter.GoToHoldSpeed: {\"name\": \"Go To Hold Speed\", \"unit\": \"\", \"vals\": (100.0, 9999.0, 1.0, 5000.0)},\n Parameter.InFeedDiameter: {\"name\": \"In Feed Diameter\", \"unit\": \"in\", \"vals\": (1.9, 2.1, 0.001, 2.0)},\n Parameter.OutFeedDiameter: {\"name\": \"Out Feed Diameter\", \"unit\": 
\"in\", \"vals\": (1.4, 1.6, 0.001, 1.5)},\n Parameter.KnifeRatio: {\"name\": \"Knife Speed Ratio\", \"unit\": \"\", \"vals\": (3.0, 7.0, 0.001, 5.0)},\n Parameter.CutterDistance: {\"name\": \"Cutter Distance\", \"unit\": \"\", \"vals\": (25.0, 31.0, 0.01, 28.0)},\n Parameter.ReturnHomeSpeed: {\"name\": \"Homing Speed\", \"unit\": \"\", \"vals\": (1000.0, 9999.0, 1.0, 5000.0)},\n Parameter.GapLoadLength: {\"name\": \"Gap Load Length\", \"unit\": \"in\", \"vals\": (6.0, 9.0, 0.01, 7.5)}\n}\n","sub_path":"app/enum/param.py","file_name":"param.py","file_ext":"py","file_size_in_byte":2043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"403532792","text":"import logging\nimport math\n\nfrom .adaptive_core import AdaptiveCore\nfrom ..utils import log_errors, parse_timedelta\nfrom ..protocol import pickle\n\nlogger = logging.getLogger(__name__)\n\n\nclass Adaptive(AdaptiveCore):\n '''\n Adaptively allocate workers based on scheduler load. A superclass.\n\n Contains logic to dynamically resize a Dask cluster based on current use.\n This class needs to be paired with a system that can create and destroy\n Dask workers using a cluster resource manager. Typically it is built into\n already existing solutions, rather than used directly by users.\n It is most commonly used from the ``.adapt(...)`` method of various Dask\n cluster classes.\n\n Parameters\n ----------\n cluster: object\n Must have scale and scale_down methods/coroutines\n interval : timedelta or str, default \"1000 ms\"\n Milliseconds between checks\n wait_count: int, default 3\n Number of consecutive times that a worker should be suggested for\n removal before we remove it.\n target_duration: timedelta or str, default \"5s\"\n Amount of time we want a computation to take.\n This affects how aggressively we scale up.\n worker_key: Callable[WorkerState]\n Function to group workers together when scaling down\n See Scheduler.workers_to_close for more information\n minimum: int\n Minimum number of workers to keep around\n maximum: int\n Maximum number of workers to keep around\n **kwargs:\n Extra parameters to pass to Scheduler.workers_to_close\n\n Examples\n --------\n\n This is commonly used from existing Dask classes, like KubeCluster\n\n >>> from dask_kubernetes import KubeCluster\n >>> cluster = KubeCluster()\n >>> cluster.adapt(minimum=10, maximum=100)\n\n Alternatively you can use it from your own Cluster class by subclassing\n from Dask's Cluster superclass\n\n >>> from distributed.deploy import Cluster\n >>> class MyCluster(Cluster):\n ... def scale_up(self, n):\n ... \"\"\" Bring worker count up to n \"\"\"\n ... def scale_down(self, workers):\n ... \"\"\" Remove worker addresses from cluster \"\"\"\n\n >>> cluster = MyCluster()\n >>> cluster.adapt(minimum=10, maximum=100)\n\n Notes\n -----\n Subclasses can override :meth:`Adaptive.should_scale_up` and\n :meth:`Adaptive.workers_to_close` to control when the cluster should be\n resized. 
The default implementation checks if there are too many tasks\n    per worker or too little memory available (see :meth:`Adaptive.needs_cpu`\n    and :meth:`Adaptive.needs_memory`).\n    '''\n\n    def __init__(\n        self,\n        cluster=None,\n        interval=\"1s\",\n        minimum=0,\n        maximum=math.inf,\n        wait_count=3,\n        target_duration=\"5s\",\n        worker_key=None,\n        **kwargs\n    ):\n        self.cluster = cluster\n        self.worker_key = worker_key\n        self._workers_to_close_kwargs = kwargs\n        self.target_duration = parse_timedelta(target_duration)\n\n        super().__init__(\n            minimum=minimum, maximum=maximum, wait_count=wait_count, interval=interval\n        )\n\n    @property\n    def scheduler(self):\n        return self.cluster.scheduler_comm\n\n    @property\n    def plan(self):\n        try:\n            return set(self.cluster.worker_spec)\n        except AttributeError:\n            return set(self.cluster.workers)\n\n    @property\n    def requested(self):\n        return set(self.cluster.workers)\n\n    @property\n    def observed(self):\n        return {d[\"name\"] for d in self.cluster.scheduler_info[\"workers\"].values()}\n\n    async def target(self):\n        return await self.scheduler.adaptive_target(\n            target_duration=self.target_duration\n        )\n\n    async def workers_to_close(self, target: int):\n        \"\"\"\n        Determine which, if any, workers should potentially be removed from\n        the cluster.\n\n        Notes\n        -----\n        ``Adaptive.workers_to_close`` dispatches to Scheduler.workers_to_close(),\n        but may be overridden in subclasses.\n\n        Returns\n        -------\n        List of worker addresses to close, if any\n\n        See Also\n        --------\n        Scheduler.workers_to_close\n        \"\"\"\n        return await self.scheduler.workers_to_close(\n            target=target,\n            key=pickle.dumps(self.worker_key) if self.worker_key else None,\n            attribute=\"name\",\n            **self._workers_to_close_kwargs\n        )\n\n    async def scale_down(self, workers):\n        if not workers:\n            return\n        with log_errors():\n            # Ask scheduler to cleanly retire workers\n            await self.scheduler.retire_workers(\n                names=workers, remove=True, close_workers=True\n            )\n\n            # close workers more forcefully\n            logger.info(\"Retiring workers %s\", workers)\n            f = self.cluster.scale_down(workers)\n            if hasattr(f, \"__await__\"):\n                await f\n\n    async def scale_up(self, n):\n        self.cluster.scale(n)\n\n    @property\n    def loop(self):\n        return self.cluster.loop\n","sub_path":"distributed/deploy/adaptive.py","file_name":"adaptive.py","file_ext":"py","file_size_in_byte":5099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"159818372","text":"#!/usr/bin/env python\nimport numpy as np\nimport cv2\n\nimg = cv2.imread('messi5.jpg')\n\nprint(img.shape)\nprint(img.size)\nprint(img.dtype)\nb,g,r = cv2.split(img)\nt = cv2.merge((b,g,r))\n\n\n\ncv2.imshow('image', t)\ncv2.waitKey(0)\ncv2.destroyAllWindows()","sub_path":"several_methods.py","file_name":"several_methods.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"156958514","text":"import urllib\r\nimport cv2\r\nimport numpy as np\r\nimport time\r\n\r\n# Replace the URL with your own IPwebcam shot.jpg IP:port\r\nurl='http://192.168.2.35:8080/shot.jpg'\r\n\r\nwhile True:\r\n\r\n    # Use urllib to get the image and convert into a cv2 usable format\r\n    imgResp=urllib.urlopen(url)\r\n    imgNp=np.array(bytearray(imgResp.read()),dtype=np.uint8)\r\n    img=cv2.imdecode(imgNp,-1)\r\n\r\n    # put the image on screen\r\n    cv2.imshow('IPWebcam',img)\r\n\r\n    #To give the processor some less stress\r\n    #time.sleep(0.1) \r\n\r\n\r\n\r\ndef match_percent(path_ref, img,i):\r\n    
ref_img = cv2.imread(path_ref, 0)\r\n ref_img = cv2.resize(ref_img, (400,300))\r\n #cv2.imshow('resize_IPWebcam1',ref_img)\r\n ref_edges = cv2.Canny(ref_img, 100, 200)\r\n\r\n #img = cv2.imread(path_img, 0)\r\n img = cv2.resize(img, (400, 300))\r\n #cv2.imshow('resize_IPWebcam2',img)\r\n edges = cv2.Canny(img, 100, 200)\r\n\r\n #cv2.imwrite(\"IP1.jpg\",img) #To CAlibrate\r\n #cv2.imwrite(\"IP2.jpg\",ref_img) #To Calibrate\r\n \r\n height, width = ref_edges.shape\r\n print(\"ctr\",i,\"height\",height,\"width\",width)\r\n whites = 0\r\n matches = 0\r\n\r\n for i in range(0, height):\r\n for j in range(0, width):\r\n if ref_edges[i, j] == 255:\r\n whites = whites + 1\r\n if (ref_edges[i, j] != 255) and edges[i, j] == 255:\r\n matches = matches + 1\r\n \r\n print(\"match\",matches)\r\n print(\"whites\",whites)\r\n a= (float(matches)/(float(matches)+float(whites)+50.0))\r\n print(\"a\",a)\r\n a*=100.0\r\n match_percent = a\r\n print(match_percent)\r\n \r\n return match_percent \r\n\r\n\r\n\r\n\r\nimport time\r\ndef operate_led():\r\n \r\n GPIO.setwarnings(False)\r\n GPIO.setmode(GPIO.BOARD)\r\n \r\n def setupgpio(LED_RED, LED_AMBER, LED_GREEN):\r\n GPIO.setup(LED_RED, GPIO.OUT)\r\n GPIO.setup(LED_AMBER, GPIO.OUT)\r\n GPIO.setup(LED_GREEN, GPIO.OUT)\r\n \r\n def red (LED_RED, LED_AMBER, LED_GREEN):\r\n GPIO.output (LED_RED, LEDON)\r\n GPIO.output (LED_AMBER, LEDOFF)\r\n GPIO.output (LED_GREEN, LEDOFF)\r\n\r\n def amber (LED_RED, LED_AMBER, LED_GREEN):\r\n GPIO.output (LED_RED, LEDOFF)\r\n GPIO.output (LED_AMBER, LEDON)\r\n GPIO.output (LED_GREEN, LEDOFF)\r\n\r\n def green (LED_RED, LED_AMBER, LED_GREEN):\r\n GPIO.output (LED_RED, LEDOFF)\r\n GPIO.output (LED_AMBER, LEDOFF)\r\n GPIO.output (LED_GREEN, LEDON)\r\n \r\n def alloff (LED_RED, LED_AMBER, LED_GREEN, val):\r\n GPIO.output (LED_RED, LEDOFF)\r\n GPIO.output (LED_AMBER, LEDOFF)\r\n GPIO.output (LED_GREEN, LEDOFF)\r\n time.sleep(val)\r\n \r\n def basicSequence (val):\r\n red ()\r\n time.sleep(val)\r\n if turns[t_no] != 1:\r\n print(t_no)\r\n amber()\r\n time.sleep(1)\r\n green()\r\n time.sleep(5)\r\n turns[t_no] = 1\r\n lock.release()\r\n\r\n RED = [7, 15, 26, 40]\r\n AMBER = [5, 13, 24, 38]\r\n GREEN = [3, 11, 22, 36]\r\n \r\n LEDOFF = 0\r\n LEDON = 1\r\n\r\n setupgpio(RED[0],AMBER[0],GREEN[0])\r\n setupgpio(RED[1],AMBER[1],GREEN[1])\r\n setupgpio(RED[2],AMBER[2],GREEN[2])\r\n setupgpio(RED[3],AMBER[3],GREEN[3])\r\n for i in range(0,4):\r\n for j in range(0,4):\r\n for k in range(0,4):\r\n alloff(RED[i],AMBER[j],GREEN[k],0)\r\n red(RED[i],AMBER[j],GREEN[k])\r\n \r\n val = [0, 0, 0, 0]\r\n cam = cv2.VideoCapture(0)\r\n ret_val, img = cam.read()\r\n while True:\r\n for i in range(0,4):\r\n if i < 2:\r\n val[i] = match_percent('reference1.jpg', 'sample'+str(i+5)+'.jpg')\r\n else:\r\n val[i] = match_percent('reference2.jpg', 'sample'+str(i+5)+'.jpg')\r\n\r\n values = val[:]\r\n for j in range(0,4):\r\n cur = val.index(min(val))\r\n amber(RED[cur], AMBER[cur], GREEN[cur])\r\n time.sleep(1)\r\n green(RED[cur], AMBER[cur], GREEN[cur])\r\n time.sleep(4-j)\r\n val[cur] = 100\r\n amber(RED[cur], AMBER[cur], GREEN[cur])\r\n red(RED[cur], AMBER[cur], GREEN[cur])\r\n val = [0, 0, 0, 0]\r\n transmit_data(values)\r\n\r\ndef get_images():\r\n ''' '''\r\n\r\nimport http.client as httplib\r\nimport urllib\r\ndef transmit_data(values):\r\n #setup_modem()\r\n key = 'C2JR3L7ILWRZLHQL'\r\n params = urllib.parse.urlencode({'field1': values[0], 'field2': values[1],'field3': values[2],'field4': values[3],'field5': 12.93496,'field6': 79.14688,'key':key }) \r\n 
headers = {\"Content-typZZe\": \"application/x-www-form-urlencoded\",\"Accept\": \"text/plain\"}\r\n conn = httplib.HTTPConnection(\"api.thingspeak.com:80\")\r\n try:\r\n conn.request(\"POST\", \"/update\", params, headers)\r\n response = conn.getresponse()\r\n print(response.status, response.reason)\r\n data = response.read()\r\n conn.close()\r\n except:\r\n print(\"Connection failed\")\r\n\r\ndef receive_data():\r\n ''' '''\r\n\r\nimport serial\r\nimport os, time\r\ndef setup_modem():\r\n port = serial.Serial(\"/dev/ttyAMA0\", baudrate=9600, timeout=1) \r\n port.flush()\r\n port.write(b'AT'+b'\\r')\r\n rcv = port.readline()\r\n return rcv\r\n\r\n#setup_modem()\r\nimport RPi.GPIO as GPIO\r\nRED = [7, 15, 40, 26]\r\nAMBER = [5, 13, 38, 24]\r\nGREEN = [3, 11, 36, 22]\r\nturns = [0, 0, 0, 0]\r\ntry:\r\n operate_led()\r\nexcept KeyboardInterrupt:\r\n GPIO.cleanup()\r\n","sub_path":"minor_project.py","file_name":"minor_project.py","file_ext":"py","file_size_in_byte":5470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"55445334","text":"\n\ndef is_special(num):\n oMap = {}\n ox = num\n num = str(num)\n for i in range(10):\n oMap[str(i)] = 0\n for x in num:\n oMap[x] += 1\n for i in range(1, 7):\n x = str(i * ox)\n cMap = {}\n for j in range(10):\n cMap[str(j)] = 0\n for c in x:\n cMap[c] += 1\n if cMap[c] > oMap[c]:\n return False\n for j in range(10):\n if cMap[str(j)] != oMap[str(j)]:\n return False\n return True\n\n\nbegin = 100000\nend = 1666666\nfound = False\nwhile not found:\n c = begin + 9 - (begin % 9)\n while c < end:\n if is_special(c):\n print(c)\n found = True\n break\n c += 9\n begin *= 10\n end *= 10\n end += 6","sub_path":"Solution-052.py","file_name":"Solution-052.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"318213868","text":"'''\nCalculate statistics of the radius of the ionisation front\nSam Geen, November 2014\n'''\n\nimport customplot\nimport pymses, Hamu\nimport numpy as np\nimport matzner2002, errtimeplot\nfrom scipy.spatial import ConvexHull\nimport collections\nimport matplotlib.pyplot as plt\nfrom pymses.utils import constants as C\nfrom pymses.filters import CellsToPoints\nimport profilesphere,rayprof\n\nsims = collections.OrderedDict()\n\ndef Samples(amr,npts=1e6):\n centre=[0.5,0.5,0.5]\n radius = 0.5\n sph = pymses.utils.regions.Sphere(centre, radius)\n return pymses.analysis.sample_points(amr, sph.random_points(npts))\n\ndef GetRanges(snap):\n amr = snap.amr_source([\"rho\",\"xHII\"])\n cell_source = CellsToPoints(amr)\n cells = Samples(amr)\n rhos = cells[\"rho\"]\n ions = cells[\"xHII\"]\n posns = cells.points-0.5\n thresh = 0.2 # The code considers everything above 0.2 to be ionised\n try:\n pions = posns[ions > thresh,:]\n except:\n return np.zeros(5)\n # Get the points on a convex hull around the ionised gas\n if len(pions) == 0:\n return np.zeros(5)\n try:\n hull = ConvexHull(pions)\n except:\n return np.zeros(5)\n radii = np.sqrt(np.sum(hull.points[hull.vertices,:]**2,1))\n # Find ranges\n return np.percentile(radii, [1,25,50,75,100])\n\nGetRangesHamu = Hamu.Algorithm(GetRanges)\n\ndef MedianRadiusCoarse(snap):\n # Uses the function above to get a coarse sampling of the median radius\n # median is the 50th percentile above\n return GetRangesHamu(snap.hamusnap)[2] \n\ndef MedianRadiusProfile(snap):\n # Get the mean radius from the profiles\n # Assuming all gas is either x=0 or x=1, find point where 
x = 0.5\n    # This gives us the mean radius of the ionisation front by angular position\n    # rcut included for when the centre needs to be sampled better\n    # HACK\n    rcut = None\n    if \"_C/\" in snap.hamusnap.CachePath(): \n        rcut = 0.05\n    if rcut is None:\n        r,x = profilesphere.profileHamu(snap.hamusnap,\"xHII\",1e6)\n    else:\n        r,x = profilesphere.profileHamu(snap.hamusnap,\"xHII\",1e6,rcut=rcut)\n    xlim = 0.5\n    if x.max() < xlim:\n        # No ions? return zero\n        return 0.0\n    #if x.max() < xlim:\n    #    # Allow low levels of ionisation to count as a radius in a pinch\n    #    xlim = x.max()\n    return r[x >= xlim].max()\n    \ndef plot():\n    global sims\n    Myr = 3.15569e13\n    plt.clf()\n    for simname,col in sims.iteritems():\n        sim = Hamu.Simulation(simname)\n        times = sim.Times()\n        utime = sim.Snapshots()[0].RawData().info[\"unit_time\"].express(C.Myr)\n        #boxlen = sim.Snapshots()[0].RawData().info[\"boxlen\"]\n        boxlen = 1.0\n        times = times * utime - 1.25\n        ntimes = len(times)\n        rmin = np.zeros((ntimes))\n        r25p = np.zeros((ntimes))\n        rmed = np.zeros((ntimes))\n        r75p = np.zeros((ntimes))\n        rmax = np.zeros((ntimes))\n        i = 0\n        # Fill data\n        for snap in sim.Snapshots():\n            #rmin[i],r25p[i],rmed[i],r75p[i],rmax[i] = GetRangesHamu(snap)\n            rmin[i],r25p[i],rmed[i],r75p[i],rmax[i] = \\\n                errtimeplot.radialstatsHamu(snap)\n            i += 1\n        # Make lines\n        taba = np.concatenate((times,times[::-1])) # there and back again \n        minmax = np.concatenate((rmin,rmax[::-1]))\n        iqr = np.concatenate((r25p,r75p[::-1]))\n        plt.fill(taba,minmax*boxlen,alpha=0.33,edgecolor='none',facecolor=col)\n        plt.fill(taba,iqr*boxlen, alpha=0.33,edgecolor='none',facecolor=col)\n        plt.plot(times,rmed*boxlen,col,label=simname)\n        # Compare with matzner\n        tm2002,rm2002 = matzner2002.Findrii(sim)\n        plt.plot(tm2002,rm2002,col+\"--\")\n    plt.xlabel(\"Time / Myr\")\n    plt.ylabel(\"Ionisation Front Radius / pc\")\n    plt.legend(loc=\"upper left\",fontsize=\"x-small\")\n    plt.savefig(\"../plots/radialstats.pdf\")\n\nif __name__==\"__main__\":\n    sims[\"N00_M4_B02\"] = \"k\"\n    sims[\"N47_M4_B02\"] = \"b\"\n    sims[\"N48_M4_B02\"] = \"r\"\n    sims[\"N49_M4_B02\"] = \"g\"\n    plot()\n","sub_path":"MCRT/scripts/radii.py","file_name":"radii.py","file_ext":"py","file_size_in_byte":4032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"406050654","text":"# header\nMOD = 1000000007\n\n# function\ndef solve(N, A):\n    ret = 0\n    A_sum = [0] * (N+1)\n\n    for i, a in enumerate(A):\n        A_sum[i+1] = (A_sum[i] + a)%MOD\n\n    for i in range(N-1):\n        ret += (A[i] * (A_sum[N] - A_sum[i+1]))%MOD\n        ret %= MOD\n\n    return ret\n\n\n# main\nif __name__ == '__main__':\n    N = int(input())\n    A = list(map(int, input().split()))\n    print(solve(N, A))","sub_path":"venv/ABC177/abc177_c.py","file_name":"abc177_c.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"40529566","text":"from keras.models import Model, load_model\r\nfrom scipy import misc, ndimage\r\nfrom keras import backend as K\r\nfrom keras.layers import *\r\nfrom keras.optimizers import Adam\r\nimport os\r\nfrom keras.losses import binary_crossentropy\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom skimage import transform\r\nimport cv2\r\n\r\n# Constants\r\nHEIGHT = 1024\r\nWIDTH = 512\r\nCHANNELS = 1\r\n\r\ndef down(filters, input_):\r\n    down_ = Conv2D(filters, (3, 3), padding='same')(input_)\r\n    down_ = BatchNormalization(epsilon=1e-4)(down_)\r\n    down_ = Activation('relu')(down_)\r\n    down_ = 
Conv2D(filters, (3, 3), padding='same')(down_)\r\n down_ = BatchNormalization(epsilon=1e-4)(down_)\r\n down_res = Activation('relu')(down_)\r\n down_pool = MaxPooling2D((2, 2), strides=(2, 2))(down_)\r\n return down_pool, down_res\r\n\r\n\r\ndef up(filters, input_, down_):\r\n up_ = UpSampling2D((2, 2))(input_)\r\n up_ = concatenate([down_, up_], axis=3)\r\n up_ = Conv2D(filters, (3, 3), padding='same')(up_)\r\n up_ = BatchNormalization(epsilon=1e-4)(up_)\r\n up_ = Activation('relu')(up_)\r\n up_ = Conv2D(filters, (3, 3), padding='same')(up_)\r\n up_ = BatchNormalization(epsilon=1e-4)(up_)\r\n # up_ = Activation('relu')(up_)\r\n # up_ = Conv2D(filters, (3, 3), padding='same')(up_)\r\n # up_ = BatchNormalization(epsilon=1e-4)(up_)\r\n up_ = Activation('relu')(up_)\r\n return up_\r\n\r\n\r\ndef get_unet_1024(input_shape=(WIDTH, HEIGHT, CHANNELS), num_classes=1):\r\n# def get_unet_1024(input_shape=(HEIGHT, WIDTH, CHANNELS), num_classes=1):\r\n inputs = Input(shape=input_shape)\r\n\r\n # down0b, down0b_res = down(8, inputs)\r\n down0a, down0a_res = down(16, inputs)\r\n down0, down0_res = down(32, down0a)\r\n down1, down1_res = down(64, down0)\r\n down2, down2_res = down(128, down1)\r\n down3, down3_res = down(256, down2)\r\n down4, down4_res = down(512, down3)\r\n\r\n center = Conv2D(512, (3, 3), padding='same')(down4)\r\n center = BatchNormalization(epsilon=1e-4)(center)\r\n center = Activation('relu')(center)\r\n center = Conv2D(512, (3, 3), padding='same')(center)\r\n center = BatchNormalization(epsilon=1e-4)(center)\r\n center = Activation('relu')(center)\r\n\r\n up4 = up(512, center, down4_res)\r\n up3 = up(256, up4, down3_res)\r\n up2 = up(128, up3, down2_res)\r\n up1 = up(64, up2, down1_res)\r\n up0 = up(32, up1, down0_res)\r\n up0a = up(16, up0, down0a_res)\r\n # up0b = up(8, up0a, down0b_res)\r\n\r\n #final_conv1 = Conv2D(16, (3, 3), padding='same', activation='relu', name='final_conv1')(up0a)\r\n classify = Conv2D(num_classes, (1, 1), activation='sigmoid', name='final_layer')(up0a)\r\n\r\n model = Model(inputs=inputs, outputs=classify)\r\n\r\n return model\r\n\r\n\r\ndef dice_coef(y_true, y_pred, smooth=1):\r\n y_true_f = K.flatten(y_true)\r\n y_pred_f = K.flatten(y_pred)\r\n\r\n intersection = K.sum(y_true_f * y_pred_f)\r\n return (2. 
* intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)\r\n\r\n\r\ndef dice_coef_loss(y_true, y_pred):\r\n    return 1 - dice_coef(y_true, y_pred)\r\n\r\n\r\ndef bce_dice_loss(y_true, y_pred):\r\n    return binary_crossentropy(y_true, y_pred) + dice_coef_loss(y_true, y_pred)\r\n\r\nmodel_path = \"./models\"\r\nmodel = get_unet_1024()\r\nmodel.load_weights(os.path.join(model_path, \"model-1541079546-weights.h5\"))\r\nmodel.compile(loss=bce_dice_loss, optimizer=Adam(1e-5), metrics=[dice_coef])\r\n\r\nmodel = load_model(os.path.join(model_path, 'model-1541079546.h5'),\r\n                   custom_objects={'bce_dice_loss': bce_dice_loss, 'dice_coef': dice_coef})\r\n\r\nimg_path = \"./data/test/1000002.bmp\"\r\nimg_ = misc.imread(img_path)\r\nimg_ = np.transpose(img_, (1, 0))\r\nprint(\"img_ shape:\", img_.shape)\r\n\r\n# size = (512, 512)\r\n# img_ = cv2.resize(img, size)\r\nplt.imshow(img_)\r\nplt.show()\r\n\r\nimg_ = np.expand_dims(img_, axis=2)\r\nimg_ = np.expand_dims(img_, axis=0)\r\n\r\nprint(\"input shape:\", img_.shape)\r\nimg_out = model.predict(img_)\r\nprint(\"output shape\", img_out.shape)\r\nimg_out = np.squeeze(img_out, axis=0)\r\nprint(\"output shape\", img_out.shape)\r\nimg_out[img_out > 0.5] = 255\r\ncv2.imwrite(\"out.bmp\", img_out)","sub_path":"predict_v2.py","file_name":"predict_v2.py","file_ext":"py","file_size_in_byte":4062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"246115001","text":"import sys\nimport json\nimport requests\nfrom requests.structures import CaseInsensitiveDict\nfrom requests.auth import HTTPBasicAuth\n\nurl = \"https://influxdb.tools.np.priority.o2.co.uk/api/org/invites\"\n\nheaders = CaseInsensitiveDict()\nheaders[\"Accept\"] = \"application/json\"\nheaders[\"Content-Type\"] = \"application/json\"\nheaders[\"X-Grafana-Org-Id\"] = \"1\"\n\nf = open(\"exportUsers.json\", \"r\") \n\ny = json.loads(f.read())\n\nfor x in range(len(y)):\n    if not (\"admin@localhost\") in (y[x][\"email\"]):\n        payload = '{{\"loginOrEmail\":\"{}\",\"name\":\"{}\",\"sendEmail\":true,\"role\":\"{}\"}}'.format((y[x][\"email\"]), (y[x][\"name\"]) ,(y[x][\"role\"]))\n        print(payload)\n        resp = requests.post(url, headers=headers, auth=HTTPBasicAuth(sys.argv[1], sys.argv[2]) ,data=payload)\n        print(resp.content)","sub_path":"files/importGrafana.py","file_name":"importGrafana.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"611375247","text":"ford = 100000\r\nchevrolet = 120000\r\nfiat = 80000\r\n\r\np2 = 50000\r\np4 = 65000\r\np5 = 78000\r\n\r\nblanco = 10000\r\nazul = 20000\r\nnegro = 30000\r\n\r\nhay_cliente = 'si'\r\ncontador = 0\r\n\r\nventa = []\r\n\r\ndef marca_input():\r\n    marca = \"\"\r\n    while marca != \"ford\" and marca != \"chevrolet\" and marca != \"fiat\":\r\n        marca = input(\"Ingrese por favor la marca de su nuevo auto: \")\r\n        if marca != \"ford\" and marca != \"chevrolet\" and marca != \"fiat\":\r\n            print(\"Debe elegir una marca disponible\")\r\n    return marca\r\n\r\n\r\ndef puertas_input():\r\n    puertas = \"\"\r\n    while puertas != '2' and puertas != '4' and puertas != '5':\r\n        puertas = input(\"Ingrese por favor la cantidad de puertas de su nuevo auto: \")\r\n        if puertas != '2' and puertas != '4' and puertas != '5':\r\n            print(\"Debe elegir la cantidad de puertas disponible\")\r\n    return puertas\r\n\r\n\r\ndef color_input():\r\n    color = \"\"\r\n    while color != 'blanco' and color != 'azul' and color != 'negro':\r\n        color = input(\"Ingrese por favor el color de su nuevo auto: \")\r\n        if color != 'blanco' 
and color != 'azul' and color != 'negro':\n print(\"Debe elegir un color disponible\")\n return color\n\n\ndef precio_marcas(marca):\n if marca == \"ford\":\n return ford\n elif marca == \"chevrolet\":\n return chevrolet\n elif marca == \"fiat\":\n return fiat\n\n\ndef precio_puertas(puertas):\n if puertas == '2':\n return p2\n elif puertas == '4':\n return p4\n elif puertas == '5':\n return p5\n\n\ndef precio_color(color):\n if color == \"blanco\":\n return blanco\n elif color == \"azul\":\n return azul\n elif color == \"negro\":\n return negro\n\n\ndef precio_inicial(marca,puertas,color):\n return precio_marcas(marca) + precio_puertas(puertas) + precio_color(color)\n\n\ndef descuento(contador,valor):\n if contador > 5 and contador < 11:\n return valor * 0.9\n elif contador > 10 and contador < 51:\n return valor * 0.85\n elif contador > 50:\n return valor * 0.82\n else:\n return valor\n\n\nwhile hay_cliente == 'si':\n contador = contador + 1\n print('Cliente Nº: ' + str(contador))\n\n nombre = input(\"Ingrese por favor su nombre: \")\n apellido = input(\"Ingrese por favor su apellido: \")\n marca = marca_input()\n puertas = puertas_input()\n color = color_input()\n precio = precio_inicial(marca,puertas,color)\n\n items = {'cliente': contador, 'nombre': nombre, 'apellido': apellido, 'marca': marca, 'puertas': puertas,\n 'color': color, 'precio': precio}\n\n venta.append(items)\n\n hay_cliente = input('¿Hay mas clientes?: ')\n if hay_cliente == 'no':\n cantidad = len(venta)\n for i in venta:\n precio = descuento(cantidad, i['precio'])\n print(\"La persona: \" + i['nombre'] + \" \" + i['apellido'] + \" \"\"compro un auto \" + i['marca'] + \" de \"\n + str(i['puertas']) + \" puertas y color \" + i['color'] + \" con un precio de $\" + str(precio))","sub_path":"autos_parte2.py","file_name":"autos_parte2.py","file_ext":"py","file_size_in_byte":2960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"179146026","text":"from pts.vzGrid import *\nfrom pyramid.view import view_config\nfrom pts.vzComponents import *\nfrom pts.login.views import jsonLogin\nfrom pts.vzSQL import mysql_db\nfrom pts.financial.settings import VETORZERO_ID, LOBO_ID\nfrom pts.vzLog import log_view\nimport datetime, time\nfrom calendar import monthrange\n\nCONDITION_CURRENT_MONTH = \"\"\" AND (bat.invoice_id IN \n (SELECT invoice_id \n FROM invoice \n WHERE (invoice_status_id = 2 \n OR (invoice_status_id = 1 AND invoice_value_paid = 0)) \n AND YEAR(invoice_expiration_date) = YEAR(CURRENT_DATE) \n AND MONTH(invoice_expiration_date) = MONTH(CURRENT_DATE) \n AND DAY(invoice_expiration_date) >= DAY(CURRENT_DATE)) \n OR bat.invoice_id IN \n (SELECT invoice_id \n FROM invoice \n WHERE invoice_check is not null \n AND invoice_check != '' \n AND invoice_value_paid = 0 \n AND invoice_expiration_date < CURDATE()))\"\"\"\n\nCONDITION_NEXT_MONTH = \"\"\" AND bat.invoice_id IN (SELECT invoice_id \n FROM invoice \n WHERE (invoice_status_id = 2 \n OR (invoice_status_id = 1 AND invoice_value_paid = 0)) \n AND YEAR(invoice_expiration_date) = YEAR(CURRENT_DATE + INTERVAL 1 MONTH) \n AND MONTH(invoice_expiration_date) = MONTH(CURRENT_DATE + INTERVAL 1 MONTH))\"\"\"\n\nCONDITION_3_MONTH = \"\"\" AND bat.invoice_id IN (SELECT invoice_id \n FROM invoice \n WHERE (invoice_status_id = 2 \n OR (invoice_status_id = 1 AND invoice_value_paid = 0)) \n AND invoice_expiration_date < (CURDATE() + INTERVAL 3 MONTH))\"\"\"\n\nCONDITION_VENCIDOS_ABERTOS = \"\"\" AND bat.bank_account_id_source = 
ba.bank_account_id \n AND bat.invoice_id in (SELECT invoice_id \n FROM invoice \n WHERE (invoice_status_id = 2 \n OR (invoice_status_id = 1 and invoice_value_paid = 0)) \n AND invoice_expiration_date < CURDATE() \n AND invoice_expiration_date > (CURRENT_DATE - INTERVAL 1 MONTH))\"\"\"\n\nclass BankAccount(vzMultipleComponent):\n\n table_name = \"bank_account\"\n\n type = \"single\"\n\n name =\"bankaccount\"\n\n def __init__(self):\n super(vzMultipleComponent,self).__init__()\n month_curr = \"fluxo previsto \"+[\"Janeiro\",\"Fevereiro\",\"Marco\",\"Abril\",\"Maio\",\"Junho\",\"Julho\",\"Agosto\",\"Setembro\",\"Outubro\",\"Novembro\",\"Dezembro\"][time.localtime().tm_mon-1]\n month_next = \"fluxo previsto \"+[\"Janeiro\",\"Fevereiro\",\"Marco\",\"Abril\",\"Maio\",\"Junho\",\"Julho\",\"Agosto\",\"Setembro\",\"Outubro\",\"Novembro\",\"Dezembro\"][time.localtime().tm_mon%12]\n\n self.fields = {\n \"id\" : [\"hidden\",False,False,\"bank_account.bank_account_id\"],\n \"descricao\":[\"text\",True,True,\"bank_account.bank_account_desc\"],\n \"banco\":[\"select\",False,True,\"bank.bank_id\"],\n \"tipo\":[\"select\",True,True,\"bank_account_type.bank_account_type_id\"],\n \"nr. agencia\":[\"text\",False,False,\"bank_account.bank_account_branch_number\"],\n \"dig. agencia\":[\"text\",False,False,\"bank_account.bank_account_branch_dig\"],\n \"nr. conta\":[\"text\",False,False,\"bank_account.bank_account_number\"],\n \"dig. conta\":[\"text\",False,False,\"bank_account.bank_account_dig\"],\n \"detalhes\":[\"text\",True,False,\"bank_account.bank_account_detail\"],\n \"status atual\":[\"select\",False,True,\"bank_account_status.bank_account_status\"],\n \"status\":[\"none\",False,False,\"bank_account_status.bank_account_status_desc\"],\n \"a conciliar\":[\"money\",True,False,\"disable\",\"bank_account.bank_account_foreseen_balance\"],\n \"saldo atual\":[\"money\",True,False,\"disable\",\"bank_account.bank_account_balance\"],\n month_curr:[\"money\",True,False,\"disable\",\"bank_account.bank_account_foreseen_balance\"],\n month_next:[\"money\",True,False,\"disable\",\"bank_account.bank_account_foreseen_balance\"],\n \"fluxo previsto 3 meses\":[\"money\",True,False,\"disable\",\"bank_account.bank_account_foreseen_balance\"],\n }\n\n self.list_field = [\"id\",\"descricao\",\"banco\",\"tipo\",\"nr. agencia\",\"dig. agencia\",\"nr. conta\",\"dig. 
conta\", \"detalhes\", \"status atual\", \"status\", \"a conciliar\", \"saldo atual\", month_curr, month_next, \"fluxo previsto 3 meses\" ]\n\n list_all_sql = \"\"\"SELECT DISTINCT bank_account.bank_account_id,\n bank_account.bank_account_desc,\n bank.bank_desc,\n bank_account_type.bank_account_type_desc,\n bank_account.bank_account_branch_number,\n bank_account.bank_account_branch_dig,\n bank_account.bank_account_number,\n bank_account.bank_account_dig,\n bank_account.bank_account_detail,\n bank_account_status,\n bank_account_status.bank_account_status_desc,\n 0,\n bank_account.bank_account_balance,\n bank_account.bank_account_foreseen_balance,\n 0,\n 0 \n FROM bank_account, bank, bank_account_type, entity_x_bank_account, bank_account_status, user_x_bank_account \n WHERE bank_account.bank_id = bank.bank_id \n AND bank_account_type.bank_account_type_id = bank_account.bank_account_type_id \n AND user_x_bank_account.bank_account_id = bank_account.bank_account_id \n AND bank_account.bank_account_id = entity_x_bank_account.bank_account_id\n AND bank_account.bank_account_status = bank_account_status.bank_account_status_id \"\"\"\n\n select_unique_id_sql = \"\"\"SELECT bank_account_id,\n bank_account_desc,\n bank_id,\n bank_account_type_id,\n bank_account_branch_number,\n bank_account_branch_dig,\n bank_account_number,\n bank_account_dig,\n bank_account_detail,\n bank_account_status,\n bank_account_status.bank_account_status_desc,\n 0,\n bank_account_balance,bank_account_foreseen_balance,\n 0,\n 0 \n FROM bank_account, bank_account_status \n WHERE bank_account.bank_account_id = %s\n AND bank_account.bank_account_status = bank_account_status.bank_account_status_id\"\"\"\n\n select_saldo_unique_sql = \"\"\"SELECT IFNULL(sum(bank_account_transaction_value_paid),0) as valor\n FROM bank_account_transaction\n WHERE bank_account_id_source = %s\n AND (invoice_id IS NULL OR invoice_id IN (SELECT invoice_id FROM invoice where invoice_status_id = 1))\"\"\"\n\n select_saldo_unique_sql_previsto = \"\"\"SELECT IFNULL(sum(bank_account_transaction_value),0) as valor\n FROM bank_account_transaction where bank_account_id_source = %s\n AND invoice_id IN (SELECT invoice_id FROM invoice WHERE invoice_status_id = 2 OR (invoice_status_id = 1 AND invoice_value_paid = 0))\"\"\"\n\n select_saldo_unique_sql_previsto_atual = \"\"\"SELECT IFNULL(sum(bat.bank_account_transaction_value),0) as valor \n FROM bank_account_transaction bat\n WHERE bat.bank_account_id_source = %s \"\"\"\n select_saldo_unique_sql_previsto_atual += CONDITION_CURRENT_MONTH \n\n select_vencidos_abertos = \"\"\"SELECT IFNULL(sum(bat.bank_account_transaction_value),0) as valor \n FROM bank_account_transaction bat, bank_account ba \n WHERE ba.bank_account_id = %s AND ba.bank_account_type_id IN (11,13) \"\"\"\n select_vencidos_abertos += CONDITION_VENCIDOS_ABERTOS\n\n select_saldo_unique_sql_previsto_seguinte = \"\"\"SELECT IFNULL(sum(bat.bank_account_transaction_value),0) as valor \n FROM bank_account_transaction bat\n WHERE bat.bank_account_id_source = %s \"\"\"\n select_saldo_unique_sql_previsto_seguinte += CONDITION_NEXT_MONTH\n\n select_saldo_unique_sql_previsto_tres_meses = \"\"\"SELECT IFNULL(sum(bat.bank_account_transaction_value),0) as valor \n FROM bank_account_transaction bat\n WHERE bat.bank_account_id_source = %s \"\"\" \n select_saldo_unique_sql_previsto_tres_meses += CONDITION_3_MONTH\n\n select_id_sql = list_all_sql + \" AND bank_account.bank_account_id = %s \"\n\n count_sql = \"\"\"SELECT COUNT(DISTINCT(bank_account.bank_account_id))\n 
FROM bank_account, bank, bank_account_type, entity_x_bank_account, bank_account_status, user_x_bank_account \n WHERE bank_account.bank_id = bank.bank_id \n AND bank_account_type.bank_account_type_id = bank_account.bank_account_type_id \n AND user_x_bank_account.bank_account_id = bank_account.bank_account_id \n AND bank_account.bank_account_id = entity_x_bank_account.bank_account_id\n AND bank_account.bank_account_status = bank_account_status.bank_account_status_id \"\"\"\n\n delete_sql = \"DELETE FROM bank_account WHERE bank_account_id = %s\"\n\n list_search_sql = list_all_sql + \" and bank_account.bank_account_desc LIKE %s \"\n\n form_select_sql = { \"status\" : \"SELECT bank_account_status_id, bank_account_status_desc from bank_account_status\" }\n\n def soma_saldo_grid(self, json):\n i = 0\n saldo_total = 0\n saldo_previsto_total = 0\n saldo_previsto_total_atual = 0\n saldo_previsto_total_seguinte = 0\n vencidos_abertos_total = 0\n while i < len(json[0][\"components\"][0][\"body\"]):\n json[0][\"components\"][0][\"body\"][i][\"line\"][12] = 0\n bank_id = json[0][\"components\"][0][\"body\"][i][\"line\"][0]\n saldo = mysql_db.execute(self.select_saldo_unique_sql, bank_id).fetchall()[0][0]\n saldo_previsto_atual = mysql_db.execute(self.select_saldo_unique_sql_previsto_atual, bank_id).fetchall()[0][0]\n saldo_previsto_seguinte = mysql_db.execute(self.select_saldo_unique_sql_previsto_seguinte, bank_id).fetchall()[0][0]\n saldo_previsto_tres_meses = mysql_db.execute(self.select_saldo_unique_sql_previsto_tres_meses, bank_id).fetchall()[0][0]\n vencidos_abertos = mysql_db.execute(self.select_vencidos_abertos, bank_id).fetchall()[0][0]\n if vencidos_abertos:\n json[0][\"components\"][0][\"body\"][i][\"line\"][11] = \"%.2f\" % vencidos_abertos\n vencidos_abertos_total = vencidos_abertos_total + vencidos_abertos\n else:\n json[0][\"components\"][0][\"body\"][i][\"line\"][11] = \"\"\n if saldo:\n json[0][\"components\"][0][\"body\"][i][\"line\"][12] = \"%.2f\" % saldo\n saldo_total = saldo_total + saldo\n if saldo_previsto_atual:\n json[0][\"components\"][0][\"body\"][i][\"line\"][13] = \"%.2f\" % saldo_previsto_atual\n saldo_previsto_total_atual = saldo_previsto_total_atual + saldo_previsto_atual\n else:\n json[0][\"components\"][0][\"body\"][i][\"line\"][13] = \"0.00\"\n if saldo_previsto_seguinte:\n json[0][\"components\"][0][\"body\"][i][\"line\"][14] = \"%.2f\" % saldo_previsto_seguinte\n saldo_previsto_total_seguinte = saldo_previsto_total_seguinte + saldo_previsto_seguinte\n else:\n json[0][\"components\"][0][\"body\"][i][\"line\"][14] = \"0.00\"\n\n if saldo_previsto_tres_meses:\n json[0][\"components\"][0][\"body\"][i][\"line\"][15] = \"%.2f\" % saldo_previsto_tres_meses\n saldo_previsto_total = saldo_previsto_total + saldo_previsto_tres_meses\n else:\n json[0][\"components\"][0][\"body\"][i][\"line\"][15] = \"0.00\"\n\n i = i+1\n json[0][\"components\"][0][\"body\"].append({'line': ['', '', '', '', None, None, None, None, None, None, None, \"\", \"\", \"\", \"\", \"\"]})\n json[0][\"components\"][0][\"body\"].append({'line': ['0', 'Total Geral', '', '', None, None, None, None, None, None, \"\", \"%.2f\" % vencidos_abertos_total, \"%.2f\" % saldo_total, \"%.2f\" % saldo_previsto_total_atual, \"%.2f\" % saldo_previsto_total_seguinte, \"%.2f\" % saldo_previsto_total]})\n return json\n\n def soma_saldo_unique(self, json):\n bank_id = json[0][\"components\"][0][\"body\"][0][\"id\"]\n saldo = mysql_db.execute(self.select_saldo_unique_sql, bank_id).fetchall()[0][0]\n if saldo:\n 
json[0][\"components\"][0][\"body\"][0][\"saldo atual\"] = \"%.2f\" % saldo\n return json\n\n def soma_saldo_unique_previsto(self, json):\n bank_id = json[0][\"components\"][0][\"body\"][0][\"id\"]\n saldo = mysql_db.execute(self.select_saldo_unique_sql, bank_id).fetchall()[0][0]\n saldo = saldo + mysql_db.execute(self.select_saldo_unique_sql_previsto, bank_id).fetchall()[0][0]\n if saldo:\n json[0][\"components\"][0][\"body\"][0][\"saldo previsto\"] = \"%.2f\" % saldo\n return json\n\n\nclass BankAccountEntity(BankAccount):\n\n add_entity_bank_account_sql = \"INSERT INTO entity_x_bank_account (entity_id , bank_account_id ) VALUES (%s, %s)\"\n delete_entity_bank_account_sql = \"DELETE from entity_x_bank_account where entity_id = %s and bank_account_id = %s\"\n add_user_bank_account_sql = \"INSERT INTO user_x_bank_account (user_id , bank_account_id ) VALUES (%s, %s)\"\n\n def __init__(self, request):\n super(BankAccountEntity,self).__init__()\n self.entity_id_company = request.session['session']['entity_id_company']\n self.user_id = request.session['session']['user_id']\n\n if int(self.entity_id_company) > 1:\n self.list_all_sql = self.list_all_sql + \" and entity_x_bank_account.entity_id = %s and entity_x_bank_account.bank_account_id = bank_account.bank_account_id \"%self.entity_id_company\n self.list_search_sql = self.list_search_sql + \" and entity_x_bank_account.entity_id = %s and entity_x_bank_account.bank_account_id = bank_account.bank_account_id \"%self.entity_id_company\n self.count_sql = self.count_sql + \" and entity_x_bank_account.entity_id = %s and entity_x_bank_account.bank_account_id = bank_account.bank_account_id \"%self.entity_id_company\n else:\n self.list_all_sql = self.list_all_sql + \" and entity_x_bank_account.entity_id in (%s,%s) \"%(str(VETORZERO_ID), str(LOBO_ID))\n self.list_search_sql = self.list_search_sql + \" and entity_x_bank_account.entity_id in (%s,%s) \"%(str(VETORZERO_ID), str(LOBO_ID))\n self.count_sql = self.count_sql + \" and entity_x_bank_account.entity_id in (%s,%s) \"%(str(VETORZERO_ID), str(LOBO_ID))\n\n self.list_all_sql = self.list_all_sql + \" and user_x_bank_account.user_id = %s and user_x_bank_account.bank_account_id = bank_account.bank_account_id order by bank_account.bank_account_desc\"%self.user_id\n self.list_search_sql = self.list_search_sql + \" and user_x_bank_account.user_id = %s and user_x_bank_account.bank_account_id = bank_account.bank_account_id order by bank_account.bank_account_desc\"%self.user_id\n self.count_sql = self.count_sql + \" and user_x_bank_account.user_id = %s and user_x_bank_account.bank_account_id = bank_account.bank_account_id order by bank_account.bank_account_desc\"%self.user_id\n\n def add(self,params_dict,id=None):\n bk_id = super(BankAccountEntity,self).add(params_dict,id)\n mysql_db.execute(self.add_entity_bank_account_sql,(self.entity_id_company,bk_id))\n mysql_db.execute(self.add_user_bank_account_sql,(self.user_id,bk_id)) \n\n return bk_id\n\n def delete(self,params_dict):\n mysql_db.execute(self.delete_entity_bank_account_sql,(self.entity_id_company,params_dict[\"bankaccount_id\"]))\n #constroi um dicionario pelos nomes dos componentes\n components = dict((comp.name,comp) for comp in self.components())\n #copia o dicionario de parametros\n out_params = params_dict.copy()\n for param in params_dict:\n #obtem nome do componente\n component = self._param_to_component_name(param)\n #obtem valor do id\n id = params_dict[param]\n #se o componente, for o principal\n if component == self.name:\n #efetua o delete 
principal\n st = self.main_delete(id)\n #se apagou mais do que 1 linha, foi efetuado com sucesso\n if st > 0: \n out_params[param]=True\n else:\n out_params[param]=False\n #se o componente nao for um principal\n elif component in components.keys():\n #apga o componente\n st = components[component].delete(id)\n #se apagou mais do que 1 linha, foi efetuado com sucesso\n if st > 0: \n out_params[param]=True\n else:\n out_params[param]=False\n return out_params\n\n@view_config(route_name='financial.bankaccount.grid.list' , renderer=\"json\")\n@jsonLogin\n@log_view\ndef account_grid_list(request):\n bankAccount = BankAccountEntity(request)\n grid = vzMultipleComponentJsonGrid(model=bankAccount)\n params_c = request.params.copy()\n params_c[\"limit\"] = \"100\"\n json = grid.list_grid(params_c)\n json = bankAccount.soma_saldo_grid(json)\n return json\n\n@view_config(route_name='financial.bankaccount.select' , renderer=\"json\")\n@jsonLogin\ndef account_grid_select(request):\n grid = vzMultipleComponentJsonGrid(model = BankAccountEntity(request))\n return grid.form_field_select(request.params)\n\n@view_config(route_name='financial.bankaccount.grid.save',renderer=\"json\")\n@jsonLogin\n#efetua o save na tabela\ndef entity_grid_save(request):\n actions = vzMultipleComponentGridActions(BankAccountEntity(request))\n return actions.save(request)\n\n@view_config(route_name=\"financial.bankaccount.row.id\",renderer=\"json\")\n@jsonLogin\n#seleciona apenas o componente principal pelo id\ndef entity_select_unique(request):\n bankAccount = BankAccountEntity(request)\n grid = vzMultipleComponentJsonGrid(model=bankAccount)\n json = grid.select_unique(request.params)\n json = bankAccount.soma_saldo_unique(json)\n return json\n\n@view_config(route_name='financial.bankaccount.grid.del',renderer=\"json\")\n@jsonLogin\n#apaga um elemento pelo id\ndef entity_delete(request):\n actions = vzMultipleComponentGridActions(BankAccountEntity(request))\n return actions.delete(request)\n","sub_path":"financial/bank_account.py","file_name":"bank_account.py","file_ext":"py","file_size_in_byte":20188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"600967984","text":"import logging\nfrom flask import Flask, redirect, request, url_for, jsonify, render_template\nfrom flask_login import (\n LoginManager,\n current_user,\n login_required,\n login_user,\n logout_user,\n)\nfrom requests_oauthlib import OAuth2Session\nfrom requests.exceptions import HTTPError\nfrom utils.helpers import validate_number, to_E_164_number, return_facilities\nfrom clients.slack import Slack\nfrom clients.twilio import Twilio\nfrom clients.unomi import Unomi\nfrom clients.two_one_one import TwoOneOne\nfrom clients.aunt_bertha import AuntBertha\nfrom datetime import datetime\nfrom utils.user import User\n\nfrom oauthlib.oauth2 import WebApplicationClient\nimport requests\n\nimport sqlite3\nimport os\nimport json\n\n# Internal imports from helper scripts\nfrom db import init_db_command\nfrom user import User\n\n# TODO: Set up application logging\n\n# Configuration\nbasedir = os.path.abspath(os.path.dirname(__file__))\n\n\n# Configuration\nGOOGLE_CLIENT_ID = os.environ.get(\"GOOGLE_ID\", None)\nGOOGLE_CLIENT_SECRET = os.environ.get(\"GOOGLE_SECRET\", None)\nGOOGLE_DISCOVERY_URL = (\n \"https://accounts.google.com/.well-known/openid-configuration\"\n)\n\n# Application Creation\napp = Flask(__name__)\n# app.secret_key = os.environ[\"SECRET_KEY\"] or b'_5#y2L\"F4Q8z\\n\\xec]/' # or not 
working!?\napp.secret_key = b'_5#y2L\"F4Q8z\\n\\xec]/'\n\nslack_client = Slack()\ntwilio_client = Twilio()\nunomi_client = Unomi()\ntoo_client = TwoOneOne()\naunt_bertha_client = AuntBertha()\n\n@app.errorhandler(500)\ndef internal_error(error):\n return jsonify(error)\n\n\n# Flask-Login Manager\nlogin_manager = LoginManager()\nlogin_manager.init_app(app)\n\n@login_manager.unauthorized_handler\ndef unauthorized():\n return redirect(url_for('login'))\n\n## Configure Database\n#Naive database setup\ntry:\n init_db_command()\nexcept sqlite3.OperationalError:\n # Assume it's already been created\n pass\n\n\n# OAuth2 client setup\nclient = WebApplicationClient(GOOGLE_CLIENT_ID)\n\n# Flask-Login helper to retrieve a user from our db\n@login_manager.user_loader\ndef load_user(user_id):\n return User.get(user_id)\n\n@app.route('/')\n@login_required\ndef index():\n if current_user.is_authenticated:\n return render_template('index.html')\n else:\n return redirect(url_for('login'))\n\n\n@app.route('/terms')\ndef terms():\n return render_template('terms.html')\n\n\n@app.route('/submit', methods=['POST'])\ndef web_submit():\n \"\"\"\n Handles phone number submission coming from the website\n \"\"\"\n # Look for phone number from request form or request value:\n number = request.values.get('From', None)\n if not number:\n try:\n number = request.form[\"number\"]\n app.logger.error(\"Using web form submission.\")\n except KeyError as e:\n app.logger.error(\"Invalid submission.\")\n else:\n app.logger.error(\"Using SMS submission.\")\n number = validate_number(number)\n body = request.values.get('Body', None)\n\n channel, phone_number = slack_client.start_engagement(number, twilio_client)\n profile = unomi_client.create_profile(profile_id=channel,\n properties={'phoneNumber': phone_number})\n assert(profile['itemId'] == channel) # Sanity check\n unomi_client.track_event(channel, 'engagementStarted', {})\n if body: # Case when this is a SMS submission\n slack_client.forward_twilio_message(channel, body)\n unomi_client.track_inbound_message(channel, body)\n return render_template('confirmation.html')\n\n\n@app.route('/message', methods=['POST'])\ndef message():\n \"\"\"\n Callback for Twilio. 
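The Twilio number is assumed (deployment detail, not stated here) to have this endpoint configured as its inbound-SMS webhook; the POST carries form fields From, Body, FromZip, FromCity and FromState. 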
Messages posted to this route will forward the message\n to the Slack channel associated with the sender's phone number.\n \"\"\"\n body = request.values.get('Body', None)\n from_zip = request.values.get('FromZip', None)\n from_city = request.values.get('FromCity', None)\n from_state = request.values.get('FromState', None)\n\n number = to_E_164_number(request.values.get('From', None))\n channel = unomi_client.channel_from_phone_number(number)\n profile = unomi_client.profile_search(channel)\n app.logger.error(\"SLACK CHANNEL?: {0}\".format(slack_client.does_channel_exist(channel)))\n if not slack_client.does_channel_exist(channel):\n app.logger.error(\"Performing Web Submit: {0} {1}\".format(channel, body))\n web_submit() # Creates a profile\n else:\n app.logger.error(\"Channel Body: {0} {1}\".format(channel, body))\n slack_client.forward_twilio_message(channel, body)\n unomi_client.track_inbound_message(channel, body)\n if \"city\" not in profile[\"properties\"].keys():\n if from_city or from_state or from_zip:\n unomi_client.update_profile(\n profile_id=channel,\n properties={\n \"city\": from_city,\n \"state\": from_state,\n \"zipcode\": from_zip\n }\n )\n body = \":world_map: Approximate Location: {0}, {1} {2}\".format(from_city, from_state, from_zip)\n response = slack_client.forward_twilio_message(channel, body)\n\n return \"200\"\n\n\n\n@app.route('/text', methods=['POST'])\ndef text():\n \"\"\"\n Callback for Slack. /send commands in Slack will trigger a post to this\n route with the standard slash-command parameters.\n \"\"\"\n channel_name = request.values.get('channel_name', None)\n body = request.values.get('text', None)\n channel_id = request.values.get('channel_id', None)\n user_name = request.values.get('user_name', None)\n app.logger.debug(\"Request: {0}\".format(request.values))\n channel_name = slack_client.group_name_from_group_id(channel_id)\n number = unomi_client.phone_number_from_channel(channel_name)\n\n if number:\n text = \"@\" + user_name + \": \" + body\n try:\n twilio_client.text(number, text)\n except Exception as e:\n return jsonify(error=str(e)), 500\n app.logger.debug(\"Slack user: {0}\".format(user_name))\n unomi_client.track_outbound_message(channel_name, body, user_name)\n\n return jsonify(\n response_type='in_channel',\n text=\"Message sent\",\n )\n else:\n return \"no phone number found for this channel\", 400\n\n\n\n@app.route('/assessed', methods=['POST'])\ndef assessed():\n \"\"\"\n Callback for Slack, /assessed commands in Slack will trigger a post to this\n route with parameters.\n \"\"\"\n body = request.values.get('text', None)\n channel_id = request.values.get('channel_id', None)\n user_name = request.values.get('user_name', None)\n channel_name = slack_client.group_name_from_group_id(channel_id)\n profile = unomi_client.profile_search(channel_name)\n #profile[\"properties\"][\"assessed\"] = datetime.today().strftime(\"%Y-%m-%d\")\n profile[\"properties\"][\"hadAssessment\"] = 'yes'\n unomi_client.update_profile(channel_name, profile[\"properties\"])\n return jsonify(\n response_type='in_channel',\n text=\"Update saved.\",\n )\n\n@app.route('/treated', methods=['POST'])\ndef treated():\n \"\"\"\n Callback for Slack, /treated commands in Slack will trigger a post to this\n route with parameters.\n \"\"\"\n body = request.values.get('text', None)\n channel_id = request.values.get('channel_id', None)\n user_name = request.values.get('user_name', None)\n channel_name = slack_client.group_name_from_group_id(channel_id)\n profile = unomi_client.profile_search(channel_name)\n #profile[\"properties\"][\"treated\"] = 
datetime.today().strftime(\"%Y-%m-%d\")\n profile[\"properties\"][\"inTreatment\"] = 'yes'\n unomi_client.update_profile(channel_name, profile[\"properties\"])\n return jsonify(\n response_type='in_channel',\n text=\"Update saved.\",\n )\n\n\n@app.route('/need', methods=['POST'])\ndef need():\n \"\"\"\n Callback for Slack, /need commands in Slack will trigger a post to this\n route with parameters.\n\n The usage for /need is:\n\n /need [name]\n ex: /need bed\n\n \"\"\"\n app.logger.info(request.values)\n body = request.values.get('text', None)\n channel_id = request.values.get('channel_id', None)\n user_name = request.values.get('user_name', None)\n text = body.strip()\n need = {\n \"name\": text,\n \"timeStamp\" : datetime.now().strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n }\n channel_name = slack_client.group_name_from_group_id(channel_id)\n app.logger.info(channel_name)\n profile = unomi_client.profile_search(channel_name)\n # Update profile property with this need\n try:\n app.logger.info(profile)\n needs = profile[\"properties\"][\"needs\"]\n needs.append(need)\n # need entries are dicts and therefore unhashable, so dedupe by name instead of set()\n needs = list({n[\"name\"]: n for n in needs}.values())\n except (TypeError, KeyError):\n # the profile has no needs stored yet (missing key or non-list value)\n needs = [need]\n profile[\"properties\"][\"needs\"] = needs\n unomi_client.update_profile(channel_name, profile[\"properties\"])\n return jsonify(\n response_type='in_channel',\n text=\"Need saved: {0}\".format(text),\n )\n\n\n@app.route('/demographics', methods=['POST'])\ndef demographics():\n \"\"\"\n Callback for Slack, /demographic commands in Slack will trigger a post to this\n route with parameters.\n\n The usage for /demographic is:\n\n /demographic [key] [value]\n ex: /demographic gender male\n\n \"\"\"\n body = request.values.get('text', None)\n channel_id = request.values.get('channel_id', None)\n user_name = request.values.get('user_name', None)\n text = body.strip().split()\n key = text[0]\n value = text[1]\n demographic = {\n key: value\n }\n channel_name = slack_client.group_name_from_group_id(channel_id)\n profile = unomi_client.profile_search(channel_name)\n # Update profile property with this demographic\n try:\n demographics = profile[\"properties\"][\"demographics\"]\n demographics[key] = value\n except KeyError:\n demographics = {key: value}\n\n text = \"Demographic saved: {0}={1}\".format(key, value)\n profile[\"properties\"][\"demographics\"] = demographics\n unomi_client.update_profile(channel_name, profile[\"properties\"])\n return jsonify(\n response_type='in_channel',\n text=text\n )\n\n\n@app.route('/event', methods=['POST'])\ndef event():\n \"\"\"\n Callback for Slack, /event commands in Slack will trigger a post to this\n route with parameters.\n\n The usage for /event is:\n\n /event [name]\n ex: /event bed\n\n \"\"\"\n print(\"SLACK REQUEST: \", request.values)\n channel_name = request.values.get('channel_name', None)\n body = request.values.get('text', None)\n channel_id = request.values.get('channel_id', None)\n user_name = request.values.get('user_name', None)\n event = body.strip()\n profile = unomi_client.profile_search(channel_name) # NOTE: why is this function called?\n unomi_client.track_event(channel_name, 'userGenerated', {'value': event}, user_name)\n\n return jsonify(\n response_type='in_channel',\n text=\"Event saved: {0}\".format(event))\n\n\n@app.route('/stage', methods=['POST'])\ndef stage():\n \"\"\"\n Callback for Slack, /stage commands in 
Slack will trigger a post to this\n route with parameters.\n\n The usage for /stage is:\n\n /stage [stage] [notes]\n ex: /stage preparation meeting scheduled with physician\n\n \"\"\"\n print(\"SLACK REQUEST: \", request.values)\n channel_name = request.values.get('channel_name', None)\n body = request.values.get('text', None)\n channel_id = request.values.get('channel_id', None)\n user_name = request.values.get('user_name', None)\n stage = body.split()[0]\n notes = ' '.join(body.split()[1:])\n profile = unomi_client.profile_search(channel_name)\n unomi_client.track_event(channel_name, 'stageChange', {'value': stage, 'notes': notes}, user_name)\n profile = unomi_client.profile_search(channel_name)\n profile[\"properties\"][\"stage\"] = {\"name\":stage,\"notes\":notes}\n unomi_client.update_profile(channel_name, profile[\"properties\"])\n return jsonify(\n response_type='in_channel',\n text=\"Event saved\")\n\n\n@app.route('/facilities', methods=['POST'])\ndef facilities():\n zipcode = request.form[\"text\"]\n data= return_facilities(zipcode=zipcode)\n attachments = []\n for i in range(len(data)):\n attachment = {\n \"title\": \"Facility\",\n \"fields\": [{\n \"value\": \"Address: {0}\".format(data[i])\n }]\n }\n attachments.append(attachment)\n\n\n return jsonify(\n response_type='in_channel',\n text=\"Facilities\",\n attachments=attachments\n )\n\n\n@app.route('/beds', methods=['POST'])\ndef beds():\n \"\"\"\n Callback for Slack, /beds commands in Slack will trigger a post to this\n route with parameters.\n\n The usage for /beds is:\n\n /beds [county] [gender] [age]\n ex: /beds philadelphia male 21\n\n \"\"\"\n channel_name = request.values.get('channel_name', None)\n user_name = request.values.get('user_name', None)\n row = request.form[\"text\"].split()\n county, gender, age = None, None, None\n try:\n county = row[0]\n gender = row[1]\n age = row[2]\n except IndexError:\n # OK, only set if needed\n pass\n beds = return_beds(county, gender, age)\n attachments = []\n for bed in beds:\n attachment = {\n \"title\": bed[\"name\"],\n \"fields\": [{\n \"value\": \"Phone: {0}\".format(bed[\"phone\"])\n }]\n }\n attachments.append(attachment)\n if len(attachments) == 0: # No beds found\n event_info = {'county': county, 'age': age, 'gender':gender }\n unomi_client.track_event(channel_name, 'noBeds', event_info, user_name)\n text = \"No beds found, this event has been logged.\"\n else:\n text = \"Open Beds in {county}\".format(county=county)\n\n return jsonify(\n response_type='in_channel',\n text=text,\n attachments=attachments\n )\n\n@app.route('/aunt_bertha', methods=['POST'])\ndef aunt_bertha():\n \"\"\"\n Callback for Slack, /auntbertha commands in Slack will trigger a post to this\n route with parameters.\n\n The usage for /211 is:\n\n /auntbertha [zipcode] [keywords]\n ex: /auntberta 19107 women recovery\n\n \"\"\"\n app.logger.debug(\"SLACK REQUEST: \", request.values)\n channel_name = request.values.get('channel_name', None)\n body = request.values.get('text', None)\n channel_id = request.values.get('channel_id', None)\n user_name = request.values.get('user_name', None)\n text = body.strip().split()\n zipcode = text[0]\n keywords = \" \".join(text[1:])\n profile = unomi_client.profile_search(channel_name)\n unomi_client.track_event(channel_name, 'auntBerthaLookup', {'keywords': keywords, 'zipcode':zipcode}, user_name)\n attachments = aunt_bertha_client.search(keywords, zipcode)\n return jsonify(\n response_type='in_channel',\n text=\"Aunt Bertha Results\",\n 
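# assumption from usage: aunt_bertha_client.search() returns Slack-style attachment dicts, forwarded as-is\n 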
attachments=attachments)\n\n\n\n@app.route('/211', methods=['POST'])\ndef two_one_one():\n \"\"\"\n Callback for Slack, /211 commands in Slack will trigger a post to this\n route with parameters.\n\n The usage for /211 is:\n\n /211 [keyword] [zipcode]\n ex: /211 shelter 19011\n\n \"\"\"\n print(\"SLACK REQUEST: \", request.values)\n channel_name = request.values.get('channel_name', None)\n body = request.values.get('text', None)\n channel_id = request.values.get('channel_id', None)\n user_name = request.values.get('user_name', None)\n text = body.strip().split()\n keyword = text[0]\n zipcode = text[1]\n profile = unomi_client.profile_search(channel_name)\n unomi_client.track_event(channel_name, 'twoOneOneLookup', {'value': \"{0} {1}\".format(keyword, zipcode)}, user_name)\n attachments = too_client.search(keyword, zipcode)\n return jsonify(\n response_type='in_channel',\n text=\"211 Results\",\n attachments=attachments)\n\n\n\"\"\"\nAdmin User Interface\n\"\"\"\n@app.route('/profiles', methods=['GET'])\n@login_required\ndef profiles_index():\n print(\"attempting\")\n profiles = unomi_client.list_profiles()\n profiles_count = len(profiles)\n rate_denominator = profiles_count or 1 # avoid a ZeroDivisionError while no profiles exist yet\n assessments = 0\n treatments = 0\n for profile in profiles:\n try:\n if profile[\"properties\"][\"hadAssessment\"] == 'yes':\n assessments += 1\n if profile[\"properties\"][\"inTreatment\"] == 'yes':\n treatments += 1\n except KeyError:\n pass\n\n return render_template('profiles/index.html',\n profiles=profiles,\n profiles_count=profiles_count,\n treatments=treatments,\n treatment_rate=round(treatments/rate_denominator*100,1),\n assessments=assessments,\n assessment_rate=round(assessments/rate_denominator*100,1))\n\n\n@app.route('/profiles/<profile_id>', methods=['GET'])\n@login_required\ndef profiles_show(profile_id):\n profile = unomi_client.profile_search(profile_id)\n events = unomi_client.list_events(profile_id)\n crss_messages = {} #{ ('Michael Ghen', '+12154781286'): 17, ... 
}\n\n for event in events:\n try:\n user_name = event['source']['itemId']\n crs_name, phone_number = slack_client.get_phone_number_by_user_name(user_name)\n app.logger.debug(\"{0}, {1}\".format(crs_name, phone_number))\n key = (crs_name, phone_number)\n if key in crss_messages.keys():\n crss_messages[key] += 1\n else:\n crss_messages[key] = 1\n # Case when there is an inbound message, no CRS involved\n except TypeError as e:\n app.logger.warning(e)\n pass # Go to the next event\n\n all_crs_messages = []\n for key, value in crss_messages.items():\n all_crs_messages.append([key[0], key[1], value])\n crss_messages = all_crs_messages\n return render_template('profiles/show.html', profile=profile, events=events, crss_messages=crss_messages)\n\n\n@app.route('/profiles/needs', methods=['GET'])\n@login_required\ndef profiles_needs_index():\n profiles = unomi_client.list_profiles()\n needs = []\n for profile in profiles:\n try:\n for need in profile[\"properties\"][\"needs\"]:\n needs.append(\n (profile[\"itemId\"], need, \"Philadelphia\")\n )\n except KeyError:\n # No needs\n pass\n return render_template('profiles/needs/index.html', needs=needs)\n\n@app.route('/profiles/needs/data', methods=['GET'])\n@login_required\ndef profiles_needs_data():\n profiles = unomi_client.list_profiles()\n needs = {}\n county_needs = {}\n counties = []\n for profile in profiles:\n if 'needs' in profile[\"properties\"].keys():\n for need in profile[\"properties\"][\"needs\"]:\n if need[\"name\"] in needs.keys():\n needs[need[\"name\"]] += 1\n else:\n needs[need[\"name\"]] = 1\n county = profile[\"properties\"][\"county\"]\n if need[\"name\"] in county_needs.keys():\n if county in county_needs[need[\"name\"]].keys():\n county_needs[need[\"name\"]][county] += 1\n else:\n county_needs[need[\"name\"]][county] = 1\n counties.append(county)\n else:\n county_needs[need[\"name\"]] = {}\n\n\n pie_data = []\n for key, value in needs.items():\n pie_data.append({'name': key, 'y': value})\n\n bar_series = [] # {name:, data:}\n for key, value in county_needs.items():\n values = list(value.values())\n bar_series.append({'name': key, 'data': values})\n\n\n return jsonify( {'pie_data': pie_data, 'bar_series': bar_series, 'counties': list(set(counties))} )\n\n\n@app.route(\"/login\")\ndef login():\n if current_user.is_authenticated:\n return redirect(url_for('index'))\n\n # Find out what URL to hit for Google login\n google_provider_cfg = get_google_provider_cfg()\n authorization_endpoint = google_provider_cfg[\"authorization_endpoint\"]\n # Use library to construct the request for login and provide\n # scopes that let you retrieve user's profile from Google\n request_uri = client.prepare_request_uri(\n authorization_endpoint,\n redirect_uri=request.base_url + \"/callback\",\n scope=[\"openid\", \"email\", \"profile\"],\n )\n\n return render_template('login.html',request_uri=request_uri)\n\n@app.route(\"/login/callback\")\ndef callback():\n # Get authorization code Google sent back to you\n code = request.args.get(\"code\")\n\n # Find out what URL to hit to get tokens that allow you to ask for\n # things on behalf of a user\n google_provider_cfg = get_google_provider_cfg()\n token_endpoint = google_provider_cfg[\"token_endpoint\"]\n\n # Prepare and send request to get tokens! 
Yay tokens!\n token_url, headers, body = client.prepare_token_request(\n token_endpoint,\n authorization_response=request.url,\n redirect_url=request.base_url,\n code=code,\n )\n token_response = requests.post(\n token_url,\n headers=headers,\n data=body,\n auth=(GOOGLE_CLIENT_ID, GOOGLE_CLIENT_SECRET),\n )\n\n # Parse the tokens!\n client.parse_request_body_response(json.dumps(token_response.json()))\n\n # Now that we have tokens (yay) let's find and hit URL\n # from Google that gives you user's profile information,\n # including their Google Profile Image and Email\n userinfo_endpoint = google_provider_cfg[\"userinfo_endpoint\"]\n uri, headers, body = client.add_token(userinfo_endpoint)\n userinfo_response = requests.get(uri, headers=headers, data=body)\n\n # We want to make sure their email is verified.\n # The user authenticated with Google, authorized our\n # app, and now we've verified their email through Google!\n if userinfo_response.json().get(\"email_verified\"):\n unique_id = userinfo_response.json()[\"sub\"]\n users_email = userinfo_response.json()[\"email\"]\n picture = userinfo_response.json()[\"picture\"]\n users_name = userinfo_response.json()[\"given_name\"]\n else:\n return \"User email not available or not verified by Google.\", 400\n\n # Create a user in our db with the information provided\n # by Google\n user = User(\n id_=unique_id, name=users_name, email=users_email, profile_pic=picture\n )\n\n # Doesn't exist? Add to database\n if not User.get(unique_id):\n User.create(unique_id, users_name, users_email, picture)\n\n # Begin user session by logging the user in\n login_user(user)\n\n # Send user back to homepage\n return redirect(url_for(\"index\"))\n\n@app.route('/logout')\n@login_required\ndef logout():\n logout_user()\n return redirect(url_for('index'))\n\n\n## OAuth Helper Function\ndef get_google_provider_cfg():\n return requests.get(GOOGLE_DISCOVERY_URL).json()\n\n# ## HTTPS\nif __name__ == \"__main__\":\n app.run(ssl_context=\"adhoc\")\n","sub_path":"gateway-server/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":22955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"556854329","text":"\"\"\"\nThis module implements the interface to Motion Blinds.\n\n:copyright: (c) 2020 starkillerOG.\n:license: MIT, see LICENSE for more details.\n\"\"\"\n\n\nimport logging\nimport socket\nimport json\nimport datetime\nfrom enum import IntEnum\nfrom Cryptodome.Cipher import AES\n\n_LOGGER = logging.getLogger(__name__)\n\nUDP_PORT_SEND = 32100\nDEVICE_TYPE_GATEWAY = \"02000002\" # Gateway\nDEVICE_TYPE_BLIND = \"10000000\" # Standard Blind\nDEVICE_TYPE_TDBU = \"10000001\" # Top Down Bottom Up\nDEVICE_TYPE_DR = \"10000002\" # Double Roller\n\n\nclass GatewayStatus(IntEnum):\n \"\"\"Status of the gateway.\"\"\"\n\n Working = 1\n Pairing = 2\n Updating = 3\n\n\nclass BlindType(IntEnum):\n \"\"\"Blind type matching of the blind using the values provided by the motion-gateway.\"\"\"\n\n Unknown = -1\n RollerBlind = 1\n VenetianBlind = 2\n RomanBlind = 3\n HoneycombBlind = 4\n ShangriLaBlind = 5\n RollerShutter = 6\n RollerGate = 7\n Awning = 8\n TopDownBottomUp = 9\n DayNightBlind = 10\n DimmingBlind = 11\n Curtain = 12\n CurtainLeft = 13\n CurtainRight = 14\n DoubleRoller = 17\n Switch = 43\n\n\nclass BlindStatus(IntEnum):\n \"\"\"Status of the blind.\"\"\"\n\n Closing = 0\n Opening = 1\n Stopped = 2\n StatusQuery = 5\n\n\nclass LimitStatus(IntEnum):\n \"\"\"Limit status of the blind.\"\"\"\n\n NoLimit = 0\n 
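# values map the \"currentState\" field of a blind's status report (see _parse_response below); e.g. LimitStatus(3).name == \"Limits\"\n 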
TopLimit = 1\n BottomLimit = 2\n Limits = 3\n Limit3 = 4\n\n\nclass MotionGateway:\n \"\"\"Main class representing the Motion Gateway.\"\"\"\n\n def __init__(\n self,\n ip: str = None,\n key: str = None,\n ):\n self._ip = ip\n self._key = key\n self._token = None\n \n self._access_token = None\n self._gateway_mac = None\n self._timeout = 5.0\n\n self._device_list = {}\n self._device_type = None\n self._status = None\n self._N_devices = None\n self._RSSI = None\n self._protocol_version = None\n \n \n def __repr__(self):\n return \"<MotionGateway ip: %s, mac: %s, protocol: %s, N_devices: %s, status: %s, RSSI: %s dBm>\" % (\n self._ip,\n self.mac,\n self.protocol,\n self.N_devices,\n self.status,\n self.RSSI,\n )\n\n def _get_access_token(self):\n \"\"\"Calculate the AccessToken from the Key and Token.\"\"\"\n if self._token is None:\n _LOGGER.error(\"Token not yet retrieved, use GetDeviceList to obtain it before using _get_access_token.\")\n return None\n if self._key is None:\n _LOGGER.error(\"Key not specified, specify a key when creating the gateway class like MotionGateway(ip = '192.168.1.100', key = 'abcd1234-56ef-78') when using _get_access_token.\")\n return None\n \n token_bytes = bytes(self._token, 'utf-8')\n key_bytes = bytes(self._key, 'utf-8')\n\n cipher = AES.new(key_bytes, AES.MODE_ECB)\n encrypted_bytes = cipher.encrypt(token_bytes)\n self._access_token = encrypted_bytes.hex().upper()\n \n return self._access_token\n\n def _get_timestamp(self):\n \"\"\"Get the current time and format according to required Message-ID (Timestamp).\"\"\"\n time = datetime.datetime.utcnow()\n time_str = time.strftime(\"%Y%d%m%H%M%S%f\")[:-3]\n \n return time_str\n\n def _send(self, message):\n \"\"\"Send a command to the Motion Gateway.\"\"\"\n\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.settimeout(self._timeout)\n\n s.sendto(bytes(json.dumps(message), 'utf-8'), (self._ip, UDP_PORT_SEND))\n\n data, addr = s.recvfrom(1024)\n \n response = json.loads(data)\n \n if response.get(\"actionResult\") is not None:\n _LOGGER.error(\"Received actionResult: '%s', when sending message: '%s'\",\n response.get(\"actionResult\"),\n message\n )\n \n return response\n\n def _read_subdevice(self, mac, device_type):\n \"\"\"Read the status of a subdevice.\"\"\"\n msg = {\"msgType\": \"ReadDevice\", \"mac\": mac, \"deviceType\": device_type, \"msgID\": self._get_timestamp()}\n\n return self._send(msg)\n\n def _write_subdevice(self, mac, device_type, data):\n \"\"\"Write a command to a subdevice.\"\"\"\n msg = {\"msgType\": \"WriteDevice\", \"mac\": mac, \"deviceType\": device_type, \"AccessToken\": self.access_token, \"msgID\": self._get_timestamp(), \"data\": data}\n\n return self._send(msg)\n\n def GetDeviceList(self):\n \"\"\"Get the device list from the Motion Gateway.\"\"\"\n msg = {\"msgType\":\"GetDeviceList\", \"msgID\":self._get_timestamp()}\n\n response = self._send(msg)\n \n # check msgType\n msgType = response.get(\"msgType\")\n if msgType != \"GetDeviceListAck\":\n _LOGGER.error(\n \"Response to GetDeviceList is not a GetDeviceListAck but '%s'.\",\n msgType,\n )\n return\n \n # check device_type\n device_type = response.get(\"deviceType\")\n if device_type != DEVICE_TYPE_GATEWAY:\n _LOGGER.warning(\n \"DeviceType %s does not correspond to a gateway in GetDeviceList function.\",\n device_type,\n )\n \n # update variables\n self._gateway_mac = response[\"mac\"]\n self._device_type = device_type\n self._protocol_version = response[\"ProtocolVersion\"]\n self._token = response[\"token\"]\n \n # calculate the access token\n self._get_access_token()\n \n # add the discovered blinds 
to the device list.\n for blind in response[\"data\"]:\n blind_type = blind[\"deviceType\"]\n if blind_type != DEVICE_TYPE_GATEWAY:\n blind_mac = blind[\"mac\"]\n if blind_type in [DEVICE_TYPE_BLIND]:\n self._device_list[blind_mac] = MotionBlind(gateway = self, mac = blind_mac, device_type = blind_type)\n elif blind_type in [DEVICE_TYPE_DR]:\n self._device_list[blind_mac] = MotionBlind(gateway = self, mac = blind_mac, device_type = blind_type, max_angle = 90)\n elif blind_type in [DEVICE_TYPE_TDBU]:\n self._device_list[blind_mac] = MotionTopDownBottomUp(gateway = self, mac = blind_mac, device_type = blind_type)\n else:\n _LOGGER.warning(\n \"Device with mac '%s' has DeviceType '%s' that does not correspond to a gateway or known blind.\",\n blind_mac,\n blind_type,\n )\n\n return self._device_list\n\n def Update(self):\n \"\"\"Get the status of the Motion Gateway.\"\"\"\n msg = {\"msgType\":\"ReadDevice\", \"mac\": self.mac, \"deviceType\": self.device_type, \"msgID\":self._get_timestamp()}\n\n response = self._send(msg)\n \n # check msgType\n msgType = response.get(\"msgType\")\n if msgType != \"ReadDeviceAck\":\n _LOGGER.error(\n \"Response to Update is not a ReadDeviceAck but '%s'.\",\n msgType,\n )\n return\n \n # check device_type\n device_type = response.get(\"deviceType\")\n if device_type != DEVICE_TYPE_GATEWAY:\n _LOGGER.warning(\n \"DeviceType %s does not correspond to a gateway in Update function.\",\n device_type,\n )\n \n # update variables\n self._gateway_mac = response[\"mac\"]\n self._device_type = device_type\n self._status = GatewayStatus(response[\"data\"][\"currentState\"])\n self._N_devices = response[\"data\"][\"numberOfDevices\"]\n self._RSSI = response[\"data\"][\"RSSI\"]\n\n @property\n def status(self):\n \"\"\"Return gateway status: from GatewayStatus enum.\"\"\"\n if self._status is not None:\n return self._status.name\n\n return self._status\n\n @property\n def N_devices(self):\n \"\"\"Return the number of connected child devices.\"\"\"\n return self._N_devices\n\n @property\n def RSSI(self):\n \"\"\"Return the Wi-Fi connection strength of the gateway in dBm.\"\"\"\n return self._RSSI\n\n @property\n def token(self):\n \"\"\"Return the Token.\"\"\"\n return self._token\n\n @property\n def access_token(self):\n \"\"\"Return the AccessToken.\"\"\"\n if self._access_token is None:\n if self._token is None:\n _LOGGER.error(\"Token not yet retrieved, use GetDeviceList to obtain it before using the access_token.\")\n return None\n if self._key is None:\n _LOGGER.error(\"Key not specified, specify a key when creating the gateway class like MotionGateway(ip = '192.168.1.100', key = 'abcd1234-56ef-78') when using the access_token.\")\n return None\n # calculate the acces token\n self._get_access_token()\n \n return self._access_token\n\n @property\n def mac(self):\n \"\"\"Return the mac address of the gateway.\"\"\"\n if self._gateway_mac is None:\n _LOGGER.error(\"gateway mac not yet retrieved, use GetDeviceList to obtain it before using the mac.\")\n return None\n \n return self._gateway_mac\n\n @property\n def device_type(self):\n \"\"\"Return the device type of the gateway.\"\"\"\n if self._device_type is None:\n _LOGGER.error(\"gateway device_type not yet retrieved, use GetDeviceList to obtain it before using the device_type.\")\n return None\n\n return self._device_type\n\n @property\n def protocol(self):\n \"\"\"Return the protocol version of the gateway.\"\"\"\n return self._protocol_version\n\n @property\n def device_list(self):\n \"\"\"\n Return a dict 
containing all blinds connected to the gateway.\n \n The keys in the dict are the mac addresses of the blinds.\n \"\"\"\n return self._device_list\n\nclass MotionBlind:\n \"\"\"Sub class representing a blind connected to the Motion Gateway.\"\"\"\n def __init__(\n self,\n gateway: MotionGateway = None,\n mac: str = None,\n device_type: str = None,\n max_angle: int = 180,\n ):\n self._gateway = gateway\n self._mac = mac\n self._device_type = device_type\n self._blind_type = None\n self._max_angle = max_angle\n \n self._status = None\n self._limit_status = None\n self._position = None\n self._angle = None\n self._battery_voltage = None\n self._battery_level = None\n self._RSSI = None\n\n def __repr__(self):\n return \"<MotionBlind mac: %s, type: %s, status: %s, position: %s %%, angle: %s, limit_status: %s, battery_level: %s %%, battery_voltage: %s, RSSI: %s dBm>\" % (\n self.mac,\n self.blind_type,\n self.status,\n self.position,\n self.angle,\n self.limit_status,\n self.battery_level,\n self.battery_voltage,\n self.RSSI,\n )\n\n def _write(self, data):\n \"\"\"Write a command to control the blind.\"\"\"\n \n response = self._gateway._write_subdevice(self.mac, self._device_type, data)\n \n # check msgType\n msgType = response.get(\"msgType\")\n if msgType != \"WriteDeviceAck\":\n _LOGGER.error(\n \"Response to Write is not a WriteDeviceAck but '%s'.\",\n msgType,\n )\n\n return response\n\n def _calculate_battery_level(self, voltage):\n if voltage > 9.4:\n # 3 cell battery pack (12.6V)\n return round((voltage-10.4)*100/(12.6-10.4), 2)\n\n if voltage > 0.0 and voltage <= 9.4:\n # 2 cell battery pack (8.4V)\n return round((voltage-6.2)*100/(8.4-6.2), 2)\n\n return 0.0\n\n def _parse_response_common(self, response):\n \"\"\"Parse the common part of a response from the blind.\"\"\"\n\n # check device_type\n device_type = response.get(\"deviceType\")\n if device_type not in [DEVICE_TYPE_BLIND, DEVICE_TYPE_TDBU, DEVICE_TYPE_DR]:\n _LOGGER.warning(\n \"Device with mac '%s' has DeviceType '%s' that does not correspond to a known blind in Update function.\",\n self.mac,\n device_type,\n )\n \n # update variables\n self._mac = response[\"mac\"]\n self._device_type = response[\"deviceType\"]\n try:\n self._blind_type = BlindType(response[\"data\"][\"type\"])\n except ValueError:\n if self._blind_type != BlindType.Unknown:\n _LOGGER.error(\n \"Device with mac '%s' has blind_type '%s' that is not yet known, please submit an issue at https://github.com/starkillerOG/motion-blinds/issues.\",\n self.mac,\n response[\"data\"][\"type\"],\n )\n self._blind_type = BlindType.Unknown\n\n # Check max angle\n if self._blind_type in [BlindType.ShangriLaBlind]:\n self._max_angle = 90\n\n self._RSSI = response[\"data\"][\"RSSI\"]\n\n def _parse_response(self, response):\n \"\"\"Parse a response from the blind.\"\"\"\n \n # handle the part that is common among all blinds\n self._parse_response_common(response)\n \n # handle specific properties\n self._status = BlindStatus(response[\"data\"][\"operation\"])\n self._limit_status = LimitStatus(response[\"data\"][\"currentState\"])\n self._position = response[\"data\"][\"currentPosition\"]\n self._angle = response[\"data\"][\"currentAngle\"]*(180.0/self._max_angle)\n self._battery_voltage = response[\"data\"][\"batteryLevel\"]/100.0\n\n self._battery_level = self._calculate_battery_level(self._battery_voltage)\n\n def Update(self):\n \"\"\"Get the status of the blind from the Motion Gateway.\"\"\"\n response = self._gateway._read_subdevice(self.mac, self._device_type)\n # alternative: response = self._write({\"operation\": 5})\n \n # check msgType\n msgType = response.get(\"msgType\")\n if msgType != 
\"ReadDeviceAck\":\n _LOGGER.error(\n \"Response to Update is not a ReadDeviceAck but '%s'.\",\n msgType,\n )\n return\n \n self._parse_response(response)\n\n def Stop(self):\n \"\"\"Stop the motion of the blind.\"\"\"\n data = {\"operation\": 2}\n\n response = self._write(data)\n \n self._parse_response(response)\n\n def Open(self):\n \"\"\"Open the blind/move the blind up.\"\"\"\n data = {\"operation\": 1}\n\n response = self._write(data)\n \n self._parse_response(response)\n\n def Close(self):\n \"\"\"Close the blind/move the blind down.\"\"\"\n data = {\"operation\": 0}\n\n response = self._write(data)\n \n self._parse_response(response)\n\n def Set_position(self, position):\n \"\"\"\n Set the position of the blind.\n \n position is in %, so 0-100\n 0 = open\n 100 = closed\n \"\"\"\n data = {\"targetPosition\": position}\n\n response = self._write(data)\n \n self._parse_response(response)\n\n def Set_angle(self, angle):\n \"\"\"\n Set the angle/rotation of the blind.\n \n angle is in degrees, so 0-180\n \"\"\"\n target_angle = round(angle*self._max_angle/180.0, 0)\n \n data = {\"targetAngle\": target_angle}\n\n response = self._write(data)\n \n self._parse_response(response)\n\n @property\n def device_type(self):\n \"\"\"Return the device type of the blind.\"\"\"\n if self._device_type is None:\n _LOGGER.error(\"blind device_type not yet retrieved, use Update to obtain it before using the device_type.\")\n return None\n\n return self._device_type\n\n @property\n def blind_type(self):\n \"\"\"Return the type of the blind from BlindType enum.\"\"\"\n if self._blind_type is not None:\n return self._blind_type.name\n\n return self._blind_type\n\n @property\n def type(self):\n \"\"\"Return the type of the blind as a BlindType enum.\"\"\"\n return self._blind_type\n\n @property\n def mac(self):\n \"\"\"Return the mac address of the blind.\"\"\"\n return self._mac\n\n @property\n def status(self):\n \"\"\"Return the current status of the blind from BlindStatus enum.\"\"\"\n if self._status is not None:\n return self._status.name\n\n return self._status\n\n @property\n def limit_status(self):\n \"\"\"Return the current status of the limit detection of the blind from LimitStatus enum.\"\"\"\n if self._limit_status is not None:\n return self._limit_status.name\n\n return self._limit_status\n\n @property\n def position(self):\n \"\"\"Return the current position of the blind in % (0-100).\"\"\"\n return self._position\n\n @property\n def angle(self):\n \"\"\"Return the current angle of the blind 0-180.\"\"\"\n return self._angle\n\n @property\n def battery_voltage(self):\n \"\"\"Return the current battery voltage of the blind in V.\"\"\"\n return self._battery_voltage\n\n @property\n def battery_level(self):\n \"\"\"Return the current battery level of the blind in %.\"\"\"\n return self._battery_level\n\n @property\n def RSSI(self):\n \"\"\"Return the radio connection strength of the blind to the gateway in dBm.\"\"\"\n return self._RSSI\n\nclass MotionTopDownBottomUp(MotionBlind):\n \"\"\"Sub class representing a Top Down Bottom Up blind connected to the Motion Gateway.\"\"\"\n def _parse_response(self, response):\n \"\"\"Parse a response from the blind.\"\"\"\n \n # handle the part that is common among all blinds\n self._parse_response_common(response)\n \n # handle specific properties\n self._status = {\"T\": BlindStatus(response[\"data\"][\"operation_T\"]), \"B\": BlindStatus(response[\"data\"][\"operation_B\"])}\n self._limit_status = {\"T\": 
LimitStatus(response[\"data\"][\"currentState_T\"]), \"B\": LimitStatus(response[\"data\"][\"currentState_B\"])}\n self._position = {\"T\": response[\"data\"][\"currentPosition_T\"], \"B\": response[\"data\"][\"currentPosition_B\"]}\n self._angle = None\n self._battery_voltage = {\"T\": response[\"data\"][\"batteryLevel_T\"]/100.0, \"B\": response[\"data\"][\"batteryLevel_B\"]/100.0}\n\n self._battery_level = {\"T\": self._calculate_battery_level(self._battery_voltage[\"T\"]), \"B\": self._calculate_battery_level(self._battery_voltage[\"B\"])}\n\n def Stop(self, motor: str = \"B\"):\n \"\"\"Stop the motion of the blind.\"\"\"\n if motor == \"B\":\n data = {\"operation_B\": 2}\n elif motor == \"T\":\n data = {\"operation_T\": 2}\n else:\n _LOGGER.error('Please specify which motor to control \"T\" (top) or \"B\" (bottom)')\n return\n\n response = self._write(data)\n \n self._parse_response(response)\n\n def Open(self, motor: str = \"B\"):\n \"\"\"Open the blind/move the blind up.\"\"\"\n if motor == \"B\":\n data = {\"operation_B\": 1}\n elif motor == \"T\":\n data = {\"operation_T\": 1}\n else:\n _LOGGER.error('Please specify which motor to control \"T\" (top) or \"B\" (bottom)')\n return\n\n response = self._write(data)\n \n self._parse_response(response)\n\n def Close(self, motor: str = \"B\"):\n \"\"\"Close the blind/move the blind down.\"\"\"\n if motor == \"B\":\n data = {\"operation_B\": 0}\n elif motor == \"T\":\n data = {\"operation_T\": 0}\n else:\n _LOGGER.error('Please specify which motor to control \"T\" (top) or \"B\" (bottom)')\n return\n\n response = self._write(data)\n \n self._parse_response(response)\n\n def Set_position(self, position, motor: str = \"B\"):\n \"\"\"\n Set the position of the blind.\n \n position is in %, so 0-100\n 0 = open\n 100 = closed\n \"\"\"\n if motor == \"B\":\n data = {\"targetPosition_B\": position}\n elif motor == \"T\":\n data = {\"targetPosition_T\": position}\n else:\n _LOGGER.error('Please specify which motor to control \"T\" (top) or \"B\" (bottom)')\n return\n\n response = self._write(data)\n \n self._parse_response(response)\n\n def Set_angle(self, angle, motor: str = \"B\"):\n \"\"\"\n Set the angle/rotation of the blind.\n \n angle is in degrees, so 0-180\n \"\"\"\n target_angle = round(angle*self._max_angle/180.0, 0)\n\n if motor == \"B\":\n data = {\"targetAngle_B\": target_angle}\n elif motor == \"T\":\n data = {\"targetAngle_T\": target_angle}\n else:\n _LOGGER.error('Please specify which motor to control \"T\" (top) or \"B\" (bottom)')\n return\n\n response = self._write(data)\n \n self._parse_response(response)\n\n @property\n def status(self):\n \"\"\"Return the current status of the blind from BlindStatus enum.\"\"\"\n if self._status is not None:\n return {\"T\": self._status[\"T\"].name, \"B\": self._status[\"B\"].name}\n\n return self._status\n\n @property\n def limit_status(self):\n \"\"\"Return the current status of the limit detection of the blind from LimitStatus enum.\"\"\"\n if self._limit_status is not None:\n return {\"T\": self._limit_status[\"T\"].name, \"B\": self._limit_status[\"B\"].name}\n\n return self._limit_status\n\n","sub_path":"motionblinds/motion_blinds.py","file_name":"motion_blinds.py","file_ext":"py","file_size_in_byte":21609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"473572954","text":"import os\nimport sqlite3\nfrom spending_app import config\n\n\nclass BaseDao:\n def __init__(self, base_path=''):\n self.base_dir = 
os.path.dirname(os.path.realpath(__file__)) + base_path\n\n @staticmethod\n def __create_connection():\n conn = sqlite3.connect(config.DATA_BASE_CONNECTION_STRING)\n conn.row_factory = sqlite3.Row\n return conn\n\n @staticmethod\n def __map_result(cls, items):\n return [cls.from_dict(item) for item in items]\n\n def query_one_field(self, cls, sql, params):\n conn = self.__create_connection()\n item = conn.execute(sql, params).fetchone()\n conn.close()\n\n if item is None:\n return None\n\n return cls(item[0])\n\n def query_one(self, cls, sql, params):\n conn = self.__create_connection()\n item = conn.execute(sql, params).fetchone()\n conn.close()\n\n if item is None:\n return None\n\n return cls.from_dict(item)\n\n def query_all(self, cls, sql, params):\n conn = self.__create_connection()\n items = conn.execute(sql, params).fetchall()\n conn.close()\n return self.__map_result(cls, items)\n\n def execute(self, sql, params):\n conn = self.__create_connection()\n cur = conn.execute(sql, params)\n inserted_id = cur.lastrowid\n conn.commit()\n conn.close()\n return inserted_id\n\n def executescript(self, query):\n conn = self.__create_connection()\n conn.executescript(query)\n conn.commit()\n conn.close()\n\n def get_sql(self, path):\n with open(self.base_dir + path, 'r') as f:\n query = f.read()\n return query\n","sub_path":"spending_app/dao/base_dao.py","file_name":"base_dao.py","file_ext":"py","file_size_in_byte":1682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"514960391","text":"import os\nimport sys\nimport json\nimport spotipy\nimport configparser\n\n\nclass SpotList:\n\n '''\n Use Spotify api to get album and track info about artist\n '''\n\n def __init__(self):\n self.sp = spotipy.Spotify()\n self.sp.trace = False\n\n def get_discography(self, name):\n '''\n :return: dict of the artist, their albums, and track lists\n '''\n artist_info = self._get_artist(name)\n\n # Check if we need to get this atrist again\n try:\n artist_name = artist_info['name']\n except TypeError:\n artist_name = name\n save_file = os.path.join(json_path, artist_name.replace(' ', '_') + \".json\")\n if os.path.isfile(save_file):\n return False\n\n if artist_info is None:\n return {name: False}\n\n tprint(\"Getting: \" + artist_info['name'])\n\n info = artist_info\n albums = self._get_artist_albums(artist_info)\n return {artist_info['name']: {'searched': name,\n 'info': info,\n 'albums': albums\n }\n }\n\n def _get_artist(self, name):\n '''\n We need this to get the artist id\n '''\n results = self.sp.search(q='artist:' + name, type='artist')\n items = results['artists']['items']\n if len(items) > 0:\n return items[0]\n else:\n return None\n\n def _get_album_tracks(self, album):\n '''\n Get the tracks for an album, save each track json data to an array\n '''\n tracks = [] # Each tracks json data\n results = self.sp.album_tracks(album['id'])\n tracks.extend(results['items'])\n while results['next']:\n results = self.sp.next(results)\n tracks.extend(results['items'])\n return tracks\n\n def _get_artist_albums(self, artist_info):\n '''\n Get album list of artist then get the tracks for each album\n '''\n albums = []\n albums_list = {}\n\n results = self.sp.artist_albums(artist_info['id'], album_type='album,single')\n albums.extend(results['items'])\n while results['next']:\n results = self.sp.next(results)\n albums.extend(results['items'])\n for album in albums:\n # Set name to all lower so we do not get duplicate albums if\n # one uses `of` and another 
`Of`\n name = album['name'].lower()\n # Make sure we did not already save the album\n if name not in albums_list.keys():\n tprint(\"Getting album: \" + album['name'])\n info = self.sp.album(album['id'])\n tracks = self._get_album_tracks(album)\n albums_list[name] = {'info': info,\n 'tracks': tracks\n }\n return albums_list\n\n\ndef dump(data, music_dir):\n # There will only ever be one parent key in the data object\n artist_file = list(data.keys())[0].replace(' ', '_') + \".json\"\n save_file = os.path.join(json_path, artist_file)\n with open(save_file, 'w') as fp:\n json.dump(data, fp)\n # Append to master list\n add_to_master(artist_file)\n\n\ndef add_to_master(json_file):\n with open(master_list_file, 'a') as fp:\n fp.write(json_file + \"\\n\")\n\n\ndef tprint(msg):\n '''\n Terminal print\n '''\n try:\n print(msg)\n except UnicodeEncodeError:\n print(\"Cannot print Unicode\")\n\n\nif __name__ == '__main__':\n # Need to pass in the config file\n if len(sys.argv) < 2:\n print('Usage: {0} config_file'.format(sys.argv[0]))\n sys.exit(0)\n else:\n config_file = sys.argv[1]\n\n config = configparser.ConfigParser()\n # Read config file\n if not os.path.isfile(config_file):\n print(\"Config file not found: \" + config_file)\n sys.exit(0)\n config.read(config_file)\n\n music_dir = os.path.abspath(os.path.expanduser(config['main']['music_dir']))\n artist_list_file = os.path.abspath(os.path.expanduser(config['main']['artist_list']))\n\n # Check if artist list exists\n if os.path.isfile(artist_list_file):\n # Read list of artists into an array\n with open(artist_list_file) as f:\n artist_list = f.readlines()\n if len(artist_list) <= 0:\n print(\"No artists listed in: \" + artist_list_file)\n sys.exit(0)\n else:\n print(\"Artist file not found: \" + artist_list_file)\n sys.exit(0)\n\n # Check if music directory exists, if not create it\n if not os.path.isdir(music_dir):\n os.makedirs(music_dir)\n print(\"Music directory not found. 
Created it: \" + music_dir)\n json_path = os.path.join(music_dir, \"_json\")\n if not os.path.isdir(json_path):\n os.makedirs(json_path)\n print(\"json path created: \" + json_path)\n\n # Create blank master list file if not exist\n master_list_file = os.path.join(json_path, '_master.txt')\n if not os.path.isfile(master_list_file):\n open(master_list_file, 'a').close()\n\n name = ' '.join(sys.argv[1:])\n artist_spot = SpotList()\n for artist in artist_list:\n artist = artist.strip()\n data = artist_spot.get_discography(artist)\n if data is not False:\n dump(data, music_dir)\n else:\n tprint(\"Skipping: \" + artist)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"464984529","text":"import csv\nimport json\n# hardcoded dish\nimport requests\nDish = \"Spaghetti\"\n# function to extract ingredients\ndef ReturnIngredientList(Dish):\n with open(\"Recipes.csv\", \"r\", encoding = \"utf-8\") as f:\n reader = csv.reader(f)\n for ingredient in reader:\n if ingredient[0] == Dish:\n # print(ingredient)\n return ingredient\n# ingredientstr = (ReturnIngredientList(Dish))\n# print(ingredientstr)\n# ingredientArr = ingredientstr.split(\",\")\n# make get requests to Wegmans 1st search API\ndef WegmanSearch(ingredient):\n URL = \"https://api.wegmans.io/products/search?query=\"+ingredient+\"&api-version=2018-10-18&Subscription-Key=12a8aa35602741a2b73a15b9eb77f828\"\n r = requests.get(url = URL)\n # extracting data in json format\n data = r.json()\n return data[\"results\"][0][\"sku\"]\n# print(WegmanSearch(\"pasta\"))\ndef WegmanDetail(sku):\n URL = \"https://api.wegmans.io/products/\"+sku+\"?api-version=2018-10-18&Subscription-Key=12a8aa35602741a2b73a15b9eb77f828\"\n r = requests.get(url = URL)\n # extracting data in json format\n data = r.json()\n # print(data[\"nutrients\"])\n return data[\"nutrients\"]\ndef FinalCals(Dish):\n ingredientBigArr = (ReturnIngredientList(Dish))\n ingredientstr = ingredientBigArr[9]\n ingredientArr = ingredientstr.split(\",\")\n # print(ingredientArr)\n Cal = 0\n for ingredient in ingredientArr:\n arr = (WegmanDetail(WegmanSearch(ingredient)))\n for eachdic in arr:\n if eachdic['type'] == 'Calories':\n Cal += int(eachdic['quantity'])\n return Cal\n\n# FinalCals(Dish)\ndef FinalCarbs(Dish):\n ingredientBigArr = (ReturnIngredientList(Dish))\n ingredientstr = ingredientBigArr[9]\n ingredientArr = ingredientstr.split(\",\")\n # print(ingredientArr)\n Carb = 0\n for ingredient in ingredientArr:\n arr = (WegmanDetail(WegmanSearch(ingredient)))\n # print(arr)\n for eachdic in arr:\n if eachdic['type'] == 'Total Carbohydrate':\n incarb = eachdic['quantity']\n q = incarb.split()\n # print(q[0])\n Carb += int(q[0])\n return Carb\n\n\n# # FinalCarbs(Dish)\n# print(FinalCals(Dish))\n# print(FinalSugar(Dish))","sub_path":"check_beauty_calories.py","file_name":"check_beauty_calories.py","file_ext":"py","file_size_in_byte":2288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"44114943","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('students', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='student',\n options={'verbose_name': '\\u0421\\u0442\\u0443\\u0434\\u0435\\u043d\\u0442', 'verbose_name_plural': 
'\\u0421\\u0442\\u0443\\u0434\\u0435\\u043d\\u0442\\u0438'},\n ),\n migrations.RenameField(\n model_name='student',\n old_name='last_name',\n new_name='sur_name',\n ),\n migrations.AlterField(\n model_name='student',\n name='middle_name',\n field=models.CharField(default=b'', max_length=256, verbose_name='\\u041f\\u043e-\\u0411\\u0430\\u0442\\u044c\\u043a\\u043e\\u0432\\u0456', blank=True),\n ),\n ]\n","sub_path":"students/migrations/0002_auto_20150523_1312.py","file_name":"0002_auto_20150523_1312.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"616595424","text":"from django.urls import path\nfrom .views import results_book_view, add_book_view, find_book_view, find_book_failed_view\n\napp_name = 'books'\n\nurlpatterns = [\n\tpath('add/', add_book_view, name='add'),\n\tpath('find/', find_book_view, name='find'),\n\tpath('results/', results_book_view, name='results'),\n\tpath('find_failed/', find_book_failed_view, name='find_failed'),\n\n]","sub_path":"books/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"310939763","text":"# Example of the kind of filtering that might be applied to the fixations, to remove fixations that were not on the chest x-ray\n# the defined rules are arbitrary, and the optimal filtration may depend on application, so use this code as just an example\n\nimport pandas as pd\nimport numpy as np\nfrom dataset_locations import reflacx_dataset_location\n\ndef filter_fixations(id):\n fixations_df = pd.read_csv(f'{reflacx_dataset_location}/main_data/{id}/fixations.csv')\n print(len(fixations_df))\n filtered_fixations = []\n for index, row in fixations_df.iterrows():\n \n shown_rects_image_space = [round(row['xmin_shown_from_image']) ,round(row['ymin_shown_from_image']),round(row['xmax_shown_from_image']),round(row['ymax_shown_from_image'])]\n shown_rects_screen_space = [round(row['xmin_in_screen_coordinates']) ,round(row['ymin_in_screen_coordinates']),round(row['xmax_in_screen_coordinates']),round(row['ymax_in_screen_coordinates'])]\n \n #Location of the group of button, to go to next screen and to reset the image state\n buttons_rectangle = np.array([10,933,650, 1122])\n\n image_pixels_per_screen_pixel_x = (shown_rects_image_space[2]-shown_rects_image_space[0])/(shown_rects_screen_space[2]-shown_rects_screen_space[0])\n image_pixels_per_screen_pixel_y = (shown_rects_image_space[3]-shown_rects_image_space[1])/(shown_rects_screen_space[3]-shown_rects_screen_space[1])\n \n x_screen, y_screen = convert_image_to_screen_coordinates(row['x_position'], row['y_position'], shown_rects_screen_space, shown_rects_image_space)\n \n #distance between fixation location and the part of the screen showing the image, in screen pixels\n distance_to_image = distance_point_rectangle([x_screen, y_screen],shown_rects_screen_space)\n \n #distance to button region, in pixels\n distance_to_buttons = distance_point_rectangle([x_screen, y_screen],buttons_rectangle)\n \n #distance between fixation location and the part of the screen showing the image, in degrees\n distance_to_image_angle = distance_point_rectangle([x_screen, y_screen],shown_rects_screen_space, image_pixels_per_screen_pixel_x/row['angular_resolution_x_pixels_per_degree'], image_pixels_per_screen_pixel_y/row['angular_resolution_y_pixels_per_degree'])\n \n # keep fixations that have a position inside the image or ( 
that are at least 50 pixels away from buttons and at most 0.5 degrees from the image) \n if distance_to_image==0 or (distance_to_buttons>50 and distance_to_image_angle<0.5):\n filtered_fixations.append(row)\n print(len(filtered_fixations))\n return pd.DataFrame(filtered_fixations)\n\ndef convert_screen_to_image_coordinates(x, y, dest_rect, source_rect):\n return interpolate_2d(dest_rect, source_rect, x,y)\n\ndef convert_image_to_screen_coordinates(x, y, dest_rect, source_rect):\n return interpolate_2d(source_rect,dest_rect,x,y)\n\ndef interpolate_2d(source_rect, dest_rect, x,y):\n x_scale = (dest_rect[2]-dest_rect[0])/(source_rect[2]-source_rect[0])\n y_scale = (dest_rect[3]-dest_rect[1])/(source_rect[3]-source_rect[1])\n return_x = x_scale*(x-source_rect[0])+dest_rect[0]\n return_y = y_scale*(y-source_rect[1])+dest_rect[1]\n return return_x,return_y\n\ndef check_inside_rect(p, rect):\n return p[0]>=rect[0] and p[0]<=rect[2] and p[1]>=rect[1] and p[1]<=rect[3]\n\ndef distance_point_rectangle(p, rect,multiplier_x = 1, multiplier_y = 1):\n if check_inside_rect(p, rect):\n return 0\n return nearest_distance(rect, p,multiplier_x, multiplier_y)\n\ndef nearest_distance(rectangle, point,multiplier_x = 1, multiplier_y = 1):\n if point[0]>=rectangle[0] and point[0]<=rectangle[2]:\n d_top = abs(rectangle[1] - point[1])*multiplier_y\n d_bottom = abs(rectangle[3] - point[1])*multiplier_y\n else:\n d_top =float('inf')\n d_bottom=float('inf')\n corner_y = rectangle[1] if d_top < d_bottom else rectangle[3]\n if point[1]>=rectangle[1] and point[1]<=rectangle[3]:\n d_left = abs(rectangle[0] - point[0])*multiplier_x\n d_right = abs(rectangle[2] - point[0])*multiplier_x\n else:\n d_left = float('inf')\n d_right = float('inf')\n corner_x = rectangle[0] if d_left < d_right else rectangle[2]\n d_cx = corner_x - point[0]\n d_cy = corner_y - point[1]\n d_corner = ((d_cx*multiplier_x)**2 + (d_cy*multiplier_y)**2)**0.5 # anisotropic scaling: each axis uses its own multiplier\n return min(d_top, d_bottom, d_left, d_right, d_corner)\n\nif __name__ == '__main__': \n filter_fixations('P300R169761')","sub_path":"examples_and_paper_numbers/filter_fixations.py","file_name":"filter_fixations.py","file_ext":"py","file_size_in_byte":4573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"528518153","text":"import sys\nfrom itertools import combinations as combi\ninput = sys.stdin.readline\nN = int(input())\n# teams are numbered 0..N-1\ncandidates = [i for i in range(N)]\n# data[i][j]: score gained when i and j end up on the same team\ndata = [list(map(int,input().split())) for _ in range(N)]\nanswer = int(1e9)\n# try every possible way to split the candidates into two teams: li vs temp\nfor li in list(combi(candidates,len(candidates)//2)):\n temp = []\n for candi in candidates:\n if candi not in li:\n temp.append(candi)\n # print(li,temp)\n start = 0\n link = 0\n # compute the scores of team start and team link\n for i in range(len(li)):\n for j in range(len(li)):\n if i != j:\n start += data[li[i]][li[j]]\n link += data[temp[i]][temp[j]]\n # keep the minimum score difference\n answer = min(abs(start-link), answer)\nprint(answer)\n","sub_path":"스타트와링크.py","file_name":"스타트와링크.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"360708317","text":"import os\n\nimport globalvar\n\n\n# ----------------------------------------------\n# Variables in Linux Machine\n# ----------------------------------------------\n\ngt_setupenv_script = 'setupenv.sh'\n\"\"\"\n.. seealso:: :const:`globalbar.gvt_windows.gt_setupenv_script`\n\"\"\"\n\ngt_mapping_script = 'MAPDRIVES.sh'\n\"\"\"\n.. 
seealso:: :const:`globalbar.gvt_windows.gt_mapping_script`\n\"\"\"\n\ngt_cmd_prefix = './'\n\"\"\"\n.. seealso:: :const:`globalbar.gvt_windows.gt_cmd_prefix`\n\"\"\"\n\ngt_sep = '/'\n\"\"\"\nFile separator in target OS\n\"\"\"\n\n\n# ----------------------------------------------\n# Variables in Linux VMware Machine\n# ----------------------------------------------\n\ngt_vm_rename_script = \"rename.sh\"\n\"\"\"\n.. seealso:: :const:`globalbar.gvt_windows.gt_mapping_script`.\n\"\"\"\n\ngt_vm_rename_script_local = os.path.join(globalvar.g_incoming_local, gt_vm_rename_script)\n\"\"\"\n.. seealso:: :const:`globalbar.gvt_windows.gt_vm_rename_script_local`.\n\"\"\"\n\ngt_vm_rename_script_client = os.path.join(\"/home/lvtest/\", gt_vm_rename_script)\n\"\"\"\n.. seealso:: :const:`globalbar.gvt_windows.gt_vm_rename_script_client`.\n\"\"\"\n\ngt_vm_root = \"/home/lvtest/INCOMING\"\n\"\"\"\nThe working directory on Linux vmware client machine.\n\"\"\"\n\ngt_vm_script_root = \"/mnt/cn-sha-rdfs01/RD/SAST/Installer_Services/Services/GhostAgentVDI/APPDATA/%s/%s/%s/OSID_%s\"\n\"\"\"\nThe dirctory to copy script.ini\n\"\"\"\n","sub_path":"AutoImageScripts/GhostAgentVDI/globalvar/gvt_linux.py","file_name":"gvt_linux.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"352159262","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom datetime import datetime\nfrom urllib import parse\nfrom scrapy.http import Request\nfrom scrapyspiders.items import PythontabItem\nfrom scrapyspiders.utils.common import get_md5\n\n\nclass PythontabSpider(scrapy.Spider):\n name = 'pythontab'\n allowed_domains = ['www.pythontab.com']\n start_urls = ['http://www.pythontab.com']\n categories = {\"pythonjichu\": 1, \"pythonhexinbiancheng\": 2, \"pythonweb\": 3, \"hanshu\": 4, \"pythongui\": 5}\n fetched = ['pythonjichu']\n\n def parse(self, response):\n if response.status == 200:\n for category in self.categories:\n category_url = self.start_urls[0] + '/html/' + category\n yield Request(url=category_url, callback=self.do_parse)\n else:\n print('site error')\n\n def do_parse(self, response):\n # 解析列表页中的所有文章url并交给scrapy下载后并进行解析\n post_urls = response.xpath('//ul[@id=\"catlist\"]//li//@href').extract()\n for post_url in post_urls:\n yield Request(url=parse.urljoin(response.url, post_url), callback=self.parse_detail)\n\n links = response.xpath('//div[@id=\"pages\"]//a[@class=\"a1\"]/@href').extract()\n # 下一页\n if len(links) == 2:\n yield Request(url=parse.urljoin(response.url, links[1]), callback=self.do_parse)\n\n def parse_detail(self, response):\n item = PythontabItem()\n item['url'] = response.url\n item['url_object_id'] = get_md5(response.url)\n item['title'] = response.xpath(\"//h1/text()\").extract_first()\n item['content'] = response.xpath('//div[@class=\"content\"]').extract_first()\n item['create_time'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n # 文章分类\n item['category'] = 0\n for category in self.categories:\n if category in item['url']:\n item['category'] = self.categories[category]\n break\n\n yield item\n","sub_path":"scrapyspiders/scrapyspiders/spiders/pythontab.py","file_name":"pythontab.py","file_ext":"py","file_size_in_byte":1994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"570721521","text":"#!-*-coding:utf-8-*-\nimport os\nimport sys\nimport dlib\nimport urllib.request\nfrom colorama import Fore, Back, Style\n\n__all__ = [\"FaceDetect\"]\n\nROOT_DIR = 
os.path.dirname( __file__ )\n\nclass FaceDetect:\n\n def __init__( self , model_path=False ):\n \"\"\"\n コンストラクタ\n \"\"\"\n\n remote_file = \"http://web.sfc.keio.ac.jp/~t13073si/models/mmod_human_face_detector.dat\"\n\n if model_path:\n detector_path = model_path\n else:\n detector_path = ROOT_DIR + \"/../models/mmod_human_face_detector.dat\"\n\n if not os.path.exists( detector_path ):\n self._message( \"Downloading Detector Model...\" )\n urllib.request.urlretrieve( remote_file ,\"{0}\".format(detector_path))\n self._message( \"Done\" )\n\n self.face_detector = dlib.cnn_face_detection_model_v1( detector_path )\n\n\n def detect( self , img ):\n \"\"\"\n ディテクタ\n \"\"\"\n\n return self.face_detector( img , 0 )\n\n\n def _message( self , text ):\n print( Fore.YELLOW + str(text) + Style.RESET_ALL )\n","sub_path":"detector.py","file_name":"detector.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"516263794","text":"#NETCDF CREATION\n#PCPRR\n\n\nimport numpy as np\nfrom netCDF4 import Dataset\nimport sys\nimport glob\n\n#Locate\nf = Dataset('/temp/alex/rams_256_periodic/RCE_S300-A-2012-01-01-000000-g1.h5', 'r')\n\n#Point to location of glob material\ntest = glob.glob('/temp/alex/rams_256_periodic/*h5')\n \n\n#Variable\npcprr = f.variables['PCPRR'][:] #Surface Precip\nprint ('PCPRR: ' + str(np.shape(pcprr)))\n\n \n#Dimensions\nx = range(256)\ny = range(256)\n\n\n#Number of files (timesteps) \nt = range(len(test)) #range\ntime = len(test) #Number\n\n# average over horizontal domain\n#Pavg = np.apply_over_axes(np.mean, pcprr, [1,2]) # average horizontally at each level (at each time step)\n#Pavg = np.reshape(Pavg, (t,64)) # reshape into (time, levels)\n\n#Create Empty Array to Glob into\npcprr_glob_array = np.empty((time,256,256))\n \n#Globbing Loop\nfor i, file in enumerate (test):\n glob_file = Dataset(file, 'r')\n pcprr_glob_array[i,:,:] = (glob_file.variables['PCPRR'][:,:]) * 86400 #2D - Converted feom kg/m^2/s to mm/day\n \n# WRITE NETCDF FILE\nnc_pcprr = Dataset('Surface_Precip_PCPRR_Avg.nc', 'w', format='NETCDF4')\nnc_pcprr.description = 'This is a file of the horizontally averaged PCPRR at each vertical level and timestep'\n\n# define dimensions\nnc_pcprr.createDimension('614', None) # If \"None\", then time can be appended\nnc_pcprr.createDimension('x', 256)\nnc_pcprr.createDimension('y', 256)\nnc_pcprr.createDimension('time', time)\n\n\n# define variables for netcdf file\n#Name, Format, Dimensions\n#sys.exit()\n\ntime_nc = nc_pcprr.createVariable('time', 'f8', ('time',))\nPavg_nc = nc_pcprr.createVariable('PCPRR', 'f8', ('time','x','y')) # multi-dimensional\n\n# set variable attributes\ntime_nc.setncatts({'units':'hr','long_name':'time',})\nPavg_nc.setncatts({'units':'Kg/m^2/s','long_name':'Surface Precip',})\n\n# data\ntime_nc[:] = t\nPavg_nc[:,:,:] = pcprr_glob_array[:,:,:] \n\n\nnc_pcprr.close()","sub_path":"NetCDFcreate_PCPRR.py","file_name":"NetCDFcreate_PCPRR.py","file_ext":"py","file_size_in_byte":1901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"162389476","text":"import os\nimport traceback\nfrom optparse import make_option\nfrom django.conf import settings\nfrom django.core.management import call_command\nfrom django.core.management.base import BaseCommand, CommandError\nfrom risks.models import AdministrativeDivision, AdministrativeDivisionMappings\n\nimport xlrd\n\n\nclass Command(BaseCommand):\n def 
add_arguments(self, parser):\n parser.add_argument(\n '-c',\n '--commit',\n action='store_true',\n dest='commit',\n default=True,\n help='Commits Changes to the storage.')\n return parser \n\n def handle(self, **options):\n commit = options.get('commit') \n basedir = \"/home/geonode/import_data/countries\"\n allowed_extensions = [\".xlsx\"]\n \n for fname in os.listdir(basedir):\n if fname.endswith(tuple(allowed_extensions)): \n wb = xlrd.open_workbook(filename=os.path.join(basedir, fname))\n sheet = wb.sheet_by_index(0)\n \n print('start importing file {}'.format(fname))\n for row_num in range(1, sheet.nrows):\n parent_code = str(sheet.cell(row_num, 0).value).strip()\n child_code = str(sheet.cell(row_num, 1).value).strip()\n code = str(sheet.cell(row_num, 2).value).strip()\n name = sheet.cell(row_num, 3).value\n\n try:\n parent_adm_div = AdministrativeDivision.objects.get(code=parent_code)\n except AdministrativeDivision.DoesNotExist:\n raise ValueError('No adm unit found with code: {}'.format(parent_code)) \n\n try:\n child_adm_div = AdministrativeDivision.objects.get(code=child_code)\n except AdministrativeDivision.DoesNotExist:\n raise ValueError('No adm unit found with code: {}'.format(child_code)) \n\n adm_mapping, created = AdministrativeDivisionMappings.objects.get_or_create(parent=parent_adm_div, child=child_adm_div)\n updated = AdministrativeDivisionMappings.objects.filter(pk=adm_mapping.id).update(code=code, name=name)\n\n print('imported {} - {}'.format(code, unicode(name).encode('utf8')))\n","sub_path":"risks/management/commands/import_nuts2_mappings.py","file_name":"import_nuts2_mappings.py","file_ext":"py","file_size_in_byte":2380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"222461511","text":"from pymongo import MongoClient\nimport pandas as pd\n\n\nclient = MongoClient()\ndb = client['rfpl_vk_users']\n\ndf_list = []\n\nfor collection_name in db.collection_names(include_system_collections=False):\n df_list.append(pd.DataFrame(list(db[collection_name].find())))\ndf = pd.concat(df_list, ignore_index=True)\nids = df['user_id'].get_values()\n\nprint(len(set(ids)))\n","sub_path":"utils/CountPostsAuthors.py","file_name":"CountPostsAuthors.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"218047072","text":"import requests\nfrom bs4 import BeautifulSoup\n\n\ndef get_today_films():\n url = 'http://www.lasexta.com/programacion/'\n html = requests.get(url).text\n soup = BeautifulSoup(html, 'lxml')\n\n sexta3 = soup.find_all(class_='parrilla')[2]\n hoy = sexta3.find_all(class_='programacion')[0]\n fichas = hoy.find_all(class_='ficha')\n\n return [[ficha.contents[1].text, ficha.contents[3].get('title')] for ficha in fichas]","sub_path":"src/demo1/scrapper_sexta3.py","file_name":"scrapper_sexta3.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"64164131","text":"from kizuna.commands.Command import Command\nfrom kizuna.utils import build_url\nimport config\nfrom kizuna.models.User import User\n\n\nclass LoginCommand(Command):\n def __init__(self, make_session) -> None:\n help_text = \"kizuna login - login to the web interface\"\n\n super().__init__('login', pattern='login', help_text=help_text, is_at=True, db_session_maker=make_session)\n\n def respond(self, slack_client, message, matches):\n send = 
self.send_ephemeral_factory(slack_client, message['channel'], message['user'])\n with self.db_session_scope() as session:\n user = User.get_by_slack_id(session, message['user'])\n\n if not user:\n return send(\"I don't have your users in the db. Prolly run 'kizuna refresh users' and if that still \"\n \"doesn't fix it: Austin fucked up somewhere :^(\")\n\n send(build_url(config.KIZUNA_WEB_URL, '/login', {'auth': user.get_token()}))\n","sub_path":"kizuna/commands/LoginCommand.py","file_name":"LoginCommand.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"80933838","text":"\"\"\"Handles all comment actions for code cloud.\"\"\"\n\n\nclass Comments():\n\t\"\"\"Handles all comment actions for code cloud.\"\"\"\n\n\tdef add_comment_to_pull_request(self, repo_name, pull_request_id, comment, cred_hash):\n\t\t\"\"\"Add a comment to a pull request.\"\"\"\n\t\turl = f'{self.code_cloud_api.branch_api}/{repo_name}/pull-requests/{pull_request_id}/comments'\n\t\tresponse = self.code_cloud_api.post(url=url, json_data={'text':comment}, cred_hash=cred_hash)\n\t\tresponse['data']['repo_name'] = repo_name\n\t\treturn response\n\n\tdef get_activities(self, repo_name, pull_request_id, cred_hash):\n\t\t\"\"\"Get the activity details for a pull request.\"\"\"\n\t\turl = f'{self.code_cloud_api.branch_api}/{repo_name}/pull-requests/{pull_request_id}/activities'\n\t\tresponse = self.code_cloud_api.get(url=url, cred_hash=cred_hash)\n\t\tresponse['data']['repo_name'] = repo_name\n\t\treturn response","sub_path":"devcenter/codecloud/comments.py","file_name":"comments.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"626901965","text":"from tools.load import LoadMatrix\nlm=LoadMatrix()\ntraindat = lm.load_numbers('../data/fm_train_real.dat')\ntestdat = lm.load_numbers('../data/fm_test_real.dat')\n\nparameter_list = [[traindat,testdat,4,False,True],[traindat,testdat,5,False,True]]\n\ndef kernel_poly_modular (fm_train_real=traindat,fm_test_real=testdat,degree=4,inhomogene=False,\n\tuse_normalization=True):\n\tfrom shogun.Features import RealFeatures\n\tfrom shogun.Kernel import PolyKernel\n\n\tfeats_train=RealFeatures(fm_train_real)\n\tfeats_test=RealFeatures(fm_test_real)\n\n\tkernel=PolyKernel(\n\t\tfeats_train, feats_train, degree, inhomogene, use_normalization)\n\n\tkm_train=kernel.get_kernel_matrix()\n\tkernel.init(feats_train, feats_test)\n\tkm_test=kernel.get_kernel_matrix()\n\treturn km_train,km_test,kernel\nif __name__=='__main__':\n\tprint('Poly')\n\tkernel_poly_modular (*parameter_list[0])\n","sub_path":"build/shogun_lib/examples/undocumented/python_modular/kernel_poly_modular.py","file_name":"kernel_poly_modular.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"358936608","text":"import requests\n\nproxy = 'ad240066:Sem900917@172.16.1.117:8080'\n\nproxies = {\n 'http':'http://'+proxy,\n 'https':'https://'+proxy\n}\n\ntry:\n res = requests.get('http://httpbin.org/get',proxies=proxies)\n print(res.text)\nexcept requests.exceptions.ConnectionError as e:\n print('Error:',e.args)\n\n\n\n\n","sub_path":"Agent/sz_05.py","file_name":"sz_05.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"488817756","text":"\n\n#calss 
header\nclass _CULTURED():\n\tdef __init__(self,): \n\t\tself.name = \"CULTURED\"\n\t\tself.definitions = [u'A cultured person has had a good education and knows a lot about art, music, literature, etc.']\n\n\t\tself.parents = []\n\t\tself.children = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'adjectives'\n\n\n\tdef run(self, obj1, obj2):\n\t\tself.jsondata[obj2] = {}\n\t\tself.jsondata[obj2]['properties'] = self.name.lower()\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/adjectives/_cultured.py","file_name":"_cultured.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"310994777","text":"f=open(\"input.txt\",'r')\nline=f.readline().strip()\ndef are_opp(a, b):\n    return (a.lower() == b.lower() and\n            ((a.isupper() and b.islower()) or\n             (a.islower() and b.isupper())))\ndef react(line):\n    buf = []\n    for c in line:\n        if buf and are_opp(c, buf[-1]):\n            buf.pop()\n        else:\n            buf.append(c)\n    return len(buf)\nagents = set([c.lower() for c in line])\nprint(react(line))\nprint(min([react(line.replace(a, '').replace(a.upper(), ''))\n           for a in agents]))","sub_path":"Day05.py","file_name":"Day05.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"505072531","text":"# -*- coding: utf-8 -*- \nfrom django.shortcuts import render, render_to_response\nfrom django.core.urlresolvers import reverse\nfrom django.http import Http404\nfrom django.contrib.auth.decorators import login_required\nfrom django.template import RequestContext\nfrom datetime import datetime\nfrom django.conf import settings\nfrom .forms import DateFilterForm\nfrom .aux import generate_xlsx,fill_db\n\n@login_required\ndef create_xlsx(request):\n    message = ''\n    header = 'Waybill creation'\n\n    if request.method == 'POST':\n        form = DateFilterForm(request.POST)\n        if form.is_valid():\n            from_date = datetime.combine(form.cleaned_data['datestart'], datetime.min.time())\n            to_date = datetime.combine(form.cleaned_data['datefinish'], datetime.min.time())\n            milage = form.cleaned_data['milage']\n            interval = 15\n            records = fill_db()\n            url = generate_xlsx(from_date,to_date,interval,milage) \n            message = \"Waybills created, {0} records added to the database, download the archive: {1}\".format(records,url)\n        else:\n            message = \"The form contains errors\"\n    else:\n        form = DateFilterForm()\n\n    return render_to_response('generic/generic_edit.html', {\n        'header' : header,\n        'message' : message,\n        'form': form,\n        'extend': 'index.html',},\n        context_instance = RequestContext(request)\n    ) ","sub_path":"tsmon/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"440462213","text":"import socket\nfrom typing import Dict, Optional, Sequence\n\nfrom .base import PipelineBase, StatsClientBase\n\n\nclass StreamPipeline(PipelineBase):\n    def _send(self, data: str = \"\") -> None:\n        self._client._after(\"\\n\".join(self._stats), None, None)\n        self._stats.clear()\n\n\nclass StreamClientBase(StatsClientBase):\n    _sock: Optional[socket.socket]\n\n    def connect(self) -> None:\n        raise NotImplementedError()\n\n    def close(self) -> None:\n        if self._sock and hasattr(self._sock, \"close\"):\n            self._sock.close()\n        self._sock = None\n\n    def reconnect(self) -> None:\n        self.close()\n        self.connect()\n\n    def pipeline(self) -> StreamPipeline:\n        return 
StreamPipeline(self)\n\n    def _send(self, data: str) -> None:\n        \"\"\"Send data to statsd.\"\"\"\n        if not self._sock:\n            self.connect()\n        self._do_send(data)\n\n    def _do_send(self, data: str) -> None:\n        self._sock.sendall(data.encode(\"ascii\") + b\"\\n\")  # type: ignore\n\n\nclass TCPStatsClient(StreamClientBase):\n    \"\"\"TCP version of StatsClient.\"\"\"\n\n    def __init__(\n        self,\n        host: str = \"localhost\",\n        port: int = 8125,\n        prefix: Optional[str] = None,\n        timeout: Optional[int] = None,\n        ipv6: bool = False,\n        simple_tags: Optional[Sequence[str]] = None,\n        kv_tags: Optional[Dict[str, str]] = None,\n    ):\n        \"\"\"Create a new client.\"\"\"\n        self._host = host\n        self._port = port\n        self._ipv6 = ipv6\n        self._timeout = timeout\n        self._prefix = prefix\n        self._simple_tags = simple_tags or []\n        self._kv_tags = kv_tags or {}\n        self._sock = None\n\n    def connect(self) -> None:\n        fam = socket.AF_INET6 if self._ipv6 else socket.AF_INET\n        family, _, _, _, addr = socket.getaddrinfo(self._host, self._port, fam, socket.SOCK_STREAM)[0]\n        self._sock = socket.socket(family, socket.SOCK_STREAM)\n        self._sock.settimeout(self._timeout)\n        self._sock.connect(addr)\n\n\nclass UnixSocketStatsClient(StreamClientBase):\n    \"\"\"Unix domain socket version of StatsClient.\"\"\"\n\n    def __init__(\n        self, socket_path: str, prefix: Optional[str] = None, suffix: Optional[str] = None, timeout: Optional[int] = None,\n    ):\n        \"\"\"Create a new client.\"\"\"\n        self._socket_path = socket_path\n        self._timeout = timeout\n        self._prefix = prefix\n        self._suffix = suffix\n        self._sock = None\n\n    def connect(self) -> None:\n        self._sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n        self._sock.settimeout(self._timeout)\n        self._sock.connect(self._socket_path)\n","sub_path":"src/statsd/client/stream.py","file_name":"stream.py","file_ext":"py","file_size_in_byte":2633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"224041765","text":"import cv2\nimport pyautogui\nimport numpy as np\n\ncap = cv2.VideoCapture(0)\n\nsensitivity = 15\nlower_white = np.array([0,0,255-sensitivity])\nupper_white = np.array([255,sensitivity,255])\nprev_y = 0\n\nwhile True:\n    ret, frame = cap.read()\n    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n    mask = cv2.inRange(hsv, lower_white, upper_white)\n    contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\n\n\n    for c in contours:\n        area = cv2.contourArea(c)\n        if area>300:\n            #cv2.drawContours(frame, c, -1, (0,255,0),2)\n            x, y, w, h = cv2.boundingRect(c)\n            cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)\n            if y < prev_y:\n                pyautogui.press('down')\n            prev_y = y\n\n    #cv2.imshow('mask', mask)\n    cv2.imshow('frame', frame)\n    if cv2.waitKey(10) == ord('q'):\n        break\n\ncap.release()\ncv2.destroyAllWindows()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"13643128","text":"# Task 1:\n# Write a script that creates directories dir_1 - dir_9 in the folder\n# from which this script is run.\n# And a second script that removes those folders.\nimport os\nfrom shutil import copyfile\ndef addDir(name='dir',count=1):# create directories\n    try:\n        count=int(count)# try to convert the string to a number\n    except ValueError:# on failure, return False\n        return False\n    dir_names = []# list of directory names\n    if count>1:# more than one folder\n        for i in range(1,count+1):\n            try:\n                os.mkdir(f\"{name}_{i}\")\n            except FileExistsError:# if the folder already exists, skip this iteration\n                continue\n            dir_names.append(f\"{name}_{i}\")\n\n    else:# a single directory\n        try:\n            os.mkdir(f\"{name}\")\n        except FileExistsError:# if the directory exists, return an error message\n            return 'This directory already exists'\n\n    if len(dir_names)==0:# if the list of directory names is empty\n        return f'Folder {name} created'\n    else:# otherwise join the names and return them\n        dir_names = ' , '.join(dir_names)\n        return f'Folders {dir_names} created'\n\n# Task 2:\n# Write a script that lists the folders of the current directory.\ndef get_thisDir():\n    result = []\n    for path in os.listdir(os.getcwd()):# iterate over everything in the current folder\n        if os.path.isdir(path):# if it is a directory\n            result.append(f'Folder:{path}')\n        elif os.path.isfile(path):# if it is a file\n            result.append(f'File:{path}')\n    result ='\\n'.join(result)# join everything into one string\n    return result\n# # Task 3:\n# # Write a script that creates a copy of the file from which this script is run.\n# copy_file = \"copy.py\"\n# if not os.path.exists(copy_file):\n#     copyfile(os.path.basename(__file__),copy_file)\n# else:\n#     for path in os.listdir(os.getcwd()):\n#         if os.path.isfile(path=path):\n#             if os.path.split(path)[1]==copy_file:\n                #os.remove(path)","sub_path":"lesson05/home_work/hw05_easy.py","file_name":"hw05_easy.py","file_ext":"py","file_size_in_byte":2637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"519225950","text":"from rest_framework.generics import ListCreateAPIView, RetrieveAPIView\n\nfrom .models import UrlShortener\nfrom .serializers import UrlShortenerSerializer, UrlShortenerDetailSerializer\n\n\nclass UrlShortenerApiView(ListCreateAPIView):\n    \"\"\"\n\n    get:\n    :returns - list of target urls and their shortened urls\n    post:\n    :param - target url\n    :returns - shortened url generated from the target url\n\n    \"\"\"\n\n    # serializer_class = ShortnerSerializer\n    def get_serializer_class(self):\n        if self.request.method == \"POST\":\n            return UrlShortenerSerializer\n        else:\n            return UrlShortenerDetailSerializer\n    queryset = UrlShortener.objects.all()\n\n\nclass ShortToDetailUrlApiView(RetrieveAPIView):\n\n    \"\"\"\n\n    get:\n    :arg - short_url\n    :return - detail of the received short url code, resolved to the target url\n\n    \"\"\"\n\n    lookup_field = 'short_url'\n    serializer_class = UrlShortenerSerializer\n    queryset = UrlShortener.objects.all()\n","sub_path":"application/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"604317648","text":"from tornado.web import HTTPError\n\nimport brightmd.metrics as metrics\nfrom brightmd.controllers import UserController\nfrom brightmd.handlers import BaseHandler\nfrom brightmd.models import User\nfrom brightmd.views import JSONView, PaginatedJSONView\n\n\nclass UsersHandler(BaseHandler):\n    def initialize(self):\n        self.controller = UserController()\n\n    @metrics.track_latency()\n    async def get(self):\n        \"\"\"\n        .. http:get:: /users\n\n        Route for fetching metadata associated with all Users. Supports\n        cursor-based pagination.\n\n        **Example request**:\n\n        .. sourcecode:: http\n\n            GET /users HTTP/1.1\n\n        **Example response**:\n\n        .. 
sourcecode:: http\n\n HTTP/1.1 200 OK\n Vary: Accept\n Content-Type: text/javascript\n\n\n {\n \"next_cursor\": null,\n \"results\": [\n {\n \"first_name\": \"Casey\",\n \"last_name\": \"Parks\",\n \"zip_code\": \"97232\",\n \"email_address\": \"casey.parks@mail.com\",\n \"id\": 1\n }\n ]\n }\n\n :param cursor: String. The starting point of paginated result set.\n This should be 'next_cursor' value of previous request.\n :param limit: Integer. The maximum number of results in paginated result\n set. Must by between 1 and 1000. Default is 100.\n :param order: String. Order in which results are sorted. This value is\n ignored if a cursor value is provided. Supports `asc` and\n `desc`, with default value `desc`.\n\n :statuscode 200:\n Success.\n :statuscode 400: Invalid cursor value. Should only use 'next_cursor'\n value of previous request.\n :statuscode 400: Invalid limit value. Must be an integer between 1 and\n 1000.\n \"\"\"\n limit, cursor = self.get_pagination_params()\n instances, next_cursor = await self.controller.read_subset(\n limit,\n cursor\n )\n view = PaginatedJSONView(instances, next_cursor)\n self.write(view.render())\n\n @metrics.track_latency()\n async def post(self):\n \"\"\"\n .. http:post:: /users\n\n Route for generating a new User, and assigning metadata associated with\n it.\n\n **Example request**:\n\n .. sourcecode:: http\n\n POST /users HTTP/1.1\n first_name=Casey&\n last_name=Parks&\n zip_code=97232&\n email_address=casey.parks@mail.com\n\n **Example response**:\n\n .. sourcecode:: http\n\n HTTP/1.1 200 OK\n Vary: Accept\n Content-Type: text/javascript\n\n\n {\n \"first_name\": \"Casey\",\n \"last_name\": \"Parks\",\n \"zip_code\": \"97232\",\n \"email_address\": \"casey.parks@mail.com\",\n \"id\": 1\n }\n\n :param first_name: String. First name of user. (limit 1000 characters)\n :param last_name: String. Last name of user. (limit 1000 characters)\n :param zip_code: String. Five or nine digit zipcode, formatted as\n '00000' or '00000-0000'.\n :param email_address: String. 
Properly formatted email address of user.\n\n        :statuscode 200: Success.\n        :statuscode 400: Missing parameter.\n        :statuscode 400: Character length exceeded for name.\n        :statuscode 400: Improperly formatted zip code.\n        :statuscode 400: Improperly formatted email address.\n        \"\"\"\n        data = self.generate_data(\n            invalid_keys=('id',),\n            required_keys=(\n                'first_name',\n                'last_name',\n                'zip_code',\n                'email_address'\n            )\n        )\n        data['id'] = 0  # user ID assigned by database.\n        user = User(**data)\n        await self.controller.create(user)\n        view = JSONView(user)\n        self.write(view.render())\n","sub_path":"src/brightmd/handlers/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":4210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"544465164","text":"from django.shortcuts import render\nfrom django.http import HttpResponseRedirect\nfrom django.core.urlresolvers import reverse\nfrom .forms import SongForm\n\n# Create your views here.\ndef IndexView(request):\n    if request.method == 'POST':\n        form = SongForm(data=request.POST)\n        if form.is_valid():\n            Song = form.save(commit=False)\n            Song.save()\n            return HttpResponseRedirect(reverse('playlist:index'))\n    else:\n        form = SongForm()\n\n    context = {'form': form}\n    return render(request, 'playlist/index.html', context)\n","sub_path":"playlist/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"176067495","text":"from gpiozero import LEDBoard, MotionSensor\nfrom gpiozero.pins.pigpio import PiGPIOFactory\nfrom signal import pause\n\nips = ['192.168.1.3', '192.168.1.4', '192.168.1.5', '192.168.1.6']\nremotes = [PiGPIOFactory(host=ip) for ip in ips]\n\nleds = LEDBoard(2, 3, 4, 5)  # leds on this pi\nsensors = [MotionSensor(17, pin_factory=r) for r in remotes]  # remote sensors\n\nfor led, sensor in zip(leds, sensors):\n    led.source = sensor.values\n\npause()\n","sub_path":"docs/examples/multi_room_motion_alert.py","file_name":"multi_room_motion_alert.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"75679129","text":"\"\"\"Product of Riemannian metrics.\"\"\"\n\nimport geomstats.backend as gs\nfrom geomstats.geometry.riemannian_metric import RiemannianMetric\n\nEPSILON = 1e-5\n\n\n# TODO(nina): unit tests\n\nclass ProductRiemannianMetric(RiemannianMetric):\n    \"\"\"Class for product of Riemannian metrics.\"\"\"\n\n    def __init__(self, metrics):\n        self.n_metrics = len(metrics)\n        dimensions = [metric.dimension for metric in metrics]\n        signatures = [metric.signature for metric in metrics]\n\n        self.metrics = metrics\n        self.dimensions = dimensions\n        self.signatures = signatures\n\n        sig_0 = sum([sig[0] for sig in signatures])\n        sig_1 = sum([sig[1] for sig in signatures])\n        sig_2 = sum([sig[2] for sig in signatures])\n        super(ProductRiemannianMetric, self).__init__(\n            dimension=sum(dimensions),\n            signature=(sig_0, sig_1, sig_2))\n\n    def inner_product_matrix(self, base_point=None):\n        \"\"\"Compute matrix of the corresponding inner product.\n\n        Matrix of the inner product defined by the Riemannian metric\n        at point base_point of the manifold.\n\n        Parameters\n        ----------\n        base_point\n\n        Returns\n        -------\n        matrix\n        \"\"\"\n        matrix = gs.zeros([self.dimension, self.dimension])\n        b = self.dimensions[0]\n        # self.metrics is a list, so index the first factor's metric\n        matrix[:b, :b] = self.metrics[0].inner_product_matrix(base_point[0])\n        dim_current = 0\n\n        for i in range(self.n_metrics - 1):\n            dim_current += self.dimensions[i]\n            dim_next = self.dimensions[i + 1]\n            a = dim_current\n            b = dim_current + dim_next\n            matrix_next = self.metrics[i + 1].inner_product_matrix(base_point[i + 1])\n            matrix[a:b, a:b] = matrix_next\n\n        return matrix\n\n    def inner_product(self, tangent_vec_a, tangent_vec_b, base_point=None):\n        \"\"\"Compute inner product between two tangent-space vectors at a base point.\n\n        Inner product defined by the Riemannian metric at point `base_point`\n        between tangent vectors `tangent_vec_a` and `tangent_vec_b`.\n\n        Parameters\n        ----------\n        tangent_vec_a\n        tangent_vec_b\n        base_point\n\n        Returns\n        -------\n        inner_product\n        \"\"\"\n        if base_point is None:\n            base_point = [None, ] * self.n_metrics\n\n        inner_products = [self.metrics[i].inner_product(tangent_vec_a[i],\n                                                        tangent_vec_b[i],\n                                                        base_point[i])\n                          for i in range(self.n_metrics)]\n        inner_product = gs.sum(inner_products)\n\n        return inner_product\n\n    def exp(self, tangent_vec, base_point=None):\n        \"\"\"Compute Riemannian exponential of tangent vector at base point.\n\n        Riemannian exponential at point base_point\n        of tangent vector tangent_vec wrt the Riemannian metric.\n\n        Parameters\n        ----------\n        tangent_vec\n        base_point\n\n        Returns\n        -------\n        exp\n        \"\"\"\n        if base_point is None:\n            base_point = [None, ] * self.n_metrics\n\n        exp = gs.asarray([self.metrics[i].exp(tangent_vec[i],\n                                              base_point[i])\n                          for i in range(self.n_metrics)])\n        return exp\n\n    def log(self, point, base_point=None):\n        \"\"\"Compute Riemannian logarithm of a point wrt a base point.\n\n        Parameters\n        ----------\n        point\n        base_point\n\n        Returns\n        -------\n        log\n        \"\"\"\n        if base_point is None:\n            base_point = [None, ] * self.n_metrics\n\n        log = gs.asarray([self.metrics[i].log(point[i],\n                                              base_point[i])\n                          for i in range(self.n_metrics)])\n        return log\n\n    def squared_dist(self, point_a, point_b):\n        \"\"\"Compute squared geodesic distance between two points.\n\n        Parameters\n        ----------\n        point_a: array-like, shape=[n_samples, dimension]\n                             or shape=[1, dimension]\n        point_b: array-like, shape=[n_samples, dimension]\n                             or shape=[1, dimension]\n\n        Returns\n        -------\n        sum_sq_distances\n        \"\"\"\n        sq_distances = gs.asarray(\n            [self.metrics[i].squared_dist(\n                point_a[i], point_b[i])\n             for i in range(self.n_metrics)])\n\n        return sum(sq_distances)\n","sub_path":"geomstats/geometry/product_riemannian_metric.py","file_name":"product_riemannian_metric.py","file_ext":"py","file_size_in_byte":4496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"129731060","text":"\"\"\"\nauthor: Yernat M. Assylbekov\nemail: yernat.assylbekov@gmail.com\ndate: 01/01/2020\n\"\"\"\n\n\nimport tensorflow as tf\nfrom tensorflow.keras.layers import Input, Dense, BatchNormalization, Reshape, Conv2DTranspose, Conv2D, LeakyReLU, Flatten, Dropout\nfrom tensorflow.keras import Model\nfrom tensorflow.keras.optimizers import Adam\n\n\ndef loss_generator(logit_fake):\n    \"\"\"\n    loss function for generator.\n    \"\"\"\n    return tf.math.negative(tf.math.reduce_mean(tf.math.log(logit_fake)))\n\n\ndef loss_discriminator(logit_real, logit_fake):\n    \"\"\"\n    loss function for discriminator.\n    \"\"\"\n    loss_real = tf.math.negative(tf.math.reduce_mean(tf.math.log(logit_real)))\n    loss_fake = tf.math.negative(tf.math.reduce_mean(tf.math.log(1. 
- logit_fake)))\n return loss_real + loss_fake\n\n\ndef Generator(output_channels, noise_size):\n \"\"\"\n model for generator.\n \"\"\"\n\n # setup input\n X = Input(shape=noise_size)\n\n # project to 4x4\n Y = Dense(units=4*4*256)(X)\n Y = LeakyReLU()(Y)\n Y = BatchNormalization()(Y)\n Y = Reshape(target_shape=(4, 4, 256))(Y)\n\n # map to 8x8\n Y = Conv2DTranspose(filters=128, kernel_size=5, strides=2, padding='same', activation='relu')(Y)\n Y = BatchNormalization()(Y)\n\n # map to 16x16\n Y = Conv2DTranspose(filters=64, kernel_size=5, strides=2, padding='same', activation='relu')(Y)\n Y = BatchNormalization()(Y)\n\n # map to 32x32\n Y = Conv2DTranspose(filters=32, kernel_size=5, strides=2, padding='same', activation='relu')(Y)\n Y = BatchNormalization()(Y)\n\n # map to 64x64\n Y = Conv2DTranspose(filters=output_channels, kernel_size=5, strides=2, padding='same', activation='sigmoid')(Y)\n\n model = Model(inputs=X, outputs=Y)\n\n return model\n\ndef Discriminator(input_channels):\n \"\"\"\n model for discriminator.\n \"\"\"\n\n # setup input\n X = Input(shape=(64, 64, input_channels))\n\n # map to 32x32\n Y = Conv2D(filters=32, kernel_size=5, strides=2, padding='same')(X) # , kernel_regularizer=l2(l=10.), bias_regularizer=l2(l=10.)\n Y = LeakyReLU()(Y)\n Y = BatchNormalization()(Y)\n Y = Dropout(0.4)(Y)\n\n # map to 16x16\n Y = Conv2D(filters=64, kernel_size=5, strides=2, padding='same')(Y)\n Y = LeakyReLU()(Y)\n Y = BatchNormalization()(Y)\n Y = Dropout(0.4)(Y)\n\n # map to 8x8\n Y = Conv2D(filters=128, kernel_size=5, strides=2, padding='same')(Y)\n Y = LeakyReLU()(Y)\n Y = BatchNormalization()(Y)\n Y = Dropout(0.4)(Y)\n\n # map to 4x4\n Y = Conv2D(filters=256, kernel_size=5, strides=2, padding='same')(Y)\n Y = LeakyReLU()(Y)\n Y = BatchNormalization()(Y)\n Y = Dropout(0.4)(Y)\n\n Y = Flatten()(Y)\n Y = Dense(units=1, activation='sigmoid')(Y)\n\n model = Model(inputs=X, outputs=Y)\n\n return model\n\ndef create_generator(channels, noise_size, learning_rate, beta_1):\n \"\"\"\n creates generator, its optimizer (Adam) and checkpoint.\n \"\"\"\n generator = Generator(output_channels=channels, noise_size=noise_size)\n generator_optimizer = Adam(lr=learning_rate, beta_1=beta_1)\n generator_checkpoint = tf.train.Checkpoint(optimizer=generator_optimizer, model=generator)\n\n return generator, generator_optimizer, generator_checkpoint\n\ndef create_discriminator(channels, learning_rate, beta_1):\n \"\"\"\n creates discriminator, its optimizer (Adam) and checkpoint.\n \"\"\"\n discriminator = Discriminator(input_channels=channels)\n discriminator_optimizer = Adam(lr=learning_rate, beta_1=beta_1)\n discriminator_checkpoint = tf.train.Checkpoint(optimizer=discriminator_optimizer, model=discriminator)\n\n return discriminator, discriminator_optimizer, discriminator_checkpoint\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"40323598","text":"#!/usr/bin/env python\n\n# Copyright (c) 2014-2018 Michael Hirsch, Ph.D.\n#\n# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:\n# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.\n# 2. 
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\ninstall_requires = ['six','python-dateutil','pytz']\ntests_require = ['pytest','coveralls','pyproj']\n# %%\nfrom setuptools import setup,find_packages\n\nsetup(name='pymap3d',\n packages=find_packages(),\n version = '1.6.3',\n description='pure Python coordinate conversions, following convention of several popular Matlab routines.',\n long_description=open('README.rst').read(),\n author = 'Michael Hirsch, Ph.D.',\n url = 'https://github.com/scivision/pymap3d',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 2.6',\n 'Topic :: Scientific/Engineering :: GIS',\n ],\n install_requires=install_requires,\n tests_require=tests_require,\n extras_require={'tests':tests_require,\n 'full':['numpy','astropy']},\n python_requires='>=2.6', \n\t )\n\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"354033331","text":"#! 
/usr/bin/env python\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# calculates the autocorrelation function of a time series\n# uses a top hat function as the input time series\n# and plots the autocorrelation function, which will be\n# a triangle function\n#\n# https://en.wikipedia.org/wiki/Autocorrelation#Estimation\n\ndef correlate(s1, s2=None, ax=None):\n    \"\"\"\n    parameters:\n    - s1 : signal 1\n    - s2 : signal 2, if None then calculate autocorrelation of s1\n    - ax : where to plot\n    \n    return:\n    - corr : correlation array\n    \"\"\"\n    # subtract the mean of the time series\n    y1 = s1 - s1.mean()\n\n    if s2 is not None:\n        y2 = s2 - s2.mean()\n\n    # correlate\n    if s2 is None:\n        corr = np.correlate(y1, y1, mode=\"full\")\n    else:\n        corr = np.correlate(y1, y2, mode=\"full\")\n\n    # take only the second half of the time series because\n    # correlate returns the autocorrelation from a negative time\n    # and not zero (integer division keeps the index valid in Python 3)\n    corr = corr[corr.size // 2:]\n\n    # normalize by the variance\n    corr /= (s1.var() * np.arange(s1.size, 0, -1))\n\n    if ax is not None:\n        ax.plot(corr)\n\n    return corr\n\n# create signal\nn = 300\nx = np.arange(n)\ns2 = np.random.normal(0, 1, size=n)\ns1 = np.sin(x * np.pi / 10) + np.sin(x * np.pi / 23) + s2\n\n\ncorr1 = correlate(s1)\ncorr2 = correlate(s2)\n\n# get sample times for plotting\nt = np.arange(n // 2)\n\n# plot\nfig, ax = plt.subplots()\nax.plot(t, corr1[n // 2:])\nax.plot(t, corr2[n // 2:])\nax.set_ylabel(\"Autocorrelation Function\")\nax.set_xlabel(\"Time (s)\")\nplt.show()","sub_path":"sviewer/autocorr.py","file_name":"autocorr.py","file_ext":"py","file_size_in_byte":1562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"182732865","text":"\n# Multiply divisor by n quotient and return the result\ndef multiply(divisor, quotient):\n    divisorList = list(map(int, divisor))\n    result = carry = 0\n    resultStr = \"\"\n    dividend = []\n    properResultStr = \"\"\n    for num in divisorList[::-1] + [-1]:\n        if num == -1:  # sentinel marks the end of the digits\n            if carry >= 1:\n                dividend.append(carry)\n            break\n        result = (quotient * num) + carry\n        resultStr = str(result)\n        if result >= 10:\n            carry = result // 10\n            result = int(resultStr[-1])\n        elif result < 10:\n            carry = 0\n            result = int(resultStr[0])\n        dividend.append(result)\n    #properResultStr = \"\".join(str(x) for x in currentDividend)\n    #print(properResultStr[::-1])\n    return dividend[::-1]\n\n# receives lists of divisor and dividend for easier processing\n# top is initial dividend (at minus new dividend result)\n# bottom is new dividend\ndef substraction(top, bottom):\n    top = top[0:len(bottom)]\n    top = top[::-1]\n    bottom = bottom[::-1]\n    result = carry = 0\n    subResult = []\n    #print(\"top\", top)\n    #print(\"bottom\", bottom)\n\n    for i in range(len(top)):\n        if top[i] >= bottom[i]:\n            result = top[i] - (bottom[i] + carry)\n            subResult.append(result)\n            carry = 0\n        elif top[i] < bottom[i]:\n            result = (top[i] + 10) - (bottom[i] + carry)\n            subResult.append(result)\n            carry = 1\n\n    # need to make sure last one is not a zero, if is remove\n    # might need to do a for loop here in case of more zero's\n    if subResult[-1] == 0:\n        subResult.pop(-1)\n    # and reverse to return correct result\n    #print(subResult[::-1])\n    return subResult[::-1]\n\ndef divide(dividend,divisor):\n    divisorList = list(map(int, divisor))\n    dividendList = list(map(int, dividend))\n    currentDividend = []\n    quotient = []\n    subtractionResult = []\n    quotientStr = \"\"\n    remainderStr = \"\"\n\n    # Find start and initial quotient\n    if divisorList[0] > dividendList[0]:\n        start = 
len(divisor) + 1\n        quotient.append(int(dividend[0:2]) // divisorList[0])\n    # else start at divisor length\n    else:\n        start = len(divisor)\n        quotient.append(dividendList[0] // divisorList[0])\n    #print(start, quotient)\n\n    currentDividend = multiply(divisor, quotient[0])\n    subtractionResult = substraction(dividendList, currentDividend)\n\n    # Loop through the rest\n    for i in range(len(dividend) - (start)):\n        # check if MLN(Divisor) is > MLN(Dividend)\n        # if it is start at divisor length + 1\n        if divisorList[0] > subtractionResult[0]:\n            currentNum = ''.join(str(subtractionResult[i]) for i in range(0, 2))\n            quotient.append( int(currentNum) // divisorList[0])\n        # else start at divisor length\n        else:\n            quotient.append(subtractionResult[0] // divisorList[0])\n        #print(quotient)\n\n        currentDividend = multiply(divisor, quotient[i + 1])\n        subtractionResult.append(dividendList[start + i])\n        subtractionResult = substraction(subtractionResult, currentDividend)\n\n    quotientStr = ''.join(str(q) for q in quotient)\n    remainderStr = ''.join(str(r) for r in subtractionResult)\n    #remainderStr = [s.lstrip(\"0\") for s in remainderStr]\n\n    print(\"Quotient = \", quotientStr)\n    print(\"Remainder = \", remainderStr)\n\n    return quotientStr, remainderStr\n\n#divide(\"1786076652\", \"4031214\")\n#divide(\"708786169050364264498483629949124333050732544381974635927181624935149734520700893406021846629015886556979336613266067831305308408103158793977395210\", \"7940654497315444076393326654563424600027591749183996881831501175073708180868620549933284\")\ndivide(\"981866229000\", \"125300\")\n#print(multiply(\"4031214\", 4))\n#substraction(17860766)","sub_path":"out/production/Code-Wars-Python/LongDivision.py","file_name":"LongDivision.py","file_ext":"py","file_size_in_byte":3768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"259051392","text":"# Design a function that determines whether a matrix contains a path covering all characters of a given string.\n# A path may start from any cell in the matrix, and each step may move one cell left, right, up or down.\n# Once a path has passed through a cell, it may not enter that cell again.\n# For example, the 3 x 4 matrix a b c e s f c s a d e e contains a path for the string \"bcced\", but the matrix contains no path for \"abcb\",\n# because after the first character b occupies the cell in row one, column two, the path cannot enter that cell again.\n\n\n\"\"\"\nApproach:\n    1. Backtracking:\n    2.\n\n\"\"\"\n\n# -*- coding:utf-8 -*-\ndef BFS(matrix, row, col, path, visited):\n    if row < 0 or row >= len(matrix) or col < 0 or col >= len(matrix[0]) or visited[row][col]:  # check every possible out-of-bounds case\n        return False\n    if path[0] == matrix[row][col]:  # found a character matching the first one in the path\n        if len(path) == 1:  # in case only one character needs to be found\n            return True\n        visited[row][col] = 1  # set the visited flag to 1\n        if BFS(matrix, row + 1, col, path[1:], visited) or \\\n                BFS(matrix, row - 1, col, path[1:], visited) or \\\n                BFS(matrix, row, col - 1, path[1:], visited) or \\\n                BFS(matrix, row, col + 1, path[1:], visited):\n            return True\n        return False\n    else:\n        return False\n\n\nclass Solution:\n    def hasPath(self, matrix, rows, cols, path):\n        # write code here\n        array = list(matrix)\n        array = [array[i * cols:(i + 1) * cols] for i in range(rows)]  # turn array into a matrix\n        for i in range(rows):\n            for j in range(cols):\n                visited = [[0] * len(array[0]) for _ in range(len(array))]  # visited: freshly created 'visited' flags\n                if BFS(array, i, j, list(path), visited):\n                    return True\n        return False\n\nprint(Solution().hasPath(\"ABCESFCSADEE\",3,4,\"BCCED\"))","sub_path":"剑指offer/065.矩阵中的路径.py","file_name":"065.矩阵中的路径.py","file_ext":"py","file_size_in_byte":2000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"418045673","text":"import math\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport 
torch.nn.functional as F\nimport torch.nn.utils.spectral_norm as SpectralNorm\n\ndef get_norm(norm_type, size):\n\tif(norm_type == 'batchnorm'):\n\t\treturn nn.BatchNorm2d(size)\n\telif(norm_type == 'instancenorm'):\n\t\treturn nn.InstanceNorm2d(size)\n\nclass ConvBlock(nn.Module):\n\tdef __init__(self, ni, no, ks, stride, pad = None, use_bn = True, norm_type = 'batchnorm'):\n\t\tsuper(ConvBlock, self).__init__()\n\t\tself.use_bn = use_bn\n\t\tif(pad == None):\n\t\t\tpad = ks // 2 // stride\n\t\tself.conv = nn.Conv2d(ni, no, ks, stride, pad, bias = False)\n\t\tif(self.use_bn == True):\n\t\t\tif(norm_type == 'batchnorm'):\n\t\t\t\tself.bn = nn.BatchNorm2d(no)\n\t\t\telif(norm_type == 'instancenorm'):\n\t\t\t\tself.bn = nn.InstanceNorm2d(no)\n\t\tself.relu = nn.LeakyReLU(0.2, inplace = True)\n\n\tdef forward(self, x):\n\t\tout = self.conv(x)\n\t\tif(self.use_bn == True):\n\t\t\tout = self.bn(out)\n\t\tout = self.relu(out)\n\t\treturn out\n\nclass DeConvBlock(nn.Module):\n\tdef __init__(self, ni, no, ks, stride, pad = None, output_pad = 0, use_bn = True, norm_type = 'batchnorm'):\n\t\tsuper(DeConvBlock, self).__init__()\n\t\tself.use_bn = use_bn\n\t\tif(pad == None):\n\t\t\tpad = ks // 2 // stride\n\t\tself.deconv = nn.ConvTranspose2d(ni, no, ks, stride, pad, output_padding = output_pad, bias = False)\n\t\tif(self.use_bn == True):\n\t\t\tif(norm_type == 'batchnorm'):\n\t\t\t\tself.bn = nn.BatchNorm2d(no)\n\t\t\telif(norm_type == 'instancenorm'):\n\t\t\t\tself.bn = nn.InstanceNorm2d(no)\n\t\tself.relu = nn.ReLU(inplace = True)\n\n\tdef forward(self, x):\n\t\tout = self.deconv(x)\n\t\tif(self.use_bn == True):\n\t\t\tout = self.bn(out)\n\t\tout = self.relu(out)\n\t\treturn out\n\nclass SelfAttention(nn.Module):\n\tdef __init__(self, ni):\n\t\tsuper(SelfAttention, self).__init__()\n\t\tself.ni = ni\n\t\tself.f = nn.Conv2d(self.ni, self.ni//8, 1, 1, 0)\n\t\tself.g = nn.Conv2d(self.ni, self.ni//8, 1, 1, 0)\n\t\tself.h = nn.Conv2d(self.ni, self.ni, 1, 1, 0)\n\t\tself.softmax = nn.Softmax(dim = -1)\n\t\tself.alpha = nn.Parameter(torch.tensor(0.0))\n\n\tdef forward(self, x):\n\t\t# x : (bs, ni, sz, sz)\n\t\tf_out = self.f(x)\n\t\t# (bs, ni // 8, sz, sz)\n\t\tf_out = f_out.view(f_out.size(0), self.ni//8, -1)\n\t\t# (bs, ni // 8, sz * sz)\n\t\tf_out = f_out.permute(0, 2, 1)\n\t\t# (bs, sz * sz, ni // 8)\n\n\t\t# x : (bs, ni, sz, sz)\n\t\tg_out = self.g(x)\n\t\t# (bs, ni // 8, sz, sz)\n\t\tg_out = g_out.view(g_out.size(0), self.ni//8, -1)\n\t\t# (bs, ni // 8, sz * sz)\n\n\t\t# x : (bs, ni, sz, sz)\n\t\th_out = self.h(x)\n\t\t# (bs, ni, sz, sz)\n\t\th_out = h_out.view(h_out.size(0), self.ni, -1)\n\t\t# (bs, ni, sz * sz)\n\n\t\tf_g_mult = torch.bmm(f_out, g_out)\n\t\t# (bs, sz * sz, sz * sz)\n\t\tf_g_mult = self.softmax(f_g_mult)\n\t\t# (bs, sz * sz, sz * sz)\n\t\tf_g_h_mult = torch.bmm(h_out, f_g_mult)\n\t\t# (bs, ni, sz * sz)\n\t\tf_g_h_mult = f_g_h_mult.view(*x.shape)\n\t\t# (bs, ni, sz, sz)\n\n\t\tout = self.alpha * f_g_h_mult + x\n\t\t# (bs, ni, sz, sz)\n\n\t\treturn out\n\n# U-Net 256x256 Generator\nclass UNet_G_256x256(nn.Module):\n\tdef __init__(self, ic, oc, use_f = True, norm_type = 'batchnorm'):\n\t\tsuper(UNet_G_256x256, self).__init__()\n\t\tself.ic = ic\n\t\tself.oc = oc\n\t\tself.use_f = use_f\n\t\t\n\t\tself.leaky_relu = nn.LeakyReLU(0.2, inplace = True)\n\t\tself.relu = nn.ReLU(inplace = True)\n\n\t\tself.enc_conv1 = nn.Conv2d(ic, 64, 4, 2, 1, bias = False)\n\t\tself.enc_bn1 = None\n\t\tself.enc_conv2 = nn.Conv2d(64, 128, 4, 2, 1, bias = False)\n\t\tself.enc_bn2 = get_norm(norm_type, 
128)\n\t\tself.enc_conv3 = nn.Conv2d(128, 256, 4, 2, 1, bias = False)\n\t\tself.enc_bn3 = get_norm(norm_type, 256)\n\t\tself.enc_conv4 = nn.Conv2d(256, 512, 4, 2, 1, bias = False)\n\t\tself.enc_bn4 = get_norm(norm_type, 512)\n\t\tself.enc_conv5 = nn.Conv2d(512, 512, 4, 2, 1, bias = False)\n\t\tself.enc_bn5 = get_norm(norm_type, 512)\n\t\tself.enc_conv6 = nn.Conv2d(512, 512, 4, 2, 1, bias = False)\n\t\tself.enc_bn6 = get_norm(norm_type, 512)\n\t\tself.enc_conv7 = nn.Conv2d(512, 512, 4, 2, 1, bias = False)\n\t\tself.enc_bn7 = get_norm(norm_type, 512)\n\t\tself.enc_conv8 = nn.Conv2d(512, 512, 4, 2, 1, bias = False)\n\n\t\tself.dec_conv1 = nn.ConvTranspose2d(512, 512, 4, 2, 1, bias = False)\n\t\tself.dec_bn1 = get_norm(norm_type, 512)\n\t\tself.dec_conv2 = nn.ConvTranspose2d(1024, 512, 4, 2, 1, bias = False)\n\t\tself.dec_bn2 = get_norm(norm_type, 512)\n\t\tself.dec_conv3 = nn.ConvTranspose2d(1024, 512, 4, 2, 1, bias = False)\n\t\tself.dec_bn3 = get_norm(norm_type, 512)\n\t\tself.dec_conv4 = nn.ConvTranspose2d(1024, 512, 4, 2, 1, bias = False)\n\t\tself.dec_bn4 = get_norm(norm_type, 512)\n\t\tself.dec_conv5 = nn.ConvTranspose2d(1024, 256, 4, 2, 1, bias = False)\n\t\tself.dec_bn5 = get_norm(norm_type, 256)\n\t\tself.dec_conv6 = nn.ConvTranspose2d(512, 128, 4, 2, 1, bias = False)\n\t\tself.dec_bn6 = get_norm(norm_type, 128)\n\t\tself.dec_conv7 = nn.ConvTranspose2d(256, 64, 4, 2, 1, bias = False)\n\t\tself.dec_bn7 = get_norm(norm_type, 64)\n\t\tself.dec_conv8 = nn.ConvTranspose2d(128, oc, 4, 2, 1, bias = False)\n\n\t\tself.tanh = nn.Tanh()\n\t\tself.dropout = nn.Dropout()\n\n\t\tfor m in self.modules():\n\t\t\tif(isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d)):\n\t\t\t\tm.weight.data.normal_(0.0, 0.02)\n\t\t\t\tif(m.bias is not None):\n\t\t\t\t\tm.bias.data.zero_()\n\t\n\tdef forward(self, x):\n\t\t# (bs, nc, 256, 256)\n\t\ten1 = self.enc_conv1(x)\n\t\t# (bs, 64, 128, 128)\n\t\ten2 = self.enc_bn2(self.enc_conv2(self.leaky_relu(en1)))\n\t\t# (bs, 128, 64, 64)\n\t\ten3 = self.enc_bn3(self.enc_conv3(self.leaky_relu(en2)))\n\t\t# (bs, 256, 32, 32)\n\t\ten4 = self.enc_bn4(self.enc_conv4(self.leaky_relu(en3)))\n\t\t# (bs, 512, 16, 16)\n\t\ten5 = self.enc_bn5(self.enc_conv5(self.leaky_relu(en4)))\n\t\t# (bs, 512, 8, 8)\n\t\ten6 = self.enc_bn6(self.enc_conv6(self.leaky_relu(en5)))\n\t\t# (bs, 512, 4, 4)\n\t\ten7 = self.enc_bn7(self.enc_conv7(self.leaky_relu(en6)))\n\t\t# (bs, 512, 2, 2)\n\t\ten8 = self.enc_conv8(self.leaky_relu(en7))\n\t\t# (bs, 512, 1, 1)\n\t\tif(self.use_f):\n\t\t\tde8 = F.dropout(self.dec_bn1(self.dec_conv1(self.relu(en8))))\n\t\t\t# (bs, 512, 2, 2)\n\t\t\tde7 = F.dropout(self.dec_bn2(self.dec_conv2(self.relu(torch.cat([de8, en7], 1)))))\n\t\t\t# (bs, 512, 4, 4)\n\t\t\tde6 = F.dropout(self.dec_bn3(self.dec_conv3(self.relu(torch.cat([de7, en6], 1)))))\n\t\t\t# (bs, 512, 8, 8)\n\t\telse:\n\t\t\tde8 = self.dropout(self.dec_bn1(self.dec_conv1(self.relu(en8))))\n\t\t\t# (bs, 512, 2, 2)\n\t\t\tde7 = self.dropout(self.dec_bn2(self.dec_conv2(self.relu(torch.cat([de8, en7], 1)))))\n\t\t\t# (bs, 512, 4, 4)\n\t\t\tde6 = self.dropout(self.dec_bn3(self.dec_conv3(self.relu(torch.cat([de7, en6], 1)))))\n\t\t\t# (bs, 512, 8, 8)\n\t\tde5 = self.dec_bn4(self.dec_conv4(self.relu(torch.cat([de6, en5], 1))))\n\t\t# (bs, 512, 16, 16)\n\t\tde4 = self.dec_bn5(self.dec_conv5(self.relu(torch.cat([de5, en4], 1))))\n\t\t# (bs, 256, 32, 32)\n\t\tde3 = self.dec_bn6(self.dec_conv6(self.relu(torch.cat([de4, en3], 1))))\n\t\t# (bs, 128, 64, 64)\n\t\tde2 = 
self.dec_bn7(self.dec_conv7(self.relu(torch.cat([de3, en2], 1))))\n\t\t# (bs, 64, 128, 128)\n\t\tde1 = self.dec_conv8(self.relu(torch.cat([de2, en1], 1)))\n\t\t# (bs, 3, 256, 256)\n\t\tout = self.tanh(de1)\n\n\t\tdel en1, en2, en3, en4, en5, en6, en7, en8, de8, de7, de6, de5, de4, de3, de2, de1\n\n\t\treturn out\n\n\nclass PatchGan_D_70x70(nn.Module):\n\tdef __init__(self, ic_1, ic_2, use_sigmoid = True, norm_type = 'batchnorm'):\n\t\tsuper(PatchGan_D_70x70, self).__init__()\n\t\tself.ic_1 = ic_1\n\t\tself.ic_2 = ic_2\n\t\tself.use_sigmoid = use_sigmoid\n\t\tself.leaky_relu = nn.LeakyReLU(0.2, inplace = True)\n\t\tself.conv1 = nn.Conv2d(self.ic_1 + self.ic_2, 64, 4, 2, 1, bias = False)\n\t\tself.bn1 = None\n\t\tself.conv2 = nn.Conv2d(64, 128, 4, 2, 1, bias = False)\n\t\tself.bn2 = get_norm(norm_type, 128)\n\t\tself.conv3 = nn.Conv2d(128, 256, 4, 2, 1, bias = False)\n\t\tself.bn3 = get_norm(norm_type, 256)\n\t\tself.conv4 = nn.Conv2d(256, 512, 4, 1, 1, bias = False)\n\t\tself.bn4 = get_norm(norm_type, 512)\n\t\tself.conv5 = nn.Conv2d(512, 1, 4, 1, 1, bias = False)\n\t\tself.sigmoid = nn.Sigmoid()\n\n\t\tfor m in self.modules():\n\t\t\tif(isinstance(m, nn.Conv2d)):\n\t\t\t\tm.weight.data.normal_(0.0, 0.02)\n\t\t\t\tif(m.bias is not None):\n\t\t\t\t\tm.bias.data.zero_()\n\n\tdef forward(self, x1, x2):\n\t\tout = torch.cat([x1, x2], 1)\n\t\t# (bs, ic_1+ic_2, 256, 256)\n\t\tout = self.leaky_relu(self.conv1(out))\n\t\t# (bs, 64, 128, 128)\n\t\tout = self.leaky_relu(self.bn2(self.conv2(out)))\n\t\t# (bs, 128, 64, 64)\n\t\tout = self.leaky_relu(self.bn3(self.conv3(out)))\n\t\t# (bs, 256, 32, 32)\n\t\tout = self.leaky_relu(self.bn4(self.conv4(out)))\n\t\t# (bs, 512, 31, 31)\n\t\tout = self.conv5(out)\n\t\t# (bs, 1, 30, 30)\n\t\tif(self.use_sigmoid == True):\n\t\t\tout = self.sigmoid(out)\n\n\t\treturn out\n\n\nclass PatchGan_D_286x286(nn.Module):\n\tdef __init__(self, ic_1, ic_2, use_sigmoid = True, norm_type = 'batchnorm'):\n\t\tsuper(PatchGan_D_286x286, self).__init__()\n\t\tself.ic_1 = ic_1\n\t\tself.ic_2 = ic_2\n\t\tself.use_sigmoid = use_sigmoid\n\t\tself.leaky_relu = nn.LeakyReLU(0.2, inplace = True)\n\t\tself.conv1 = nn.Conv2d(self.ic_1 + self.ic_2, 64, 4, 2, 1, bias = False)\n\t\tself.bn1 = None\n\t\tself.conv2 = nn.Conv2d(64, 128, 4, 2, 1, bias = False)\n\t\tself.bn2 = get_norm(norm_type, 128)\n\t\tself.conv3 = nn.Conv2d(128, 256, 4, 2, 1, bias = False)\n\t\tself.bn3 = get_norm(norm_type, 256)\n\t\tself.conv4 = nn.Conv2d(256, 512, 4, 2, 1, bias = False)\n\t\tself.bn4 = get_norm(norm_type, 512)\n\t\tself.conv5 = nn.Conv2d(512, 512, 4, 2, 1, bias = False)\n\t\tself.bn5 = get_norm(norm_type, 512)\n\t\tself.conv6 = nn.Conv2d(512, 512, 4, 1, 1, bias = False)\n\t\tself.bn6 = get_norm(norm_type, 512)\n\t\tself.conv7 = nn.Conv2d(512, 1, 4, 1, 1, bias = False)\n\t\tself.sigmoid = nn.Sigmoid()\n\n\t\tfor m in self.modules():\n\t\t\tif(isinstance(m, nn.Conv2d)):\n\t\t\t\tm.weight.data.normal_(0.0, 0.02)\n\t\t\t\tif(m.bias is not None):\n\t\t\t\t\tm.bias.data.zero_()\n\n\tdef forward(self, x1, x2):\n\t\tout = torch.cat([x1, x2], 1)\n\t\t# (bs, ic_1+ic_2, 256, 256)\n\t\tout = self.leaky_relu(self.conv1(out))\n\t\t# (bs, 64, 128, 128)\n\t\tout = self.leaky_relu(self.bn2(self.conv2(out)))\n\t\t# (bs, 128, 64, 64)\n\t\tout = self.leaky_relu(self.bn3(self.conv3(out)))\n\t\t# (bs, 256, 32, 32)\n\t\tout = self.leaky_relu(self.bn4(self.conv4(out)))\n\t\t# (bs, 512, 16, 16)\n\t\tout = self.leaky_relu(self.bn5(self.conv5(out)))\n\t\t# (bs, 512, 8, 8)\n\t\tout = 
self.leaky_relu(self.bn6(self.conv6(out)))\n\t\t# (bs, 512, 7, 7)\n\t\tout = self.conv7(out)\n\t\t# (bs, 512, 6, 6)\n\t\tif(self.use_sigmoid == True):\n\t\t\tout = self.sigmoid(out)\n\n\t\treturn out\n\n\ndef receptive_calculator(input_size, ks, stride, pad):\n\treturn int((input_size - ks + 2 * pad) / stride + 1)\n\ndef inverse_receptive_calculator(output_size, ks, stride, pad):\n\treturn ((output_size - 1) * stride) + ks\n\n","sub_path":"architectures_experimental/img2img.py","file_name":"img2img.py","file_ext":"py","file_size_in_byte":10227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"82664444","text":"# Copyright 2017 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Batch reader to sequence generation model, with bucketing support.\n\nContinuously reads source and target from tf.Example file, batches the\nsources and targets, and returns the batched inputs.\n\nExample:\n batcher = batch_reader.Batcher(...)\n while True:\n (...) = batcher.NextBatch()\n\"\"\"\n\nimport collections\nimport queue\nfrom random import random\nfrom random import shuffle\nfrom threading import Thread\nimport time\n\nimport numpy as np\nimport tensorflow as tf\n\nimport data\n\nModelInput = collections.namedtuple(\n 'ModelInput',\n 'enc_input dec_input dec_target enc_len dec_len source targets')\n\nBUCKET_CACHE_BATCH = 100\nQUEUE_NUM_BATCH = 100\nDAEMON_READER_THREADS = 16\nBUCKETING_THREADS = 4\n\n\nclass Batcher(object):\n \"\"\"Batch reader with shuffling and bucketing support.\"\"\"\n\n def __init__(self, data_path, config):\n \"\"\"Batcher initializer.\n\n Args:\n data_path: tf.Example filepattern.\n config: model hyperparameters.\n \"\"\"\n self._data_path = data_path\n self._config = config\n self._input_vocab = config.input_vocab\n self._output_vocab = config.output_vocab\n self._source_key = config.source_key\n self._target_key = config.target_key\n self.use_bucketing = config.use_bucketing\n self._truncate_input = config.truncate_input\n self._input_queue = queue.Queue(QUEUE_NUM_BATCH * config.batch_size)\n self._bucket_input_queue = queue.Queue(QUEUE_NUM_BATCH)\n self._input_threads = []\n for _ in range(DAEMON_READER_THREADS):\n self._input_threads.append(Thread(target=self._FillInputQueue))\n self._input_threads[-1].daemon = True\n self._input_threads[-1].start()\n self._bucketing_threads = []\n for _ in range(BUCKETING_THREADS):\n self._bucketing_threads.append(Thread(target=self._FillBucketInputQueue))\n self._bucketing_threads[-1].daemon = True\n self._bucketing_threads[-1].start()\n\n self._watch_thread = Thread(target=self._WatchThreads)\n self._watch_thread.daemon = True\n self._watch_thread.start()\n\n def NextBatch(self):\n \"\"\"Returns a batch of inputs for model.\n\n Returns:\n Tuple (enc_batch, dec_batch, target_batch, enc_input_len, dec_input_len,\n loss_weights, origin_sources, origin_targets) where:\n enc_batch: A batch of 
encoder inputs [batch_size, config.enc_timestamps].\n dec_batch: A batch of decoder inputs [batch_size, config.dec_timestamps].\n target_batch: A batch of targets [batch_size, config.dec_timestamps].\n enc_input_len: Encoder input lengths of the batch.\n dec_input_len: Decoder input lengths of the batch.\n loss_weights: Weights for loss function, 1 if not padded, 0 if padded.\n source: string. Original source words.\n targets: List of strings. Original target words.\n \"\"\"\n enc_batch = np.zeros(\n (self._config.batch_size, self._config.max_input_len), dtype=np.int32)\n enc_input_lens = np.zeros((self._config.batch_size), dtype=np.int32)\n dec_batch = np.zeros(\n (self._config.batch_size, self._config.max_output_len), dtype=np.int32)\n dec_output_lens = np.zeros((self._config.batch_size), dtype=np.int32)\n target_batch = np.zeros(\n (self._config.batch_size, self._config.max_output_len), dtype=np.int32)\n loss_weights = np.zeros(\n (self._config.batch_size, self._config.max_output_len),\n dtype=np.float32)\n source = ['None'] * self._config.batch_size\n targets = [['None']] * self._config.batch_size\n\n buckets = self._bucket_input_queue.get()\n for i in range(self._config.batch_size):\n (enc_inputs, dec_inputs, dec_targets, enc_input_len, dec_output_len,\n source_i, targets_i) = buckets[i]\n\n enc_input_lens[i] = enc_input_len\n dec_output_lens[i] = dec_output_len\n enc_batch[i, :] = enc_inputs[:]\n dec_batch[i, :] = dec_inputs[:]\n target_batch[i, :] = dec_targets[:]\n source[i] = source_i\n targets[i] = targets_i\n for j in range(dec_output_len):\n loss_weights[i][j] = 1\n\n return (enc_batch, dec_batch, target_batch, enc_input_lens, dec_output_lens,\n loss_weights, source, targets)\n\n def _FillInputQueue(self):\n \"\"\"Fills input queue with ModelInput.\"\"\"\n\n # input gets padded\n pad_id = self._input_vocab.WordToId(data.PAD_TOKEN)\n # output get start id and padded with end ids\n end_id = self._output_vocab.WordToId(data.SENTENCE_END)\n\n input_gen = self._TextGenerator(data.ExampleGen(self._data_path))\n while True:\n (source, targets) = next(input_gen)\n # target = choice(targets)\n target = targets[0]\n\n # Convert sentences to word IDs, stripping existing and .\n enc_inputs = data.GetWordIds(source, self._input_vocab)\n dec_inputs = data.GetWordIds(target, self._output_vocab)\n\n # Filter out too-short input\n if len(enc_inputs) < self._config.min_input_len:\n tf.logging.warning('Drop an example - input to short: %d (min: %d)',\n len(enc_inputs), self._config.min_input_len)\n continue\n\n if len(dec_inputs) < self._config.min_input_len:\n tf.logging.warning('Drop an example - output to short: %d (min: %d)',\n len(enc_inputs), self._config.min_input_len)\n continue\n\n # If we're not truncating input, throw out too-long input\n if not self._truncate_input:\n if len(enc_inputs) > self._config.max_input_len:\n tf.logging.warning('Drop an example - input to long: %d (max: %d)',\n len(enc_inputs), self._config.max_input_len)\n continue\n if len(dec_inputs) > self._config.max_output_len:\n tf.logging.warning('Drop an example - output to long: %d (max: %d)',\n len(dec_inputs), self._config.max_output_len)\n continue\n # If we are truncating input, do so if necessary\n else:\n if len(enc_inputs) > self._config.max_input_len:\n enc_inputs = enc_inputs[:self._config.max_input_len]\n if len(dec_inputs) > self._config.max_output_len:\n dec_inputs = dec_inputs[:self._config.max_output_len]\n\n # dec_targets is dec_inputs without at beginning, plus at end\n dec_targets = dec_inputs[1:]\n 
dec_targets.append(end_id)\n\n enc_input_len = len(enc_inputs)\n dec_output_len = len(dec_targets)\n\n # Pad if necessary\n while len(enc_inputs) < self._config.max_input_len:\n enc_inputs.append(pad_id)\n while len(dec_inputs) < self._config.max_output_len:\n dec_inputs.append(end_id)\n while len(dec_targets) < self._config.max_output_len:\n dec_targets.append(end_id)\n\n element = ModelInput(enc_inputs, dec_inputs, dec_targets, enc_input_len,\n dec_output_len, source, targets)\n self._input_queue.put(element)\n\n def _FillBucketInputQueue(self):\n \"\"\"Fills bucketed batches into the bucket_input_queue.\"\"\"\n while True:\n inputs = []\n for _ in range(self._config.batch_size * BUCKET_CACHE_BATCH):\n inputs.append(self._input_queue.get())\n if self.use_bucketing:\n inputs = sorted(inputs, key=lambda inp: inp.enc_len)\n\n batches = []\n for i in range(0, len(inputs), self._config.batch_size):\n batches.append(inputs[i:i + self._config.batch_size])\n shuffle(batches)\n for b in batches:\n self._bucket_input_queue.put(b)\n\n def _WatchThreads(self):\n \"\"\"Watches the daemon input threads and restarts any that have died.\"\"\"\n while True:\n time.sleep(60)\n input_threads = []\n for t in self._input_threads:\n if t.is_alive():\n input_threads.append(t)\n else:\n tf.logging.error('Found input thread dead.')\n new_t = Thread(target=self._FillInputQueue)\n input_threads.append(new_t)\n input_threads[-1].daemon = True\n input_threads[-1].start()\n self._input_threads = input_threads\n\n bucketing_threads = []\n for t in self._bucketing_threads:\n if t.is_alive():\n bucketing_threads.append(t)\n else:\n tf.logging.error('Found bucketing thread dead.')\n new_t = Thread(target=self._FillBucketInputQueue)\n bucketing_threads.append(new_t)\n bucketing_threads[-1].daemon = True\n bucketing_threads[-1].start()\n self._bucketing_threads = bucketing_threads\n\n def _TextGenerator(self, example_gen):\n \"\"\"Generates source and target text from tf.Example.\n\n Args:\n example_gen: ExampleGen that yields tf.Example.\n\n Yields:\n Tuple (source_text, target_texts) where:\n source_text: The source text string.\n target_texts: List of (well-formed) target text strings.\n \"\"\"\n while True:\n example = next(example_gen)\n try:\n\n # TARGET\n all_target_texts = []\n if len(self._target_key.split(',')) > 1:\n all_target_text = ''\n counter = -1\n # concat different keys (not combinable with multiple targets)\n for key in self._target_key.split(','):\n if counter >= 0:\n all_target_text += ' '\n all_target_text += self._GetExFeatureText(example, key)[0].strip()\n counter += 1\n all_target_text = self._AddSentenceBoundary(all_target_text)\n all_target_texts.append(all_target_text)\n else:\n key = self._target_key\n for target_text in self._GetExFeatureText(example, key):\n target_text = target_text.strip()\n target_text = self._AddSentenceBoundary(target_text)\n all_target_texts.append(target_text)\n\n # SOURCE\n all_source_text = ''\n counter = -1\n # if input is list of keys we concat them using separator tokens.\n for key in self._source_key.split(','):\n if counter >= 0:\n # separator tokens, etc. 
must already be part of the vocab\n if self._input_vocab.WordToId('') <= 0:\n tf.logging.error('Separator token missing: %s',\n str(counter))\n all_source_text += ' '\n # special key to add the length of the output to the input\n if key == '%LENGTH%':\n all_source_text += str(len(all_target_texts[0].split()))\n elif len(key.split('%')) == 2:\n if random() < float(key.split('%')[0]) / 100:\n all_source_text += self._GetExFeatureText(\n example, key.split('%')[1])[0].strip()\n else:\n all_source_text += ' '\n else:\n all_source_text += self._GetExFeatureText(example, key)[0].strip()\n counter += 1\n all_source_text = self._AddSentenceBoundary(all_source_text)\n\n yield (all_source_text, all_target_texts)\n\n except ValueError as e:\n tf.logging.error(e)\n tf.logging.error('Failed to get article or abstract from example')\n continue\n\n def _AddSentenceBoundary(self, text):\n \"\"\"Pads text with start and end of sentence tokens if needed.\n\n Args:\n text: text to be padded.\n\n Returns:\n A text with start and end tokens.\n \"\"\"\n\n if not text.startswith(data.SENTENCE_START):\n text = data.SENTENCE_START + ' ' + text\n if not text.endswith(data.SENTENCE_END):\n text = text + ' ' + data.SENTENCE_END\n\n return text\n\n def _GetExFeatureText(self, example, key):\n \"\"\"Extracts text for a feature from tf.Example.\n\n Args:\n example: tf.Example.\n key: Key of the feature to be extracted.\n\n Returns:\n A list of text values extracted for the feature.\n \"\"\"\n\n values = []\n for value in example.features.feature[key].bytes_list.value:\n values.append(value.decode(\"utf-8\"))\n\n return values\n","sub_path":"batch_reader/vocab_batcher.py","file_name":"vocab_batcher.py","file_ext":"py","file_size_in_byte":12406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"529493221","text":"#coding=gbk\n\"\"\"\nCreated on Sun Jul 19 15:10:13 2015\n\n@author: Administrator\n\"\"\"\nimport wx\n\nclass CashSuper():\n def AcceptCash(self,money):\n return 0\n \nclass CashNormal(CashSuper):\n def AcceptCash(self,money):\n return money\n \nclass CashRebate(CashSuper):\n discount = 0\n def __init__(self,ds):\n self.discount = ds\n def AcceptCash(self,money):\n return money*self.discount\n \nclass CashReturn(CashSuper):\n total = 0\n ret = 0\n def __init__(self,t,r):\n self.total = t\n self.ret = r\n def AcceptCash(self,money):\n if (money >= self.total):\n return money - self.ret\n else:\n return money\n \nclass CashContext():\n def __init__(self,ctype):\n strategy = {}\n strategy[0] = CashNormal()\n strategy[1] = CashRebate(0.7)\n strategy[2] = CashReturn(300,100)\n self.cs = strategy[ctype]\n def GetResult(self,money):\n return self.cs.AcceptCash(money)\n \n\nclass MyFrame(wx.Frame):\n def __init__(self):\n wx.Frame.__init__(self,None,size = (300,400))\n self.Title = \"商场收银系统\"\n self.panel = wx.Panel(self)\n wx.StaticText(self.panel,-1,\"单价: \",pos = (20,30))\n self.txtPrice = wx.TextCtrl(self.panel,-1,pos = (80,30))\n self.okButton = wx.Button(self.panel,-1,\"确定\",pos = (200,30))\n \n wx.StaticText(self.panel,-1,\"数量:\",pos = (20,80))\n self.txtNum = wx.TextCtrl(self.panel,-1,pos = (80,80))\n self.resetButton = wx.Button(self.panel,-1,\"重置\",pos = (200,80))\n \n wx.StaticText(self.panel,-1,\"折扣: \",pos = (20,130))\n self.straList = [\"正常收费\",\"打7折\",\"满300返100\"]\n self.listbox = wx.Choice(self.panel,-1,(80,130),choices = self.straList)\n self.listbox.SetSelection(0)\n self.itemsText = wx.TextCtrl(self.panel,-1,size = (250,120),pos = (10,180),style = wx.TE_MULTILINE)\n 
wx.StaticText(self.panel,-1,\"总价\",pos = (20,320))\n wx.StaticText(self.panel,-1,\"0\",pos = (80,320))\n \n def GetPrices(self):\n \n tot = float(self.txtPrice.GetValue())*float(self.txtNum.GetValue())\n cc = CashContext(self.listbox.GetSelection())\n result = cc.GetResult(tot)\n txt = \"单价: \"+self.txtPrice.GetValue() + \" 数量: \"+self.txtNum.GetValue() +\" \"+self.straList[self.listbox.GetSelection()]+\" 合计: \" +str(result) + \"\\n \"\n return txt,result\n \n\nclass MyApp(wx.App):\n def __init__(self):\n wx.App.__init__(self,redirect = False)\n self.total = 0\n \n def OnInit(self):\n self.frame = MyFrame()\n self.frame.Show()\n self.Bind(wx.EVT_BUTTON,self.OnokButton,self.frame.okButton)\n self.Bind(wx.EVT_BUTTON,self.OnresetButton,self.frame.resetButton)\n return True\n \n def OnokButton(self,event):\n txt,tot = self.frame.GetPrices()\n self.frame.itemsText.AppendText(txt)\n self.total += tot\n wx.StaticText(self.frame.panel,-1,str(self.total),pos = (80,300))\n\n \n def OnresetButton(self,event):\n self.frame.txtNum.Clear()\n self.frame.txtPrice.Clear()\n self.frame.itemsText.Clear()\n wx.StaticText(self.frame.panel,-1,str(0),pos = (80,300))\n\nif __name__ == \"__main__\":\n app = MyApp()\n app.MainLoop()\n \n","sub_path":"strategy.py","file_name":"strategy.py","file_ext":"py","file_size_in_byte":3399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"420762432","text":"# self\nfrom yhl_workspace import *\nfrom data.for_prepare import yhl_read, make_subset, clear_file\n# help\nimport ctypes\nimport multiprocessing\nfrom sklearn.neighbors import KDTree\n\n\nclass gloveYHL():\n def most_similar(self, word, k=20, metric_type='euclidean'):\n if(hasattr(self, 'yhl_kdtree') is False):\n self.yhl_kdtree = KDTree(self.x_w, metric=metric_type)\n # if the word is not in the vocabulary\n if(word not in self.words_index.keys()):\n return None\n # look up the word vector for the current word\n word_feature = self.x_w[self.words_index[word]]\n # get the indices of the k nearest word vectors in Euclidean space\n ans = self.yhl_kdtree.query([word_feature], k=k, return_distance=False)\n for index in ans[0]:\n yield self.words[index], similarity(self.x_w[index], word_feature)\n\n def load(self, file_name, min_count=5):\n if(hasattr(self, 'words_cnt') is False):\n self.words_cnt = yhl_dict()\n # set up the reader\n reader = yhl_read(yhl_work(file_name))\n for i, line in enumerate(reader):\n if(i > 0 and i % 1000 == 0):\n print(i, ' is read over ...')\n for word in line.replace('\\n', '').split(' '):\n if(word in self.words_cnt):\n self.words_cnt[word] += 1\n elif(word != ''):\n self.words_cnt[word] = 1\n print('词汇总数  : ', len(self.words_cnt))\n # most importantly, build an index for every word\n # self.words = list(self.words_cnt.keys())\n self.words = ['yhl'] + \\\n [l for l, r in self.words_cnt.items() if(r > min_count)]\n self.words_size = len(self.words)\n self.words_index = yhl_dict(\n {word: i for i, word in enumerate(self.words)})\n print('len : ', len(self.words))\n\n def get_co_occur(self, file_name, min_count=5, win_size=4, ):\n def get_window(line, win_size=4):\n # define the window generator; alternatively, write the generated samples straight to a file\n line = [word for word in line.strip().split(' ') if(word != '')]\n limit = len(line)\n for i, word in enumerate(line):\n l = max(0, i - win_size)\n r = min(i + win_size + 1, limit)\n window = line[l:i] + line[i + 1: r]\n yield self.words_index[word], [self.words_index[it] for it in window]\n # now build the co-occurrence matrix\n co_occur = yhl_dict()\n reader = yhl_read(yhl_work(file_name))\n for i, line in enumerate(reader):\n # keep pulling windows\n for word, window in get_window(line, win_size=4):\n real_win_size = len(window)\n for 
offset, cur in enumerate(window):\n co_occur[word, cur] += 1.0 / \\\n (math.sqrt(abs(offset - real_win_size)) + 1e-3)\n co_occur[cur, word] += 1.0 / \\\n (math.sqrt(abs(offset - real_win_size)) + 1e-3)\n # self.co_occur = list(self.co_occur.items())\n ## to speed up early training, first filter out co-occurrences with very low counts\n co_occur = [(l, r, cnt)\n for (l, r), cnt in co_occur.items()] # if(cnt > min_count)\n print('训练样本  : ', len(co_occur))\n pickle.dump(co_occur, open('../data/glove_data.pkl', 'wb'))\n return co_occur\n\n def train(self, train_file_name, valid_file_name='', process_num=4, min_count=5, win_size=4, feature_len=100, init_lrt=0.01, yhl_iters=20, max_clip=3, build_kdtree=True):\n self.co_occur = self.get_co_occur(\n train_file_name, min_count, win_size)\n # set up shared memory\n if(hasattr(self, 'v_center') is False):\n def get_array(l, r, d=2, _lock=True):\n arr = multiprocessing.Array('d', l * r, lock=True)\n real_shape = (l, r) if(d == 2) else l\n # initialize the weight matrix\n temp = numpy.frombuffer(\n arr.get_obj(), dtype=numpy.float64).reshape(real_shape)\n numpy.copyto(temp, numpy.random.normal(0, 0.212, real_shape))\n return arr\n self.v_center = get_array(self.words_size, feature_len)\n self.v_context = get_array(self.words_size, feature_len)\n self.bias_center = get_array(self.words_size, 1)\n self.bias_context = get_array(self.words_size, 1)\n self.weights_shape = (self.words_size, feature_len)\n # self.shared_arr = multiprocessing.Array('d', 2 * 5, lock=True)\n # start iterative training\n\n def child_train(*train_args):\n # unpack the arguments\n yhl_id, init_lrt, yhl_iters, max_clip = train_args\n # shared_arr = numpy.frombuffer(\n # self.shared_arr.get_obj(), dtype=numpy.float64).reshape(10)\n # shared_arr += 1\n # return\n # rebuild the matrices from the shared buffers\n v_center = numpy.frombuffer(\n self.v_center.get_obj(), dtype=numpy.float64).reshape(self.weights_shape)\n v_context = numpy.frombuffer(\n self.v_context.get_obj(), dtype=numpy.float64).reshape(self.weights_shape)\n bias_center = numpy.frombuffer(\n self.bias_center.get_obj(), dtype=numpy.float64).reshape(self.words_size)\n bias_context = numpy.frombuffer(\n self.bias_context.get_obj(), dtype=numpy.float64).reshape(self.words_size)\n # each process handles its share of the iterations\n for one_round in range(yhl_iters):\n numpy.random.shuffle(self.co_occur)\n yhl_cost = 0.0\n for piece, (l, r, cnt) in enumerate(self.co_occur):\n common_grad = v_center[l].dot(\n v_context[r]) + bias_center[l] + bias_context[r] - math.log(cnt)\n f = (cnt / 100.0) ** 0.75 if(cnt < 100) else 1\n # accumulate the loss\n yhl_cost += f * (common_grad ** 2) / 2\n # compute the shared gradient factor\n grad = numpy.clip(\n init_lrt * f * common_grad, -max_clip, max_clip)\n # apply the updates\n v_center[l] -= grad * v_context[r]\n v_context[r] -= grad * v_center[l]\n bias_center[l] -= grad\n bias_context[r] -= grad\n print(datetime.now().strftime('%H:%M:%S'), ' 进程 ', yhl_id, ' ',\n 'iter ', one_round, ' ', round(one_round * 1. 
/ yhl_iters, 4) * 100, '% cost : ', yhl_cost)\n init_lrt = init_lrt * (0.999 ** one_round)\n\n print('cpu 核心数 : ', process_num)\n # launch the worker processes\n yhl_process = [\n multiprocessing.Process(\n target=child_train,\n args=(yhl_id, init_lrt, yhl_iters, max_clip))\n for yhl_id in range(1, process_num + 1)\n ]\n # a with-block would be cleaner here\n for pro in yhl_process:\n pro.start()\n for pro in yhl_process:\n pro.join()\n\n # self.shared_arr = numpy.ctypeslib.as_array(\n # self.shared_arr.get_obj()).reshape(10)\n # print(self.shared_arr)\n # time.sleep(1000)\n\n self.v_center = numpy.ctypeslib.as_array(\n self.v_center.get_obj()).reshape(self.weights_shape)\n self.v_context = numpy.ctypeslib.as_array(\n self.v_context.get_obj()).reshape(self.weights_shape)\n # finally, add the two embedding matrices together\n self.x_w = self.v_center + self.v_context\n if(build_kdtree == True):\n self.yhl_kdtree = KDTree(self.x_w, metric='euclidean')\n print('\\nkdtree is built successfully')\n\n def save(self):\n pickle.dump(self.v_center, open('../result/glove_v_center.pkl', 'wb'))\n pickle.dump(self.v_context, open(\n '../result/glove_v_context.pkl', 'wb'))\n pickle.dump(self.words, open('../result/glove_words.pkl', 'wb'))\n pickle.dump(self.words_index, open(\n '../result/glove_words_index.pkl', 'wb'))\n if(hasattr(self, 'yhl_kdtree') is True):\n pickle.dump(self.words, open('../result/glove_kdtree.pkl', 'wb'))\n\n\nif __name__ == '__main__':\n # files\n train_file_name, valid_file_name = (\n '../data/small/multi/people.txt', '../data/small/multi/valid.pkl')\n # train_file_name, valid_file_name = (\n # '../data/wiki.txt', '../data/valid.pkl')\n # create the model\n model = gloveYHL()\n # load the corpus\n model.load(train_file_name, min_count=10)\n # start training\n model.train(\n train_file_name=train_file_name,\n valid_file_name=valid_file_name,\n process_num=4,\n # process_num=multiprocessing.cpu_count(),\n min_count=5,\n win_size=8,\n feature_len=128,\n init_lrt=0.0312,\n yhl_iters=20,\n max_clip=3,\n build_kdtree=True,\n )\n model.save()\n ans = model.most_similar('李达康')\n print(\"------------- 李达康 ----------------\")\n for it in ans:\n print(it)\n ans = model.most_similar('沙瑞金')\n print(\"------------- 沙瑞金 ----------------\")\n for it in ans:\n print(it)\n ans = model.most_similar('人民')\n print(\"------------- 人民 ----------------\")\n for it in ans:\n print(it)\n\n # glove performs far worse than word2vec here\n\n # print(similarity(model.x_w[]))\n","sub_path":"src/glove.py","file_name":"glove.py","file_ext":"py","file_size_in_byte":9468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"124415263","text":"#%% Imports\nfrom PlayerDistributionFunctions import worst_on_decisive_games\nfrom Match import Match\nimport pandas as pd\n\n#%% Simulate matches\nresults = {}\nfor i in range(10000):\n match = Match(1, worst_on_decisive_games)\n winner_index = match.play_full_match()\n\n if winner_index == 1:\n match_score = []\n for match_set in match.score.games_for_set:\n match_score.append([match_set[1], match_set[0]])\n else:\n match_score = match.score.games_for_set\n flat_match_score = tuple([item for sublist in match_score for item in sublist])\n if flat_match_score in results:\n results[flat_match_score] = results.get(flat_match_score) + 1\n else:\n results[flat_match_score] = 1\n\n#%% Create dataframe\ndf = pd.DataFrame(list(zip(results.keys(), results.values())), columns =['Result', 
'Quantity'])","sub_path":"worst_on_decisisve_games_simulation.py","file_name":"worst_on_decisisve_games_simulation.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"468662207","text":"import tkinter as tk, os\nfrom io import BytesIO\n\nfrom PIL import Image, ImageTk\nimport re\nimport urllib.request\n\n\ndef getHtml(url):\n try:\n page = urllib.request.urlopen(url)\n html = page.read()\n html = str(html)\n return html\n except urllib.error.HTTPError as e:\n print(e.reason)\n return False\n\n\ndef getAllImg(html):\n reg = r'src=\\\"(.{0,100}\\.jpg)\\\"'\n imglist = re.findall(reg, html)\n return imglist\n\n\ndef getNvyouIDs(html):\n reg = r'/luyilu/(.{0,100}).html'\n nvyouIDList = re.findall(reg, html)\n nvyouIDList = list(map(int, nvyouIDList))\n nvyouIDList = list(set(nvyouIDList))\n nvyouIDList.sort()\n nvyouIDList.reverse()\n return nvyouIDList\n\n\ndef getImage(url):\n rq = urllib.request.Request(url)\n u = urllib.request.urlopen(rq)\n data = u.read()\n img = Image.open(BytesIO(data))\n if img.size[0] <= 430:\n return False\n return ImageTk.PhotoImage(img)\n\n\nclass PicSite:\n def __init__(self, url):\n self.url = url\n self.subUrl = ''\n self.nvyouIDs = getNvyouIDs(getHtml(url))\n self.image = ''\n self.nPerPage = 0\n\n def crawling_by_category(self):\n for nvyouID in self.nvyouIDs:\n self.subUrl = self.url + str(nvyouID) + '.html'\n # crawling(picSite)\n for value in range(1, 30):\n if value != 1:\n self.subUrl = self.url + str(nvyouID) + '_' + str(value) + '.html'\n print(\"Crawling \" + self.subUrl)\n srcHtml = getHtml(self.subUrl)\n if not srcHtml:\n return False\n imglist = getAllImg(srcHtml)\n if len(imglist) > 0 and imglist[\n 0] == 'https://www.images.96xxpic.com:8819/allimg/161029/1-1610292146350-L.jpg':\n return False\n #每页只显示三张\n self.nPerPage = 0\n for il in imglist:\n self.nPerPage += 1\n if self.nPerPage >= 4: break\n photoImage = getImage(il)\n if not photoImage: continue\n yield photoImage\n print(\"peek \" + il)\n\n\n\nclass Application(tk.Frame):\n def __init__(self, master=None):\n tk.Frame.__init__(self, master)\n self.pack()\n self.picSite = PicSite(\"https://96xx2019.com/luyilu/\")\n self.crawlingGenerator = self.picSite.crawling_by_category()\n self.img = next(self.crawlingGenerator)\n self.createWidgets()\n\n def createWidgets(self):\n # self.btnPrev = tk.Button(self, text='Prev', command=self.prev)\n # self.btnPrev.pack(side=tk.TOP)\n self.btnNext = tk.Button(self, text='Next', command=self.next)\n self.btnNext.pack(side=tk.TOP)\n self.btnNext = tk.Button(self, text='Next Page', command=self.nextPage)\n self.btnNext.pack(side=tk.TOP)\n self.lblImage = tk.Label(self)\n self.lblImage['image'] = self.img\n self.lblImage.pack()\n\n # def prev(self):\n # self.showfile(-1)\n\n def next(self):\n self.showfile()\n\n def nextPage(self):\n self.picSite.nPerPage = 4 #直接翻页\n self.showfile()\n\n def showfile(self):\n self.img = next(self.crawlingGenerator)\n self.lblImage['image'] = self.img\n\n\nif __name__ == '__main__':\n # 设置背景颜色\n bgcolor = '#000000'\n root = tk.Tk()\n root.title('简易图片浏览器')\n root.configure(bg=bgcolor)\n\n # 窗口最大化\n w = root.winfo_screenwidth()\n h = root.winfo_screenheight()\n root.geometry(\"%dx%d\" % (w, h))\n\n app = Application(master=root)\n app.mainloop()\n","sub_path":"picBrowser.py","file_name":"picBrowser.py","file_ext":"py","file_size_in_byte":3701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} 
+{"seq_id":"201716816","text":"from setuptools import setup, find_packages\r\n\r\ntry: # for pip >= 10\r\n from pip._internal.req import parse_requirements\r\nexcept ImportError: # for pip <= 9.0.3\r\n from pip.req import parse_requirements\r\n\r\n# Set the library's long description to the repo's README.md\r\nwith open('README.md', 'r') as readme_file:\r\n readme = readme_file.read()\r\n\r\nwith open('requirements.txt') as f:\r\n required = f.read().splitlines()\r\n\r\nrequirements = required\r\n\r\nsetup(\r\n name='abstract_python_email_validation',\r\n version='1.0.0',\r\n author='Benjamin Bouchet',\r\n author_email='libraries@abstractapi.com',\r\n description=\"AbstractEmailValidation - Wrapper to quickly start using the powerful AbstractAPI's email validation service in your projects.\",\r\n long_description=readme,\r\n long_description_content_type='text/markdown',\r\n url='https://github.com/abstractapi/python-email-validation',\r\n packages=find_packages(),\r\n install_requires=requirements,\r\n classifiers=[\r\n 'Programming Language :: Python :: 3.6',\r\n 'Intended Audience :: Developers',\r\n 'License :: OSI Approved :: MIT License',\r\n ],\r\n)","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"285623537","text":"import os\nimport subprocess\n\nfrom typing import Dict, Optional\n\nfrom cfengine import PromiseModule, ValidationError, Result\nfrom pydantic import (\n BaseModel,\n ValidationError as PydanticValidationError,\n validator,\n)\n\n\nclass GitPromiseTypeModel(BaseModel):\n destination: str\n repository: str\n bare: bool = False\n clone: bool = True\n depth: int = 0\n executable: str = \"git\"\n force: bool = False\n recursive: bool = True\n reference: Optional[str]\n remote: str = \"origin\"\n ssh_options: Optional[str]\n update: bool = True\n version: str = \"HEAD\"\n\n @validator(\"destination\")\n def destination_must_be_absolute(cls, v):\n if not os.path.isabs(v):\n raise ValueError(\"must be an absolute path\")\n return v\n\n @validator(\"depth\")\n def depth_must_be_positive(cls, v):\n if not v >= 0:\n raise ValueError(\"must be a positive number\")\n return v\n\n\nclass GitPromiseTypeModule(PromiseModule):\n def __init__(self, **kwargs):\n super(GitPromiseTypeModule, self).__init__(\n \"git_promise_module\", \"0.1.0\", **kwargs\n )\n\n def validate_promise(self, promiser: str, attributes: Dict):\n attributes.setdefault(\"destination\", promiser)\n try:\n GitPromiseTypeModel(**attributes)\n except PydanticValidationError as e:\n errors = [\n \".\".join(map(str, err[\"loc\"])) + \": \" + err[\"msg\"] for err in e.errors()\n ]\n raise ValidationError(\", \".join(errors))\n\n def evaluate_promise(self, promiser: str, attributes: Dict):\n safe_promiser = promiser.replace(\",\", \"_\")\n attributes.setdefault(\"destination\", promiser)\n model = GitPromiseTypeModel(**attributes)\n\n classes = []\n result = Result.KEPT\n\n # if the repository doesn't exist\n if not os.path.exists(model.destination):\n if not model.clone:\n return (Result.NOT_KEPT, [f\"{safe_promiser}_not_found\"])\n try:\n self.log_info(\n f\"Cloning '{model.repository}:{model.version}' to '{model.destination}'\"\n )\n clone_options = []\n if model.bare:\n clone_options += [\"--bare\"]\n if model.depth:\n clone_options += [f\"--depth={str(model.depth)}\"]\n if model.reference:\n clone_options += [\"--reference\", model.reference]\n output = 
subprocess.check_output(\n [\n model.executable,\n \"clone\",\n model.repository,\n model.destination,\n \"--origin\",\n model.remote,\n \"--branch\",\n model.version,\n ]\n + clone_options,\n env=self._git_envvars(model),\n )\n classes.append(f\"{safe_promiser}_cloned\")\n result = Result.REPAIRED\n except subprocess.CalledProcessError as e:\n error = e.output.decode()\n self.log_error(f\"Failed clone: {error}\")\n return (Result.NOT_KEPT, [f\"{safe_promiser}_clone_failed\"])\n\n else:\n # discard local changes to the repository\n if model.force:\n try:\n output = subprocess.check_output(\n [model.executable, \"status\", \"--porcelain\"],\n cwd=model.destination,\n env=self._git_envvars(model),\n )\n if output.decode(\"utf-8\").strip() != \"\":\n self.log_info(f\"Reset '{model.destination}' to HEAD\")\n output = subprocess.check_output(\n [model.executable, \"reset\", \"--hard\", \"HEAD\"],\n cwd=model.destination,\n env=self._git_envvars(model),\n )\n output = subprocess.check_output(\n [model.executable, \"clean\", \"-f\"],\n cwd=model.destination,\n env=self._git_envvars(model),\n )\n classes.append(f\"{safe_promiser}_reset\")\n result = Result.REPAIRED\n except subprocess.CalledProcessError as e:\n error = e.output.decode()\n self.log_error(f\"Failed reset: {error}\")\n return (Result.NOT_KEPT, [f\"{safe_promiser}_reset_failed\"])\n\n # Update the repository\n if model.update:\n try:\n self.log_info(f\"Updating '{model.repository}' in '{model.destination}'\")\n # fetch the remote\n output = subprocess.check_output(\n [model.executable, \"fetch\", model.remote],\n cwd=model.destination,\n env=self._git_envvars(model),\n )\n # checkout the branch, if different from the current one\n output = subprocess.check_output(\n [model.executable, \"rev-parse\", \"--abbrev-ref\", \"HEAD\"],\n cwd=model.destination,\n env=self._git_envvars(model),\n )\n detached = False\n if output.decode(\"utf-8\").strip() == \"HEAD\":\n detached = True\n output = subprocess.check_output(\n [model.executable, \"rev-parse\", \"HEAD\"],\n cwd=model.destination,\n env=self._git_envvars(model),\n )\n if output.decode(\"utf-8\").strip() != model.version:\n output = subprocess.check_output(\n [model.executable, \"checkout\", model.version],\n cwd=model.destination,\n env=self._git_envvars(model),\n )\n result = Result.REPAIRED\n # check if merge with the remote branch is needed\n if not detached:\n output = subprocess.check_output(\n [\n model.executable,\n \"diff\",\n f\"..{model.remote}/{model.version}\",\n ],\n cwd=model.destination,\n env=self._git_envvars(model),\n )\n if output.decode(\"utf-8\") != \"\":\n output = subprocess.check_output(\n [\n model.executable,\n \"merge\",\n model.remote + \"/\" + model.version,\n ],\n cwd=model.destination,\n env=self._git_envvars(model),\n )\n result = Result.REPAIRED\n classes.append(f\"{safe_promiser}_updated\")\n except subprocess.CalledProcessError as e:\n error = e.output.decode()\n self.log_error(f\"Failed fetch: {error}\")\n return (Result.NOT_KEPT, [f\"{safe_promiser}_update_failed\"])\n\n # everything okay\n return (result, classes)\n\n def _git_envvars(self, model: GitPromiseTypeModel):\n env = os.environ.copy()\n env[\"GIT_SSH_COMMAND\"] = f\"{model.executable}\"\n if model.ssh_options:\n env[\"GIT_SSH_COMMAND\"] += \" \" + model.ssh_options\n return env\n\n\nif __name__ == \"__main__\":\n 
GitPromiseTypeModule().start()\n","sub_path":"promise_types/git/git.py","file_name":"git.py","file_ext":"py","file_size_in_byte":8067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"371296381","text":"import mido\n\nimport logging\nimport sys\n\n\ndef logger():\n \"\"\"Configure the main logger.\"\"\"\n root = logging.getLogger()\n root.setLevel(logging.INFO)\n ch = logging.StreamHandler(sys.stdout)\n formatter = logging.Formatter('[ ' + sys.argv[0] + ' ] - [ %(asctime)s ] - [ %(levelname)s ] - %(message)s')\n ch.setFormatter(formatter)\n root.handlers = [ch]\n return root\n\n\nclass MidiController(object):\n \"\"\"Methods relating to control of the application by the Teensy Midi Controller.\"\"\"\n\n DEVICE = 'Teensy MIDI'\n notes = {\n 60: 'red',\n 61: 'green'\n }\n\n def __init__(self):\n self.log = logger()\n if 'Teensy MIDI' not in mido.get_input_names():\n self.log.error('Error connecting to Teensy foot controller.')\n sys.exit(1)\n self.input = mido.open_input(MidiController.DEVICE)\n self.log.info('Device Registered.')\n\n def eventloop(self):\n \"\"\"Listen for Midi Events\"\"\"\n # Clear the buffer\n for _ in self.input.iter_pending():\n pass\n\n # Begin Listening Loop\n while True:\n msg = self.input.receive(block=False)\n if msg and msg.type == 'note_off':\n if msg.note in MidiController.notes:\n self.__getattribute__(MidiController.notes[msg.note])()\n\n def green(self):\n self.log.warn('Green Button')\n\n def red(self):\n self.log.warn('Red Button')\n\n\ndef main():\n mc = MidiController()\n mc.eventloop()\n\nif __name__ == '__main__':\n main()","sub_path":"util/midi-trigger/midi_test.py","file_name":"midi_test.py","file_ext":"py","file_size_in_byte":1543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"19559871","text":"# -*- coding: utf-8 -*-\nimport os\nimport subprocess\nimport vobject\n\nCOMMAND_TEMPLATE = u\"osascript {applescript_filename}\"\n\n\ndef fetch_all_contacts_vcards():\n cmd = COMMAND_TEMPLATE.format(\n applescript_filename='fetch_all_contacts_vcards.scpt',\n )\n cwd = '/'.join(os.path.realpath(__file__).split('/')[:-1])\n all_vcards_string = subprocess.check_output([cmd], cwd=cwd, shell=True)\n return all_vcards_string\n\n\ndef get_list_of_vcards_strings(all_vcards_string):\n split_by = '\\r\\nEND:VCARD'\n return [i+split_by for i in all_vcards_string.split(split_by)][:-1]\n\n\ndef get_list_of_vcards_objects(all_vcards_strings_list):\n return [vobject.readOne(i) for i in all_vcards_strings_list]\n","sub_path":"clients/fetch_contacts.py","file_name":"fetch_contacts.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"353936636","text":"#!/usr/bin/env python3\n\nfrom ac_util import *\n\n# This file is imported by amplicon_classifier.py to get genes\n\ndef merge_intervals(feature_dict, tol=1):\n for item, usort_intd in feature_dict.items():\n for chrom, usort_ints in usort_intd.items():\n # sort ints\n sort_ints = sorted(usort_ints)\n # merge sorted ints\n mi = [sort_ints[0]]\n for ival in sort_ints[1:]:\n if ival[0] <= mi[-1][1] + tol:\n ui = (mi[-1][0], ival[1])\n mi[-1] = ui\n\n else:\n mi.append(ival)\n\n feature_dict[item][chrom] = mi\n\n\ndef get_gene_ends(gs, ge, strand, a, b):\n has5p, has3p = False, False\n if strand == \"-\":\n gs, ge = ge, gs\n\n if a <= gs <= b:\n has5p = True\n\n if a <= ge < b:\n has3p = True\n\n return has5p, has3p\n\n\ndef 
get_genes_from_intervals(gene_lookup, feature_dict, gseg_cn_d):\n feat_to_gene_trunc = defaultdict(lambda: defaultdict(set))\n feat_to_gene_cn = defaultdict(lambda: defaultdict(float))\n for feat_name, curr_fd in feature_dict.items():\n for chrom, intlist in curr_fd.items():\n for a, b in intlist:\n ogenes_ints = gene_lookup[chrom][a:b]\n for gp in ogenes_ints:\n gname, strand = gp.data\n has5p, has3p = get_gene_ends(gp.begin, gp.end, strand, a, b)\n gsegs_hit = gseg_cn_d[chrom][gp.begin:gp.end]\n gene_valid_cns = [x.data for x in gsegs_hit if x.end - x.begin > 1000]\n if gene_valid_cns:\n gene_cn = max(gene_valid_cns)\n else:\n gene_cn = \"unknown\"\n feat_to_gene_cn[feat_name][gname] = gene_cn\n if has5p:\n feat_to_gene_trunc[feat_name][gname].add(\"5p\")\n if has3p:\n feat_to_gene_trunc[feat_name][gname].add(\"3p\")\n\n return feat_to_gene_trunc, feat_to_gene_cn\n\n\ndef get_gseg_cns(graphf, add_chr_tag):\n gseg_cn_d = defaultdict(IntervalTree)\n with open(graphf) as infile:\n for line in infile:\n fields = line.rsplit()\n if line.startswith(\"sequence\"):\n c, s, e = fields[1].rsplit(\":\")[0], int(fields[1].rsplit(\":\")[1][:-1]), int(fields[2].rsplit(\":\")[1][:-1])+1\n cn = float(fields[3])\n if add_chr_tag and not c.startswith('chr'):\n c = \"chr\" + c\n\n gseg_cn_d[c].addi(s, e, cn)\n\n return gseg_cn_d\n\n\ndef extract_gene_list(sname, ampN, gene_lookup, cycleList, segSeqD, bfb_cycle_inds, ecIndexClusters,\n invalidInds, bfbStat, ecStat, ampClass, graphf, add_chr_tag, prefix):\n feature_dict = {}\n gseg_cn_d = get_gseg_cns(graphf, add_chr_tag)\n invalidSet = set(invalidInds)\n all_used = invalidSet.union(bfb_cycle_inds)\n used_segs = defaultdict(IntervalTree)\n graph_cns = get_graph_cns(graphf, add_chr_tag)\n if bfbStat:\n # collect unmerged genomic intervals comprising the feature\n bfb_interval_dict = defaultdict(list)\n for b_ind in bfb_cycle_inds:\n if b_ind not in invalidSet:\n for c_id in cycleList[b_ind]:\n chrom, l, r = segSeqD[abs(c_id)]\n if chrom:\n bfb_interval_dict[chrom].append((l, r))\n used_segs[chrom].addi(l, r+1)\n\n feature_dict[\"BFB_1\"] = bfb_interval_dict\n\n if ecStat:\n # collect unmerged genomic intervals comprising the feature\n for amp_ind, ec_cycle_inds in enumerate(ecIndexClusters):\n ec_interval_dict = defaultdict(list)\n for e_ind in ec_cycle_inds:\n all_used.add(e_ind)\n if e_ind not in invalidSet:\n for c_id in cycleList[e_ind]:\n chrom, l, r = segSeqD[abs(c_id)]\n if chrom:\n used_segs[chrom].addi(l, r+1)\n # chop out low cn regions\n seg_t = IntervalTree([Interval(l, r+1)])\n olapping_low_cns = [x for x in graph_cns[chrom][l:r+1] if x.data < 4]\n for x in olapping_low_cns:\n seg_t.chop(x.begin, x.end+1)\n\n for x in seg_t:\n ec_interval_dict[chrom].append((x.begin, x.end))\n\n feature_dict[\"ecDNA_\" + str(amp_ind + 1)] = ec_interval_dict\n\n if ampClass != \"No amp/Invalid\":\n other_interval_dict = defaultdict(list)\n for o_ind in range(len(cycleList)):\n if o_ind not in all_used:\n for c_id in cycleList[o_ind]:\n if abs(c_id) not in used_segs:\n chrom, l, r = segSeqD[abs(c_id)]\n if not used_segs[chrom][l:r] and not chrom is None:\n other_interval_dict[chrom].append((l, r))\n\n if not ecStat and not bfbStat:\n feature_dict[ampClass + \"_1\"] = other_interval_dict\n else:\n feature_dict[\"unknown_1\"] = other_interval_dict\n\n # merge all the intervals in each list of intervals\n # tot_init_intervals = sum([len(ilist) for fd in feature_dict.values() for ilist in fd.values()])\n merge_intervals(feature_dict)\n # tot_final_intervals = 
sum([len(ilist) for fd in feature_dict.values() for ilist in fd.values()])\n # print(\"Feature extraction: started with \" + str(tot_init_intervals) + \" unmerged intervals, finished with \" + str(\n # tot_final_intervals) + \" intervals\")\n\n write_interval_beds(prefix, sname, ampN, feature_dict)\n return get_genes_from_intervals(gene_lookup, feature_dict, gseg_cn_d)\n","sub_path":"get_genes.py","file_name":"get_genes.py","file_ext":"py","file_size_in_byte":5857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"104294787","text":"# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this\n# file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport bisect\nimport itertools\nfrom fwunit.ip import IP, IPSet\nimport logging\nfrom fwunit.types import Rule\nfrom fwunit.common import simplify_rules\nfrom fwunit.common import combine_names\nfrom collections import namedtuple\n\nlogger = logging.getLogger(__name__)\n\nSubnet = namedtuple('Subnet', ['cidr_block', 'name', 'dynamic'])\nSecurityGroupId = namedtuple('SecurityGroupId', ['id', 'region'])\n\n\ndef get_rules(aws, app_map, regions, dynamic_subnets):\n if not regions:\n logger.info(\"Getting all regions\")\n regions = aws.all_regions()\n\n logger.info(\"collecting subnets\")\n subnets = []\n managed_ip_space = IPSet([])\n for id, subnet in aws.get_all_subnets(regions).iteritems():\n name = subnet.tags.get('Name', id)\n dynamic = name in dynamic_subnets or id in dynamic_subnets\n cidr_block = IP(subnet.cidr_block)\n subnet = Subnet(cidr_block=cidr_block, name=name, dynamic=dynamic)\n subnets.append(subnet)\n managed_ip_space = managed_ip_space + IPSet([cidr_block])\n unmanaged_ip_space = IPSet([IP('0.0.0.0/0')]) - managed_ip_space\n\n logger.info(\"collecting dynamic subnet IP ranges\")\n dynamic_ipsets = {}\n per_host_subnet_ips = IPSet()\n for subnet in subnets:\n if subnet.dynamic:\n ipset = dynamic_ipsets.get(subnet.name, IPSet([]))\n ipset += IPSet([subnet.cidr_block])\n dynamic_ipsets[subnet.name] = ipset\n else:\n per_host_subnet_ips += IPSet([subnet.cidr_block])\n\n # sort by IP subnet, so we can use a binary search\n logger.info(\"sorting subnets by IP\")\n subnets.sort(key=lambda s: s.cidr_block)\n _subnet_blocks = [s.cidr_block for s in subnets]\n\n def subnet_by_ip(ip):\n i = bisect.bisect_right(_subnet_blocks, ip)\n if i and ip in _subnet_blocks[i - 1]:\n return subnets[i - 1]\n\n logger.info(\"examining instances\")\n sgids_by_dynamic_subnet = {} # {subnet name: set of SecurityGroupIds}\n sgids_by_instance = {} # {instance_name: [ip, set of SecurityGroupIds]}\n all_sgids = set()\n ips_by_sg = {} # {group id: IPSet}\n for id, instance in aws.get_all_instances(regions).iteritems():\n if instance.state == 'terminated' or instance.state == 'shutting-down':\n continue # meh, who cares\n if not instance.vpc_id:\n continue # not in vpc; ignored\n if not instance.private_ip_address:\n logger.debug(\n \"ignoring instance with no private_ip_address: %s, tags %r\",\n instance.id, instance.tags)\n continue\n ip = IP(instance.private_ip_address)\n\n for g in instance.groups:\n ips_by_sg[g.id] = ips_by_sg.get(g.id, IPSet([])) + IPSet([IP(ip)])\n\n subnet = subnet_by_ip(ip)\n if not subnet:\n logger.debug(\n \"ignoring instance with no matching subnet for %s: %s, tags %r\",\n ip, instance.id, instance.tags)\n continue\n\n if subnet.dynamic:\n sgset = sgids_by_dynamic_subnet.setdefault(subnet.name, 
set())\n else:\n inst_name = instance.tags.get('Name', instance.id)\n if inst_name in sgids_by_instance:\n inst_name = inst_name + ' ({})'.format(instance.id)\n sgset = set()\n sgids_by_instance[inst_name] = [ip, sgset]\n new_sgids = set(SecurityGroupId(g.id, instance.region.name)\n for g in instance.groups)\n sgset.update(new_sgids)\n all_sgids.update(new_sgids)\n\n logger.info(\"accumulating security groups\")\n all_apps = set(app_map.values())\n security_groups = {}\n for sgid in all_sgids:\n sg = security_groups[sgid] = aws.get_security_group(sgid)\n assert sg, \"no security group with id {}\".format(sgid)\n # pre-process all of the rules' apps now\n for sgrule in itertools.chain(sg.rules, sg.rules_egress):\n proto = str(sgrule.ip_protocol)\n if proto == '-1':\n proto = 'any'\n if sgrule.from_port == sgrule.to_port:\n if str(sgrule.from_port) in (\"None\", \"-1\"):\n app = \"*/{}\".format(proto)\n else:\n app = '{}/{}'.format(sgrule.from_port, proto)\n else:\n app = '{}-{}/{}'.format(sgrule.from_port, sgrule.to_port, proto)\n app = app_map[app]\n sgrule.app = app\n all_apps.add(app)\n\n rules = {}\n to_intersect = {}\n def make_rules(sgid, local):\n sg = security_groups[sgid]\n for dir, sgrules in [('in', sg.rules), ('out', sg.rules_egress)]:\n for sgrule in sgrules:\n if sgrule.app == '*/any':\n apps = all_apps | set(['@@other'])\n else:\n apps = [sgrule.app]\n for app in apps:\n for grant in sgrule.grants:\n if grant.cidr_ip:\n remote = IPSet([IP(grant.cidr_ip)])\n else:\n remote = ips_by_sg.get(grant.group_id, None)\n if not remote:\n continue\n src, dst = (remote, local) if dir == 'in' else (local, remote)\n name = \"{}/{}\".format(sg.name, dir)\n # first make rules involving non-managed space, leaving\n # only managed-to-managed\n if dir == 'in':\n unmanaged_src = src & unmanaged_ip_space\n if unmanaged_src:\n rules.setdefault(app, []).append(Rule(\n src=unmanaged_src, dst=dst, app=app, name=name))\n src = src & managed_ip_space\n else:\n unmanaged_dst = dst & unmanaged_ip_space\n if unmanaged_dst:\n rules.setdefault(app, []).append(Rule(\n src=src, dst=unmanaged_dst, app=app, name=name))\n dst = dst & managed_ip_space\n if src and dst:\n to_intersect.setdefault(app, {}).setdefault(dir, []).append((src, dst, name))\n\n logger.info(\"writing rules for dynamic subnets\")\n for subnet_name, sgids in sgids_by_dynamic_subnet.iteritems():\n subnet = dynamic_ipsets[subnet_name]\n logger.debug(\" subnet %s, %s\", subnet_name, subnet)\n for sgid in sgids:\n make_rules(sgid, subnet)\n\n logger.info(\"writing rules for instances in per-host subnets\")\n per_host_host_ips = IPSet()\n for inst_name, info in sgids_by_instance.iteritems():\n ip, sgids = info\n logger.debug(\" instance %s at %s\", inst_name, ip)\n host_ip = IPSet([ip])\n per_host_host_ips += host_ip\n for sgid in sgids:\n make_rules(sgid, host_ip)\n\n logger.info(\"assuming unrestricted outbound access from unoccupied IPs in per-host subnets\")\n unoccupied = per_host_subnet_ips - per_host_host_ips\n for app in all_apps:\n rules.setdefault(app, []).append(Rule(\n src=unoccupied, dst=unmanaged_ip_space, app=app, name='unoccupied/out'))\n to_intersect.setdefault(app, {}).setdefault('out', []).append((unoccupied, managed_ip_space, 'unoccupied/out'))\n\n # traffic within the manage Ip space is governed both by outbound rules on\n # the source and inbound rules on the destination.\n logger.info(\"intersecting inbound and outbound rules\")\n for app, dirs in to_intersect.iteritems():\n in_rules = dirs.get('in', [])\n out_rules = 
dirs.get('out', [])\n logger.debug(\"..for %s\", app)\n new_rules = []\n for inr in in_rules:\n for outr in out_rules:\n src = inr[0] & outr[0]\n if not src:\n continue\n dst = inr[1] & outr[1]\n if not dst:\n continue\n new_rules.append(Rule(src=src, dst=dst, app=app,\n name=combine_names(inr[2], outr[2])))\n # simplify now, within this app, to save space and time\n new_rules = simplify_rules({app: new_rules})[app]\n rules.setdefault(app, []).extend(new_rules)\n\n rules = simplify_rules(rules)\n return rules\n","sub_path":"fwunit/aws/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":8654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"63843018","text":"from C_fit_full import Stich_grids\nimport numpy as np\nimport pandas as pd\nimport os\nfrom glob import glob\n\nmetal=np.round(np.arange(0.002,0.031,0.001),3)\nage=np.round(np.arange(.5,6.1,.1),1)\ntau=[0,8.0, 8.3, 8.48, 8.6, 8.7, 8.78, 8.85, 8.9, 8.95, 9.0, 9.04, 9.08, 9.11, 9.15, 9.18, 9.2, 9.23, 9.26, 9.28,\n 9.3, 9.32, 9.34, 9.36, 9.38, 9.4, 9.41, 9.43, 9.45, 9.46, 9.48]\n\nflist = glob('/home/vestrada78840/chidat/*_full_fit_tZ_pos.npy')\nglist = [os.path.basename(U).replace('_full_fit_tZ_pos.npy','') for U in flist]\n\ndef Best_fit_model(name, metal, age, tau, redshift, dust = np.arange(0,1.1,.1)):\n grids = ['/fdata/scratch/vestrada78840/chidat/{0}_d{1}_chidata.npy'.format(name,U) for U in range(11)]\n chi = Stich_grids(grids)\n x = np.argwhere(chi == np.min(chi))[0]\n print(x)\n print(dust[x[0]],metal[x[1]], age[x[2]], tau[x[3]],redshift[x[4]])\n return dust[x[0]],metal[x[1]], age[x[2]], tau[x[3]],redshift[x[4]]\n\nbfZ,bft,bftau,bfz,bfd = np.zeros([5,len(glist)])\nfor i in range(len(glist)):\n # load the saved redshift posterior (np.load; np.save returns None)\n z,Pz = np.load('/home/vestrada78840/chidat/{0}_rs_pos.npy'.format(glist[i]))\n\n bfd[i],bfZ[i],bft[i],bftau[i],bfz[i] = Best_fit_model(glist[i] + '_full_fit', metal, age, tau, z)\n \nDF = pd.DataFrame({'gids':glist,'bfZ':bfZ,'bft':bft,'bftau':bftau,'bfz':bfz,'bfd':bfd})\n\nDF.to_pickle('/home/vestrada78840/chidat/BF_fullfit.pkl')","sub_path":"scripts/C_get_best_fit.py","file_name":"C_get_best_fit.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"445523575","text":"import sys \n\nN = int(input())\nP = [0]+list(map(int,sys.stdin.readline().split()))\ndp = [0]*(N+1)\ndp[1] = P[1]\n\nfor i in range(2,N+1):\n for j in range(i-1,0,-1):\n \n if P[i]<=P[j]:\n continue\n else:\n if dp[i]')\ndef hello(name=None):\n return render_template('hello.html', name=name)\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n if request.method == 'POST':\n print('POST method')\n return 'POST'\n else:\n print('GET method')\n return 'GET'\n\n@app.route('/upload', methods=['GET', 'POST'])\ndef upload():\n if request.method == 'POST':\n f = request.files['file']\n f.save('file')\n return 'It\\'s ok!'\n\n@app.errorhandler(404)\ndef page_not_found(error):\n return render_template('error_pages/404.html'), 404\n\nif __name__ == '__main__':\n app.run(debug=True)\n\n","sub_path":"project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"300271836","text":"\nimport os\nimport operator\nimport subprocess\nimport bottle\n\nsubprocess.Popen(['python', 'sensory.py'])\n\napp_dir = os.path.dirname(os.path.realpath(__file__))\nstatic_dir = os.path.join(app_dir, 
'static')\nphotos_dir = os.path.join(static_dir, 'photos')\n\n@bottle.get('/static/')\ndef static(filepath):\n return bottle.static_file(filepath, root=static_dir)\n\ndef photo_dict_from_filename(filename):\n return {\n 'filename': filename,\n 'url': os.path.join('static', 'photos', filename),\n 'ts': float(filename.replace('.jpg', ''))\n }\n\n@bottle.get('/photos')\ndef photos():\n photo_filenames = os.listdir(photos_dir) if os.path.exists(photos_dir) else []\n photo_dicts = [photo_dict_from_filename(f) for f in photo_filenames]\n return {\n \"photos\": sorted(photo_dicts, key=operator.itemgetter('ts'), reverse=True)\n }\n\n@bottle.get('/')\ndef index():\n return (''\n 'Sensory'\n ''\n '
'\n ''\n ''\n ''\n ''\n ''\n '')\n\nbottle.run(host='0.0.0.0', port=8080)\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"370337084","text":"import cv2\nimport numpy as np\n\ndef rgb_to_hsv(arr):\n \n arr = np.array(arr.astype(np.float32))\n \n # need to scale [0, 255] to [0, 1]\n if np.any(arr > 1.0):\n arr /= 255.0\n \n r = arr[..., 0]\n g = arr[..., 1]\n b = arr[..., 2]\n \n hsv = np.zeros_like(arr)\n \n # element-wise max and min against the channel dimension (HWC layout) \n maxes = arr.max(-1)\n mins = arr.min(-1)\n \n # chroma\n c = maxes - mins\n \n # to prevent division by zero\n c_nonzero = c > 0.0\n max_nonzero = maxes > 0.0\n \n # hue calculation\n # red channel\n idx = (maxes == r) & c_nonzero\n hsv[idx, 0] = ((g[idx] - b[idx]) / c[idx]) % 6.0\n \n # green channel\n idx = (maxes == g) & c_nonzero\n hsv[idx, 0] = (b[idx] - r[idx]) / c[idx] + 2.0\n \n # blue channel\n idx = (maxes == b) & c_nonzero\n hsv[idx, 0] = (r[idx] - g[idx]) / c[idx] + 4.0\n \n hsv[..., 0] = hsv[..., 0] * 60 # 6.0\n\n # saturation\n hsv[max_nonzero, 1] = c[max_nonzero] / maxes[max_nonzero]\n \n # value\n hsv[..., 2] = maxes \n \n return hsv \n\ndef hsv_to_rgb(arr):\n \n h = arr[..., 0]\n s = arr[..., 1]\n v = arr[..., 2]\n \n rgb_arr = np.zeros_like(arr)\n \n chroma = v * s\n \n h_prime = h * 6.0\n \n x = chroma * ( 1 - np.abs(h_prime % 2 - 1)) \n m = v - chroma\n \n rgb_arr[..., 0] = chroma * (\n np.logical_and(h_prime >= 0, h_prime < 1) + \n np.logical_and(h_prime >= 5, h_prime < 6)\n ) + x * (\n np.logical_and(h_prime >= 1, h_prime < 2) + \n np.logical_and(h_prime >= 4, h_prime < 5) \n )\n \n rgb_arr[..., 1] = chroma * (\n np.logical_and(h_prime >= 1, h_prime < 3)\n ) + x * (\n np.logical_and(h_prime >= 0, h_prime < 1) + \n np.logical_and(h_prime >= 3, h_prime < 4) \n )\n \n rgb_arr[..., 2] = chroma * (\n np.logical_and(h_prime >= 3, h_prime < 5)\n ) + x * (\n np.logical_and(h_prime >= 2, h_prime < 3) + \n np.logical_and(h_prime >= 5, h_prime < 6) \n ) \n \n for i in xrange(3):\n rgb_arr[..., i] += m\n \n rgb_arr = np.round(rgb_arr * 255.0).astype(np.uint8)\n rgb_arr = np.minimum(rgb_arr, 255)\n \n return rgb_arr\n\n\nrgb = cv2.cvtColor(cv2.imread('test.bmp'), cv2.COLOR_BGR2RGB)\nhsv = cv2.cvtColor(rgb, cv2.COLOR_RGB2HSV)\n# plt.imshow(rgb)\n\nreconstructed_rgb = hsv_to_rgb(rgb_to_hsv(rgb))\n\nreconstructed_rgb_opencv = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)\n\n#cv2.imwrite(\"test_python.bmp\", reconstructed_rgb)\n\nprint(\"Original:\\n\")\nprint(rgb)\nprint(\"\\n\")\nprint(\"Reconstructed OpenCV:\\n\")\nprint(reconstructed_rgb_opencv)\nprint(\"\\n\")\nprint(\"Reconstructed Marek:\\n\")\nprint(reconstructed_rgb)","sub_path":"mihai/mihai_test.py","file_name":"mihai_test.py","file_ext":"py","file_size_in_byte":2746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"196319371","text":"from .exposure import AllExposureTypes\nfrom .exposureSerialization import ExposureSerialization\n\n########################################################################\nclass ModuleExposures(ExposureSerialization,AllExposureTypes):\n \n def __init__(self):\n self.num_positions = 0\n self.beta_msci = 0.0\n self.beta_weight = 0\n self.beta_sp500 = 0.0\n \n AllExposureTypes.__init__(self)\n ExposureSerialization.__init__(self)\n return\n \n ### In the case of the fund, when multiple portfolios are conglomerated together,\n ### 
this will add the exposure for the portfolio to the fund.\n def addExposureForPortfolio(self,portfolio):\n for (expName,exp) in self.exposures.iteritems():\n exp.addPortfolio(portfolio)\n for (expName,exp) in self.exposures.iteritems():\n exp.reallocate()\n \n self.num_positions += portfolio.num_positions\n \n w_new = portfolio.net_delta\n oldWeight = self.beta_weight \n self.beta_weight += portfolio.market \n ## Adjust betas incrementally so we don't have to consolidate in the end.\n if self.beta_weight != 0.0:\n self.beta_msci = (oldWeight * self.beta_msci + w_new * portfolio.beta_msci)/self.beta_weight\n self.beta_sp500 = (oldWeight * self.beta_sp500 + w_new * portfolio.beta_sp500)/self.beta_weight\n \n return\n \n ### Attributes the exposure and other metric calculations for a security to the\n ### conglomerate object.\n def addExposureForSecurity(self,security):\n\n for (expName,exp) in self.exposures.iteritems():\n exp.addSecurity(security)\n for (expName,exp) in self.exposures.iteritems():\n exp.reallocate()\n\n self.num_positions += 1\n\n w_new = security.net_delta\n oldWeight = self.beta_weight \n self.beta_weight += security.market\n ## Adjust betas incrementally so we don't have to consolidate in the end.\n if self.beta_weight != 0.0:\n self.beta_msci = (oldWeight * self.beta_msci + w_new * security.BetaMSCI.value)/self.beta_weight\n self.beta_sp500 = (oldWeight * self.beta_sp500 + w_new * security.BetaSP500.value)/self.beta_weight\n return\n\n","sub_path":"app/exposure/moduleExposure.py","file_name":"moduleExposure.py","file_ext":"py","file_size_in_byte":2287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"446737165","text":"#!/usr/bin/env python3\n\nimport argparse\nimport operator\nimport functools\n\nclass Knot:\n def __init__(self):\n self.length = 256\n self.values = list(range(self.length))\n self.skip = self.position = 0\n\n def twist(self, length):\n r = range(self.position, self.position + length)\n for i, j in list(zip(r, reversed(r)))[:length//2]:\n i, j = i%self.length, j%self.length\n self.values[i], self.values[j] = self.values[j], self.values[i]\n self.position = (self.position + length + self.skip) % self.length\n self.skip += 1\n\n def dense_hash(self):\n blocks = []\n for i in range(0,256,16):\n blocks.append(functools.reduce(operator.xor, self.values[i:i+16]))\n return ''.join('{:02x}'.format(b) for b in blocks)\n \nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('FILE', nargs = '?', default = 'input')\n args = parser.parse_args()\n with open(args.FILE, 'r') as f:\n s = f.read().strip()\n lengths = [int(i) for i in s.split(',')]\n k = Knot()\n for length in lengths:\n k.twist(length)\n print(k.values[0]*k.values[1])\n\n k2 = Knot()\n lengths = [ord(c) for c in s] + [17, 31, 73, 47, 23]\n for _ in range(64):\n for length in lengths:\n k2.twist(length)\n print(k2.dense_hash())\n","sub_path":"2017/day_10/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"159938564","text":"\"\"\"Collection of useful actions to define arguments.\"\"\"\n\nimport os\nimport re\nimport sys\nimport yaml\nimport argparse\nfrom enum import Enum\nfrom argparse import Namespace, Action, SUPPRESS, _StoreAction, _SubParsersAction\n\nfrom .optionals import get_config_read_mode, FilesCompleterMethod\nfrom .typing import restricted_number_type\nfrom .util import (\n 
yamlParserError,\n yamlScannerError,\n ParserError,\n namespace_to_dict,\n dict_to_namespace,\n Path,\n _flat_namespace_to_dict,\n _dict_to_flat_namespace,\n _check_unknown_kwargs,\n _issubclass\n)\n\n\n__all__ = [\n 'ActionConfigFile',\n 'ActionYesNo',\n 'ActionEnum',\n 'ActionOperators',\n 'ActionParser',\n 'ActionPath',\n 'ActionPathList',\n]\n\n\ndef _find_action(parser, dest:str):\n \"\"\"Finds an action in a parser given its dest.\n\n Args:\n parser (ArgumentParser): A parser where to search.\n dest: The dest string to search with.\n\n Returns:\n Action or None: The action if found, otherwise None.\n \"\"\"\n for action in parser._actions:\n if action.dest == dest:\n return action\n elif isinstance(action, ActionParser) and dest.startswith(action.dest+'.'):\n return _find_action(action._parser, dest)\n elif isinstance(action, _ActionSubCommands) and dest in action._name_parser_map:\n return action\n return None\n\n\ndef _is_action_value_list(action:Action):\n \"\"\"Checks whether an action produces a list value.\n\n Args:\n action: An argparse action to check.\n\n Returns:\n bool: True if produces list otherwise False.\n \"\"\"\n if action.nargs in {'*', '+'} or isinstance(action.nargs, int):\n return True\n return False\n\n\nclass ActionConfigFile(Action, FilesCompleterMethod):\n \"\"\"Action to indicate that an argument is a configuration file or a configuration string.\"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"Initializer for ActionConfigFile instance.\"\"\"\n if 'default' in kwargs:\n raise ValueError('default not allowed for ActionConfigFile, use default_config_files.')\n opt_name = kwargs['option_strings']\n opt_name = opt_name[0] if len(opt_name) == 1 else [x for x in opt_name if x[0:2] == '--'][0]\n if '.' in opt_name:\n raise ValueError('ActionConfigFile must be a top level option.')\n super().__init__(**kwargs)\n\n def __call__(self, parser, namespace, values, option_string=None):\n \"\"\"Parses the given configuration and adds all the corresponding keys to the namespace.\n\n Raises:\n TypeError: If there are problems parsing the configuration.\n \"\"\"\n self._apply_config(parser, namespace, self.dest, values)\n\n @staticmethod\n def _apply_config(parser, namespace, dest, value):\n if not hasattr(namespace, dest) or not isinstance(getattr(namespace, dest), list):\n setattr(namespace, dest, [])\n try:\n cfg_path = Path(value, mode=get_config_read_mode())\n except TypeError as ex_path:\n try:\n if isinstance(yaml.safe_load(value), str):\n raise ex_path\n cfg_path = None\n cfg_file = parser.parse_string(value, env=False, defaults=False, _skip_check=True)\n except (TypeError, yamlParserError, yamlScannerError) as ex_str:\n raise TypeError('Parser key \"'+dest+'\": '+str(ex_str))\n else:\n cfg_file = parser.parse_path(value, env=False, defaults=False, _skip_check=True)\n cfg_file = _dict_to_flat_namespace(namespace_to_dict(cfg_file))\n getattr(namespace, dest).append(cfg_path)\n for key, val in vars(cfg_file).items():\n if key == '__cwd__' and hasattr(namespace, '__cwd__'):\n setattr(namespace, key, getattr(namespace, key)+val)\n else:\n setattr(namespace, key, val)\n\n\nclass _ActionPrintConfig(Action):\n def __init__(self,\n option_strings,\n dest=SUPPRESS,\n default=SUPPRESS,\n help='print configuration and exit'):\n super().__init__(option_strings=option_strings,\n dest=dest,\n default=default,\n nargs=0,\n help=help)\n\n def __call__(self, parser, *args, **kwargs):\n parser._print_config = True\n\n\nclass ActionYesNo(Action):\n \"\"\"Paired options --{yes_prefix}opt, 
--{no_prefix}opt to set True or False respectively.\"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"Initializer for ActionYesNo instance.\n\n Args:\n yes_prefix (str): Prefix for yes option (default='').\n no_prefix (str or None): Prefix for no option (default='no_').\n\n Raises:\n ValueError: If a parameter is invalid.\n \"\"\"\n self._yes_prefix = ''\n self._no_prefix = 'no_'\n if 'yes_prefix' in kwargs or 'no_prefix' in kwargs or len(kwargs) == 0:\n _check_unknown_kwargs(kwargs, {'yes_prefix', 'no_prefix'})\n if 'yes_prefix' in kwargs:\n self._yes_prefix = kwargs['yes_prefix']\n if 'no_prefix' in kwargs:\n self._no_prefix = kwargs['no_prefix']\n else:\n self._yes_prefix = kwargs.pop('_yes_prefix') if '_yes_prefix' in kwargs else ''\n self._no_prefix = kwargs.pop('_no_prefix') if '_no_prefix' in kwargs else 'no_'\n if len(kwargs['option_strings']) == 0:\n raise ValueError(type(self).__name__+' not intended for positional arguments ('+kwargs['dest']+').')\n opt_name = kwargs['option_strings'][0]\n if not opt_name.startswith('--'+self._yes_prefix):\n raise ValueError('Expected option string to start with \"--'+self._yes_prefix+'\".')\n if self._no_prefix is not None:\n kwargs['option_strings'] += [re.sub('^--'+self._yes_prefix, '--'+self._no_prefix, opt_name)]\n if self._no_prefix is None and 'nargs' in kwargs and kwargs['nargs'] != 1:\n raise ValueError('ActionYesNo with no_prefix=None only supports nargs=1.')\n if 'nargs' in kwargs and kwargs['nargs'] in {'?', 1}:\n kwargs['metavar'] = '{true,yes,false,no}'\n if kwargs['nargs'] == 1:\n kwargs['nargs'] = None\n else:\n kwargs['nargs'] = 0\n kwargs['metavar'] = None\n if 'default' not in kwargs:\n kwargs['default'] = False\n kwargs['type'] = ActionYesNo._boolean_type\n super().__init__(**kwargs)\n\n def __call__(self, *args, **kwargs):\n \"\"\"Sets the corresponding key to True or False depending on the option string used.\"\"\"\n if len(args) == 0:\n kwargs['_yes_prefix'] = self._yes_prefix\n kwargs['_no_prefix'] = self._no_prefix\n return ActionYesNo(**kwargs)\n value = args[2] if isinstance(args[2], bool) else True\n if self._no_prefix is not None and args[3].startswith('--'+self._no_prefix):\n setattr(args[1], self.dest, not value)\n else:\n setattr(args[1], self.dest, value)\n\n def _add_dest_prefix(self, prefix):\n self.dest = prefix+'.'+self.dest\n self.option_strings[0] = re.sub('^--'+self._yes_prefix, '--'+self._yes_prefix+prefix+'.', self.option_strings[0])\n if self._no_prefix is not None:\n self.option_strings[-1] = re.sub('^--'+self._no_prefix, '--'+self._no_prefix+prefix+'.', self.option_strings[-1])\n\n def _check_type(self, value, cfg=None):\n return ActionYesNo._boolean_type(value)\n\n @staticmethod\n def _boolean_type(x):\n if isinstance(x, str) and x.lower() in {'true', 'yes', 'false', 'no'}:\n x = True if x.lower() in {'true', 'yes'} else False\n elif not isinstance(x, bool):\n raise TypeError('Value not boolean: '+str(x)+'.')\n return x\n\n def completer(self, **kwargs):\n \"\"\"Used by argcomplete to support tab completion of arguments.\"\"\"\n return ['true', 'false', 'yes', 'no']\n\n\nclass ActionEnum(Action):\n \"\"\"An action based on an Enum that maps to-from strings and enum values.\"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"Initializer for ActionEnum instance.\n\n Args:\n enum (Enum): An Enum class.\n\n Raises:\n ValueError: If a parameter is invalid.\n \"\"\"\n if 'enum' in kwargs:\n _check_unknown_kwargs(kwargs, {'enum'})\n if not _issubclass(kwargs['enum'], Enum):\n raise ValueError('Expected enum to be 
a subclass of Enum.')\n self._enum = kwargs['enum']\n elif '_enum' not in kwargs:\n raise ValueError('Expected enum keyword argument.')\n else:\n self._enum = kwargs.pop('_enum')\n kwargs['metavar'] = '{'+','.join(self._enum.__members__.keys())+'}'\n super().__init__(**kwargs)\n\n def __call__(self, *args, **kwargs):\n \"\"\"Parses an argument mapping a string to its Enum value.\n\n Raises:\n TypeError: If value not present in the Enum.\n \"\"\"\n if len(args) == 0:\n kwargs['_enum'] = self._enum\n return ActionEnum(**kwargs)\n setattr(args[1], self.dest, self._check_type(args[2]))\n\n def _check_type(self, value, cfg=None):\n islist = _is_action_value_list(self)\n if not islist:\n value = [value]\n for num, val in enumerate(value):\n try:\n if isinstance(val, str):\n value[num] = self._enum[val]\n else:\n self._enum(val)\n except KeyError:\n elem = '' if not islist else ' element '+str(num+1)\n raise TypeError('Parser key \"'+self.dest+'\"'+elem+': value '+str(val)+' not in '+self._enum.__name__+'.')\n return value if islist else value[0]\n\n def completer(self, **kwargs):\n \"\"\"Used by argcomplete to support tab completion of arguments.\"\"\"\n return list(self._enum.__members__.keys())\n\n\nclass ActionOperators:\n \"\"\"DEPRECATED: Action to restrict a value with comparison operators.\n\n The new alternative is explained in :ref:`restricted-numbers`.\n \"\"\"\n\n def __init__(self, **kwargs):\n if 'expr' in kwargs:\n _check_unknown_kwargs(kwargs, {'expr', 'join', 'type'})\n self._type = restricted_number_type(None, kwargs.get('type', int), kwargs['expr'], kwargs.get('join', 'and'))\n else:\n raise ValueError('Expected expr keyword argument.')\n\n def __call__(self, *args, **kwargs):\n if 'type' in kwargs:\n raise ValueError('ActionOperators does not allow type given to add_argument.')\n kwargs['type'] = self._type\n return _StoreAction(**kwargs)\n\n\nclass ActionParser(Action):\n \"\"\"Action to parse option with a given parser optionally loading from file if string value.\"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"Initializer for ActionParser instance.\n\n Args:\n parser (ArgumentParser): A parser to parse the option with.\n\n Raises:\n ValueError: If the parser parameter is invalid.\n \"\"\"\n if 'parser' in kwargs:\n ## Runs when first initializing class by external user ##\n _check_unknown_kwargs(kwargs, {'parser'})\n self._parser = kwargs['parser']\n if not isinstance(self._parser, argparse.ArgumentParser):\n raise ValueError('Expected parser keyword argument to be an ArgumentParser.')\n elif '_parser' not in kwargs:\n raise ValueError('Expected parser keyword argument.')\n else:\n ## Runs when initialized from the __call__ method below ##\n self._parser = kwargs.pop('_parser')\n super().__init__(**kwargs)\n\n def __call__(self, *args, **kwargs):\n \"\"\"Parses an argument with the corresponding parser and if valid, sets the parsed value to the corresponding key.\n\n Raises:\n TypeError: If the argument is not valid.\n \"\"\"\n if len(args) == 0:\n ## Runs when within _ActionsContainer super().add_argument call ##\n kwargs['_parser'] = self._parser\n return ActionParser(**kwargs)\n ## Runs when parsing a value ##\n value = _dict_to_flat_namespace(namespace_to_dict(self._check_type(args[2])))\n for key, val in vars(value).items():\n setattr(args[1], key, val)\n if hasattr(value, '__path__'):\n setattr(args[1], self.dest+'.__path__', getattr(value, '__path__'))\n\n def _check_type(self, value, cfg=None):\n try:\n fpath = None\n if isinstance(value, str):\n value = 
yaml.safe_load(value)\n if isinstance(value, str):\n fpath = Path(value, mode=get_config_read_mode())\n value = self._parser.parse_path(fpath, _base=self.dest)\n else:\n value = dict_to_namespace(_flat_namespace_to_dict(dict_to_namespace({self.dest: value})))\n self._parser.check_config(value, skip_none=True)\n if fpath is not None:\n value.__path__ = fpath\n except KeyError as ex:\n raise type(ex)(re.sub('^Parser key ([^:]+):', 'Parser key '+self.dest+'.\\\\1: ', str(ex)))\n return value\n\n @staticmethod\n def _set_inner_parser_prefix(parser, prefix, action):\n \"\"\"Sets the value of env_prefix to an ActionParser and all sub ActionParsers it contains.\n\n Args:\n parser (ArgumentParser): The parser to which the action belongs.\n action (ActionParser): The action to set its env_prefix.\n \"\"\"\n assert isinstance(action, ActionParser)\n action._parser.env_prefix = parser.env_prefix\n action._parser.default_env = parser.default_env\n option_string_actions = {}\n for key, val in action._parser._option_string_actions.items():\n option_string_actions[re.sub('^--', '--'+prefix+'.', key)] = val\n action._parser._option_string_actions = option_string_actions\n for subaction in action._parser._actions:\n if isinstance(subaction, ActionYesNo):\n subaction._add_dest_prefix(prefix)\n else:\n subaction.dest = prefix+'.'+subaction.dest\n for n in range(len(subaction.option_strings)):\n subaction.option_strings[n] = re.sub('^--', '--'+prefix+'.', subaction.option_strings[n])\n if isinstance(subaction, ActionParser):\n ActionParser._set_inner_parser_prefix(action._parser, prefix, subaction)\n\n\nclass _ActionSubCommands(_SubParsersAction):\n \"\"\"Extension of argparse._SubParsersAction to modify sub-commands functionality.\"\"\"\n\n _env_prefix = None\n\n\n def add_parser(self, name, **kwargs):\n \"\"\"Raises a NotImplementedError.\"\"\"\n raise NotImplementedError('In jsonargparse sub-commands are added using the add_subcommand method.')\n\n\n def add_subcommand(self, name, parser, **kwargs):\n \"\"\"Adds a parser as a sub-command parser.\n\n In contrast to `argparse.ArgumentParser.add_subparsers\n `_\n add_parser requires to be given a parser as argument.\n \"\"\"\n if parser._subparsers is not None:\n raise ValueError('Multiple levels of subcommands must be added in level order.')\n\n parser.prog = '%s [options] %s' % (self._prog_prefix, name)\n parser.env_prefix = self._env_prefix+'_'+name+'_'\n\n def remove_print_config(actions):\n print_config = [a for a in actions if isinstance(a, _ActionPrintConfig)]\n if len(print_config) > 0:\n actions.remove(print_config[0])\n\n remove_print_config(parser._actions)\n remove_print_config(parser._action_groups[1]._group_actions)\n\n # create a pseudo-action to hold the choice help\n aliases = kwargs.pop('aliases', ())\n if 'help' in kwargs:\n help_arg = kwargs.pop('help')\n choice_action = self._ChoicesPseudoAction(name, aliases, help_arg)\n self._choices_actions.append(choice_action)\n\n # add the parser to the name-parser map\n self._name_parser_map[name] = parser\n for alias in aliases:\n self._name_parser_map[alias] = parser\n\n return parser\n\n\n def __call__(self, parser, namespace, values, option_string=None):\n \"\"\"Adds sub-command dest and parses sub-command arguments.\"\"\"\n subcommand = values[0]\n arg_strings = values[1:]\n\n # set the parser name\n setattr(namespace, self.dest, subcommand)\n\n # parse arguments\n if subcommand in self._name_parser_map:\n subparser = self._name_parser_map[subcommand]\n subnamespace, unk = 
subparser.parse_known_args(arg_strings)\n if unk:\n raise ParserError('Unrecognized arguments: %s' % ' '.join(unk))\n for key, value in vars(subnamespace).items():\n setattr(namespace, subcommand+'.'+key, value)\n\n\n @staticmethod\n def handle_subcommands(parser, cfg, env, defaults, prefix=''):\n \"\"\"Adds sub-command dest if missing and parses defaults and environment variables.\"\"\"\n if parser._subparsers is None:\n return\n\n cfg_dict = cfg.__dict__ if isinstance(cfg, Namespace) else cfg\n cfg_keys = set(vars(_dict_to_flat_namespace(cfg)).keys())\n cfg_keys = cfg_keys.union(set(cfg_dict.keys()))\n\n # Get subcommands action\n for action in parser._actions:\n if isinstance(action, _ActionSubCommands):\n break\n\n # Get sub-command parser\n subcommand = None\n dest = prefix + action.dest\n if dest in cfg_dict and cfg_dict[dest] is not None:\n subcommand = cfg_dict[dest]\n else:\n for key in action.choices.keys():\n if any([v.startswith(key+'.') for v in cfg_dict.keys()]):\n subcommand = key\n break\n cfg_dict[dest] = subcommand\n\n assert subcommand in action._name_parser_map\n subparser = action._name_parser_map[subcommand]\n\n # merge environment variable values and default values\n subnamespace = None\n if env:\n subnamespace = subparser.parse_env(defaults=defaults, nested=False, _skip_check=True)\n elif defaults:\n subnamespace = subparser.get_defaults(nested=False)\n\n if subnamespace is not None:\n for key, value in vars(subnamespace).items():\n key = prefix + subcommand+'.'+key\n if key not in cfg_keys:\n cfg_dict[key] = value\n\n if subparser._subparsers is not None:\n prefix = prefix + subcommand + '.'\n _ActionSubCommands.handle_subcommands(subparser, cfg, env, defaults, prefix)\n\n\nclass ActionPath(Action, FilesCompleterMethod):\n \"\"\"Action to check and store a path.\"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"Initializer for ActionPath instance.\n\n Args:\n mode (str): The required type and access permissions among [fdrwxcuFDRWX] as a keyword argument, e.g. ActionPath(mode='drw').\n skip_check (bool): Whether to skip path checks (def.=False).\n\n Raises:\n ValueError: If the mode parameter is invalid.\n \"\"\"\n if 'mode' in kwargs:\n _check_unknown_kwargs(kwargs, {'mode', 'skip_check'})\n Path._check_mode(kwargs['mode'])\n self._mode = kwargs['mode']\n self._skip_check = kwargs.get('skip_check', False)\n elif '_mode' not in kwargs:\n raise ValueError('ActionPath expects mode keyword argument.')\n else:\n self._mode = kwargs.pop('_mode')\n self._skip_check = kwargs.pop('_skip_check')\n super().__init__(**kwargs)\n\n def __call__(self, *args, **kwargs):\n \"\"\"Parses an argument as a Path and if valid sets the parsed value to the corresponding key.\n\n Raises:\n TypeError: If the argument is not a valid Path.\n \"\"\"\n if len(args) == 0:\n kwargs['_mode'] = self._mode\n kwargs['_skip_check'] = self._skip_check\n return ActionPath(**kwargs)\n if hasattr(self, 'nargs') and self.nargs == '?' 
and args[2] is None:\n setattr(args[1], self.dest, args[2])\n else:\n setattr(args[1], self.dest, self._check_type(args[2]))\n\n def _check_type(self, value, cfg=None, islist=None):\n islist = _is_action_value_list(self) if islist is None else islist\n if not islist:\n value = [value]\n try:\n for num, val in enumerate(value):\n if isinstance(val, Path):\n val = Path(str(val), mode=self._mode, skip_check=self._skip_check, cwd=val.cwd)\n else:\n val = Path(val, mode=self._mode, skip_check=self._skip_check)\n value[num] = val\n except TypeError as ex:\n raise TypeError('Parser key \"'+self.dest+'\": '+str(ex))\n return value if islist else value[0]\n\n\nclass ActionPathList(Action, FilesCompleterMethod):\n \"\"\"Action to check and store a list of file paths read from a plain text file or stream.\"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"Initializer for ActionPathList instance.\n\n Args:\n mode (str): The required type and access permissions among [fdrwxcuFDRWX] as a keyword argument (uppercase means not), e.g. ActionPathList(mode='fr').\n skip_check (bool): Whether to skip path checks (def.=False).\n rel (str): Whether relative paths are with respect to current working directory 'cwd' or the list's parent directory 'list' (default='cwd').\n\n Raises:\n ValueError: If any of the parameters (mode or rel) are invalid.\n \"\"\"\n if 'mode' in kwargs:\n _check_unknown_kwargs(kwargs, {'mode', 'skip_check', 'rel'})\n Path._check_mode(kwargs['mode'])\n self._mode = kwargs['mode']\n self._skip_check = kwargs.get('skip_check', False)\n self._rel = kwargs.get('rel', 'cwd')\n if self._rel not in {'cwd', 'list'}:\n raise ValueError('rel must be either \"cwd\" or \"list\", got '+str(self._rel)+'.')\n elif '_mode' not in kwargs:\n raise ValueError('Expected mode keyword argument.')\n else:\n self._mode = kwargs.pop('_mode')\n self._skip_check = kwargs.pop('_skip_check')\n self._rel = kwargs.pop('_rel')\n super().__init__(**kwargs)\n\n def __call__(self, *args, **kwargs):\n \"\"\"Parses an argument as a PathList and if valid sets the parsed value to the corresponding key.\n\n Raises:\n TypeError: If the argument is not a valid PathList.\n \"\"\"\n if len(args) == 0:\n if 'nargs' in kwargs and kwargs['nargs'] not in {'+', 1}:\n raise ValueError('ActionPathList only supports nargs of 1 or \"+\".')\n kwargs['_mode'] = self._mode\n kwargs['_skip_check'] = self._skip_check\n kwargs['_rel'] = self._rel\n return ActionPathList(**kwargs)\n setattr(args[1], self.dest, self._check_type(args[2]))\n\n def _check_type(self, value, cfg=None):\n if value == []:\n return value\n islist = _is_action_value_list(self)\n if not islist and not isinstance(value, list):\n value = [value]\n if isinstance(value, list) and all(isinstance(v, str) for v in value):\n path_list_files = value\n value = []\n for path_list_file in path_list_files:\n try:\n with sys.stdin if path_list_file == '-' else open(path_list_file, 'r') as f:\n path_list = [x.strip() for x in f.readlines()]\n except FileNotFoundError as ex:\n raise TypeError('Problems reading path list: '+path_list_file+' :: '+str(ex))\n cwd = os.getcwd()\n if self._rel == 'list' and path_list_file != '-':\n os.chdir(os.path.abspath(os.path.join(path_list_file, os.pardir)))\n try:\n for num, val in enumerate(path_list):\n try:\n path_list[num] = Path(val, mode=self._mode)\n except TypeError as ex:\n raise TypeError('Path number '+str(num+1)+' in list '+path_list_file+', '+str(ex))\n finally:\n os.chdir(cwd)\n value += path_list\n return value\n else:\n return 
ActionPath._check_type(self, value, islist=True)\n","sub_path":"jsonargparse/actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":24948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"625024749","text":"import threading\nimport time\nimport PythonGUIAutomation.engine.test_manager as test_manager\nimport PythonGUIAutomation.engine.multi_dev_sync as multi_dev_sync\n\ndef singleDeviceTest(options):\n '''\n options - tuple with following options:\n - option[0] - function to call\n - option[1] - device id\n - option[2] - package name of app\n - option[3] - clear app data at start of tests\n - option[4] - start app at the start of tests\n '''\n testObj = test_manager.TestManager(devId = options[1], \n packageName = options[2],\n clearAppData = options[3],\n startApp = options[4])\n options[0](testObj)\n\ndef multiDeviceTest(options, waitBetweenCalls):\n '''\n options:\n tuple of tuples with following options should be passed:\n - option[0] - function to call\n - option[1] - device id \n - option[2] - package name of app\n - option[3] - bool - clear app data at start of tests\n - option[4] - bool - start app at the start of tests\n - option[5] - index of device, basic integer nr, for example 1 (must be unique)\n note that device id should be unique (it can't be same in both options tuples)\n waitBetweenCalls - how long to wait between starting each thread, in seconds\n '''\n #checking if device id(s) provided are all unique\n devIds = [options[i][1] for i in range(len(options))]\n if len(set(devIds)) != len(devIds):\n raise Exception('Device IDS list is not unique!!!')\n \n #creating multi dev sync object\n multiDevSyncObj = multi_dev_sync.MultiDevSync()\n #starting threads with each function & option set\n for i in options:\n testManagerInstance = test_manager.TestManager(*i[1:], multi = multiDevSyncObj)\n thread = threading.Thread(target = i[0], args = (testManagerInstance,))\n thread.start()\n time.sleep(waitBetweenCalls)","sub_path":"engine/test_dispatcher.py","file_name":"test_dispatcher.py","file_ext":"py","file_size_in_byte":1950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"609797392","text":"\"\"\"Problem 37 - Truncatable primes\"\"\"\nfrom eulerlib import is_prime, primes\n\n\ndef truncatable(x):\n s = str(x)\n if len(s) <= 1:\n return False\n for i in range(len(s)):\n if not is_prime(int(s[i:])) or not is_prime(int(s[:i + 1])):\n return False\n return True\n\nL = []\nfor prime in primes():\n if truncatable(prime):\n L.append(prime)\n print(prime)\n if len(L) == 11:\n break\n\nprint(sum(L))\n","sub_path":"python/p037.py","file_name":"p037.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"618370989","text":"#Author:Chris.chen\nimport re\nimport urllib.request\nfrom bs4 import BeautifulSoup\n\ndef save_file(imgurl,auctionitemlot):\n urllib.request.urlretrieve(imgurl, 'F:\\\\auction\\\\%s.jpg' % auctionitemlot.strip())\n\nauctionitemurl = []\nmain_url = 'https://chiswickauctions.co.uk'\nurl = \"https://chiswickauctions.co.uk/catalogues/1711132/\"\nresponse = urllib.request.urlopen(url)\ndata = response.read().decode(\"utf-8\")\nres = r''\nm = re.findall(res,data)\nfor i in m:\n auctionitemurl.append(i)\nauctionitemurl=set(auctionitemurl)\nprint(auctionitemurl)\nfor i in auctionitemurl:\n response1 = urllib.request.urlopen(i)\n data1 = 
response1.read().decode(\"utf-8\")\n soup = BeautifulSoup(data1, 'html5lib')\n auctionitemlot = soup.find('h3').get_text()\n\n #拍品标题描述\n auctionitemdes = soup.find('div',{'class','singlelot'}).get_text()\n print(str(auctionitemdes).strip())\n\n #图片\n res1 = r''\n res2= r'\"\"'\n imgurl1 = re.findall(res1, data1)\n imgurl2 = re.findall(res2,data1)\n for imgurl in imgurl2:\n try:\n print(main_url+imgurl)\n save_file(main_url+imgurl, auctionitemlot)\n except:\n break\n for imgurl in imgurl1:\n try:\n pic_num = re.findall(\"/wp-content/uploads/img-collections/1711132/(\\d+_\\d+).jpg\",imgurl)\n print(main_url+imgurl)\n print(pic_num[0])\n save_file(main_url+imgurl, pic_num[0])\n except:\n break\n with open(\"F:\\\\auction\\\\%s.txt\"%auctionitemlot,'a+',encoding='utf-8') as auctionitemfile:\n auctionitemfile.write(str(auctionitemdes).strip())\n\n\n\n\n","sub_path":"tools/Crawler/爬虫chiswickauctions.co.uk.py","file_name":"爬虫chiswickauctions.co.uk.py","file_ext":"py","file_size_in_byte":1897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"31432752","text":"\n# import datetime\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom matplotlib.backends.backend_pdf import PdfPages\nimport os\n\n\"\"\"\n\nA module containing some commonly used plotting\nfunctions which tries to avoid constant repetitions\nof function definitions and funciton calls.\n\nSupport for the addition of metadata is added as well.\n\n\nModule's attributes:\n\nfontsize: sets the default fontsize for the\n project.\n\nplots_root = ''\nThe root folder for storing graphs.\n\nModule's methods:\n\nprepare_ax(ax, legend=True, fontsize=fontsize, grid=True,\n whichGrid='major', ncol=1)\n\nProperly formats an axes object.\n\nModule's classes:\n\nplotter()\n\n\n\n\n\"\"\"\n\n\nfontsize = [17, 20, 24]\n\nplots_root = '../Graphs/'\n\n\ndef prepare_ax(ax, legend=True, fontsize=fontsize, grid=True,\n whichGrid='major', ncol=1, loc='best'):\n \"\"\"\n Prepare axes for plotting\n\n Parameters\n ----------\n\n ax - axes object\n plot_mbl_ergodic - whether to plot MBL and ergodic\n horizontal lines for orientation\n legend - whether to include legend\n fontsize - which fontsize to use\n grid - whether to show grid or not\n ncol - number of columns in the legend\n\n \"\"\"\n\n ax.tick_params(axis='x', labelsize=fontsize[1], pad=5, direction='out')\n if legend:\n ax.legend(loc=loc, prop={\n 'size': fontsize[0]}, fontsize=fontsize[0],\n framealpha=0.5, ncol=ncol,\n handlelength=1.,\n columnspacing=0.6,\n handletextpad=0.5,\n frameon=False)\n\n ax.tick_params(axis='x', labelsize=fontsize[1])\n ax.tick_params(axis='y', labelsize=fontsize[1])\n if grid:\n ax.grid(which=whichGrid)\n\n\nclass Plotter(object):\n\n def __init__(self, nrows, ncols, figsize=(14, 7), metadata={},\n sharex=True, sharey=True):\n super(Plotter, self).__init__()\n\n self.figsize = figsize\n self.create_plot(nrows, ncols, sharex, sharey)\n self.metadata = {} # an empty dict for the metadata\n\n # methods\n\n def create_plot(self, nrows, ncols, sharex=True, sharey=False):\n \"\"\"\n Creates a figure and axes. 
Makes sure that the axes object\n can be flattened even if there is only a single subplot.\n\n \"\"\"\n plt.close()\n self.fig, axes = plt.subplots(\n nrows, ncols, figsize=self.figsize, sharex=sharex, sharey=sharey)\n self.axes = np.array(axes)\n\n # prepare axes\n def prepare_plot(self, savename='', plot_type='', desc='', top=0.89,\n subfolder='', save=False, save_metadata=True,\n show=True, block=True):\n \"\"\"\n Prepare plotting layout for plotting and for saving the\n figures - creates\n the savename string and such.\n\n A note about the storage path for the plots:\n\n graphs_folder = '../Graphs/plot_type/desc/subfolder'\n\n Parameters\n ----------\n savename: string\n Filename of the saved plot.\n plot_type: which quantity are we calculating and plotting\n (SFF, level ratios, etc.). Defaults to an empty\n string.\n desc: job type description which usually tells us where the\n initial numerical data were stored. Defaults\n to an empty string.\n subfolder: string\n specifies a possible subfolder structure for\n storing files. Defaults to an empty string.\n top: float\n a parameter in subplots_adjust function specifying\n where the top of the plot should be.\n save: boolean\n whether to save the plot or not. Defaults to False\n to avoid unintentional overwriting of data.\n save_metadata: boolean\n whether to add metadata to a pdf file\n show - boolean\n If True, a plot is shown after it has been created.\n Defaults to True.\n\n \"\"\"\n self.fig.tight_layout()\n self.fig.subplots_adjust(top=top)\n\n if save: # save graphs\n\n graphs_folder = plots_root + plot_type + '/' + \\\n desc + '/' + subfolder\n\n if not os.path.isdir(graphs_folder):\n os.makedirs(graphs_folder)\n\n savename = graphs_folder + '/' + savename\n\n pdffig = PdfPages(savename)\n\n self.fig.savefig(pdffig, format='pdf')\n\n if save_metadata:\n metadata = pdffig.infodict()\n\n metadata.update(self.metadata)\n print('prepare_plot info: plot metadata: {}'.format(metadata))\n\n pdffig.close()\n\n if show:\n\n plt.show(block=block)\n","sub_path":"plotting_tools/plotter_cls.py","file_name":"plotter_cls.py","file_ext":"py","file_size_in_byte":4778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"316147133","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('junmakii_django_app', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Image',\n fields=[\n ('id', models.AutoField(primary_key=True, serialize=False)),\n ('image', models.ImageField(default=b'', max_length=2000, upload_to=b'images')),\n ('title', models.CharField(blank=True, max_length=255, null=True)),\n ('key', models.CharField(blank=True, max_length=255, null=True)),\n ('text', models.TextField(blank=True, null=True)),\n ('tags', models.CharField(blank=True, max_length=255, null=True)),\n ('created_at', models.DateTimeField(auto_now=True)),\n ],\n ),\n ]\n","sub_path":"lib/junmakii_django_app/junmakii_django_app/migrations/0002_image.py","file_name":"0002_image.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"160057952","text":"############################################################\n# -*- coding: utf-8 -*-\n#\n# # # # # # #\n# ## ## # ## # #\n# # # # # # # # # # #\n# # ## # ## ## ######\n# # # # # # #\n#\n# Python-based Tool for interaction with the 
10micron mounts\n# GUI with PyQT5 for python\n#\n# written in python3, (c) 2019-2021 by mworion\n#\n# Licence APL2.0\n#\n###########################################################\nfrom invoke import task\nfrom PIL import Image\nimport glob\nimport time\nimport os\n\nrn = ''\n#\n# defining all necessary virtual client login for building over all platforms\n#\n\n# defining environment for ubuntu\nclientUbuntu = 'astro-ubuntu.fritz.box'\nuserUbuntu = 'mw@' + clientUbuntu\nworkUbuntu = '/home/mw/test'\nworkUbuntuSCP = userUbuntu + ':/home/mw/test'\n\n# same for windows1 with cmd.exe as shell\nclientWindows = 'astro-windows.fritz.box'\nuWin = 'mw@' + clientWindows\nwWin = 'test'\nwWinSCP = uWin + ':/Users/mw/test'\n\n# same for macOS\nclientMac = 'astro-mac-bigsur.fritz.box'\n# clientMac = 'astro-mac-catalina.fritz.box'\n# clientMac = 'astro-mac-mojave.fritz.box'\nuserMac = 'mw@' + clientMac\nworkMac = 'test'\nworkMacSCP = userMac + ':/Users/mw/test'\n\n\ndef runMWd(c, param):\n c.run(param)\n\n\ndef runMW(c, param):\n # c.run(param, echo=False, hide='out')\n c.run(param)\n\n\ndef printMW(param):\n print(param)\n\n\n@task\ndef clean_mw(c):\n printMW('clean mountwizzard')\n runMW(c, 'rm -rf .pytest_cache')\n runMW(c, 'rm -rf mw4.egg-info')\n runMW(c, 'find ./mw4 | grep -E \"(__pycache__)\" | xargs rm -rf')\n\n\n@task\ndef image_res(c):\n printMW('changing image resolution for docs to 150 dpi')\n files = glob.glob('./docs/source/**/*.png', recursive=True)\n for file in files:\n print(file)\n im = Image.open(file)\n im.save(file, dpi=(150, 150))\n\n\n@task\ndef version_doc(c):\n printMW('changing the version number to setup.py')\n\n # getting version of desired package\n with open('setup.py', 'r') as setup:\n text = setup.readlines()\n\n for line in text:\n if line.strip().startswith('version'):\n _, number, _ = line.split(\"'\")\n\n # reading configuration file\n with open('./docs/source/conf.py', 'r') as conf:\n text = conf.readlines()\n textNew = list()\n\n print(f'>{number}<')\n\n # replacing the version number\n for line in text:\n if line.startswith('version'):\n line = f\"version = '{number}'\\n\"\n if line.startswith('release'):\n line = f\"release = '{number}'\\n\"\n textNew.append(line)\n\n # writing configuration file\n with open('./docs/source/conf.py', 'w+') as conf:\n conf.writelines(textNew)\n\n\n@task\ndef update_builtins(c):\n printMW('building resources')\n runMW(c, 'cp ./data/de421_23.bsp ./mw4/resource/data/de421_23.bsp')\n runMW(c, 'cp ./data/active.txt ./mw4/resource/data/active.txt')\n runMW(c, 'cp ./data/finals2000A.all ./mw4/resource/data/finals2000A.all')\n runMW(c, 'cp ./data/finals.data ./mw4/resource/data/finals.data')\n\n\n@task\ndef build_resource(c):\n printMW('building resources')\n resourceDir = './mw4/resource/'\n with c.cd(resourceDir + 'data'):\n with open(resourceDir + 'data/content.txt', 'w') as f:\n for file in glob.glob(resourceDir + 'data/*.*'):\n t = os.stat(file).st_mtime\n f.write(f'{os.path.basename(file)} {t}\\n')\n runMW(c, f'pyrcc5 -o {resourceDir}resources.py {resourceDir}resources.qrc')\n\n\n@task\ndef build_widgets(c):\n printMW('building widgets')\n widgetDir = './mw4/gui/widgets/'\n widgets = ['hemisphere', 'image', 'main', 'measure', 'message',\n 'satellite', 'keypad', 'devicePopup', 'analyse',\n 'simulator', 'downloadPopup']\n for widget in widgets:\n name = widgetDir + widget\n runMW(c, f'python -m PyQt5.uic.pyuic -x {name}.ui -o {name}_ui.py')\n\n\n@task()\ndef test_mw(c):\n printMW('testing mountwizzard')\n runMW(c, 'flake8')\n runMW(c, 
'pytest tests/unit_tests/zLoader')\n runMW(c, 'pytest tests/unit_tests/zMainApp')\n runMW(c, 'pytest tests/unit_tests/base')\n runMW(c, 'pytest tests/unit_tests/logic/astrometry')\n runMW(c, 'pytest tests/unit_tests/logic/cover')\n runMW(c, 'pytest tests/unit_tests/logic/databaseProcessing')\n runMW(c, 'pytest tests/unit_tests/logic/dome')\n runMW(c, 'pytest tests/unit_tests/logic/environment')\n runMW(c, 'pytest tests/unit_tests/logic/imaging')\n runMW(c, 'pytest tests/unit_tests/logic/measure')\n runMW(c, 'pytest tests/unit_tests/logic/modeldata')\n runMW(c, 'pytest tests/unit_tests/logic/powerswitch')\n runMW(c, 'pytest tests/unit_tests/logic/remote')\n runMW(c, 'pytest tests/unit_tests/logic/telescope')\n runMW(c, 'pytest tests/unit_tests/gui/extWindows')\n runMW(c, 'pytest tests/unit_tests/gui/mainWindow')\n runMW(c, 'pytest tests/unit_tests/gui/mainWmixin')\n runMW(c, 'pytest tests/unit_tests/gui/utilities')\n runMW(c, 'pytest tests/unit_tests/mountcontrol')\n runMW(c, 'pytest tests/unit_tests/indibase')\n runMW(c, 'pytest tests/unit_tests/logic/automation')\n\n\n@task(pre=[build_resource, build_widgets, version_doc])\ndef build_mw(c):\n printMW('building dist mountwizzard4')\n with c.cd('.'):\n runMW(c, 'rm -f dist/mountwizzard4*.tar.gz')\n runMW(c, 'python setup.py sdist')\n runMW(c, 'cp dist/mountwizzard4*.tar.gz ../MountWizzard4/dist/mountwizzard4.tar.gz')\n\n with open('notes.txt') as f:\n tmp = f.readlines()\n rn = ''\n for line in tmp:\n rn += line\n\n print(rn)\n\n\n@task(pre=[build_mw])\ndef upload_mw(c):\n printMW('uploading dist mountwizzard4')\n\n with open('notes.txt') as f:\n tmp = f.readlines()\n rn = ''\n for line in tmp:\n rn += line\n\n with c.cd('./dist'):\n print(rn)\n print(f'twine upload mountwizzard4-*.tar.gz -r pypi -c \"{rn}\"')\n runMW(c, f'twine upload mountwizzard4-*.tar.gz -r pypi -c \"{rn}\"')\n runMW(c, 'rm notes.txt')\n\n\n@task(pre=[])\ndef test_win(c):\n printMW('test windows install')\n printMW('...delete test dir')\n runMW(c, f'ssh {uWin} \"if exist {wWin} rd /s /q {wWin}\"')\n time.sleep(1)\n printMW('...make test dir')\n runMW(c, f'ssh {uWin} \"if not exist {wWin} mkdir {wWin}\"')\n time.sleep(1)\n\n with c.cd('dist'):\n printMW('...copy *.tar.gz to test dir')\n runMWd(c, f'scp -r mountwizzard4.tar.gz {wWinSCP}')\n\n with c.cd('support/2.0/Windows'):\n printMW('...copy install script to test dir')\n runMWd(c, f'scp -r MW4_InstallTest.bat {wWinSCP}')\n runMWd(c, f'scp -r MW4_Install.bat {wWinSCP}')\n printMW('...run install script in test dir')\n runMWd(c, f'ssh {uWin} \"cd {wWin} && MW4_InstallTest.bat\"')\n printMW('...copy run script to test dir')\n runMWd(c, f'ssh {uWin} \"cd {wWin} && echo > test.txt\"')\n runMWd(c, f'scp -r MW4_Run.bat {wWinSCP}')\n printMW('...run MountWizzard4 for 3 seconds')\n runMWd(c, f'ssh {uWin} \"cd {wWin} && MW4_Run.bat\"')\n\n\n@task(pre=[])\ndef test_ubuntu(c):\n printMW('test ubuntu install')\n printMW('...delete test dir')\n runMW(c, f'ssh {userUbuntu} \"rm -rf {workUbuntu}\"')\n time.sleep(1)\n printMW('...make test dir')\n runMW(c, f'ssh {userUbuntu} \"mkdir {workUbuntu}\"')\n time.sleep(1)\n\n with c.cd('dist'):\n printMW('...copy *.tar.gz to test dir')\n runMWd(c, f'scp -r mountwizzard4.tar.gz {workUbuntuSCP}')\n\n with c.cd('support/2.0/Ubuntu'):\n printMW('...copy install script to test dir')\n runMWd(c, f'scp -r MW4_InstallTest.sh {workUbuntuSCP}')\n runMWd(c, f'scp -r MW4_Install.sh {workUbuntuSCP}')\n printMW('...run install script in test dir')\n runMWd(c, f'ssh {userUbuntu} \"cd {workUbuntu} && 
./MW4_InstallTest.sh\"')\n printMW('...copy run script and environ to test dir')\n runMWd(c, f'ssh {userUbuntu} \"cd {workUbuntu} && touch test.txt\"')\n runMWd(c, f'scp -r MW4_Run.sh {workUbuntuSCP}')\n runMWd(c, f'scp -r MountWizzard4.desktop {workUbuntuSCP}')\n runMWd(c, f'scp -r mw4.png {workUbuntuSCP}')\n printMW('...run MountWizzard4 for 3 seconds')\n runMWd(c, f'ssh {userUbuntu} \"cd {workUbuntu} && xvfb-run ./MW4_Run.sh\"')\n\n\n@task(pre=[])\ndef test_mac(c):\n printMW('test catalina install')\n printMW('...delete test dir')\n runMW(c, f'ssh {userMac} \"rm -rf {workMac}\"')\n time.sleep(1)\n printMW('...make test dir')\n runMW(c, f'ssh {userMac} \"mkdir {workMac}\"')\n time.sleep(1)\n\n with c.cd('dist'):\n printMW('...copy *.tar.gz to test dir')\n runMWd(c, f'scp -r mountwizzard4.tar.gz {workMacSCP}')\n\n with c.cd('support/2.0/MacOSx'):\n printMW('...copy install script to test dir')\n runMWd(c, f'scp -r MW4_InstallTest.command {workMacSCP}')\n runMWd(c, f'scp -r MW4_Install.command {workMacSCP}')\n printMW('...run install script in test dir')\n runMWd(c, f'ssh {userMac} \"cd {workMac} && ./MW4_InstallTest.command\"')\n printMW('...copy run script and environ to test dir')\n runMWd(c, f'ssh {userMac} \"cd {workMac} && touch test.txt\"')\n runMWd(c, f'scp -r MW4_Run.command {workMacSCP}')\n printMW('...run MountWizzard4 for 3 seconds')\n runMWd(c, f'ssh {userMac} \"cd {workMac} && ./MW4_Run.command\"')\n","sub_path":"tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":9326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"319088334","text":"# -*- coding: utf-8 -*-\r\n'''\r\n@Time : 2020/05/08 11:45\r\n@Author : Tianxiaomo\r\n@File : coco_annotatin.py\r\n@Noice :\r\n@Modificattion :\r\n @Author :\r\n @Time :\r\n @Detail :\r\n\r\n'''\r\nimport json, sys, os\r\nfrom collections import defaultdict\r\nfrom tqdm import tqdm\r\n\r\nif __name__ == '__main__':\r\n \"\"\"hyper parameters\"\"\"\r\n # json_file_path = '/home/yyr/data/open-images-bus-trucks/annotations/open_images_val_coco_format.json'\r\n # images_dir_path = '/home/yyr/data/open-images-bus-trucks/images/'\r\n # output_path = 'data/val.txt'\r\n json_file_path, images_dir_path, output_path = sys.argv[1], sys.argv[2], sys.argv[3]\r\n\r\n \"\"\"load json file\"\"\"\r\n name_box_id = defaultdict(list)\r\n id_name = dict()\r\n with open(json_file_path, encoding='utf-8') as f:\r\n data = json.load(f)\r\n\r\n \"\"\"generate labels\"\"\"\r\n images = data['images']\r\n annotations = data['annotations']\r\n for ant in tqdm(annotations):\r\n id = ant['image_id']\r\n name = os.path.join(images_dir_path, images[id-1]['file_name'])\r\n cat = ant['category_id'] - 1\r\n name_box_id[name].append([ant['bbox'], cat])\r\n\r\n \"\"\"write to txt\"\"\"\r\n with open(output_path, 'w') as f:\r\n for key in tqdm(name_box_id.keys()):\r\n f.write(key)\r\n box_infos = name_box_id[key]\r\n for info in box_infos:\r\n x_min = int(info[0][0])\r\n y_min = int(info[0][1])\r\n x_max = x_min + int(info[0][2])\r\n y_max = y_min + int(info[0][3])\r\n\r\n box_info = \" %d,%d,%d,%d,%d\" % (\r\n x_min, y_min, x_max, y_max, int(info[1]))\r\n f.write(box_info)\r\n f.write('\\n')\r\n","sub_path":"tool/coco_annotation.py","file_name":"coco_annotation.py","file_ext":"py","file_size_in_byte":1745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"198315459","text":"# tic-tac-toe\ngame = [[0, 0, 0],\n [0, 0, 0],\n [0, 0, 0],]\n\ndef 
game_board(player: int=0, row: int=0, column: int=0, just_display: bool=False):\n \n print(\" a b c\")\n\n if not just_display:\n game[row][column] = player\n for count, row in enumerate(game):\n print(count, row)\n\ngame_board(just_display=True)\n\ngame_board(player=1, row=1, column=1)\n","sub_path":"1. Learning to program with Python 3/1. Basics/lesson7 - function params.py","file_name":"lesson7 - function params.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"420723929","text":"import datetime\nimport re\nimport time\n\ndays = [\"\",\n \"Monday\",\n \"Tuesday\",\n \"Wednesday\",\n \"Thursday\",\n \"Friday\",\n \"Saturday\",\n \"Sunday\"]\n\ndef week(self, input):\n \"\"\"Displays the week and weekday for today or for a given date.\"\"\"\n m = input.args\n msg = lambda s: self.say(s)\n\n if m:\n m = re.match(r\"(\\d{2,4}?)-(\\d{1,2})-(\\d{1,2})\", m)\n if not m:\n raise self.BadInputError()\n t = list(map(int, m.groups()))\n if t[0] < 100:\n t[0] += 2000\n date = datetime.date(t[0], t[1], t[2])\n else:\n t = time.localtime()\n date = datetime.date(t[0], t[1], t[2])\n \n msg(\"%s is a %s in week %s%d%s\" % (str(date),\n days[date.isoweekday()],\n chr(2),date.isocalendar()[1],chr(2)))\n\n\nweek.rule = [\"vecka\", \"week\"]\nweek.usage = [(\"Display the current week\", \"$pcmd\"),\n (\"Display the week for a given date\", \"$pcmd YYYY-MM-DD\")]\nweek.example = [(\"Display the week Halloween was in in 1999\", \"$pcmd 1999-10-31\")]\n","sub_path":"plugins/week.py","file_name":"week.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"508545309","text":"# Access the database\nimport pymysql\nconnect=pymysql.Connect(\nhost='localhost',\nport=3306,\nuser='root',\npasswd='password',\ndb='mydata',\ncharset='utf8'\n)\ncursor=connect.cursor()\nsql1='SELECT r_depap FROM fme_archive_today WHERE executedate between 20180101 and 20180120'\nsql2='SELECT r_arrap FROM fme_archive_today WHERE executedate between 20180101 and 20180120'\ncursor.execute(sql1)\ncursor.execute(sql2)\nresult=cursor.fetchall()\nmyset=set(result)\nlist1=[]\nfor item in myset:\n\tlist1.append(result.count(item))\n\tprint('%s was found %d times'%(item,result.count(item)))\nprint(sorted(list1,reverse=True))\ncursor.close()\nconnect.close() \n\n","sub_path":"airport-sort.py","file_name":"airport-sort.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"124621069","text":"import collections\nimport sys\nimport os\nimport json\n\nFPS = 1\nWIDTH = 2\nHEIGHT = 3\nSETUP = {FPS: -1, WIDTH: -1, HEIGHT: -1}\n\n\ndef main(argv):\n if len(argv) < 1:\n print('please provide files to convert to json')\n sys.exit(1)\n\n for file_name in argv:\n\n if not os.path.isfile(file_name):\n print(\"File path {} does not exist.\".format(file_name))\n continue\n\n with open(file_name) as file:\n lines = file.readlines()\n\n setup_lines = []\n point_one_lines = []\n point_two_lines = []\n partition_lines(lines, setup_lines, point_one_lines, point_two_lines)\n read_setup(setup_lines)\n point_keys = read_in_keys(point_one_lines, point_two_lines)\n\n\n\n scale_x_keys = {\"Name\": \"Scale X\"}\n scale_y_keys = {\"Name\": \"Scale Y\"}\n position_x_keys = {\"Name\": \"Position X\"}\n position_y_keys = {\"Name\": \"Position Y\"}\n rotation_keys = {\"Name\": \"Rotation\"}\n 
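# Editor's note (added): a hedged sketch of what init_keys() below fills in --\n # each *_keys dict maps a time in seconds to a channel value alongside its\n # 'Name' label, e.g. (hypothetical numbers, not taken from a real trace):\n # position_x_keys == {'Name': 'Position X', 0.0: -12.5, 0.25: -10.0}\n 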
init_keys(point_keys, position_x_keys, position_y_keys, scale_x_keys, scale_y_keys, rotation_keys)\n\n\t\t# clean_up_keys(position_x_keys, position_y_keys, scale_x_keys, scale_y_keys, rotation_keys)\n\n json_name = convert_filename(file_name)\n print(\"writing {} to {}\".format(file_name, json_name))\n dump_json_to_file(json_name, scale_x_keys, scale_y_keys, position_x_keys, position_y_keys, rotation_keys)\n\n\ndef read_setup(setup_lines):\n for line in setup_lines:\n if \"Units Per Second\" in line:\n SETUP[FPS] = float(line.split()[-1])\n elif \"Source Width\" in line:\n SETUP[WIDTH] = float(line.split()[-1])\n elif \"Source Height\" in line:\n SETUP[HEIGHT] = float(line.split()[-1])\n\n\ndef partition_lines(lines, out_setup_lines, out_point_one_lines, out_point_two_lines):\n \"\"\" splits lines into the setup, Point #1 and Point #2 partitions \"\"\"\n # todo make this more efficient\n setup_end = -1\n point_one_start = -1\n point_one_end = -1\n point_two_start = -1\n point_two_end = -1\n\n i = 0\n size = len(lines)\n while i < size:\n line = lines[i]\n if \"Point #1\" in line:\n setup_end = i - 1\n point_one_start = i + 2\n while not lines[i].isspace():\n i += 1\n point_one_end = i\n elif \"Point #2\" in line:\n point_two_start = i + 2\n while not lines[i].isspace():\n i += 1\n point_two_end = i\n i += 1\n\n out_setup_lines.extend(lines[:setup_end])\n out_point_one_lines.extend(lines[point_one_start:point_one_end])\n out_point_two_lines.extend(lines[point_two_start:point_two_end])\n\n\ndef read_in_keys(point_one_lines, point_two_lines):\n keys = collections.defaultdict(dict)\n for line in point_one_lines:\n if len(line) > 0:\n entries = line.split()\n\n seconds = seconds_from_frame(int(entries[0]))\n x = float(entries[1])\n y = abs(float(entries[2]) - SETUP[HEIGHT])\n\n keys[seconds]['x1'] = x\n keys[seconds]['y1'] = y\n\n for line in point_two_lines:\n if len(line) > 0:\n entries = line.split()\n\n seconds = seconds_from_frame(int(entries[0]))\n x = float(entries[1])\n y = abs(float(entries[2]) - SETUP[HEIGHT])\n\n keys[seconds]['x2'] = x\n keys[seconds]['y2'] = y\n\n return keys\n\n\ndef init_keys(point_keys, out_position_x_keys, out_position_y_keys, out_scale_x_keys, out_scale_y_keys, out_rotation_keys):\n # todo init keys for seconds 0 and one frame before start of keys (see clean_keys for example)\n for seconds, coords in point_keys.items():\n px, py = position_from_points(coords)\n sx, sy = scale_from_points(coords)\n r = rotation_from_points(coords)\n\n out_position_x_keys[seconds] = px\n out_position_y_keys[seconds] = py\n out_scale_x_keys[seconds] = sx\n out_scale_y_keys[seconds] = sy\n out_rotation_keys[seconds] = r\n\n\ndef clean_up_keys(position_x_keys, position_y_keys, scale_x_keys, scale_y_keys, rotation_keys):\n\tposition_tol = 0.01\n\tscale_tol = 0.01\n\trotation_tol = 0.01\n\n\tclean_keys(position_x_keys, position_tol)\n\tclean_keys(position_y_keys, position_tol)\n\tclean_keys(scale_x_keys, scale_tol)\n\tclean_keys(scale_y_keys, scale_tol)\n\tclean_keys(rotation_keys, rotation_tol)\n\n\ndef clean_keys(keys, tol):\n\t# todo write clean_keys\n\tto_delete = []\n\tprev = None\n\n\titems_iter = iter(keys.items())\n\tfirst = next(items_iter)\n\tprev = first[1]\n\n\tfor time, value in items_iter:\n\t\tif (value < prev + tol) and (value > prev - tol):\n\t\t\tto_delete.append(time)\n\t\telse:\n\t\t\tprev = value\n\n\n\tfor time in to_delete:\n\t\tdel keys[time]\n\n\ndef position_from_points(coords):\n x = (coords['x1'] + 
coords['x2']) / 2.0\n y = (coords['y1'] + coords['y2']) / 2.0\n x = deg_from_width(x)\n y = deg_from_height(y)\n return x, y\n\n\ndef scale_from_points(coords):\n # todo write scale_from_points\n # need to keep track of initial scale\n return 1, 1\n\n\ndef rotation_from_points(coords):\n # todo write rotation_from_points\n # need to keep track of initial rotation\n return 0\n\n\ndef convert_scale(scale):\n \"\"\" convert from 0 - 100 to 0 - 1 \"\"\"\n return scale / 100\n\n\ndef deg_from_width(width):\n \"\"\" divide by canvas width and multiply by 360 degrees and shift to -180 to 180 \"\"\"\n return ((width / SETUP[WIDTH]) * 360.0) - 180\n\n\ndef deg_from_height(height):\n \"\"\" divide by canvas height and multiply by 180 degrees and shift to -90 to 90\"\"\"\n return ((height / SETUP[HEIGHT]) * 180) - 90\n\n\ndef seconds_from_frame(frame):\n \"\"\" divide by frame rate \"\"\"\n return frame / SETUP[FPS]\n\n\ndef convert_filename(file_name):\n \"\"\" changes '.txt' to '.json '\"\"\"\n return file_name[:-4] + '.json'\n\n\ndef dump_json_to_file(file_name, scale_x_keys, scale_y_keys, position_x_keys, position_y_keys, rotation_keys):\n data = [scale_x_keys, scale_y_keys, position_x_keys, position_y_keys, rotation_keys]\n\n with open(file_name, 'w') as file:\n json.dump(data, file, indent=2)\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n","sub_path":"TrackingStuff/Version2/AdobeKeyFramesToJsonV2.py","file_name":"AdobeKeyFramesToJsonV2.py","file_ext":"py","file_size_in_byte":6181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"397560450","text":"# RT-Thread building script for component\nImport('rtconfig')\nfrom building import *\n\ncwd = GetCurrentDir()\nlibs = []\nsrc = Glob('*src/*.c') + Glob('src/*.cpp')\ncpppath = [cwd + '/inc']\nlibpath = [cwd + '/lib']\n\nif not GetDepend('BSP_USE_STDDRIVER_SOURCE'):\n if rtconfig.PLATFORM in ['armcc', 'armclang']:\n if GetOption('target') == 'mdk5' and os.path.isfile('./lib/libstddriver_keil.lib'):\n libs += ['libstddriver_keil']\n elif GetOption('target') == 'mdk4' and os.path.isfile('./lib/libstddriver_keil4.lib'):\n libs += ['libstddriver_keil4']\n elif rtconfig.PLATFORM in ['gcc'] and os.path.isfile('./lib/libstddriver_gcc.a'):\n libs += ['libstddriver_gcc']\n elif os.path.isfile('./lib/libstddriver_iar.a'):\n libs += ['libstddriver_iar']\n\nif not libs:\n group = DefineGroup('Libraries', src, depend = [''], CPPPATH = cpppath)\nelse:\n src = []\n group = DefineGroup('Libraries', src, depend = [''], CPPPATH = cpppath, LIBS = libs, LIBPATH = libpath)\n\nReturn('group')\n","sub_path":"bsp/nuvoton/libraries/m2354/StdDriver/SConscript","file_name":"SConscript","file_ext":"","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"99047309","text":"import cv2\nimport tracking.PID\nimport xarm\n\nclass KeypointFollow:\n def __init__(self):\n self.Arm = xarm.Controller('/dev/ttyTHS1', False)\n self.target_servox=90\n self.target_servoy=45\n self.xservo_pid = tracking.PID.PositionalPID(2.5, 0.2, 0.35)\n self.yservo_pid = tracking.PID.PositionalPID(3.5, 0.4, 0.5)\n\n def follow_function(self, x, y):\n\n\n point_x = x\n point_y = y\n\n if not (self.target_servox>=180 and point_x<=320 or self.target_servox<=0 and point_x>=320):\n self.xservo_pid.SystemOutput = point_x\n self.xservo_pid.SetStepSignal(320)\n\n self.xservo_pid.SetInertiaTime(0.005, 0.01)\n print(\"PID system output: \", self.xservo_pid.SystemOutput)\n\n 
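# Editor's note (added): the mapping below assumes the common RC-servo pulse\n # convention of roughly 500-2500 microseconds centred on 1500, matching the\n # (value - 500) / 10 angle formula used here; e.g. a PID output of +400 would\n # give target_valuex == 1900, i.e. an angle of 140 degrees (illustrative only).\n 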
target_valuex = int(1500 + self.xservo_pid.SystemOutput)\n print(\"target_valuex: \", target_valuex)\n self.target_servox = int((target_valuex - 500) / 10)\n print(\"target_servox: \", self.target_servox)\n\n if self.target_servox > 180: self.target_servox = 180\n if self.target_servox < 0: self.target_servox = 0\n if not (self.target_servoy>=90 and point_y<=240 or self.target_servoy<=0 and point_y>=240):\n\n self.yservo_pid.SystemOutput = point_y\n self.yservo_pid.SetStepSignal(240)\n\n\n self.yservo_pid.SetInertiaTime(0.005, 0.01)\n print(\"PID system output: \", self.yservo_pid.SystemOutput)\n\n target_valuey = int(1500 + self.yservo_pid.SystemOutput)\n print(\"target_valuey: \", target_valuey)\n\n self.target_servoy = int((target_valuey - 500) / 10) - 20\n print(\"target_servoy: \", self.target_servoy)\n\n if self.target_servoy > 90: self.target_servoy = 90\n if self.target_servoy < 0: self.target_servoy = 0\n\n joints_0 = [90, 90, self.target_servoy / 2, self.target_servoy / 2, 55, self.target_servox]\n for i in range(len(joints_0)):\n print(\"Servo id {}, angle: {}\".format(i+1, float(joints_0[i])))\n self.Arm.setPosition(i+1, float(joints_0[i]), 800)\n return\n","sub_path":"tasks/robot_arm_tracker/tracking/follow_keypoint.py","file_name":"follow_keypoint.py","file_ext":"py","file_size_in_byte":2125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"622918265","text":"import random,datetime,string , os ,time ,subprocess \nfrom selenium import webdriver\nfrom faker_e164.providers import E164Provider\nfrom faker import Faker\nfrom selenium.webdriver.firefox.firefox_binary import FirefoxBinary\nfrom pyvirtualdisplay import Display\nimport requests\nimport io\nfrom pydub import AudioSegment\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.select import Select\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\nfrom stem import Signal\nfrom stem.control import Controller\nfrom zoho_act_mail import *\nimport curses\nimport speech_recognition as sr\ndisplay = Display(visible=0, size=(960, 860))\n\n######################################################\n#controller = Controller.from_port(port=9051)ay pydub\n#apt-get install python-pyaudio python3-pyaudio ffmpeg\n# pip3 install speechrecognition requests pyvirtualdisplay pydub\n#apt-get install python-pyaudio python3-pyaudio ffmpeg\n#kx_id echo $HOSTNAME > /root/id_docker\nos.system(\"pkill firefox && pkill Xephyr\")\nemail_users=[\"m2\",\"m3\",\"m4\",\"m5\",\"m6\",\"m7\",\"m8\",\"m10\",\"m20\",\"m21\",\"m31\"]\npull_url=\"https://bitbucket.org/m0uray/aux/raw/82987aba3576d4f74b9aa871013f0612e8784037/pull-docker \"\nprint(email_users)\nf = open(\"/root/id_docker\", \"r\")\nlines=f.readline().split(\"\\n\")\nhost_name=lines[0]\nprint(lines[0])\ntry:\n\tfor ll in email_users :\n\t\tif host_name in ll:\n\t\t\temail_users.remove(ll)\n\t\t\tbreak\nexcept :\n\tprint(\"ok nothing \")\nprint(email_users)\n\nrandomindex = random.randint(0,len(email_users)-1)\nnew_host=email_users[randomindex]\nprint (new_host)\nkx_id=new_host\n\nurl_kxd=gather_acces(kx_id+\"@chokariz.tk\")\nurl_kxd=url_kxd[-1]\nprint (url_kxd,kx_id)\n#input()\n#input()\ndk_image=\"s00ka/da0\"\ndk_liste_image=\"s00ka/list\"\ncomd=\"docker rm -f 80x || docker pull \"+dk_image+\":latest && docker run -d --name 
80x --hostname \"+kx_id+\" -e PASSWORD=123456789 --cap-add=NET_ADMIN --device=/dev/net/tun -p 6080:6080 -p 5001:22 -p 6001:3389 -p 1022:1022 --dns=8.8.4.4 --dns=8.8.8.8 \"+dk_image+\" && clear && ioo=$(curl ipinfo.io/ip) && echo $ioo:6080 && docker rm -f 50x || docker pull balitch00/bl_1:latest && docker run -d --privileged --cap-add SYS_ADMIN --name 50x --hostname l\"+kx_id+\" -p 5002:22 -p 1984:1984 --dns=8.8.4.4 --dns=8.8.8.8 \"+dk_liste_image+\" && clear && ioo=$(curl ipinfo.io/ip) && echo $ioo:6080 \"\ncmd_tran=comd.replace(\"xxx-xxx\",kx_id)\nprint(cmd_tran)\nnew_cmd =\"export kx_id=\"+kx_id+\" && curl \"+pull_url+\"| bash\"\n######################################################\n#red=\"https://trial.docker.com/launching?token=800af145-2b45-487c-b62b-11acdf3c374e\"\nred=url_kxd\nglobal arr_info\narr_info=[]\ndomains=[\"@gmail.com\"]\n\n############################################################################################################\n############################################################################################################\nProfile_name=\"s0ob7y28.gitlab_fire_prof\"\nBinarry=\"/root/firefox-sdk/bin/firefox-bin\"\npath_profile=\"/root/.mozilla/firefox/\"+Profile_name\ngecko_path=\"/usr/bin/geckodriver13\"\n\n\n\t#return outpu_l0g\n\n\t#os.system(\"echo 'CAPTCHA KILLED' >> cchck \")\n\n\ndef sstart():\n\tnew_prof()\n\tcapabilities = webdriver.DesiredCapabilities().FIREFOX\n\tcapabilities[\"marionette\"] = True\n\tprofile = webdriver.FirefoxProfile(path_profile)\n\tbinary = FirefoxBinary(Binarry)\n\tdriver = webdriver.Firefox(firefox_binary=binary , capabilities=capabilities , firefox_profile=profile , executable_path=gecko_path)\n\treturn driver\n\n\n\n\n######################################################\n\n\nreasonableCharacters = (string.digits + string.ascii_letters )\ndef password0(minimum=5, maximum=6):\n return ''.join(\n random.choice(reasonableCharacters) for x in range(\n random.randint(minimum, maximum)\n )\n )\n\n\n\n\n########################################################################################################\"\n###################################### \" \"################################################\"\n###################################### \" \"################################################\"\n###################################### \" \"################################################\"\n###################################### \" \"################################################\"\n########################################################################################################\"\n\n\n\n########################################################################################################\"\n###################################### \" \"################################################\"\n###################################### \" \"################################################\"\n###################################### \" \"################################################\"\n###################################### \" \"################################################\"\n########################################################################################################\"\n\n\ndef new_prof():\n\tl0g (\"new profile\")\n\tdisplay.start()\n\n\n\tprint(\"C R E A T I N G P R O F I L E : \" ,end=\"\",flush=True)\n\tremove_prof=\"rm -rf /root/.mozilla/firefox/\"+Profile_name\n\textract_prof=\"tar xf \"+Profile_name+\".tar.gz -C /root/.mozilla/firefox\"\n\tempty_tmp=\"rm -rf 
/tmp/*\"\n\t#vpn_ip=get_myip()\n\t#l0g(vpn_ip)\n\n\n\ttry:\n\t\tsubprocess.Popen(remove_prof, shell=True)\n\t\ttime.sleep(1)\n\t\tsubprocess.Popen(empty_tmp, shell=True)\n\texcept:\n\t\tpass\n\ttry:\n\t\tsubprocess.Popen(extract_prof, shell=True)\n\t\ttime.sleep(1)\n\t\tprint(\" O K \")\n\texcept:\n\t\tpass\n########################################################################################################\"\n###################################### \" \"################################################\"\n###################################### \" \"################################################\"\n###################################### \" \"################################################\"\n###################################### \" \"################################################\"\n########################################################################################################\"\ndef go_820():\n\tl0g(\"----------------------------------------> START <-------------------------------------- \")\n\tl0g(\"\")\n\tl0g (\"new session\")\n\tdriver=sstart()\n\tdriver.get(red)\n\n\t#vpn_ip=get_myip()\n\t\n\n\ttry:\n\t\tif \"153.92.30.200\" in vpn_ip :\n\t\t\tl0g(\"VPN down ---> \"+vpn_ip)\n\t\telse:\n\t\t\tl0g(\"VPN UP ---> \"+vpn_ip)\n\texcept:\n\t\tpass\n\n########################################################################################################\"\n########################################################################################################\"\n\n\ttry:\n\t\tstar_new_trial=WebDriverWait(driver, 15).until(EC.presence_of_element_located((By.CSS_SELECTOR, '._1HzQ5Leqk_2upNfof4J1WE')))\n\t\tstar_new_trial.click()\n\t\txx=13\n\t\tl0g(\"ready !! WAITING \"+str(60*xx)+\" ---> \")\n\t\ttime.sleep(60*xx)\n\t\t\n\texcept:\n\t\tl0g(\"no trial !! ---> \")\n\n\t#time.sleep(2)driver.get(red)https://trial.docker.com/launching?token=05e8ae1f-0ce3-49cf-830b-d0a351220b42\n########################################################################################################\"\n########################################################################################################\"\n\ttry:\n\t\tstar_new_trial=WebDriverWait(driver, 25).until(EC.presence_of_element_located((By.CSS_SELECTOR, '.LFciOZ2c0shORRbilT_zE')))\n\t\txx=1\n\t\tl0g(\"ready !! WAITING \"+str(60*xx)+\" ---> \")\n\t\ttime.sleep(60*xx)\n\t\tl0g(\"ready !!NO WAITING \"+str(60*xx)+\" ---> \")\n\texcept:\n\t\tl0g(\"no trial !! 
---> \")\n########################################################################################################\"\n########################################################################################################\"\n########################################################################################################\"\n\t\n\tactivvat=gather_acces(kx_id+\"@chokariz.tk\")\n\tprint(kx_id ,activvat)\n\tdriver.get(activvat[-1])\n\t#input()\n\n\t\n\tdriver.get(\"https://trial.docker.com/demo\")\n\ttime.sleep(25)\n\t#input()\n\tc_url=driver.current_url\n\tif \"demo\" in c_url :\n\t\ttry:\n\t\t\t#element = wait.until(EC.visibility_of_element_located((By.XPATH, \"//*[@id='posted_1']/div[3]\")))\n\t\t\ttrial_lisst=WebDriverWait(driver, 5).until(EC.presence_of_element_located((By.XPATH, '//*[@id=\"root\"]/div/div/div[1]/ul/li[3]/div')))\n\t\t\tprint(\"docker ok \",\"G_BLOOD_END\")\n\t\t\tlis1=driver.find_elements_by_xpath('//*[@id=\"root\"]/div/div/div[1]/ul/li[3]/div')[0]\n\t\t\tlis1.click()\n\t\t\tprint(\" ADMIN O K\",\"G_BLOOD_END\")\n\t\texcept Exception as e :\n\t\t\tprint(\"errrrooo \"+str(e))\n\t\ttry:\n\t\t\tB_button=WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.CSS_SELECTOR, '.terminal.xterm.xterm-theme-default.xterm-cursor-style-block')))\n\t\t\tB_button.click()\n\t\t\ttime.sleep(10)\n\t\t\tB2_button=WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.CSS_SELECTOR, '.xterm-rows')))\n\t\t\ttter=B2_button.text\n\t\t\t#xterm-rows\n\t\t\tif \"admin@trial-console:~$\" in tter:\n\t\t\t\tB_button.send_keys(Keys.RETURN)\n\t\t\t\ttime.sleep(5)\n\t\t\t\tB_button.send_keys(new_cmd,Keys.RETURN)\n\t\t\t\tprint(\" admin@trial-console : O K \",\"G_BLOOD_END\")\n\t\t\telse:\n\t\t\t\tpass\n\t\t\t\t#driver.refresh()\n\t\t\t\t#chk(driver)\n\t\texcept Exception as e:\n\t\t\t\tprint(\" N O \"+str(e),\"G_BLOOD_END\")\n\t#input()\n\ttry:\n\t\t#OU3wpEgR9LahNNZBRuuF6 _2Emw38rNoK7Azp0MbyegGI\n\t\tadmin_cosol=WebDriverWait(driver, 25).until(EC.presence_of_element_located((By.CSS_SELECTOR, '.OU3wpEgR9LahNNZBRuuF6._2Emw38rNoK7Azp0MbyegGI')))\n\t\tprint(\"okkkkkkkkkkkkk\")\n\texcept Exception as e :\n\t\tprint(\"okkkkkkkkkkkkk\"+str(e))\n\ttry:\n\t\t#element = wait.until(EC.visibility_of_element_located((By.XPATH, \"//*[@id='posted_1']/div[3]\")))\n\t\ttrial_lisst=WebDriverWait(driver, 5).until(EC.presence_of_element_located((By.XPATH, '//*[@id=\"root\"]/div/div/div[1]/ul/li[3]/div')))\n\t\tprint(\"docker ok \",\"G_BLOOD_END\")\n\t\tlis1=driver.find_elements_by_xpath('//*[@id=\"root\"]/div/div/div[1]/ul/li[3]/div')[0]\n\t\tlis1.click()\n\t\tlis1.click()\n\t\tprint(\" ADMIN O K\",\"G_BLOOD_END\")\n\texcept Exception as e :\n\t\tprint(\"nooooooooooooooooooo\"+str(e))\n\t#input()\n\texit(0)\n\tdriver.quit()\n\n\ntry:\n\tvisibilityo=sys.argv[1]\n\tprint(visibilityo)\nexcept:\n\tvisibilityo=\"0\"\n\n\n\ndef starter(visibilityo):\n\tif visibilityo != \"1\":\n\t\tprint(\"Visible = hide\")\n\t\t#display = Display(visible=0, size=(800, 600))\n\t\t#display.start()\n\t\tgo_820()\n\t\tdisplay.stop()\n\telse:\n\t\tprint(\"Visible = visible\")\n\t\tgo_820()\n\nstarter(visibilityo)","sub_path":"krlater-zoho/login_acti.py","file_name":"login_acti.py","file_ext":"py","file_size_in_byte":10664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"190546790","text":"'''\nPR 10.5a: Blikanie nadpisu\n\nNapíšte program, ktorý bude simulovať blikanie nadpisov. Nadpismi rozumieme štyri rovnaké slová otočené o 0⁰, 90⁰, 180⁰ a 270⁰. 
Každých 250ms sa zmenia/posunú farby jednotlivých nadpisov. Na „posun“ farieb použite n-ticu.\n'''\n\nimport tkinter as tk\nimport random\n\nwidth, height = 1280, 720\nsx, sy = width // 2, height // 2\nfarby = ('cyan', 'magenta', 'yellow', 'green')\noffset = 0\n\nc = tk.Canvas(width=width, height=height, background='black')\nc.pack()\n\n\ndef vykresli():\n global offset\n c.delete('all')\n for i in range(4):\n c.create_text(sx, sy, text=' Python', font='Arial 32 bold', angle=90 * i, fill=farby[(i + offset) % 4], anchor='w')\n offset += 1\n if offset >= 4:\n offset = 0\n\n c.after(250, vykresli)\n\n\nvykresli()\n","sub_path":"20171205/blikanie_a.py","file_name":"blikanie_a.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"56476385","text":"import os\n\nROOT_PATH = \"test_mind/results\"\n\nimp_id = 1\n\nwith open(os.path.join(ROOT_PATH, \"nrms-test-ep4.txt\"), \"w\") as fw:\n for pno in range(20):\n lines = open(os.path.join(ROOT_PATH, \"nrms-test-prediction.p{}.txt\".format(pno)), \"r\").readlines()\n for l in lines:\n rank = l.strip().split()[1]\n fw.write(\"{} {}\\n\".format(imp_id, rank))\n imp_id += 1\n","sub_path":"examples/merge_test.py","file_name":"merge_test.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"522082733","text":"import util\r\nimport os\r\nimport context\r\nimport time\r\nfrom xml.dom import minidom\r\nfrom tempfile import mkstemp\r\nfrom shutil import move\r\nfrom os import remove, close\r\n\r\ndef replace(file, pattern, subst):\r\n #Create temp file\r\n fh, abs_path = mkstemp()\r\n new_file = open(abs_path,'w')\r\n old_file = open(file)\r\n for line in old_file:\r\n new_file.write(line.replace(pattern, subst))\r\n #close temp file\r\n new_file.close()\r\n close(fh)\r\n old_file.close()\r\n #Remove original file\r\n remove(file)\r\n #Move new file\r\n move(abs_path, file)\r\n\r\ndef get_s3_dir(ctx, svr):\r\n try:\r\n svr=svr.upper()\r\n do_build = ctx.config.getElementsByTagName('do_build')[0]\r\n elePlatform = do_build.getElementsByTagName('Linux')[0]\r\n for mk in elePlatform.getElementsByTagName('make'):\r\n module = ((mk.getElementsByTagName('module'))[0].childNodes)[0].nodeValue\r\n module = module.upper()\r\n if svr==module:\r\n return ((mk.getElementsByTagName('s3'))[0].childNodes)[0].nodeValue\r\n except Exception as e:\r\n print (\"we occur a exception get_s3_dir:\" + str(e))\r\n return ''\r\n\r\n finally:\r\n pass\r\n return ''\r\n \r\ndef check_package_install_success(log_file):\r\n last_line = util.file_read_last_line(log_file,3)\r\n #Successful compile (1.108 sec). 
Resulting Setup program filename is:\r\n #print (\"last_line=\"+last_line)\r\n \r\n ret_val = False\r\n \r\n try:\r\n #defautly, install-shield won't fail.\r\n #correct_index = last_line.index(\"Successful compile\")\r\n ret_val = True\r\n \r\n finally:\r\n pass\r\n \r\n return ret_val\r\n\t\r\ndef do_package_cab(sed_dir, ctx):\r\n\t\t#command line is: \"C:/windows/SysWow64/IExpress /N /Q Zoom.SED\"\r\n\t\tos.chdir(sed_dir)\r\n\t\tstr_log_file_path = ctx.log_dir+\"/\"+\"pack_cab_result.log\"\r\n\t\tcommand = \"C:/windows/SysWow64/IExpress /N /Q \"+ctx.cab_script+\" > \"+str_log_file_path\r\n\t\tprint(\"command=\"+command)\r\n\t\tos.system(command)\r\n \r\n\t\treturn True\t\r\n\t\t\t\r\ndef do_package_installer(working_dir, dest_config, ctx):\r\n os.chdir(working_dir)\r\n\r\n str_log_file_path = ctx.log_dir+\"/\"+\"install_shield_result.log\"\r\n command = \"iscmdbld -p \"+dest_config+\" > \"+str_log_file_path\r\n print(\"command=\"+command)\r\n os.system(command)\r\n\r\n iscc_cmd_ok = check_package_install_success(str_log_file_path)\r\n\r\n if False == iscc_cmd_ok:\r\n ctx.attach_list.append(str_log_file_path)\r\n ctx.notable_error_string = ctx.notable_error_string+\"\\n installshield run failed.\"\r\n else:\r\n #command = \r\n #os.system (\"copy %s %s\" % (\"setup.exe\", \"setup.doc\"))\r\n copy_command = \"xcopy \"+ctx.src_home+\"\\\\Bin\\\\Release\\\\ZoomInstaller\\\\ZoomInstaller\\\\DiskImages\\\\DISK1\\\\ZoomInstaller.msi \"+ctx.src_home+\"\\Bin\\Release /Y\"\r\n print(\"command=\"+copy_command)\r\n os.system(copy_command)\r\n\r\n os.chdir(ctx.init_working_dir)\r\n \r\n return iscc_cmd_ok\r\n\r\ndef do_bin_copy_win(dest_folder,ctx,build_type):\r\n if (\"win\" == build_type) or (\"all\" == build_type):\r\n \r\n copy_command = \"xcopy \"+ctx.src_home+\"\\\\Bin\\\\Release\\\\\"+ctx.cab_script+\" \"+dest_folder+\" /Y\"\r\n print(\"command=\"+copy_command)\r\n os.system(copy_command)\r\n \r\n copy_command = \"xcopy \"+ctx.src_home+\"\\\\Bin\\\\Release\\\\\"+ctx.exe_script+\" \"+dest_folder+\" /Y\"\r\n print(\"command=\"+copy_command)\r\n os.system(copy_command)\r\n \r\n copy_command = \"xcopy \"+ctx.src_home+\"\\\\Bin\\\\Release\\\\\"+ctx.xp_script+\" \"+dest_folder+\" /Y\"\r\n print(\"command=\"+copy_command)\r\n os.system(copy_command)\r\n \r\n #write a SED file here\r\n f = open(ctx.src_home+\"\\\\Bin\\\\Release\\\\\"+ctx.exe_script,'r')\r\n str_template = f.read()\r\n str_template = str_template.replace(\"TargetName=.\\\\VideoBox.EXE\",\"TargetName=.\\\\\"+ctx.vendor+\".\"+ctx.version+\".EXE\") \r\n f.close( )\r\n f = open(dest_folder+\"\\\\\"+ctx.exe_script,'w')\r\n f.write(str_template)\r\n f.close()\r\n \r\n copy_command = \"xcopy \"+ctx.src_home+\"\\\\Bin\\\\Release\\\\\"+ctx.sh_script+\" \"+dest_folder+\" /Y\"\r\n print(\"command=\"+copy_command)\r\n os.system(copy_command)\r\n \r\n copy_command = \"xcopy \"+ctx.src_home+\"\\\\Bin\\\\Release\\\\winhttp.dll \"+dest_folder+\" /Y\"\r\n print(\"command=\"+copy_command)\r\n os.system(copy_command)\r\n \r\n copy_command = \"xcopy \"+ctx.src_home+\"\\\\Bin\\\\Release\\\\vcredist_x86.exe \"+dest_folder+\" /Y\"\r\n print(\"command=\"+copy_command)\r\n os.system(copy_command)\r\n \r\n copy_command = \"xcopy \"+ctx.src_home+\"\\\\Bin\\\\Release\\\\\"+ctx.launcher_name+\" \"+dest_folder+\" /Y\"\r\n print(\"command=\"+copy_command)\r\n os.system(copy_command)\r\n \r\n copy_command = \"xcopy \"+ctx.src_home+\"\\\\Bin\\\\Release\\\\zDevHelper.exe \"+dest_folder+\" /Y\"\r\n print(\"command=\"+copy_command)\r\n os.system(copy_command)\r\n \r\n 
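# --- Editorial aside (not part of the original source): the copy steps above and
# below in do_bin_copy_win all repeat the same three lines -- build an xcopy
# command string, print it, run it with os.system. A minimal sketch of a helper
# that captures that pattern; the name run_copy and the zero-exit-status check
# are assumptions, not code from this repository. It relies on the `import os`
# already present at the top of this file.
def run_copy(command):
    # Log the command the same way the surrounding code does, then run it and
    # report success as a boolean instead of discarding os.system's return value.
    print("command=" + command)
    return 0 == os.system(command)
# Hypothetical usage, mirroring one of the copies above:
# run_copy("xcopy " + ctx.src_home + "\\Bin\\Release\\winhttp.dll " + dest_folder + " /Y")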
copy_command = \"xcopy \"+ctx.src_home+\"\\\\Bin\\\\Release\\\\\"+ctx.installer_name+\" \"+dest_folder+\" /Y\"\r\n print(\"command=\"+copy_command)\r\n os.system(copy_command)\r\n \r\n copy_command = \"xcopy \"+ctx.src_home+\"\\\\Bin\\\\Release\\\\ZoomInstaller.msi \"+dest_folder+\" /Y\"\r\n print(\"command=\"+copy_command)\r\n os.system(copy_command)\r\n \r\n copy_command = \"xcopy \"+ctx.src_home+\"\\\\Bin\\\\Release\\\\*.pdb \"+dest_folder+\" /Y\"\r\n print(\"command=\"+copy_command)\r\n os.system(copy_command)\r\n \r\n copy_command = \"xcopy \"+ctx.src_home+\"\\\\Codec\\\\zltcodec\\\\trunk\\\\bin\\\\Release\\\\*.pdb \"+dest_folder+\" /Y\"\r\n print(\"command=\"+copy_command)\r\n os.system(copy_command)\r\n \r\n copy_command = \"xcopy \"+ctx.src_home+\"\\\\Bin\\\\Release\\\\*.map \"+dest_folder+\" /Y\"\r\n print(\"command=\"+copy_command)\r\n os.system(copy_command)\r\n \r\n copy_command = \"xcopy \"+ctx.src_home+\"\\\\Codec\\\\zltcodec\\\\trunk\\\\maps\\\\Release\\\\*.map \"+dest_folder+\" /Y\"\r\n print(\"command=\"+copy_command)\r\n os.system(copy_command)\r\n \r\n copy_command = \"erase \"+dest_folder+\"\\\\zltEncoderApp.pdb\"\r\n print(\"command=\"+copy_command)\r\n os.system(copy_command)\r\n \r\n copy_command = \"erase \"+dest_folder+\"\\\\zltDecoderApp.pdb\"\r\n print(\"command=\"+copy_command)\r\n os.system(copy_command)\r\n \r\n copy_command = \"erase \"+dest_folder+\"\\\\testCptHost.pdb\"\r\n print(\"command=\"+copy_command)\r\n os.system(copy_command)\r\n \r\n copy_command = \"erase \"+dest_folder+\"\\\\Installer.pdb\"\r\n print(\"command=\"+copy_command)\r\n os.system(copy_command)\r\n \r\n #copy ClickOnce files\r\n os.system(\"mkdir \"+dest_folder+\"\\\\\"+ctx.co_target)\r\n os.system(\"xcopy \"+ctx.src_home+\"\\\\Bin\\\\Release\\\\app.publish\\\\* \"+dest_folder+\"\\\\\"+ctx.co_target+\" /s /i\")\r\n \r\n if (\"android\" == build_type) or (\"all\" == build_type):\r\n #copy android binary and symbols\r\n copy_command = \"xcopy \"+ctx.src_home+\"\\\\Bin\\\\Release\\\\*.apk \"+dest_folder+\" /Y\"\r\n print(\"command=\"+copy_command)\r\n os.system(copy_command)\r\n \r\n copy_command = \"xcopy \"+ctx.src_home+\"\\\\Bin\\\\Release\\\\android_symbol_\"+ctx.version+\".rar \"+dest_folder+\" /Y\"\r\n print(\"command=\"+copy_command)\r\n os.system(copy_command)\r\n \r\n copy_command = \"erase \"+ctx.src_home+\"\\\\Bin\\\\Release\\\\*.apk\"\r\n print(\"command=\"+copy_command)\r\n os.system(copy_command)\r\n \r\n copy_command = \"erase \"+ctx.src_home+\"\\\\Bin\\\\Release\\\\android_symbol_\"+ctx.version+\".rar \"\r\n print(\"command=\"+copy_command)\r\n os.system(copy_command) \r\n\r\ndef do_bin_copy_mac(src_folder, dest_folder, ctx):\r\n #copy_command = \"cp -R \"+src_folder+\"*.bundle \"+dest_folder\r\n #print(\"command=\"+copy_command)\r\n #os.system(copy_command)\r\n os.chdir(src_folder)\r\n command = \"zip mac_map_\"+ctx.version+\".zip *.txt \"\r\n print(\"command=\"+command)\r\n os.system(command)\r\n\r\n copy_command = \"cp -R zoom.us.app \"+ctx.src_home+\"/Client/build/mac/buildpkg/Scripts/\"\r\n print(\"command=\"+copy_command)\r\n os.system(copy_command)\r\n \r\n copy_command = \"cp -R ZoomUsPlugIn.plugin \"+ctx.src_home+\"/Client/build/mac/buildpkg/Scripts/\"\r\n print(\"command=\"+copy_command)\r\n os.system(copy_command)\r\n \r\n copy_command = \"cp mac_map_\"+ctx.version+\".zip \"+dest_folder\r\n print(\"command=\"+copy_command)\r\n os.system(copy_command)\r\n \r\n #copy_command = \"cp -R \"+src_folder+\"zoom.us.app \"+dest_folder\r\n 
#print(\"command=\"+copy_command)\r\n #os.system(copy_command)\r\n \r\n copy_command = \"cp -R \"+src_folder+\"zDevHelper.app \"+dest_folder\r\n print(\"command=\"+copy_command)\r\n os.system(copy_command)\r\n\r\n #copy_command = \"cp -R \"+src_folder+\"ZoomUsPlugIn.plugin \"+dest_folder\r\n #print(\"command=\"+copy_command)\r\n #os.system(copy_command)\r\n\r\n copy_command = \"cp -R \"+src_folder+\"Transcode.app \"+dest_folder\r\n print(\"command=\"+copy_command)\r\n os.system(copy_command)\r\n\r\ndef do_package_self_zip(working_dir, dest_config, ctx):\r\n os.chdir(working_dir)\r\n str_log_file_path = ctx.log_dir+\"/\"+\"iexpress_result.log\"\r\n command = \"iexpress /N /Q \"+dest_config+\" > \"+str_log_file_path\r\n print(\"command=\"+command)\r\n os.system(command)\r\n\r\n os.chdir(ctx.init_working_dir)\r\n \r\n return True\r\n\r\ndef do_package_linux(bin_dir, ctx):\r\n #4.\tAppend version number \r\n os.chdir(bin_dir)\r\n \r\n if get_s3_dir(ctx,'cctrl') != '':\r\n command = \"md5sum cctrl > cctrl_\"+ctx.version+\".md5\"\r\n print(\"command=\"+command)\r\n success = (0 == os.system(command))\r\n\r\n command = \"mv cctrl cctrl_\"+ctx.version \r\n print(\"command=\"+command)\r\n success = (0 == os.system(command)) and success \r\n\r\n command = \"/opt/s3/s3cmd put cctrl_\"+ctx.version+\".md5 \" + get_s3_dir(ctx,'cctrl') + \"/cctrl_\"+ctx.version+\".md5\" \r\n print(\"command=\"+command)\r\n success = (0 == os.system(command)) and success\r\n\r\n command = \"/opt/s3/s3cmd put cctrl_\"+ctx.version+\" \"+get_s3_dir(ctx,'cctrl') + \"/cctrl_\"+ctx.version \r\n print(\"command=\"+command)\r\n success = (0 == os.system(command)) and success\r\n \r\n if get_s3_dir(ctx,'zctrl') != '':\r\n command = \"md5sum zctrl > zctrl_\"+ctx.version+\".md5\"\r\n print(\"command=\"+command)\r\n success = (0 == os.system(command))\r\n \r\n command = \"mv zctrl zctrl_\"+ctx.version\r\n print(\"command=\"+command)\r\n success = (0 == os.system(command)) and success\r\n\r\n command = \"/opt/s3/s3cmd put zctrl_\"+ctx.version+\".md5 \" + get_s3_dir(ctx,'zctrl') + \"/zctrl_\"+ctx.version+\".md5\" \r\n print(\"command=\"+command)\r\n success = (0 == os.system(command)) and success\r\n\r\n command = \"/opt/s3/s3cmd put zctrl_\"+ctx.version+\" \"+get_s3_dir(ctx,'zctrl')+\"/zctrl_\"+ctx.version \r\n print(\"command=\"+command)\r\n success = (0 == os.system(command)) and success\r\n \r\n if get_s3_dir(ctx,'mmr') != '':\r\n command = \"md5sum mmr > mmr_\"+ctx.version+\".md5\"\r\n print(\"command=\"+command)\r\n success = (0 == os.system(command))\r\n \r\n command = \"mv mmr mmr_\"+ctx.version\r\n print(\"command=\"+command)\r\n success = (0 == os.system(command)) and success\r\n\r\n command = \"/opt/s3/s3cmd put mmr_\"+ctx.version+\".md5 \" + get_s3_dir(ctx,'mmr') + \"/mmr_\"+ctx.version+\".md5\" \r\n print(\"command=\"+command)\r\n success = (0 == os.system(command)) and success\r\n\r\n command = \"/opt/s3/s3cmd put mmr_\"+ctx.version+\" \"+get_s3_dir(ctx,'mmr')+\"/mmr_\"+ctx.version \r\n print(\"command=\"+command)\r\n success = (0 == os.system(command)) and success\r\n \r\n if get_s3_dir(ctx,'ccgw') != '':\r\n command = \"md5sum ccgw > ccgw_\"+ctx.version+\".md5\"\r\n print(\"command=\"+command)\r\n success = (0 == os.system(command))\r\n \r\n command = \"mv ccgw ccgw_\"+ctx.version\r\n print(\"command=\"+command)\r\n success = (0 == os.system(command)) and success\r\n\r\n command = \"/opt/s3/s3cmd put ccgw_\"+ctx.version+\".md5 \" + get_s3_dir(ctx,'ccgw') + \"/ccgw_\"+ctx.version+\".md5\" \r\n 
print(\"command=\"+command)\r\n success = (0 == os.system(command)) and success\r\n\r\n command = \"/opt/s3/s3cmd put ccgw_\"+ctx.version+\" \"+get_s3_dir(ctx,'ccgw')+\"/ccgw_\"+ctx.version \r\n print(\"command=\"+command)\r\n success = (0 == os.system(command)) and success\r\n \r\n if get_s3_dir(ctx,'telgw') != '':\r\n command = \"md5sum telgw > telgw_\"+ctx.version+\".md5\"\r\n print(\"command=\"+command)\r\n success = (0 == os.system(command))\r\n \r\n command = \"mv telgw telgw_\"+ctx.version\r\n print(\"command=\"+command)\r\n success = (0 == os.system(command)) and success\r\n\r\n command = \"/opt/s3/s3cmd put telgw_\"+ctx.version+\".md5 \" + get_s3_dir(ctx,'telgw') + \"/telgw_\"+ctx.version+\".md5\" \r\n print(\"command=\"+command)\r\n success = (0 == os.system(command)) and success\r\n\r\n command = \"/opt/s3/s3cmd put telgw_\"+ctx.version+\" \"+get_s3_dir(ctx,'telgw')+\"/telgw_\"+ctx.version \r\n print(\"command=\"+command)\r\n success = (0 == os.system(command)) and success\r\n \r\n return success\r\n \r\n #5.\tGenerate MD5 for each file and put them into S3\r\n #6.\tUpload targets to S3://zoom-op/release \r\n \r\nif __name__==\"__main__\": \r\n #os.chdir(\"/Volumes/DailyBuild/Output/\") \r\n #command = \"Zip -r -9 20120305-2018.zip 20120305-2018\"\r\n #print(\"command=\"+command)\r\n #os.system(command)\r\n\r\n ctx = context.SaaSbeeBVTContext()\r\n ctx.set_zoom_tag(\"2.0\")\r\n ctx.set_svn_tag(\"1111\")\r\n \r\n ctx.set_src_home(\"G:\\\\DailyBuild\\\\SaaSbee_SVN\\\\\")\r\n if do_package_installer(\"G:/DailyBuild/SaaSbee_SVN/Client/src/install/windows/MSI\",\"ZoomInstaller.ism\",ctx):\r\n print (\"installshield success\")\r\n else:\r\n print (\"installshield failed\")\r\n\r\n time.sleep(10)\r\n \r\n copy_command = \"xcopy G:\\\\DailyBuild\\\\SaaSbee_SVN\\\\Bin\\\\Release\\\\ZoomInstaller\\\\ZoomInstaller\\\\DiskImages\\\\DISK1\\\\ZoomInstaller.msi G:\\\\DailyBuild\\\\SaaSbee_SVN\\\\Bin\\\\Release /Y\"\r\n print(\"command=\"+copy_command)\r\n os.system(copy_command)\r\n\r\n# time.sleep(3) \r\n\r\n# if do_package_self_zip(\"G:/DailyBuild/SaaSbee_SVN/bin/release\",\"VideoBox.SED\", ctx):\r\n# print (\"self zip success\")\r\n# else:\r\n# print (\"self zip failed\")\r\n\r\n# dest_folder = \"I:\\\\work\\\\DailyBuild\\\\\"+ctx.build_time\r\n# util.dir_create(dest_folder);\r\n\r\n# do_bin_copy_win(dest_folder,ctx);\r\n \r\nif __name__==\"__main__1\": \r\n ctx = context.SaaSbeeBVTContext() \r\n ctx.set_svn_tag(\"9999\")\r\n bin_dir = \"/mnt/build/zoom/bin\"\r\n success = do_package_linux(bin_dir, ctx)\r\n print(\"success=\"+str(success))\r\n \r\nif __name__==\"__main__2\":\r\n ctx = context.SaaSbeeBVTContext()\r\n xmldoc = minidom.parse('bvt_config.xml')\r\n ctx.set_config(xmldoc)\r\n print (\"s3 directory for mmr is:\"+get_s3_dir(ctx,'mmr'))\r\n print (\"s3 directory for zc is:\"+get_s3_dir(ctx,'zctrl'))\r\n print (\"s3 directory for cc is:\"+get_s3_dir(ctx,'cctrl'))\r\n print (\"s3 directory for ccgw is:\"+get_s3_dir(ctx,'ccgw'))\r\n\r\nif __name__==\"__main__3\": \r\n ctx = context.SaaSbeeBVTContext()\r\n ctx.set_src_home(\"G:\\\\DailyBuild\\\\SaaSbee_SVN\\\\\")\r\n print (do_package_ZoomCab(\"I:\\\\work\\\\DailyBuild\\\\1.1.21170.0411\",ctx)) \r\n \r\n","sub_path":"Build_Scripts_iOS_PSO/PSO_IPHONEX/script_2.x/package.py","file_name":"package.py","file_ext":"py","file_size_in_byte":15377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"385019881","text":"import serial\nimport logging\nimport threading\nimport 
multiprocessing\nimport struct\nfrom queue import Queue\n\nfrom PyQt5.QtCore import QObject, pyqtSignal\n\nfrom Common.Exceptions import BadSignatureException\n\n\nclass LLTransceiver(QObject):\n gateway = serial.Serial()\n counter = 0\n queue = Queue()\n subscribed = []\n connected = pyqtSignal()\n disconnected = pyqtSignal()\n logger = logging.getLogger('main')\n\n def __init__(self, port, speed=115200):\n \"\"\"\n Opens a port to transmit with defined parameters\n :param port: port to open\n :param speed: defaults to 115200\n \"\"\"\n\n super().__init__()\n self.port_name = port\n self.gateway.baudrate = speed\n self.gateway.port = self.port_name\n\n def connect(self):\n self.logger.info(f'Opening port {self.port_name} with speed {self.gateway.baudrate}')\n\n self.gateway.open()\n if self.gateway.is_open:\n self.logger.info(f'Port {self.port_name} opened with speed {self.gateway.baudrate}')\n self.connected.emit()\n\n processing_thread = threading.Thread(target=self.wait_for_packet)\n processing_thread.start()\n self.logger.info(f'Started processing thread {processing_thread}')\n\n sender_thread = threading.Thread(target=self.packet_sender)\n sender_thread.start()\n self.logger.info(f'Started sender thread {sender_thread}')\n\n else:\n self.logger.fatal(f'Failed to open port {self.port_name} with speed {self.gateway.baudrate}')\n raise Exception()\n\n def is_up(self):\n return self.gateway.is_open\n\n def subscribe(self, target):\n \"\"\"\n Add a method to be called when a packet is received\n :param target: method to be called\n \"\"\"\n\n if callable(target):\n self.subscribed.append(target)\n\n def wait_for_packet(self, *args, **kwargs):\n while self.gateway.is_open:\n try:\n bit = self.gateway.read(1)\n if bit == b'+':\n self.counter += 1\n self.logger.debug(f'Got packet ({bit}) from port {self.port_name} number {self.counter}')\n payload = self.read_full_packet()\n for caller in self.subscribed:\n '''thread = threading.Thread(target=caller, args=[payload])\n self.logger.debug(f'Starting processing thread ({thread}) for packet')\n thread.start()'''\n caller(payload)\n\n except Exception:\n pass\n\n def read_full_packet(self):\n length_bytes = self.gateway.read(2)\n length = struct.unpack('<H', length_bytes)[0] # 2-byte little-endian length header\n if length > 52:\n raise BadSignatureException()\n self.logger.debug(f'Reading packet {self.counter} with length {length} [{length_bytes}]')\n payload = self.gateway.read(length)\n if b'+' in payload:\n raise BadSignatureException()\n\n return payload\n\n def enqueue_packet(self, payload, callback=None):\n self.logger.debug(f'Got packet {payload} to enqueue...')\n payload = self.add_signature_and_length(payload)\n self.queue.put([payload, callback])\n self.logger.debug(f'Packet {payload} enqueued')\n\n def add_signature_and_length(self, payload):\n signature = b'+'\n length = struct.pack('<H', len(payload)) # 2-byte little-endian length header\n return signature + length + payload\n\n def packet_sender(self):\n while self.gateway.is_open:\n if self.queue.qsize() > 0:\n payload, callback = self.queue.get()\n self.logger.debug(f'Got packet {payload} from queue')\n self.gateway.write(payload)\n if callable(callback):\n thread = threading.Thread(target=callback, args=[payload])\n thread.start()\n self.queue.task_done()\n self.logger.info(f'Packet {payload} sent')\n\n def close(self):\n if self.gateway.is_open:\n self.gateway.close()\n self.disconnected.emit()\n self.logger.info(f'Port {self.port_name} closed')\n\n\n","sub_path":"LowLevel/LLTransceiver.py","file_name":"LLTransceiver.py","file_ext":"py","file_size_in_byte":4210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} {"seq_id":"370793368","text":"from flask import render_template, url_for, flash,
request, redirect, Response, render_template_string\nfrom website import app, api\nfrom .auth import SignupApi, LoginApi\nfrom .data import InputApi, OutputApi, TimeLines, InputApiTime, OutputApiTime\nfrom .data import ListTimes, OutputEmployee, UpdateTimeLine, EditEmployee\nfrom flask_jwt_extended import jwt_required\nfrom .excel import Excel\nfrom website.database.models import Users, TimeLine, Trackers\nimport copy\nfrom datetime import timedelta, datetime\nimport io\nfrom openpyxl import Workbook\n\n\n\n@app.route(\"/login\", methods=['GET', 'POST'])\ndef login():\n return render_template(\"index.html\")\n\n\n@app.route(\"/sign-up\", methods=['GET', 'POST'])\ndef signUp():\n return render_template(\"index.html\")\n\n\n@app.route(\"/\", methods=['GET', 'POST'])\ndef my_index():\n return render_template(\"index.html\")\n\n\n@app.route(\"/staff\", methods=['GET', 'POST'])\ndef staff():\n return render_template(\"index.html\")\n\n\n@app.route(\"/setting\", methods=['GET', 'POST'])\ndef setting():\n return render_template(\"index.html\")\n\n\n@app.route(\"/detail-staff/\", methods=['GET'])\ndef detail(user):\n return render_template(\"index.html\", id=user)\n\n\n@app.route(\"/export/excel/employee\", methods=['GET'])\ndef export():\n myio = io.BytesIO()\n wb = Workbook()\n ws1 = wb.active\n ws1['A1'] = \"Tên nhân viên: \"\n ws1['A2'] = \"Chức vụ: \"\n ws1['C2'] = \"Ngày sinh: \"\n ws1['A3'] = \"Ngày\"\n ws1['B3'] = \"Check in\"\n ws1['C3'] = \"Check out\"\n ws1['D3'] = 'Tổng thời gian'\n total = timedelta(hours=0, minutes=0)\n user = request.args.get('users')\n times = Trackers.objects(user=user)\n employee = Users.objects.get(pk=user)\n month = request.args.get('month')\n if(month == None):\n month = (datetime.now()).strftime(\"%#m\")\n year = request.args.get('year')\n if(year == None):\n year = (datetime.now()).strftime(\"%y\")\n ws1.title = employee.lastName + ' ' + employee.firstName\n ws1['B1'] = employee.lastName + ' ' + employee.firstName\n ws1['B2'] = employee.affiliation\n # ws1['D2'] = employee.birth\n list_time = ListTimes(times, month, year)\n ws1['B4'] = \"Bảng chấm công tháng \" + month\n i = 5\n for time in list_time:\n ws1['A' + str(i)] = time[\"id\"]\n ws1['B' + str(i)] = time[\"checkIn\"]\n ws1['C' + str(i)] = time[\"checkOut\"]\n ws1['D' + str(i)] = time[\"total\"]\n a = time[\"total\"].split(':') # Cắt time để lấy ngày và giờ\n t = timedelta(hours=int(a[0]), minutes=int(a[1]))\n total += t\n i += 1\n ws1['A' + str(i)] = \"Tổng thời gian làm\"\n ws1['D' + str(i)] = total\n wb.save(myio)\n myio.seek(0)\n\n return Response(myio, mimetype=\"application/ms-excel\", headers={\"Content-Disposition\": \"attachment;filename=employee.xlsx\"})\n\n\n@app.route('/export', methods=['GET'])\ndef listEmployee():\n file = io.BytesIO()\n\n if request.args.get('users') == 'all':\n users = []\n employeelist = Users.objects.all()\n for employee in employeelist:\n users.append(employee['id'])\n else:\n users = (request.args.get('users')).split(',')\n month = request.args.get('month')\n if(month == None):\n month = (datetime.now()).strftime(\"%#m\")\n year = request.args.get('year')\n if(year == None):\n year = (datetime.now()).strftime(\"%y\")\n file = Excel(users, month, year)\n return Response(file, mimetype=\"application/ms-excel\", headers={\"Content-Disposition\": \"attachment;filename=employee_report.xlsx\"})\n\n\ndef initialize_routes(api):\n api.add_resource(SignupApi, '/api/auth/signup')\n api.add_resource(LoginApi, '/api/auth/login')\n api.add_resource(InputApi, 
'/api/data/input')\n api.add_resource(EditEmployee, '/api/edit/employee')\n api.add_resource(OutputApi, '/api/data/output')\n api.add_resource(TimeLines, '/api/time/output')\n api.add_resource(UpdateTimeLine, '/api/time/update')\n api.add_resource(InputApiTime, '/api/time/employee/input')\n api.add_resource(OutputApiTime, '/api/time/employee/output')\n api.add_resource(OutputEmployee, '/api/employee/output')\n","sub_path":"website/resources/router.py","file_name":"router.py","file_ext":"py","file_size_in_byte":4141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"601275554","text":"from string import upper\npi = 3.14\n\ndef raio(r):\n end = (2 * pi) * r\n maior = 'M' if (((2 * pi) * r) > 100.0) else \"Cm\"\n print (\"Comprimento do perimetro: \" + str(end) + str(maior))\n \n\ndef diametro(d):\n end = pi * d\n maior = 'M' if ((pi * d) > 100.0) else \"Cm\"\n print (\"Comprimento do perimetro: \" + str(end) + str(maior))\nwhile True:\n tipo = str(raw_input('raio(r) ou diametro(d))? : '))\n tipo = tipo.upper()\n if tipo.find(\"R\") == 0:\n num = float(raw_input(\"Digite o valor do raio:\"))\n raio(num)\n \n elif tipo.find(\"D\") == 0:\n num = float(raw_input(\"Digite o valor do diametro:\"))\n diametro(num)\n else:\n print(\"Apenas Raio ou Diametro\")\n again = raw_input(\"Deseja fazer mais calculos(Sim ou Nao)?:\")\n again = again.upper()\n if again.find(\"S\") != 0:\n break","sub_path":"calculate.py","file_name":"calculate.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"331312661","text":"# solutions.py\n\"\"\"Volume II Lab 8: Markov Chains\nSolutions file. Written by Shane McQuarrie, even though\nit should have been written by Jared Webb. All well.\n\"\"\"\n\nimport numpy as np\nfrom scipy.sparse import lil_matrix\nfrom os import system\n\n\n# Problem 1: implement this function.\ndef random_markov(n):\n \"\"\"Create a transition matrix for a random Markov chain with n states.\n This should be stored as an nxn numpy array. The columns sum to 1.\n \"\"\"\n transition_matrix = np.empty((n,n))\n for j in range(n):\n column = np.random.random(n)\n column /= column.sum()\n transition_matrix[:,j] = column\n return transition_matrix\n\n\n# Problem 2: modify this function.\ndef forecast(num_days):\n \"\"\"Run a simulation for the weather over 'num_days' days, with\n 'hot' as the starting state. 
Return a list containing the day-by-day\n results, not including the starting day.\n\n Example:\n >>> forecast(3)\n [1, 1, 0]\n\n # Or, if you prefer,\n >>> forecast(5)\n ['cold', 'hot', 'hot', 'cold', 'cold']\n \"\"\"\n transition_matrix = np.array([[.7, .6], [.3, .4]])\n current_state = 0\n record = []\n for day in xrange(num_days):\n random_number = np.random.random()\n if random_number < transition_matrix[1, current_state]:\n current_state = 1\n else:\n current_state = 0\n record.append(current_state)\n return record\n# Roughly 66.7% of the entries should be zeros.\n# Roughly 33.3% of the entries should be ones.\n\n\n# Problem 3: implement this function.\ndef four_state_forecast(days=1):\n transition = np.array(\n [[.5, .3, .1, 0],[.3, .3, .3, .3],[.2, .3, .4, .5],[0, .1, .2, .1]])\n current_state = 0\n record = []\n for day in xrange(days):\n current_state = np.argmax(\n np.random.multinomial(1, transition[:,current_state]))\n record.append(current_state)\n return record\n# Roughly 24.6% of the entries should be zeros.\n# Roughly 30.1% of the entries should be ones.\n# Roughly 33.2% of the entries should be twos.\n# Roughly 12.1% of the entries should be threes.\n\n\n# Problem 4: implement this function.\ndef analyze_simulation():\n \"\"\"Analyze the results of the previous two problems.\"\"\"\n hot1, cold1, hot2, mild, cold2, freezing = [], [], [], [], [], []\n for i in xrange(10):\n f2 = forecast(10000)\n f4 = four_state_forecast(10000)\n hot1.append(f2.count(0))\n cold1.append(f2.count(1))\n hot2.append(f4.count(0))\n mild.append(f4.count(1))\n cold2.append(f4.count(2))\n freezing.append(f4.count(3))\n print(\"2-state forecast Hot days:\\t%f%%\"%(np.mean(hot1)/100.))\n print(\"2-state forecast Cold days:\\t%f%%\"%(np.mean(cold1)/100.))\n print(\"4-state forecast Hot days:\\t%f%%\"%(np.mean(hot2)/100.))\n print(\"4-state forecast Mild days:\\t%f%%\"%(np.mean(mild)/100.))\n print(\"4-state forecast Cold days:\\t%f%%\"%(np.mean(cold2)/100.))\n print(\"4-state forecast Freezing days:\\t%f%%\"%(np.mean(freezing)/100.))\n\n\ndef problem5(filename):\n \"\"\"Read in from a file, convert to ints, read out to a file.\"\"\"\n\n word_list = ['$tart']\n with open(filename, 'r') as f:\n contents = f.readlines()\n outfile = open(\"int_file.txt\", 'w')\n for line in contents:\n sentence = line.split()\n for word in sentence:\n if word not in word_list:\n word_list.append(word)\n outfile.write(str(word_list.index(word)) + \" \")\n outfile.write('\\n')\n word_list.append('en&')\n return word_list\n\n\ndef problem6(int_file, num_states, sparse=False):\n\n # Initialize the transition matrix.\n if sparse:\n markov = lil_matrix((num_states, num_states))\n else:\n markov = np.zeros((num_states, num_states))\n\n # Read in the data and process it.\n with open(int_file, 'r') as f:\n contents = f.readlines()\n data = []\n for line in contents:\n data.append(line.split())\n\n # Build the matrix.\n for i in xrange(len(data)-1):\n line = data[i]\n markov[int(line[0]), 0] += 1\n for j in xrange(len(line)-1):\n markov[int(line[j+1]), int(line[j])] += 1\n markov[num_states-1, int(line[-1])] += 1\n\n # Divide by nonzero column sums.\n for j in xrange(num_states):\n s = markov[:,j].sum()\n if s != 0:\n markov[:,j] /= s\n return markov\n\ndef sentences(infile, outfile, num_sentences=1):\n \"\"\"Generate random sentences using the word list generated in\n Problem 5 and the transition matrix generated in Problem 6.\n Write the results to the specified outfile.\n\n Parameters:\n infile (str): The path to a filen 
containing a training set.\n outfile (str): The file to write the random sentences to.\n num_sentences (int): The number of random sentences to write.\n\n Returns:\n None\n \"\"\"\n\n # Get output from previous problems.\n word_list = problem5(infile)\n transition = problem6('int_file.txt', len(word_list), False)\n\n # Transition through the Markov chain.\n stop = transition.shape[1] - 1\n output = \"\"\n for i in xrange(num_sentences):\n current_state = 0\n while current_state != stop:\n current_state = np.argmax(\n np.random.multinomial(1, transition[:,current_state]))\n if current_state != stop:\n output += word_list[current_state] + \" \"\n else:\n output += \"\\n\"\n\n # Write the results to the specified output file.\n with open(outfile, 'w') as f:\n f.write(output)\n system(\"rm int_file.txt\")\n","sub_path":"Labs/MarkovChains/solutions.py","file_name":"solutions.py","file_ext":"py","file_size_in_byte":5615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"628859202","text":"# src-ch1/FallingSphereEulerHeun.py;ODEschemes.py @ git@lrhgit/tkt4140/src/src-ch1/ODEschemes.py;\nfrom DragCoefficientGeneric import cd_sphere\nfrom ODEschemes import euler, heun\nfrom matplotlib.pyplot import *\nimport numpy as np\n\n# change some default values to make plots more readable\nLNWDT = 2\nFNT = 11\nrcParams['lines.linewidth'] = LNWDT\nrcParams['font.size'] = FNT\n\ng = 9.81 # Gravity m/s^2\nd = 41.0e-3 # Diameter of the sphere\nrho_f = 1.22 # Density of fluid [kg/m^3]\nrho_s = 1275 # Density of sphere [kg/m^3]\nnu = 1.5e-5 # Kinematical viscosity [m^2/s]\nCD = 0.4 # Constant drag coefficient\n\n\ndef f(z, t):\n \"\"\"2x2 system for sphere with constant drag.\"\"\"\n zout = np.zeros_like(z)\n alpha = 3.0*rho_f/(4.0*rho_s*d)*CD\n zout[:] = [z[1], g - alpha*z[1]**2]\n return zout\n\n\ndef f2(z, t):\n \"\"\"2x2 system for sphere with Re-dependent drag.\"\"\"\n zout = np.zeros_like(z)\n v = abs(z[1])\n Re = v*d/nu\n CD = cd_sphere(Re)\n alpha = 3.0*rho_f/(4.0*rho_s*d)*CD\n zout[:] = [z[1], g - alpha*z[1]**2]\n return zout\n\n# main program starts here\n\n\nT = 10 # end of simulation\nN = 20 # no of time steps\ntime = np.linspace(0, T, N+1)\n\nz0 = np.zeros(2)\nz0[0] = 2.0\n\n# compute response with constant CD using Euler's method\nze = euler(f, z0, time)\n# compute response with varying CD using Euler's method\nze2 = euler(f2, z0, time)\n\n# compute response with constant CD using Heun's method\nzh = heun(f, z0, time)\n# compute response with varying CD using Heun's method\nzh2 = heun(f2, z0, time)\n\nk1 = np.sqrt(g*4*rho_s*d/(3*rho_f*CD))\nk2 = np.sqrt(3*rho_f*g*CD/(4*rho_s*d))\n# compute response with constant CD using analytical solution\nv_a = k1*np.tanh(k2*time)\n\n# plotting\n\nlegends = []\nline_type = ['-', ':', '.', '-.', '--']\n\nplot(time, v_a, line_type[0])\nlegends.append('Analytical (constant CD)')\n\nplot(time, ze[:, 1], line_type[1])\nlegends.append('Euler (constant CD)')\n\nplot(time, zh[:, 1], line_type[2])\nlegends.append('Heun (constant CD)')\n\nplot(time, ze2[:, 1], line_type[3])\nlegends.append('Euler (varying CD)')\n\nplot(time, zh2[:, 1], line_type[4])\nlegends.append('Heun (varying CD)')\n\nlegend(legends, loc='best', frameon=False)\n\nxlabel('Time [s]')\nylabel('Velocity 
[m/s]')\ngrid()\nshow()\n","sub_path":"Numerical_Methods_for_Engineers/ch1/FallingSphereEulereHeun.py","file_name":"FallingSphereEulereHeun.py","file_ext":"py","file_size_in_byte":2228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"412204574","text":"class Solution:\n def flatten(self, root):\n def recur(node, next_node):\n if not node:\n return next_node\n elif not node.left and not node.right:\n node.right = next_node\n elif not node.left:\n node.right = recur(node.right, next_node)\n elif not node.right:\n node.right = recur(node.left, next_node)\n node.left = None\n else:\n node.right = recur(node.left, recur(node.right, next_node))\n node.left = None\n return node\n\n recur(root, None)\n\n\nclass Solution:\n def flatten(self, root):\n cur = root\n while cur:\n if cur.left:\n temp = cur.left\n while temp.right:\n temp = temp.right\n temp.right = cur.right\n cur.right = cur.left\n cur.left = None\n cur = cur.right\n\n","sub_path":"leetcode/py/114.py","file_name":"114.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"296814550","text":"#!/usr/bin/env python\n\n\"\"\"\n Importing necessary libraries\n\"\"\"\n\nfrom __future__ import division\nimport numpy as np\nimport time\nfrom random import random as rnd\nimport matplotlib.animation as anim\nimport matplotlib.pyplot as plt\nimport mpmath\nimport simulate\n\ndirectory = \"Data/3\"\n\n# Preprocessor Definitions\nNO = 0\nYES = 1\nexi = 1\nNXM = 10240\nNKXM = 5120\nNPMAX = 4000000 # maximum number of super-particles\n\n# Definitions of constants\nPI = np.pi\nHBAR = 1.05457266e-34 # Reduced Planck constant\nLIGHT_V = 299792458 # Light velocity\nM_e = 9.1093897e-31 # Electron mass\nQ = 1.60217733e-19 # Electron charge in absolute value\neV = 1.6e-19 # 1 eV in J\nM_eV = M_e * LIGHT_V * LIGHT_V # Electon mass in J\nM1_eV = 0.049 # 1st Eigenstate mass in eV\nM1_J = M1_eV * eV\nM1 = M1_J / (LIGHT_V ** 2)\nM2_eV = 0.050 # 2nd Eigenstate mass in eV\nM2_J = M2_eV * eV\nM2 = M2_J / (LIGHT_V ** 2)\nM3_eV = 0.070 # 3rd Eigenstate mass in eV\nM3_J = M3_eV * eV\nM3 = M3_J / (LIGHT_V ** 2)\nM3i_eV = 0.0087 # 3rd Eigenstate mass in eV for inverted hierarchy\nM3i_J = M3i_eV * eV\nM3i = M3i_J / (LIGHT_V ** 2)\n\nE_n_eV = 1e6 # Neutrino energy in eV\nE_n_J = E_n_eV * eV # Neutrino energy in J\n\n# In Hartree atomic unit\nH_HBAR = 1\nH_LIGHT_V = 137 # Light veelocity in Hertree unit\nH_LENGTH = H_LIGHT_V / LIGHT_V\nH_M_e = 1\nH_M1 = M1 / M_e\nH_M2 = M2 / M_e\nH_M3 = M3 / M_e\nH_M3i = M3i / M_e\n\nH_eV = 27.211385 # Hartree energy in eV\n\nM_M1 = M_e\nM_M2 = (M2 / M1) * M_M1\nM_M3 = (M3 / M1) * M_M1\n\n# Particular Calculation\n\nE_n = E_n_eV / H_eV\nM = M_M3\nP_n = np.sqrt(E_n ** 2 - M ** 2 * H_LIGHT_V ** 4) / H_LIGHT_V\n\n# All integers\nFINAL = 0\nK = np.zeros(NPMAX + exi)\nW = np.zeros(NPMAX + exi)\nDIST = np.zeros((NXM + exi, 2 * NKXM + exi))\nISEED = 38467\nUPDATED = np.zeros(NPMAX + exi)\n\n# All doubles here...\nFW1 = np.zeros((NXM + exi, 2 * NKXM + exi))\nDENSX = np.zeros(NXM + exi)\nDENSK = np.zeros(2 * NKXM + exi)\nPHI = np.zeros(NXM + exi)\nTIME = 0.0\nBKTQ = 0.0\nQH = 0.0\n\nP = np.zeros(NPMAX + exi)\n\nVW = np.zeros((NXM + exi, 2 * NKXM + exi))\n\nGAMMA = np.zeros(NXM + exi)\n\nPTIME = np.zeros(NPMAX + exi)\n\n# Initial conditions\n\nINUM = 2000 # maximum number of particles in a phase-space cell for the initial distribution 20 (#200)\n# LX = 200.e-9 # total length of spatial domain\nLX = 500 
# total length of spatial domain (#200)\n# LX = 2.44e10 * H_LENGTH # total length of spatial domain (#200)\n# DT = 0.01e-15 # time step\nDT = 10 # time step (1) (#10)\n# LC = 50.e-9 # coherence length\n# LC = (LX / 10) * 0.3 # coherence length\nLC = 250 # coherence length (#100)\n# LC = PI / (n_v * DT) # coherence length (#100)\nNX = 500 # number of cells in x-direction (500) (#200)\n# ITMAX = 400000 # total number of time steps 200\n# ITMAX = int(LX/DT) *5 # total number of time steps 200 (#4000)\nITMAX = 8000 # total number of time steps 200 (#4000)\nANNIHILATION_FREQUENCY = 100 # (#100)\n\n# SIGMA_WAVE_PACKET = 3.15e-9 # wave packet dispersion\n# SIGMA_WAVE_PACKET = (5.1/3.4)*0.1 # wave packet dispersion (#10)\nSIGMA_WAVE_PACKET = 10 # wave packet dispersion (#10)\n# SIGMA_WAVE_PACKET = 3.15e7 * H_LENGTH # wave packet dispersion (#10)\n# X0_WAVE_PACKET = LX / 2 - 31.5e-9 # wave packet initial position\nX0_WAVE_PACKET = SIGMA_WAVE_PACKET + 10 # wave packet initial position (#SIGMA_WAVE_PACKET + 10 )\n\nBARRIER_POTENTIAL = 0 # value of the potential barrier\nBARRIER_POSITION = 0.5 * LX # barrier center position\nBARRIER_WIDTH = 6.e-9 # barrier width\n\n# spatial cell length\nDX = LX / NX\n\n# automatic calculation of NKX\nNKX = (int)(0.5 * LC / DX)\n\n# pseudo - wave vector length\nDKX = 10 * PI / LC\n\nK0_WAVE_PACKET = 100 * DKX # 1st eigenmass wave packet initial wave vector (150) (#500)\nK1_WAVE_PACKET = 50.0 * DKX # 2nd eigenmass wave packet initial wave vector (120) (#150)\nK2_WAVE_PACKET = 1000 * DKX # 3rd eigenmass wave packet initial wave vector (100) (#100)\n\n# PMNS Matrix\ntheta12 = (np.pi / 180) * 35.26\ntheta23 = (np.pi / 180) * 45\ntheta13 = (np.pi / 180) * 13.2\ndelta = np.pi / 4\nc12 = np.cos(theta12)\nc23 = np.cos(theta23)\nc13 = np.cos(theta13)\ns12 = np.sin(theta12)\ns23 = np.sin(theta23)\ns13 = np.sin(theta13)\n\nPMNS = [[c12 * c13, s12 * c13, s13 * np.exp(delta)],\n [-s12 * c23 - c12 * s23 * s13 * np.exp(delta), c12 * c23 - s12 * s23 * s13 * np.exp(delta), s23 * c13],\n [s12 * s23 - c12 * c23 * s13 * np.exp(delta), -c12 * s23 - s12 * c23 * s13 * np.exp(delta), c23 * c13]\n ]\ntheta = (np.pi / 180) * 45\nPMNS2 = [[np.cos(theta), np.sin(theta)],\n [-np.sin(theta), np.cos(theta)]\n ]\n\n\ndef rnda():\n global ISEED\n ISEED = (1027.0 * ISEED) % 1048576.0\n return ISEED / 1048576.0\n\n\ndef distribution():\n global INUM\n print(\"Calculation of distribution fucntion\\n\")\n print(\"Number of particles {} \\n\".format(INUM))\n\n for i in range(0, NX + 1):\n for k in range(0, 2 * NKX - 1):\n DIST[i][k] = 0\n\n # cloud in cell algorithm\n\n for n in range(0, INUM):\n i = int(P[n] / DX) + 1\n k = K[n]\n if (0 < i) and (i <= NX) and (-NKX < k) and (k < NKX):\n DIST[int(i)][int(k + NKX - 1)] += W[n] ###########njkefhjkwehfjkewb\n\n # stores the normalized quasi-distribution function\n norm = 0\n for i in range(1, NX + 1):\n for j in range(-NKX + 1, NKX):\n FW1[i][j + NKX - 1] = (DIST[i][j + NKX - 1])\n for i in range(1, NX + 1):\n for j in range(-NKX + 1, NKX):\n norm += FW1[i][j + NKX - 1]\n norm *= DX * DKX\n for i in range(1, NX + 1):\n for j in range(-NKX + 1, NKX):\n FW1[i][j + NKX - 1] /= norm\n\n print(\"end of distribution function calculation\")\n\n\ndef density():\n for i in range(1, NX + 1):\n sum = 0\n for j in range(-NKX + 1, NKX):\n sum += FW1[i][j + NKX - 1]\n DENSX[i] = sum * DKX\n DENSX[1] = DENSX[NX] = 0\n\n # in k space\n for j in range(-NKX + 1, NKX):\n sum = 0\n for i in range(1, NX + 1):\n sum += FW1[i][j + NKX - 1]\n DENSK[j + NKX - 1] = sum * DX\n\n\ndef 
kernel():\n for i in range(1, NX + 1):\n for j in range(0, NKX):\n VW[i][j] = 0\n\n for l in range(1, int(0.5 * LC / DX) + 1 + 1):\n if 1 <= (i + l) and (i + l) <= NX and 1 <= (i - l) and (i - l) <= NX:\n VW[i][j] += np.sin(2. * j * DKX * (l - 0.5) * DX) * (PHI[i + l] - PHI[i - l])\n\n VW[i][j] *= -2. * (-Q) * DX / (HBAR * LC)\n\n\ndef calculate_gamma():\n for i in range(1, NX + 1):\n GAMMA[i] = 0\n\n # the implementation below holds taking into account the fact that\n # the Wigner potential is anti-symmetric w.r.t the k-space\n\n # for j in range(1, NKX):\n # GAMMA[i] += abs(VW[i][j])\n\n\ndef devconf():\n global INUM, PMNS, PMNS2\n\n d_max = 0\n\n # definition of the initial conditions\n\n for i in range(1, NX + 1):\n for j in range(-NKX + 1, NKX):\n # FW1[i][j + NKX - 1] = pow((PMNS2[0][0]), 2) * np.exp(\n # -pow(((i - 0.5) * DX - X0_WAVE_PACKET) / SIGMA_WAVE_PACKET, 2.0)) * \\\n # np.exp(-pow(((j * DKX) - K0_WAVE_PACKET) * SIGMA_WAVE_PACKET, 2.0)) + \\\n # pow((PMNS2[0][1]), 2) * np.exp(\n # -pow(((i - 0.5) * DX - X0_WAVE_PACKET) / SIGMA_WAVE_PACKET, 2.0)) * \\\n # np.exp(-pow(((j * DKX) - K1_WAVE_PACKET) * SIGMA_WAVE_PACKET, 2.0)) +\\\n # pow((PMNS2[1][0]), 2) * np.exp(\n # -pow(((i - 0.5) * DX - X0_WAVE_PACKET) / SIGMA_WAVE_PACKET, 2.0)) * \\\n # np.exp(-pow(((j * DKX) - K0_WAVE_PACKET) * SIGMA_WAVE_PACKET, 2.0)) + \\\n # pow((PMNS2[1][1]), 2) * np.exp(\n # -pow(((i - 0.5) * DX - X0_WAVE_PACKET) / SIGMA_WAVE_PACKET, 2.0)) * \\\n # np.exp(-pow(((j * DKX) - K1_WAVE_PACKET) * SIGMA_WAVE_PACKET, 2.0))\n\n # FW1[i][j + NKX - 1] = pow((PMNS[0][0]), 2) * np.exp(\n # -pow(((i - 0.5) * DX - X0_WAVE_PACKET) / SIGMA_WAVE_PACKET, 2.0)) * \\\n # np.exp(-pow(((j * DKX) - K0_WAVE_PACKET) * SIGMA_WAVE_PACKET, 2.0)) + \\\n # pow((PMNS[0][1]), 2) * np.exp(\n # -pow(((i - 0.5) * DX - X0_WAVE_PACKET) / SIGMA_WAVE_PACKET, 2.0)) * \\\n # np.exp(-pow(((j * DKX) - K1_WAVE_PACKET) * SIGMA_WAVE_PACKET, 2.0)) + \\\n # pow((PMNS[0][2]), 2) * np.exp(\n # -pow(((i - 0.5) * DX - X0_WAVE_PACKET) / SIGMA_WAVE_PACKET, 2.0)) * \\\n # np.exp(-pow(((j * DKX) - K2_WAVE_PACKET) * SIGMA_WAVE_PACKET, 2.0))\n\n\n FW1[i][j + NKX - 1] = np.exp(\n -pow(((i - 0.5) * DX - X0_WAVE_PACKET) / SIGMA_WAVE_PACKET, 2.0)) * \\\n np.exp(-pow(((j * DKX) - K0_WAVE_PACKET) * SIGMA_WAVE_PACKET, 2.0))\n\n # normalization of the initial condition\n norm = 0\n for i in range(1, NX + 1):\n for j in range(-NKX + 1, NKX):\n norm += FW1[i][j + NKX - 1]\n\n norm *= DX * DKX\n\n for i in range(1, NX + 1):\n for j in range(-NKX + 1, NKX):\n FW1[i][j + NKX - 1] /= norm\n\n # calculate the EPP variable for the cloud in cell algorithm\n\n for i in range(1, NX + 1):\n for j in range(0, 2 * NKX - 1):\n if d_max < abs(FW1[i][j]):\n d_max = abs(FW1[i][j])\n\n epp = d_max / INUM\n\n # calculate initial distribution function\n print(\"config() - calculating initial distribution\\n\")\n INUM = 0\n\n for i in range(1, NX + 1):\n for j in range(0, 2 * NKX - 1):\n local_number_of_particles = int(abs(FW1[i][j]) / epp + 0.5)\n\n # creates the new local particles in the (i, k) th phase space cell\n # the particles are uniformly distributed in space\n\n for n in range(1, local_number_of_particles + 1):\n m = INUM + n - 1\n if rnd() > 0.5:\n P[m] = (i - 0.5 + 0.5 * rnd()) * DX\n else:\n P[m] = (i - 0.5 - 0.5 * rnd()) * DX\n K[m] = j - NKX + 1\n if FW1[i][j] > 0:\n W[m] = +1\n else:\n W[m] = -1\n INUM += local_number_of_particles\n\n distribution()\n\n print(\"Initial number of electron super particles {} \\n\".format(INUM))\n\n\ndef WMC():\n '''\n Evolution of the
particles\n and creation of (+,-) couples\n :return:\n '''\n\n global INUM\n INUM = int(INUM)\n\n sum = 0\n number_of_created_particles = 0\n\n # initial settings\n number_of_outside_particles = 0\n all_particles_updated = NO\n\n for n in range(0, INUM):\n UPDATED[n] = NO\n for n in range(0, INUM):\n PTIME[n] = DT\n\n while all_particles_updated == NO:\n number_of_outside_particles = 0\n\n # evolution and couples creation\n for n in range(0, INUM):\n if UPDATED[n] == NO:\n hmt = HBAR / (M) * PTIME[n]\n\n # drift n-th particle\n x0 = P[n]\n k0 = K[n] * DKX\n i = int(x0 / DX + 1) # int convert\n\n # evolve position and wave vector of the n-th particle\n if i > 0 and i <= NX and -NKX < K[n] and K[n] < NKX:\n P[n] = x0 + hmt * k0\n\n # calculate the probability that the wave-vector actually evolves\n # according to the continuous dkx\n # check if a couple of (+,-) have to be created\n\n if GAMMA[i] != 0:\n time = 0\n while time < PTIME[n]:\n rdt = -np.log(rnd()) / GAMMA[i]\n time += rdt\n if time < PTIME[n]:\n created = NO\n r = rnd()\n sum = 0.\n\n # random selection of the wave-vector\n j = 0\n while created == NO and j < NKX:\n p = abs(VW[i][j]) / GAMMA[i]\n if sum <= r and r < (sum + p):\n number_of_created_particles += 2\n num = INUM + number_of_created_particles\n\n # select a random time interval when the creation happens\n # assign position\n\n P[num - 2] = P[num - 1] = x0 + HBAR / M * time * k0\n\n # assign wave-vector\n if VW[i][j] >= 0.:\n K[num - 2] = K[n] + j\n K[num - 1] = K[n] - j\n else:\n K[num - 2] = K[n] - j\n K[num - 1] = K[n] + j\n\n # assign quantum weight\n if W[n] == 1:\n W[num - 2] = +1\n W[num - 1] = -1\n else:\n W[num - 2] = -1\n W[num - 1] = +1\n\n # assign flag to evolve the particles at the next loop\n UPDATED[num - 2] = UPDATED[num - 1] = NO\n\n # assign time\n PTIME[num - 2] = PTIME[num - 1] = PTIME[n] - time\n\n # eventually ignore the just-created couple, since at least\n # one of its particles is outside the device\n if K[num - 2] <= -NKX or K[num - 2] >= NKX or K[num - 1] <= -NKX or K[num - 1] >= NKX:\n num -= 2\n number_of_created_particles -= 2\n\n created = YES\n\n sum += p\n j += 1\n\n else:\n number_of_outside_particles += 1\n\n UPDATED[n] = YES\n\n # end of for (n=0;...)\n\n INUM += number_of_created_particles\n\n print(\"INUM = {} -- particles created = {}\\n\".format(INUM, number_of_created_particles))\n\n if INUM > NPMAX:\n print(\"Number of particles has exploded - please increase NPMAX and recompile\\n\")\n exit(0)\n\n # checks if all particles have been updated\n\n flag = YES\n\n for n in range(0, INUM):\n if UPDATED[n] == NO:\n flag = NO\n all_particles_updated = flag\n\n print(\"--number of particles outside = {} -- \\n\".format(number_of_outside_particles))\n\n\ndef annihilation():\n global INUM\n\n print(\"\\n# of particles before annihilation = {}\\n\".format(INUM))\n\n # calculates the new array of particles\n INUM = 0\n for i in range(1, NX + 1):\n for k in range(0, 2 * NKX - 1):\n local_number_of_particles = abs(DIST[i][k])\n\n # creates the new local particles in the (i,k)-th phase-space cell\n # the particles are uniformly distributed in space\n\n for n in range(1, int(local_number_of_particles + 1)):\n m = int(INUM + n - 1)\n if rnd() > 0.5:\n P[m] = (i - 0.5 + 0.5 * rnd()) * DX\n else:\n P[m] = (i - 0.5 - 0.5 * rnd()) * DX\n K[m] = k - NKX + 1\n if DIST[i][k] > 0:\n W[m] = +1\n else:\n W[m] = -1\n\n INUM += local_number_of_particles\n\n print(\"# of particles after the annihilation = {}\\n\\n\".format(INUM))\n\n\ndef
save(ind):\n if ind == 0 or ind == 1:\n\n # saves potential\n fp = open(\"{}/potential.dat\".format(directory), \"w\")\n for i in range(1, NX + 1):\n fp.write(\"{} {}\\n\".format((i - 0.5) * DX, PHI[i]))\n fp.close()\n\n # saves gamma function\n fp = open(\"{}/gamma.dat\".format(directory), \"w\")\n for i in range(1, NX + 1):\n fp.write(\"{} {}\\n\".format((i - 0.5) * DX, GAMMA[i]))\n fp.close()\n\n # saves the coordinates axis values\n fp = open(\"{}/x.dat\".format(directory), \"w\")\n for i in range(1, NX + 1):\n fp.write(\"{}\\n\".format((i - 0.5) * DX))\n fp.close()\n\n fp = open(\"{}/k.dat\".format(directory), \"w\")\n for i in range(-NKX, NKX):\n fp.write(\"{}\\n\".format((i + 0.5) * DKX))\n fp.close()\n\n # saves normalized the Wigner quasi - distribution\n # == == == == == == == == == == == == == == == == == == == == == == ==\n fp = open(\"{}/Wigner_quasi_distribution_{}.dat\".format(directory, ind), \"w\")\n for i in range(1, NX + 1):\n for j in range(-NKX, NKX):\n if j == -NKX:\n fp.write(\"{} \".format(FW1[i][-NKX + 1 + NKX - 1]))\n else:\n fp.write(\"{} \".format(FW1[i][j + NKX - 1]))\n fp.write(\"\\n\")\n fp.close()\n\n # saves the electron probability density in x - space\n fp = open(\"{}/wigner_probability_density_{}.dat\".format(directory, ind), \"w\")\n for i in range(1, NX + 1):\n fp.write(\"{}\\n\".format(DENSX[i]))\n fp.close()\n\n # saves the electron probability density in k space\n fp = open(\"{}/Wigner_k_space_probability_density_{}.dat\".format(directory, ind), \"w\")\n for i in range(-NKX, NKX):\n if i == -NKX:\n fp.write(\"{} {}\\n\".format((i + 0.5) * DKX, DENSK[0]))\n else:\n fp.write(\"{} {}\\n\".format((i + 0.5) * DKX, DENSK[i + NKX - 1]))\n fp.close()\n\n\ndef _main():\n global NO, YES, NXM, NKXM, NPMAX, \\\n Q, HBAR, M, MSTAR, PI, \\\n NKX, FINAL, K, W, DIST, ISEED, UPDATED, \\\n FW1, DENSX, DENSK, PHI, DX, DKX, TIME, BKTQ, QH, \\\n P, VW, GAMMA, PTIME, \\\n SIGMA_WAVE_PACKET, X0_WAVE_PACKET, K0_WAVE_PACKET, \\\n BARRIER_POTENTIAL, BARRIER_POSITION, BARRIER_WIDTH, \\\n INUM, LX, LC, NX, DT, ITMAX, ANNIHILATION_FREQUENCY\n\n # Print the innitial number of particles\n print(\"\\nMAXIMUM NUMBER OF PARTICLES ALLOWED = {}\\n\\n\".format(NPMAX))\n\n # defines the potental barrier\n for i in range(1, NX + 1):\n PHI[i] = 0\n for i in range(1, NX + 1):\n pos = (i - 0.5) * DX\n if pos >= (BARRIER_POSITION - 0.5 * BARRIER_WIDTH) and pos <= (BARRIER_POSITION + 0.5 * BARRIER_WIDTH):\n PHI[i] += BARRIER_POTENTIAL\n\n # set gamma function to zero\n for i in range(1, NX + 1 + 1):\n GAMMA[i] = 0\n\n # get initial time\n nowtm = time.time()\n print('Simulation started: {}'.format(nowtm))\n\n ### Initilization\n\n devconf()\n density()\n save(0)\n\n print(\"\\n\")\n\n # updates the solution\n\n for i in range(1, ITMAX + 1):\n TIME += DT\n print(\"{} of {} -- Time={} \\n\\n\".format(i, ITMAX, TIME))\n\n if i == 1:\n print(\"calculating Wigner potential\\n\")\n # kernel()\n print(\"calculating gamma function\\n\")\n calculate_gamma()\n\n print(\"evolving wigner\\n\")\n\n WMC()\n print(\"calculating distribution function\\n\")\n distribution()\n print(\"calculating density in x- and k-space\\n\")\n density()\n if i % ANNIHILATION_FREQUENCY == 0 and i is not ITMAX:\n print(\"Annihiliation of particles\\n\")\n annihilation()\n save(i)\n\n print(\"\\n\")\n\n print(\"output files saved\\n\\n\")\n\n endt = time.time()\n\n print(\"Simulation ended: {}\".format(endt))\n\n\n# def _visualize(index, type):\n# if type == 1:\n# fp = open(\"wigner_probability_density_{}.dat\".format(index), 
'r')\n# XX=[]\n# YY=[]\n# for line in fp:\n# a, b = line.split(' ')\n# a=float(a)\n# b=float(b.replace('\\n',''))\n# XX.append(a)\n# YY.append(b)\n# plt.plot(XX, YY)\n#\n#\n# def _simulate():\n# f = plt.figure(figsize=(10, 7), dpi=80)\n# ax=f.gca()\n# time0 = time.time()\n# _visualize(1, 1)\n# time1 = time.time()\n# interval = 1000 * (1 / 60) - float(time1 - time0)\n#\n# ani = anim.FuncAnimation(f, _visualize, ITMAX,\n# fargs=(1),\n# interval=interval, blit=False)\n# plt.show()\n\n\nif __name__ == '__main__':\n _main()\n con = int(input(\"Do you want to run simulation: \"))\n if con == 1:\n simulate.simulate(1, int(ITMAX))\n else:\n exit(0)\n","sub_path":"Code/main_BACKUP_.py","file_name":"main_BACKUP_.py","file_ext":"py","file_size_in_byte":21135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"308073231","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('blog', '0009_category_slug'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='blogpost',\n name='body_html',\n field=models.TextField(null=True, blank=True),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='blogpost',\n name='post_date',\n field=models.DateTimeField(default=datetime.date(2014, 12, 10), auto_now_add=True),\n preserve_default=False,\n ),\n migrations.AlterField(\n model_name='blogpost',\n name='is_draft',\n field=models.BooleanField(default=True),\n ),\n migrations.AlterField(\n model_name='category',\n name='is_private',\n field=models.BooleanField(default=True),\n ),\n ]\n","sub_path":"blog/migrations/0010_auto_20141210_1200.py","file_name":"0010_auto_20141210_1200.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"495171183","text":"import os\nimport pandas as pd\nfrom app import app\nfrom flask import request\nfrom modules.search import Search\nfrom flask_restful import Resource\n\nclass Ping(Resource):\n def get(self):\n return {\n 'success': True,\n 'message': \"Pong!\"\n }\n\nclass Analyze(Resource):\n \n def __init__(self):\n self.ALLOWED_EXTENSIONS = set(['csv'])\n\n def allowed_file(self, filename):\n return '.' 
in filename and filename.rsplit('.', 1)[1].lower() in self.ALLOWED_EXTENSIONS\n\n def post(self):\n input_file = request.files['file'] #Might have to change the CURL filename on request.files['']\n print(input_file)\n\n if input_file.filename == '':\n return {\n 'success': False,\n 'message': 'Invalid request: No file detected.'\n }\n \n if not self.allowed_file(input_file.filename):\n return {\n 'success': False,\n 'message': 'File extension is not allowed'\n }\n else:\n input_file.save(os.path.join('/tmp/', input_file.filename))\n print(\"ANALYZING!\")\n big_five, the_one = Search.analyze(os.path.join('/tmp/', input_file.filename))\n os.remove(os.path.join('/tmp/', input_file.filename))\n return {\n 'success': True,\n 'input_sheet': input_file.filename,\n 'prediction': the_one,\n 'predicted_sheets': big_five\n }","sub_path":"resources.py","file_name":"resources.py","file_ext":"py","file_size_in_byte":1532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"635407060","text":"#!coding:utf8\nimport datetime,time\nimport json\nfrom collections import OrderedDict\n\nimport talib \nimport pandas as pd\nimport numpy as np\n\nfrom stk_util import time_count\nfrom tech_cross_type import get_crossx_type,get_angle,get_angle_diff_stage\n\ndef load_ta_pat_map():\n return json.load(open('talib_pattern_name.json'))\nTA_PATTERN_MAP = load_ta_pat_map()\n@time_count\ndef candle_analyse(df):\n '''\n input:OHLC dataframe\n '''\n cn_names = []\n ## calc all candle score\n open,high,low,close = df['open'],df['high'],df['low'],df['close'] \n df=df[['date']].copy()\n \n func_names = talib.get_function_groups()['Pattern Recognition']\n for func_name in func_names:\n func = getattr(talib,func_name) \n score_data = func(open,high,low,close)\n patt_name = func_name[3:]\n # if score_data!=0:\n df[patt_name] = score_data\n cn_names.append(patt_name)\n \n def make_dict(row):\n res_d = OrderedDict()\n for func_name in func_names:\n patt_name = func_name[3:]\n score = row[patt_name]\n if score!=0:\n res_d[patt_name] = score\n st= ','.join(['%s:%s'%(k,v) for k,v in res_d.items()])\n return st\n \n df['CDLList'] = df.apply(make_dict,axis=1)\n ###\n total_cdl_score = df[cn_names].sum(axis=1)\n df['CDLScore'] = total_cdl_score\n # print jsdump(cdl_info)\n return df\n\n@time_count \ndef pivot_line(open,high,low,close, mode='classic'):\n pivot = (high + low + 2* close )/4\n r1 = pivot*2 - low \n s1 = pivot*2 - high\n r2 = pivot + r1 - s1\n s2 = pivot - (r1 - s1)\n r3 = high + 2*(pivot - low)\n s3 = low - 2*(high - pivot)\n names = 'pivot,r1,s1,r2,s2,r3,s3'.split(',')\n res_d = OrderedDict(zip(names ,[pivot,r1,s1,r2,s2,r3,s3]))\n if mode == 'extend':\n res_d['sm1'] = (pivot+s1)/2\n res_d['sm2'] = (s1+s2)/2\n res_d['sm3'] = (s2+s3)/2\n res_d['rm1'] = (pivot+r1)/2\n res_d['rm2'] = (r1+r2)/2\n res_d['rm3'] = (r2+r3)/2\n return res_d\n\n@time_count\ndef pivot_line_analyse(open,high,low,close):\n df = pd.DataFrame({'open':open,'high':high,'low':low,'close': close})\n def pivot_line_judge(row):\n po= pivot_line(row['open'],row['high'],row['low'],row['close'])\n return ','.join(['%s:%0.2f'%(k,v) for k,v in po.items()])\n df = pd.DataFrame({'pivot_line':df.apply(pivot_line_judge,axis=1)})\n return df \n\n@time_count \ndef value_range_map(vlu,up_down_points,up_mid_down_name): \n for i,thre in enumerate(up_down_points):\n if vlu > thre:\n return up_mid_down_name[i]\n return up_mid_down_name[-1]\n \n@time_count\ndef boll_analyse(ohlcv,period=10):\n boll_up, boll_mid, boll_low = 
talib.BBANDS(ohlcv['close'],period)\n    scale = period\n    uag = get_angle(boll_up *scale, 2)\n    mag = get_angle(boll_mid*scale, 2) \n    lag = get_angle(boll_low*scale, 2)\n    df = pd.DataFrame({'boll_up': boll_up, 'boll_mid':boll_mid, 'boll_low':boll_low \n                        ,'bollup_ag':uag,'bollmid_ag':mag,'bolllow_ag':lag })\n    def boll_judge(row):\n        m_ag = row['bollmid_ag']\n        u_ag = row['bollup_ag']\n        # print m,u\n        if m_ag >= 0:\n            res = 'UP'\n        else:\n            res = 'DN'\n        res+='-'+get_angle_diff_stage(u_ag,m_ag)\n        return res\n    df['boll_stage'] = df.apply(boll_judge,axis=1) \n    return df\n    \n@time_count\ndef macd_analyse(ohlcv,period=10):\n    dif, dea, hist = talib.MACD(ohlcv['close'],period)\n    # pdb.set_trace()\n    # df = pd.DataFrame({'macd_dif': dif, 'macd_dea':dea, 'macd_hist':hist })  \n    res = []  \n    df = get_crossx_type(dif,dea)\n    df['macd_hist'] = hist\n    df = df.rename({'fast_line':'dif','slow_line':'dea','cross_stage':'macd_stage','fast_ag':'dif_ag','slow_ag':'dea_ag'},axis=1)\n    def macd_judge(row):\n        res = row['macd_stage']\n        if row['macd_hist']>0:\n            res +=' POS-HIS'\n        else:\n            res +=' NEG-HIS'  \n        return res\n    # pdb.set_trace()\n    df['macd_stage'] = df.apply(macd_judge,axis=1)  \n    return df\n    \n@time_count\ndef rsi_analyse(ohlcv,period=10):\n    close = ohlcv['close']\n    rsi = talib.RSI(close)\n    rsi_ag = get_angle(rsi,2)\n    df = pd.DataFrame({'rsi':rsi,'rsi_ag':rsi_ag})\n    def rsi_row(row):\n        return value_range_map( row['rsi'] ,[70,30],['OverBought','MID','OverSell'])\n    df['rsi_stage'] = df.apply(rsi_row, axis=1)\n    # pdb.set_trace()\n    return df\n\n@time_count\ndef cci_analyse(ohlcv,period=10):\n    high,low,close = ohlcv['high'],ohlcv['low'],ohlcv['close']\n    cci = talib.CCI(high,low,close)  \n    cci_ag = get_angle(cci,2)\n    df = pd.DataFrame({'cci':cci,'cci_ag':cci_ag})\n    def cci_row(row):\n        return value_range_map( row['cci'] ,[100,-100],['OverBought','MID','OverSell'])\n    df['cci_stage'] = df.apply(cci_row, axis=1)\n    # pdb.set_trace()\n    return df\n\n@time_count\ndef roc_analyse(ohlcv,period=10):\n    high,low,close = ohlcv['high'],ohlcv['low'],ohlcv['close']\n    roc = talib.ROCP(close)  \n    roc_ag = get_angle(roc,2)\n    maroc = talib.SMA(roc,14)\n    maroc_ag = talib.SMA(maroc,14)\n    df = pd.DataFrame({'roc':roc,'roc_ag':roc_ag,'maroc':maroc,'maroc_ag':maroc_ag})\n    def roc_row(row):\n        return value_range_map( row['roc'] ,[0,-0.0001],['STRONG','ZERO','WEAK'])\n    df['roc_stage'] = df.apply(roc_row, axis=1)\n    # pdb.set_trace()\n    return df\n\n@time_count\ndef kdj_analyse(ohlcv,period=10):\n    high,low,close = ohlcv['high'],ohlcv['low'],ohlcv['close']\n    slk,sld = talib.STOCH(high,low,close, fastk_period=9,slowk_period=3,slowk_matype=0,slowd_period=3,slowd_matype=0)\n    slj = 3*slk-2*sld\n    p_ag = get_angle(close)\n    ##\n    k_ag = get_angle(slk)\n    d_ag = get_angle(sld)\n    j_ag = get_angle(slj)\n    ##\n    k_aag = get_angle(k_ag)\n    d_aag = get_angle(d_ag)\n    j_aag = get_angle(j_ag)\n    df = pd.DataFrame({'kdj_k':slk,'kdj_d':sld,'kdj_j':slj\n                    ,'k_ag':k_ag,'d_ag':d_ag,'j_ag':j_ag\n                    ,'p_ag':p_ag\n                    ,'k_aag':k_aag,'d_aag':d_aag,'j_aag':j_aag\n                    })\n    def kdj_row(row):\n        sw = 'MIDL'\n        if row['kdj_k']>row['kdj_d']:\n            sw = 'KD-STRG,'\n        elif row['kdj_k']<row['kdj_d']:\n            sw = 'KD-WEAK,'\n        if (row['k_ag']>0 and row['k_aag']<0) or \\\n            (row['k_ag']<0 and row['k_aag']>0) or \\\n            (row['d_ag']>0 and row['d_aag']<0) or \\\n            (row['d_ag']<0 and row['d_aag']>0):\n            sw += 'TURN' ## slope and its change disagree: possible turning point\n        else:\n            sw += 'NORM' ## no reversal signal\n        res = [\n            sw  \n            # ,'K-'+value_range_map( row['kdj_k'] ,[80,20],['OB','MD','OS']) +'-'+ value_range_map( row['kdj_k'] ,[50,49.99],['S','M','W'])\n            # ,'D-'+value_range_map( row['kdj_d'] ,[80,20],['OB','MD','OS']) +'-'+ 
value_range_map( row['kdj_d'] ,[50,49.99],['S','M','W']) \n            # ,'J-'+value_range_map( row['kdj_j'] ,[80,20],['OB','MD','OS']) +'-'+ value_range_map( row['kdj_j'] ,[50,49.99],['S','M','W'])\n            ]\n        return ','.join(res)\n        \n    df['kdj_stage'] = df.apply(kdj_row, axis=1)\n    return df\n\n@time_count\ndef mtm_analyse(ohlcv,period1=6,period2=12):\n    close = ohlcv['close']\n    mom = talib.MOM(close,period1)\n    mamom = talib.SMA(mom,period2)\n    ag_mom = get_angle(mom)\n    ag_mamom = get_angle(mamom)\n    df = pd.DataFrame( {'mom':mom,'mamom':mamom,'ag_mom':ag_mom,'ag_mamom':ag_mamom} )\n    def mom_row(row):\n        res = []\n        if row['mom']-row['mamom']>0:\n            res.append('UP_MA')\n        elif row['mom']-row['mamom']<=0:\n            res.append('DN_MA')\n        if row['mom']>0:\n            res.append('POS_MOM')\n        elif row['mom']<=0:\n            res.append('NEG_MOM')\n            \n        return ' '.join(res)\n    df['mom_cross_stage'] = get_crossx_type(df['mom'],df['mamom'])['cross_stage']   \n    df['mom_stage'] = df.apply(mom_row, axis=1)\n    # pdb.set_trace()    \n    return df\n\n@time_count\ndef aroon_analyse(ohlcv,period=14):\n    high = ohlcv['high']\n    low = ohlcv['low']  \n    adown,aup = talib.AROON(high,low,period)\n    df = pd.DataFrame( {'aroon_down':adown,'aroon_up':aup} )\n    '''Open a long position when AroonUp is above AroonDown and AroonUp is above 50;\n    close the long position when AroonUp is below AroonDown or AroonUp is below 50;\n    open a short position when AroonDown is above AroonUp and AroonDown is above 50;\n    close the short position when AroonDown is below AroonUp or AroonDown is below 50.'''\n    def aroon_row(row):\n        res = []\n        # test the strong (entry) conditions first so the weaker exit\n        # conditions cannot shadow them\n        if row['aroon_up'] > row['aroon_down'] and row['aroon_up'] >50:\n            res.append('UP-STRONG')\n        elif row['aroon_down'] > row['aroon_up'] and row['aroon_down'] >50:\n            res.append('DN-STRONG')\n        elif row['aroon_up'] < row['aroon_down'] or row['aroon_up'] <50:\n            res.append('DN-WEAK')\n        elif row['aroon_up'] > row['aroon_down'] or row['aroon_down'] <50:\n            res.append('UP-WEAK')\n        else:\n            res.append('UNKNOWN')\n        return ' '.join(res)\n    df['aroon_stage'] = df.apply(aroon_row, axis=1)    \n    return df\n    \ndef get_weekday(dt):\n    if isinstance(dt,str):\n        dt = datetime.datetime.strptime(dt,'%Y-%m-%d')\n    dic = ['Mon','Tue','Wed','Thu','Fri','Sat','Sun']\n    w = dic[dt.weekday()]\n    return w\n\n@time_count\ndef weekday_analyse(df,col='date'):\n    wd =pd.DataFrame()\n    # pdb.set_trace()\n    wd['week_stage'] = df[col].apply(get_weekday)\n    return wd\n    \n@time_count    \ndef vwap_analyse(ohlcv,period = 3):    \n    close,high,low,volume = ohlcv['close'],ohlcv['high'],ohlcv['low'],ohlcv['volume']\n    df = ohlcv\n    mse = np.square(close-(high+low)/2)\n    df['mse'] = mse\n    vwap,vswap = [],[]\n    # pdb.set_trace()\n    \n    vsum = df['volume'].rolling(period).sum()\n    pvsum = df['volume']*df['close']/vsum*period\n    ma_pvsum = talib.SMA(pvsum, period)\n    \n    s_vsum = (df['volume']*df['mse']).rolling(period).sum()\n    s_pvsum = (df['volume']*df['mse']*df['close'])/s_vsum*period\n    s_ma_pvsum = talib.SMA(s_pvsum, period)  \n    vwap = ma_pvsum\n    vswap = s_ma_pvsum\n    ndf = pd.DataFrame(df['close'])\n    ndf['vwap'] = vwap\n    ndf['vwap_stage'] = get_crossx_type(close,vwap)['cross_stage']\n    ndf['vswap'] = vswap\n    ndf['vswap_stage'] = get_crossx_type(close,vswap)['cross_stage']\n    ndf.pop('close')\n    return ndf\n    \n@time_count\ndef ma_analyse(ohlcv,period=10,target_col='close'):\n    close = ohlcv[target_col]\n    ma = OrderedDict()\n    prefix=''\n    if target_col!='close':\n        prefix = '%s_'%target_col\n    # cycles = [5,10,20,40,60,120,240]\n    cycles = [3,5,20,60]\n    for cyc in cycles:\n        ma[prefix+'EMA%s'%cyc] = talib.EMA(close,cyc)\n        ma[prefix+'SMA%s'%cyc] = talib.SMA(close,cyc)\n    df = pd.DataFrame(ma)\n    ## ema_sma_dif_judge\n    def ema_sma_dif_judge(row):\n        es_res = []\n        for cyc in cycles:\n            ema = 
row[prefix+'EMA%s'%cyc]\n sma = row[prefix+'SMA%s'%cyc]\n if ema>sma:\n s= '%s:UP'%cyc\n else:\n s='%s:DN'%cyc\n es_res.append(s)\n return ','.join(es_res)\n ## \n def ma_stage_judge(row,ma_type):\n ma_res = []\n for i,cyc in enumerate(cycles[:-1]):\n ma = row[prefix+'%s%s'%(ma_type,cyc)]\n ama = row[prefix+'%s%s'%(ma_type,cycles[i+1])]\n if ma >= ama:\n s= '%s-%s:UP'%(cyc,cycles[i+1])\n else:\n s= '%s-%s:DN'%(cyc,cycles[i+1])\n ma_res.append(s)\n return ','.join(ma_res)\n ##\n df[ prefix +'ma_es_dif_stage'] = df.apply(ema_sma_dif_judge,axis=1)\n df[ prefix +'ema_stage'] = df.apply(lambda row:ma_stage_judge(row,'EMA'), axis=1)\n df[ prefix +'sma_stage'] = df.apply(lambda row:ma_stage_judge(row,'SMA'), axis=1) \n return df\n\n@time_count\ndef td9_analyse(ohlcv):\n close = ohlcv['close']\n return ohlcv\n ","sub_path":"stk_console/tech_analyse_indicator.py","file_name":"tech_analyse_indicator.py","file_ext":"py","file_size_in_byte":11927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"496493167","text":"from django.http import HttpResponseRedirect\nfrom django.shortcuts import render, redirect\nfrom django.views import View\nfrom datetime import datetime\n\nimport logging\nimport os\nimport subprocess\nimport yaml\nimport pandas as pd\nimport numpy as np\nimport string\nimport random\nfrom dashboard.models import *\nimport shutil\nimport sqlite3\n\nlogger = logging.getLogger('django')\n\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nupload_path = \"upload/\"\ndata_path = \"cohana/\"\n\nif not os.path.exists(upload_path):\n os.mkdir(upload_path)\n\nif not os.path.exists(data_path):\n os.mkdir(data_path)\n\nfieldTypes = {\n 'User ID':{\n 'type': \"UserKey\",\n \"datatype\": \"String\"\n },\n 'Event':{\n 'type': \"Action\",\n \"datatype\": \"String\"\n },\n 'Event Related':{\n 'type': \"Segment\",\n \"datatype\": \"String\"\n },\n 'Time':{\n 'type': \"ActionTime\",\n \"datatype\": \"Int32\"\n },\n 'Value':{\n 'type': \"Metric\",\n \"datatype\": \"Int32\"\n },\n}\n\nclass Upload( View ):\n\n def get(self, request):\n return render(request, \"upload.html\")\n\n def post( self,request ):\n errors = {\n \"type\": \"Uploading data\",\n \"advice\": \"Please upload your data!\",\n }\n\n if \"csv_file\" not in request.FILES:\n errors['details'] = [\"No dataset is uploaded!\"]\n return render(request, \"error.html\", errors)\n\n csv_file_content = request.FILES[\"csv_file\"]\n file_name = request.POST.get(\"name\")\n\n if file_name == \"\":\n errors['details'] = [\"Please input a name\"]\n return render(request, \"error.html\", errors)\n\n rand_str = ''.join(random.sample(string.ascii_letters + string.digits, 8))\n file_save = datetime.now().strftime('%Y%m%d%H%M%S') + rand_str\n f = open(upload_path + file_save + \".csv\", 'wb')\n for chunk in csv_file_content.chunks():\n f.write(chunk)\n f.close()\n\n rawdata = pd.read_csv(upload_path + file_save + \".csv\")\n columns = rawdata.columns\n columns = [i.lower().strip() for i in columns if i != '' and i != '\\r']\n rawdata.columns = columns\n\n if \"event\" in list(columns):\n request.session['relateds'] = list(rawdata[\"event\"].unique())\n else:\n request.session['relateds'] = []\n\n request.session['columns'] = columns\n request.session['csv_name'] = file_name\n request.session['csv_save'] = file_save\n\n user = User.objects.get(id=request.user.id)\n try:\n his = upload_history.objects.get(user_id=user)\n his.file_save=file_save\n his.save()\n except:\n 
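# no history row exists for this user yet, so fall back to creating one\n            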
upload_history.objects.create(user_id=user, file_save=file_save)\n\n his_all = []\n for his in upload_history.objects.all():\n his_all.append(his.file_save)\n\n files = os.listdir(upload_path)\n for file in files:\n if file[:-4] not in his_all:\n os.remove(upload_path+file)\n\n return redirect(\"/column_list/\")\n\n\ndef get_FileSize(filePath):\n fsize = os.path.getsize(str(filePath))\n fsize = fsize/float(1024*1024)\n return round(fsize,2)\n\ndef getFoldSize(foldPath, size=0):\n for root, dirs, files in os.walk(foldPath):\n for f in files:\n size += os.path.getsize(os.path.join(root, f))\n return size\n\n\nclass Column_list( View ):\n def get(self, request):\n if request.session['csv_save'] == \"error\":\n return redirect(\"/upload/\")\n\n result = {}\n columns = request.session['columns']\n relateds = request.session['relateds']\n\n result['columns'] = request.session['columns']\n result['options'] = fieldTypes\n\n result['column_type'] = {}\n\n if \"id\" in columns:\n result['column_type'][\"id\"] = \"User ID\"\n columns.remove(\"id\")\n if \"time\" in columns:\n result['column_type'][\"time\"] = \"Time\"\n columns.remove(\"time\")\n if \"event\" in columns:\n result['column_type'][\"event\"] = \"Event\"\n columns.remove(\"event\")\n for related_col in relateds:\n if related_col.lower() in columns:\n result['column_type'][related_col] = \"Event Related\"\n columns.remove(related_col)\n for col in columns:\n result['column_type'][col] = \"Value\"\n\n return render(request, \"column_list.html\", result)\n\n def post( self,request ):\n errors = {\n \"type\": \"Loading data\",\n \"advice\": \"Please check your data format!\",\n }\n\n file_save = request.session['csv_save']\n\n\n if request.session['csv_save'] == \"error\":\n return redirect(\"/upload/\")\n else:\n data = pd.read_csv(upload_path + file_save + \".csv\")\n\n # check the format of the dataset\n check_list = ['User ID', 'Time', 'Event']\n for check in check_list:\n check_cols = []\n for field in request.session['columns']:\n if request.POST.get(field) == check:\n check_cols.append(field)\n\n if len(check_cols)>1 or len(check_cols) == 0:\n if len(check_cols)>1:\n errors['details'] = [\"More than one columns denote the %s: %s\" % (check,str(check_cols))]\n if len(check_cols) == 0:\n errors['details'] = [\"No column denotes the %s!\" %(check)]\n return render(request, \"error.html\", errors)\n\n\n if \"value\" not in request.session['columns']:\n errors[\"details\"]= [\"No column names value!\"]\n return render(request, \"error.html\", errors)\n\n events = list(data['event'].unique())\n for field in request.session['columns']:\n if field not in events and fieldTypes[request.POST.get(field)]['type'] == \"Segment\":\n errors[\"details\"] = [\"%s is not an event, please check again!\" %(field)]\n return render(request, \"error.html\", errors)\n\n # preprocess the dataset\n sub_path = data_path + \"/%s\" % request.session['csv_save']\n if not os.path.exists(sub_path):\n os.mkdir(sub_path)\n\n with open(sub_path + '/table.yaml', 'w') as f:\n fields = []\n for field in request.session['columns']:\n fields.append({\n \"name\": field.replace('\\r', ''),\n \"fieldType\": fieldTypes[request.POST.get(field)]['type'],\n \"dataType\": fieldTypes[request.POST.get(field)]['datatype'],\n })\n if fieldTypes[request.POST.get(field)]['type'] == \"ActionTime\":\n data['time'] = pd.to_datetime(data['time'])\n data['time'] = data['time'].dt.strftime(\"%Y-%m-%d\")\n\n f.write(yaml.dump({'fields': fields, 'charset': 'utf-8'}, 
default_flow_style=False))\n\n data.to_csv(data_path + \"%s/data.csv\" % file_save, index=False)\n\n return_info = subprocess.Popen('utils/preprocess.sh '+ str(request.session['csv_save']), shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n\n sh_results = []\n for next_line in return_info.stdout:\n sh_results.append(next_line.decode(\"utf-8\", \"ignore\"))\n if sh_results[-1][:19]!= \"Loading Finished in\":\n for next_line in sh_results:\n logger.info(next_line)\n\n shutil.rmtree(data_path + file_save)\n os.remove(upload_path + file_save + \".csv\")\n\n request.session['csv_save'] = \"error\"\n\n errors = {\n \"type\": \"Loading data\",\n \"advice\": \"Please check your data format!\",\n \"details\": sh_results,\n }\n return render(request, \"error.html\", errors)\n\n logger.info(\"[*] Loading data successfully.\")\n os.remove(upload_path + file_save + \".csv\")\n\n demographic_info = self.get_demographic_info(request, data)\n with open(sub_path + '/demographic.yaml', 'w') as f:\n f.write(yaml.dump(demographic_info, default_flow_style=False))\n\n user = User.objects.get(id=request.user.id)\n new_file = csv_file(\n user_id=user,\n file_name=request.session['csv_name'],\n file_save=request.session['csv_save'],\n file_size=get_FileSize(data_path + file_save + \"/data.csv\"),\n num_ids=demographic_info['User ID']['data'],\n num_records=len(data),\n involved_dates= \"%s to %s\" %(demographic_info['Time']['data'][0], demographic_info['Time']['data'][1])\n )\n new_file.save()\n\n self.clean_dim()\n\n return redirect(\"/database/\")\n\n\n def value_partition(self, col):\n start = min(col)\n end = max(col)\n interval = int((end - start) / 8 + 0.5)\n sub_col = {}\n\n sub_col['y'] = []\n sub_col['x'] = []\n\n for i in range(9):\n sub_col['y'].append(\"[%s,%s)\" % (start + interval * i, min(start + interval * (i + 1) - 1, end)))\n sub_col['x'].append(0)\n for i in col:\n sub_col['x'][int((i - start) / interval)] += 1\n return sub_col\n\n def get_demographic_info(self, request, df):\n\n cols = df.columns\n events = list(df['event'].unique())\n results = {}\n results['Value'] = []\n value_cols = []\n unique_id = \"\"\n\n for col in cols:\n if request.POST.get(col) == \"User ID\":\n sub_col = {\n \"name\": col,\n \"data\": len(df[col].unique()),\n }\n results['User ID'] = sub_col\n unique_id = col\n value_cols.append(col)\n elif request.POST.get(col) == \"Time\":\n sub_col = {\n \"name\": col,\n \"data\": [np.min(df['time']), np.max(df['time'])],\n }\n results['Time'] = sub_col\n elif request.POST.get(col) == \"Event\":\n sub_col = {\n \"name\": col,\n \"data\": dict(df[col].value_counts()),\n }\n results['Event'] = sub_col\n elif col != \"value\" and col not in events:\n value_cols.append(col)\n\n temp = df[value_cols]\n temp = temp.drop_duplicates()\n value_cols.remove(unique_id)\n\n for col in value_cols:\n lens = len(temp[col].unique())\n if lens <= 8:\n sub_col = {\n \"name\": col,\n \"type\": \"pie\",\n \"data\": dict(temp[col].value_counts())\n }\n else:\n sub_col = {\n \"name\": col,\n \"type\": \"bar\",\n \"data\": self.value_partition(temp[col])\n }\n\n results['Value'].append(sub_col)\n return results\n\n def clean_dim(self):\n conn = sqlite3.connect('dim.db')\n cur = conn.cursor()\n cur.execute(\"select * from sqlite_master;\")\n files_list = os.listdir(data_path)\n\n for table in cur.fetchall():\n if table[1] not in files_list:\n cur.execute(\"DROP TABLE '%s';\" % 
table[1])\n\n","sub_path":"dashboard/views/upload.py","file_name":"upload.py","file_ext":"py","file_size_in_byte":11365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"440013506","text":"class Node:\n def __init__(self,key):\n self.left=None\n self.right=None\n self.val=key\ndef inorder(root):\n if root:\n inorder(root.left)\n print(root.val)\n inorder(root.right)\ndef postorder(root):\n if root:\n postorder(root.left)\n postorder(root.right)\n print(root.val)\ndef preorder(root):\n if root:\n print(root.val)\n preorder(root.left)\n preorder(root.right)\n\nroot=Node(1)\nroot.left=Node(2)\nroot.right=Node(3)\nroot.left.left=Node(4)\nroot.left.right=Node(5)\nroot.right.left=Node(6)\nroot.right.right=Node(7)\nprint(\"Inorder traversal\")\ninorder(root)\nprint(\"\\n post order traversal\")\npostorder(root)\nprint(\"\\n preorder order\")\npreorder(root)\n","sub_path":"tree/traverse.py","file_name":"traverse.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"538101111","text":"#! /usr/bin/env python\n\nVERSION='0.1'\nAPPNAME='script'\n\nsrcdir = '.'\nblddir = 'build'\n\nimport sys, os\nimport waf_dynamo, waf_ddf\nimport Options\n\ndef init():\n pass\n\ndef set_options(opt):\n opt.tool_options('waf_dynamo')\n\ndef configure(conf):\n conf.check_tool('waf_dynamo')\n conf.check_tool('waf_ddf')\n\n waf_ddf.configure(conf)\n\n conf.sub_config('src')\n\n conf.env.append_value('CPPPATH', \"default/src\")\n conf.env['STATICLIB_DLIB'] = ['dlib', 'mbedtls']\n conf.env['STATICLIB_DDF'] = 'ddf'\n conf.env['STATICLIB_RESOURCE'] = 'resource'\n conf.env['STATICLIB_EXTENSION'] = 'extension'\n\n conf.env.append_unique('CCDEFINES', 'DLIB_LOG_DOMAIN=\"SCRIPT\"')\n conf.env.append_unique('CXXDEFINES', 'DLIB_LOG_DOMAIN=\"SCRIPT\"')\n\ndef build(bld):\n python_path = os.environ.get('PYTHONPATH', '')\n os.environ['PYTHONPATH'] = os.path.abspath('build/default/src/script') + os.pathsep + python_path\n bld.add_subdirs('src')\n\ndef shutdown():\n if not Options.commands['build']:\n return\n\n # We need to add default/src/ddf to PYTHONPATH here. 
(ddf_extensions_pb2.py and plugin_pb2.py)\n # Only required 'in' ddf-lib.\n\n os.environ['DM_SAVEGAME_HOME'] = 'build'\n sys.path.append('.')\n\n import server\n\n serv = None\n if not getattr(Options.options, 'skip_tests', False):\n serv = server.Server()\n serv.start()\n try:\n waf_dynamo.run_tests(valgrind = True)\n finally:\n if serv:\n serv.stop()\n","sub_path":"engine/script/wscript","file_name":"wscript","file_ext":"","file_size_in_byte":1475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"91396673","text":"# -*- coding: utf-8 -*-\nimport json\nfrom common.basic_troop_service_entity_handler import \\\n BasicTroopServiceEntityHandler\n\nDOMAIN_NAME = ''\nURL = ''\nBODY_DATA = ''\n_BODY_DATA = ''\nif BODY_DATA:\n _BODY_DATA = json.loads(BODY_DATA)\nQUERY_DATA = ''\nMETHOD_TYPE = ''\nCONTENT_TYPE = ''\nREQUEST_DATA = _BODY_DATA or QUERY_DATA\nREQUEST_HEADERS = ''\nHAS_DATA_PATTERN = False\n\n\nclass Foo(BasicTroopServiceEntityHandler):\n \"\"\"\n accessible attribute list for response data:\n %s\n ==================\n kwargs for request:\n Please refer to the constants BODY_DATA or QUERY_DATA request parameters\n \"\"\"\n def __init__(self, domain_name=DOMAIN_NAME, **kwargs):\n super(Foo, self).__init__(\n domain_name=domain_name,\n url_string=URL,\n data=REQUEST_DATA,\n method_type=METHOD_TYPE,\n request_content_type=CONTENT_TYPE,\n request_headers=REQUEST_HEADERS,\n has_data_pattern=HAS_DATA_PATTERN,\n **kwargs\n )\n\n def _set_data_pattern(self, *args, **kwargs):\n pass\n\n\nif __name__ == '__main__':\n e = Foo()\n e.send_request()\n","sub_path":"tools/code_gen/restful_service_entity_generator/har_entity_template.py","file_name":"har_entity_template.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"488759122","text":"#!/usr/bin/env python3\n\nimport argparse \nimport json \nimport numpy as np \nimport struct \nimport pickle \n\nimport config.config_api as config_api \nimport simulator.sim_api as sim_api \n\nNUM_THREADS_X = 32\nNUM_THREADS_Y = 4\nBLOCK_SIZE = 32\n\n\ndef _py_conv(host_input, host_kernel, num_rows, num_cols):\n output_matrix = np.zeros((num_rows * num_cols))\n num_blocks = (num_rows * num_cols) // (BLOCK_SIZE * BLOCK_SIZE)\n for bid in range(num_blocks):\n for i in range(BLOCK_SIZE):\n for j in range(BLOCK_SIZE):\n sum_val = 0.0\n for kx in range(3):\n for ky in range(3):\n row_id = min(max(i + kx - 1, 0), BLOCK_SIZE - 1)\n col_id = min(max(j + ky - 1, 0), BLOCK_SIZE - 1)\n slice_id = row_id // NUM_THREADS_Y \n row_id = row_id % NUM_THREADS_Y \n sum_val += (host_kernel[kx * 3 + ky] * host_input[\n slice_id * NUM_THREADS_X * NUM_THREADS_Y \n * num_blocks + bid * NUM_THREADS_X * NUM_THREADS_Y\n + row_id * NUM_THREADS_X + col_id])\n\n slice_id = i // NUM_THREADS_Y \n row_id = i % NUM_THREADS_Y \n output_matrix[\n slice_id * NUM_THREADS_X * NUM_THREADS_Y * num_blocks\n + bid * NUM_THREADS_X * NUM_THREADS_Y \n + row_id * NUM_THREADS_X + j] = sum_val \n\n return output_matrix.astype(np.float32)\n\n\ndef _return_true(event):\n return True \n\n\ndef main():\n\n parser = argparse.ArgumentParser(\n description=\"Run the simulation for the convolution\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter \n )\n\n # Application dependent parameters \n parser.add_argument(\"input_prog_file\", help=\"Specify the path of \"\n \"executable program\")\n parser.add_argument(\"--num_rows\", type=int, default=4096)\n 
parser.add_argument(\"--num_cols\", type=int, default=4096)\n\n # Hardware dependent parameters \n parser.add_argument(\"--hardware_config\", \"-c\", default=None, \n help=\"Specify the path of hardware config\")\n\n # Runtime parameters\n parser.add_argument(\"--num_threads_x\", type=int, default=32)\n parser.add_argument(\"--num_threads_y\", type=int, default=4)\n parser.add_argument(\"--num_blocks\", type=int, default=512)\n parser.add_argument(\"--mapping_method\", \"-m\", default=None,\n help=\"Specify the config of mapping blocks to \"\n \"hardware vaults\")\n\n # Output files \n parser.add_argument(\"--output_perf_file\", default=None,\n help=\"The output file storing performance metrics\")\n parser.add_argument(\"--output_trace_file\", default=None,\n help=\"The output file storing trace events\")\n\n args = parser.parse_args() \n assert (args.num_threads_x == NUM_THREADS_X \n and args.num_threads_y == NUM_THREADS_Y) \n\n # Load hardware configuration from file:\n hw_config_dict = config_api.load_hardware_config(\n overwrite_config_file_path=args.hardware_config\n )\n hw_config_dict[\"display_simulation_progress\"] = True \n\n # Load block mapping from file \n if args.mapping_method is None:\n mapping_dict = {}\n print(\"Total number of cores:\", hw_config_dict[\"total_num_cores\"])\n for i in range(args.num_blocks):\n # mapping_dict[i] = 0\n mapping_dict[i] = i % hw_config_dict[\"total_num_cores\"]\n else:\n with open(args.mapping_method, \"r\") as f:\n raw_dict = json.load(f)\n mapping_dict = {}\n for key in raw_dict:\n mapping_dict[int(key)] = raw_dict[key]\n\n # Load kernel from executable program file \n with open(args.input_prog_file, \"rb\") as f:\n opt_kernel = pickle.load(f)\n\n # Init MPU hardware and allocate memory\n if args.output_trace_file is None:\n hardware = sim_api.init_hardware(hw_config_dict)\n else:\n hardware = sim_api.init_hardware(\n hw_config_dict, filter_func=_return_true)\n print(\"Hardware initialization: Success!\")\n\n ptr_input = hardware.mem.allocate(args.num_rows * args.num_cols * 4)\n ptr_kernel = hardware.mem.allocate(9 * 4)\n ptr_output = hardware.mem.allocate(args.num_rows * args.num_cols * 4)\n hardware.mem.finalize() \n\n input_image = np.random.rand(\n args.num_rows * args.num_cols).astype(np.float32)\n input_kernel = np.random.rand(9).astype(np.float32)\n hardware.mem.set_value(ptr_input, input_image.tobytes())\n hardware.mem.set_value(ptr_kernel, input_kernel.tobytes())\n print(\"Hardware memory set-up: Success!\") \n \n # Start simulation\n print(\"Starting simulation...\")\n total_cycles, sim_freq = hardware.run_simulation(\n kernel=opt_kernel, \n kernel_args=[ptr_input, ptr_kernel, ptr_output, \n args.num_rows, args.num_cols],\n grid_dim=(1, 1, args.num_blocks),\n block_dim=(1, args.num_threads_y, args.num_threads_x),\n block_schedule=mapping_dict, \n )\n\n print(\n \"Simulation finished: {} cycles at {} MHz\".format(\n total_cycles, sim_freq)\n )\n print(\"Total time: {} us\".format(\n total_cycles / sim_freq)\n )\n\n # Compare results after simulation \n output_buffer = hardware.mem.get_value(\n ptr_output, args.num_rows * args.num_cols * 4)\n sim_results = np.array(\n struct.unpack(\n \"{}f\".format(args.num_rows * args.num_cols), output_buffer)\n ).astype(np.float32)\n\n ground_truth = _py_conv(\n input_image, input_kernel, args.num_rows, args.num_cols)\n np.testing.assert_allclose(sim_results, ground_truth, atol=1e-5)\n print(\"Correctness check: Success!\")\n\n # Dump output files \n if args.output_perf_file is not None:\n 
hardware.dump_perf_metrics(args.output_perf_file)\n\n if args.output_trace_file is not None:\n hardware.dump_timeline(args.output_trace_file)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"benchmark/conv/conv_sim.py","file_name":"conv_sim.py","file_ext":"py","file_size_in_byte":6123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"93751562","text":"def mean(numbers):\n if numbers:\n if isinstance(numbers, dict):\n res=[]\n for value in numbers.values():\n res += value\n return sum(res)/len(res)\n else:\n return sum(numbers)/len(numbers)\n else:\n return 0\n\ndef courses_rating(persons, course):\n res = 0\n lenth = 0\n for person in persons:\n if course in person.grades:\n if person.grades[course]:\n res += mean(person.grades[course])\n lenth += 1\n if lenth != 0:\n return res/lenth\n\nclass Student:\n def __init__(self, name, surname, gender):\n self.name = name\n self.surname = surname\n self.gender = gender\n self.finished_courses = []\n self.courses_in_progress = []\n self.grades = {}\n def rate_lecturer(self, lecturer, course, grade):\n if isinstance(lecturer, Lecturer) and course in self.courses_in_progress and course in lecturer.courses_attached:\n if isinstance(grade, int) and (0 <= grade <= 10) :\n if course in lecturer.grades:\n lecturer.grades[course] += [grade]\n else:\n lecturer.grades[course] = [grade]\n else:\n print(\"Оценка введена неверно\")\n else:\n return 'Ошибка'\n def __str__(self):\n info = str(f\"Имя: {self.name}\\n\"\n f\"Фамилия: {self.surname}\\n\"\n f\"Средняя оценка: {mean(self.grades)}\\n\"\n f\"Курсы в процессе обучения: {', '.join(self.courses_in_progress)}\\n\"\n f\"Завершенные курсы: {', '.join(self.finished_courses)}\\n\")\n return info\n def __lt__(self, other):\n return mean(self.grades) < mean(other.grades)\n def __gt__(self, other):\n return mean(self.grades) > mean(other.grades)\n def __le__(self, other):\n return mean(self.grades) <= mean(other.grades)\n def __ge__(self, other):\n return mean(self.grades) >= mean(other.grades)\nclass Mentor:\n def __init__(self, name, surname):\n self.name = name\n self.surname = surname\n self.courses_attached = []\n\nclass Lecturer(Mentor):\n def __init__(self, name, surname):\n super().__init__(name, surname)\n self.grades = {}\n def __str__(self):\n return f\"Имя: {self.name}\\nФамилия: {self.surname}\\nСредняя оценка: {mean(self.grades)}\"\n def __lt__(self, other):\n return mean(self.grades) < mean(other.grades)\n def __gt__(self, other):\n return mean(self.grades) > mean(other.grades)\n def __le__(self, other):\n return mean(self.grades) <= mean(other.grades)\n def __ge__(self, other):\n return mean(self.grades) >= mean(other.grades)\n\nclass Reviewer(Mentor):\n def __str__(self):\n return f\"Имя: {self.name}\\nФамилия: {self.surname}\"\n\n def rate_hw(self, student, course, grade):\n if isinstance(student, Student) and course in self.courses_attached and course in student.courses_in_progress:\n if course in student.grades:\n student.grades[course] += [grade]\n else:\n student.grades[course] = [grade]\n else:\n return 'Ошибка'\n\nStudents = [Student('Ruoy', 'Eman', 'your_gender'),\n Student('Ron', 'Wuizli', 'your_gender')]\nStudents[0].courses_in_progress += ['Python']\nStudents[0].courses_in_progress += ['Git']\nStudents[0].finished_courses += ['Introduction to the programming']\nStudents[1].courses_in_progress += ['Python']\nStudents[1].finished_courses += ['Paint']\nStudents[1].finished_courses += ['Calculator']\n\nLecturers = 
[Lecturer('Leps', 'Grisha'),\n            Lecturer('Jigurda', 'Nikita')]\nLecturers[0].courses_attached += ['Python']\nLecturers[0].courses_attached += ['Java']\nLecturers[1].courses_attached += ['JS']\n\nReviewers = [Reviewer('Nikolaev', 'Igor'),\n             Reviewer('Baskov', 'Nikolay')]\nReviewers[0].courses_attached += ['Python']\nReviewers[0].courses_attached += ['Git']\nReviewers[1].courses_attached += ['Python']\nReviewers[1].courses_attached += ['JS']\n\nReviewers[0].rate_hw(Students[0], 'Python', 8)\nReviewers[0].rate_hw(Students[0], 'Python', 9)\nReviewers[0].rate_hw(Students[0], 'Git', 10)\nReviewers[0].rate_hw(Students[1], 'Python', 10)\nReviewers[1].rate_hw(Students[1], 'Python', 6)\n\nStudents[0].rate_lecturer(Lecturers[0], 'Python', 10)\nStudents[0].rate_lecturer(Lecturers[0], 'Python', 9)\nStudents[1].rate_lecturer(Lecturers[0], 'Python', 8)\nStudents[1].rate_lecturer(Lecturers[1], 'JS', 7)\n\nprint(f\"Reviewers[0]:\\n{Reviewers[0]}\\n\")\nprint(f\"Lecturers[0]:\\n{Lecturers[0]}\\n\")\nprint(f\"Lecturers[1]:\\n{Lecturers[1]}\\n\")\nprint(f\"Students[0]:\\n{Students[0]}\\n\")\nprint(f\"Students[1]:\\n{Students[1]}\\n\")\n\nprint(\"Lecturers[0] > Lecturers[1] ==>\", Lecturers[0] > Lecturers[1])\nprint(\"Students[0] > Students[1] ==>\", Students[0] > Students[1])\n\nprint(f\"\\nСредняя оценка домашних работ по Python: {courses_rating(Students, 'Python')}\")\nprint(f\"Средняя оценка лекторов по курсу Python: {courses_rating(Lecturers, 'Python')}\")\n","sub_path":"HW.py","file_name":"HW.py","file_ext":"py","file_size_in_byte":5258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"405385414","text":"import gzip\nimport json\nimport re\n\nwith gzip.open(\"jawiki-country.json.gz\", \"rt\") as f:\n    for i in f:\n        obj=json.loads(i)\n\n        if re.match(r'イギリス', obj['title']):\n            text = obj['text']\n            pattern = re.compile(r'.*\\[\\[Category:(.*?)(?:\\|.*)?\\]\\].*', re.MULTILINE)\n            for c in pattern.findall(text):\n                print(c)\n","sub_path":"chap3/22.py","file_name":"22.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"638873839","text":"#!/usr/bin/python\n# -*- encoding: utf8 -*-\n\n\"\"\"Support functions to do with the core language.\n\nAuthor: Rudolf Cardinal (rudolf@pobox.com)\nCreated: 2013\nLast update: 21 Sep 2015\n\nCopyright/licensing:\n\n    Copyright (C) 2013-2015 Rudolf Cardinal (rudolf@pobox.com).\n\n    Licensed under the Apache License, Version 2.0 (the \"License\");\n    you may not use this file except in compliance with the License.\n    You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n    Unless required by applicable law or agreed to in writing, software\n    distributed under the License is distributed on an \"AS IS\" BASIS,\n    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n    See the License for the specific language governing permissions and\n    limitations under the License.\n\"\"\"\n\nimport importlib\nimport pkgutil\nimport six\nfrom six.moves import range\n\n\n# =============================================================================\n# enum\n# =============================================================================\n\ndef enum(**enums):\n    \"\"\"Enum support, as at http://stackoverflow.com/questions/36932\"\"\"\n    return type('Enum', (), enums)\n\n\n# =============================================================================\n# AttrDict\n# 
=============================================================================\n\nclass AttrDict(dict):\n    # http://stackoverflow.com/questions/4984647\n    def __init__(self, *args, **kwargs):\n        super(AttrDict, self).__init__(*args, **kwargs)\n        self.__dict__ = self\n\n\n# =============================================================================\n# Other dictionary operations\n# =============================================================================\n\ndef merge_dicts(*dict_args):\n    '''\n    Given any number of dicts, shallow copy and merge into a new dict,\n    precedence goes to key value pairs in latter dicts.\n    '''\n    # http://stackoverflow.com/questions/38987\n    result = {}\n    for dictionary in dict_args:\n        result.update(dictionary)\n    return result\n\n\n# =============================================================================\n# Helper functions\n# =============================================================================\n\ndef convert_to_bool(x, default=None):\n    if not x:  # None, zero, blank string...\n        return default\n    try:\n        return int(x) != 0\n    except (TypeError, ValueError):\n        pass\n    try:\n        return float(x) != 0\n    except (TypeError, ValueError):\n        pass\n    if not isinstance(x, six.string_types):\n        raise Exception(\"Unknown thing being converted to bool: {}\".format(x))\n    x = x.upper()\n    if x in [\"Y\", \"YES\", \"T\", \"TRUE\"]:\n        return True\n    if x in [\"N\", \"NO\", \"F\", \"FALSE\"]:\n        return False\n    raise Exception(\"Unknown thing being converted to bool: {}\".format(x))\n\n\ndef convert_attrs_to_bool(obj, attrs, default=None):\n    for a in attrs:\n        setattr(obj, a, convert_to_bool(getattr(obj, a), default=default))\n\n\ndef convert_attrs_to_uppercase(obj, attrs):\n    for a in attrs:\n        value = getattr(obj, a)\n        if value is None:\n            continue\n        setattr(obj, a, value.upper())\n\n\ndef convert_attrs_to_lowercase(obj, attrs):\n    for a in attrs:\n        value = getattr(obj, a)\n        if value is None:\n            continue\n        setattr(obj, a, value.lower())\n\n\ndef convert_attrs_to_int(obj, attrs, default=None):\n    for a in attrs:\n        value = getattr(obj, a)\n        try:\n            value = int(value)\n        except (TypeError, ValueError):\n            value = default\n        setattr(obj, a, value)\n\n\ndef raise_if_attr_blank(obj, attrs):\n    for a in attrs:\n        value = getattr(obj, a)\n        if value is None or value == \"\":\n            raise Exception(\"Blank attribute: {}\".format(a))\n\n\ndef count_bool(blist):\n    return sum([1 if x else 0 for x in blist])\n\n\ndef chunks(l, n):\n    \"\"\" Yield successive n-sized chunks from l.\n    \"\"\"\n    for i in range(0, len(l), n):\n        yield l[i:i + n]\n\n\ndef is_integer(s):\n    try:\n        int(s)\n        return True\n    except ValueError:\n        return False\n\n\n# =============================================================================\n# Module management\n# =============================================================================\n\ndef import_submodules(package, recursive=True):\n    # http://stackoverflow.com/questions/3365740/how-to-import-all-submodules\n    \"\"\" Import all submodules of a module, recursively, including subpackages\n\n    :param package: package (name or actual module)\n    :type package: str | module\n    :rtype: dict[str, types.ModuleType]\n    \"\"\"\n    if isinstance(package, str):\n        package = importlib.import_module(package)\n    results = {}\n    for loader, name, is_pkg in pkgutil.walk_packages(package.__path__):\n        full_name = package.__name__ + '.' 
+ name\n results[full_name] = importlib.import_module(full_name)\n if recursive and is_pkg:\n results.update(import_submodules(full_name))\n return results\n\n# Note slightly nastier way: e.g.\n# # Task imports: everything in \"tasks\" directory\n# task_modules = glob.glob(os.path.dirname(__file__) + \"/tasks/*.py\")\n# task_modules = [os.path.basename(f)[:-3] for f in task_modules]\n# for tm in task_modules:\n# __import__(tm, locals(), globals())\n","sub_path":"pythonlib/rnc_lang.py","file_name":"rnc_lang.py","file_ext":"py","file_size_in_byte":5296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"88763272","text":"from collections import deque\nN,M = map(int,input().split())\n\n\nstep_x = [-1,1,0,0]\nstep_y = [0,0,-1,1]\n\ndef bfs(x,y):\n \n q = deque([])\n q.append((x,y,0))\n res = 10000\n \n while q:\n x,y,cnt = q.popleft()\n \n if x == M - 1 and y == N - 1:\n res = min(res,cnt)\n \n for i in range(4):\n if M > x + step_x[i] >=0 and N > y + step_y[i] >=0 and G[y + step_y[i]][x + step_x[i]]:\n G[y + step_y[i]][x + step_x[i]] = 0\n q.append((x + step_x[i],y + step_y[i],cnt+1))\n \n return res\n \nG = [[] for i in range(N)]\n\nfor i in range(N):\n G[i] = list(map(int,input()))\n \n\nprint(bfs(0,0)+1)\n","sub_path":"2178 미로탐색 실버1.py","file_name":"2178 미로탐색 실버1.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"265560576","text":"import matplotlib.pyplot as plt\nimport sys\n\ndef print_no_document_found_error():\n print(\"ERROR: No .txt document found\")\n print(\"Please add a .txt document as first argument when calling this script\")\n print(\"Note: That .txt document has had to be created by the related \\'LocationApp\\' for Android\")\n print(\"Exiting\")\n print(\"\\n\")\n\n# Returns the amount of samples recorded\ndef get_sample_count(filename):\n with open(filename) as f:\n for i, l in enumerate(f):\n pass\n return i + 1\n\ndef get_data(filename):\n uwb_positions = []\n filtered_positions = []\n raw_accelerations = []\n filtered_accelerations = []\n\n with open(filename) as f:\n for line in f:\n uwb_position = line.split('|')[0]\n filtered_position = line.split('|')[1]\n raw_acceleration = line.split('|')[2]\n filtered_acceleration = line.split('|')[3]\n\n uwb_x = float(uwb_position.split(',')[0])\n uwb_y = float(uwb_position.split(',')[1])\n uwb_z = float(uwb_position.split(',')[2])\n \n filtered_x = float(filtered_position.split(',')[0])\n filtered_y = float(filtered_position.split(',')[1])\n filtered_z = float(filtered_position.split(',')[2])\n \n raw_acceleration_x = float(raw_acceleration.split(',')[0])\n raw_acceleration_y = float(raw_acceleration.split(',')[1])\n raw_acceleration_z = float(raw_acceleration.split(',')[2])\n \n filtered_acceleration_x = float(filtered_acceleration.split(',')[0])\n filtered_acceleration_y = float(filtered_acceleration.split(',')[1])\n filtered_acceleration_z = float(filtered_acceleration.split(',')[2])\n\n # Add positions to lists\n uwb_positions.append([uwb_x, uwb_y, uwb_z])\n filtered_positions.append([filtered_x, filtered_y, filtered_z])\n raw_accelerations.append([raw_acceleration_x, raw_acceleration_y, raw_acceleration_z])\n filtered_accelerations.append([filtered_acceleration_x, filtered_acceleration_y, filtered_acceleration_z])\n \n return uwb_positions, filtered_positions, raw_accelerations, filtered_accelerations\n\ndef get_values(uwb_positions, filtered_positions, raw_accelerations, 
filtered_accelerations):\n    uwb_x_coordinates = []\n    uwb_y_coordinates = []\n    uwb_z_coordinates = []\n    filtered_x_coordinates = []\n    filtered_y_coordinates = []\n    filtered_z_coordinates = []\n    raw_x_accelerations = []\n    raw_y_accelerations = []\n    raw_z_accelerations = []\n    filtered_x_accelerations = []\n    filtered_y_accelerations = []\n    filtered_z_accelerations = []\n\n    for p in uwb_positions:\n        uwb_x_coordinate = p[0]\n        uwb_x_coordinates.append(uwb_x_coordinate)\n        uwb_y_coordinate = p[1]\n        uwb_y_coordinates.append(uwb_y_coordinate)\n        uwb_z_coordinate = p[2]\n        uwb_z_coordinates.append(uwb_z_coordinate)\n    \n    for p in filtered_positions:\n        filtered_x_coordinate = p[0]\n        filtered_x_coordinates.append(filtered_x_coordinate)\n        filtered_y_coordinate = p[1]\n        filtered_y_coordinates.append(filtered_y_coordinate)\n        filtered_z_coordinate = p[2]\n        filtered_z_coordinates.append(filtered_z_coordinate)\n\n    for a in raw_accelerations:\n        raw_x_acceleration = a[0]\n        raw_x_accelerations.append(raw_x_acceleration)\n        raw_y_acceleration = a[1]\n        raw_y_accelerations.append(raw_y_acceleration)\n        raw_z_acceleration = a[2]\n        raw_z_accelerations.append(raw_z_acceleration)\n    \n    for a in filtered_accelerations:\n        filtered_x_acceleration = a[0]\n        filtered_x_accelerations.append(filtered_x_acceleration)\n        filtered_y_acceleration = a[1]\n        filtered_y_accelerations.append(filtered_y_acceleration)\n        filtered_z_acceleration = a[2]\n        filtered_z_accelerations.append(filtered_z_acceleration)\n\n    return uwb_x_coordinates, uwb_y_coordinates, uwb_z_coordinates, filtered_x_coordinates, filtered_y_coordinates, filtered_z_coordinates, raw_x_accelerations, raw_y_accelerations, raw_z_accelerations, filtered_x_accelerations, filtered_y_accelerations, filtered_z_accelerations\n\ndef plot(uwb_positions, filtered_positions, raw_accelerations, filtered_accelerations, sample_count):\n    fig = plt.figure(figsize=(7, 13))\n    ax0 = plt.subplot(211)\n    ax1 = plt.subplot(212, projection='3d')\n    plt.title(\"Raw UWB and filtered positions\")\n    plot_2D_cartesian(uwb_positions, filtered_positions, ax0)\n    #plot_3D(uwb_positions, filtered_positions, ax1)\n    plt.show()\n    plot_line_chart(uwb_positions, filtered_positions, raw_accelerations, filtered_accelerations, sample_count)\n\ndef plot_2D_cartesian(uwb_positions, filtered_positions, axs):\n    axs.set_xlabel(\"X Axis\")\n    axs.set_ylabel(\"Y Axis\")\n    # Plot 2D raw UWB positions\n    for x, y, z in uwb_positions:\n        axs.scatter(x, y, c='b', marker='^')\n    # Plot 2D filtered positions\n    for x, y, z in filtered_positions:\n        axs.scatter(x, y, c='r', marker='x')\n\ndef plot_3D(uwb_positions, filtered_positions, axs):\n    axs.set_xlabel('X Axis')\n    axs.set_ylabel('Y Axis')\n    axs.set_zlabel('Z Axis')\n\n    # Plot 3D raw uwb positions\n    for x, y, z in uwb_positions:\n        axs.scatter(x, y, z, c='b', marker='^')\n    # Plot 3D filtered positions\n    for x, y, z in filtered_positions:\n        axs.scatter(x, y, z, c='r', marker='x')\n\ndef plot_line_chart(uwb_positions, filtered_positions, raw_accelerations, filtered_accelerations, sample_count):\n    fig = plt.figure()\n    uwb_x_coordinates, uwb_y_coordinates, uwb_z_coordinates, filtered_x_coordinates, filtered_y_coordinates, filtered_z_coordinates, raw_x_accelerations, raw_y_accelerations, raw_z_accelerations, filtered_x_accelerations, filtered_y_accelerations, filtered_z_accelerations = get_values(uwb_positions, filtered_positions, raw_accelerations, filtered_accelerations)\n    plt.title(\"Raw UWB and filtered positions\")\n    # Plot coordinates\n    ax1 = fig.add_subplot(311)\n    ax1.plot(range(sample_count), 
uwb_x_coordinates, label='UWB X', c='b')\n ax1.plot(range(sample_count), filtered_x_coordinates, label='Filtered X', c='r')\n ax1.legend()\n\n ax2 = fig.add_subplot(312)\n ax2.plot(range(sample_count), uwb_y_coordinates, label='UWB Y', c='b')\n ax2.plot(range(sample_count), filtered_y_coordinates, label='Filtered Y', c='r')\n ax2.legend()\n\n ax3 = fig.add_subplot(313)\n ax3.plot(range(sample_count), uwb_z_coordinates, label='UWB Z', c='b')\n ax3.plot(range(sample_count), filtered_z_coordinates, label='Filtered Z', c='r')\n ax3.axhline(1.67, 0, 1, label='User Height', c='g')\n ax3.legend()\n\n plt.show()\n\n # Plot accelerations\n fig = plt.figure()\n plt.title(\"Raw and filtered accelerations\")\n ax1 = fig.add_subplot(311)\n ax1.plot(range(sample_count), raw_x_accelerations, label='Raw X', c='b')\n ax1.plot(range(sample_count), filtered_x_accelerations, label='Filtered X', c='r')\n ax1.legend()\n\n ax2 = fig.add_subplot(312)\n ax2.plot(range(sample_count), raw_y_accelerations, label='Raw Y', c='b')\n ax2.plot(range(sample_count), filtered_y_accelerations, label='Filtered Y', c='r')\n ax2.legend()\n\n ax3 = fig.add_subplot(313)\n ax3.plot(range(sample_count), raw_z_accelerations, label='Raw Z', c='b')\n ax3.plot(range(sample_count), filtered_z_accelerations, label='Filtered Z', c='r')\n ax3.axhline(2.0, 0, 1, label='Z Acc Threshold', c='g')\n ax3.axhline(-2.0, 0, 1, c='g')\n ax3.legend()\n\n plt.show()\n\nif __name__ == \"__main__\":\n try:\n filename = sys.argv[1]\n except IndexError:\n print_no_document_found_error()\n exit(1)\n \n sample_count = get_sample_count(filename)\n uwb_positions, filtered_positions, raw_accelerations, filtered_accelerations = get_data(filename)\n plot(uwb_positions, filtered_positions, raw_accelerations, filtered_accelerations, sample_count)\n","sub_path":"Measurements/helper_scripts/measurements_plot_movement.py","file_name":"measurements_plot_movement.py","file_ext":"py","file_size_in_byte":7985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"308466211","text":"import json,datetime,pytz,random\nfrom rest_framework.views import APIView\nfrom django.http import JsonResponse\nfrom api.models import Urldetails\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom rest_framework import serializers\nfrom django.db.models import F,Q\n\nclass Generatetinyurl(APIView):\n\tdef post(self,request):\n\t\trequestadata = request.data.get('urldata')\n\t\trequestjson = json.loads(requestadata)\n\n\t\tif requestjson['url']: \n\t\t\tif not request.session.has_key('session_id'):\n\t\t\t\trequest.session.create()\n\t\t\t\trequest.session['session_id'] = request.session.session_key\n\t\t\tif self.check_url_or_not(requestjson['url']):\n\t\t\t\turl_id = self.insert_tiny_url(requestjson['url'],requestjson['nickname'],request.session['session_id'])\n\t\t\t\tfinal_tiny_url =\"http://127.0.0.1:8001/web/\"+url_id\n\t\t\t\tresponse = {\"Status\":\"Success\",\"Message\":\"Ok\",\"tinyurl\":final_tiny_url,\"code\":\"200\"}\n\t\t\telse:\n\t\t\t\tresponse = {\"Status\":\"Error\",\"Message\":\"Invalid Url\",\"code\":\"300\"}\n\t\telse:\n\t\t\tresponse = {\"Status\":\"Error\",\"Message\":\"Request Parameter Missing\",\"code\":\"100\"}\n\t\treturn JsonResponse(response)\n\n\tdef insert_tiny_url(self,url,nickname,sessionid):\n\t\turl_id = self.generate_unique_url_id()\n\t\turlobj = Urldetails()\n\t\turlobj.url_nickname = nickname\n\t\turlobj.link = url\n\t\turlobj.sessionid = sessionid\n\t\turlobj.createddate = 
self.get_current_time()\n\t\turlobj.url_id = url_id\n\t\turlobj.save()\n\n\t\treturn url_id\n\n\tdef check_url_or_not(self,url):\n\t\tif 'https://' in url:\n\t\t\treturn True\n\t\telif 'http://' in url:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\tdef get_current_time(self):\n\t\tnow_date = datetime.datetime.now(pytz.timezone('UTC'))\n\t\tnow_date = now_date.astimezone(pytz.timezone('Asia/Kolkata'))\n\t\tnow_date = now_date.strftime('%Y-%m-%d %H:%M:%S')\n\t\tnow_date = datetime.datetime.strptime(now_date,'%Y-%m-%d %H:%M:%S')\n\t\treturn now_date\t\n\n\tdef generate_unique_url_id(self):\n\t\turl_id = ''\n\t\tunique_arr = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ123456789'\n\t\tfor i in range(10):\n\t\t\tnbr = random.randint(0,len(unique_arr)-1)\n\t\t\turl_id +=unique_arr[nbr]\n\t\treturn url_id\t\n\nclass Getlink(APIView):\n\tdef get(self,request,urlid):\n\t\ttry:\n\t\t\turlobj = Urldetails.objects.get(url_id=urlid)\n\t\t\tresponse = {\"Status\":\"Success\",\"Message\":\"Ok\",\"link\":urlobj.link,\"code\":\"200\"}\n\t\texcept ObjectDoesNotExist:\n\t\t\tresponse ={\"Status\":\"Error\",\"Message\":\"No Such Id Available\",\"code\":\"100\"}\t\n\t\treturn JsonResponse(response)\n\nclass Fetchurldetails(APIView):\n\tdef get(self,request,pageno,sessionid):\n\t\tif sessionid:\n\t\t\turlobjcount = Urldetails.objects.filter(sessionid=sessionid,status=1).count()\n\n\t\t\tno_of_data_display = 5\n\t\t\tlimitdata = (int(pageno)-1)*no_of_data_display\n\n\t\t\tif no_of_data_display*int(pageno) > urlobjcount:\n\t\t\t\tno_of_data_display = None\t\n\n\t\t\turlobj = Urldetails.objects.filter(sessionid=sessionid,status=1).values('url_nickname','url_id','link','createddate').order_by('-createddate')[limitdata:no_of_data_display]\n\t\t\tif urlobj:\n\t\t\t\turl_list =[]\n\t\t\t\tfor urlfetchobj in urlobj:\n\t\t\t\t\tfetchdata =self.make_fetch_details(urlfetchobj)\n\t\t\t\t\turl_list.append(fetchdata)\n\t\t\t\tresponse ={\"Status\":\"Success\",\"Message\":\"OK\",\"data\":url_list,\"code\":\"200\"}\t\n\t\t\telse:\n\t\t\t\tresponse ={\"Status\":\"Error\",\"Message\":\"No Data Available\",\"code\":\"100\"}\t\n\t\telse:\n\t\t\tresponse ={\"Status\":\"Error\",\"Message\":\"Request Parameter is Missing\",\"code\":\"100\"}\t\t\n\t\treturn JsonResponse(response)\t\t\n\n\tdef make_fetch_details(self,urlobj):\n\t\turl_arr = {}\n\t\tif urlobj:\n\t\t\turl_arr['nickname'] = urlobj['url_nickname']\n\t\t\turl_arr['tinyurl'] = 'http://127.0.0.1:8001/web/'+urlobj['url_id']\n\t\t\turl_arr['urlid'] = urlobj['url_id']\n\t\t\turl_arr['createddate'] = urlobj['createddate']\n\t\treturn url_arr\n\n\nclass Removeurl(APIView):\n\tdef post(self,request):\n\t\trequestdata = request.data.get('delete')\n\t\trequestjson = json.loads(requestdata)\n\t\tif requestjson['urlid'] and requestjson['sessionid']:\n\t\t\tresponse=self.check_and_update_status(requestjson['urlid'],requestjson['sessionid'])\n\t\telse:\n\t\t\tresponse = {\"Status\":\"Error\",\"Message\":\"Request Parameter is Missing\",\"code\":\"100\"}\n\t\treturn JsonResponse(response)\n\n\tdef check_and_update_status(self,urlid,sessionid):\n\t\ttry:\n\t\t\turlobj = Urldetails.objects.get(\n\t\t\t\t\t\t\t\t\tQ(url_id=urlid) &\n\t\t\t\t\t\t\t\t\tQ(sessionid=sessionid)\n\t\t\t\t\t\t\t\t)\n\t\t\turlobj.status = 0\n\t\t\turlobj.save()\n\t\t\tresponse ={\"Status\":\"Success\",\"Message\":\"Update Successfully\",\"code\":\"200\"}\n\t\texcept ObjectDoesNotExist:\n\t\t\tresponse ={\"Status\":\"Error\",\"Message\":\"Invalid Id\",\"code\":\"100\"}\n\t\treturn 
response\t\t\t\n\n\nclass Getdetail_via_nickname(APIView):\n\tdef post(self,request):\n\t\trequestdata = request.data.get('nickname')\n\t\trequestjson = json.loads(requestdata)\n\n\t\tif requestjson['nickname'] and requestjson['sessionid']:\n\t\t\turlobj = Urldetails.objects.filter(\n\t\t\t\t\t\t\t\t\tQ(url_nickname__contains=requestjson['nickname']) &\n\t\t\t\t\t\t\t\t\tQ(sessionid=requestjson['sessionid'])\n\t\t\t\t\t\t\t\t)\t\t\t\t\t\t\n\t\t\tif urlobj:\n\t\t\t\turl_list = []\n\t\t\t\tfor urlfetchobj in urlobj:\n\t\t\t\t\tfetchdata =self.make_fetch_details(urlfetchobj)\n\t\t\t\t\tprint(urlfetchobj.url_nickname)\n\t\t\t\t\turl_list.append(fetchdata)\t\n\t\t\t\tresponse ={\"Status\":\"Success\",\"Message\":\"OK\",\"data\":url_list,\"code\":\"200\"}\n\t\t\telse:\n\t\t\t\tresponse = {\"Status\":\"Error\",\"Message\":\"Invalid Nickname\",\"code\":\"100\"}\t\t\n\t\telse:\n\t\t\tresponse = {\"Status\":\"Error\",\"Message\":\"Request Parameter is Missing\",\"code\":\"100\"}\n\t\treturn JsonResponse(response)\t\n\n\tdef make_fetch_details(self,urlobj):\n\t\turl_arr = {}\n\t\tif urlobj:\n\t\t\turl_arr['nickname'] = urlobj.url_nickname\n\t\t\turl_arr['tinyurl'] = 'http://127.0.0.1:8001/web/'+urlobj.url_id\n\t\t\turl_arr['urlid'] = urlobj.url_id\n\t\t\turl_arr['createddate'] = urlobj.createddate\n\t\treturn url_arr\t\t\t\t\t\t\t\n\t\t\n\t\t\n\t\t \t\t\t\n","sub_path":"api/views/Tinyurlview.py","file_name":"Tinyurlview.py","file_ext":"py","file_size_in_byte":5599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"236228204","text":"###########################################################################\n# K64_accelorometer_capture.py\n# 1. Python code that receives and parses serial port string data\n# 2. Decodes x,y,z accelerometer data from Freescale K64_FRDM Kinetis Board\n# 3. 
Continuously plot and update the last 12 readings in charts using matplotlib\n#    Accel device = FXOS8700CQ\n#    paired with Firmware (K64_AACCELOROMETER)\n#    https://developer.mbed.org/compiler/#nav:/K64_ACCELOROMETER\n#    created by Jess Valdez\n############################################################################\n\nimport serial\nimport time\nimport unicodedata\nimport string\nimport re\n#import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\n\n\nXstr = '0'\nXnum = 0\n\n\nfig = plt.figure()\nax1 = fig.add_subplot(3,2,1)\nax2 = fig.add_subplot(3,2,2)\nax3 = fig.add_subplot(3,2,3)\nax4 = fig.add_subplot(3,2,4)\nax5 = fig.add_subplot(3,2,5)\nax6 = fig.add_subplot(3,2,6)\n\n\n\nYxar = [0,1,2,3,4,5,6,7,8,9,10,11]\nYyar = [0,0,0,0,0,0,0,0,0,0,0,0]\nYyar2 = [0,0,0,0,0,0,0,0,0,0,0,0]\nYyar3 = [0,0,0,0,0,0,0,0,0,0,0,0]\n\nYyar_mean = [0,0,0,0,0,0,0,0,0,0,0,0]\n\n#magnetometer\nmYyar = [0,0,0,0,0,0,0,0,0,0,0,0]\nmYyar2 = [0,0,0,0,0,0,0,0,0,0,0,0]\nmYyar3 = [0,0,0,0,0,0,0,0,0,0,0,0]\n\n\n########################\n#serial port business\n#########################\nser = serial.Serial(\n    port='COM30',\\\n    baudrate=9600,\\\n    parity=serial.PARITY_NONE,\\\n    stopbits=serial.STOPBITS_ONE,\\\n    bytesize=serial.EIGHTBITS,\\\n    timeout=0)\n\nprint(\"connected to: \" + ser.portstr)\n\n\ndef animate(i):\n    line = ser.readlines()\n    nline = str(line)\n    \n    N = nline.find('N:')    \n    Nstr = str (nline[(N+3):(N+5)])\n    Nstr.strip()\n    if N >= 0 and len(Nstr) > 0:\n        Nnum = int(Nstr)\n        print (Nnum)\n    else:\n        # no sample index in this serial chunk, so skip this frame\n        return\n    \n    #print (linenum)\n    #############################\n    #accelerometer X,Y,Z data\n    #############################  \n    X = nline.find('X:')    \n    Xstr = str (nline[(X+3):(X+8)])\n    Xstr.strip()\n    if len(Xstr) > 0:\n        Xnum = int(Xstr)\n        print (Xnum)\n        Yyar[Nnum] = Xnum + sum(Yyar[0:11]) / 13.0\n        for i in range(len(Yyar)):\n            Yyar[i] = sum(Yyar[0:11]) / 12.0\n        ax1.clear()\n        ax1.plot(Yxar,Yyar, label = 'accel X', linewidth=3, color='r')\n        ax1.set_title('accel X',fontsize=12, color='r')\n        #ax1.axvspan(-500, 500, facecolor='g', alpha=0.5)\n        ax1.set_ylim([-500,800])\n        ax1.set_autoscaley_on(False)\n        ax1.autoscale_view()\n\n    Y = nline.find('Y:')    \n    Ystr = str (nline[(Y+3):(Y+8)])\n    Ystr.strip()\n    if len(Ystr) > 0:\n        Ynum = int(Ystr)\n        print (Ynum)\n        Yyar2[Nnum] = Ynum + sum(Yyar2[0:11]) / 13.0\n        for i in range(len(Yyar2)):\n            Yyar2[i] = sum(Yyar2[0:11]) / 12.0\n        ax2.clear()\n        ax2.plot(Yxar,Yyar2, label = 'accel Y', linewidth=3, color='r')\n        #legend = ax2.legend(loc='upper center', shadow=True)\n        ax2.set_title('accel Y',fontsize=12,color='r')\n        ax2.set_ylim([-100,1000])\n        ax2.set_autoscaley_on(False)\n        ax2.autoscale_view()\n    \n    Z = nline.find('Z:')    \n    Zstr = str (nline[(Z+3):(Z+8)])\n    Zstr.strip()\n    if len(Zstr) > 0:\n        Znum = int(Zstr)\n        print (Znum)\n        Yyar3[Nnum] = Znum + sum(Yyar3[0:11]) / 13.0\n        for i in range(len(Yyar3)):\n            Yyar3[i] = sum(Yyar3[0:11]) / 12.0\n        ax3.clear()\n        ax3.plot(Yxar,Yyar3, label = 'accel Z', linewidth=3, color='r')\n        #legend = ax3.legend(loc='upper center', shadow=True)\n        ax3.set_title('accel Z',fontsize=12,color='r')\n        ax3.set_ylim([-100,1300])\n        ax3.set_autoscaley_on(False)\n        ax3.autoscale_view()\n    #############################\n    #magnetometer X,Y,Z data\n    #############################\n    mX = nline.find('2X:')    \n    mXstr = str (nline[(mX+3):(mX+8)])\n    mXstr.strip()\n    if len(mXstr) > 0:\n        mXnum = int(mXstr)\n        print (mXnum)\n        mYyar[Nnum] = mXnum + sum(mYyar[0:11]) / 13.0\n        for i in range(len(mYyar)):\n            mYyar[i] = sum(mYyar[0:11]) / 12.0\n        ax4.clear()\n        ax4.plot(Yxar,mYyar, label = 'mag 
X', linewidth=3, color='b')\n        #legend = ax4.legend(loc='upper center', shadow=True)\n        #ax4.setp(mYyar, linewidth=2, color='r')\n        ax4.set_title('mag X',fontsize=12,color='b')\n        ax4.set_ylim([0,500])\n        ax4.set_autoscaley_on(False)\n        ax4.autoscale_view()\n    \n    mY = nline.find('2Y:')    \n    mYstr = str (nline[(mY+3):(mY+8)])\n    mYstr.strip()\n    if len(mYstr) > 0:\n        mYnum = int(mYstr)\n        print (mYnum)\n        mYyar2[Nnum] = mYnum + sum(mYyar2[0:11]) / 13.0\n        for i in range(len(mYyar2)):\n            mYyar2[i] = sum(mYyar2[0:11]) / 12.0\n        ax5.clear()\n        ax5.plot(Yxar,mYyar2, label = 'mag Y', linewidth=3, color='b')\n        #legend = ax5.legend(loc='upper center', shadow=True)\n        ax5.set_title('mag Y',fontsize=12,color='b')\n        ax5.set_ylim([-120,100])\n        ax5.set_autoscaley_on(False)\n        ax5.autoscale_view()\n    \n    mZ = nline.find('2Z:')    \n    mZstr = str (nline[(mZ+3):(mZ+8)])\n    mZstr.strip()\n    if len(mZstr) > 0:\n        mZnum = int(mZstr)\n        print (mZnum)\n        mYyar3[Nnum] = mZnum + sum(mYyar3[0:11]) / 13.0\n        for i in range(len(mYyar3)):\n            mYyar3[i] = sum(mYyar3[0:11]) / 12.0\n        ax6.clear()\n        ax6.plot(Yxar,mYyar3, label = 'mag Z', linewidth=3, color='b')\n        #legend = ax6.legend(loc='upper center', shadow=True)\n        ax6.set_title('mag Z',fontsize=12,color='b')\n        ax6.set_ylim([0,500])\n        ax6.set_autoscaley_on(False)\n        ax6.autoscale_view()\n    \nani = animation.FuncAnimation(fig, animate, interval=700)\n\nplt.show()\n\nser.close()\n\n\n\n","sub_path":"K64F_accelorometer_capture.py","file_name":"K64F_accelorometer_capture.py","file_ext":"py","file_size_in_byte":5805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"36196799","text":"from django.conf.urls import url,include\nfrom . import views\n\nurlpatterns = [\n    url(r'^$',views.contextfunc),\n    url(r'^logout$',views.logout),\n    url(r'^addappt$',views.addappointment),\n    url(r'^edit/(?P<id>\\d+)$',views.update),\n    url(r'^update/(?P<id>\\d+)$',views.editing),\n    url(r'^delete/(?P<id>\\d+)$',views.deleting),\n    url(r'^dashboard$',views.dashboard),\n\n    # url(r'^ideatyped$',views.addideatodb),\n    # url(r'^logout$',views.logout),\n    # url(r'^put_likes_indatabase$',views.likesindatabase),\n    # url(r'^namelink/(?P<id>\\d+)$',views.contextfunctwo),\n    # url(r'^wholikes/(?P<id>\\d+)$',views.wholikes),\n    # url(r'^aliaslink/(?P<id>\\d+)$',views.contextfunctwo),\n    # url(r'^deleteidea_from_database/(?P<id>\\d+)$',views.deletefromdatabase),\n\n]\n","sub_path":"apps/app2/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"454139763","text":"from utils import clear_message, caesar\nfrom constants import LOGO\n\nwhile True:\n\n    print(LOGO)\n\n    while True:\n        action = input('Type \"encode\" to encrypt, type \"decode\" to decrypt:\\n').lower()\n        if action not in {'encode', 'decode'}:\n            print('Unknown action. Please try again.\\n')\n            continue\n        break\n\n    message = list((input('\\nType your message:\\n').lower()))\n    message = clear_message(message)\n    print(f'Everything but chars has been removed from message: {\"\".join(message)}\\n')\n\n    while True:\n        try:\n            shift = int(input('\\nType the shift number:\\n'))\n        except ValueError:\n            print('It is not a valid integer. Please try again.\\n')\n            continue\n        break\n\n    if action == 'encode':\n        print(f'\\nHere is the encoded result:\\n{caesar(action, message, shift)}')\n    elif action == 'decode':\n        print(f'\\nHere is the decoded result:\\n{caesar(action, message, shift)}')\n\n    choice = input('Type \"yes\" if you want to go again. 
Otherwise type \"no\".\\n').lower()\n    if choice == 'no':\n        break\n","sub_path":"day_008/caesar_cipher.py","file_name":"caesar_cipher.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"380169570","text":"# What is a binomial coefficient?\n# - The number of subsets of size k of a finite set of size n\n# - nCk = (n k)\n\n# Pascal's triangle\n'''\nT = int(input())\n\nfor ts in range(1, T+1):\n    N = int(input())\n    for i in range(N):\n        array = [[1] * i for i in range(1, N+1)]\n        if i > 1:\n            for i in range(N):\n                for j in range(i-1, 0, -1):\n                    array[i][j] = array[i-1][j-1] + array[i-1][j]\n\n    print('#%d' % ts)\n    for i in array:\n        print(*i)\n'''\n# https://rh-tn.tistory.com/32\n\nN, K = map(int, input().split())\nans = 1\nfor i in range(N, N-K, -1):\n    ans *= i\nfor j in range(K, 0, -1):\n    ans //= j\nprint(ans)\n\n# Implementation (recursive)\ndef binomial(n, r):\n    if r == 0 or n == r:\n        return 1\n    return binomial(n-1, r-1) + binomial(n-1, r)","sub_path":"Python/BOJ/Level/14_NumberCombination/07_11050.py","file_name":"07_11050.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"116311335","text":"########## Diversity plots - Functions for making diversity plots using plotly ##########\n\n########## Niu Du dniu at jcvi.org 08/31/2018 ##########\n\nimport pandas as pd\nimport numpy as np\nimport plotly.graph_objs as go\nfrom scipy.interpolate import griddata\nfrom plotly import tools\nimport colorlover as cl\nfrom scipy.stats import f_oneway\nfrom statsmodels.stats.multitest import fdrcorrection\n\ndef Hawk_smash(List):\n    '''Flatten lists within a list'''\n    return [item for sublist in List for item in sublist]\n\ndef plot_contour(metadata,factor,resolution = 50,contour_method='linear'):\n    resolution = str(resolution)+'j'\n    x = metadata['x'].values\n    y = metadata['y'].values\n    z = metadata[factor].values\n    X,Y = np.mgrid[min(x):max(x):complex(resolution), min(y):max(y):complex(resolution)]\n    points = [[a,b] for a,b in zip(x,y)]\n    Z = griddata(points, z, (X, Y), method=contour_method)\n    return X,Y,Z\n\ndef group_data_ANOVA(Subset,plot_list):\n    '''Group data by alphabetic names using one-way ANOVA'''\n    ANOVA_m = np.zeros((len(Subset),len(Subset)))\n    for u,U in enumerate(plot_list):\n        for v,V in enumerate(plot_list):\n            ANOVA_m[u,v] = f_oneway(Subset[U],Subset[V])[1]\n\n    df_ANOVA_m = pd.DataFrame(ANOVA_m,index = plot_list,columns=plot_list).fillna(1)\n    df_ANOVA_m = df_ANOVA_m[df_ANOVA_m<0.05]\n    df_ANOVA_m = df_ANOVA_m.replace(to_replace=0,value=1)\n    df_ANOVA_m = df_ANOVA_m.fillna(1)\n    df_ANOVA_test = df_ANOVA_m.copy() ### Test here\n    i = 0\n\n    for col in plot_list:\n        if len(df_ANOVA_m.loc[df_ANOVA_m[df_ANOVA_m[col]==1].index])>0:\n            df_ANOVA_m.loc[df_ANOVA_m[df_ANOVA_m[col]==1].index] = chr(65+i)\n            i+=1\n    return dict(df_ANOVA_m[df_ANOVA_m.columns[0]])\n\ndef ANOVA_test(meta_data,df_otu_counts,Group_factor,Group_list,P_cut):\n    '''Conducting one-way ANOVA test to find significantly enriched OTUs in each group\n    dict_enriched_OTU_group: enriched OTUs in each group\n    df_ANOVA_results_group: F, p, and p_adj for each enriched OTU within each group\n    '''\n\n    meta_data_filter = meta_data.loc[df_otu_counts.columns]\n    dict_group = dict(zip(Group_list,[list(meta_data_filter[meta_data_filter[Group_factor] == x].index) for x in Group_list]))\n\n    ANOVA_results = {}\n    for group in Group_list:\n        Others = list(set(df_otu_counts.columns) - set(dict_group[group]))\n        ANOVA_results[group] = 
[f_oneway(list(df_otu_counts[dict_group[group]].loc[x]), list(df_otu_counts[Others].loc[x]))[0:2] for x in df_otu_counts.index] \n \n df_ANOVA_results_group = {}\n for group in Group_list:\n df_ANOVA_results_group[group] = pd.DataFrame(ANOVA_results[group],index = df_otu_counts.index, columns=['F_ratio','p_value'])\n df_ANOVA_results_group[group]['P_adj'] = list(fdrcorrection(df_ANOVA_results_group[group]['p_value'])[1])\n df_ANOVA_results_group[group] = df_ANOVA_results_group[group].sort_values(by = 'F_ratio',ascending = False) \n \n\n # Target the enriched ones for each group of interest\n Enrich_ratio = pd.DataFrame(index = df_otu_counts.index)\n for group in Group_list:\n Other_group = list(set(df_otu_counts.columns) - set(dict_group[group]))\n Enrich_ratio[group] = df_otu_counts[dict_group[group]].mean(axis = 1) - df_otu_counts[Other_group].mean(axis = 1)\n \n dict_enriched_OTU_group = {}\n for group in Group_list:\n df_temp = df_ANOVA_results_group[group]\n pos_list = set(df_temp.index).intersection(Enrich_ratio[Enrich_ratio[group]>0][group].index)\n dict_enriched_OTU_group[group] = df_temp.loc[pos_list][df_temp.P_adj entering into %r' % tag)\n self._stack.append(tag)\n\n def pop_stack(self):\n return self._stack.pop() if self._stack else None\n\n def _leave(self, tag):\n logger.debug(' > trying to leave %r' % tag)\n\n top = self.pop_stack()\n\n if top is None:\n warnings.warn(\"Cannot leave requested tag, stack empty. Tag: %r\" % tag)\n return False\n\n if top == tag:\n return True\n\n warnings.warn(\"Close tag request does not match current opened tag. Current: %r, Requested: %r\" % (\n top,\n tag\n ))\n\n return False\n\n def append(self, result):\n \"\"\"\n Append given item to the processor result stream\n :param result: tuple(event_type, data, pos)\n :return:\n \"\"\"\n self._result.append(result)\n\n def extend(self, result):\n \"\"\"\n Extends processor result stream by given results\n :param result: list,tuple\n :return:\n \"\"\"\n self._result.extend(result)\n\n def flush(self):\n \"\"\"\n Finalize processing and returns processor result.\n Can be called only when processor is exhausted, which means iteration is over\n :return:\n \"\"\"\n assert self._exhausted, \"Processor is in mid of operation. 
Cannot flush, because result is not ready\"\n        return self._result\n\n\nclass RootProcessor(BaseProcessor):\n    \"\"\"\n    Root processor takes a fresh diff iterator and processes all events.\n    If a diff operation is detected, control is routed to the appropriate special processor.\n    \"\"\"\n\n    def __init__(self, diff_iter):\n        diff_iter = OneBackIterator(diff_iter)\n        super(RootProcessor, self).__init__(diff_iter, None)\n\n    def _stop(self, operation, event):\n        # never stops, the root processor runs over all events\n        return False\n\n    def _subprocess_event(self, operation, event, parent=None):\n\n        if operation == 'equal':\n            processor_cls = EqualProcessor\n        elif operation == 'insert':\n            processor_cls = InsertProcessor\n        elif operation == 'delete':\n            processor_cls = DeleteProcessor\n        else:\n            raise AssertionError(\"Unsupported operation type %r\" % operation)\n\n        # rewind the iterator one step back, so the processor will see this event as the first one\n        self._iter.go_back()\n\n        logger.debug(\"-> Entering into processor %r\" % processor_cls)\n\n        processor = processor_cls(self._iter, parent=(parent or self.get_current_element()))\n        for op, evt, parent in processor:\n            processor.extend(self._subprocess_event(op, evt, parent))\n\n        res = processor.flush()\n\n        logger.debug('<- Processor %r left' % processor_cls)\n        return res\n\n    def _process_event(self, operation, event):\n        res = self._subprocess_event(operation, event)\n        self.extend(res)\n\n    def execute(self):\n        for _ in self:\n            # the root processor should never yield\n            raise AssertionError\n\n        return self.flush()\n\n    def _process_block(self, start_event):\n        raise AssertionError\n\n\nclass EqualProcessor(BaseProcessor):\n\n    def _stop(self, operation, event):\n        return operation != 'equal' and not self._stack\n\n    def _process_event(self, operation, event):\n        if operation != 'equal':\n            return False\n\n        return super(EqualProcessor, self)._process_event(operation, event)\n\n    def _process_block(self, start_event):\n        event_type, data, pos = start_event\n        old_events = []\n        new_events = []\n        op_codes = set()\n\n        # track how many times the same tag is opened inside a pass-through block\n        counter = 1\n\n        for operation, event in self._iter:\n\n            op_codes.add(operation)\n\n            if operation in {'equal', 'delete'}:\n                old_events.append(event)\n\n            if operation in {'equal', 'insert'}:\n                new_events.append(event)\n\n            et, dt, p = event\n            if et == START and dt[0] == data[0]:\n                # again, the same tag is opened inside the skipped block, increase the tag opening counter\n                counter += 1\n            elif et == END and dt == data[0]:\n                counter -= 1\n                if counter == 0:\n                    break\n\n        if len(op_codes) > 1 or 'equal' not in op_codes:\n            # if there was more than one diff operation in the block contents, or the\n            # operation is different from equal, then the block contents changed:\n            # render the old as removed, render the new as inserted\n            processor = DeleteProcessor(parent=self.get_current_element())\n            processor.extend(old_events)\n            self.extend(processor.flush())\n            processor = InsertProcessor(parent=self.get_current_element())\n            processor.extend(new_events)\n            self.extend(processor.flush())\n        else:\n            self.extend(new_events)\n\n\nclass SingleOperationProcessor(BaseProcessor):\n\n    class MARKER(object):\n        pass\n\n    operation = None\n\n    def __init__(self, *args, **kwargs):\n        super(SingleOperationProcessor, self).__init__(*args, **kwargs)\n        self._stack = []\n        self._buffer = []\n\n        self._rendered = False\n        self._all_same = None\n\n        if self.can_contain_diff():\n            self.open_diff()\n\n    def _stop(self, operation, event):\n        if operation != self.operation and (not 
self._stack or self._stack[0] is self.MARKER):\n return True\n\n return False\n\n def _process_event(self, operation, event):\n\n if operation == 'equal':\n self._all_same = False\n self.append(event)\n\n if operation != self.operation:\n self._all_same = False\n return False\n\n return super(SingleOperationProcessor, self)._process_event(operation, event)\n\n def _enter(self, tag):\n super(SingleOperationProcessor, self)._enter(tag)\n\n if self._rendered is True and self.get_current_element() is self.MARKER:\n # if diff tag is open and we are about to enter into new tag\n # and we are in diff tag, close current diff, so\n # Foo bar \n # I I E I\n # will be rendered as:\n # FooE\n #\n self.close_diff()\n\n if self._rendered is False and self.can_contain_diff():\n self.open_diff()\n\n def _leave(self, tag):\n\n if super(SingleOperationProcessor, self)._leave(tag):\n\n top = self.get_current_element()\n if top is self.MARKER:\n self.pop_stack()\n self.close_diff()\n\n return True\n\n return False\n\n def append(self, result):\n if self._rendered:\n self._buffer.append(result)\n else:\n evt_type, data, pos = result\n if evt_type == TEXT:\n # Diff processor tag is not rendered yet, but text node insertion is requested\n # this means that content visible to user won't be marked properly\n # This may happen only if current node not allow to place diff inside but allow\n # to has content nodes. Probably there is only one such tag in HTML -
\n                warnings.warn(\"Diff tag not rendered and text node is about to be appended to the result.\"\n                              \"Text '%r' won't be marked properly\" % data)\n\n                if not self._append_hazardous_result():\n                    return\n\n            self._result.append(result)\n\n    def extend(self, result):\n        if self._rendered:\n            self._buffer.extend(result)\n        else:\n            # Diff processor tag is not rendered yet, but set of results was requested to be inserted\n            # at bulk. These results won't be marked properly.\n            warnings.warn(\"Diff tag not rendered and bulk of events is about to be inserted in the result\")\n            if not self._append_hazardous_result():\n                return\n\n            self._result.extend(result)\n\n    def _append_hazardous_result(self):\n        \"\"\"\n        Items which are about to be appended will not be properly marked according to the processor,\n        because diff tag was not placed yet. By default, such content appending is not allowed.\n        :return:\n        \"\"\"\n        return False\n\n    def close_diff(self):\n        \"\"\"\n        Close diff marked piece of HTML. Detect if all operation events was the same, if so it means that only\n        one kind of operation happen, so regular diff tag should be rendered. If operations are not the same\n        this means that inside diffed element there is also original content, so in fact this was a change in\n        formatting.\n\n        Diffed sections are rendered lazily. Here, opening diff tag is put into result stream, then\n        collected buffer events and closing tag.\n        \"\"\"\n        # child is an element which should be wrapped by diff, this could be a text node\n        # or tag\n        child = self._buffer[0]\n        if self._all_same:\n            # all events was the same operation\n            node = getattr(DefaultDiffProducer, 'render_%s' % self.operation)(self.get_current_element())\n        else:\n            # here text node is not valid, diff in text is by word, its not possible that\n            # text nodes are inserted and not all in this context was inserted (if so, diff iterator\n            # should return equal action and break insert processor)\n            assert child[0] == START\n            formatting_node = DOMNode(\n                name=child[1][0],\n                attrs=child[1][1]\n            )\n            node = getattr(DefaultDiffProducer, 'render_formatting_%s' % self.operation)(self.get_current_element(),\n                                                                                         formatting_node)\n\n        logger.debug(\"Lazy diff '%s' of %d nodes marked using %r\" % (self.operation, len(self._buffer), node))\n\n        self._result.append((START, (QName(node.name), Attrs(node.attrs)), None))\n        self._result.extend(self._buffer)\n        self._result.append((END, QName(node.name), None))\n\n        self._rendered = False\n        self._buffer = []\n\n    def open_diff(self):\n        \"\"\"\n        Lazily open diff marked piece of HTML. As now, all results will be stored in temporary buffer until\n        opened tags stack return back to diff mark and close_diff() will be called.\n        \"\"\"\n        logger.debug(\"Diff tag allowed in %r. 
Opening node.\" % self.get_current_element())\n        self._buffer = []\n        self._rendered = True\n        self._all_same = True\n        self._stack.append(self.MARKER)\n\n\n    def flush(self):\n        if self._stack and self._stack[0] is self.MARKER:\n            self.close_diff()\n\n        return super(SingleOperationProcessor, self).flush()\n\n    def _process_block(self, start_event):\n        raise NotImplementedError()\n\n\nclass InsertProcessor(SingleOperationProcessor):\n\n    operation = 'insert'\n\n    def _append_hazardous_result(self):\n        # for insertion processor allow to put hazardous result into output,\n        # this is not OK, but only side effect is that content which was inserted in new document version\n        # won't be marked as inserted. This case is better that dropping this content completely out.\n        return True\n\n    def _process_block(self, start_event):\n        # whole block should be marked as inserted, collect sub-events which type is equal or inserted,\n        # so block will be rendered in the same way as is in new file version\n        self.extend(self._collect_block(start_event, {'equal', 'insert'}))\n\n\nclass DeleteProcessor(SingleOperationProcessor):\n    operation = 'delete'\n\n    def _passthrough(self, start_event):\n        # node is marked as removed and all its contents should be skipped\n        # in deletion it means that should not be passed to the output\n        # just consume block\n        self._collect_block(start_event, set())\n\n    def _process_block(self, start_event):\n        # whole block should be marked as removed, collect contents events which type is equal or deleted,\n        # so removed block will be rendered in the same way as exist in old file version\n        # skip any insertion inside (if exist) because cannot be rendered (diff is not allowed inside block)\n        self.extend(self._collect_block(start_event, {'equal', 'delete'}))\n","sub_path":"pyhtmldiff/differ/processor.py","file_name":"processor.py","file_ext":"py","file_size_in_byte":18268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"362059774","text":"import warnings\n\nimport torch\nimport torchmetrics.image as t_metrics\n\nfrom .. import config as cfg\nfrom ..loss.feature_loss import FeatureLoss\nfrom ..loss.hole_loss import HoleLoss\nfrom ..loss.total_variation_loss import TotalVariationLoss\nfrom ..loss.valid_loss import ValidLoss\nfrom ..utils.featurizer import VGG16FeatureExtractor\n\n\n@torch.no_grad()\ndef get_metrics(img_mask, loss_mask, output, gt, setname):\n    metric_settings = {\n        'valid': {},\n        'hole': {},\n        'tv': {},\n        'var': {},\n        'feature': {\n            'outputs': ['style', 'prc']\n        },\n        'StructuralSimilarityIndexMeasure': {\n            'torchmetric_settings': {}\n        },\n        'UniversalImageQualityIndex': {\n            'torchmetric_settings': {'reset_real_features': True}\n        },\n        'FrechetInceptionDistance': {\n            'torchmetric_settings': {'requires_image': True, 'feature': 64, 'reset_real_features': True,\n                                     'subset_size': cfg.batch_size}\n        },\n        'KernelInceptionDistance': {\n            'torchmetric_settings': {'requires_image': True, 'feature': 64, 'reset_real_features': True,\n                                     'subset_size': cfg.batch_size},\n            'outputs': ['mu', 'std']\n        }\n    }\n\n    mask = img_mask[:, cfg.recurrent_steps, cfg.gt_channels, :, :]\n\n    if loss_mask is not None:\n        mask += loss_mask\n        mask[mask < 0] = 0\n        mask[mask > 1] = 1\n        assert ((mask == 0) | (mask == 1)).all(), \"Not all values in mask are zeros or ones!\"\n\n    metric_dict = {}\n    if setname == 'train':\n        metrics = cfg.train_metrics\n    elif setname == 'val':\n        metrics = cfg.val_metrics\n    elif setname == 'test':\n        metrics = cfg.test_metrics\n\n    for metric in metrics:\n        settings = metric_settings[metric]\n\n        if 'valid' in metric:\n            val_loss = ValidLoss().to(cfg.device)\n            metric_output = val_loss(mask, output[:, cfg.recurrent_steps, :, :, :],\n                                     gt[:, cfg.recurrent_steps, cfg.gt_channels, :, :])\n            metric_dict[f'metric/{setname}/valid'] = metric_output['valid']\n\n        elif 'hole' in metric:\n            val_loss = HoleLoss().to(cfg.device)\n            metric_output = val_loss(mask, output[:, cfg.recurrent_steps, :, :, :],\n                                     gt[:, cfg.recurrent_steps, cfg.gt_channels, :, :])\n            metric_dict[f'metric/{setname}/hole'] = metric_output['hole']\n\n        elif 'tv' in metric:\n            val_loss = TotalVariationLoss().to(cfg.device)\n            metric_output = val_loss(mask, output[:, cfg.recurrent_steps, :, :, :],\n                                     gt[:, cfg.recurrent_steps, cfg.gt_channels, :, :])\n            metric_dict[f'metric/{setname}/tv'] = metric_output['tv']\n\n        elif 'feature' in metric:\n            feat_loss = FeatureLoss(VGG16FeatureExtractor()).to(cfg.device)\n            metric_output = feat_loss(mask, output[:, cfg.recurrent_steps, :, :, :],\n                                      gt[:, cfg.recurrent_steps, cfg.gt_channels, :, :])\n            metric_dict[f'metric/{setname}/style'] = metric_output['style']\n            metric_dict[f'metric/{setname}/prc'] = metric_output['prc']\n\n        else:\n            metric_outputs = calculate_metric(metric, mask, output[:, cfg.recurrent_steps, :, :, :],\n                                              gt[:, 
cfg.recurrent_steps, cfg.gt_channels, :, :],\n                                              torchmetrics_settings=settings['torchmetric_settings'])\n\n            if len(metric_outputs) > 1:\n                for k, metric_name in enumerate(settings['outputs']):\n                    metric_dict[f'metric/{setname}/{metric}_{metric_name}'] = metric_outputs[k]\n            else:\n                metric_dict[f'metric/{setname}/{metric}'] = metric_outputs[0]\n\n    return metric_dict\n\n\ndef calculate_metric(name_expr, mask, output, gt, domain='valid', torchmetrics_settings={}, outputs=[]):\n    metric_str = [m for m in t_metrics.__dict__.keys() if (name_expr == m)]\n\n    if len(metric_str) == 0:\n        metric_str = [m for m in t_metrics.__dict__.keys() if (name_expr in m)]\n        if len(metric_str) > 1:\n            warnings.warn('found multiple hits for metric name {}. Will use {}'.format(name_expr, metric_str[0]))\n\n    assert len(metric_str) > 0, 'metric {} not found in torchmetrics.image. Maybe torch-fidelity is missing.'.format(\n        name_expr)\n\n    metric = t_metrics.__dict__[metric_str[0]](**torchmetrics_settings).to(cfg.device)\n\n    if domain == 'valid':\n        pred = mask * output\n        target = mask * gt\n    elif domain == 'hole':\n        pred = (1 - mask) * output\n        target = (1 - mask) * gt\n    elif domain == 'comp_infill':\n        pred = mask * gt + (1 - mask) * output\n        target = gt\n\n    results_ch = []\n\n    for channel in range(output.shape[1]):\n        pred_ch = torch.unsqueeze(pred[:, channel, :, :], dim=1)\n        target_ch = torch.unsqueeze(target[:, channel, :, :], dim=1)\n\n        if 'requires_image' in torchmetrics_settings and torchmetrics_settings['requires_image']:\n\n            pred_ch = torch.cat([pred_ch] * 3, 1)\n            target_ch = torch.cat([target_ch] * 3, 1)\n\n            setattr(metric, 'normalize', True)\n            metric.update((pred_ch), real=True)\n            metric.update((target_ch), real=False)\n            result_ch = metric.compute()\n        else:\n            result_ch = metric(pred_ch, target_ch)\n\n        results_ch.append(result_ch)\n\n    if isinstance(results_ch[0], tuple):\n        result_out = [0] * len(results_ch[0])\n        for result_ch in results_ch:\n            for k, result_ch_arg in enumerate(result_ch):\n                result_out[k] += result_ch_arg\n    else:\n        result_out = 0\n        for result_ch in results_ch:\n            result_out += result_ch\n        result_out = [result_out]\n\n    return result_out\n","sub_path":"climatereconstructionai/metrics/get_metrics.py","file_name":"get_metrics.py","file_ext":"py","file_size_in_byte":5930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"566803426","text":"# Problem: You need to fid the eigenvalues and eigenvectors of a square matrix\n## \n## What are these?\n##\n## Eigenvectors are widely used in machine learning libraries. \n## Intuitively, given a linear transformation represented by a matrix, A, eigenvectors are vectors that, when the transformation is applied,\n## change only in scale (not direction).\n##\n\n\nimport numpy as np\n\n# create the matrix\nmatrix = np.array([\n    [1, -1, 3],\n    [1, 1, 6],\n    [3, 8, 9]\n])\n\n# calculate eigenvalues and eigenvectors\neigenvalues, eigenvectors = np.linalg.eig(matrix)\n\n# log them out\nprint(\"Eigenvalues: \" + str(eigenvalues))\nprint(\"Eigenvectors: \" + str(eigenvectors))","sub_path":"data_objects/matrix_eigen.py","file_name":"matrix_eigen.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"437712178","text":"import matplotlib\nimport matplotlib.pylab as pl\nimport matplotlib.pyplot as plt\nimport Models as Mod\nimport Learner\nimport gspread\nimport os\nimport numpy as np\nimport pandas as pd\nfrom datetime import timedelta, datetime\nfrom scipy.stats.mstats import hmean, gmean\n\ndef run_region(region, sl1, sl, dt, pop, step=14, mAvg=False,\n                is_SIR=False, model=Mod.SIRD, is_semanal=False):\n    \"\"\"\n        region: str\n            Name of the region/counry/city\n        sl1: int\n            position in the dataframe of the first training data\n        sl: int\n            position in the dataframe of the last training data (sl > sl1)\n        dc: pandas.DataFrame\n            metadata of region (to gat population)\n        dt: pandas.Dataframe\n            source of traing data. the len is assumed to be >= sl. The required columns are:\n                - 'At': active cases\n                - 'Rt': cummulative cases\n                - 'Óbitos': cummulative deceases\n                - 'Confirmados': cummulative confirmed cases\n                - 'Data': Date of the data\n            the rows should be sorted by date (column 'Data')\n        step: int\n            prediction range (in days) after the last training day data.\n            Default: 14\n        mAvg: boolean\n            Use the moving average to smooth the data\n            default: False\n        min_casos: int\n            discards data od dt where the cummulative confirmed is less then min_casos.\n            default: 0\n        is_SIR: boolean\n            Use SIR model instead of SIRD model\n            default: False\n    \"\"\"\n    pop = pop\n\n    ini = dt\n    ini = ini[[\"Data\", \"At\", \"Rt\", \"Óbitos\", 'Confirmados']]\n    ini = sort_data(ini)\n    ini = ini.set_index(ini['Data'])\n\n    if is_semanal:\n        ini.index = pd.to_datetime(ini.index)\n        ini = ini.resample('W').mean()\n        ini.index = ini.index.strftime('%m/%d/%Y')\n        ini['Data'] = ini.index\n\n    countday = np.arange(0, len(ini))\n    ini['Count Day'] = countday\n\n    if mAvg:\n        ini = movingAvg(ini, 7, [\"Data\", \"At\", \"Rt\", \"Óbitos\", 'Confirmados'])\n\n    recovered_pp = ini[\"Rt\"]\n    death_pp = ini[\"Óbitos\"]\n    data_pp = ini[\"At\"]\n    conf = ini['Confirmados'].iloc[sl1]\n    d = sl - sl1 + step\n    i_0 = data_pp.iloc[sl1]\n    rC_0 = recovered_pp.iloc[sl1]\n    rD_0 = death_pp.iloc[sl1]\n    s_0 = pop - conf\n    if is_SIR:\n        val_0 = [s_0, i_0, rC_0 + rD_0]\n    else:\n        val_0 = [s_0, i_0, rC_0, rD_0]\n    learner = Learner.Learner_Geral(region, model, d,is_semanal, *val_0, params=[], is_SIR =is_SIR)\n    # recovered, death, inf,vac1, vac2,\n    if is_SIR:\n        # rec_data = \n        learner.train(recovered_pp[sl1:sl]+death_pp[sl1:sl],0,\n                  data_pp[sl1:sl], 0, 0)\n    else:\n        learner.train(recovered_pp[sl1:sl], death_pp[sl1:sl],\n                  data_pp[sl1:sl], 0, 0)\n    df_save = learner.save_results(data_pp[sl1:sl])\n    return learner, df_save\n\n\ndef get_val0_vac(region, dt, dc, sl1, sl, mAvg=False, dose=False, intermed=False, rec=False):\n        pop = get_pop(region, dc)\n\n        ini = get_data(dt, region, is_pred=False)\n        ini = ini[[\"Data\", \"At\", \"Rt\", \"Óbitos\", 'Confirmados', 'Vac 1', 'Vac 2']]\n        ini = sort_data(ini)\n        ini = ini.set_index(ini['Data'])\n        countday = np.arange(0, len(ini))\n        ini['Count Day'] = countday\n\n        if mAvg:\n            ini = 
movingAvg(ini, 7, [\"Data\", \"At\", \"Rt\",\n                            \"Óbitos\", 'Confirmados', 'Vac 1', 'Vac 2'])\n\n        recovered_pp = ini[\"Rt\"]\n        death_pp = ini[\"Óbitos\"]\n        data_pp = ini[\"At\"]\n        vac1 = ini['Vac 1']\n        vac2 = ini['Vac 2']\n        conf = ini['Confirmados'].iloc[sl1]\n        \n        i_0 = data_pp.iloc[sl1]\n        rC_0 = recovered_pp.iloc[sl1]\n        rD_0 = death_pp.iloc[sl1]\n\n        s_0 = pop - conf - vac1[sl1]\n        v2_0 = vac2.iloc[sl1]\n        if dose:\n            if intermed:\n                vi2 = np.maximum(0,vac2.iloc[sl1] - vac2.iloc[sl1-20])\n                v1_only = vac1 - vac2\n                vi1 = np.maximum(0,v1_only.iloc[sl1] - v1_only.iloc[sl1-20])\n                v2 = v2_0 - vi2\n                v1 = v1_only.iloc[sl1] - vi1\n                if rec:\n                    _,_, theta1,theta2 = kargs['params']\n                    v1,im1 = [v1*(1-theta1), v1*theta1]\n                    v2,im2 = [v2*(1-theta2), v2*theta2]\n                    val_0 = [s_0,vi1,v1,vi2,v2,i_0,0,0,rC_0,0,0,rD_0,im1,im2]\n                else:\n                    val_0 = [s_0,vi1,v1,vi2,v2,i_0,0,0,rC_0,0,0,rD_0]\n            else:\n                v1_0 = vac1.iloc[sl1] - v2_0\n                if rec:\n                    _,_, theta1,theta2 = kargs['params']\n                    v1_0,im1 = [v1_0*(1-theta1), v1_0*theta1]\n                    v2_0,im2 = [v2_0*(1-theta2), v2_0*theta2]\n                    val_0 = [s_0,v1_0,v2_0,i_0,0,0,rC_0,0,0,rD_0,im1,im2]\n                else:\n                    val_0 = [s_0,v1_0,v2_0,i_0,0,0,rC_0,0,0,rD_0]\n        else:\n            v1_0 = vac1.iloc[sl1]\n            v1_only = vac1\n            if intermed:\n                vi1 = vac1.iloc[sl1-20]\n                v1 = v1_0 - vi1\n                if rec:\n                    _,theta1,_ = kargs['params']\n                    v1,im1 = [v1*(1-theta1), v1*theta1]\n                    val_0 = [s_0,vi1,v1,i_0,0,rC_0,0,rD_0,im1]\n                else:\n                    val_0 = [s_0,vi1,v1,i_0,0,rC_0,0,rD_0]\n            else:\n                if rec:\n                    _,theta1 = kargs['params']\n                    v1_0,im1 = [v1_0*(1-theta1), v1_0*theta1]\n                    val_0 = [s_0,v1_0,i_0,0,rC_0,0,rD_0,im1]\n                else:\n                    val_0 = [s_0,v1_0,i_0,0,rC_0,0,rD_0]\n        return val_0, recovered_pp, death_pp, data_pp, vac2, v1_only\n\ndef run_vac(region, sl1, sl, dc, dt, step=14, mAvg=False,\n                min_casos=0, model=Mod.SVIRD_1D, dose=False, intermed=False, rec=False, **kargs):\n    \"\"\"\n        region: str\n            Name of the region/counry/city\n        sl1: int\n            position in the dataframe of the first training data\n        sl: int\n            position in the dataframe of the last training data (sl > sl1)\n        dc: pandas.DataFrame\n            metadata of region (to gat population)\n        dt: pandas.Dataframe\n            source of traing data. the len is assumed to be >= sl. 
The required columns are:\n                - 'At': active cases\n                - 'Rt': cummulative cases\n                - 'Óbitos': cummulative deceases\n                - 'Confirmados': cummulative confirmed cases\n                - 'Data': Date of the data\n            the rows should be sorted by date (column 'Data')\n        step: int\n            prediction range (in days) after the last training day data.\n            Default: 14\n        mAvg: boolean\n            Use the moving average to smooth the data\n            default: False\n        min_casos: int\n            discards data od dt where the cummulative confirmed is less then min_casos.\n            default: 0\n\n    \"\"\"\n\n    val_0, recovered_pp, death_pp, data_pp, vac2, v1_only = get_val0_vac(region, dt, dc, sl1, sl, mAvg, dose, intermed, rec)\n    d = sl - sl1 + step\n    learner = SIRD_NN.Learner_Geral(region, model, d,False, *val_0, **kargs)\n    learner.train(recovered_pp[sl1:sl], death_pp[sl1:sl], data_pp[sl1:sl], v1_only[sl1:sl], vac2[sl1:sl])\n    \n    df_save = learner.save_results(data_pp[sl1:sl])\n    \n    return learner, df_save, data_pp[sl1:sl]\n\ndef get_pop(region, dc):\n    if region == 'São Paulo (Estado)' or region == 'Brasil':\n        pop = dc['Habitantes (2019)'].sum()\n    elif 'Cidades' in dc.columns and region in dc['Cidades'].unique():\n        c = dc[dc[\"Cidades\"] == region]\n        pop = c[\"Habitantes (2019)\"].values[0]\n    elif region in dc['Região'].unique():\n        pop = dc[dc['Região'] == region]['Habitantes (2019)'].sum()\n    else:\n        c = dc[dc[\"SP-Subregião\"] == region]\n        pop = c[\"Habitantes (2019)\"].values[0]\n    return pop\n\ndef nearest(items, pivot):\n    return min([i for i in items], key=lambda x: abs(datetime.strptime(x, '%m/%d/%Y') - datetime.strptime(pivot, '%m/%d/%Y')))\n\ndef calc_vacRate(vac):\n    vac = vac[1:].values - vac[:-1].values\n    vac = vac[vac>0]\n    vacR = np.mean(vac)\n    return vacR\n\ndef translate(r):\n        translator = {\n        'Grande SP Leste': 'Greater SP East',\n        'Grande SP Norte': 'Greater SP North',\n        'Grande SP Oeste': 'Greater SP West',\n        'Grande SP Sudeste': 'Greater SP Southeast',\n        'Grande SP Sudoeste': 'Greater SP Southwest',\n        'Sul':'South',\n        'Norte':'North',\n        'Nordeste':'Northeast',\n        'Sudeste': 'Southeast',\n        'Centro-Oeste': 'Midwest',\n        'Brasil':'Brazil',\n        'Metropolitana': 'Greater São Paulo',\n        'Litorânea':'Coastal',\n        'Interior Leste': 'Interior (East)',\n        'Interior Oeste': 'Interior (West)',\n        'Estado de SP': 'State of São Paulo',\n        'São Paulo (Estado)': \"São Paulo (State)\"\n        }\n        if r in translator.keys():\n            return translator[r]\n        return r\n\ndef read_global(region):\n    # read files\n    df_c = pd.read_csv('JHU/time_series_covid19_confirmed_global.csv')\n    df_d = pd.read_csv('JHU/time_series_covid19_deaths_global.csv')\n    df_r = pd.read_csv('JHU/time_series_covid19_recovered_global.csv')\n    \n    # get data from country\n    df_c = df_c[df_c['Country/Region'] == region ]\n    df_d = df_d[df_d['Country/Region'] == region ]\n    df_r = df_r[df_r['Country/Region'] == region ]\n    \n#     #unify regions and transpose dataframe\n    df_c = df_c.groupby('Country/Region', as_index = False).sum().transpose()\n    df_d = df_d.groupby('Country/Region', as_index = False).sum().transpose()\n    df_r = df_r.groupby('Country/Region', as_index = 
False).sum().transpose()\n    \n    # format date and set as index\n    data_c = pd.to_datetime(df_c.index[4:])\n    data_d = pd.to_datetime(df_d.index[4:])\n    data_r = pd.to_datetime(df_r.index[4:])\n    \n    idx_c = df_c.index.values\n    idx_d = df_d.index.values\n    idx_r = df_r.index.values\n\n\n    idx_c[4:] = data_c.strftime('%m/%d/%Y')\n    idx_d[4:] = data_d.strftime('%m/%d/%Y')\n    idx_r[4:] = data_r.strftime('%m/%d/%Y')\n    \n    df_c = df_c.set_index(idx_c)\n    df_d = df_d.set_index(idx_d)\n    df_r = df_r.set_index(idx_r)\n    \n    # get array of dates\n    data = data_r.strftime('%m/%d/%Y')\n    \n    # get datas\n    conf =  df_c.iloc[4:].values.flatten()\n    death =  df_d.iloc[4:].values.flatten()\n    dailyDeath = np.zeros((len(death),))\n    dailyDeath[1:] = death[1:] - death[:-1]\n    rec =  df_r.iloc[4:].values.flatten()\n    infec = conf - death - rec\n\n    # create dataframe\n    df = pd.DataFrame(data = {\n        'SP-Subregião':region,\n        'Data':data,\n        'Rt':rec,\n        'Óbitos':death,\n        'Confirmados' :conf,\n        'At' : infec\n    },index = data)\n    return df\n\ndef get_data(df_data,r, is_pred = True):\n    cols = ['beta(t)','gamma_Rec','gamma_Death','Lethality','Rt']\n    # dc = pd.read_csv(f'data/dados - Agrupamento.csv')\n    if r in df_data[\"SP-Subregião\"].unique():\n        df_d = df_data[df_data[\"SP-Subregião\"] == r]\n    elif ('Cidades' in df_data.columns) and (r in df_data[\"Cidades\"].unique()):\n        df_d = df_data[df_data[\"Cidades\"] == r]\n    # elif r in dc['Região'].unique():\n    #     est = dc[dc['Região'] == r]['SP-Subregião'].unique()\n    #     df_d = pd.DataFrame()\n    #     for e in est:\n    #         df_d =df_d.append(df_data[df_data['SP-Subregião'] == e])\n    #     df_d = df_d.groupby(['Data'], as_index=False).sum()\n    #     df_d['SP-Subregião'] = r\n    #     if is_pred:\n    #         l = len(est)\n    #         for c in cols:\n    #             if c in df_d.columns:\n    #                 df_d.loc[:,c] = df_d[c]/l\n    else:\n        return pd.DataFrame(columns = df_data.columns)\n    if 'Used in Train' in df_data.columns:\n        df_d =  df_d.astype({'Used in Train': 'bool'})\n    return df_d\n\ndef sort_data(df, col = 'Data'):\n    if df.index.name == col:\n        df.index.name = f'{col}_index'\n    df.loc[:,col] = pd.to_datetime(df[col])\n    df.sort_values(by=col, inplace=True, ascending=True)\n    df.loc[:,col] = df[col].dt.strftime('%m/%d/%Y')\n    return df\n\n\ndef non_num_mean(df):\n        return np.mean(df) if isinstance(df[0], (int, float)) else df[0]\n\ndef diff_mean(df_vals):\n    df_mean = df_vals.mean(1)\n    idx = df_mean.index\n    er_total = np.zeros((len(idx),))\n    for pos, i in enumerate(idx):\n        p = df_mean.loc[i]\n        q = df_vals.loc[i]\n        er = 0\n        n = (~np.isnan(q)).sum()\n        if n > 1:\n            for v in q:\n                if np.isnan(v):\n                    continue\n                er += (p - v) ** 2\n            er = er / (n - 1)\n        er_total[pos] = (er)\n    return np.linalg.norm(er_total)\n\ndef movingAvg(df, n, cols = None):\n    if cols is None:\n        return df.rolling(window=n).mean()\n    else:\n        df.iloc[:, 1:] = df[cols].rolling(window=n).mean().copy()\n        return df\n\ndef run_mape(df_d, df_p):\n    return df_d.sub(df_p, axis  = 0).div(df_d, axis = 0).abs().mean() * 100\n\ndef run_rmse(df_d, df_p):\n    return np.sqrt(np.mean(np.square(df_d.sub(df_p, axis  = 0))))\n\ndef run_nrmse(df_d, 
df_p,**kwarg):\n    rmse = run_rmse(df_d, df_p)\n    med = np.mean(df_d)\n    return rmse/med\n\ndef run_mae(df_d, df_p):\n    return df_d.sub(df_p, axis  = 0).abs().mean()\n\n\ndef ERROR_DF(df_data, df_p, r, cols_p = ['Infected', 'Recovered', 'Death'],\n             cols_d=[\"At\", 'Rt', \"Óbitos\"],prev = True, error = run_mape): \n    dc = pd.read_csv(f'data/dados - Agrupamento.csv')\n    df_d = get_data(df_data,r,is_pred = False)\n\n    df_p = get_data(df_p,r)\n    if prev:\n        df_p = df_p[~df_p['Used in Train']]\n    else:\n        df_p = df_p[df_p['Used in Train']]\n        \n    \n    \n    idx = df_p['Data']\n    df_p = df_p.set_index(df_p['Data'])\n    df_d = df_d.set_index(df_d['Data'])\n    idx_d = df_d.index\n    idx_d = idx_d.intersection(idx)\n    df_d = df_d.loc[idx_d]\n    df_p = df_p.loc[idx_d]\n    \n    \n    \n    df_d['Total'] = df_d[cols_d].sum(axis=1)\n    \n    df_p['Total'] = df_p[cols_p].sum(axis=1)\n    l = len(df_d)\n    df_d = df_d.set_index(np.arange(l))\n    df_p = df_p.set_index(np.arange(l))\n    result = list()\n    for i,c in enumerate(cols_p):\n        result.append(error(df_d[cols_d[i]], df_p[c]))\n    result.append( error(df_d['Total'], df_p['Total']))\n    return result\n\ndef MAPE(arq_data, arq_prev, file = 'MAPE.csv', total = False, save = False, MM = False, is_weekly=False):\n    df_data = arq_data\n    df_prev = arq_prev\n    df_save = pd.DataFrame(columns = [\"SP-Subregião\", 'MAPE Infectados', 'MAPE  Recuperados'])\n    for r in df_prev[\"SP-Subregião\"].unique():\n        df_p = df_prev[df_prev[\"SP-Subregião\"] == r]\n        df_d = get_data(df_data,r,is_pred = False)\n        if total:\n            df_p = df_p[df_p['Used in Train']]\n        else:\n            df_p = df_p[~df_p['Used in Train']]\n        if MM:\n#             df_d = movingAvg(df_d, 7, [\"Data\", \"At\", \"Rt\", \"Óbitos\", 'Confirmados'])\n            pass\n        if is_weekly:\n            df_d = turn_weekly(df_d)\n\n        df_d[\"Data\"] = pd.to_datetime(df_d[\"Data\"])\n        df_d[\"Data\"] = df_d[\"Data\"].dt.strftime(\"%m/%d/%Y\")\n\n        df_d = df_d[df_d[\"Data\"].isin(df_p[\"Data\"])][[\"At\", 'Rt','Data']]\n\n        print(df_d)\n\n        df_p = df_p[df_p[\"Data\"].isin(df_d[\"Data\"])]\n        l = len(df_d)\n        df_d = df_d.set_index(np.arange(l))\n        df_p = df_p[['Infected', 'Recovered']].set_index(np.arange(l))\n        er_I = run_mape(df_d['At'], df_p['Infected'])\n        er_R = run_mape(df_d['Rt'], df_p['Recovered'])\n        # er_D = run_mape(df_d['Óbitos'], df_p['Death'])\n        df_save = df_save.append({\"SP-Subregião\":r, 'MAPE Infectados':er_I, 'MAPE  Recuperados':er_R}, ignore_index=True)\n        \n    if save:\n        df_save.to_csv(file)\n    return df_save\ndef cluster(region,prev,pasta, lim = None,coef_I = 1, coef_D = 0, coef_R = 0, percent = 0.05):\n    if os.path.isfile(f'{pasta}/{region}/MAPE_Total-{region}-Prev{prev}.csv'):\n        file_mape = f'{pasta}/{region}/MAPE_Total-{region}-Prev{prev}.csv'\n    else:\n        file_mape = f'{pasta}/{region}/MAPE_Real-{region}.csv'\n    MAPES = pd.read_csv(file_mape,index_col = 0)\n    result = list()\n    grupos = {}\n    if lim is None:\n        lim_I = (np.max(MAPES['MAPE Infectados']) - np.min(MAPES['MAPE Infectados'])) * percent + np.min(MAPES['MAPE Infectados'])\n        lim_R = (np.max(MAPES['MAPE  Recuperados']) - np.min(MAPES['MAPE  Recuperados'])) * percent + np.min(MAPES['MAPE  Recuperados'])\n        lim_D = 0\n        \n        lim = (lim_I * coef_I + lim_R * coef_R + 
lim_D * coef_D)/(coef_I + coef_R + coef_D)\n    for i in range(len(MAPES)):\n        Inf = MAPES['MAPE Infectados'].iloc[i] * coef_I\n        Rec = MAPES['MAPE  Recuperados'].iloc[i] * coef_R\n        # Dea = MAPES['MAPE Óbitos'].iloc[i] * coef_D\n        Dia = MAPES.index[i]\n        \n#         metrica = (Inf + Rec + Dea)/(coef_I + coef_R + coef_D)\n        if Inf < lim and Rec < lim:\n#         if metrica < lim:\n            g = 0\n            result.append('Passed')\n        else:\n            g = 1\n            result.append('Discarded')\n        grupos[f'{Dia}'] = g\n    MAPES['Result'] = result\n    MAPES.to_csv(file_mape)\n    return grupos\n\ndef filter_results(region, dia_ini, dia_fim, prev, pasta, inner_dir, coef_I = 1, coef_D = 1, coef_R = 1, \n                    lim = None,return_total = False, dir_sufix=None, recalc_rt=False):\n\n    print(region)\n\n    g = cluster(region,prev, coef_I = coef_I, coef_D = coef_D, coef_R = coef_R, lim = lim, pasta = pasta)\n    df_g1 = pd.DataFrame()   \n    # df_data = pd.read_csv(\"data\\\\dados - Data_subregions.csv\")\n    for dLen in range(dia_ini,dia_fim+1):\n        if inner_dir:\n            if dir_sufix is None:\n                file = f'{pasta}/{region}/prev-{prev}/Subregions_Pred_{dLen}D_prev-{prev}-{region}.csv'\n            else:\n                file = f'{pasta}/{region}/{dir_sufix}/Subregions_Pred_{dLen}D_prev-{prev}-{region}.csv'\n        else:        \n            file = f'{pasta}/{region}/Subregions_Pred_{dLen}D_prev-{prev}-{region}.csv'\n        if not os.path.exists(file):\n            continue\n        df = pd.read_csv(file,index_col = 0)\n        if g[f'{dLen}'] == 0:# and max(df['Rt']) < 2.5:\n            if return_total:\n                df = df.set_index(df['Data'])\n                df_g1 = pd.concat([df_g1,df], axis =1)\n            else:\n                df_g1 = df_g1.append(df)\n    \n    if return_total or len(df_g1) == 0:\n        return df_g1,df_g1,df_g1\n\n    df_mean = df_g1.groupby('Data', as_index=True).agg({c:gmean if c not in ['Data','SP-Subregião','Used in Train','beta(t)', 'Gamma',\n                                                                                'Rt','OPTM_Result'] else non_num_mean for c in df.columns})\n    # df_mean = df_g1.groupby('Data', as_index=False).mean()\n    df_mean['SP-Subregião'] = region\n    df_min = df_g1.groupby('Data', as_index=False).min()\n    df_max = df_g1.groupby('Data', as_index=False).max()\n    \n    # df_mean.insert(1,'SP-Subregião', region)\n    if recalc_rt:\n        df_mean['Rt'] = df_mean['beta(t)']/(df_mean['Gamma_Rec']+df_mean['Gamma_Death']) * df_mean['Susceptible']/df_mean[['Susceptible','Infected','Recovered','Death']].sum(axis=1)\n        df_min['Rt'] = df_min['beta(t)']/(df_min['Gamma_Rec']+df_min['Gamma_Death']) * df_min['Susceptible']/df_min[['Susceptible','Infected','Recovered','Death']].sum(axis=1)\n        df_max['Rt'] = df_max['beta(t)']/(df_max['Gamma_Rec']+df_max['Gamma_Death']) * df_max['Susceptible']/df_max[['Susceptible','Infected','Recovered','Death']].sum(axis=1)\n\n    return df_mean,df_min,df_max\n\ndef unifica(dia_ini, dia_fim, prev, region, pasta, df_data, inner_dir = False, df_geral = None,\n            crop = 10, MM = False, dir_sufix=None,is_weekly=False, reuse = False,recalc_rt=False):\n    \n    df_MAPE = pd.DataFrame()\n    \n    if not os.path.exists(f'{pasta}/{region}/MAPE_Total-{region}-Prev{prev}.csv') or not reuse:\n        for dLen in range(dia_ini,dia_fim+1):\n            if inner_dir:\n                if dir_sufix is 
None:\n                    file2 = f'{pasta}/{region}/prev-{prev}/Subregions_Pred_{dLen}D_prev-{prev}-{region}.csv'\n                else:\n                    file2 = f'{pasta}/{region}/{dir_sufix}/Subregions_Pred_{dLen}D_prev-{prev}-{region}.csv'\n            else:        \n                file2 = f'{pasta}/{region}/Subregions_Pred_{dLen}D_prev-{prev}-{region}.csv'\n            if not os.path.exists(file2):\n                df_MAPE1 = pd.DataFrame({'SP-Subregião':f'{region}',\n                                        'MAPE Infectados':100.0,\n                                        'MAPE Óbitos':100.0,\n                                        'MAPE Recuperados':100.0}, index = [dLen])\n                print(f'{file2} - does not exist')\n            else:       \n                df_prev = pd.read_csv(file2)\n                df_MAPE1 = MAPE(df_data, df_prev,total = True,MM=MM,is_weekly=is_weekly)\n                df_MAPE1 = df_MAPE1.set_index(pd.Index([dLen]))\n\n            df_MAPE = df_MAPE.append(df_MAPE1)\n        df_MAPE.to_csv(f'{pasta}/{region}/MAPE_Total-{region}-Prev{prev}.csv')\n    df_pred,df_pred_min, df_pred_max  = filter_results(region,dia_ini,dia_fim, prev, pasta, inner_dir, lim = 50, dir_sufix=dir_sufix,recalc_rt=recalc_rt)\n    \n    df_pred = df_pred.drop('Unnamed: 0', axis=1,errors ='ignore')\n    df_pred = df_pred.round({'Infected':0, 'Recovered':0, 'Death':0})\n    \n    if len(df_pred) == 0:\n        if df_geral is None:\n            return df_pred, df_pred_min, df_pred_max\n        else: \n            return df_geral\n    \n    df_pred = sort_data(df_pred)\n    df_pred_min = sort_data(df_pred_min)\n    df_pred_max = sort_data(df_pred_max)\n    count_idx = np.arange(0, len(df_pred))\n    df_pred.set_index(count_idx,inplace=True)\n    df_pred_min.set_index(count_idx,inplace=True)\n    df_pred_max.set_index(count_idx,inplace=True)\n    \n    df_pred = df_pred.astype({'Used in Train': 'bool'})\n\n    esp = len(df_pred[~df_pred['Used in Train']])\n    \n    if crop is not None:\n        if esp > crop:\n            cut = esp-crop\n            df_pred = df_pred.iloc[:-cut]\n            df_pred_min = df_pred_min.iloc[:-cut]\n            df_pred_max = df_pred_max.iloc[:-cut]\n\n\n    if df_geral is not None:\n        df_geral[0] = df_geral[0].append(df_pred)\n        df_geral[1] = df_geral[1].append(df_pred_min)\n        df_geral[2] = df_geral[2].append(df_pred_max)\n        \n        return df_geral\n    return df_pred, df_pred_min, df_pred_max\n\ndef MAPE_DF(df_data, df_p, r, cols_p = ['Infected', 'Recovered', 'Death'], cols_d=[\"At\", 'Rt', \"Óbitos\", 'Data'],prev = True): \n    # dc = pd.read_csv(f'data/dados - Agrupamento.csv')\n    df_d = get_data(df_data,r,is_pred = False)\n\n    df_p = get_data(df_p,r)\n\n    if prev:\n        df_p = df_p[~df_p['Used in Train']]\n    else:\n        df_p = df_p[df_p['Used in Train']]\n        \n    \n\n    df_d = df_d[df_d[\"Data\"].isin(df_p[\"Data\"])][cols_d]\n    df_p = df_p[df_p[\"Data\"].isin(df_d[\"Data\"])]\n    df_d['Total'] = df_d[cols_d[0:3]].sum(axis=1)\n    \n    df_p.loc[:,'Total'] = df_p.sum(axis=1)\n    l = len(df_d)\n    df_d = df_d.set_index(np.arange(l))\n    df_p = df_p.set_index(np.arange(l))\n\n    er_I = run_mape(df_d[cols_d[0]], df_p[cols_p[0]])\n    er_R = run_mape(df_d[cols_d[1]], df_p[cols_p[1]])\n    er_D = run_mape(df_d[cols_d[2]], df_p[cols_p[2]])\n    er_T = run_mape(df_d['Total'], df_p['Total'])\n    return er_I, er_R, er_D,er_T\n\ndef calc_rec(T, conf, death):\n    recT = np.zeros((len(conf),))\n    
recT[T:] = conf[:-T] - death[:-T]\n    infecT =  conf - death - recT\n    return recT, infecT\n\ndef plot_unique(df_avg, df_d,col_d,title,fs,savefile,idx,esp=None,is_rt = False, **kwargs):\n    fig, ax = plt.subplots(figsize=(15,10))\n    if is_rt:\n        ax.get_yaxis().set_major_formatter(\n            matplotlib.ticker.FuncFormatter(lambda x, p: format(x, ',.2f')))\n    else:\n        ax.get_yaxis().set_major_formatter(\n            matplotlib.ticker.FuncFormatter(lambda x, p: format(x, ',.2f').rstrip('0').rstrip('.')))\n    \n    matplotlib.rcParams.update({'font.size': fs})\n    stp = 5\n    lx = 0\n    if esp == None:\n        esp = len(df_avg) - 30\n    if isinstance(df_avg, list): \n        train_lim = len(df_avg[0])-esp\n        leg = kwargs['leg']\n        for i,df in enumerate(df_avg):\n            df = pd.DataFrame({leg[i]:df[:]})\n            idx_a = idx.intersection(df_d.index)\n            df.set_index(idx_a)\n            df.plot(ax=ax,lw=2)\n            \n            if len(df) > lx:\n                lx = len(df)-1\n    else:\n        train_lim = len(df_avg)-esp\n        df_avg = pd.DataFrame({'Mean':df_avg[:]})\n        df_avg.set_index(idx)\n        \n        df_avg.plot( ax=ax, c='r',lw=2)\n        lx = len(df_avg)-1\n        nidx = len(df_avg)\n        if 'plot_all_days' in kwargs.keys() and kwargs['plot_all_days'] == True:\n            total = kwargs['Total'][kwargs['col']]\n            if 'faixa' in kwargs.keys() and kwargs['faixa'] == True:\n                lim_min = total.min(1)[:nidx]\n                lim_max = total.max(1)[:nidx]\n                plt.fill_between(idx, lim_min, \n                     lim_max, alpha = 0.25, label = 'Infection estimative range')\n            else:\n                colors = pl.cm.cool(np.linspace(0,1,len(total.columns)))\n                total.plot(ax=ax, color=colors, alpha=0.6,legend=None,linewidth=3)\n    \n    if df_d is not None:\n        df_d[[col_d]].plot(ax=ax, c='k', linestyle='-.',lw=2,marker='o')\n    \n    ax.axvline(train_lim-1, ymin = 0, linestyle = ':', c = 'k',lw=2)\n    ax.yaxis.grid(lw=1)\n    plt.title(title)\n    ax.set_xlabel('Date')\n    tks = np.linspace(idx.index[0], lx, num=stp, endpoint=True)\n    tks = tks.astype(int)\n    \n    plt.xticks(tks,idx.iloc[tks])\n    plt.xlim(idx.index[0], lx)\n    \n    bottom, top = plt.ylim()\n    left, right = plt.xlim()\n    \n    dist = 3*(right - left)/20\n    off = (top - bottom)/20\n    plt.text(train_lim - dist, bottom + off, \"Training\")\n    plt.text(train_lim + dist/6, bottom + off, \"Test\")\n    dirname = os.path.dirname(savefile)\n    fig.tight_layout()\n    if not os.path.isdir(dirname):\n        os.makedirs(dirname, exist_ok = True)\n    plt.savefig(savefile)\n    \n    plt.close()\ndef plot(df_data, df_pred,r,pasta,pasta_graph,fs = 24,T = None, **kwargs):\n    if isinstance(df_pred, list):\n        plot_mult(df_data, df_pred,r,pasta,pasta_graph,fs)\n        return\n    pasta_save = f'{pasta}/{pasta_graph}'\n    if not os.path.isdir(pasta_save):\n        os.makedirs(pasta_save, exist_ok = True)\n    df_d = get_data(df_data, r,is_pred = False)\n    df_p = get_data(df_pred, r)\n    df_d = sort_data(df_d)\n    \n    if T is not None:\n        rec, inf = calc_rec(T,df_d['Confirmados'], df_d['Óbitos'] )\n        df_d.loc[:,'Rt'] = rec\n        df_d.loc[:,'At'] = inf\n    \n    df_conf = df_p[['Infected', 'Recovered']].sum(axis=1)\n    perday = np.zeros((len(df_conf),))\n    perday[0] = np.nan\n    perday[1:] = df_conf.iloc[1:].values - df_conf.iloc[:-1].values\n   
 df_p.loc[:,'Daily new cases'] = perday\n    df_pA = df_p[~df_p['Used in Train']]\n    esp = len(df_pA)\n    title =  translate(r)\n    idx = df_p['Data']\n    df_d.set_index('Data',inplace = True)\n    idx_d = df_d.index\n    idx_d = idx_d.intersection(idx)\n    \n    perday = np.zeros((len(df_d),))\n    perday[0] = np.nan\n    perday[1:] = df_d['Confirmados'].iloc[1:].values - df_d['Confirmados'].iloc[:-1].values\n    df_d.loc[:,'Daily new cases (Real data)'] = perday\n\n    df_d = df_d.loc[idx_d].rename(columns = {'At':f'Active Cases (Real data)',\n                                           'Rt':'Recovered Cases (Real data)',\n                                           'Confirmados': 'Confirmed (Real data)'})\n    \n    \n    ####################               PLOT INFECTADOS                #######################\n    plot_unique(df_p['Infected'], df_d,'Active Cases (Real data)',f'Infected - {title}',fs,f'{pasta_save}/Infected\\\\{r}_Inf.png',idx, esp = esp, **kwargs, col = 'Infected')\n    ####################               PLOT Recovered                 #######################\n    plot_unique(df_p['Recovered'], df_d,'Recovered Cases (Real data)',f'Cumulative Recovered - {title}',fs,f'{pasta_save}/recovered\\\\{r}_Rec.png',idx, esp = esp, **kwargs, col = 'Recovered')\n    ####################               PLOT ÓBITOS                    #######################\n    # plot_unique(df_p['Death'], df_d,'Deceased (Real data)',f'Cumulative Deceased - {title}',fs,f'{pasta_save}/Death\\\\{r}_Death.png',idx, esp = esp, **kwargs, col = 'Death')\n    ###################                PLOT R(t)                      #######################\n    plot_unique(df_p['Rt'], None,None,r'$R_0(t)$ - '+f'{title}',fs,f'{pasta_save}/Rt\\\\{r}_Rt.png',idx, esp = esp,is_rt = True, **kwargs, col = 'Rt')\n    ###################                PLOT R(t)                      #######################\n    plot_unique(df_p[['Infected', 'Recovered']].sum(axis=1), df_d,'Confirmed (Real data)',f'Cumulative Confirmed - {title}',fs,f'{pasta_save}/Confirmed\\\\{r}_Conf.png',idx, esp = esp)\n\n\n    \n    df_aux = movingAvg(df_d['Daily new cases (Real data)'],7)\n    df_d.loc[:,'Daily new cases (Real data - MM)'] = df_aux\n    df_pA.set_index('Data',inplace=True)\n    idx_a = df_aux.index\n    idx_a = idx_a.intersection(df_pA.index)\n    df_aux1 = df_aux.loc[idx_a]\n    df_pA = df_pA.loc[idx_a]\n\n    ###################                PLOT New Cases                      #######################\n    plot_unique(df_p['Daily new cases'], df_d, 'Daily new cases (Real data)',\n                f'Daily New Cases - {title}', fs, f'{pasta_save}/Newcases/{r}_newCases.png', idx, esp=esp, leg=['Daily new cases', 'Moving Average of real data'])\n    ##################                PLOT New Cases MM                   #######################\n    plot_unique(df_p['Daily new cases'], df_d, 'Daily new cases (Real data - MM)',\n                f'Daily New Cases - {title}', fs, f'{pasta_save}/Newcases/MM/{r}_newCasesMM.png', idx, esp=esp, leg=['Daily new cases', 'Moving Average of real data'])\n\n\ndef plot_mult(df_data, df_pred,r,pasta,pasta_graph,fs = 24,**kwargs):\n    pasta_save = f'{pasta}/{pasta_graph}'\n    dc = pd.read_csv(f'data/dados - Agrupamento.csv')\n    if not os.path.isdir(pasta_save):\n        os.makedirs(pasta_save, exist_ok = True)\n    if r in df_data[\"SP-Subregião\"].unique():\n        df_d = df_data[df_data[\"SP-Subregião\"] == r]\n        df_p = []\n        for df in df_pred:\n            
df_p.append(df[df[\"SP-Subregião\"] == r])\n    elif r in dc['Região'].unique():\n        est = dc[dc['Região'] == r]['SP-Subregião'].unique()\n        df_d = pd.DataFrame()\n        \n        df_p = pd.DataFrame()\n        for e in est:\n            df_d =df_d.append(df_data[df_data['SP-Subregião'] == e])\n            df_p =df_p.append(df_pred[df_pred['SP-Subregião'] == e])\n        df_d = df_d.groupby(['Data'], as_index=False).sum()\n        df_p = df_p.groupby(['Data'], as_index=False).sum()\n        df_p.loc[:,'Rt'] = df_p['Rt']/len(df_p['Rt'])\n    idx = df_p[0].set_index(df_p[0]['Data']).index\n    title =  translate(r)\n    \n    df_d.set_index('Data',inplace = True)\n    idx_d = df_d.index\n    idx_d = idx_d.intersection(idx)\n    df_d = df_d.loc[idx_d].rename(columns = {'At':f'Active Cases (Real data)',\n                                           'Rt':'Recovered Cases (Real data)',\n                                           'Óbitos':'Deceased (Real data)',\n                                           'Confirmados': 'Confirmed (Real data)'})\n    leg = [r'Transient $\\beta(t)$', r'Constant $\\beta$']\n    ####################               PLOT INFECTADOS                #######################\n    plt_df = []\n    for df in df_p:\n        plt_df.append(df['Infected'])\n    plot_unique(plt_df, df_d,'Active Cases (Real data)',f'Infected - {title}',fs,f'{pasta_save}/Infected\\\\{r}_Inf.png',idx, leg = leg, **kwargs)\n    ####################               PLOT Recovered                 #######################\n    plt_df = []\n    for df in df_p:\n        plt_df.append(df['Recovered'])\n    plot_unique(plt_df, df_d,'Recovered Cases (Real data)',f'Recovered - {title}',fs,f'{pasta_save}/Recovered\\\\{r}_Rec.png',idx, leg = leg, **kwargs)\n    ####################               PLOT ÓBITOS                    #######################\n    plt_df = []\n    for df in df_p:\n        plt_df.append(df['Death'])\n    plot_unique(plt_df, df_d,'Deceased (Real data)',f'Deceased - {title}',fs,f'{pasta_save}/Death\\\\{r}_Death.png',idx, leg = leg, **kwargs)\n    ###################                PLOT R(t)                      #######################\n    plt_df = []\n    for df in df_p:\n        plt_df.append(df['Rt'])\n    plot_unique(plt_df, None,None,f'R(t) - {title}',fs,f'{pasta_save}\\\\{r}_Rt.png',idx, leg = leg,is_rt = True, **kwargs)\n    ###################                PLOT R(t)                      #######################\n    plt_df = []\n    for df in df_p:\n        plt_df.append(df[['Infected', 'Recovered', 'Death']].sum(axis=1))\n    plot_unique(plt_df, df_d,'Confirmed (Real data)',f'Accumulated Confirmed - {title}',fs,f'{pasta_save}/Confirmed\\\\{r}_Conf.png',idx, leg = leg, **kwargs)\n\ndef eval_ivp(df,interval,y0, method = 'LSODA'):\n    def sird(t,y,df):\n        t = int(np.rint(t))\n        beta = df['beta(t)'].iloc[t]\n        gamma_d = df['gamma_Death'].iloc[t]\n        gamma_r = df['gamma_Rec'].iloc[t]\n        S,I,R,D = y\n        dS = -I * beta * S\n        dI =  I * beta * S - I*(gamma_d + gamma_r)\n        dR =  I * gamma_r\n        dD =  I * gamma_d\n        return [dS, dI, dR,dD]\n    IVP = solve_ivp(sird,interval, y0,\n                        t_eval=np.arange(interval[0], interval[1]+1, 1), vectorized=False, method=method, args = ([df]))\n    return IVP\n\ndef run_ivp(df,reg,dc):\n    df = df.drop('Unnamed: 0', axis=1, errors='ignore')\n    df = df.round({'Infected': 0, 'Recovered': 0, 'Death': 0})\n    df['Data'] = pd.to_datetime(df.Data)\n    
df.sort_values(by='Data', inplace=True, ascending=True)\n    d0 = df.Data.iloc[0]\n    idx = (df.Data - d0).dt.days\n    df.set_index(idx.values, inplace=True)\n    df['Data'] = df['Data'].dt.strftime('%m/%d/%Y')\n\n    tlim = len(df) - 1\n    t = [0,tlim]\n    pop = get_pop(reg, dc)\n    cols = ['Infected', 'Recovered', 'Death']\n\n    # normalise the initial compartments by the population size\n    y0 = df[cols].iloc[0].to_numpy()\n#     y0[0] = y0[0] + 300\n    y0 = y0 / pop\n    s0 = 1 - y0.sum()\n    y0 = [s0] + y0.tolist()\n\n    IVP = eval_ivp(df,t,y0, method='RK45')\n\n    for i,c in enumerate(cols,1):\n        df[c] = IVP.y[i] * pop\n    df = df.round({'Infected':0, 'Recovered':0, 'Death':0})\n    S = 1 - (IVP.y[1] +IVP.y[2]+IVP.y[3])\n    new_rt = df['beta(t)'] / (df['gamma_Rec'] + df['gamma_Death']) * S\n    df['Rt'] = new_rt\n    return df\n\ndef turn_weekly(df, period='W'):\n    df = df.set_index('Data')\n    df.index = pd.to_datetime(df.index)\n    df = df.resample(period).mean()\n    df.index = df.index.strftime('%m/%d/%Y')\n    df['Data'] = df.index\n    df.index.name=''\n\n    return df\n\ndef get_SP_data():\n    file_d = 'data/Dataset SP - SP_Data.csv'\n    df_data = pd.read_csv(file_d, index_col = False)\n    df_data.set_index(df_data['Data'], inplace=True)\n    df_data = df_data.rename(columns={'Vacinados 1':'Vac 1', 'Vacinados 2':'Vac 2', 'Recuperados': 'Rt'}).fillna(0)\n    df_data['At'] = df_data['Confirmados'] - df_data[['Rt', 'Óbitos']].sum(axis=1)\n    df_data['SP-Subregião'] = 'São Paulo (Estado)'\n\n    return df_data\n\ndef run_unifica(dtime,case, prev=0,regs = None, unify = True, crop = 10, MM = False,\n                dia_ini = 10, dia_fim = 30, rerun = False,save_sufix='',\n                pasta=None, inner_dir=False, is_SIR=False, dir_sufix=None, is_weekly=False, gen_graphs = True, reuse = False,\n                recalc_rt=False):\n    if case == 'state':\n        if pasta is None:\n            pasta = f'Run_States/{dtime}'\n        dc = pd.read_csv('data/dados - states.csv')\n        file_d = 'data/dados - Data_states.csv'\n        df_data = pd.read_csv(file_d, index_col = False)\n        if regs is None:\n            regs = ['Brasil', 'Norte', 'Nordeste', 'Sul', 'Sudeste', 'Centro-Oeste']\n    elif case == 'subregion':\n        dc = pd.read_csv('data/dados - subregions.csv')\n        if pasta is None:\n            pasta = f'Run_Semanal/{dtime}'\n        file_d = 'data/dados - Data_subregions.csv'\n        df_data = pd.read_csv(file_d, index_col = False)\n        if regs is None:\n            regs = df_data['SP-Subregião'].unique().tolist()\n        df_data = df_data.append(get_SP_data())\n    elif case == 'city':\n        dc = pd.read_csv('data/dados - Cidades.csv')\n        if pasta is None:\n            pasta = f'Run_City/{dtime}'\n        file_d = 'data/dados - Data_cidades.csv'\n        df_data = pd.read_csv(file_d, index_col = False)\n        if regs is None:\n            regs = df_data['SP-Subregião'].unique().tolist()\n    elif case == 'JHU':\n        if pasta is None:\n            pasta = f'Run_JHU/{dtime}'\n        file_d = 'JHU'\n        if regs is None:\n            regs = ['Canada', 'Germany']\n        df_data = pd.DataFrame()\n        for r in regs:\n            df_aux = read_global(r)\n            df_data = df_data.append(df_aux)\n    elif case == 'SP-dataset':\n        dc = pd.read_csv('data/dados - subregions.csv')\n        if pasta is None:\n            pasta = f'Run_Semanal/{dtime}'\n        df_data = get_SP_data()\n        if regs is None:\n            regs = ['São Paulo (Estado)']\n 
   elif case == 'BR-dataset':\n        if pasta is None:\n            pasta = f'Run_States/{dtime}'\n        dc = pd.read_csv('data/dados - states.csv')\n\n        df_data = pd.read_csv('data/cases-brazil-states.csv')\n        df_data = df_data[df_data['state'] == 'TOTAL']\n\n        df_data = df_data[['date', 'deaths', 'totalCases', 'recovered', 'vaccinated', 'vaccinated_second']]\n        rc = df_data['totalCases'].values[:-21] - df_data['deaths'].values[:-21]\n        df_data['Rt'] = np.zeros((21,)).tolist()+  rc.tolist()\n        df_data['At'] = df_data['totalCases'] - df_data[['deaths','Rt']].sum(axis=1)\n\n        df_data = df_data.rename(columns={\n            'deaths': 'Óbitos',\n            'totalCases': 'Confirmados',\n            'vaccinated':'Vac 1',\n            'vaccinated_second': 'Vac 2',\n            'date': 'Data'\n        })\n        date = pd.to_datetime(df_data['Data'])\n        df_data['Data'] = date.dt.strftime('%m/%d/%Y')\n        df_data['SP-Subregião'] = 'Brasil'\n        if regs is None:\n            regs = ['Brasil']\n    else:\n        pasta = f'data/resulted_data/neural_network/temp/'\n        df_data = pd.read_csv(f'data/filtered_data/subregions/{case}/GENERAL_COVID_DATA.csv')\n        df_data = (df_data[df_data[\"REGION\"] == case])\n        df_data = df_data.rename(columns={\"DATE\": \"Data\", \"ACTIVE_CASES\": \"At\", \"CUMULATIVE_RECOVERED\": \"Rt\", \"CUMULATIVE_DEATHS\": \"Óbitos\", \"CUMULATIVE_CASES\": \"Confirmados\", \"REGION\": \"SP-Subregião\"})\n        regs = [case]\n        df_data = df_data.iloc[-33:-2]\n\n    df_geral = pd.DataFrame()\n    df_geral_min = pd.DataFrame()\n    df_geral_max = pd.DataFrame()\n    data_cpy = df_data.copy()\n    if unify:\n        for r in regs:\n            [df_,df_min,df_max] = unifica(dia_ini, dia_fim,prev, r, pasta = pasta,df_geral = None, crop = crop,\n                               inner_dir = inner_dir, df_data=df_data, MM = MM, dir_sufix=dir_sufix,is_weekly=is_weekly,reuse=reuse,recalc_rt=recalc_rt)\n            \n            if rerun:\n                df_ = run_ivp(df_,r,dc)\n                df_min = run_ivp(df_min,r,dc)\n                df_max = run_ivp(df_max,r,dc)\n\n            df_geral = df_geral.append(df_)\n            df_geral_min = df_geral_min.append(df_min)\n            df_geral_max = df_geral_max.append(df_max)\n            \n        print(df_geral)\n        date_str = df_geral[df_geral['Used in Train']]['Data'].iloc[-1]\n        dtime = datetime.strptime(date_str,'%m/%d/%Y')\n        pred_day = dtime.strftime('%Y-%b-%d')+save_sufix\n        if case == 'state' or case == 'BR-dataset':\n            dir_res = f'Val-Results-states/{pred_day}/{dia_ini}-{dia_fim}'\n        elif case == 'subregion' or case == 'SP-dataset':\n            dir_res = f'Val-Results/{pred_day}/{dia_ini}-{dia_fim}' \n        elif case == 'city':\n            dir_res = f'Val-Results-City/{pred_day}/{dia_ini}-{dia_fim}' \n        elif case == 'JHU':\n            dir_res = f'Val-Results-JHU/{pred_day}/{dia_ini}-{dia_fim}' \n        else:\n            dir_res = f'data/resulted_data/neural_network/{case}'\n        if MM:\n            dir_res = dir_res + '/MM'\n        if not os.path.isdir(dir_res):\n                os.makedirs(dir_res, exist_ok = True)\n        \n        df_geral.to_csv(f'{dir_res}/pred_all.csv')\n        df_geral_min.to_csv(f'{dir_res}/pred_all_min.csv')\n        df_geral_max.to_csv(f'{dir_res}/pred_all_max.csv')\n        \n    else:\n\n        df_geral = unifica(dia_ini, dia_fim,prev, regs[0], pasta = 
pasta,df_geral = df_geral, crop = 10, inner_dir = False, file1=file_d,is_weekly=is_weekly)\n        date_str = df_geral[df_geral['Used in Train']]['Data'].iloc[-1]\n        dtime = datetime.strptime(date_str,'%m/%d/%Y')\n        pred_day = dtime.strftime('%Y-%b-%d')\n        if case == 'state' or case == 'BR-dataset':\n            dir_res = f'Val-Results-states/{pred_day}/{dia_ini}-{dia_fim}'\n        elif case == 'subregion' or case == 'SP-dataset':\n            dir_res = f'Val-Results/{pred_day}/{dia_ini}-{dia_fim}'\n        elif case == 'city':\n            dir_res = f'Val-Results-City/{pred_day}/{dia_ini}-{dia_fim}'\n        elif case == 'JHU':\n            dir_res = f'Val-Results-JHU/{pred_day}/{dia_ini}-{dia_fim}'\n        else:\n            # mirror the unify branch so dir_res is always defined\n            dir_res = f'data/resulted_data/neural_network/{case}'\n\n        if MM:\n            dir_res = dir_res + '/MM'\n    df_att = pd.DataFrame()\n    if gen_graphs:\n        for r in regs:\n            print(r)\n            df_geral_t = pd.read_csv(f'{dir_res}/pred_all.csv',index_col = 0)\n            df_geral_min = pd.read_csv(f'{dir_res}/pred_all_min.csv',index_col = 0)\n            df_geral_max = pd.read_csv(f'{dir_res}/pred_all_max.csv',index_col = 0)\n\n            df_plt = get_data(df_geral_t,r)\n            df_plt_min = get_data(df_geral_min,r)\n            df_plt_max = get_data(df_geral_max,r)\n            df_data = get_data(data_cpy, r)\n            if is_weekly:\n                df_data = turn_weekly(df_data)\n                df_data['SP-Subregião']=r\n            if MM:\n                date_cpy = df_data['Data']\n                df_data = movingAvg(df_data, 7, ['At', 'Rt','Confirmados','Óbitos'])\n                df_data['SP-Subregião']=r\n                df_data['Data'] = date_cpy\n            if len(df_plt) == 0:\n                continue\n            if rerun:\n                plot(df_data, df_plt,r,dir_res,'Graficos-att_rerun',fs = 24)\n                plot(df_data, df_plt_min,r,dir_res,'Graficos-min_rerun',fs = 24)\n                plot(df_data, df_plt_max,r,dir_res,'Graficos-max_rerun',fs = 24)\n            else:\n                plot(df_data, df_plt,r,dir_res,'Graficos-att',fs = 24)\n                plot(df_data, df_plt_min,r,dir_res,'Graficos-min',fs = 24)\n                plot(df_data, df_plt_max,r,dir_res,'Graficos-max',fs = 24)\n\n","sub_path":"algorithms/SIRD_NN/Utils/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":43136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"337745345","text":"\"\"\"Support for Helios ventilations with an easyControls interface\"\"\"\n\nimport requests\n\nfrom homecontrol.dependencies.entity_types import Item\n\n\nclass HeliosVentilation(Item):\n    \"\"\"The ventilation item\"\"\"\n    async def init(self):\n        \"\"\"Initialise the HeliosVentilation item\"\"\"\n        self.core.tick_engine.tick(30)(self.ensure_login)\n\n    async def ensure_login(self) -> None:\n        \"\"\"\n        The Helios easyControls system has a global login management.\n        That means to keep the requests working\n        we just need to send a login request every ten minutes.\n        \"\"\"\n        requests.post(\n            f\"http://{self.cfg['host']}/info.htm\",\n            data={'v00402': 'helios'})\n\n    async def start_party(self,\n                          duration: int = None,\n                          party_level: int = None) -> None:\n        \"\"\"Action: Start party mode\"\"\"\n        duration = duration or self.cfg[\"default_party_duration\"]\n        party_level = party_level or self.cfg[\"default_party_level\"]\n        requests.post(f\"http://{self.cfg['host']}/party.htm\", data={\n            \"v00091\": duration,\n            \"v00092\": party_level,\n            \"v00093\": 0,\n            \"v00094\": 1  # Activate party mode\n        })\n\n    async def stop_party(self) -> None:\n        \"\"\"Action: Stop party mode\"\"\"\n        requests.post(f\"http://{self.cfg['host']}/party.htm\", data={\n            \"v00094\": 0,  # Deactivate party mode\n        })\n\n    async def set_speed(self, value: int) -> dict:\n        \"\"\"Setter for speed\"\"\"\n        await self.states.update(\"speed\", value)\n        requests.post(f\"http://{self.cfg['host']}/index.htm\", data={\n            \"v00102\": value,  # Speed register\n        })\n        return {\"speed\": value}\n\n    async def stop(self) -> None:\n        \"\"\"Stops the item\"\"\"\n        self.core.tick_engine.remove_tick(30, self.ensure_login)\n","sub_path":"homecontrol/modules/helios_ventilation/module.py","file_name":"module.py","file_ext":"py","file_size_in_byte":1867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"13477058","text":"import os\nfrom datetime import datetime\nimport pytz\n\nfrom datalabframework import logging\nfrom datalabframework.yaml import yaml\nfrom datalabframework._utils import merge\n\nimport json\nimport jsonschema\n\nfrom dotenv import load_dotenv\nfrom jinja2 import Environment\n\n# metadata files are cached once read the first time\ndef read(file_paths=None):\n    \"\"\"\n    Return all profiles, stored in a nested dictionary\n    profiles are merged over the list provided profiles. list order determines override\n    each profile name\n    :param file_paths: list of yaml files\n    :return: dict of profiles\n    \"\"\"\n    profiles = {}\n\n    if not file_paths:\n        file_paths = []\n    \n    for filename in file_paths:\n        if os.path.isfile(filename):\n            with open(filename, 'r') as f:\n                try:\n                    docs = list(yaml.load_all(f))\n                except yaml.YAMLError as e:\n                    if hasattr(e, 'problem_mark'):\n                        mark = e.problem_mark\n                        logging.error(\"Error loading yml file {} at position: (%s:%s): skipping file\".format(filename, mark.line+1, mark.column+1))\n                        docs = []\n                finally:\n                    for doc in docs:\n                        doc['profile'] = doc.get('profile', 'default')\n                        profiles[doc['profile']] = merge(profiles.get(doc['profile'],{}), doc)\n\n    return profiles\n\ndef inherit(profiles):\n    \"\"\"\n    Modify profiles to inherit from default profile\n    :param profiles: input dict of profiles\n    :return: profile\n    \"\"\"\n\n    # inherit from default for all other profiles\n    for k in profiles.get('default', {}).keys():\n        for p in profiles.keys() - 'default':\n            profiles[p][k] = merge(profiles['default'][k], profiles[p].get(k))\n\n    return profiles\n\ndef render(metadata, dotenv_path=None, max_passes=5):\n    \"\"\"\n    Renders jinja expressions in the given input metadata.\n    jinja templates can refer to the dictionary itself for variable substitution\n\n    :param metadata: dict, input metadata with values containing jinja templates\n    :param dotenv_path: file to export as env variables\n    :param max_passes: max number of rendering passes\n    :return: dict, rendered dictionary\n    \"\"\"\n\n    # get env variables from .env file\n    if dotenv_path and os.path.isfile(dotenv_path):\n        load_dotenv(dotenv_path=dotenv_path)\n\n    env = Environment()\n    env.globals['env'] = lambda key, value=None: os.getenv(key, value)\n    env.globals['now'] = lambda tz=None, format='%Y-%m-%d %H:%M:%S': datetime.strftime(datetime.now(pytz.timezone(tz if tz else 'UTC')), format)\n    \n    doc = json.dumps(metadata)\n\n    rendered = metadata\n\n    for i in range(max_passes):\n        dictionary = json.loads(doc)\n\n        #rendering with jinja\n        template = env.from_string(doc)\n        doc = template.render(dictionary)\n\n        # all done, or more rendering required?\n        rendered = json.loads(doc)\n        if dictionary == rendered:\n            break\n\n    return rendered\n\ndef v(d, schema):\n    message=None\n    try:\n        jsonschema.validate(d, schema)\n        return\n    except jsonschema.exceptions.ValidationError as e:\n        message  = f'{e.message} \\n\\n## schema path:\\n\\'{\"/\".join(e.schema_path)}\\'\\n\\n'\n        message += f'## metadata schema definition {\"for \" + str(e.parent) if e.parent else 
\"\"}:'\n        message += f'\\n{yaml.dump(e.schema)}'\n    \n    if message:\n        raise ValueError(message)\n        \ndef validate_schema(md, schema_filename):\n    dir_path = os.path.dirname(os.path.realpath(__file__))\n    filename = os.path.abspath(os.path.join(dir_path, 'schemas/{}'.format(schema_filename)))\n    with open(filename) as f:\n        v(md, yaml.load(f))\n\ndef validate(md):\n\n    # validate data structure\n    validate_schema(md, 'top.yml')\n        \n    # _validate_schema(md['loggers'], 'loggers.yml')\n\n    # for d in md['providers']:\n    #     _validate_schema(d, 'provider.yml')\n    #\n    # for d in md['resources']:\n    #     _validate_schema(d, 'resource.yml')\n\n    # validate semantics\n    providers = md.get('providers', {}).keys()\n    for resource_alias, r in md.get('resources',{}).items():\n        resource_provider = r.get('provider')\n        if resource_provider and resource_provider not in providers:\n            print(\n                f'resource {resource_alias}: given provider \"{resource_provider}\" does not match any metadata provider')\n\ndef load(profile=None, file_paths=None, dotenv_path=None, factory_defaults=True):\n    \"\"\"\n    Load the profile, given a list of yml files and a .env filename\n    profiles inherit from the defaul profile, a profile not found will contain the same elements as the default profile\n\n    :param profile:\n    :param file_paths:\n    :param dotenv_path:\n    :return:\n    \"\"\"\n    if profile is None:\n        profile = 'default'\n\n    if factory_defaults:\n        # get the default metadata configuration file\n        dir_path = os.path.dirname(os.path.realpath(__file__))\n        default_metadata_file = os.path.abspath(os.path.join(dir_path, 'schemas/default.yml'))\n    \n        #prepend the default configuration\n        file_paths = [default_metadata_file] + file_paths\n\n    profiles = read(file_paths)\n\n    # empty profile if profile not found\n    if profile not in profiles.keys():\n        if file_paths:\n            message = '\\nList of loaded metadata files:\\n'\n            for f in file_paths:\n                message += f'  - {f}\\n'\n            message += '\\nList of available profiles:\\n'\n            for p in profiles.keys():\n                message += f'  - {p}\\n'   \n        raise ValueError(f'Profile \"{profile}\" not found.\\n{message}')\n\n    # read metadata, get the profile, if not found get an empty profile\n    profiles = inherit(profiles)\n    metadata = profiles[profile]\n\n    # render any jinja templates in the profile\n    md = render(metadata, dotenv_path)\n    \n    # validate\n    validate(md)\n    \n    return md\n","sub_path":"datalabframework/metadata/reader.py","file_name":"reader.py","file_ext":"py","file_size_in_byte":5977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"295201566","text":"'''\nCreate Triqler input files from DIA-NN output files.\n'''\n\nimport os\nimport sys\nimport glob\nfrom typing import Dict\n\nimport numpy as np\nimport pandas as pd\n\nfrom ..triqler import __version__, __copyright__\nfrom .. import parsers\n\n\ndef main():\n  print('triqler.convert.diann version %s\\n%s' % (__version__, __copyright__))\n  print('Issued command:', os.path.basename(__file__) + \" \" + \" \".join(map(str, sys.argv[1:])))\n  \n  args, params = parseArgs()\n  \n  # hack for windows\n  if len(args.in_file) == 1 and '*' in args.in_file[0]:\n    args.in_file = glob.glob(args.in_file[0])\n  \n  diann_to_triqler(args.in_file, args.file_list_file, args.out_file, params)\n\n\ndef parseArgs():\n  import argparse\n  apars = argparse.ArgumentParser(\n      description='Converts DIA-NN output files to Triqler input format.',\n      formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n  \n  requiredNamed = apars.add_argument_group('required arguments')\n  \n  apars.add_argument('in_file', default=None, metavar = \"IN_FILE\",\n                     help='''DIA-NN output file\n                          ''')\n  \n  requiredNamed.add_argument('--file_list_file', metavar='L', \n                     help='''Simple tab separated file with run names in first column and condition in second column. \n                             The run names should be identical to the entries in the \"Run\" column of the DIA-NN output file.\n                             ''',\n                     required = True)\n\n  apars.add_argument('--out_file', default = \"triqler_input.tsv\", metavar='OUT', \n                     help='''Path to triqler input file (writing in TSV format).\n                          ''')\n  \n  # ------------------------------------------------\n  args = apars.parse_args()\n  \n  params = dict()\n  return args, params\n\n\ndef diann_to_triqler(diann_file_path: str, file_list_file: str, triqler_input_file: str, params: Dict):\n  file_list_df = parse_file_list(file_list_file)\n  \n  sample_mapper = dict(zip(file_list_df[\"run\"], file_list_df[\"sample\"]))\n  condition_mapper = dict(zip(file_list_df[\"run\"], file_list_df[\"condition\"]))\n  \n  df = pd.read_csv(diann_file_path, sep='\\t')\n  \n  df[\"run\"] = df[\"Run\"].map(sample_mapper)\n  df[\"condition\"] = df[\"Run\"].map(condition_mapper)\n  df[\"charge\"] = df[\"Precursor.Charge\"]\n  df[\"searchScore\"] = -np.log(df[\"Q.Value\"])\n  df[\"intensity\"] = df['Precursor.Quantity']\n  df[\"peptide\"] = df[\"Stripped.Sequence\"]\n  df[\"proteins\"] = df[\"Protein.Ids\"]\n  triqler_input_df = df[[\"run\", \"condition\", \"charge\", \"searchScore\", \"intensity\", \"peptide\", \"proteins\"]]\n\n  triqler_input_df.to_csv(triqler_input_file, sep='\\t', index=False)\n\n\ndef parse_file_list(file_list_file: str):\n  file_list_df = pd.read_csv(file_list_file, sep='\\t', header=None)\n  \n  if len(file_list_df.columns) < 2:\n    raise ValueError(\"Too few columns present in file list mapping, need at least 2 columns: run, condition\")\n  \n  if len(file_list_df.columns) > 4:\n    raise ValueError(\"Too many columns present in file list mapping, can at most have 4 columns: run, condition, sample, fraction\")\n  \n  file_list_df.columns = [\"run\", \"condition\", \"sample\", \"fraction\"][:len(file_list_df.columns)]\n  \n  if \"sample\" not in file_list_df.columns:\n    file_list_df[\"sample\"] = file_list_df[\"run\"]\n  \n  if \"fraction\" not in file_list_df.columns:\n    file_list_df[\"fraction\"] = -1\n  \n  
return file_list_df\n  \n\nif __name__ == \"__main__\":\n   main()\n","sub_path":"triqler/convert/diann.py","file_name":"diann.py","file_ext":"py","file_size_in_byte":3406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"239382494","text":"from turtle import Turtle, Screen\nfrom prettytable import PrettyTable\n\ntimmy = Turtle()\nprint(timmy)\ntimmy.shape(\"turtle\")\ntimmy.color(\"coral\")\ntimmy.forward(100)\n\nmy_screen = Screen()\nprint(my_screen.canvheight)\nmy_screen.exitonclick()\n\ntable = PrettyTable()\ntable.add_column(\"Pokemon name\", [\"Pikachu\", \"Squirtle\", \"Charmander\"])\ntable.add_column(\"Type\", [\"Electric\", \"Water\", \"Fire\"])\n\nprint(table)\n","sub_path":"Day16/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"74148045","text":"import time\nimport datetime\nimport json\n\ndef timing_function(func):\n    '''\n    :param func: service name or something\n    :return: will be return json\n    '''\n    json_data = {}\n    def wrapper(*args):\n\n        print(\"Timing Function called from  {} .\".format(func.__name__))\n        json_data[\"called_function\"] = func.__name__\n\n        time_of_start = datetime.datetime.now()\n        print(\"Started at {}\".format(time_of_start))\n        json_data[\"started_at\"] = datetime.datetime.now()\n\n        for i,arg in enumerate(args):\n            i += 1\n            print(\"Input Parameter {}\".format(i) ,end=\" : \")\n            print(arg)\n            json_data[\"Input Parameter {}\".format(i)] = arg\n\n        started_time = time.time()\n        output_data = func()\n        finished_time = time.time()\n\n        time_of_end = datetime.datetime.now()\n        print(\"Finished at {}\".format(time_of_end))\n        json_data[\"finished_at\"] = time_of_end\n\n        for i,output in enumerate(output_data):\n            i += 1\n            print(\"Output Parameter {}\".format(i) ,end=\" : \")\n            print(output)\n            json_data[\"Output Parameter {}\".format(i)] = output\n\n        print(\"Runtime of service : {}\".format(finished_time-started_time))\n\n        print(\"JSON : \",end=\" : \")\n        print(json_data)\n\n    return wrapper\n\n@timing_function\ndef test_function(*args):\n    '''\n    :param args: parameters\n    :return: outputs\n    '''\n    data = [[\"outputTest\"],{'output1': '200', 'output2': '404', 'output3': '505'}]\n    list_data = []\n    for i in (range(0, 1000000)):\n        list_data.append(i)\n    return data\ntest_function([\"deneme\",\"deneme2\"],{'Name': 'Zara', 'Age': 7, 'Class': 'First'})\n","sub_path":"log_decorator/logPy.py","file_name":"logPy.py","file_ext":"py","file_size_in_byte":1687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"142202667","text":"\"\"\"\nBecause I'm using importlib.resources, which did not exist until python 3.7, this does not support < 3.7 unless I can\nfigure out another method for importing package resources.\n\"\"\"\nimport setuptools\n\nlong_description = \"\"\"\nA collection of modern flat themes inspired by Bootstrap. There are more than a dozen built-in themes, and you also have \nthe ability to easily create your own.\n\n## Links\n- **Documentation:** https://ttkbootstrap.readthedocs.io/en/latest/  \n- **GitHub:** https://github.com/israel-dryer/ttkbootstrap\n\"\"\"\n\nsetuptools.setup(\n    name=\"ttkbootstrap\",\n    version=\"0.4.6\",\n    author=\"Israel Dryer\",\n    author_email=\"israel.dryer@gmail.com\",\n    description=\"A collection of modern ttk themes inspired by Bootstrap\",\n    long_description=long_description,\n    long_description_content_type=\"text/markdown\",\n    classifiers=[\n        \"Programming Language :: Python :: 3\",\n        \"License :: OSI Approved :: MIT License\",\n        \"Operating System :: OS Independent\",\n    ],\n    url=\"https://github.com/israel-dryer/ttkbootstrap\",\n    package_dir={\"\": \"src\"},\n    packages=setuptools.find_packages(where=\"src\"),\n    package_data={\"\": [\"*.json\", \"*.ttf\"]},\n    include_package_data=True,\n    install_requires=[\"pillow>=8.2.0\"],\n    python_requires=\">=3.6\",\n)","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"224778620","text":"def primes_sieve(limit):\n    limitn = limit+1\n    not_prime = [False] * limitn\n    primes = []\n\n    for i in range(2, limitn):\n        if not_prime[i]:\n            continue\n        for f in range(i*2, limitn, i):\n            not_prime[f] = True\n\n        primes.append(i)\n\n    return primes\n\nprimes = set(primes_sieve(1000000));\nexclude = ['2','4','5','6','8','0']\ndef circularPrime(n):\n    ss = str(n);\n    for i in range(n):\n        if ss[-1] in exclude:\n            return 0;\n        if int(ss) not in primes:\n            return 0;\n        ss = ss[-1] + ss[:-1];\n    return 1;\n\ngg = 0;\nfor num in primes:\n    gg += circularPrime(num);\nprint (gg);\n","sub_path":"soluciones/semana7/papsdpasldf.py","file_name":"papsdpasldf.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"383852670","text":"# seguindo tutorial em: http://danielhnyk.cz/creating-your-own-estimator-scikit-learn/\n\n# Criar um objeto compatível com sklearn\nfrom sklearn.linear_model import LinearRegression, RidgeCV, LassoCV, LassoLarsCV, ElasticNetCV\nfrom sklearn.base import BaseEstimator, RegressorMixin\nimport numpy as np\nfrom numba import jit, autojit, njit, prange          # import the decorator\n\n''' \n@njit(parallel=True)\ndef calcInteraction(X, net):\n  H = np.zeros((X.shape[0], net.shape[0]))\n  for i in prange(X.shape[0]):\n    for j in range(net.shape[0]):      \n      if np.any(X[i,:]==0):\n        H[i,j] = 0.0\n      else:\n        H[i,j] = np.prod(X[i,:]**net[j,:])\n  return H\n'''\n\nclass ITELM(BaseEstimator, RegressorMixin):\n    \"\"\"Interaction-Transformation Extreme Machine Learning algorithm\"\"\"\n    \n    # o método que inicializar as configurações da rede\n    # , np.sin, np.tan, np.log1p, np.tanh\n    def __init__(self, n_inter = 100, f_set = [np.cos, np.sqrt, np.sin, np.tan, np.log1p, np.tanh], exp_range = (0,3), modelCV = LassoLarsCV(), maxInt = 3, seed = None):\n        self.n_inter   = n_inter\n        self.n_funcs   = len(f_set) + 1\n        self.f_set     = f_set\n        self.exp_range = exp_range\n        self.seed      = seed\n        self.modelCV = modelCV\n        self.maxInt  = maxInt\n    \n    def _createNetwork(self, X):\n        np.random.seed(self.seed)\n        sizeNet = (self.n_inter, X.shape[1])\n        min_e, max_e = self.exp_range\n        \n        self.network_ = np.random.randint(min_e, max_e + 1, size=sizeNet)\n        \n        for i in range(self.n_inter):\n            idx = np.flatnonzero(self.network_[i,:])\n            if len(idx) > self.maxInt:\n                perm = np.random.permutation(idx)\n                self.network_[i, perm[self.maxInt:]] = 0\n\n        #self.network_ = self.network_* np.random.choice([0,1], size=sizeNet, p=[0.7,0.3])\n                                   \n        self.network_ = np.unique(self.network_, axis=0)\n        self.n_inter  = self.network_.shape[0]\n\n        mask  = np.any(X==0, axis=0)\n        self.mask0_ = (X==0).sum(axis=0) > 0.7*X.shape[1]\n\n        \n        self.network_[:,mask]  = np.absolute(self.network_[:,mask])\n        #self.network_[:,self.mask0_] = 0\n        \n        return self\n\n    def _calcInteraction(self, X, net):\n      H = np.zeros((X.shape[0], net.shape[0]))\n      for i in range(X.shape[0]):\n        H[i,:] = np.prod(X[i,:]**self.network_,axis=1)\n      return H\n      \n    def _transformData(self, X):\n        H = np.ndarray((X.shape[0], self.n_funcs*self.n_inter))\n        #X[:,self.mask0_] = 1\n        \n        H[:, :self.n_inter] = self._calcInteraction(X, self.network_)\n        for i, f in enumerate(self.f_set):\n            idx_st  = (i+1)*self.n_inter\n            idx_end = (i+2)*self.n_inter\n            H[:, idx_st:idx_end] = f(H[:,:self.n_inter])        \n\n        maskNan         = np.any(np.isnan(H), axis=0)\n        maskInf         = np.any(np.isinf(H), axis=0)\n        maskBig         = np.any(np.abs(H) > 1e+100, axis=0)\n\n        H[:, maskNan]   = 0\n        H[:, maskInf]   = 0\n        H[:, maskBig]   = 0\n\n        return H\n    \n    # gera o modelo\n    def fit(self, X, y):\n        self._createNetwork(X)\n        \n        H                 = self._transformData(X)\n        self.maskNonZero_ = np.any(H!=0, axis=0)\n        self.maskNonZero_ = np.logical_and(self.maskNonZero_, np.absolute(H.std(axis=0)) > 1e-8)\n        \n        \n   
     if np.sum(self.maskNonZero_) <= 1:\n          Hmask = np.random.random((X.shape[0], 2)) # fit noise for now\n        else:\n          Hmask = H[:,self.maskNonZero_]\n        \n#        print(X[0,:]**self.network_[0,:])\n#        print(X[0,:]**self.network_[1,:])\n#        print(Hmask.shape, y.shape, self.n_inter, self.exp_range, Hmask[0,:10])#(corr==0).sum())\n        self.modelCV.fit(Hmask, y)\n        \n        return self\n    \n    # generate predictions\n    def predict(self, X):\n        try:\n            getattr(self, \"maskNonZero_\")\n        except AttributeError:\n            raise RuntimeError(\"You must train regressor before predicting data!\")\n        \n        H = self._transformData(X)\n        if np.sum(self.maskNonZero_) <= 1:\n          Hmask = np.random.random((X.shape[0], 2)) # fit noise for now\n        else:\n          Hmask = H[:,self.maskNonZero_]\n        \n        return self.modelCV.predict(Hmask)\n    \n    # compute the score\n    def score(self, X, y):\n        try:\n            getattr(self, \"maskNonZero_\")\n        except AttributeError:\n            raise RuntimeError(\"You must train regressor before predicting data!\")\n        \n        H = self._transformData(X)\n        if np.sum(self.maskNonZero_) <= 1:\n          Hmask = np.random.random((X.shape[0], 2)) # fit noise for now\n        else:\n          Hmask = H[:,self.maskNonZero_]\n        \n        if self.modelCV is None:\n            return 0.0\n        else:\n            return self.modelCV.score(Hmask, y)\n","sub_path":"Fabricio/ITELM.py","file_name":"ITELM.py","file_ext":"py","file_size_in_byte":4905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"211059868","text":"import csv\nimport numpy as np\nimport tensorflow.contrib.learn as skflow\nfrom sklearn import datasets, metrics\nfrom sklearn.cross_validation import KFold\nimport sys\nimport tensorflow as tf\n\nmaxInt = sys.maxsize\ndecrement = True\n\nwhile decrement:\n    # decrease the maxInt value by factor 10 \n    # as long as the OverflowError occurs.\n\n    decrement = False\n    try:\n        csv.field_size_limit(maxInt)\n    except OverflowError:\n        maxInt = int(maxInt/10)\n        decrement = True\n\nwith open('training_data_abridged.csv') as infile:\n    data = csv.DictReader(infile)\n\n    features = []\n    classes = []\n    for line in data:\n        p = line['pixels']\n        p = p.split()\n        #print (p)\n\n        p_2 = []\n        for i in p:\n            p_2.append(float(i))\n\n        c = [0,0,0]\n        c[int(line['class'])] = 1\n\n        features.append(p_2)\n        classes.append(c)\n\nfeatures = np.array(features)\nclasses = np.array(classes)\n\n\n\nclasses = np.argmax(classes, axis=1)\n#print(features.shape, \" \", classes.shape)\n\ndef max_pool_2x2(tensor_in):\n    return tf.nn.max_pool(tensor_in, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],\n        padding='SAME')\n\ndef conv_model(X, y):\n    # reshape X to 2d tensor with 2nd and 3rd dimensions being image width and height\n    # final dimension being the number of color channels\n    X = tf.reshape(X, [-1, 192, 192, 1])\n    # first conv layer will compute 32 features for each 5x5 patch\n    with tf.variable_scope('conv_layer1'):\n        h_conv1 = skflow.ops.conv2d(X, n_filters=32, filter_shape=[5, 5], \n                                    bias=True, activation=tf.nn.relu)\n        h_pool1 = max_pool_2x2(h_conv1)\n    # second conv layer will compute 64 features for each 5x5 patch\n    with tf.variable_scope('conv_layer2'):\n        h_conv2 = skflow.ops.conv2d(h_pool1, n_filters=64, filter_shape=[5, 5], \n                                    bias=True, activation=tf.nn.relu)\n        h_pool2 = max_pool_2x2(h_conv2)\n        # reshape tensor into a batch of vectors\n        h_pool2_flat = tf.reshape(h_pool2, [-1, 48 * 48 * 64])\n    # densely connected layer with 1024 neurons\n    h_fc1 = skflow.ops.dnn(h_pool2_flat, [1024], activation=tf.nn.relu, keep_prob=0.5)\n    return skflow.models.logistic_regression(h_fc1, y)\n\n# Training and predicting\nclassifier = skflow.TensorFlowEstimator(\n    model_fn=conv_model, n_classes=3, batch_size=33, steps=500,\n    learning_rate=0.001)\n\n#classifier = skflow.TensorFlowDNNClassifier(hidden_units=[100, 20, 10], n_classes=3)\nclassifier.fit(features, classes)\nscore = metrics.accuracy_score(classes, classifier.predict(features))\nprint(\"Accuracy: %f\" % score)","sub_path":"reader.py","file_name":"reader.py","file_ext":"py","file_size_in_byte":2638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"85835447","text":"import math\nfrom utils.key_utils import *\n\nhero_results = {}\nmatchup_results = {}\ncombo_results = {}\nfaction_results = {RADIANT_KEY: 0, DIRE_KEY: 0}\n\nhero_winrates = {}\nmatchup_winrates = {}\ncombo_winrates = {}\nfaction_winrates = {}\n\nhero_advantages = {}\nmatchup_advantages = {}\ncombo_advantages = {}\nfaction_advantages = {}\n\nhero_values = {}\nmatchup_values = {}\ncombo_values = {}\nfaction_values = {}\n\ndata = {HEROES_KEY: hero_values, MATCHUPS_KEY: matchup_values, COMBOS_KEY: combo_values, FACTIONS_KEY: faction_values}\n\n\ndef calculate_hero_winrates():\n    for hero in hero_results:\n        wins = hero_results[hero][WINS_KEY]\n        games = hero_results[hero][GAMES_KEY]\n        hero_winrates[hero] = wins / games\n\n\ndef calculate_matchup_winrates():\n    for matchup_key in matchup_results:\n        hero1, hero2 = split_matchup_key(matchup_key)\n        hero1_wins_key = make_wins_key(hero1)\n        hero2_wins_key = make_wins_key(hero2)\n        hero1_winrate_key = make_winrate_key(hero1)\n        hero2_winrate_key = make_winrate_key(hero2)\n        hero1_wins = matchup_results[matchup_key][hero1_wins_key]\n        hero2_wins = matchup_results[matchup_key][hero2_wins_key]\n        games = matchup_results[matchup_key][GAMES_KEY]\n        hero1_winrate = hero1_wins / games\n        hero2_winrate = hero2_wins / games\n        matchup_winrates[matchup_key] = {hero1_winrate_key: hero1_winrate, hero2_winrate_key: hero2_winrate}\n\n\ndef calculate_combo_winrates():\n    for combo_key in combo_results:\n        wins = combo_results[combo_key][WINS_KEY]\n        games = combo_results[combo_key][GAMES_KEY]\n        combo_winrates[combo_key] = wins / games\n\n\ndef calculate_faction_winrates():\n    radiant_wins = faction_results[RADIANT_KEY]\n    dire_wins = faction_results[DIRE_KEY]\n    games = radiant_wins + dire_wins\n    faction_winrates[RADIANT_KEY] = radiant_wins / games\n    faction_winrates[DIRE_KEY] = dire_wins / games\n\n\ndef finalize_winrates():\n    for hero in hero_results:\n        if hero not in hero_winrates:\n            hero_winrates[hero] = 0.5\n    for hero in hero_results:\n        for enemy in hero_results:\n            if hero != enemy:\n                matchup_key = make_matchup_key(hero, enemy)\n                if matchup_key not in matchup_winrates:\n                    hero_winrate = hero_winrates[hero]\n                    enemy_winrate = hero_winrates[enemy]\n                    hero_matchup_winrate = hero_winrate / (hero_winrate + enemy_winrate)\n                    enemy_matchup_winrate = enemy_winrate / (hero_winrate + enemy_winrate)\n                    hero_winrate_key = make_winrate_key(hero)\n                    enemy_winrate_key = make_winrate_key(enemy)\n                    matchup_winrates[matchup_key] = {hero_winrate_key: hero_matchup_winrate, enemy_winrate_key: enemy_matchup_winrate}\n    for hero in hero_results:\n        for ally in hero_results:\n            if hero != ally:\n                combo_key = make_combo_key(hero, ally)\n                if combo_key not in combo_winrates:\n                    combo_winrates[combo_key] = (hero_winrates[hero] + hero_winrates[ally]) / 2\n\n\ndef calculate_winrates():\n    calculate_hero_winrates()\n    calculate_matchup_winrates()\n    calculate_combo_winrates()\n    calculate_faction_winrates()\n    finalize_winrates()\n\n\ndef calculate_hero_advantages():\n    for hero in hero_winrates:\n        hero_advantages[hero] = hero_winrates[hero] - 0.5\n\n\ndef 
calculate_matchup_advantages():\n    for matchup_key in matchup_winrates:\n        hero1, hero2 = split_matchup_key(matchup_key)\n        hero1_winrate_key = make_winrate_key(hero1)\n        hero2_winrate_key = make_winrate_key(hero2)\n        hero1_advantage_key = make_advantage_key(hero1)\n        hero2_advantage_key = make_advantage_key(hero2)\n        hero1_base_winrate = hero_winrates[hero1]\n        hero2_base_winrate = hero_winrates[hero2]\n        hero1_matchup_winrate = matchup_winrates[matchup_key][hero1_winrate_key]\n        hero2_matchup_winrate = matchup_winrates[matchup_key][hero2_winrate_key]\n        hero1_advantage, hero2_advantage = get_matchup_advantage(hero1_base_winrate, hero2_base_winrate, hero1_matchup_winrate,\n                                                                 hero2_matchup_winrate)\n        matchup_advantages[matchup_key] = {hero1_advantage_key: hero1_advantage, hero2_advantage_key: hero2_advantage}\n\n\ndef calculate_combo_advantages():\n    for combo_key in combo_winrates:\n        hero1, hero2 = split_combo_key(combo_key)\n        hero1_base_winrate = hero_winrates[hero1]\n        hero2_base_winrate = hero_winrates[hero2]\n        combo_winrate = combo_winrates[combo_key]\n        combo_advantages[combo_key] = get_combo_advantage(hero1_base_winrate, hero2_base_winrate, combo_winrate)\n\n\ndef calculate_faction_advantages():\n    faction_advantages[RADIANT_KEY] = faction_winrates[RADIANT_KEY] - 0.5\n    faction_advantages[DIRE_KEY] = faction_winrates[DIRE_KEY] - 0.5\n\n\ndef calculate_advantages():\n    calculate_hero_advantages()\n    calculate_matchup_advantages()\n    calculate_combo_advantages()\n    calculate_faction_advantages()\n\n\ndef assign_hero_values():\n    for hero in hero_advantages:\n        hero_values[hero] = hero_advantages[hero]\n\n\ndef assign_matchup_values():\n    for matchup_key in matchup_advantages:\n        hero1, hero2 = split_matchup_key(matchup_key)\n        hero1_value_key = make_value_key(hero1)\n        hero2_value_key = make_value_key(hero2)\n        hero1_advantage_key = make_advantage_key(hero1)\n        hero2_advantage_key = make_advantage_key(hero2)\n        hero1_advantage = matchup_advantages[matchup_key][hero1_advantage_key]\n        hero2_advantage = matchup_advantages[matchup_key][hero2_advantage_key]\n        games = 0\n        if matchup_key in matchup_results and GAMES_KEY in matchup_results[matchup_key]:\n            games = matchup_results[matchup_key][GAMES_KEY]\n        hero1_value = get_value(hero1_advantage, games)\n        hero2_value = get_value(hero2_advantage, games)\n        matchup_values[matchup_key] = {hero1_value_key: hero1_value, hero2_value_key: hero2_value}\n\n\ndef assign_combo_values():\n    for combo_key in combo_advantages:\n        advantage = combo_advantages[combo_key]\n        games = 0\n        if combo_key in combo_results and GAMES_KEY in combo_results[combo_key]:\n            games = combo_results[combo_key][GAMES_KEY]\n        value = get_value(advantage, games)\n        combo_values[combo_key] = value\n\n\ndef assign_faction_values():\n    faction_values[RADIANT_KEY] = faction_advantages[RADIANT_KEY]\n    faction_values[DIRE_KEY] = faction_advantages[DIRE_KEY]\n\n\ndef assign_values():\n    assign_hero_values()\n    assign_matchup_values()\n    assign_combo_values()\n    assign_faction_values()\n\n\ndef record_hero(hero, win):\n    if hero not in hero_results:\n        hero_results[hero] = {WINS_KEY: 0, GAMES_KEY: 0}\n    if win:\n        hero_results[hero][WINS_KEY] += 
1\n    hero_results[hero][GAMES_KEY] += 1\n\n\ndef record_matchup(hero1, hero2, hero1_win):\n    matchup_key = make_matchup_key(hero1, hero2)\n    hero1_wins_key = make_wins_key(hero1)\n    hero2_wins_key = make_wins_key(hero2)\n    if matchup_key not in matchup_results:\n        matchup_results[matchup_key] = {hero1_wins_key: 0, hero2_wins_key: 0, GAMES_KEY: 0}\n    if hero1_win:\n        matchup_results[matchup_key][hero1_wins_key] += 1\n    else:\n        matchup_results[matchup_key][hero2_wins_key] += 1\n    matchup_results[matchup_key][GAMES_KEY] += 1\n\n\ndef record_combo(hero1, hero2, win):\n    combo_key = make_combo_key(hero1, hero2)\n    if combo_key not in combo_results:\n        combo_results[combo_key] = {WINS_KEY: 0, GAMES_KEY: 0}\n    if win:\n        combo_results[combo_key][WINS_KEY] += 1\n    combo_results[combo_key][GAMES_KEY] += 1\n\n\ndef record_faction(radiant_win):\n    if radiant_win:\n        faction_results[RADIANT_KEY] += 1\n    else:\n        faction_results[DIRE_KEY] += 1\n\n\ndef get_matchup_advantage(base_winrate1, base_winrate2, matchup_winrate1, matchup_winrate2):\n    advantage1 = matchup_winrate1 - base_winrate1\n    advantage2 = matchup_winrate2 - base_winrate2\n    return advantage1, advantage2\n\n\ndef get_combo_advantage(base_winrate1, base_winrate2, combo_winrate):\n    return 2 * combo_winrate - base_winrate1 - base_winrate2\n\n\ndef get_value(advantage, num_games):\n    return advantage * get_confidence(num_games)\n\n\ndef get_confidence(num_games):\n    if num_games == 0:\n        return math.log(1)\n    else:\n        return math.log(num_games)\n","sub_path":"src/utils/advantage_utils.py","file_name":"advantage_utils.py","file_ext":"py","file_size_in_byte":8452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"435200773","text":"import platform\nimport datetime\nimport calendar\nimport time\n\nimport dateutil.parser\nimport pkginfo\n\nfrom contextlog import get_logger\n\nfrom . import context\nfrom . import imprules\nfrom . import rules\n\nfrom .backends import JobState\n\n\n# =====\ndef get_node_name():\n    return platform.uname()[1]\n\n\ndef get_version():\n    pkg = pkginfo.get_metadata(\"powny\")\n    return (pkg.version if pkg is not None else \"\")\n\n\ndef get_user_agent():\n    return \"Powny/{}\".format(get_version())\n\n\n# =====\ndef make_isotime(unix=None):  # ISO 8601\n    if unix is None:\n        unix = time.time()\n    return datetime.datetime.utcfromtimestamp(unix).strftime(\"%Y-%m-%d %H:%M:%S.%fZ\")\n\n\ndef from_isotime(line):\n    dt = dateutil.parser.parse(line)\n    return calendar.timegm(dt.utctimetuple()) + dt.microsecond / 10 ** 6  # pylint: disable=maybe-no-member\n\n\n# =====\ndef make_loader(rules_root):\n    return imprules.Loader(\n        prefix=rules_root,\n        group_by=(\n            (\"handlers\", rules.is_event_handler),\n            (\"methods\", lambda _: True),\n        ),\n    )\n\n\ndef get_exposed(backend, loader):\n    head = backend.rules.get_head()\n    exposed = None\n    errors = None\n    exc = None\n    if head is not None:\n        try:\n            (exposed, errors) = loader.get_exposed(head)\n        except Exception as err:\n            exc = \"{}: {}\".format(type(err).__name__, err)\n            get_logger().exception(\"Can't load HEAD '%s'\", head)\n    return (head, exposed, errors, exc)\n\n\ndef make_job(head, name, kwargs, exposed):\n    method = exposed.get(\"methods\", {}).get(name)\n    if method is None:\n        return None\n    else:\n        return _make_job_state(head, name, method, kwargs)\n\n\ndef make_jobs_by_matchers(head, kwargs, exposed):\n    return [\n        _make_job_state(head, name, method, kwargs)\n        for (name, method) in exposed.get(\"handlers\", {}).items()\n        if rules.check_match(method, kwargs)\n    ]\n\n\ndef _make_job_state(head, name, method, kwargs):\n    return JobState(\n        head=head,\n        method_name=name,\n        kwargs=kwargs,\n        state=context.dump_call(method, kwargs),\n        job_id=None,\n        request=None,\n    )\n","sub_path":"powny/core/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":2156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"75741678","text":"#!/usr/bin/python\n\n# Given a list of numbers that may has duplicate numbers, return all possible subsets\n#\n#  Notice\n#\n# Each element in a subset must be in non-descending order.\n# The ordering between two subsets is free.\n# The solution set must not contain duplicate subsets.\n#\n# Example\n# If S = [1,2,2], a solution is:\n#\n# [\n#   [2],\n#   [1],\n#   [1,2,2],\n#   [2,2],\n#   [1,2],\n#   []\n# ]\n\n\nclass Solution:\n    \"\"\"\n    @param S: A set of numbers.\n    @return: A list of lists. All valid subsets.\n    \"\"\"\n\n    def subsetsWithDup(self, S):\n        # write your code here\n        if S is None or len(S) == 0:\n            return [S]\n\n        S.sort()\n        results = []\n        self.subsetsHelper(S, 0, [], results)\n\n        return results\n\n    def subsetsHelper(self, S, startIndex, subset, results):\n\n        results.append(subset[:])\n\n        for i in range(startIndex, len(S)):\n            if i != 0:\n                if S[i] == S[i - 1] and i > startIndex:\n                    continue\n            subset.append(S[i])\n            self.subsetsHelper(S, i + 1, subset, results)\n            subset.pop()\n\n\ndef test():\n    a = Solution()\n\n    assert a.subsetsWithDup([]) == [[]]\n    assert a.subsetsWithDup([0]) == [[], [0]]\n    assert a.subsetsWithDup([1, 2, 2]) == [[], [1], [1, 2], [1, 2, 2], [2], [2, 2]]\n\nif __name__ == '__main__':\n    test()\n","sub_path":"Subsets II.py","file_name":"Subsets II.py","file_ext":"py","file_size_in_byte":1350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"417317919","text":"new_lines = []\n\nwith open('templates/index.html', 'r') as in_file:\n    lines = in_file.readlines()\n    for line in lines:\n        css_index = line.find('.css')\n        if css_index > -1:\n            i = 1\n            while line[css_index - i] != '=':\n                i += 1\n            line = line[:(css_index - i + 2)] + '../static/' + line[(css_index - i + 2):]\n        js_index = line.find('.js')\n        while js_index > -1:\n            i = 1\n            while line[js_index - i] != '=':\n                i += 1\n            line = line[:(js_index - i + 2)] + '../static/' + line[(js_index - i + 2):]\n            js_index = line.find('.js', js_index + 11)\n        new_lines.append(line)\n\nwith open('templates/index.html', 'w') as out_file:\n    out_file.write(''.join(new_lines))\n","sub_path":"backend/src_mod.py","file_name":"src_mod.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"57670344","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n    统计工具,用于统计作各种统计.\n\"\"\"\n\n\nimport sys\nfrom os import getcwd\n\nsys.path.insert(0, getcwd())\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\nfrom collections import defaultdict\nfrom datetime import datetime\nimport os\n\nfrom gensim import corpora, models\n\nfrom utils.exporter import CSVExporter\n\n\nclass WordAnalyser(object):\n    \"\"\"\n        单词统计器.\n    \"\"\"\n    def __init__(self):\n        pass\n\n    @staticmethod\n    def __save_csv_file(index_row, data_rows, filedir, filename, mode):\n        create_time = datetime.now().strftime('%Y_%m_%d_%H_%M_%s')\n        exporter = CSVExporter()\n        exporter.save_from_list(\n            index_row=index_row,\n            data_rows=data_rows,\n            filename=os.path.join(filedir, '%s_%s.csv' % (create_time, filename)),\n            mode=mode\n        )\n\n    def tf(self, corpus, save_dir=None, mode='w'):\n        \"\"\"\n        统计语料的TF.\n        :param corpus: 分词后的语料库,为list(list(word))\n        :param save_dir: 结果保存目录.\n        :param mode: 文件保存模式.\n        :return: list(tuple)\n        \"\"\"\n        if not isinstance(corpus, list):\n            raise TypeError('corpus must be list.')\n        result = []\n        corpus_dict = corpora.Dictionary(corpus)\n        bow = [corpus_dict.doc2bow(text) for text in corpus]\n\n        for i, text in enumerate(bow):\n            tf_tuple = [\n                (corpus_dict[token_id], word_freq) for token_id, word_freq in text\n            ]\n            sorted_tf = sorted(tf_tuple, key=lambda x: x[1], reverse=True)\n            result.append(sorted_tf)\n            if save_dir is not None:\n                self.__save_csv_file(['word', 'freq'], sorted_tf, save_dir, 'TF_%s' % i, mode)\n        return result\n\n    def tfidf(self, corpus, save_dir=None, mode='w'):\n        \"\"\"\n        统计语料的TF-IDF.\n        :param corpus: 分词后的语料库,为list(list(word))\n        :param save_dir: 结果保存目录.\n        :param mode: 文件保存模式.\n        :return: list(tuple)\n        \"\"\"\n        if not isinstance(corpus, list):\n            raise TypeError('corpus must be list(str).')\n        result = []\n        corpus_dict = corpora.Dictionary(corpus)\n        bow = [corpus_dict.doc2bow(text) for text in corpus]\n        tfidf = models.TfidfModel(bow, normalize=False)\n        for i, text in enumerate(tfidf[bow]):\n            score_tuple = [\n                (corpus_dict[token_id], score) for token_id, score in text\n            ]\n            sorted_score = sorted(score_tuple, key=lambda x: x[1], reverse=True)\n            result.append(sorted_score)\n            if save_dir is not None:\n                self.__save_csv_file(['word', 'freq'], sorted_score, save_dir, 'TF-IDF_%s' % i, mode)\n        return result\n\n    def tf_and_tfidf(self, corpus, save_dir=None, mode='w'):\n        \"\"\"\n        计算tf和idf并且将结果合并到同一个文件中.\n        :param corpus: 语料库\n        :param save_dir: 保存目录\n        :param mode: 操作文件模式.\n        :return: list(list(tuple(word, tf, tfidf)))\n        \"\"\"\n        corpus_dict = corpora.Dictionary(corpus)\n        bow = [corpus_dict.doc2bow(text) for text in corpus]\n        tfidf_model = models.TfidfModel(bow, normalize=False)\n        allresult = []\n        for i, tuples in enumerate(bow):\n            result = []\n            for token_id, tf in tuples:\n                result.append((corpus_dict[token_id], tf, tf * 1.0 * tfidf_model.idfs[token_id]))\n            if save_dir is not None:\n                
self.__save_csv_file(['word', 'TF', 'TF-IDF'], result, save_dir, 'TF&TF-IDF_%s' % i, mode)\n            allresult.append(result)\n        return allresult\n\n    def word_freq_percentage(self, corpus, save_dir=None, mode='w'):\n        \"\"\"\n        Compute each word's in-document frequency as a percentage of its total count over all documents.\n        :param corpus: tokenized corpus\n        :param save_dir: directory where results are saved\n        :param mode: file write mode\n        :return: list(list(tuple(word, tf, tfidf, percentage)))\n        \"\"\"\n        all_result = []\n        word_infos = self.tf_and_tfidf(corpus)\n\n        sum_info = defaultdict(int)  # total number of occurrences of each word in the corpus\n        for word_info in word_infos:\n            for word, tf, tfidf in word_info:\n                sum_info[word] += tf\n\n        for i, word_info in enumerate(word_infos):\n            result = []\n            for word, tf, tfidf in word_info:\n                result.append((word, tf, tfidf, tf * 1.0 / sum_info[word]))\n            all_result.append(result)\n            if save_dir is not None:\n                self.__save_csv_file(['Word', 'TF', 'TF-IDF', 'Percentage'], result, save_dir, 'result_%s' % i, mode)\n\n        return all_result\n\n\ndef information_gain(dataset):\n    pass\n\nif __name__ == '__main__':\n    tc = [\n        ['你好', '你好'],\n        ['经典', '电影'],\n        ['你好', '经典'],\n    ]\n    # analyser = WordAnalyser()\n    # analyser.tf_and_tfidf(tc)\n    information_gain(tc)\n","sub_path":"preprocess/stat.py","file_name":"stat.py","file_ext":"py","file_size_in_byte":5002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"144149456","text":"\"\"\"If a bird swims like a duck, it quacks like a duck, it flies like a duck then it is a duck\"\"\"\r\nclass PyCharm:\r\n    def execute(self):\r\n        print('Compiling')\r\n        print('Wait')\r\n\r\nclass MyEditor:\r\n    def execute(self):\r\n        print('Checking spellings')\r\n        print('Checking syntax')\r\n        print('Compilation complete')\r\n\r\nclass laptop:\r\n    \r\n    def code(self,ide):\r\n        ide.execute()\r\nide=PyCharm()\r\nide=MyEditor()\r\ntest=laptop()\r\ntest.code(ide)","sub_path":"learning_python/classes_and_objects/duck_typing.py","file_name":"duck_typing.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"66291604","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom utils import openUrl, URL_ES, INDEX_QUOTE,configureLog\nfrom elasticsearch import Elasticsearch\nfrom datetime import date, timedelta, datetime\nfrom BDINParser import parseFileLike\nfrom StringIO import StringIO\nimport logging\nimport zipfile\nimport argparse\n\nURL_BDI='http://bvmf.bmfbovespa.com.br/fechamento-pregao/bdi/'\n\ndef saveQuotes(quote):\n    logging.info('Saving quote of type %s', quote['TIPREG'])\n    es = Elasticsearch([URL_ES])\n    if quote['TIPREG'] != 2: \n        return\n    if 'CODNEG' not in quote:\n        logging.warning('CODNEG not found, ignoring quote %s', quote)\n        return\n    es.index(index=INDEX_QUOTE, id=quote['CODNEG'], doc_type=quote['TIPREG'], body=quote)\n\ndef downloadBdiFile(dt):\n    fileurl = '{0}bdi{1}.zip'.format(URL_BDI, dt.strftime('%m%d')) \n    logging.info('Downloading file %s', fileurl)\n\n    # save file\n    fp = StringIO(openUrl(fileurl, True))    \n    z = zipfile.ZipFile(fp)\n    for name in z.namelist():\n        yield z.open(name)\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"-d\", \"--date\", dest=\"date\", \n        type=lambda s: datetime.strptime(s, '%Y%m%d'), \n        help=\"Set the quote date to get\")\n    configureLog(parser)\n    args = parser.parse_args() \n    \n    dt = date.today() - timedelta(days=1) if args.date is None else args.date\n    for bdiFile in downloadBdiFile(dt):\n        for quote in parseFileLike(bdiFile):\n            quote['FILEDT'] = dt\n            quote['REGDT'] = datetime.now() \n            saveQuotes(quote)\n\n    logging.info('Done, exiting...')\n","sub_path":"getquotes.py","file_name":"getquotes.py","file_ext":"py","file_size_in_byte":1620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"480553978","text":"\nimport sounddevice as sd\nimport soundfile as sf\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport cmath as mth;\nimport math as math;\n\n\n### Initialisation des signaux\n\npath = \"../datas/\"\n\nmess, FeMess = sf.read(path + \"mess.wav\")\n# sd.play(mess,FeMess)\nmess_dif, FeMess_dif = sf.read( path + \"mess_difficile.wav\")\n# sd.play(mess_dif,FeMess_dif)\nmess_ss , FeMess_ss = sf.read( path + \"mess_ssespace.wav\")\n# sd.play(mess_ss, FeMess_dif)\nsymbA, FeA = sf.read(path + \"symboleA.wav\")\n# print(FeA)\n\n\n#symbA = np.concatenate((symbA,np.zeros(1000*len(symbA))))\n\n# sd.play(symbA, FeA)\nsymbA2, FeA2 = sf.read(path + \"symboleA2.wav\")\nsymbA2 = symbA2 * np.hanning(len(symbA2))\n#for i in range(6):\n#    symbA2 = np.concatenate((symbA2, symbA2))\n#symbA2 = np.concatenate((symbA2,np.zeros(1000*len(symbA2))))\n\n# sd.play(symbA2,FeA2)\nsymbU, FeU = sf.read(path + \"symboleU.wav\")\n#for i in range(6):\n#    symbU = np.concatenate((symbU, symbU))\n#symbU = np.concatenate((symbU,np.zeros(1000*len(symbU))))\n#symbU = symbU * np.hanning(len(symbU))\n# sd.play(symbU, FeU)\nsymbU2, FeU2 = sf.read(path + \"symboleU2.wav\")\nsymbU2 = symbU2 * np.hanning(len(symbU2))\n#for i in range(6):\n#    symbU2 = np.concatenate((symbU2, symbU2))\n#symbU2 = np.concatenate((symbU2,np.zeros(1000*len(symbU2))))\n\n# sd.play(symbU2, FeU2)\n\npi = math.pi\nlistRessource = [mess, mess_dif,mess_ss, symbA,symbA2, symbU, symbU2]\nlistFrequency = [FeMess, FeMess_dif, FeMess_ss, FeA, FeA2, FeU, FeU2]\n\n\ndef createTime(liste, fe, pas):\n    # Create the time of the current list with pas for step\n    liste = np.array(liste)\n    return np.arange(0,  liste.shape[0] * 1.0 /fe,  1.0 /pas)\n\ndef createTime2(liste):\n    # Create a classic time list\n    return np.arange(0,  len(liste),  1)\n\n\ndef displayGraph(x,y, i1, i2) :\n    # Display the graph\n    plt.plot(x,y)\n    plt.xlim(i1, i2)\n    plt.show()\n\ndef displayListGraph(listeX, listeY) :\n    # Display on the same screen all the graph\n    for index,x in enumerate(listeX) :\n        plt.plot(x,listeY[index])\n    plt.show()\n\ndef displaySignal(liste, Fe, pas) :\n    displayGraph(createTime(liste,Fe,pas), liste)\n\ndef displayAllSignal() :\n    couleur = [\"b\",\"r\",\"g\",\"m\",\"y\",\"c\", \"k\"]\n    m = len(listRessource)\n    for index, x in enumerate(listRessource):\n        listTemps = createTime(x, listFrequency[index],8000)\n        plt.subplot(m,1,index+1)\n        plt.plot(listTemps,x,couleur[index])\n    plt.show()\n\ndef maximumList(liste, Fe, pas) :\n    #Return the maximum of the list and the index\n\n    time = createTime(liste, Fe, pas)[-1]\n\n    index,max = 0,(liste.tolist())[0]\n    for i,x in enumerate(liste.tolist()):\n        if x>max and i * 1.0 / time > 500 and i * 1.0 / time < 526:\n            max = x\n            index=i\n    #print(index * 1.0 / time)\n    return index,i\n\ndef createTF(list, Fe, pas) :\n    listeTF =  np.fft.fft(list,10000)\n    listeTF =  np.array([abs(x) for x in listeTF])\n    tempsTF = createTime2(listeTF)\n    time = createTime(list, Fe, pas)[-1]\n    #displayGraph(tempsTF * 1.0 / time,listeTF, 450, 550)\n    return tempsTF,listeTF\n\ndef getFrequencyTF(listTF, Fe, pas) :\n\n    # maxTF, inutile = maximumList(listTF, Fe, pas)\n    # maxTF, inutile = maximumList(listTF)\n    # maxTF = listTF.argmax()\n    # print(\"Max \")\n    # print(maxTF)\n    maxTF, i = maximumList(listTF, Fe, pas)\n    time = createTime(listTF, Fe, pas)[-1]\n    # print(\"Time\")\n    # 
print(time)\n    # return(maxTF * 1.0 / time)\n    return ( maxTF* 1.0 / time)\n\n\n\ndef getLetter(f) :\n    lettre = [\"A\",\"B\",\"C\",\"D\",\"E\",\"F\",\"G\",\"H\",\"I\",\"J\",\"K\",\"L\",\"M\",\"N\",\"O\",\"P\",\"Q\",\"R\",\"S\",\"T\",\"U\",\"V\",\"W\",\"X\",\"Y\",\"Z\"]\n    if (f-500 > len(lettre) or f-500 <0):\n        return 0\n    return lettre[ (int) (f-500) ]\n\ndef getLetterFromBasic(liste, Fe, pas) :\n    tps,listeTF = createTF(liste, Fe, pas)\n    f = getFrequencyTF(listeTF, Fe,pas)\n    return getLetter(f)\n\n\ndef decoupage500(liste) :\n    l,l_prov = [],[]\n    n = np.array(liste).shape[0]\n    i = 0\n\n    for index,x in enumerate(liste):\n        l_prov.append(x)\n        if (i == 499) :\n            i=0\n            l.append(l_prov)\n            l_prov = []\n        else :\n            i+=1\n    return l\n\ndef createTFMessage(liste, Fe, pas) :\n    decoupage = decoupage500(liste)\n    result = []\n    message = []\n    for x in decoupage :\n        result.append(getFrequencyTF(x,Fe,pas))\n        message.append(getLetterFromBasic(x,Fe,pas))\n\n    # plt.hist(abscisse,result1, 5 ,'yellow','red' )\n    res = plt.hist( result)\n    plt.show()\n    # displayGraph(abscisse,result)\n    return message\n\n\n\ndef suppRedondance(liste) :\n    l = [liste[0]]\n    i = 0\n    element_seen = liste[0]\n    for index,x in enumerate(liste):\n        if (x != element_seen) :\n            element_seen = x\n            l.append(element_seen)\n    return l\n\n\n\ndef passeBas(liste, Fe, f0):\n\n    w0 = 2 * np.pi * f0\n    y = []\n    dt = 1.0 / Fe\n    a = dt * w0 / (1.0 + dt * w0)\n    y.append(a * liste[0])\n    for i in range(1, len(liste)):\n       y.append(a * liste[i] + (1-a) * y[i-1])\n    return y\n\n\ndef passeHaut(liste, Fe, f0):\n    w0 = 2 * np.pi * f0\n    y = []\n    dt = 1.0 / Fe\n    a = dt * w0 / (1.0 + dt * w0)\n    y.append(liste[0])\n    for i in range(1, len(liste)):\n       y.append(a * (liste[i] - liste[i-1] + y[i-1]))\n    return y\n\n\ndef concat(liste, a,b) :\n    l1 =[]\n    for i in range (len(liste)) :\n        if (i>= a and i<= b) :\n            l1.append(liste[i])\n    return l1\n\ndef most_common(array) :\n    return max(set(array), key = array.count)\n\ndef decode(s,Fe) :\n    i = 0\n    message = []\n    extraitLetters = []\n    while i < (len(s) - 1999) :\n        extrait = s[i:i+2000]\n        extrait = extrait * np.hanning(len(extrait))\n        #for j in range(4):\n        #   extrait = np.concatenate((extrait, extrait))\n        extrait = np.concatenate((extrait, np.zeros(50*len(extrait))))\n        letter = getLetterFromBasic(extrait,Fe,Fe)\n        #print(letter)\n        extraitLetters.append(letter)\n        #if not message or message[-1] != letter :\n        message.append(letter)\n        #if letter == 0 :\n        #    message.append(most_common(extraitLetters))\n        #    extraitLetters = []\n        i = i+2500\n    return message\n\n\n#Symbole A\nprint(decode(symbA,FeA))\n# symbA = symbA * np.hanning(len(symbA))\n# for i in range(6):\n#     symbA = np.concatenate((symbA, symbA))\n#print(getLetterFromBasic(symbA,FeA,FeA))\nprint(decode(symbA2,FeA2))\n# print(getLetterFromBasic(symbA2,FeA2,8000))\n# test, val = createTF(symbA2)\n# liste1 = concat(createTime2(symbA2), 480,530)\n# liste2 = concat(val,480,530)\n\n# newSym = passeBas(symbA2, FeA2,527)\n# newSym1 = passeHaut(newSym, FeA2, 499)\n\n\n\n\n# displayGraph(createTime2(symbA2), createTF(symbA2))\n# getFrequencyTF(symbA2, FeA2, 8000)\n\n# displayGraph(createTime2(symbA2), symbA2)\n# 
displayGraph(createTime2(newSym), newSym1)\n# tps1, Tf1 = createTF(newSym)\n# tps2, Tf2 = createTF(newSym1)\n\n# print (getFrequencyTF(Tf1, FeA2,8000))\n# print (getFrequencyTF(Tf2, FeA2,8000))\n\n\n# displaySignal(symbA2,FeA2,8000)\n\n#Symbole U\n\nprint(decode(symbU,FeU))\nprint(decode(symbU2,FeU2))\n# displaySignal(symbU2,FeU2,8000)\n\n\n# test = ['A','T','U','B','U','Y','Y','U']\n# print(most_common(test))\n\n#Mess\nprint(\"Message :\")\nprint(decode(mess,FeMess))\nprint(\"Message sans espaces :\")\nprint(decode(mess_ss,FeMess_ss))\nprint(\"Message difficile :\")\nprint(decode(mess_dif,FeMess_dif))\n\nmessTF = np.fft.fft(mess)\n# Makes the module of the ttf\nmessTF = [abs(x) for x in messTF]\nl_temps = createTime2(mess)\n# plt.plot(l_temps, messTF)\n# plt.show()\n\nlistes = decoupage500(mess)\n\n# message = createTFMessage(mess,FeMess,8000)\n# print(message)\n\n\n\n#Mess 2\n\n# message = createTFMessage(mess_ss,FeMess_ss,8000)\n# print(message)\n\n#Mess dur\n\n# message = createTFMessage(mess_dif,FeMess_dif,8000)\n# print(suppRedondance(message))\n","sub_path":"code/codeGlobal.py","file_name":"codeGlobal.py","file_ext":"py","file_size_in_byte":7747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"391626502","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\nimport os\nimport sys\nimport signal\nimport logging\nimport module.common.const as const\nimport module.common.util as util\nfrom gevent import monkey,subprocess\nfrom bottle import run\nfrom module.common.topologydb import create_metadata\nfrom module.api import *\n\n# gevent\nmonkey.patch_all(subprocess=True)\nsys.modules['subprocess'] = subprocess\n\ndef outlog_confvalue(logger):\n    # write the settings to the log file.\n    logger.info('----------[settings]---------')\n    logger.info('rest_host={0}'.format(config.rest_host))\n    logger.info('rest_port={0}'.format(config.rest_port))\n    logger.info('rest_base={0}'.format(config.rest_base))\n    logger.info('post_uri={0}'.format(config.post_uri))\n    logger.info('db_addr={0}'.format(config.db_addr))\n    logger.info('db_port={0}'.format(config.db_port))\n    logger.info('db_user={0}'.format(config.db_user))\n    logger.info('db_pass={0}'.format(config.db_pass))\n    logger.info('topology_db={0}'.format(config.topology_db))\n    logger.info('mon_data_sdn_db={0}'.format(config.mon_data_sdn_db))\n    logger.info('mon_data_se_db={0}'.format(config.mon_data_se_db))\n    logger.info('mon_data_cp_db={0}'.format(config.mon_data_cp_db))\n    logger.info('mon_data_tn_db={0}'.format(config.mon_data_tn_db))\n    logger.info('log_dir={0}'.format(config.log_dir))\n    logger.info('log_file={0}'.format(config.log_file))\n    logger.info('debug_flg={0}'.format(config.debug_flg))\n    logger.info('-----------------------------')\n\n# Main function.\nif __name__ == '__main__':\n    start_msg = '{0} starting up...'.format(const.MODULE_NAME_API)\n\n    # Do not output KeyboardInterrupt.\n    signal.signal(signal.SIGINT, signal.SIG_DFL)\n \n    # check log directory.\n    if not os.path.exists(config.log_dir):\n        os.mkdir(config.log_dir)\n\n    # make logfile name(full path).\n    logfile = config.log_dir + '/' + config.log_file\n\n    # create logger(monitoring_api).\n    if not os.path.exists(config.log_dir):\n        os.mkdir(config.log_dir)\n    logfile = config.log_dir + '/' + config.log_file\n    logger = util.init_logger(const.MODULE_NAME_API,logfile)\n    if not logger:\n        sys.exit(1)\n\n    # create topologyDB meta data.\n    create_metadata(config.topology_db,config.db_addr\n                                    ,config.db_port,config.db_user,config.db_pass,config.debug_flg)\n\n    # run.\n    if config.debug_flg == 1:\n        logger.setLevel(logging.DEBUG)\n        logger.info('(debug mode)' + start_msg)\n    else:\n        logger.setLevel(logging.INFO)\n        logger.info(start_msg)\n    outlog_confvalue(logger)\n    # \"debug\" flag not available at Bottle < 0.11\n    #run(host=config.rest_host, port=config.rest_port)\n    #run(host=config.rest_host, port=config.rest_port, debug=config.debug_flg)\n    run(host=config.rest_host, port=config.rest_port, server='gevent')\n","sub_path":"msjp/monitoring_api.py","file_name":"monitoring_api.py","file_ext":"py","file_size_in_byte":2853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"152869230","text":"import sys\nimport csv\nimport pandas as pd\nimport re\nclass Element:\n    def __init__(self, element, number, symbol, weight):\n        self._element = element\n        self._number = number\n        self._symbol = symbol\n        self._weight = weight\n\n    def __str__(self):\n        return self._element + '(' + self._symbol + ', ' + str(self._number) +',' + str(self._weight) + ')' \n\n\ndef molecular_weight_symb(symb, nmb, periodic_table):\n    return periodic_table[symb]._weight * nmb\n\ndef molecular_weight(eq, periodic_table):\n    eq_ = re.sub('([0-9]+)', ' \\\\1 ', eq)\n    eq_ = [ s for s in re.split('\\W+', eq_)  if s != '']\n    symb, nmb = eq_\n    nmb = int(nmb)\n    return molecular_weight_symb(symb, nmb,periodic_table)\n    \n\n    \n\nif __name__ == \"__main__\":\n    h= Element('hydrog', 1, 'H', 3)\n    print(h)\n    filename = sys.argv[1]\n    df = pd.read_csv(filename, index_col=False)\n    # print(df)\n    print(df.shape)\n    # print(df.iloc[0,0:4])\n    periodic_table = {}\n    for i in range(0,df.shape[0]):\n        el=df.loc[i,'Element']\n        num=df.loc[i,'Number']\n        sym=df.loc[i,'Symbol']\n        wght=df.loc[i,'Weight']\n        element = Element(el, num, sym, wght)\n        periodic_table[sym] = element\n        print(element)\n\n    print(molecular_weight('H20', periodic_table))\n\n","sub_path":"Element.py","file_name":"Element.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"259247770","text":"# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# pylint: disable=wrong-import-position,unused-import\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\nimport sys\nsys.path.append('/adcm')\nimport adcm.init_django\n\nfrom cm.ansible_plugin import ContextActionModule\nfrom cm.api import (\n    set_cluster_state,\n    set_host_state,\n    set_service_state,\n    set_service_state_by_id,\n    set_provider_state\n)\n\nANSIBLE_METADATA = {'metadata_version': '1.1', 'supported_by': 'Arenadata'}\n\nDOCUMENTATION = r'''\n---\nmodule: adcm_state\nshort_description: Change state of object\ndescription:\n  - This is special ADCM only module which is usefull for seting state for various ADCM objects.\n  - There is support of cluster, service, host and providers states\n  - This one is allowed to be used in various execution contexts.\noptions:\n  - option-name: type\n    required: true\n    choises:\n      - cluster\n      - service\n      - provider\n      - host\n    description: type of object which should be changed\n\n  - option-name: state\n    required: true\n    type: string\n    description: value of state which should be set\n\n  - option-name: service_name\n    required: false\n    type: string\n    description: usefull in cluster context only. 
In that context you are able to set the state value for a service belongs to the cluster.\n\nnotes:\n  - If type is 'service', there is no needs to specify service_name\n'''\n\nEXAMPLES = r'''\n- adcm_state:\n    type: \"cluster\"\n    state: \"statey\"\n  register: out\n- adcm_state:\n    type: \"service\"\n    service_name: \"First\"\n    state: \"bimba!\"\n'''\n\nRETURN = r'''\nstate:\n  returned: success\n  type: str\n  example: \"operational\"\n'''\n\n\nclass ActionModule(ContextActionModule):\n\n    TRANSFERS_FILES = False\n    _VALID_ARGS = frozenset(('type', 'service_name', 'state'))\n    _MANDATORY_ARGS = ('type', 'state')\n\n    def _do_cluster(self, task_vars, context):\n        res = self._wrap_call(\n            set_cluster_state,\n            context['cluster_id'],\n            self._task.args[\"state\"]\n        )\n        res['state'] = self._task.args[\"state\"]\n        return res\n\n    def _do_service_by_name(self, task_vars, context):\n        res = self._wrap_call(\n            set_service_state,\n            context['cluster_id'],\n            self._task.args[\"service_name\"],\n            self._task.args[\"state\"]\n        )\n        res['state'] = self._task.args[\"state\"]\n        return res\n\n    def _do_service(self, task_vars, context):\n        res = self._wrap_call(\n            set_service_state_by_id,\n            context['cluster_id'],\n            context['service_id'],\n            self._task.args[\"state\"]\n        )\n        res['state'] = self._task.args[\"state\"]\n        return res\n\n    def _do_host(self, task_vars, context):\n        res = self._wrap_call(\n            set_host_state,\n            context['host_id'],\n            self._task.args[\"state\"],\n        )\n        res['state'] = self._task.args[\"state\"]\n        return res\n\n    def _do_provider(self, task_vars, context):\n        res = self._wrap_call(\n            set_provider_state,\n            context['provider_id'],\n            self._task.args[\"state\"],\n        )\n        res['state'] = self._task.args[\"state\"]\n        return res\n","sub_path":"ansible/plugins/action/adcm_state.py","file_name":"adcm_state.py","file_ext":"py","file_size_in_byte":3742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"564337031","text":"import os\nimport sys\nimport configparser\nimport argparse\nimport platform\n\nCONFIG = \"setting.ini\"\n\ndef get_parameters():\n    osc = platform.system()\n    current_path = os.getcwd()\n    project = os.path.basename(os.path.abspath(os.path.join(current_path, '..')))\n\n    extra = None\n    tmp = configparser.ConfigParser()\n    # conf = c\n    if osc == \"Windows\":\n        tmp.read(\"{}{}{}\".format(current_path,'\\\\',CONFIG))\n    else:\n        tmp.read(\"{}{}{}\".format(current_path,'/',CONFIG))\n    extra=tmp[osc]\n\n    extra.setdefault('Vivado_cmd',os.path.join(os.path.abspath(extra['VivadoInstallPath']), extra['VivadoVersion'], 'bin', 'vivado'))\n\n    index = ['name','board','path','os','pwd','vivado','tcl','version']\n    value =[ project,extra['Board'],extra['WorkPath'],osc,current_path,extra['Vivado_cmd'],extra['TclPath'],extra['VivadoVersion'] ]\n\n    config=dict(zip(index,value))\n\n    return config\n\ndef run_script(vivado,script,*argv,**args):\n\n    try:\n        mode = args[\"mode\"]\n    except KeyError:\n        mode = \"batch\"\n\n    argvs = ' '.join(argv)\n\n    vivado_cmd=\"{} -mode {} -source {} -tclargs {} \".format(vivado,mode,script,argvs)\n\n\n\n    return vivado_cmd\n    \n\ndef test():\n    conf = get_parameters()\n    run_script(conf['vivado'],conf['tcl'],\"adas\",\"dasdasd\",mode=\"2333\")\n\ndef stage():\n    conf = get_parameters()\n    cmd=run_script(conf['vivado'],conf['tcl'],conf['name'],conf['path'],conf['board'])\n    print(cmd)\n\ndef realtime():\n    conf = get_parameters()\n    cmd=run_script(conf['vivado'],conf['tcl'],conf['name'],conf['path'],conf['board'])\n    print(cmd)\n    os.system(cmd)\n\nif __name__ == \"__main__\":\n    realtime()\n\n\n","sub_path":"remote_auto.py","file_name":"remote_auto.py","file_ext":"py","file_size_in_byte":1640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"241494517","text":"# -*- coding: utf-8 -*-\n\n#\n# Copyright 2015 Jun-ya HASEBA\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport datetime\n\n\n# 日時の文字列表現のフォーマット\nDATETIME_FORMAT = '%Y-%m-%d %H:%M:%S'\n\n\ndef _shift(target):\n    \"\"\"\n    UTCの日時をJSTの日時に変換する。\n\n    :param target: UTCの日時の文字列表現\n    :return: JSTに変換した日時の文字列表現\n    \"\"\"\n    # UTCをJSTに変換する\n    utc = datetime.datetime.strptime(target, DATETIME_FORMAT)\n    jst = utc + datetime.timedelta(hours=9)\n    return jst.strftime(DATETIME_FORMAT)\n","sub_path":"timeshift/timeshift.py","file_name":"timeshift.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"375909694","text":"import requests\n\n#This tool will attempt to download a series of geojson files from a layers REST endpoint\n\n# parameters\nname = 'Saratoga'                       # name of county, etc. for file name\nmain_url = r'https://spatialags.vhb.com/arcgis/rest/services/29820_Saratoga/NY_County_Saratoga/MapServer/57'\n                  # REST endpoint for layer, will look like 'http://gis.co.ym.mn.gov/arcgis/rest/services/YellowMedicine/YellowMedicine_DataLayers/MapServer/40'\nmin_value = 100076                # integer, lowest value of objectid\nmax_value =  200999           # integer, highest value of objectid (or rounded up to nearest 1000-1 e.g. round 6758 to 6999 NOT 7000)\nobject_id_name = 'OBJECTID'     # may be something like 'OBJECTID_1'\ndirectory = r'C:\\Users\\NicholasRolstad\\Desktop\\{}'.format(name)                  # output directory\n\nheaders = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko)     Chrome/37.0.2049.0 Safari/537.36'}\n\n\n# function that builds list of intervals to comply with 1000 feature limit per call\ndef build_list(min_value, max_value):\n    interval_list = []\n    minimum = round(min_value, -3)\n    minimum = minimum - 1000 if minimum > min_value else minimum\n    minimum = 0 if minimum < 0 else minimum\n    maximum = minimum + 999\n\n    while maximum < (max_value + 1000):\n        interval_list.append([int(minimum), int(maximum)])\n        maximum += 1000\n        minimum += 1000\n\n    return interval_list\n\n\nwhere_clauses = build_list(min_value, max_value)\n\ncount = 1\nfor clause in where_clauses:\n    url='{}/query?where={}%20>=%20{}%20AND%20{}%20<={}%20&outFields=*&f=json'.format(main_url, object_id_name, clause[0], object_id_name, clause[1])\n    print (url)\n    r = requests.get(url, headers=headers)\n    content = r.text.encode('utf-8')\n    with open('{}/{}_parcels{}.json'.format(directory, name, count), 'wb') as file:\n        file.write(content)\n    count += 1\n","sub_path":"scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":1943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"648053308","text":"#!/usr/bin/python\nimport sys\nimport os\nimport pytest\n\ntry:\n    zbathome = os.environ['ZBAT_HOME']\nexcept:\n    print('Test cannot run.  Please export ZBAT_HOME.')\n    sys.exit()\n\nif zbathome + 'lib' not in sys.path:\n    sys.path.append(zbathome + 'lib')\n\nfrom ui.integrations.zbUIIntegration import Integration\nfrom ui.integrations.zbAssetManageSystems import AssetManagementSystems\nfrom common.zbConfig import NUMBER_RETRIES, DELAY_SECONDS, SCREENSHOT_ON_FAIL\nfrom common.zbCommon import rerunIfFail\n\n# fixture\n@pytest.fixture(scope=\"module\")\ndef integration_browser(browser_factory):\n    browser = browser_factory(Integration)\n    return browser[\"selenium\"]\n\n\n@pytest.fixture(scope=\"module\")\ndef asset_management_browser(browser_factory, integration_browser):\n    browser = browser_factory(AssetManagementSystems,\n                              custom_payload={\"selenium\": integration_browser.selenium},\n                              single_br=False)\n    return browser[\"selenium\"]\n\nclass TestIntegrationAssetManagement:\n\n    @pytest.mark.regression\n    def test_asset_management_regression(self, asset_management_browser):\n        assert rerunIfFail(function=asset_management_browser.check_asset_configuration(),\n                           selenium=asset_management_browser.selenium,\n                           screenshot=SCREENSHOT_ON_FAIL,\n                           testname=zbathome + 'artifacts/test_Integration.png',\n                           number=NUMBER_RETRIES,\n                           delay=DELAY_SECONDS)\n\n    @pytest.mark.regression\n    def test_servicenow_integration_regression(self, asset_management_browser):\n        assert rerunIfFail(function=asset_management_browser.check_servicenow_integration(),\n                           selenium=asset_management_browser.selenium,\n                           screenshot=SCREENSHOT_ON_FAIL,\n                           testname=zbathome + 'artifacts/test_Integration.png',\n                           number=NUMBER_RETRIES,\n                           delay=DELAY_SECONDS)\n","sub_path":"tests/ui/test_integration_asset_management_systems.py","file_name":"test_integration_asset_management_systems.py","file_ext":"py","file_size_in_byte":2032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"43925","text":"from argparse import ArgumentParser\nimport os\n\nimport keras.backend as K\nfrom keras.models import load_model\n\nfrom mnist.lib import load_test_data, prep_x_data, prep_y_data\n\n\ndef main(model_path, data_path, output_dir):\n    # Load the model\n    model = load_model(model_path)\n    # Load the test data\n    x_test, y_test = load_test_data(data_path)\n    x_test = prep_x_data(x_test)\n    y_test = prep_y_data(y_test)\n    # Evaluate the model on the test data\n    results = model.evaluate(x_test, y_test)\n    # Write out the results of evaluation into file(s) in the output dir\n    # For now just going to write out some text...\n    results_output = os.path.join(output_dir, 'results.txt')\n    with open(results_output, 'w') as f:\n        # just cause I know that it's coming out as a scalar now\n        f.write(str(results))\n        f.write('\\n')\n\n\nif __name__ == \"__main__\":\n    parser = ArgumentParser()\n    parser.add_argument('--model-path')\n    parser.add_argument('--data-path')\n    parser.add_argument('--output-dir')\n    args = parser.parse_args()\n    main(args.model_path, args.data_path, args.output_dir)\n","sub_path":"mnist/evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"91049882","text":"\nimport datetime\n\n\ndef csv_parser(path):\n    ret = {}\n\n    fp = open(path, \"r\")\n    header = fp.readline().strip().replace(\"<\", \"\").replace(\">\", \"\").split(\",\")\n\n    for line in fp:\n        splits = line.strip().split(\",\")\n        d = {}\n\n        for i, item in enumerate(splits):\n            d[header[i]] = item\n\n        ret[splits[0]] = d\n\n    fp.close()\n    return ret\n\n\ndef d_sort(d_keys, dict):\n    keys = list(d_keys)\n    keys.sort()\n    ret = []\n\n    for key in keys:\n        ret.append(dict[key])\n\n    return ret\n\n\ndef workdays(start, end):\n    day = start\n    while day < end:\n        if day.weekday() is not 5 and day.weekday() is not 6:\n            yield day\n        day += datetime.timedelta(days=1)\n\n\ndef format_as_date(string):\n    year = int(string[:4])\n    month = int(string[4:6])\n    day = int(string[6:8])\n    return datetime.datetime(year=year, month=month, day=day)\n","sub_path":"analysis/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"374260549","text":"import os\nimport random\n\nimg_path = \"/root/samples/\"\n\ntrain_path = \"./samples/\"\ntrain_val = open(\"train_val.txt\",\"w+\")\n\ntest_val = open(\"test_val.txt\",\"w+\")\n\nmap_txt = open(\"label-map.txt\",\"r\")\n\nfiles = os.listdir(img_path)\nprint(files)\nrandom.shuffle(files)\nsplit_nums = int(len(files)*0.8)\ntrain_files = files[:split_nums]\ntest_files = files[split_nums:]\n# print(train_files)\n# print(test_files)\n\ntrain_label = []\ntest_label = []\nmap_label = [text.split(\"\\n\")[0] for text in map_txt.readlines() ]\nprint(map_label)\nsplit_char = '_'\nsplit_index = 0\nfor i in range(len(train_files)):\n    label_text = str.upper(train_files[i].split(split_char)[split_index])\n\n    while label_text.__len__()<10:\n        label_text+=\"-\"\n    label_text = list(label_text)\n    label_tmp =[]\n    for i in range(len(label_text)):\n        label_tmp.append(map_label.index(label_text[i]))\n\n    label_tmp  = \" \".join(str(i) for i in label_tmp)\n    train_label.append(label_tmp)\n\nfor i in range(len(test_files)):\n    label_text = str.upper(test_files[i].split(split_char)[split_index])\n\n\n    while label_text.__len__()<10:\n        label_text+=\"-\"\n    label_text = list(label_text)\n    label_tmp = []\n    for i in range(len(label_text)):\n        label_tmp.append(map_label.index(label_text[i]))\n    label_tmp  = \" \".join(str(i) for i in label_tmp)\n    test_label.append(label_tmp)\n\nprint(train_label)\nprint(test_label)\n\nfor i in range(train_files.__len__()):\n    savestr = img_path+train_files[i]+\" \"+train_label[i]+\"\\n\"\n    print(savestr)\n    train_val.write(savestr)\nfor i in range(test_files.__len__()):\n    savestr = img_path+test_files[i] + \" \" + test_label[i]+\"\\n\"\n    print(savestr)\n    test_val.write(savestr)\n\ntrain_val.close()\ntest_val.close()","sub_path":"test/train/createlable.py","file_name":"createlable.py","file_ext":"py","file_size_in_byte":1724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"77462864","text":"\"\"\"\nsqlite的基础配置文件\ncreate by judy\n2019/02/18\n\"\"\"\nimport os\n\n\nclass SqliteConfig:\n    \"\"\"表示sqlite专用配置。\\n\n    dbdir: 数据库文件存放路径,默认./_clientdb\\n\n    dbname: 数据库文件名,默认data.db\\n\n    maxdbfisize: 最大数据库文件大小,默认100MB\\n\n    errordbdelete: 异常数据库文件是否删除,默认False不删除,而是改名存放。\\n\n    connecttimeoutsec: 数据库链接超时时间,float,单位秒,默认60秒\\n\n    delete_on_error: 当数据库文件出错时是否删除,默认为True\"\"\"\n\n    def __init__(\n            self,\n            dbdir: str = './_clientdb',\n            # dbname: str = 'data.db',\n            maxdbfisize: float = 100 * 1024 * 1024,\n            maxconnperdb: int = 20,\n            errordbdelete: bool = False,\n            connecttimeoutsec: float = 60,\n            delete_on_error: bool = True,\n    ):\n        self._dbdir = os.path.abspath('./_clientdb')\n        if not isinstance(dbdir, str) and not dbdir == \"\":\n            self._dbdir = os.path.abspath(dbdir)\n\n        # self._dbname = 'data.db'\n        # if not isinstance(dbname, str) and not dbname == \"\":\n        #     self._dbname = dbname\n\n        self._maxdbfisize = 100 * 1024 * 1024\n        if type(maxdbfisize) in [int, float] and maxdbfisize >= 1024 * 1024:\n            self._maxdbfisize = maxdbfisize\n\n        self._maxconnperdb = 20\n        if isinstance(maxconnperdb, int) and maxconnperdb > 0:\n            self._maxconnperdb = maxconnperdb\n\n        self._errordbdelete: bool = False\n        if isinstance(errordbdelete, bool):\n            self._errordbdelete = errordbdelete\n\n        self._connecttimeoutsec: float = 60\n        if type(connecttimeoutsec) in [int, float]:\n            if connecttimeoutsec <= 0:\n                self._connecttimeoutsec = None\n            else:\n                self._connecttimeoutsec = connecttimeoutsec\n\n        self._delete_on_error: bool = True\n        if isinstance(delete_on_error, bool):\n            self._delete_on_error = delete_on_error\n","sub_path":"savecode/threeyears/idownclient/clientdbmanager/dbsqlite/sqliteconfig.py","file_name":"sqliteconfig.py","file_ext":"py","file_size_in_byte":2052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"297735935","text":"# This file is part of Checkbox.\n#\n# Copyright 2012-2013 Canonical Ltd.\n# Written by:\n#   Zygmunt Krynicki \n#\n# Checkbox is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License version 3,\n# as published by the Free Software Foundation.\n\n#\n# Checkbox is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Checkbox.  If not, see .\n\n\"\"\"\n:mod:`plainbox.impl.commands.checkbox` -- mix-in for checkbox commands\n======================================================================\n\n.. warning::\n\n    THIS MODULE DOES NOT HAVE STABLE PUBLIC API\n\"\"\"\n\nimport os\nfrom argparse import FileType\nfrom logging import getLogger\nimport itertools\n\nfrom plainbox.i18n import gettext as _\nfrom plainbox.impl.secure.qualifiers import RegExpJobQualifier\nfrom plainbox.impl.secure.qualifiers import WhiteList\nfrom plainbox.impl.secure.qualifiers import select_jobs\nfrom plainbox.impl.secure.rfc822 import FileTextSource\n\nlogger = getLogger(\"plainbox.commands.checkbox\")\n\n\nclass CheckBoxInvocationMixIn:\n\n    def __init__(self, provider_list, config):\n        self.provider_list = provider_list\n        self.config = config\n\n    def get_job_list(self, ns):\n        \"\"\"\n        Load and return a list of JobDefinition instances\n        \"\"\"\n        return list(\n            itertools.chain(*[\n                p.load_all_jobs()[0] for p in self.provider_list]))\n\n    def get_whitelist_from_file(self, filename, stream=None):\n        \"\"\"\n        Load a whitelist from a file, with special behavior.\n\n        :param filename:\n            name of the file to load\n        :param stream:\n            (optional) pre-opened stream pointing at the whitelist\n        :returns:\n            The loaded whitelist or None if loading fails for any reason\n\n        This function implements special loading behavior for whitelists that\n        makes them inherit the implicit namespace of the provider they may be a\n        part of. 
Before loading the whitelist directly from the file, all known\n        providers are interrogated to see if any of them has a whitelist that\n        was loaded from the same file (as indicated by os.path.realpath())\n\n        The stream argument can be provided if the caller already has an open\n        file object, which is typically the case when working with argparse.\n        \"\"\"\n        # Look up a whitelist with the same name in any of the providers\n        wanted_realpath = os.path.realpath(filename)\n        for provider in self.provider_list:\n            for whitelist in provider.get_builtin_whitelists():\n                if (whitelist.origin is not None\n                        and whitelist.origin.source is not None\n                        and isinstance(whitelist.origin.source,\n                                       FileTextSource)\n                        and os.path.realpath(\n                            whitelist.origin.source.filename) ==\n                        wanted_realpath):\n                    logger.debug(\n                        _(\"Using whitelist %r obtained from provider %r\"),\n                        whitelist.name, provider)\n                    return whitelist\n        # Or load it directly\n        try:\n            if stream is not None:\n                return WhiteList.from_string(stream.read(), filename=filename)\n            else:\n                return WhiteList.from_file(filename)\n        except Exception as exc:\n            logger.warning(\n                _(\"Unable to load whitelist %r: %s\"), filename, exc)\n\n    def _get_matching_job_list(self, ns, job_list):\n        logger.debug(\"_get_matching_job_list(%r, %r)\", ns, job_list)\n        qualifier_list = []\n        # Add whitelists\n        for whitelist_file in ns.whitelist:\n            qualifier = self.get_whitelist_from_file(\n                whitelist_file.name, whitelist_file)\n            if qualifier is not None:\n                qualifier_list.append(qualifier)\n        # Add all the --include jobs\n        for pattern in ns.include_pattern_list:\n            try:\n                qualifier = RegExpJobQualifier(\n                    '^{}$'.format(pattern), inclusive=True)\n            except Exception as exc:\n                logger.warning(\n                    _(\"Incorrect pattern %r: %s\"), pattern, exc)\n            else:\n                qualifier_list.append(qualifier)\n        # Add all the --exclude jobs\n        for pattern in ns.exclude_pattern_list:\n            try:\n                qualifier = RegExpJobQualifier(\n                    '^{}$'.format(pattern), inclusive=False)\n            except Exception as exc:\n                logger.warning(\n                    _(\"Incorrect pattern %r: %s\"), pattern, exc)\n            else:\n                qualifier_list.append(qualifier)\n        logger.debug(\"select_jobs(%r, %r)\", job_list, qualifier_list)\n        return select_jobs(job_list, qualifier_list)\n\n\nclass CheckBoxCommandMixIn:\n    \"\"\"\n    Mix-in class for plainbox commands that want to discover and load checkbox\n    jobs\n    \"\"\"\n\n    def enhance_parser(self, parser):\n        \"\"\"\n        Add common options for job selection to an existing parser\n        \"\"\"\n        group = parser.add_argument_group(title=_(\"job definition options\"))\n        group.add_argument(\n            '-i', '--include-pattern', action=\"append\",\n            metavar=_('PATTERN'), default=[], dest='include_pattern_list',\n            # TRANSLATORS: 
this is in imperative form\n            help=_(\"include jobs matching the given regular expression\"))\n        group.add_argument(\n            '-x', '--exclude-pattern', action=\"append\",\n            metavar=_(\"PATTERN\"), default=[], dest='exclude_pattern_list',\n            # TRANSLATORS: this is in imperative form\n            help=_(\"exclude jobs matching the given regular expression\"))\n        # TODO: Find a way to handle the encoding of the file\n        group.add_argument(\n            '-w', '--whitelist',\n            action=\"append\",\n            metavar=_(\"WHITELIST\"),\n            default=[],\n            type=FileType(\"rt\"),\n            # TRANSLATORS: this is in imperative form\n            help=_(\"load whitelist containing run patterns\"))\n","sub_path":"venv/lib/python3.6/site-packages/plainbox/impl/commands/checkbox.py","file_name":"checkbox.py","file_ext":"py","file_size_in_byte":6458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"187439946","text":"# process msk impact bed file into a standardized form which can be ingested by my R scripts\n\nimport pandas as pd\n\ninfile = \"/home/franzese/projects/hotspot_signature_panel/data/IMPACT_v2_expanded.bed\"\noutfile = \"data/msk_impact_regions.tsv\"\n\ndf = pd.read_csv(infile, sep=\"\\t\")\n\n# for some reason the gencode Chromosome column had \"chr\" prepended to each entry and I did that first\n# so my R scripts which ingest that df expects it to be there, so I have to put it here as well\ndf['Chromosome'] = \"chr\" + df['Chromosome'].astype(str)\n\nformatted_regions = df[[\"Chromosome\", \"Start\", \"End\"]]\n\nformatted_regions.to_csv(outfile, sep=\"\\t\", index=False, index_label=False)\n","sub_path":"process_msk_impact.py","file_name":"process_msk_impact.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"528432111","text":"from math import sqrt, exp\r\nfrom tree import Tree\r\n\r\ndef Rho(tree, f = False):\r\n\r\n    if not f and tree.root.extra['Rho'] != '--':\r\n        return tree.root.extra['Rho']\r\n    else:\r\n        mutCount = 0\r\n        for leaf in tree.leaves:\r\n            mutCount += mutationCount(leaf, tree)\r\n        rho = float(mutCount)/ len(tree.leaves)\r\n        tree.root.extra['Rho'] = rho\r\n        return rho\r\n\r\n\r\ndef StErr(tree, f = False):\r\n\r\n    if not f and tree.root.extra['SE'] != '--':\r\n        return tree.root.extra['SE']\r\n    tSum = 0.0\r\n    for node in tree.leaves: tSum += len(tree.tree[node].mutations)\r\n    for node in tree.nodes[1:]:  tSum += len(tree.tree[node].mutations) * \\\r\n                      (len(Tree(tree.subtree(node)).leaves)) **2\r\n    se = sqrt(tSum/(len(tree.leaves)**2))\r\n    tree.root.extra['SE'] = se\r\n    return se\r\n\r\n\r\ndef Age(tree):\r\n\r\n    rho = Rho(tree)\r\n    age = (exp(-exp(-0.0263 *(rho + 40.2789))) *rho *3624)\r\n    return age\r\n\r\n\r\ndef ConfidenceInterval(tree):\r\n\r\n    rho = Rho(tree)\r\n    se = StErr(tree)\r\n    lower = max(exp(-exp(-0.0263 * ((rho - (1.96 * se)) + 40.2789)))* \\\r\n            (rho - (1.96 * se)) * 3624.0, 0)\r\n    upper = exp(-exp(-0.0263 * ((rho + (1.96 * se)) + 40.2789)))* \\\r\n            (rho + (1.96 * se)) * 3624.0;\r\n    return (lower, upper)\r\n\r\n\r\ndef mutationCount(node, tree, mutCount = 0):\r\n\r\n    mutCount += len(tree.tree[node].mutations)\r\n    parent = tree.tree[node].parent\r\n    if parent != tree.tree.keys()[0]:\r\n        mutCount = mutationCount(parent, tree, mutCount)\r\n    return mutCount\r\n\r\n\r\ndef fN(tree, node, N = 1):\r\n\r\n    t = tree.tree[node].type\r\n    if N == 1:\r\n        if t[0] >= N and t[2] > 0:\r\n            return fStats(Tree(tree.subtree(node)), N)\r\n        else: return ['NE','NE','NE']\r\n    elif N == 2:\r\n        if t[0] >= N:\r\n            return fStats(Tree(tree.subtree(node)), N)\r\n        else: return ['NE','NE','NE']\r\n    \r\ndef fStats(tree, N):\r\n\r\n    f = True\r\n    nodes = tree.tree.values()\r\n    for i in range(len(nodes)-1, 0, -1):\r\n        if nodes[i].type[0] > (N-1) or nodes[i].type[1] > (N-1) \\\r\n           or nodes[i].isSource() in [\"Source\", \"Undefined\"]:\r\n            tree.removeNode(nodes[i].name)            \r\n    if len(tree.leaves) == 0: return [0,'--','--'] #N/As\r\n    elif len(tree.leaves) == 1: return [1,0,0]\r\n    else: return len(tree.leaves), Rho(tree, f), StErr(tree, f)\r\n","sub_path":"stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":2370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"264260599","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Dec 19 12:46:38 2017\r\n\r\n@author: benmo\r\n\"\"\"\r\n\r\nimport time, datetime, sys, os, socket, dask, rpy2, numpy as np, pandas as pd, xarray as xr\r\nimport netCDF4, h5py\r\nfrom dask import dataframe as ddf\r\nfrom arctic import Arctic\r\nfrom pyspark.sql import SparkSession\r\nfrom netCDF4 import Dataset as nc4ds\r\n#from sympy import *\r\nimport matplotlib.pyplot as plt\r\n\r\nfrom functools import reduce\r\nimport psycopg2\r\nimport sqlalchemy\r\n\r\nconn = psycopg2.connect(\"dbname='timeseries' user='postgres' host='localhost' password='justbusted'\")\r\nuri=\"postgresql://postgres:justbusted@localhost:5432/timeseries\"\r\nengine = sqlalchemy.create_engine(uri)\r\n\r\n\r\n\r\n\r\n'''cache = Chest()\r\ncache = Chest(path='D:/Data/FinData/temp/', available_memory=3e9)'''\r\ncName = socket.gethostname()\r\nscriptDir = os.path.dirname(os.path.realpath(sys.argv[0]))\r\n\r\nstore = Arctic('localhost')\r\n#store.initialize_library('StockData')\r\n#library = store['Test']\r\n\r\ninit = 0\r\n\r\n\r\n#stocks_xr = xr.open_dataset('Z:/netCDF/dailyNCDF.nc', chunks={'Ticker': 64})\r\n\r\nscriptDir = os.path.dirname(os.path.realpath(sys.argv[0]))   # get script directory\r\n\r\nif sys.platform == 'linux':\r\n    genDir = \"/home/benmo/OneDrive/GitHub/DataPlayground\" #get general functions path\r\n    os.chdir(genDir)\r\n    drive = \"/home/benmo/Data\"\r\nelse:\r\n    try: \r\n        genDir = \"D:/OneDrive/GitHub/DataPlayground\" #get general functions path\r\n        os.chdir(genDir)\r\n        drive = \"D:\"\r\n    except: \r\n        genDir = \"D:/benmo/OneDrive/GitHub/DataPlayground\"\r\n        os.chdir(genDir)\r\n        drive = \"D:\"\r\n\r\n\r\nfrom General import * # import custom 'general' functions\r\n\r\nos.chdir(scriptDir)\r\n\r\ntoday = datetime.datetime.fromtimestamp(time.time())\r\n\r\ndef initIntra():\r\n    store = Arctic('localhost')\r\n    store.initialize_library('IntraDay')\r\n    library = store['IntraDay']\r\n    dirList = os.listdir('{drive}/IntraDay/'.format(drive=drive))\r\n    \r\n    def dateord(x):\r\n        dateo = time.strptime(x,'%d_%m_%Y.csv')\r\n        dateo = datetime.date(dateo.tm_year,dateo.tm_mon,dateo.tm_mday)\r\n        return dateo.toordinal()\r\n    \r\n    \r\n    for stock in dirList:\r\n        filesi = os.listdir('{drive}/IntraDay/{s}/'.format(s=stock,drive=drive))\r\n        days5i = pd.read_csv(\"{drive}/IntraDay/{s}/{d}\".format(drive=drive,\r\n                             s=stock,d=reduce(lambda x, y: y if dateord(y) < dateord(x) else x, filesi)))\r\n        days5i.drop('a', axis=1, inplace=True)\r\n        days5i.rename(columns={'Unnamed: 0':'Date'}, inplace=True)\r\n        days5i['Date'] = pd.to_datetime(days5i['Date'])\r\n        days5i.set_index('Date',inplace=True)\r\n        days5i['source'] = reduce(lambda x, y: y if dateord(y) < dateord(x) else x, filesi)\r\n        library.write(stock, days5i, metadata={'source': 'GoogleFinance'})\r\n        \r\n        list1 = []\r\n        temp = pd.Series(filesi)\r\n        for x in filesi:        \r\n            mini = reduce(lambda x, y: y if dateord(y) < dateord(x) else x, temp.values)    \r\n            list1.append(mini)\r\n            temp = temp.drop(temp[temp == mini].index)\r\n        \r\n        filesi = list1\r\n        \r\n        for i in range(len(filesi)):\r\n            \r\n            datei = time.strptime(filesi[i],'%d_%m_%Y.csv')\r\n            datei = 
datetime.date(datei.tm_year,datei.tm_mon,datei.tm_mday)\r\n            datei_num = datei.toordinal()\r\n            \r\n            date_start = dateord(library.read(stock).data['source'][-1])\r\n            \r\n            if datei_num > date_start:\r\n                \r\n                data_stock = library.read(stock).data\r\n                data_stock = data_stock.append(days5i)\r\n            \r\n                days5i = pd.read_csv(\"{drive}/IntraDay/{s}/{d}\".format(drive=drive,s=stock,d=filesi[i]))\r\n                days5i.drop('a', axis=1, inplace=True)\r\n                days5i.rename(columns={'Unnamed: 0':'Date'}, inplace=True)\r\n                days5i['Date'] = pd.to_datetime(days5i['Date'])\r\n                days5i.set_index('Date',inplace=True)\r\n                days5i['source'] = filesi[i]\r\n                \r\n                library.write(stock, data_stock, metadata={'source': 'GoogleFinance'})\r\n                \r\n                \r\n\r\ndef intraUpdate():\r\n    store = Arctic('localhost')\r\n    library = store['IntraDay']\r\n    dirList = os.listdir('{drive}/IntraDay/'.format(drive=drive))\r\n    \r\n  \r\n    for stock in dirList:\r\n        \r\n        def dateord(x):\r\n            dateo = time.strptime(x,'%d_%m_%Y.csv')\r\n            dateo = datetime.date(dateo.tm_year,dateo.tm_mon,dateo.tm_mday)\r\n            return dateo.toordinal()\r\n        \r\n        \r\n        try:\r\n            filesi = os.listdir('{drive}/IntraDay/{s}/'.format(drive=drive,s=stock))\r\n            date_start = library.read(stock).data['source'][-1]\r\n            date_start = time.strptime(date_start,'%d_%m_%Y.csv')\r\n            date_start = datetime.date(date_start.tm_year,date_start.tm_mon,date_start.tm_mday)\r\n            date_start = date_start.toordinal()\r\n            item = library.read(stock)\r\n            data_stock = item.data\r\n            \r\n            list1 = []\r\n            temp = pd.Series(filesi)\r\n            for x in filesi:        \r\n                mini = reduce(lambda x, y: y if dateord(y) < dateord(x) else x, temp.values)    \r\n                list1.append(mini)\r\n                temp = temp.drop(temp[temp == mini].index)\r\n            \r\n            filesi = list1\r\n            \r\n            \r\n            \r\n            for i in range(len(filesi)):\r\n                datei = time.strptime(filesi[i],'%d_%m_%Y.csv')\r\n                datei = datetime.date(datei.tm_year,datei.tm_mon,datei.tm_mday)\r\n                datei_num = datei.toordinal()\r\n                \r\n                if datei_num > date_start:\r\n                    days5i = pd.read_csv(\"{drive}/IntraDay/{s}/{d}\".format(drive=drive,s=stock,d=filesi[i]))\r\n                    days5i.drop('a', axis=1, inplace=True)\r\n                    days5i.rename(columns={'Unnamed: 0':'Date'}, inplace=True)\r\n                    days5i['Date'] = pd.to_datetime(days5i['Date'])\r\n                    days5i.set_index('Date',inplace=True)\r\n                    days5i['source'] = filesi[i]\r\n                    data_stock = data_stock.append(days5i)\r\n        except:\r\n            pass\r\n        library.write(stock, data_stock, metadata={'source': 'GoogleFinance'})\r\n    \r\n\r\nif init == 1:\r\n    for i in stocks_xr.Ticker: \r\n        tickeri = str(i.values)\r\n        stocki=stocks_xr.sel(Ticker = tickeri).to_dataframe()\r\n        stocki.set_index(stocki.index,inplace=True)\r\n        library.write(tickeri, stocki, metadata={'source': 'xarray'})\r\n    \r\n\r\n#item = 
library.read('A')\r\n#a = item.data\r\n#metadata = item.metadata\r\n#a.index[0:15]\r\n        \r\n        \r\n        \r\n#formatter = dates_noWkd(x)\r\ndef test_it():\r\n    library = store['StockData']\r\n    #library = store['IntraDay']         \r\n    item = library.read('A')\r\n    bob  = item.data\r\n    #returns = bob[['High', 'Low']]\r\n    returns = bob[['Adj_High', 'Adj_Low']]\r\n    returns = returns.drop_duplicates().sort_index()\r\n    formatter = dates_noWkd(returns.index)\r\n\r\n    sns.set_style(\"darkgrid\")\r\n    fig, ax = plt.subplots()\r\n    #returns.plot.line(ax.xaxis.set_major_formatter(formatter))\r\n\r\n    \r\n    itdA[['Low','High']].plot.line(use_index=False)\r\n    \r\n\r\ndef tsDB():\r\n    for sym in library.list_symbols():\r\n        item = library.read(sym)\r\n        bob  = item.data\r\n        returns = bob.drop_duplicates().sort_index().reset_index().drop(\r\n                columns='source')\r\n        returns['ticker'] = sym\r\n        returns = returns.rename(columns=lambda x: x.lower())\r\n        returns.to_sql('intraday', engine.connect(), index=False,if_exists='append')\r\n        ","sub_path":"Testing/TestFinDB.py","file_name":"TestFinDB.py","file_ext":"py","file_size_in_byte":7713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"517676690","text":"import h5py\nimport numpy as np\nimport os.path\nimport random\nimport matplotlib.pyplot as plt\nimport math\nimport tensorflow as tf\n\n#TODO(Joonsu): Integrate the function with Tensorflow Dataset \ndef create_OAI_dataset(data_folder, get_train=True, start=1, end=61, get_slices=True, save=False):\n    \n    img_list = []\n    seg_list = []\n    \n    for i in range(start,end):\n        for j in range(2):\n            if i <= 9:\n                if get_train:\n                    fname_img = 'train_00{}_V0{}.im'.format(i, j)\n                    fname_seg = 'train_00{}_V0{}.seg'.format(i, j)\n                else:\n                    fname_img = 'valid_00{}_V0{}.im'.format(i, j)\n                    fname_seg = 'valid_00{}_V0{}.seg'.format(i, j)\n            else:\n                if get_train:\n                    fname_img = 'train_0{}_V0{}.im'.format(i, j)\n                    fname_seg = 'train_0{}_V0{}.seg'.format(i, j)\n                else:\n                    fname_img = 'valid_0{}_V0{}.im'.format(i, j)\n                    fname_seg = 'valid_0{}_V0{}.seg'.format(i, j)\n            \n            img_filepath = os.path.join(data_folder, fname_img)\n            seg_filepath = os.path.join(data_folder, fname_seg)\n\n            with h5py.File(img_filepath,'r') as hf:\n                img = np.array(hf['data'])\n            with h5py.File(seg_filepath,'r') as hf:\n                seg = np.array(hf['data'])\n            \n            if get_slices:\n                img = np.rollaxis(img, 2, 0)\n                seg = np.rollaxis(seg, 2, 0) \n            \n            img = np.expand_dims(img, axis=3)    \n            img_list.append(img)\n            seg_list.append(seg)\n\n        print('{} out of {} datasets have been processed'.format(i, end-start))\n\n    x = np.asarray(img_list)\n    y = np.asarray(seg_list)\n\n    if get_slices:\n        x = np.reshape(x, (x.shape[0]*x.shape[1], x.shape[2], x.shape[3], x.shape[4]))\n        y = np.reshape(y, (y.shape[0]*y.shape[1], y.shape[2], y.shape[3], y.shape[4]))\n\n    if save:\n        if get_train:\n            fname_img_npy = os.path.join(data_folder, 'x_train.npy')\n            fname_seg_npy = os.path.join(data_folder, 'y_train.npy')\n        else:\n            fname_img_npy = os.path.join(data_folder, 'x_valid.npy')\n            fname_seg_npy = os.path.join(data_folder, 'y_valid.npy')\n\n        np.save(fname_img_npy, x)\n        np.save(fname_seg_npy, y)\n\n    return x, y\n\ndef train_generator(data_path, batch_size = 10, multi_class = False):\n    \n    folders = os.listdir(data_path)\n\n    sample_idx = folders.index(\"samples\")\n    samples_path = data_path + str(folders[sample_idx])\n\n    labels_idx = folders.index(\"labels\")\n    labels_path = data_path + str(folders[labels_idx]) \n\n    samples_in = os.listdir(samples_path)\n    labels_in = os.listdir(labels_path)    \n    \n    # Loop forever so the generator never terminates\n    while True: \n\n        samples = []\n        labels = []\n\n        count = 0\n        \n        while count < batch_size:\n\n            rand_idx = random.randint(0, len(samples_in) - 1)\n\n            sample = np.load(samples_path + \"/\" + samples_in[rand_idx])\n            sample = sample[48:336,48:336]\n            samples.append(sample)\n            \n            label = np.load(labels_path + \"/\" + labels_in[rand_idx])\n            label = label[48:336,48:336,:]\n\n            if multi_class == True:\n                background = np.zeros((label.shape[0], 
label.shape[1], 1))\n                for i in range(label.shape[0]):\n                    for j in range(label.shape[1]):\n                        sum = np.sum(label[i,j,:])\n                        if sum == 0:\n                            background[i][j] = 1\n                        else:\n                            background[i][j] = 0\n                            \n                label = np.concatenate((label, background), axis = 2)\n                label = np.reshape(label, (label.shape[0]*label.shape[1], 7))\n                labels.append(label)\n\n            else:\n                label = np.sum(label, axis = 2)\n                labels.append(label)\n            \n            X_ = np.array(samples)\n            Y_ = np.array(labels)\n\n            X_ = np.expand_dims(X_, axis=3)\n            if not multi_class:\n                Y_ = np.expand_dims(Y_, axis=3)\n                        \n            count += 1\n\n        yield (X_, Y_)\n\ndef get_slices(path_in, path_out, extension):\n    \n    image_3d = os.listdir(path_in)\n    idx = 1\n    file_list = []\n\n    for image in image_3d:\n        if image.endswith(extension):\n            file_list.append(image)\n    \n    for image in file_list:\n        # create training samples\n        img_path = os.path.join(path_in, str(image))\n        with h5py.File(img_path, 'r') as hf:\n            img = np.array(hf['data'])\n\n        img_shape = img.shape\n\n        for channel in range(img_shape[2]):\n            if len(img_shape) == 3:\n                img_slice = img[:,:,channel]\n            elif len(img_shape) == 4:\n                img_slice = img[:,:,channel,:]\n            \n            name_out = \"img_\" + str(idx)\n            save_samples = os.path.join(path_out, name_out)\n            np.save(save_samples, img_slice)\n            idx += 1\n\nclass DataGenerator(tf.keras.utils.Sequence):\n    'Generates data for Keras'\n    def __init__(self, \n                x_set, \n                y_set,\n                batch_size=4, \n                shuffle=True, \n                multi_class=True):\n\n        self.x_set = x_set\n        self.y_set = y_set\n        self.x = os.listdir(x_set) \n        self.y = os.listdir(y_set)\n        self.batch_size = batch_size\n        self.shuffle = shuffle\n        self.multi_class = multi_class\n        self.idx_list = np.arange(start=1, stop=len(self.x)+1)\n        \n    def __len__(self):\n        return math.ceil(len(self.x) / self.batch_size)\n\n    def __getitem__(self, idx):\n        \n        indexes = self.idx_list[idx*self.batch_size:(idx+1)*self.batch_size]\n        # generate data\n        X, y = self.data_generator(indexes)\n\n        return X, y\n\n    def on_epoch_end(self):\n        if self.shuffle == True:\n            random.shuffle(self.idx_list)\n\n    def data_generator(self, indexes):\n        \n        # Initialization\n        X = np.empty((self.batch_size, 288, 288, 1))\n        \n        if self.multi_class:\n            Y = np.empty((self.batch_size, 288*288, 7))\n        else:\n            Y = np.empty((self.batch_size, 288,288, 1))\n\n        for i, idx in enumerate(indexes):\n\n            img = np.load(self.x_set + 'img_' + str(idx) + '.npy')\n            img = img[48:336,48:336]\n            img = np.expand_dims(img, axis=2)\n            X[i,:] = img\n\n            seg = np.load(self.y_set + 'img_' + str(idx) + '.npy')\n            seg = seg[48:336,48:336,:]\n\n            if self.multi_class:\n                seg_1d = self.get_multiclass(seg)\n                
Y[i,:] = seg_1d\n            else:\n                seg = np.sum(seg, axis=2)\n                seg = np.expand_dims(seg, axis=2)\n                Y[i,:] = seg\n       \n        return X, Y\n\n    def get_multiclass(self, label):\n        \n        background = np.zeros((label.shape[0], label.shape[1], 1))\n        for i in range(label.shape[0]):\n            for j in range(label.shape[1]):\n                sum = np.sum(label[i,j,:])\n                if sum == 0:\n                    background[i][j] = 1\n                else:\n                    background[i][j] = 0\n                    \n        label = np.concatenate((label, background), axis = 2)\n        label = np.reshape(label, (label.shape[0]*label.shape[1], label.shape[2]))\n        \n        return label\n        ","sub_path":"Segmentation/utils/data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":7611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"235904775","text":"# -*- coding: utf-8 -*-\n__author__ = 'PetrVasiliev'\nimport urllib2\nfrom app import app, render_template, redirect\nfrom app.tool.get_from_db import get_user\nfrom flask import jsonify, request, send_from_directory, session\nfrom database import db_session\nfrom models import Event, Fund, Patient, City\nfrom fileload import upload_file\nfrom tools import get_url\napp.secret_key = app.config['SECRET_KEY']\n\n@app.route('/')\ndef index():\n    if 'username' in session:\n        return render_template(\"index.html\")\n    else:\n        return render_template(\"signin.html\")\n\n@app.route('/api')\ndef docs():\n    if 'username' in session:\n        return render_template(\"docs.html\")\n    else:\n        return redirect(\"/signin\")\n\n@app.route('/events', methods=['GET'])\ndef events():\n    city_name = request.args.get('city')\n    city = City.query.filter(City.city == city_name).first()\n    u = Event.query.filter(Event.city_id == city.id).all()\n    if u:\n        data = []\n        for event in u:\n            object_data = {\n                'info': 'success',\n                'title': event.title,\n                'description': event.description,\n                'date': event.date,\n                'cover': get_url('events', event.cover),\n                'longi': event.longi,\n                'lati': event.lati,\n                'city': event.city_id,\n                'id': event.id,\n                'category': event.category_id,\n                'address': event.address\n            }\n            data.append(object_data)\n        response = {\n            'info': 'success',\n            'data': data\n        }\n    else:\n        response = {\n            'info': 'error'\n        }\n    return jsonify(response)\n\n@app.route('/event', methods=['POST'])\ndef postevent():\n    if 'username' in session:\n        title = request.form['title'].encode('utf-8')\n        description = request.form['description'].encode('utf-8')\n        date = str(request.form['date'])\n        cover = request.files['cover']\n        longi = str(request.form['longi'])\n        lati = str(request.form['lati'])\n        city = request.form['city'].encode('utf-8')\n        category = request.form['category'].encode('utf-8')\n        address = request.form['address'].encode('utf-8')\n        f = upload_file(cover, 'events')\n        if f == -1:\n            return jsonify({\n                'info': 'error'\n            })\n        u = Event(title, description, date, f, longi, lati, city, category, address)\n        db_session.add(u)\n        db_session.commit()\n        return jsonify({\n            'info': 'success'\n        })\n    else:\n        return redirect(\"/signin\")\n\n@app.route('/event', methods=['GET'])\ndef getevent():\n    id = request.args.get('id')\n    if id.isdigit():\n        event = Event.query.filter(Event.id == id).first()\n        if event:\n            response = {\n                'info': 'success',\n                'title': event.title,\n                'description': event.description,\n                'date': event.date,\n                'cover': event.cover,\n                'longi': event.longi,\n                'lati': event.lati,\n                'city': event.city,\n                'id': event.id,\n                'category': event.category\n            }\n        else:\n            response = {\n                'info': 'error'\n            }\n    else:\n        response = {\n            'info': 'error'\n        }\n\n    return 
jsonify(response)\n\n@app.route('/event', methods=['PUT'])\ndef putevent():\n    if 'username' in session:\n        id = str(request.form['id'])\n        title = request.form['title'].encode('utf-8')\n        description = request.form['description'].encode('utf-8')\n        date = str(request.form['date'])\n        cover = str(request.form['cover'])\n        longi = str(request.form['longi'])\n        lati = str(request.form['lati'])\n        city = request.form['city'].encode('utf-8')\n        category = request.form['category'].encode('utf-8')\n        address = request.form['address'].encode('utf-8')\n        u = db_session.query(Event).filter_by(id=id).first()\n        u.title = title\n        u.description = description\n        u.date = date\n        u.cover = cover\n        u.longi = longi\n        u.lati = lati\n        u.city = city\n        u.category = category\n        u.address = address\n        db_session.commit()\n        response = {\n            'info': 'success'\n        }\n        return jsonify(response)\n    else:\n        return redirect(\"/signin\")\n\n@app.route('/event', methods=['DELETE'])\ndef deleteevent():\n    if 'username' in session:\n        id = request.args.get('id')\n        if (id.isdigit()):\n            u = db_session.query(Event).filter_by(id=id).first()\n            if u:\n                db_session.delete(u)\n                db_session.commit()\n                response = {\n                    'info': 'success'\n                }\n            else:\n                response = {\n                    'info': 'error'\n                }\n        else:\n            response = {\n                'info': 'error'\n            }\n        return jsonify(response)\n    else:\n        return redirect(\"/signin\")\n\n@app.route('/funds', methods=['GET'])\ndef funds():\n    fund = Fund.query.all()\n    if (fund):\n        data = []\n        for u in fund:\n            object_data = {\n                'name': u.name,\n                'description': u.description,\n                'short_description': u.short_description,\n                'budget': u.budget,\n                'phone': u.phone,\n                'email': u.email,\n                'bank_book': u.bank_book,\n                'cover': get_url('funds', u.cover),\n                'id': u.id\n            }\n            data.append(object_data)\n        response = {\n            'info': 'success',\n            'data': data\n        }\n    else:\n        response = {\n            'info': 'error'\n        }\n    return jsonify(response)\n\n@app.route('/fund', methods=['GET'])\ndef getfund():\n    id = request.args.get('id')\n    if (id.isdigit()):\n        fund = Fund.query.filter(Fund.id == id).first()\n        if fund:\n            response = {\n                'info': 'success',\n                'title': fund.name,\n                'description': fund.description,\n                'short_description': fund.short_description,\n                'budget': fund.budget,\n                'phone': fund.phone,\n                'email': fund.email,\n                'bank_book': fund.bank_book,\n                'cover': fund.cover,\n                'id': fund.id\n            }\n        else:\n            response = {\n                'info': 'error'\n            }\n    else:\n        response = {\n            'info': 'error'\n        }\n    return jsonify(response)\n\n@app.route('/fund', methods=['POST'])\ndef postfund():\n    if 'username' in session:\n        name = request.form['name'].encode('utf-8')\n        
description = request.form['description'].encode('utf-8')\n        short_description = request.form['short_description'].encode('utf-8')\n        cover = request.files['cover']\n        phone = str(request.form['phone'])\n        email = str(request.form['email'])\n        budget = str(request.form['budget'])\n        bank_book = str(request.form['bank_book'])\n        f = upload_file(cover, 'funds')\n        if (f == -1):\n            return jsonify({\n                'info': 'error'\n            })\n        u = Fund(name, description, budget, phone, email, bank_book, f, short_description)\n        db_session.add(u)\n        db_session.commit()\n        return jsonify({\n            'info': 'success'\n            }\n        )\n    else:\n        return redirect(\"/signin\")\n\n@app.route('/fund', methods=['PUT'])\ndef putfund():\n    if 'username' in session:\n        id = str(request.form['id'])\n        name = request.form['name'].encode('utf-8')\n        description = request.form['description'].encode('utf-8')\n        cover = str(request.form['cover'])\n        phone = str(request.form['phone'])\n        email = str(request.form['email'])\n        budget = str(request.form['budget'])\n        bank_book = str(request.form['bank_book'])\n        short_description = request.form['short_description'].encode('utf-8')\n        u = db_session.query(Fund).filter_by(id=id).first()\n        u.name = name\n        u.description = description\n        u.cover = cover\n        u.phone = phone\n        u.email = email\n        u.budget = budget\n        u.bank_book = bank_book\n        u.short_description = short_description\n        db_session.commit()\n        response = {\n            'info': 'success'\n        }\n        return jsonify(response)\n    else:\n        return redirect(\"/signin\")\n\n@app.route('/fund', methods=['DELETE'])\ndef deletefund():\n    if 'username' in session:\n        id = request.args.get('id')\n        if (id.isdigit()):\n            u = db_session.query(Fund).filter_by(id=id).first()\n            if (u):\n                db_session.delete(u)\n                db_session.commit()\n                response = {\n                    'info': 'success'\n                }\n            else:\n                response = {\n                    'info': 'error'\n                }\n        else:\n            response = {\n                'info': 'error'\n            }\n        return jsonify(response)\n    else:\n        return redirect(\"/signin\")\n\n@app.route('/patients', methods=['GET'])\ndef patients():\n    if request.method == 'GET':\n        lim = request.args.get('limit')\n        off = request.args.get('offset')\n        fund_id = request.args.get('fund_id')\n        if fund_id:\n            results = Patient.query.filter(Patient.fund_id == fund_id).limit(lim).offset(off).all()\n        else:\n            results = Patient.query.limit(lim).offset(off).all()\n        if results:\n            data = []\n            for u in results:\n                object_data = {\n                    'fund': u.funds.name,\n                    'name': u.name,\n                    'surname': u.surname,\n                    'secname': u.secname,\n                    'age': u.age,\n                    'diagnosis': u.diagnosis,\n                    'budget': u.budget,\n                    'current_budget': u.current_budget,\n                    'biography': u.biography,\n                    'id': u.id,\n                    'cover': get_url('patients', u.cover)\n                }\n          
      data.append(object_data)\n            return jsonify({\n                'info': 'success',\n                'data': data\n            })\n        else:\n            return jsonify({\n                'info': 'error'\n            })\n\n@app.route('/patient', methods=['GET'])\ndef getpatient():\n    id = request.args.get('id')\n    if(id.isdigit()):\n        patient = Patient.query.filter(Patient.id == id).first()\n        if(patient):\n            response = {\n                'info': 'success',\n                'fund': patient.funds.name,\n                'name': patient.name,\n                'surname': patient.surname,\n                'secname': patient.secname,\n                'age': patient.age,\n                'diagnosis': patient.diagnosis,\n                'budget': patient.budget,\n                'current_budget': patient.current_budget,\n                'biography': patient.biography,\n                'id': patient.id\n            }\n        else:\n            response = {\n                'info': 'error'\n            }\n    else:\n        response = {\n            'info': 'error'\n        }\n    return jsonify(response)\n\n@app.route('/patient', methods=['POST'])\ndef postpatient():\n    if 'username' in session:\n        name = request.form['name'].encode('utf-8')\n        surname = request.form['surname'].encode('utf-8')\n        secname = request.form['secname'].encode('utf-8')\n        age = str(request.form['age'])\n        diagnosis = request.form['diagnosis'].encode('utf-8')\n        budget = str(request.form['budget'])\n        current_budget = str(request.form['current_budget'])\n        biography = request.form['biography'].encode('utf-8')\n        fund_id = request.form['fund_id']\n        cover = request.files['cover']\n        filename = upload_file(cover, 'patients')\n        if filename == -1:\n            return jsonify({\n                'info': 'error'\n            })\n        u = Patient(fund_id, name, surname, secname, age, diagnosis, budget, current_budget, biography, filename)\n        db_session.add(u)\n        db_session.commit()\n        response = {\n            'info': 'success'\n        }\n        return jsonify(response)\n    else:\n        return redirect(\"/signin\")\n\n@app.route('/patient', methods=['PUT'])\ndef putpatient():\n    if 'username' in session:\n        id = str(request.form['id'])\n        name = request.form['name'].encode('utf-8')\n        surname = request.form['surname'].encode('utf-8')\n        secname = request.form['secname'].encode('utf-8')\n        age = str(request.form['age'])\n        diagnosis = request.form['diagnosis'].encode('utf-8')\n        budget = str(request.form['budget'])\n        current_budget = str(request.form['current_budget'])\n        biography = request.form['biography'].encode('utf-8')\n        fund_id = request.form['fund_id']\n        u = db_session.query(Patient).filter_by(id=id).first()\n        u.name = name\n        u.secname = secname\n        u.surname = surname\n        u.age = age\n        u.diagnosis = diagnosis\n        u.budget = budget\n        u.current_budget = current_budget\n        u.biography = biography\n        u.fund_id = fund_id\n        db_session.commit()\n        response = {\n            'info': 'success'\n        }\n        return jsonify(response)\n    else:\n        return redirect(\"/signin\")\n\n@app.route('/patient', methods=['DELETE'])\ndef deletepatient():\n    if 'username' in session:\n        id = request.args.get('id')\n        if (id.isdigit()):\n            
u = db_session.query(Patient).filter_by(id=id).first()\n            if (u):\n                db_session.delete(u)\n                db_session.commit()\n                response = {\n                    'info': 'success'\n                }\n            else:\n                response = {\n                    'info': 'error'\n                }\n        else:\n            response = {\n                'info': 'error'\n            }\n        return jsonify(response)\n    else:\n        return redirect(\"/signin\")\n\n@app.route('/cities', methods=['GET'])\ndef getcities():\n    cities = City.query.all()\n    cities_json = []\n    if cities:\n        for city in cities:\n            city_json = {\n                \"id\": city.id,\n                \"city\": city.city\n            }\n            cities_json.append(city_json)\n        return jsonify({\n            'info': 'success',\n            'data': cities_json\n        })\n    else:\n        return jsonify({\n            'info': 'error'\n        })\n\n@app.route('/uploads/<path>/<filename>')\ndef uploaded_file(path, filename):\n    return send_from_directory(app.config['UPLOAD_FOLDER'] + '/' + path,\n                               filename)\n\n@app.route('/signin', methods=['GET', 'POST'])\ndef signin():\n    if request.method == 'POST':\n        info = u\"Неверный логин или пароль\"\n        username = request.form['username']\n        password = request.form['password']\n        if username and password:\n            user = get_user(username, password)\n            if user:\n                session['username'] = username\n                session['password'] = password\n                return redirect('/')\n            else:\n                return render_template(\"signin.html\", info=info)\n        else:\n            return render_template(\"signin.html\", info=info)\n    else:\n        return render_template(\"signin.html\")\n\n@app.route(\"/logout\")\ndef logout():\n    session.clear()\n    return redirect(\"/signin\")\n\n@app.errorhandler(400)\ndef bad_request(e):\n    content = {\n        \"title\": \"Bad Request\",\n        \"status\": \"400 - Bad Request\",\n        \"description\": \"Zhopa koroche\"\n    }\n    return render_template(\"error.html\", content=content), 400\n\n@app.errorhandler(404)\ndef not_found(e):\n    if 'username' in session:\n        content = {\n            \"title\": \"Not found\",\n            \"status\": \"404 - Not found\",\n            \"description\": \"This request was not found\"\n        }\n        return render_template(\"error.html\", content=content), 404\n    else:\n        return redirect(\"/signin\")\n\n@app.route('/testmap', methods=['POST'])\ndef map():\n    address = request.form['address'].encode('utf-8')\n    url = \"https://maps.googleapis.com/maps/api/geocode/json?address=%s\" % address\n    response = urllib2.urlopen(url)\n    jsongeocode = response.read()\n    return jsongeocode","sub_path":"app/router.py","file_name":"router.py","file_ext":"py","file_size_in_byte":16530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"521778788","text":"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\nimport os\nimport boto3\nimport typing as t\nfrom dataclasses import dataclass, field\nfrom hmalib.common.classification_models import (\n    BankedContentIDClassificationLabel,\n    BankIDClassificationLabel,\n    BankSourceClassificationLabel,\n    ClassificationLabel,\n    Label,\n    WritebackTypes,\n    PendingOpinionChange,\n)\nfrom hmalib.common.evaluator_models import ActionLabel, ActionRule\nfrom hmalib.common.aws_dataclass import HasAWSSerialization\nfrom hmalib.common.config import HMAConfig\nfrom mypy_boto3_sqs import SQSClient\nfrom functools import lru_cache\n\n\n@dataclass\nclass BankedSignal:\n    \"\"\"\n    BankedSignal fields:\n    - `banked_content_id`: Inside the bank, the unique way to refer to what\n      was matched against\n    - `bank_id`: The unique way to refer to the bank banked_content_id came from\n    - `bank_source`: This is forward looking: this might be 'te' or 'local';\n      indicates source of or relationship between one or more banks\n    - `classifications`: a set of labels that provide context about the banked\n       signal\n    \"\"\"\n\n    banked_content_id: str\n    bank_id: str\n    bank_source: str\n    classifications: t.Set[Label] = field(default_factory=set)\n\n    def add_bank_classifications(self):\n        self.classifications.add(BankSourceClassificationLabel(self.bank_source))\n        self.classifications.add(BankIDClassificationLabel(self.bank_id))\n        self.classifications.add(\n            BankedContentIDClassificationLabel(self.banked_content_id)\n        )\n\n    def add_classification(self, classification: str):\n        if len(self.classifications) == 0:\n            self.add_bank_classifications()\n        self.classifications.add(ClassificationLabel(classification))\n\n\n@dataclass\nclass MatchMessage(HasAWSSerialization):\n    \"\"\"\n    Captures a set of matches that will need to be processed. We create one\n    match message for a single content key. It is possible that a single content\n    hash matches multiple datasets. 
When it does, the entire set of matches is\n    forwarded together so that any appropriate action can be taken.\n\n    - `content_key`: A way for partners to refer uniquely to content on their\n      site\n    - `content_hash`: The hash generated for the content_key\n    \"\"\"\n\n    content_key: str\n    content_hash: str\n    matching_banked_signals: t.List[BankedSignal] = field(default_factory=list)\n\n\n@dataclass\nclass ActionMessage(MatchMessage):\n    \"\"\"\n    The action performer needs the match message plus which action to perform\n    \"\"\"\n\n    action_label: ActionLabel = ActionLabel(\"UnspecifiedAction\")\n    action_rules: t.List[ActionRule] = field(default_factory=list)\n\n    @classmethod\n    def from_match_message_action_label_and_action_rules(\n        cls,\n        match_message: MatchMessage,\n        action_label: ActionLabel,\n        action_rules: t.List[ActionRule],\n    ) -> \"ActionMessage\":\n        return cls(\n            match_message.content_key,\n            match_message.content_hash,\n            match_message.matching_banked_signals,\n            action_label,\n            action_rules,\n        )\n\n\n@dataclass\nclass WritebackMessageConfig:\n    \"\"\"\n    Simple holder for getting typed environment variables\n    \"\"\"\n\n    writebacks_queue_url: str\n    sqs_client: SQSClient\n\n    @classmethod\n    @lru_cache(maxsize=None)\n    def get(cls):\n        return cls(\n            writebacks_queue_url=os.environ[\"WRITEBACKS_QUEUE_URL\"],\n            sqs_client=boto3.client(\"sqs\"),\n        )\n\n\n@dataclass\nclass WritebackMessage(HasAWSSerialization):\n    \"\"\"\n    Writebacks happen on a collection of BankedSignals. To perform a writeback,\n    instantiate an instance of this class and run the send_to_queue method.\n\n    The Writebacker needs the match message plus which writeback type to perform\n    on the source of the signal (for now, ThreatExchange).\n    \"\"\"\n\n    banked_signals: t.List[BankedSignal]\n\n    writeback_type: WritebackTypes.WritebackType = field(\n        default=WritebackTypes.NoWriteback\n    )\n\n    @classmethod\n    def from_match_message_and_type(\n        cls,\n        match_message: MatchMessage,\n        writeback_type: WritebackTypes.WritebackType,\n    ) -> \"WritebackMessage\":\n        return cls(\n            match_message.matching_banked_signals,\n            writeback_type,\n        )\n\n    @classmethod\n    def from_banked_signal_and_opinion_change(\n        cls, banked_signal: BankedSignal, opinion_change: PendingOpinionChange\n    ) -> \"WritebackMessage\":\n        opinion_change_to_writeback_type = {\n            PendingOpinionChange.MARK_TRUE_POSITIVE: WritebackTypes.TruePositive,\n            PendingOpinionChange.MARK_FALSE_POSITIVE: WritebackTypes.FalsePositive,\n            PendingOpinionChange.REMOVE_OPINION: WritebackTypes.RemoveOpinion,\n        }\n\n        writeback_type = opinion_change_to_writeback_type.get(\n            opinion_change, WritebackTypes.NoWriteback\n        )\n\n        return cls(\n            [banked_signal],\n            writeback_type,\n        )\n\n    def send_to_queue(self) -> None:\n        if self.writeback_type == WritebackTypes.NoWriteback:\n            return\n\n        config = WritebackMessageConfig.get()\n        config.sqs_client.send_message(\n            QueueUrl=config.writebacks_queue_url,\n            MessageBody=self.to_aws_json(),\n        
)\n","sub_path":"hasher-matcher-actioner/hmalib/common/message_models.py","file_name":"message_models.py","file_ext":"py","file_size_in_byte":5349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"80080207","text":"# coding=utf-8\nimport os\nfrom models import Employee\nfrom bs4 import BeautifulSoup\nfrom config import Config\nfrom mparser import ProfileParser\nfrom mparser import get_doc_bySelenium\n\ndef handler(tag):\n    tds = tag.find_all(\"td\")\n    if not tds or len(tds) != 4:\n        return None\n    employee = Employee()\n    ass = tag.find_all('a')\n    if ass and len(ass) != 0:\n        employee.url = ass[0]['href']\n    employee.name = tds[0].get_text().strip()\n    employee.name = ''.join(employee.name.split())\n\n    title = tds[1].get_text()\n    if title and len(title) != 0:\n        employee.title = ''.join(title.split())\n\n    email = tds[3].get_text()\n    if email and len(email) != 0:\n        employee.email = ''.join(email.split())\n\n\n    tel = tds[2].get_text()\n    if tel and len(tel) != 0:\n        employee.tel = ''.join(tel.split())\n\n    return employee\n\ndef set_attr_hook(name,value):\n    if name == 'departments':\n        if len(value) > 32:\n            return None\n    elif name == 'email':\n        pass\n    return value\n\n# @doc: 输入为个人详情页的整个网页源码\n# @output:输出employee,如果没有检测到内容返回None          \n# employee可用属性(url, name, email, tel, title, profile, research, departments,fax,addr):\ndef profile_handler(doc, name, url, path):\n    filename = os.path.join(path, name + \".html\")\n    employee = Employee(name=name, url=url)\n\n    # 只保存名称和个人主页,个人简历文件另存当前目录\n    soup = BeautifulSoup(doc, Config.SOUP_PARSER)\n    divs = soup.find_all(name=\"div\", class_=\"rightb_con\", limit=1)\n    if not divs or len(divs) == 0:\n        div = soup\n    else:\n        div = divs[0]\n\n    if not os.path.exists(filename):\n        with open(filename, 'wb') as fp:\n            content = div.prettify()\n            fp.write(content)\n            fp.close()\n\n    return employee\n","sub_path":"eduParser/out/hust/计算机科学与技术学院/MyHandler.py","file_name":"MyHandler.py","file_ext":"py","file_size_in_byte":1855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"3606782","text":"'''\nArtificial Intelligence CSCI-3613, Fall 2021\nAnar Mehraliyev\nCRN 10321\nExtra Assignment\nOctober 13, 2021\n'''\n\nimport os\nfrom datetime import datetime\n\nimport solver\nimport ui\nfrom node import *\nfrom solver import isSolveable\n\n'''\n---Goal State in This Problem---\n _____ _____ _____\n|     |     |     |\n|  1  |  2  |  3  |\n|_____|_____|_____|\n|     |     |     |\n|  4  |  5  |  6  |\n|_____|_____|_____|\n|     |     |     |\n|  7  |  8  |  0  |\n|_____|_____|_____|\n\nsample 1: [[1, 2, 3], [0, 4, 6], [7, 5, 8]], (1, 0)\nsample 2: [[1, 2, 3], [5, 6, 0], [7, 8, 4]], (1, 2)\nsample 3: [[1, 8, 2], [0, 4, 3], [7, 6, 5]], (1, 0)\nsample 4: [[7, 2, 4], [5, 0, 6], [8, 3, 1]], (1, 1);\n'''\n\nos.system('color a')\n\nsamples = [np.array([[1, 2, 3], [0, 4, 6], [7, 5, 8]]),\n           np.array([[1, 2, 3], [5, 6, 0], [7, 8, 4]]),\n           np.array([[1, 8, 2], [0, 4, 3], [7, 6, 5]]),\n           np.array([[7, 2, 4], [5, 0, 6], [8, 3, 1]])]\n\n# it will hold the user input data\nargs = {}\nprint(\"Pick the initial state from:\\n[1] Samples (4 available);\\n[2] Random generator\\n[3] Input\")\n\nargs['start'] = int(input())\n\n# Deciding how the initial state is to be generated. From samples, randomized, or manually.\nif args['start'] == 1:\n    print(\"Select the sample:\\n1 2 3 4\")\n    initial_state = samples[int(input()) - 1]\n\nelif args['start'] == 2:\n    initial_state = np.random.default_rng().choice(9, size=9, replace=False).reshape(3, 3)\n\nelif args['start'] == 3:\n    # input 2D array\n    print(\"Enter input (3x3):\")\n    initial_state = np.array([input().split() for _ in range(3)], int)\n\nelse:\n    os.system('exit')\n\n# find the position of the '0' in the state\ninitial_pos0 = tuple(*np.argwhere(initial_state == 0))\n\nprint(\"---Initial State---\")\nui.displayTable(initial_state)\n\nif not isSolveable(initial_state):\n    print(\"The puzzle is not solvable.\")\n    exit()\n\nroot_node = Node(initial_state, initial_pos0, None, None)\n\nprint(\"Choose algorithm:\\n[1] BFS\\n[2] DFS\")\n\nargs['alg'] = int(input())\nstart_time = datetime.now()\n\nif args['alg'] == 1:\n    solution_node = solver.solveBFS(root_node)\nelif args['alg'] == 2:\n    print(\"Enter depth limit:\")\n    args['depth'] = int(input())\n    solution_node = solver.solveDFS(root_node, args['depth'])\n\n    if solution_node is None:\n        print(\"No solution found for depth: \", args['depth'])\n        exit()\nelse:\n    os.system('exit')\n\nfinish_time = datetime.now()\nfinal_nodes = solver.trackSolution(solution_node)\n\nprint(\"---Solution---\")\n\nfor node in final_nodes:\n    ui.displayTable(node.state)\n\nprint(\"# of steps: \" + str(len(final_nodes) - 1))\nprint(\"Time elapsed: \" + str((finish_time - start_time).total_seconds()))\n\nos.system('pause')\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"78428119","text":"\"\"\"d3format_by_metric\n\nRevision ID: f162a1dea4c4\nRevises: 960c69cb1f5b\nCreate Date: 2016-07-06 22:04:28.685100\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = 'f162a1dea4c4'\ndown_revision = '960c69cb1f5b'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n    op.add_column('metrics', sa.Column('d3format', sa.String(length=128), nullable=True))\n    op.add_column('sql_metrics', sa.Column('d3format', sa.String(length=128), nullable=True))\n\n\ndef downgrade():\n    op.drop_column('sql_metrics', 'd3format')\n    op.drop_column('metrics', 'd3format')\n","sub_path":"superset/migrations/versions/f162a1dea4c4_d3format_by_metric.py","file_name":"f162a1dea4c4_d3format_by_metric.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"468951356","text":"import math\nn,w,l=[int(i) for i in input().strip().split()]\narr=[]\nfor i in range(n):\n\th,r=[int(i) for i in input().strip().split()]\n\tf=math.ceil((l-h)/r)\n\tif f<0:\n\t\tf=0\n\tarr.append([h,r,f])\narr.sort(key=lambda x:x[2])\ni=0\nx=0\nwood=0\nsumi=0\nwhile i=w:\n\t\twood-=y\n\t\twhile wood-sumi>=w and m>x:\n\t\t\twood-=sumi\n\t\t\tm-=1\n\t\tbreak\n\tsumi+=arr[i][1]\n\tx=m\n\ti+=1\nwhile wood= version.parse(PYTORCH_CPU_VIRTUAL_PKG):\n        session.conda_install('-c', 'pytorch', f'pytorch=={pytorch}', 'cpuonly')\n    else:\n        session.conda_install('-c', 'pytorch', f'pytorch-cpu=={pytorch}')\n    session.install('.[test]')\n    session.run('pytest', '-v')\n\n\n@nox.session(python=False)\n@nox.parametrize(\"pytorch\", PYTORCH_VERSIONS, ids=PYTORCH_IDS)\n@nox.parametrize(\"python\", CONDA_PYTHON_VERSIONS, ids=CONDA_PYTHON_IDS)\ndef dry_run_pytorch_only_deps(session, pytorch, python):\n    if version.parse(pytorch) >= version.parse(PYTORCH_CPU_VIRTUAL_PKG):\n        session.run('conda', 'create', '-n', 'dry_run', '--only-deps', '-d', '-c', 'pytorch', f'pytorch=={pytorch}', 'cpuonly', f'python={python}')\n    else:\n        session.run('conda', 'create', '-n', 'dry_run', '--only-deps', '-d', '-c', 'pytorch', f'pytorch-cpu=={pytorch}', f'python={python}')\n\n\n@nox.session(python=False)\n@nox.parametrize(\"pytorch\", PYTORCH_VERSIONS, ids=PYTORCH_IDS)\n@nox.parametrize(\"python\", CONDA_PYTHON_VERSIONS, ids=CONDA_PYTHON_IDS)\ndef dry_run_pytorch_no_deps(session, pytorch, python):\n    if version.parse(pytorch) >= version.parse(PYTORCH_CPU_VIRTUAL_PKG):\n        session.run('conda', 'create', '-n', 'dry_run', '--no-deps', '-d', '-c', 'pytorch', f'pytorch=={pytorch}', 'cpuonly', f'python={python}')\n    else:\n        session.run('conda', 'create', '-n', 'dry_run', '--no-deps', '-d', '-c', 'pytorch', f'pytorch-cpu=={pytorch}', f'python={python}')","sub_path":"noxfile.py","file_name":"noxfile.py","file_ext":"py","file_size_in_byte":2300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"475464482","text":"from fastapi.testclient import TestClient\nimport pytest\n\nfrom main import app\n\n\n@pytest.fixture()\ndef client():\n    \"\"\"Prepare application test client.\"\"\"\n    with TestClient(app) as test_client:\n        yield test_client\n\n\ndef test_suppliers(client):\n    \"\"\"Test '/suppliers' endpoint.\"\"\"\n    test_path = '/suppliers'\n    test_records = [{'SupplierID': 1, 'CompanyName': 'Exotic Liquids'},\n                    {'SupplierID': 2, 'CompanyName': 'New Orleans Cajun Delights'}]\n\n    response = client.get(test_path)\n    payload = response.json()\n\n    assert response.status_code == 200\n    assert type(payload) is list\n    assert sorted(payload, key=lambda item: item['SupplierID']) == payload\n    assert payload[:2] == test_records\n\n\ndef test_supplier(client):\n    \"\"\"Test '/suppliers' endpoint with given supplier id.\"\"\"\n    test_path = '/suppliers/{}'\n    test_id = 5\n    test_record = {\n        'SupplierID': 5,\n        'CompanyName': 'Cooperativa de Quesos \\'Las Cabras\\'',\n        'ContactName': 'Antonio del Valle Saavedra',\n        'ContactTitle': 'Export Administrator',\n        'Address': 'Calle del Rosal 4',\n        'City': 'Oviedo',\n        'Region': 'Asturias',\n        'PostalCode': '33007',\n        'Country': 'Spain',\n        'Phone': '(98) 598 76 54',\n        'Fax': None,\n        'HomePage': None,\n    }\n\n    response = client.get(test_path.format(test_id))\n    payload = response.json()\n    response_invalid = client.get(test_path.format(999))\n\n    assert response.status_code == 200\n    assert response_invalid.status_code == 404\n    assert type(payload) is dict\n    assert payload == test_record\n\n\ndef test_supplier_products(client):\n    \"\"\"Test '/suppliers/{}/products' endpoint with given supplier id.\"\"\"\n    test_path = '/suppliers/{}/products'\n    test_id = 12\n    test_records = [{'ProductID': 29, 'ProductName': 'Thüringer Rostbratwurst',\n                    'Category': {'CategoryID': 6, 'CategoryName': 'Meat/Poultry'}, 'Discontinued': 1},\n                    {'ProductID': 28, 'ProductName': 'Rössle Sauerkraut',\n                    'Category': {'CategoryID': 7, 'CategoryName': 'Produce'}, 'Discontinued': 1}]\n\n    response = client.get(test_path.format(test_id))\n    payload = response.json()\n    response_invalid = client.get(test_path.format(999))\n\n    assert response.status_code == 200\n    assert response_invalid.status_code == 404\n    assert type(payload) is list\n    assert payload[-2:] == test_records\n\n\ndef test_create_supplier(client):\n    \"\"\"Test POST '/suppliers' endpoint.\"\"\"\n    test_path = '/suppliers'\n    verify_path = '/suppliers/{}'\n    new_record = {\n        'CompanyName': 'Test Company Name',\n        'ContactName': 'Test Contact Name',\n        'ContactTitle': 'Unknown',\n        'Address': 'Test Address',\n        'City': 'Test City',\n        'PostalCode': '123-123',\n        'Country': 'Unknown',\n        'Phone': '123-123-123',\n    }\n    new_short_record = {'CompanyName': 'Short Company Name'}\n    invalid_record = {'City': 'Test City'}\n\n    response_invalid = client.post(test_path, json=invalid_record)\n    response = client.post(test_path, json=new_record)\n    payload = response.json()\n    response_verify = client.get(verify_path.format(payload['SupplierID']))\n    response_short = client.post(test_path, json=new_short_record)\n    payload_short = response_short.json()\n    response_short_verify = 
client.get(verify_path.format(payload_short['SupplierID']))\n\n    assert response_invalid.status_code == 422\n    assert response.status_code == 201\n    assert response_verify.status_code == 200\n    assert payload.items() <= response_verify.json().items()\n    assert response_short.status_code == 201\n    assert response_short_verify.status_code == 200\n    assert payload_short.items() <= response_short_verify.json().items()\n\n\ndef test_update_supplier(client):\n    \"\"\"Test PUT '/suppliers' endpoint with given supplier id.\"\"\"\n    test_path = '/suppliers/{}'\n    create_path = '/suppliers'\n    new_record = {'CompanyName': 'Update Company Name'}\n    update_attributes = {'City': 'New City', 'Address': 'New address'}\n\n    response_create = client.post(create_path, json=new_record)\n    payload = response_create.json()\n    supplier_id = payload['SupplierID']\n    response_update = client.put(test_path.format(supplier_id), json=update_attributes)\n    payload_updated = response_update.json()\n\n    assert response_update.status_code == 200\n    payload.update(update_attributes)\n    assert payload_updated == payload\n\n\ndef test_delete_supplier(client):\n    \"\"\"Test DELETE '/suppliers' endpoint with given supplier id.\"\"\"\n    test_path = '/suppliers/{}'\n    create_path = '/suppliers'\n    new_record = {'CompanyName': 'Delete Company Name'}\n\n    response_create = client.post(create_path, json=new_record)\n    payload = response_create.json()\n    supplier_id = payload['SupplierID']\n    response_delete = client.delete(test_path.format(supplier_id))\n    response_duplicate = client.delete(test_path.format(supplier_id))\n    response_verify = client.get(test_path.format(supplier_id))\n\n    assert response_delete.status_code == 204\n    assert response_duplicate.status_code == 401\n    assert response_verify.status_code == 404\n","sub_path":"test_main.py","file_name":"test_main.py","file_ext":"py","file_size_in_byte":5182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"278232139","text":"import math\nimport torch\n\ndef get_action(mu, std):\n    action = torch.normal(mu, std)\n    action = action.data.numpy()\n    return action\n\n# logarithm의 property을 이용하여 ratio를 만들 때 사용하기 위한\n# normal distribution의 probability density\ndef log_prob_density(x, mu, std):\n    log_density = -(x - mu).pow(2) / (2 * std.pow(2)) \\\n                    - 0.5 * math.log(2 * math.pi)\n    return log_density.sum(1, keepdim=True)\n\n\ndef hessian_vector_product(actor, states, p, cg_damping):\n    p.detach() \n    kl = kl_divergence(old_actor=actor, new_actor=actor, states=states)\n    kl = kl.mean()\n    \n    kl_grad = torch.autograd.grad(kl, actor.parameters(), create_graph=True)\n    kl_grad = flat_grad(kl_grad)\n\n    kl_grad_p = (kl_grad * p).sum() \n    kl_hessian = torch.autograd.grad(kl_grad_p, actor.parameters())\n    kl_hessian = flat_hessian(kl_hessian)\n\n    return kl_hessian + p * cg_damping # cg_damping = 0.1\n\ndef kl_divergence(old_actor, new_actor, states):\n    mu, std = new_actor(torch.Tensor(states))\n    mu_old, std_old = old_actor(torch.Tensor(states))\n    mu_old = mu_old.detach()\n    std_old = std_old.detach()\n\n    # kl divergence between old policy and new policy : D( pi_old || pi_new )\n    # pi_old -> mu_old, std_old / pi_new -> mu, std\n    # be careful of calculating KL-divergence. It is not symmetric metric.\n    kl = torch.log(std / std_old) + (std_old.pow(2) + (mu_old - mu).pow(2)) / (2.0 * std.pow(2)) - 0.5\n    return kl.sum(1, keepdim=True)\n\ndef flat_grad(grads):\n    grad_flatten = []\n    for grad in grads:\n        grad_flatten.append(grad.view(-1))\n    grad_flatten = torch.cat(grad_flatten)\n    return grad_flatten\n\ndef flat_hessian(hessians):\n    hessians_flatten = []\n    for hessian in hessians:\n        hessians_flatten.append(hessian.contiguous().view(-1))\n    hessians_flatten = torch.cat(hessians_flatten).data\n    return hessians_flatten\n\n\ndef flat_params(model):\n    params = []\n    for param in model.parameters():\n        params.append(param.data.view(-1))\n    params_flatten = torch.cat(params)\n    return params_flatten\n\ndef update_model(model, new_params):\n    index = 0\n    for params in model.parameters():\n        params_length = len(params.view(-1))\n        new_param = new_params[index: index + params_length]\n        new_param = new_param.view(params.size())\n        params.data.copy_(new_param)\n        index += params_length","sub_path":"mujoco/tnpg/utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"285725405","text":"import tensorflow as tf\nimport numpy as np\nimport pickle\nimport time\nISOTIMEFORMAT='%Y-%m-%d %X'\n       \ntf.flags.DEFINE_string(\"data_dir\", \"./data\", \"The data dir.\")\ntf.flags.DEFINE_string(\"sub_dir\", \"WikiPeople\", \"The sub data dir.\")\ntf.flags.DEFINE_string(\"dataset_name\", \"WikiPeople\", \"The name of the dataset.\")\ntf.flags.DEFINE_string(\"bin_postfix\", \"\", \"The new_postfix for the output bin file.\")\ntf.flags.DEFINE_boolean(\"if_permutate\", False, \"If permutate for test filter.\")\nFLAGS = tf.flags.FLAGS  \nFLAGS._parse_flags()   \n#FLAGS.flag_values_dict()\nprint(\"\\nParameters:\")\nfor attr, value in sorted(FLAGS.__flags.items()):\n#for attr, value in sorted(FLAGS.flag_values_dict().items()):\n    print(\"{}={}\".format(attr.upper(), value))\n\ndef permutations(arr, position, end, res):\n    \"\"\"\n    Permutate the array\n    \"\"\"\n    if position == end:\n        res.append(tuple(arr))\n    else:\n        for index in range(position, end):\n            arr[index], arr[position] = arr[position], arr[index]\n            permutations(arr, position+1, end, res)\n            arr[index], arr[position] = arr[position], arr[index]\n    return res\n\ndef load_data_from_txt(filenames, values_indexes = None, roles_indexes = None, ary_permutation = None):\n    \"\"\"\n    Take a list of file names and build the corresponding dictionnary of facts\n    \"\"\"\n    if values_indexes is None:\n        values_indexes= dict()\n        values = set()\n        next_val = 0\n    else:\n        values = set(values_indexes)\n        next_val = max(values_indexes.values()) + 1\n\n    if roles_indexes is None:\n        roles_indexes= dict()\n        roles= set()\n        next_role = 0\n    else:\n        roles = set(roles_indexes)\n        next_role = max(roles_indexes.values()) + 1\n    if ary_permutation is None:\n        ary_permutation= dict()\n\n    max_n = 2  # The maximum arity of the facts\n    for filename in filenames:\n        with open(filename, 'r') as f:\n            for i, line in enumerate(f):\n                xx_dict = eval(line)\n                xx = xx_dict['N']\n                if xx > max_n:\n                    max_n = xx\n    data = []\n    for i in range(max_n-1):\n        data.append(dict())\n\n    for filename in filenames:\n        with open(filename) as f:\n            lines = f.readlines()\n\n        for _, line in enumerate(lines):\n            aline = ()        \n            xx_dict = eval(line)\n            for k in xx_dict:\n                if k == 'N':\n                    continue\n                if k in roles:\n                    role_ind = roles_indexes[k]\n                else:\n                    role_ind = next_role\n                    next_role += 1\n                    roles_indexes[k] = role_ind\n                    roles.add(k)\n                if type(xx_dict[k]) == str:\n                    val = xx_dict[k]\n                    if val in values:\n                        val_ind = values_indexes[val]\n                    else:\n                        val_ind = next_val\n                        next_val += 1\n                        values_indexes[val] = val_ind\n                        values.add(val)\n                    aline = aline + (role_ind,)\n                    aline = aline + (val_ind,)\n                else:\n                    for val in xx_dict[k]:  # Multiple values\n                        if val in values:\n                            val_ind = values_indexes[val]\n           
             else:\n                            val_ind = next_val\n                            next_val += 1\n                            values_indexes[val] = val_ind\n                            values.add(val)\n                        aline = aline + (role_ind,)\n                        aline = aline + (val_ind,)\n                        \n            if FLAGS.if_permutate == True:  # Permutate the elements in the fact for negative sampling or further computing the filtered metrics in the test process\n                if xx_dict['N'] in ary_permutation:\n                    res = ary_permutation[xx_dict['N']]\n                else:\n                    res = []\n                    arr = np.array(range(xx_dict['N']))\n                    res = permutations(arr, 0, len(arr), res)\n                    ary_permutation[xx_dict['N']] = res\n                for tpl in res:\n                    tmpline = ()\n                    for tmp_ind in tpl:\n                        tmpline = tmpline + (aline[2*tmp_ind], aline[2*tmp_ind+1])\n                    data[xx_dict['N']-2][tmpline] = [1]\n            else:\n                data[xx_dict['N']-2][aline] = [1]\n        \n    return data, values_indexes, roles_indexes, ary_permutation\n\ndef get_neg_candidate_set(folder, values_indexes, roles_indexes):\n    \"\"\"\n    Get negative candidate set for replacing value\n    \"\"\"\n    role_val = {}\n    with open(folder + 'n-ary_train.json') as f:\n        lines = f.readlines()\n    for _, line in enumerate(lines):\n        n_dict = eval(line)\n        for k in n_dict:\n            if k == 'N':\n                continue\n            k_ind = roles_indexes[k]\n            if k_ind not in role_val:\n                role_val[k_ind] = []\n            v = n_dict[k]\n            if type(v) == str:\n                v_ind = values_indexes[v]\n                if v_ind not in role_val[k_ind]:\n                    role_val[k_ind].append(v_ind)\n            else:  # Multiple values\n                for val in v:\n                    val_ind = values_indexes[val]\n                    if val_ind not in role_val[k_ind]:\n                        role_val[k_ind].append(val_ind)\n    return role_val\n\ndef build_data(folder='data/', dataset_name='WikiPeople'):\n    \"\"\"\n    Build data and save to files\n    \"\"\"\n    train_facts, values_indexes, roles_indexes, ary_permutation = load_data_from_txt([folder + 'n-ary_train.json'])\n    valid_facts, values_indexes, roles_indexes, ary_permutation = load_data_from_txt([folder + 'n-ary_valid.json'], \n            values_indexes = values_indexes , roles_indexes = roles_indexes, ary_permutation = ary_permutation)\n    test_facts, values_indexes, roles_indexes, ary_permutation = load_data_from_txt([folder + 'n-ary_test.json'], \n            values_indexes = values_indexes , roles_indexes = roles_indexes, ary_permutation = ary_permutation)\n    data_info = {}\n    data_info[\"train_facts\"] = train_facts\n    data_info[\"valid_facts\"] = valid_facts\n    data_info['test_facts'] = test_facts\n    data_info['values_indexes'] = values_indexes\n    data_info['roles_indexes'] = roles_indexes\n    if FLAGS.if_permutate == False:\n        role_val = get_neg_candidate_set(folder, values_indexes, roles_indexes)\n        data_info['role_val'] = role_val\n    with open(folder + dataset_name + FLAGS.bin_postfix + \".bin\", 'wb') as f:\n        pickle.dump(data_info, f)\n\nif __name__ == '__main__':\n    print(time.strftime(ISOTIMEFORMAT, time.localtime()))\n    afolder = FLAGS.data_dir + 
'/'\n    if FLAGS.sub_dir != '':\n        afolder = FLAGS.data_dir + '/' + FLAGS.sub_dir + '/'\n    build_data(folder=afolder, dataset_name=FLAGS.dataset_name)\n    print(time.strftime(ISOTIMEFORMAT, time.localtime()))\n","sub_path":"builddata.py","file_name":"builddata.py","file_ext":"py","file_size_in_byte":7055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"26511052","text":"def norepeating(arr):\n    right = []\n    for i in arr:\n        if arr.count(i) == 1:\n            right.append(i)\n    print(f'Niepowtarzające się numery: {right}')\n\ntab = [1,2,1,2,7,9,9,11]\n\nnorepeating(tab)\n","sub_path":"04-Subroutines/Exercises/04-37.py","file_name":"04-37.py","file_ext":"py","file_size_in_byte":209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"568582129","text":"from knock25 import phase\nfrom knock26 import remove_pair, wiki_stress\nfrom knock21 import filter_english\n\nwiki_inner_link = r\"\\[\\[(?P.+?)\\]\\]\"\n\nif __name__ == \"__main__\":\n    domain, fields = phase(filter_english())\n    fields = remove_pair(fields, wiki_inner_link, wiki_stress)\n    print(\"{1}の{0}\".format(*domain))\n    for k,v in fields.items():\n        print(\"%s:\\t%s\" % (k, v))\n","sub_path":"zchen/chapter03/knock27.py","file_name":"knock27.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"125264357","text":"#Faça um programa que leia uma frase pelo teclado e mostre:\n# Quantas vezes aparece a letra \"A\"\n# Em que posição ela aparece a primeira vez\n# Em que posição ela aparece a última vez\n\nfrase = input(str('Digite uma frase: '))\nx = frase.upper()\nqtdA = 'A' in x\ncount = 0\n\nfor i in x:\n    if i == 'A':\n        count = count + 1\n\n\nif count == 0:\n    print('Não existe a letra \"A\" nesta frase!')\nelse:\n    print(f'Existem {count} letra(s) \"A\" nesta frase!')\n\nfirstPosition = x.find('A')\nprint(f'A letra A aparace pela primeira vez na posição {firstPosition}')\n\nlastPosition = x.rfind('A')\nprint(f'A letra A aparace pela última vez na posição {lastPosition}')","sub_path":"PythonExercicios/#026_Primeira_e_ultima_Ocorrencia_String.py","file_name":"#026_Primeira_e_ultima_Ocorrencia_String.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"559288491","text":"# -*- coding:utf-8 -*-\n# Copyright © 2012 Clément Schaff, Mahdi Ben Jelloul\n\n\"\"\"\nopenFisca, Logiciel libre de simulation du système socio-fiscal français\nCopyright © 2011 Clément Schaff, Mahdi Ben Jelloul\n\nThis file is part of openFisca.\n\n    openFisca is free software: you can redistribute it and/or modify\n    it under the terms of the GNU General Public License as published by\n    the Free Software Foundation, either version 3 of the License, or\n    (at your option) any later version.\n\n    openFisca is distributed in the hope that it will be useful,\n    but WITHOUT ANY WARRANTY; without even the implied warranty of\n    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n    GNU General Public License for more details.\n\n    You should have received a copy of the GNU General Public License\n    along with openFisca.  If not, see .\n\"\"\"\nfrom src.gui.qt.QtGui import (QWidget, QDockWidget, QLabel, QHBoxLayout, QVBoxLayout, QGroupBox, QButtonGroup)\nfrom src.gui.qt.QtCore import SIGNAL, Signal, Qt\nfrom src.gui.qthelpers import OfSs\nfrom src.gui.config import get_icon\n#from pandas.sandbox.qtpandas import DataFrameWidget\nfrom src.gui.qthelpers import DataFrameViewWidget \n\nfrom src.plugins import OpenfiscaPluginWidget, PluginConfigPage\nfrom src.gui.baseconfig import get_translation\n_ = get_translation('src')\n\n\nclass PopulationExplorerConfigPage(PluginConfigPage):\n    def __init__(self, plugin, parent):\n        PluginConfigPage.__init__(self, plugin, parent)\n        self.get_name = lambda: _(\"Population data explorer\")\n        \n    def setup_page(self):\n        \"\"\"\n        Setup the page of the survey widget\n        \"\"\"\n        \n        population_group = QGroupBox(_(\"Alternatives population data\")) \n        population_bg = QButtonGroup(self)\n        population_label = QLabel(_(\"Location of population data\")) \n\n        country_default_radio = self.create_radiobutton(_(\"Use country default population data\"),\n                                                    'use_default', False,\n                                                    tip = _(\"Use country default population data\"),\n                                                    \n                                button_group = population_bg)\n        population_radio = self.create_radiobutton(_(\"The following file\"),  # le fichier suivant\",\n                                               'enable', True,\n                                               _(\"population data file for micrsosimulation\"), # \"Fichier de données pour la microsimulation\",\n                                               button_group=population_bg)\n        population_file = self.create_browsefile(\"\", 'data_file',\n                                             filters='*.h5')\n        \n        self.connect(country_default_radio, SIGNAL(\"toggled(bool)\"),\n                     population_file.setDisabled)\n        self.connect(population_radio, SIGNAL(\"toggled(bool)\"),\n                     population_file.setEnabled)\n        population_file_layout = QHBoxLayout()\n        population_file_layout.addWidget(population_radio)\n        population_file_layout.addWidget(population_file)\n\n        population_layout = QVBoxLayout()\n        population_layout.addWidget(population_label)\n        population_layout.addWidget(country_default_radio)\n        population_layout.addLayout(population_file_layout)\n        population_group.setLayout(population_layout)\n    
    vlayout = QVBoxLayout()\n        vlayout.addWidget(population_group)\n        vlayout.addStretch(1)\n        self.setLayout(vlayout)\n\nclass PopulationExplorerWidget(OpenfiscaPluginWidget):    \n    \"\"\"\n    Population data explorer Widget\n    \"\"\"\n    CONF_SECTION = 'population'\n    CONFIGWIDGET_CLASS = PopulationExplorerConfigPage\n    LOCATION = Qt.LeftDockWidgetArea\n    FEATURES = QDockWidget.DockWidgetClosable | \\\n               QDockWidget.DockWidgetFloatable | \\\n               QDockWidget.DockWidgetMovable\n    DISABLE_ACTIONS_WHEN_HIDDEN = False\n    sig_option_changed = Signal(str, object)\n\n    def __init__(self, parent = None):\n        super(PopulationExplorerWidget, self).__init__(parent)\n        self.setStyleSheet(OfSs.dock_style)\n        # Create geometry\n        self.setObjectName( _(\"Population data explorer\"))\n        self.dockWidgetContents = QWidget()\n\n        self.view = DataFrameViewWidget(self.dockWidgetContents)\n\n        verticalLayout = QVBoxLayout(self.dockWidgetContents)\n        verticalLayout.addWidget(self.view)\n        self.setLayout(verticalLayout)\n\n        # Initialize attributes\n        self.parent = parent    \n        self.initialize_plugin() # To run the suitable inherited API methods\n\n        # Initialize attributes\n        self.parent = parent    \n        self.data = None\n    \n    def set_dataframe(self, dataframe = None, name = None):\n        '''\n        Sets the current dataframe\n        '''\n        if name is not None:\n            self.data = self.dataframes[name]\n        if dataframe is not None:\n            self.data = dataframe\n                            \n    def clear(self):\n        self.view.clear()\n        self.data = None\n        self.datatables_choices = []\n        self.dataframes = {}\n        \n    \n    #------ OpenfiscaPluginMixin API ---------------------------------------------\n    \n    def apply_plugin_settings(self, options):\n        \"\"\"\n        Apply configuration file's plugin settings\n        \"\"\"\n                \n        if 'data_file' in options:\n            NotImplementedError\n       \n        if 'use_default' in options:     \n            from src.lib.utils import of_import\n            default_profiles_filename = of_import(\"\",\"DEFAULT_PROFILES_FILENAME\", self.simulation.country)\n            self.simulation.load_profiles(default_profiles_filename)\n            self.refresh_plugin()\n            \n            \n    #------ OpenfiscaPluginWidget API ---------------------------------------------\n\n    def get_plugin_title(self):\n        \"\"\"\n        Return plugin title\n        Note: after some thinking, it appears that using a method\n        is more flexible here than using a class attribute\n        \"\"\"\n        return _(\"Population Data Explorer\")\n\n    \n    def get_plugin_icon(self):\n        \"\"\"\n        Return plugin icon (QIcon instance)\n        Note: this is required for plugins creating a main window\n              (see OpenfiscaPluginMixin.create_mainwindow)\n              and for configuration dialog widgets creation\n        \"\"\"\n        return get_icon('OpenFisca22.png')\n            \n    def get_plugin_actions(self):\n        \"\"\"\n        Return a list of actions related to plugin\n        Note: these actions will be enabled when plugin's dockwidget is visible\n              and they will be disabled when it's hidden\n        \"\"\"\n        pass\n\n    \n    def register_plugin(self):\n        \"\"\"\n        Register 
plugin in OpenFisca's main window\n        \"\"\"\n        self.simulation = self.main.simulation\n        self.main.add_dockwidget(self)\n\n    def refresh_plugin(self):\n        '''\n        Update Survey dataframes\n        '''\n        self.starting_long_process(_(\"Refreshing population explorer dataframe ...\"))\n        self.clear()\n#        self.view.set_dataframe(self.simulation.profiles.reset_index())\n        self.ending_long_process(_(\"Population explorer dataframe updated\"))\n    \n    def closing_plugin(self, cancelable=False):\n        \"\"\"\n        Perform actions before parent main window is closed\n        Return True or False whether the plugin may be closed immediately or not\n        Note: returned value is ignored if *cancelable* is False\n        \"\"\"\n        return True\n\n        \n ","sub_path":"src/plugins/general/PopulationExplorer.py","file_name":"PopulationExplorer.py","file_ext":"py","file_size_in_byte":7651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"608003525","text":"from tkinter import *\nfrom random import choice\nfrom time import sleep\n\n\nclass Player:\n    def __init__(self, ox , tbt , ply):\n        self.ox = ox\n        self.tbt = tbt\n        self.ply = ply\n\n\n    def scoresFor(self,b,ox,ply):\n        \"\"\"Returns a list of scores and scores each col based on the human moves/ply and the AI moves/ply.\"\"\"\n        scoresList = []\n        if ox == 'Red': #I do this in order to figure out who is the human and who is the AI\n            human = 'Yellow'\n        else:\n            human = 'Red'\n\n        for col in range(b.width): #For each col in the board\n            if ply == 0: #This is the base case and when it reaches 0, it will stop the recursion and if it does not allow the move it will append a -1. But if it allows the move it will append 50 to the list. It will not check for wins for 0 ply\n                if b.allowsMoveGUI(col) == False: #If the move is not allowed\n                    scoresList.append(-1) #Insert -1 into the list\n                else:\n                    scoresList.append(50) #Catch all statement of appending 50\n            else:\n                if b.allowsMoveGUI(col) == True: #If the move is allowed\n                    b.addGUIMove(col,ox)   #Add the move\n                    if b.winsForGUI(ox) == True: #If you add the move and it is a win\n                        scoresList.append(100)  # Append 100 into the list\n                    elif ply > 1: #If the ply is greater than 1\n                        scoresList.append(100-max(self.scoresFor(b,human,ply-1))) #Use recursion to call back this same functions and test for the human moves.If the human gets 100 for a potential win, append 100 into the list and the final list that is appended would be zero. 
As 100 -100  = 0.Which 0 meaning you have lost.\n                    else:\n                        scoresList.append(50) #if there are no wins for the col, append 50 to the list.\n                    b.delGUIMove(col) #Delete all the moves when the for loop is complete\n\n                else:\n                    scoresList.append(-1) #If the board is full, append -1\n        print(scoresList)\n        return scoresList #At the end of everything, return the of the list\n\n    def nextMove(self,b):\n        tieBreakList = [] #Makes an empty list\n        for colMove,score in enumerate(self.scoresFor(b,self.ox,self.ply)): # I am iterating through the list and returning one item at the time and returning thr index as well.\n            if score == 100 and max(self.scoresFor(b,self.ox,self.ply)) == 100: # If the score is 100 and and the mox of the list is 100\n                tieBreakList.append(colMove)    #Then I will append the index of the max values into the list and return it\n            elif score == 50 and max(self.scoresFor(b,self.ox,self.ply)) == 50: ## If the score is 50 and and the mox of the list is 50\n                tieBreakList.append(colMove) #Then I will append the index of the max values into the list and return it\n            elif score == 0 and max(self.scoresFor(b,self.ox,self.ply)) == 0:  # # If the score is 0 and and the mox of the list is 0\n                tieBreakList.append(colMove)    #Then I will append the index of the max values into the list and return it\n            else:\n                continue #A catch all statement to keep running the for loop.\n        print(tieBreakList)\n        return self.tieBreakMove(tieBreakList) #I want to return a interger and use self.tieBreakMove to index the right spot depending on what is inputted into self.tbt\n\n\n\n    def tieBreakMove(self,scores):\n        if self.tbt == \"Left\":\n            return scores[0] #Return the first thing in the list as that represents left\n        if self.tbt == \"Right\":\n            return scores[-1] #Returns the last thing in the list as that represents the right index in the list\n        if self.tbt == \"Random\":\n            return choice(scores)  #Returns a random thing in the list which are just indexes and uses the choice finction for randomness.\n\n\nclass Connect4:\n\n    def __init__(self, width, height, window):\n        \"\"\"This __init__the board and sets variables to what each thinh is. 
It also sets up the board\n         and makes a list inside a list while also setting each element into an empty space string\"\"\"\n        self.width = width\n        self.height = height\n\n        self.window = window #Makes the window for the game\n        self.frame = Frame(window)\n        self.frame.pack()\n        self.messageSize = 75\n        self.diameter = self.width*10\n        self.initialColor = 'white'\n        self.newGameButton = Button(self.frame, text = 'New Game', command = self.newGame)\n        self.newGameButton.pack(side = RIGHT)\n        self.quitButton = Button(self.frame, text='Quit', command=self.quitGame)\n        self.quitButton.pack(side = LEFT)\n        self.thePly = 0\n        self.cSlider = Scale(window, orient=HORIZONTAL,from_=0,to=4,length=200, label='Difficulty -->',command= self.fixThePly)\n        self.cSlider.pack()\n        self.cSlider.get()\n\n        self.message = Label(self.window, text = 'Connect 4',font='Courier 22', fg = 'black')\n        self.message.pack()\n        self.draw = Canvas(window, height = self.height*75, width = self.width*72,bg='dark blue')\n        self.draw.bind('', self.mouseInput)\n        self.draw.pack()\n        self.clearBoard()\n        self.playTheGame = False\n        self.turn = True\n\n\n    def clearBoard(self):\n        self.draw.delete(\"all\")\n        self.circles = []\n        self.colors = []\n\n        y = 0\n        for row in range(self.height):\n            circleRow = []\n            colorRow = []\n            x = 0\n            for col in range(self.width):\n                circleRow += [self.draw.create_oval(x+7, y+9, x + self.diameter, y + self.diameter, fill=self.initialColor)]\n                colorRow += [self.initialColor]\n                x += self.diameter\n\n            self.circles += [circleRow]\n            self.colors += [colorRow]\n            y += self.diameter\n\n\n    def newGame(self):\n        self.playTheGame = True\n        self.ai = Player('Yellow','Random',self.thePly)\n        self.clearBoard()\n\n\n\n    def addGUIMove(self,col,color):\n        for row in range( self.height ):\n            if self.colors[row][col] != self.initialColor: #If it is not a empty space\n                self.draw.itemconfig(self.circles[row-1][col], fill=color) #If you put 5 in row it kinda works\n                self.colors[row-1][col] = color\n                print('true')\n                return\n        self.draw.itemconfig(self.circles[self.height-1][col], fill=color)\n        self.colors[self.height-1][col] = color  #Subtract the height as the board is getting used up\n\n    def delGUIMove(self, col):\n        \"\"\"Delets the move the person wants to undo. 
Uses same logic as addMove()\"\"\"\n        for row in range( self.height ):\n            if self.colors[row][col] != self.initialColor: #If it is not a empty space\n                self.draw.itemconfig(self.circles[row][col], fill=self.initialColor) #If you put 5 in row it kinda works\n                self.colors[row][col] = self.initialColor\n                return\n\n    def fixThePly(self,ply):\n        self.thePly = int(ply)\n        print('Ply', ply)\n\n    def mouseInput(self, event):\n        col = int(event.x / self.diameter) #Should only be mouse input\n        row = int(event.y/self.diameter)\n        print('board[%s]' % (col))\n        if self.turn == True:\n            if self.playTheGame == True:\n                self.message.config(text='Playing the game')\n                if self.allowsMoveGUI(col) == True:\n                    self.addGUIMove(col,'Red')\n                    self.window.update()\n                    self.ai = Player('Yellow','Random',self.thePly)\n                    if self.isFullGui() == False:\n                        if self.winsForGUI('Red'):\n                            self.message.config(text='You win')\n                            self.turn = False\n                            self.window.update()\n                        else:\n                            self.window.update()\n                            self.window.after(500)\n                            self.message.config(text='Your Move')\n                            self.addGUIMove(self.ai.nextMove(self),'Yellow')\n                            self.ai = Player('Yellow','Random',self.thePly)\n                        if self.isFullGui() == False:\n                            if self.winsForGUI('Yellow'):\n                                self.message.config(text='You lose')\n                                self.window.update()\n                                self.turn = False\n                    else:\n                        self.message.config(text='Tie Game')\n                        self.turn = False\n\n                else:\n                    self.message.config(text='Can\\'t place a move there')\n        else:\n            return\n\n\n\n\n\n    def quitGame(self):\n        self.window.destroy()\n\n\n\n    def winsForGUI(self,color):\n          # check for horizontal wins\n        for row in range(0,self.height):\n            for col in range(0,self.width-3):\n                if self.colors[row][col] == color and \\\n                    self.colors[row][col+1] == color and \\\n                    self.colors[row][col+2] == color and \\\n                    self.colors[row][col+3] == color:\n                        return True\n\n        for row in range(0,self.height-3):  #Checks for vertical wins and checks for rows\n            for col in range(0,self.width):\n                if self.colors[row][col] == color and \\\n                    self.colors[row+1][col] == color and \\\n                    self.colors[row+2][col] == color and \\\n                    self.colors[row+3][col] == color:\n                        return True\n\n        for row in range(0,self.height-3):  #Checks for diagonal win #1\n            for col in range(0,self.width-3): #Set height and width -3 as we are checking 3 in a row diagonal wins. 
Also uses slope of 1\n                if self.colors[row][col] == color and \\\n                    self.colors[row+1][col+1] == color and \\\n                    self.colors[row+2][col+2] == color and \\\n                    self.colors[row+3][col+3] == color:\n                        return True\n\n        for row in range(0,self.height-3): #Checks for the other diagonol win #2\n            for col in range(3,self.width): #Same logic as diagonal win #1 but checks for slope of -1\n                if self.colors[row][col] == color and \\\n                    self.colors[row+1][col-1] == color and \\\n                    self.colors[row+2][col-2] == color and \\\n                    self.colors[row+3][col-3] == color:\n                        return True\n        return False\n\n\n\n    def isFullGui(self):\n        for col in range(self.width):\n            if self.allowsMoveGUI(col) == True: #If the board can allow the move for all of the columns, It is not full\n                    return False\n        return True #Else, it is Full\n\n    def allowsMoveGUI(self,col):\n        \"\"\"Checks if the move is allowed or not\"\"\"\n        if 0 <= col < self.width and self.colors[0][col] == self.initialColor:\n            print(self.colors)\n            return True\n        else:\n            return False    #Or it is not true\n\n\n\n\n\n\n\n\ndef main():\n    \"\"\"Acts a point of excusion of any program, controls when and where its excuted\"\"\"\n    root = Tk()\n    root.title('Connect 4')\n    myScreen = Connect4(7,6,root)\n    root.mainloop() #Keeps running the root window and tkinter until I decide to destroy it\n    #player = Player('o','Random',3)\n    #print(player.nextMove(board))\n    #board.playGameWith(player)\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"Assignment11.py","file_name":"Assignment11.py","file_ext":"py","file_size_in_byte":11694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
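The `scoresFor` recursion in the record above relies on a score-inversion step: a reply scored 100 for the opponent becomes 0 for the current player. A minimal board-free sketch of that convention; `invert` is a hypothetical helper, not part of the assignment file:

```python
# Hypothetical illustration of the 100 - max(...) inversion used in scoresFor:
# scores live in {-1 (illegal), 0 (losing), 50 (neutral), 100 (winning)}.
def invert(opponent_scores):
    return 100 - max(opponent_scores)

assert invert([100, 50, -1]) == 0    # opponent can win somewhere -> losing for us
assert invert([50, 50, 50]) == 50    # nothing decisive either way
assert invert([0, -1, 0]) == 100     # opponent is lost everywhere -> winning for us
```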
+{"seq_id":"294779291","text":"# -*- coding: utf-8 -*-\n\"\"\"bootstrap_py.pypi.\"\"\"\nimport sys\nimport socket\nfrom bootstrap_py.exceptions import BackendFailure, Conflict\nif sys.version_info < (3, 0):\n    import xmlrpclib as xmlrpc_client\nelse:\n    from xmlrpc import client as xmlrpc_client\n\n#: PyPI XML-RPC API url\nPYPI_URL = 'https://pypi.python.org/pypi'\n\n\ndef package_existent(name):\n    \"\"\"search package.\n\n    * :class:`bootstrap_py.exceptions.Conflict` exception occurs\n      when user specified name has already existed.\n\n    * :class:`bootstrap_py.exceptions.BackendFailure` exception occurs\n      when PyPI service is down.\n\n    :param str name: package name\n    \"\"\"\n    if sys.version_info < (3, 0):\n        try:\n            result = search_package(name)\n        except (socket.error,\n                xmlrpc_client.ProtocolError) as exc:\n            raise BackendFailure(exc)\n    else:\n        try:\n            result = search_package(name)\n        except (socket.gaierror,\n                TimeoutError,\n                ConnectionRefusedError,\n                xmlrpc_client.ProtocolError) as exc:\n            raise BackendFailure(exc)\n    if result:\n        msg = ('[error] \"{0}\" is registered already in PyPI.\\n'\n               '\\tSpecify another package name.').format(name)\n        raise Conflict(msg)\n\n\ndef search_package(name):\n    \"\"\"search package.\n\n    :param str name: package name\n\n    :rtype: list\n    :return: package name list\n    \"\"\"\n    client = xmlrpc_client.ServerProxy(PYPI_URL)\n    return [pkg for pkg in client.search({'name': name})\n            if pkg.get('name') == name]\n","sub_path":"venv/Lib/site-packages/bootstrap_py/pypi.py","file_name":"pypi.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"390773548","text":"def panagram(a):\n    alpha=\"abcdefghijklmnopqrstuvwxyz\"\n    for i in alpha:\n        if i not in a.lower():\n            return False\n    else:\n        return True\nif (panagram(\"The quick brown fox jumps over the lazy dog\")==True):\n    print(\"Panagram\")\nelse:\n    print(\"Not\")\n","sub_path":"Find_Anagram.py","file_name":"Find_Anagram.py","file_ext":"py","file_size_in_byte":275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"549080533","text":"#There is a matrix of integer numbers. Number is entered. Define, which rows and columns contains this number.\r\nimport numpy as np\r\nimport random\r\nN=int(input()); M=int(input());\r\nmatrixx=[];\r\nfor i in range(N):\r\n    row=[];\r\n    for j in range(M):\r\n        row.append(int(random.random()*10))\r\n    matrixx.append(row)\r\nprint(matrixx); l=0;\r\nnumber=int(input());\r\nfor row in matrixx:\r\n    for i in range(N):\r\n        if row[i]==number:\r\n            print(\"number of row and column containing the number\",l,i);\r\n    l=l+1;\r\n","sub_path":"10 (matrix).py","file_name":"10 (matrix).py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"330510563","text":"from django.urls import path\nfrom . import views\n\n# paths in this app\nurlpatterns = [\n    path('all', views.all_nodes, name='all'),\n    path('rels/', views.get_relationship_by_type, name='rels'),\n    path('nodes', views.get_nodes, name='nodes'),\n    path('nodes/', views.get_nodes_by_label, name='get_nodes_by_label'),\n    path('article/nodes', views.get_edges_in_article, name='nodes_in_article'),\n\n    path('nodes//', views.equal_str, name='equal_string'),\n    path('nodes//equal/', views.equal, name='equal'),\n    path('nodes//greater/', views.greater, name='greater'),\n    path('nodes//greator/', views.greater_or_equal, name='greater_or_equal'),\n    path('nodes//less/', views.less, name='less'),\n    path('nodes//lessor/', views.less_or_equal, name='lesser_or_equal'),\n\n    path(\"articles\", views.get_all_article, name='articles'),\n    path(\"articles/\", views.get_articles_in_subreddit, name='articles'),\n\n    path(\"subreddit\", views.get_edges_in_subreddit, name='subreddit'),\n    path('lenses', views.get_topological_lens, name=\"lens\"),\n]\n","sub_path":"api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"598574880","text":"import requests\nfrom bs4 import BeautifulSoup\n\n\n# Основная функция вызванная ботом\ndef schedule(group):\n    value = code_group(group)\n    if value:\n        return page(value)\n\n\n# Юзер агент для парсера\ndef user_agent():\n    return 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:72.0) Gecko/20100101 Firefox/72.0'\n\n\n# Запрос номера для введенного номера группы\ndef code_group(group):\n    group_number = group\n    response = requests.get(\n        'https://urfu.ru/api/schedule/groups/suggest',\n        params={'query': group_number},\n        headers={'User-Agent': user_agent()}\n        )\n    json_response = response.json()\n    # Проверяем, что группа введена корректно\n    if json_response:\n        if len(json_response['suggestions']) == 1:\n            data_value = json_response['suggestions'][0]['data']\n            return str(data_value)\n\n\n# Парсим страницу\ndef page(value):\n    page = requests.get(\n        'https://urfu.ru/api/schedule/groups/lessons/'+value,\n        headers={'User-Agent': user_agent()}\n        )\n    soup = BeautifulSoup(page.text, 'html.parser')\n    table = tableDataText(soup.find('table', {'class': 'shedule-group-table'}))\n    return table\n\n\n# Парсим таблицу с расписанием в словарь\ndef tableDataText(table):\n    sch_dict = {}  # Словарь в котором хранится расписание\n    dict_keys = []  # Список с ключами - датами к словарю\n    trs = table.find_all('tr')\n    for tr in trs:\n        row = []\n        for td in tr.find_all(['b']):  # Ключ - день\n            dict_keys.append(td.get_text(strip=True))\n            sch_dict.update({dict_keys[-1]: []})\n        # Название предмета\n        row += [' '.join(td.get_text(strip=True).split())\n                for td in tr.find_all(['dd'],)]\n        # Время\n        row += [td.get_text(strip=True) for td in tr.find_all(\n            [],\n            'shedule-weekday-time')]\n        # Учитель\n        row += [td.get_text(strip=True) for td in tr.find_all(\n            ['span'], 'teacher')]\n        # Кабинет\n        row += [' '.join(td.get_text(strip=True).split()) for\n                td in tr.find_all(['span'], 'cabinet')]\n        # Добавляем в словарь\n        sch_dict[dict_keys[-1]] += row\n    return sch_dict\n","sub_path":"parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":2470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"200553429","text":"# _*_coding:utf-8 _*_\r\n#开发人员    : sidian\r\n#开发时间    : 2019/3/29 20:54\r\n#文件名称    : 002.py\r\n#开发工具    : PyCharm\r\nmember = ['小甲鱼', 88, '黑夜', 90, '迷途', 85, '怡静', 90, '秋舞斜阳', 88]\r\nfor i in range(0,10,2):\r\n    print(member[i],end=' ')\r\n    print(member[i+1])\r\nprint('=' *30)\r\n\r\ncount = 0\r\nwhile True:\r\n    print(member[count],member[count+1])\r\n    count += 2\r\n    if count == 10:\r\n        break\r\n\r\n","sub_path":"010/002.py","file_name":"002.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"150826830","text":"\"\"\"\nCS3B, Assignment #2, RPN Calculator\nUlises Marian\n\"\"\"\nimport numpy\n#pip install numpy\n\nclass MyStack:\n    # Constants\n    MAX_CAPACITY = 100000\n    DEFAULT_CAPACITY = 10\n\n    # Initializer method\n    def __init__(self, default_item, capacity=DEFAULT_CAPACITY, dtype=int):\n        # If the capacity is bad, fail right away\n        if not self.validate_capacity(capacity):\n            raise ValueError(\"Capacity \" + str(capacity) + \" is invalid\")\n        self.capacity = capacity\n        self.default_item = default_item\n\n        # Make room in the stack and make sure it's empty to begin with\n        self.clear()\n\n    def clear(self):\n        # Allocate the storage and initialize top of stack\n        self.stack = numpy.array([self.default_item for _ in range(\n           self.capacity)])\n        self.top_of_stack = 0\n\n    @classmethod\n    def validate_capacity(cls, capacity):\n        return 0 <= capacity <= cls.MAX_CAPACITY\n\n    def push(self, item_to_push):\n        if self.is_full():\n            raise OverflowError(\"Push failed - capacity reached\")\n\n        self.stack[self.top_of_stack] = item_to_push\n        self.top_of_stack += 1\n\n    def pop(self):\n        if self.is_empty():\n            raise IndexError(\"Pop failed - stack is empty\")\n\n        self.top_of_stack -= 1\n        return self.stack[self.top_of_stack]\n\n    def is_empty(self):\n        return self.top_of_stack == 0\n\n    def is_full(self):\n        return self.top_of_stack == self.capacity\n\n    def get_capacity(self):\n        return self.capacity\n\n    def __str__(self):\n        string_descr = \"in stack is {} and in top_of_stack is {}.\"\n        string_descr = string_descr.format(self.stack, self.top_of_stack)\n        return string_descr\n\n\ndef mystack_test():\n    # Instantiate two empty stacks, one of 50 ints, another of 15 strings\n    s1 = MyStack(-1, 50)\n    s2 = MyStack(\"undefined\")\n    # and one more with bad argument\n    try:\n        s3 = MyStack(None, -100)\n        print(\"Failed test: expected __init()__ to reject negative capcity but it didn't\")\n    except Exception as e:\n        print(\"Successful test: handled negative capacity: \" + str(e))\n\n    # Confirm the stack capacities\n    print(\"------ Stack Sizes -------\\n  s1: {}   s2: {}\\n\".\n          format(s1.get_capacity(), s2.get_capacity()))\n\n    # Pop empty stack\n    print(\"------ Test stack ------\\n\")\n    try:\n        s1.pop()\n        print(\"Failed test: expected pop() to raise empty-stack exception but it didn't\")\n    except Exception as e:\n        print(\"Successful test: handled popping empty s1: \" + str(e))\n\n    # Push some items\n    s1.push(44)\n    s1.push(123)\n    s1.push(99)\n    s1.push(10)\n    s1.push(1000)\n    # try to put a square peg into a round hole\n    try:\n        s1.push(\"should not be allowed into an int stack\")\n        print(\"Failed test: expected push() to reject due to type incompatibility but it didn't\")\n    except Exception as e:\n        print(\"Successful test: rejected due to type incompatibility: \" + str(e))\n    try:\n        s2.push(444)\n        print(\"Failed test: expected push() to reject due to type incompatibility but it didn't\")\n    except Exception as e:\n        print(\"Successful test: rejected due to type incompatibility: \" + str(e))\n    try:\n        s1.push(44.4)\n        s2.push(\"bank\")\n        s2.push(\"-34\")\n        s2.push(\"should be okay\")\n        s2.push(\"a 
penny earned\")\n        s2.push(\"item #9277\")\n        s2.push(\"where am i?\")\n        s2.push(\"4\")\n        s2.push(\"4\")\n        s2.push(\"4\")\n        s2.push(\"4\")\n        print(\"Failed test: expected push() to reject due to type incompatibility but it didn't\")\n    except Exception as e:\n        print(\"Successful test: rejected due to type incompatibility: \" + str(e))\n\n    try:\n        s2.push(\"This is when stack is full\")\n        print(\"Failed test: expected push() to throw exception but it didn't\")\n    except Exception as e:\n        print(\"Successful test: handled pushing when stack is full: \" + str(e))\n    print(\"\\n--------- First Stack ---------\\n\")\n\n    # Pop and inspect the items\n    for k in range(0, 10):\n        try:\n            print(\"[\" + str(s1.pop()) + \"]\")\n        except Exception as e:\n            print(\"Successful test: handled popping empty stack s1: \" + str(e))\n    print(\"\\n--------- Second Stack ---------\\n\")\n    for k in range(0, 10):\n        print(\"[\" + str(s2.pop()) + \"]\")\n\n\nif __name__ == \"__main__\":\n    mystack_test()\n\n\nclass RpnCalculator:\n    # class constants\n    ADDITION = \"+\"\n    SUBTRACTION = \"-\"\n    FLOOR_DIVISION = \"//\"\n    MULTIPLICATION = \"*\"\n    OPERATORS = [ADDITION, SUBTRACTION, FLOOR_DIVISION, MULTIPLICATION]\n\n    @staticmethod\n    def eval(rpn_expression):\n        save_list = RpnCalculator.parse(rpn_expression)\n        save_result = RpnCalculator.eval_tokens(save_list)\n        return save_result\n\n    @staticmethod\n    def parse(rpn_expression):\n        individual_tok = rpn_expression.split()\n        return individual_tok\n\n    @staticmethod\n    def eval_tokens(tokens):\n        new_stack = MyStack(len(tokens))\n        for token in tokens:\n            if token.isdigit():\n               new_stack.push(token)\n            elif token == RpnCalculator.ADDITION:\n              operand_1 = new_stack.pop()\n              operand_2 = new_stack.pop()\n              operation_result = operand_1 + operand_2\n              new_stack.push(operation_result)\n            elif token == RpnCalculator.SUBTRACTION:\n              operand_1 = new_stack.pop()\n              operand_2 = new_stack.pop()\n              operation_result = operand_1 - operand_2\n              new_stack.push(operation_result)\n            elif token == RpnCalculator.FLOOR_DIVISION:\n              operand_1 = new_stack.pop()\n              operand_2 = new_stack.pop()\n              operation_result = operand_1 // operand_2\n              new_stack.push(operation_result)\n            elif token == RpnCalculator.MULTIPLICATION:\n              operand_1 = new_stack.pop()\n              operand_2 = new_stack.pop()\n              operation_result = RpnCalculator.multiply_it(operand_1, operand_2)\n              new_stack.push(operation_result)\n            elif not token.isdigit() or token not in RpnCalculator.OPERATORS:\n                raise ValueError (\"ILLEGAL value/operator\")\n        if new_stack.top_of_stack > 1:\n            raise ValueError (\"Too many operands!!\")\n        return new_stack\n\n    @staticmethod\n    def multiply_it(multiplicand, multiplier):\n        if multiplier == 0:\n            return 0\n        elif multiplier < 0:\n            return -RpnCalculator.multiply_it(multiplicand, - multiplier)\n        else:\n            return multiplicand + RpnCalculator.multiply_it(multiplicand,\n                                                            multiplier - 1)\n\n\ndef 
test_my_stack():\n    # The following lines of code would've worked with the original MyStack\n    # because it would've used lists, rather than stacks. Lists allow for\n    # heterogeneous data (hence, floats, ints, strings), plus such lists are\n    # allowed to increase and decrease in size without constraint, as opposed to\n    # the size restriction of stacks.\n    s1.append(15.0)\n    new_element = s1.pop(3)   # pop item at index 3\n    s2.append(new_element)\n    s2.append(\"pizza\")\n    s1.append(s2)   # append list to list\n\n\ndef test_rpn():\n    print(\"\\n----------- Test RPN -----------\\n\")\n    r1 = RpnCalculator()\n    try:\n        save_result = r1.eval(\"2\")\n        print(\"single number 2 = \", save_result.stack[0])\n    except Exception as e:\n        print(\"single number 2 =\" + \"EXCEPTION HANDLED: \" + str(e))\n    try:\n        save_result = r1.eval(\"3 4 +\")\n        print(\"3 4 + = \", save_result.stack[0])\n    except Exception as e:\n        print(\"3 4 + \" + \"EXCEPTION HANDLED: \" + str(e))\n    try:\n        save_result = r1.eval(\"3 4 -\")\n        print(\"3 4 - = \", save_result.stack[0])\n    except Exception as e:\n        print(\"3 4 - \" + \"EXCEPTION HANDLED: \" + str(e))\n    try:\n        save_result = r1.eval(\"3 4 *\")\n        print(\"3 4 * = \", save_result.stack[0])\n    except Exception as e:\n        print(\"3 4 * \" + \"EXCEPTION HANDLED: \" + str(e))\n    try:\n        save_result = r1.eval(\"3 4 //\")\n        print(\"3 4 // = \", save_result.stack[0])\n    except Exception as e:\n        print(\"3 4 // \" + \"EXCEPTION HANDLED: \" + str(e))\n\n    #multiple-operation expressions\n    try:\n        save_result = r1.eval(\"4 6 * 5 +\")\n        print(\"4 6 * 5 + = \", save_result.stack[0])\n    except Exception as e:\n        print(\"4 6 * 5 + \" + \"EXCEPTION HANDLED: \" + str(e))\n    try:\n        save_result = r1.eval(\"20 10 + 2 * 80 - 80 //\")\n        print(\"20 10 + 2 * 80 - 80 // = \", save_result.stack[0])\n    except Exception as e:\n        print(\"20 10 + 2 * 80 - 80 // \" + \"EXCEPTION HANDLED: \" + str(e))\n    try:\n        save_result = r1.eval(\"5 6 8 9 * // - 4 +\")\n        print(\"5 6 8 9 * // - 4 + = \", save_result.stack[0])\n    except Exception as e:\n        print(\"5 6 8 9 * // - 4 + \" + \"EXCEPTION HANDLED: \" + str(e))\n    try:\n        save_result = r1.eval(\"10 10 // 10 * 20 + 10 - 3333 +\")\n        print(\"10 10 // 10 * 20 + 10 - 3333 + = \", save_result.stack[0])\n    except Exception as e:\n        print(\"10 10 // 10 * 20 + 10 - 3333 + \" + \"EXCEPTION HANDLED: \" + str(e))\n\n    # handle exceptions\n    try:\n        save_result = r1.eval(\"1 2 hello\")\n        print(\"1 2 hello = \", save_result.stack[0])\n    except Exception as e:\n        print(\"1 2 hello = \" + \"EXCEPTION HANDLED: \" + str(e))\n    try:\n        save_result = r1.eval(\"7 3 * 2 ?\")\n        print(\"7 3 * 2 ? = \", save_result.stack[0])\n    except Exception as e:\n        print(\"7 3 * 2 ? 
= \" + \"EXCEPTION HANDLED: \" + str(e))\n    try:\n        save_result = r1.eval(\"3 py 5 + #\")\n        print(\"3 py 5 + # = \", save_result.stack[0])\n    except Exception as e:\n        print(\"3 py 5 + # = \" + \"EXCEPTION HANDLED: \" + str(e))\n    try:\n        save_result = r1.eval(\"1 2 3 4 -\")\n        print(\"1 2 3 - = \", save_result.stack[0])\n    except Exception as e:\n        print(\"1 2 3 4 + - = \" + \"EXCEPTION HANDLED: \" + str(e))\n    try:\n        save_result = r1.eval(\"7 6 * 9\")\n        print(\"7 6 * 9 = \", save_result.stack[0])\n    except Exception as e:\n        print(\"7 6 * 9 = \" + \"EXCEPTION HANDLED: \" + str(e))\n    try:\n        save_result = r1.eval(\"8 0 4 + 9 7 - 3 // 6\")\n        print(\"8 0 4 + 9 7 - 3 // 6 = \", save_result.stack[0])\n    except Exception as e:\n        print(\"8 0 4 + 9 7 - 3 // 6 = \" + \"EXCEPTION HANDLED: \" + str(e))\n    try:\n        save_result = r1.eval(\"1 2 - - -\")\n        print(\"1 2 - - - = \", save_result.stack[0])\n    except Exception as e:\n        print(\"1 2  - - - = \" + \"EXCEPTION HANDLED: \" + str(e))\n    try:\n        save_result = r1.eval(\"3 6 * 7 - +\")\n        print(\"3 6 * 7 - + = \", save_result.stack[0])\n    except Exception as e:\n        print(\"3 6 * 7 - + = \" + \"EXCEPTION HANDLED: \" + str(e))\n\n\nif __name__ == \"__main__\":\n    test_rpn()\n","sub_path":"02_Calculator.py","file_name":"02_Calculator.py","file_ext":"py","file_size_in_byte":10893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"576869007","text":"\"\"\"\nThis script detects faces in images, crop them and save to disk.\nInput:\n.jpg, .jpg\nResult:\nimages\n    \n        _.jpg\nlabels\n    .json\n\"\"\"\nimport argparse\nimport json\nfrom pathlib import Path\n\nimport albumentations as albu\nimport cv2\nimport numpy as np\nimport torch\nimport torch.backends.cudnn as cudnn\nfrom iglovikov_helper_functions.utils.img_tools import load_rgb\nfrom jpeg4py import JPEGRuntimeError\nfrom pytorch_toolbelt.utils.torch_utils import tensor_from_rgb_image\nfrom torch.utils.data import DataLoader, Dataset\nfrom torchvision.ops import nms\nfrom tqdm import tqdm\n\nfrom data import cfg_mnet, cfg_re50\nfrom layers.functions.prior_box import PriorBox\nfrom models.retinaface import RetinaFace\nfrom utils.box_utils import decode, decode_landm\nfrom utils.general import load_model, split_array\n\n\ndef get_args():\n    parser = argparse.ArgumentParser(description=\"Retinaface\")\n    arg = parser.add_argument\n    arg(\"-g\", \"--gpu_id\", type=int, help=\"GPU_id\")\n    arg(\"--num_gpu\", type=int, help=\"number of GPUs\")\n    arg(\n        \"-m\",\n        \"--trained_model\",\n        default=\"./weights/Resnet50_Final.pth\",\n        type=str,\n        help=\"Trained state_dict file path to open\",\n    )\n    arg(\"-i\", \"--input_path\", type=Path, help=\"Path where images are stored\", required=True)\n    arg(\n        \"-o\",\n        \"--output_path\",\n        type=Path,\n        help=\"Path where results will be saved: \" \"images folder for images and\" \"labels folder for bounding boxes\",\n        required=True,\n    )\n    arg(\"--network\", default=\"resnet50\", help=\"Backbone network mobile0.25 or resnet50\")\n    arg(\"--cpu\", action=\"store_true\", default=False, help=\"Use cpu inference\")\n    arg(\"-c\", \"--confidence_threshold\", default=0.7, type=float, help=\"confidence_threshold\")\n    arg(\"--top_k\", default=5000, type=int, help=\"top_k\")\n    arg(\"--nms_threshold\", default=0.4, type=float, help=\"nms_threshold\")\n    arg(\"--keep_top_k\", default=750, type=int, help=\"keep_top_k\")\n\n    arg(\"-j\", \"--num_workers\", type=int, help=\"Number of CPU threads\", default=64)\n    arg(\"-s\", \"--save_crops\", action=\"store_true\", default=False, help=\"If we want to store crops.\")\n    arg(\"-b\", \"--save_boxes\", action=\"store_true\", default=False, help=\"If we want to store bounding boxes.\")\n    arg(\"--origin_size\", default=True, type=str, help=\"Whether use origin image size to evaluate\")\n    arg(\"--fp16\", action=\"store_true\", help=\"Whether use fp16\")\n    arg(\n        \"--batch_size\",\n        type=int,\n        help=\"Size of the batch size. 
Use non 1 value only if you are sure that\" \"all images are of the same size.\",\n        default=1,\n    )\n    return parser.parse_args()\n\n\nclass InferenceDataset(Dataset):\n    def __init__(self, file_paths, origin_size, transform):\n        self.file_paths = file_paths\n        self.transform = transform\n\n        self.origin_size = origin_size\n\n    def __len__(self) -> int:\n\n        return len(self.file_paths)\n\n    def __getitem__(self, idx):\n        image_path = self.file_paths[idx]\n\n        try:\n            raw_image = load_rgb(image_path, lib=\"jpeg4py\")\n        except JPEGRuntimeError:\n            raw_image = load_rgb(image_path, lib=\"cv2\")\n\n        image = raw_image.astype(np.float32)\n\n        if self.origin_size:\n            resize = 1\n        else:\n            # testing scale\n            target_size = 1600\n            max_size = 2150\n            im_shape = image.shape\n            image_size_min = np.min(im_shape[0:2])\n            image_size_max = np.max(im_shape[0:2])\n            resize = float(target_size) / float(image_size_min)\n            # prevent bigger axis from being more than max_size:\n            if np.round(resize * image_size_max) > max_size:\n                resize = float(max_size) / float(image_size_max)\n\n            image = cv2.resize(image, None, None, fx=resize, fy=resize, interpolation=cv2.INTER_LINEAR)\n\n        image = self.transform(image=image)[\"image\"]\n\n        return {\n            \"torched_image\": tensor_from_rgb_image(image),\n            \"resize\": resize,\n            \"raw_image\": raw_image,\n            \"image_path\": str(image_path),\n        }\n\n\ndef main():\n    args = get_args()\n    torch.set_grad_enabled(False)\n\n    if args.network == \"mobile0.25\":\n        cfg = cfg_mnet\n    elif args.network == \"resnet50\":\n        cfg = cfg_re50\n    else:\n        raise NotImplementedError(f\"Only mobile0.25 and resnet50 are suppoted.\")\n\n    # net and model\n    net = RetinaFace(cfg=cfg, phase=\"test\")\n    net = load_model(net, args.trained_model, args.cpu)\n    net.eval()\n    if args.fp16:\n        net = net.half()\n\n    print(\"Finished loading model!\")\n    cudnn.benchmark = True\n    device = torch.device(\"cpu\" if args.cpu else \"cuda\")\n    net = net.to(device)\n\n    file_paths = sorted(args.input_path.rglob(\"*.jpg\"))\n\n    if args.num_gpu is not None:\n        start, end = split_array(len(file_paths), args.num_gpu, args.gpu_id)\n        file_paths = file_paths[start:end]\n\n    output_path = args.output_path\n\n    if args.save_boxes:\n        output_label_path = output_path / \"labels\"\n        output_label_path.mkdir(exist_ok=True, parents=True)\n\n    if args.save_crops:\n        output_image_path = output_path / \"images\"\n        output_image_path.mkdir(exist_ok=True, parents=True)\n\n    transform = albu.Compose([albu.Normalize(p=1, mean=(104, 117, 123), std=(1.0, 1.0, 1.0), max_pixel_value=1)], p=1)\n\n    test_loader = DataLoader(\n        InferenceDataset(file_paths, args.origin_size, transform=transform),\n        batch_size=args.batch_size,\n        num_workers=args.num_workers,\n        pin_memory=True,\n        drop_last=False,\n    )\n\n    with torch.no_grad():\n        for raw_input in tqdm(test_loader):\n            torched_images = raw_input[\"torched_image\"]\n\n            if args.fp16:\n                torched_images = torched_images.half()\n\n            resizes = raw_input[\"resize\"]\n            image_paths = Path(raw_input[\"image_path\"])\n           
 raw_images = raw_input[\"raw_image\"]\n\n            labels = []\n\n            if (\n                args.batch_size == 1\n                and args.save_boxes\n                and (output_label_path / f\"{Path(image_paths[0]).stem}.json\").exists()\n            ):\n                continue\n\n            loc, conf, land = net(torched_images.to(device))  # forward pass\n\n            batch_size = torched_images.shape[0]\n\n            image_height, image_width = torched_images.shape[2:]\n\n            scale1 = torch.Tensor(\n                [\n                    image_width,\n                    image_height,\n                    image_width,\n                    image_height,\n                    image_width,\n                    image_height,\n                    image_width,\n                    image_height,\n                    image_width,\n                    image_height,\n                ]\n            )\n\n            scale1 = scale1.to(device)\n\n            scale = torch.Tensor([image_width, image_height, image_width, image_height])\n            scale = scale.to(device)\n\n            priorbox = PriorBox(cfg, image_size=(image_height, image_width))\n            priors = priorbox.forward()\n            priors = priors.to(device)\n            prior_data = priors.data\n\n            for batch_id in range(batch_size):\n                image_path = image_paths[batch_id]\n                file_id = Path(image_path).stem\n                raw_image = raw_images[batch_id]\n\n                resize = resizes[batch_id].float()\n\n                boxes = decode(loc.data[batch_id], prior_data, cfg[\"variance\"])\n\n                boxes *= scale / resize\n                scores = conf[batch_id][:, 1]\n\n                landmarks = decode_landm(land.data[batch_id], prior_data, cfg[\"variance\"])\n                landmarks *= scale1 / resize\n\n                # ignore low scores\n                valid_index = torch.where(scores > args.confidence_threshold)[0]\n                boxes = boxes[valid_index]\n                landmarks = landmarks[valid_index]\n                scores = scores[valid_index]\n\n                order = scores.argsort(descending=True)\n\n                boxes = boxes[order]\n                landmarks = landmarks[order]\n                scores = scores[order]\n\n                # do NMS\n                keep = nms(boxes, scores, args.nms_threshold)\n                boxes = boxes[keep, :].int()\n\n                landmarks = landmarks[keep].int()\n\n                if boxes.shape[0] == 0:\n                    continue\n\n                scores = scores[keep].cpu().numpy().astype(np.float64)\n\n                for crop_id, bbox in enumerate(boxes):\n\n                    bbox = bbox.cpu().numpy()\n\n                    labels += [\n                        {\n                            \"crop_id\": crop_id,\n                            \"bbox\": bbox.tolist(),\n                            \"score\": scores[crop_id],\n                            \"landmarks\": landmarks[crop_id].tolist(),\n                        }\n                    ]\n\n                    if args.save_crops:\n                        x_min, y_min, x_max, y_max = bbox\n\n                        x_min = max(0, x_min)\n                        y_min = max(0, y_min)\n\n                        crop = raw_image[y_min:y_max, x_min:x_max].cpu().numpy()\n\n                        target_folder = output_image_path / f\"{file_id}\"\n                        target_folder.mkdir(exist_ok=True, parents=True)\n\n       
                 crop_file_path = target_folder / f\"{file_id}_{crop_id}.jpg\"\n\n                        if crop_file_path.exists():\n                            continue\n\n                        cv2.imwrite(\n                            str(crop_file_path),\n                            cv2.cvtColor(crop, cv2.COLOR_BGR2RGB),\n                            [int(cv2.IMWRITE_JPEG_QUALITY), 90],\n                        )\n\n                if args.save_boxes:\n                    result = {\n                        \"file_path\": image_path,\n                        \"file_id\": file_id,\n                        \"bboxes\": labels,\n                    }\n\n                    with open(output_label_path / f\"{file_id}.json\", \"w\") as f:\n                        json.dump(result, f, indent=2)\n\n\n#\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"detect_and_crop_on_folder.py","file_name":"detect_and_crop_on_folder.py","file_ext":"py","file_size_in_byte":10225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
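A hypothetical consumer of the label files written by the script above; the `output/labels` path is an example that mirrors the script's own layout:

```python
# Collect every detection score across the saved JSON labels.
import json
from pathlib import Path

scores = []
for label_file in Path("output/labels").glob("*.json"):
    with open(label_file) as f:
        result = json.load(f)
    scores += [box["score"] for box in result["bboxes"]]

if scores:
    print(len(scores), "faces, mean score", sum(scores) / len(scores))
else:
    print("no detections")
```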
+{"seq_id":"603116709","text":"#!/usr/bin/env python\nfrom __future__ import division\nimport tty\nimport select\nimport sys\nimport termios\nimport rospy\nimport os\nimport thread\nimport signal\nimport math\nfrom std_msgs.msg import Header\nfrom sensor_msgs.msg import LaserScan\nfrom geometry_msgs.msg import Twist, Vector3, Point, PointStamped\nfrom nav_msgs.msg import Odometry\nimport Xlib.display as display\nfrom tf.transformations import euler_from_quaternion, rotation_matrix, quaternion_from_matrix\n\ndef convert_pose_to_xy_and_theta(pose):\n    \"\"\" Convert pose (geometry_msgs.Pose) to a (x,y,yaw) tuple \"\"\"\n    orientation_tuple = (pose.orientation.x, pose.orientation.y, pose.orientation.z, pose.orientation.w)\n    angles = euler_from_quaternion(orientation_tuple)\n    return (pose.position.x, pose.position.y, angles[2])\n\ndef distanceTo(a, b):\n    '''Calculate distance from point a to point b'''\n    return math.sqrt((a[0]-b[0])*(a[0]-b[0]) + (a[1]-b[1])*(a[1]-b[1]))\n\ndef input_thread(avoid_obst):\n    '''Seperate thread for user input, loops, checking for button input'''\n    running = True\n    while running:\n        tty.setraw(sys.stdin.fileno())\n        select.select([sys.stdin], [], [], 0)\n        key = sys.stdin.read(1)\n        termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)\n        if key == 'q':\n            '''Kill the program and stop the robot'''\n            running = False\n            os.kill(os.getpid(), signal.SIGINT)\n\nclass avoid_obst(object):\n    def __init__ (self):\n        rospy.init_node(\"avoid_obst\", disable_signals=True)\n        self.scan_subscriber = rospy.Subscriber('scan', LaserScan, self.getScan)\n        self.odom_sub = rospy.Subscriber('odom', Odometry, self.getOdom)\n        self.velocityPublisher = rospy.Publisher('cmd_vel', Twist, queue_size=5)\n        self.speed = 0.1\n        self.rate = rospy.Rate(10)\n        self.vel = Twist()\n        self.pose = (0, 0, 0)\n        self.scan = []\n        self.minimumObjectPoints = 2\n        self.maxObjectDistance = 0.2\n        self.maxTrackDistance = 1\n        self.totalForce = (0, 0)\n        self.goal = (0, 0)\n\n    def getOdom(self, msg):\n        '''Receive and process odom messages, store them as self.pose'''\n        self.pose = convert_pose_to_xy_and_theta(msg.pose.pose)\n        if self.goal == (0, 0):\n            self.goal = (self.pose[0], self.pose[1]-2)\n\n    def getScan(self, msg):\n        '''Receive and process scan messages, store them as self.scan'''\n        self.scan = msg.ranges\n\n    def convertToXY(self, angle, r):\n        '''Given a polar coordinate, return the corresponding cartesian coordinate'''\n        newX = math.cos(math.radians(angle-90))*r\n        newY = math.sin(math.radians(angle-90))*r\n        return (newX, newY)\n\n    def convertToPolar(self, x, y):\n        '''Given a cartesian coordinate, return the corresponding polar coordinate'''\n        r = math.sqrt(x*x + y*y)\n        theta = math.atan2(y, x)+1.57\n        return (r, theta)\n\n    def processScan(self, angle, prev):\n        '''Given an angle and whether or not the previous point was part of an object, create or add to an object and return if this point is part of an object'''\n        if self.scan[angle] > 0.0 and self.scan[angle] < self.maxTrackDistance:\n            newPoint = self.convertToXY(angle, self.scan[angle])\n            if not prev or distanceTo(self.objects[-1][-1], newPoint) > self.maxObjectDistance:\n                self.objects.append([])\n                
self.objects[-1].append((newPoint[0], newPoint[1], angle))\n                prev = True\n            else:\n                self.objects[-1].append((newPoint[0], newPoint[1], angle))\n                prev = True\n        else:\n            prev = False\n\n        return prev\n\n    def processObjects(self):\n        '''Use the current object list to calculate the forces each object applies to the robot by finding each object's closest point'''\n        closestPoints = []\n        if distanceTo((self.objects[0][0][0], self.objects[0][0][1]), (self.objects[-1][-1][0], self.objects[-1][-1][1])) < self.maxObjectDistance:\n            self.objects[0] = self.objects[0] + self.objects[-1]\n            self.objects = self.objects[:-1]\n\n        for object in self.objects:\n            if len(object) >= self.minimumObjectPoints:\n                closestPoints.append(5)\n                for point in object:\n                    if math.sqrt((point[0]*point[0])+(point[1]*point[1])) < closestPoints[-1]:\n                        closestPoints[-1] = point\n            else:\n                self.objects.remove(object)\n\n        for point in closestPoints:\n            self.totalForce = (self.totalForce[0]-point[0], self.totalForce[1]-point[1]*10)\n\n        return closestPoints\n\n    def run(self):\n        while not rospy.is_shutdown():\n            prev = False\n            self.objects = []\n            closestPoints = []\n            self.vel.linear.x = self.speed/4\n            self.vel.angular.z = 0\n            if len(self.scan) > 0:\n                theta = math.atan2(self.goal[1]-self.pose[1], self.goal[0]-self.pose[0])\n                self.totalForce = (distanceTo(self.goal, (self.pose[0], self.pose[1]))*math.cos(theta), distanceTo(self.goal, (self.pose[0], self.pose[1]))*math.sin(theta))\n                for angle in range(360):\n                    prev = self.processScan(angle, prev)\n\n                if len(self.objects) > 0:\n                    closestPoints = self.processObjects()\n\n                self.vel.linear.x = self.speed*self.convertToPolar(self.totalForce[0], self.totalForce[1])[0]/2\n                self.vel.angular.z = self.speed*self.convertToPolar(self.totalForce[0], self.totalForce[1])[1]*5\n\n            self.velocityPublisher.publish(self.vel)\n\nif __name__ == \"__main__\":\n    settings = termios.tcgetattr(sys.stdin)\n    avoid_obst = avoid_obst()\n\n    #Start new thread for input so it is not on the same thread as the robot processing\n    thread.start_new_thread(input_thread, (avoid_obst, ))\n    try:\n        avoid_obst.run()\n    except KeyboardInterrupt:\n        avoid_obst.vel.linear.x = 0\n        avoid_obst.vel.angular.z = 0\n        avoid_obst.velocityPublisher.publish(avoid_obst.vel)\n","sub_path":"scripts/obstacle_avoidance.py","file_name":"obstacle_avoidance.py","file_ext":"py","file_size_in_byte":6135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
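The `run` loop above sums an attractive pull toward the goal with a repulsive push from each obstacle's closest point. A hypothetical ROS-free sketch of that potential-field sum, using the same y-axis weighting as the record:

```python
import math

# Hypothetical standalone version of the force computation in run()/processObjects().
def total_force(pose, goal, obstacle_points):
    d = math.hypot(goal[0] - pose[0], goal[1] - pose[1])
    theta = math.atan2(goal[1] - pose[1], goal[0] - pose[0])
    force = [d * math.cos(theta), d * math.sin(theta)]  # attraction toward the goal
    for x, y in obstacle_points:                        # repulsion from obstacles
        force[0] -= x
        force[1] -= y * 10  # the record weights the y component 10x
    return tuple(force)

print(total_force((0, 0, 0), (0, -2), [(0.3, -0.5)]))
```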
+{"seq_id":"354699870","text":"# # Working with bigger data - online algorithms and out-of-core learning\n\nimport re\n\nfrom nltk.corpus import stopwords\nstop = stopwords.words('english')\n\ndef tokenizer(text):\n    text = re.sub('<[^>]*>', '', text)\n    emoticons = re.findall('(?::|;|=)(?:-)?(?:\\)|\\(|D|P)', text.lower())\n    text = re.sub('[\\W]+', ' ', text.lower()) +        ' '.join(emoticons).replace('-', '')\n    tokenized = [w for w in text.split() if w not in stop]\n    return tokenized\n\n\ndef stream_docs(path):\n    with open(path, 'r', encoding='utf-8') as csv:\n        next(csv)  # skip header\n        for line in csv:\n            text, label = line[:-3], int(line[-2])\n            yield text, label\n\n\n\n# 2019.09.23 change\n#next(stream_docs(path='movie_data.csv'))\nif __name__ == '__main__':\n    print(next(stream_docs(path='movie_data.csv')))\n","sub_path":"ch08/stream_docs.py","file_name":"stream_docs.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"433034040","text":"import sys\nimport string\nimport itertools\n\n#from commute_stringgroups_v2 import *\nfrom eqn_class_defn import *\nfrom numpy import linalg as LA\nimport pylab as plt\nfrom itertools import product\nimport numpy as np\n\n\ndef norm_fn(A_lamb):    \n    return LA.norm(A_lamb, 'fro')\n\n\ndef Ham_non_int_fn(L):\n    '''\n    Open boundary condition\n\n    '''\n    J = -1.\n    hz=(np.sqrt(5)+1)/4 #parameters used by Kim and Huse\n    hx=(np.sqrt(5)+5)/8\n\n    \n\n    list_zz=[]\n    for i in range(L-1):\n        identity_arr=list('1'*L)\n        identity_arr[i:i+2]='zz'\n        zz_arr=''.join(identity_arr)\n        list_zz.append(zz_arr)\n    id_x=[1]*(L-1)\n\n    H_zz=equation(dict(zip(list_zz,id_x)))\n    H_z=hz*equation({'1'*(i)+'z' + '1'*(L-1-i):1  for i in range(L)})\n    H_x=hx*equation({'1'*(i)+'x' + '1'*(L-1-i):1  for i in range(L)})\n\n    H=H_zz+ H_z + H_x\n\n    return H.make_operator().toarray()\n\n\n\ndef Ham_int_fn(L):\n    '''\n    Open boundary condition. I should ideally move to periodic boundary condition\n\n    '''\n\n\n    J = 1.\n    \n    hx=0.5\n    \n    \n\n    list_zz=[]\n    for i in range(L-1):\n        identity_arr=list('1'*L)\n        identity_arr[i:i+2]='zz'\n        zz_arr=''.join(identity_arr)\n        list_zz.append(zz_arr)\n    id_x=[1]*(L-1)\n\n    H_zz=equation(dict(zip(list_zz,id_x)))\n    H_x=hx*equation({'1'*(i)+'x' + '1'*(L-1-i):1  for i in range(L)})\n\n    H=H_zz + H_x\n\n    #print H.make_operator().toarray()\n    return H.make_operator().toarray()\n\n\ndef Ham_int_fn_periodic(L):\n\n    J = 1.\n        \n    hx=0.5\n\n\n\n    list_zz=[]\n    for i in range(L-1):\n        identity_arr=list('1'*L)\n        identity_arr[i:i+2]='zz'\n        zz_arr=''.join(identity_arr)\n        list_zz.append(zz_arr)\n    #print list_zz\n    #Periodic boundary condition    \n    identity_arr=list('1'*L)\n    identity_arr[0]='z'\n    identity_arr[L-1]='z'\n    zz_arr=''.join(identity_arr)\n    list_zz.append(zz_arr)\n    #print list_zz\n    id_x=[1]*L\n\n    H_zz=equation(dict(zip(list_zz,id_x)))\n    H_x=hx*equation({'1'*(i)+'x' + '1'*(L-1-i):1  for i in range(L)})\n\n    H=H_zz + H_x\n    return H.make_operator().toarray()\n\n\n\ndef LZ_fn(Delta, lambda_t):\n    L=1\n    #Delta = 1.\n    #lambda_t=0.5\n\n    H_z=Delta*equation({'1'*(i)+'z' + '1'*(L-1-i):1  for i in range(L)})\n    H_x=lambda_t*equation({'1'*(i)+'x' + '1'*(L-1-i):1  for i in range(L)})\n\n    H= H_z + H_x\n\n    return H.make_operator().toarray()\n\ndef three_site_1_Ham_fn(J, lambda_t):\n    L=3\n    #Delta = 1.\n    #lambda_t=0.5\n    list_zz=[]\n    for i in range(L-1):\n        identity_arr=list('1'*L)\n        identity_arr[i:i+2]='zz'\n        zz_arr=''.join(identity_arr)\n        list_zz.append(zz_arr)\n    id_x=[1]*(L-1)\n\n    H_zz=equation(dict(zip(list_zz,id_x)))\n    \n\n\n    #H_z=Delta*equation({'1'*(i)+'z' + '1'*(L-1-i):1  for i in range(L)})\n    \n    H_x=lambda_t*equation({'1x1':1})\n\n    H= J*H_zz + H_x\n\n    return H.make_operator().toarray()\n\n\ndef three_site_2_Ham_fn(J, lambda_t):\n    L=3\n    #Delta = 1.\n    #lambda_t=0.5\n    list_zz=[]\n    for i in range(L-1):\n        identity_arr=list('1'*L)\n        identity_arr[i:i+2]='zz'\n        zz_arr=''.join(identity_arr)\n        list_zz.append(zz_arr)\n    id_x=[1]*(L-1)\n\n    H_zz=equation(dict(zip(list_zz,id_x)))\n    \n\n\n    #H_z=Delta*equation({'1'*(i)+'z' + '1'*(L-1-i):1  for i in range(L)})\n    \n    H_x=lambda_t*equation({'1x1':1, 'x11':1})\n\n    H= J*H_zz + H_x\n\n    return 
H#.make_operator().toarray()\n\n\n\n\ndef two_site_1_Ham_fn(J, lambda_t):\n    L=2\n    #Delta = 1.\n    #lambda_t=0.5\n    list_zz=[]\n    for i in range(L-1):\n        identity_arr=list('1'*L)\n        identity_arr[i:i+2]='zz'\n        zz_arr=''.join(identity_arr)\n        list_zz.append(zz_arr)\n    id_x=[1]*(L-1)\n\n    H_zz=equation(dict(zip(list_zz,id_x)))\n    \n\n\n    #H_z=Delta*equation({'1'*(i)+'z' + '1'*(L-1-i):1  for i in range(L)})\n    \n    H_x=lambda_t*equation({'1x':1})\n\n    H= J*H_zz + H_x\n\n    return H#.make_operator().toarray()\n\n\n\ndef del_lambda_Ham_fn(L):\n\n    dH=equation({'1'*(i)+'x' + '1'*(L-1-i):1  for i in range(L)})\n    #print dH#.make_operator().toarray()\n    return dH.make_operator()#.toarray()\n      \n#print del_lambda_Ham_fn(3)      \ndef gauge_potent_mu_fn(Ham,L,mu, dH):\n    E,V= LA.eigh(Ham)#Ham.eigh()\n    op_lamb_arr = dH#del_lambda_Ham_fn(L)\n    x=np.dot(op_lamb_arr, V)\n    num_lamb_mat =np.dot(V.T,x) #matrix multiplication\n    wij = np.outer(E,np.ones(2**L))-np.outer(np.ones(2**L),E)\n    \n    A_lamb_energy_basis = -1j*np.multiply(wij,num_lamb_mat)/(wij**2+ mu**2)#element-wise multiplication\n\n\n    \n    A_lamb_real_basis=np.dot(V,np.dot(A_lamb_energy_basis, V.T))\n\n    return A_lamb_real_basis\n\n\n##############\n#String manipulation\n################\n\ndef general_op_1(L):\n    list_temp=[]\n    list_op=['x','z', 'y']\n    for i in range(L):\n        identity_arr=list('1'*L)\n        for value in list_op:\n            identity_arr[i]=value\n            str_temp=''.join(identity_arr)\n            list_temp.append(str_temp)\n    #print list_temp\n    #id_x=[1]*(len(list_temp))\n    return list_temp#equation(dict(zip(list_temp,id_x))).make_operator().toarray()\n\n\ndef general_op_2(L):\n    list_temp=[]\n    list_op=['x','z', 'y']\n\n    for value1 in list_op:\n        for value2 in list_op:\n            for i in range(L-1):\n                identity_arr=list('1'*L)\n                identity_arr[i]=value1\n                identity_arr[i+1]=value2\n                str_temp=''.join(identity_arr)\n                list_temp.append(str_temp)\n\n    #id_x=[1]*(len(list_temp))\n\n    return list_temp#equation(dict(zip(list_temp,id_x))).make_operator().toarray()\n\n\n\ndef general_op_bigger_than_3(L, supp_int):\n    if supp_int < 3:\n        raise ValueError('support cannot be smaller than 3')\n    else:\n        list_temp=[]\n        list_op=['x','z', 'y', '1']\n\n        all_pos_values=[p for p in itertools.product(list_op,repeat=supp_int)]\n        for j in range(len(all_pos_values)):\n            for i in range(L-supp_int + 1):\n                identity_arr=list('1'*L)\n                for k in range(supp_int):\n                    identity_arr[i+k]=all_pos_values[j][k]\n                                    \n                if all_pos_values[j][0] !='1' and all_pos_values[j][supp_int-1] !='1':\n                    str_temp=''.join(identity_arr)\n                    list_temp.append(str_temp)\n            \n\n        return list_temp#equation(dict(zip(list_temp,id_x))).make_operator().toarray()\n    \n    \ndef general_op_fn(supp_int,L):\n    if supp_int==1:\n        list_temp=general_op_1(L)\n    elif supp_int==2:\n        list_temp=general_op_2(L)\n    elif supp_int > 2:\n        list_temp=general_op_bigger_than_3(L, supp_int)\n    else:\n        raise ValueError('supp_int must be a positive integer')\n    return list_temp\n\n\n\ndef mean_sigma_fn(alpha_arr,L):\n    N_loop=len(alpha_arr[:,0])\n    mean_length_arr=np.zeros(N_loop, dtype=float)\n
    x_square_length_arr=np.zeros(N_loop, dtype=float)\n\n    sigma_length_arr=np.zeros(N_loop, dtype=float)\n\n    denom=0.0\n    for i in range(L):\n        mean_length_arr+=(i+1)*alpha_arr[:,i]\n        x_square_length_arr+=(i+1)**2*alpha_arr[:,i]\n\n        denom+=alpha_arr[:,i]\n        \n    mean_length_arr=mean_length_arr[1:]/denom[1:]\n    x_square_length_arr=x_square_length_arr[1:]/denom[1:]\n    \n    sigma_length_arr=np.sqrt(x_square_length_arr-mean_length_arr**2)\n    return mean_length_arr,sigma_length_arr","sub_path":"CD_cutoff/ED_method_zero_denom/python_jupyter_ED/v_5_A_Op_spreading_open_boundary/entropy_IPR_data_ssh/data_ssh/v2_bend_in_data/dH_i5/Ham_gauge_potential.py","file_name":"Ham_gauge_potential.py","file_ext":"py","file_size_in_byte":7274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
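+The heart of gauge_potent_mu_fn above is the regularized adiabatic-gauge-potential formula A_jk = -i*w_jk*<j|dH|k>/(w_jk^2 + mu^2) in the eigenbasis of H. A minimal numpy-only sketch of that step, using random Hermitian stand-ins for H and dH (names and sizes here are illustrative, not from the original file):
+import numpy as np
+from numpy import linalg as LA
+
+rng = np.random.default_rng(0)
+dim = 8
+H = rng.normal(size=(dim, dim))
+H = (H + H.T) / 2                            # random real symmetric "Hamiltonian"
+dH = rng.normal(size=(dim, dim))
+dH = (dH + dH.T) / 2                         # stand-in for dH/dlambda
+E, V = LA.eigh(H)
+num = V.T @ dH @ V                           # matrix elements <j|dH|k> in the eigenbasis
+wjk = np.subtract.outer(E, E)                # energy differences E_j - E_k
+mu = 0.1                                     # regulator suppressing small-gap divergences
+A = -1j * wjk * num / (wjk**2 + mu**2)       # regularized gauge potential, energy basis
+A_real = V @ A @ V.T                         # rotate back to the computational basis
+print(np.allclose(A_real, A_real.conj().T))  # the gauge potential is Hermitian: True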
+{"seq_id":"17376032","text":"from flask import Flask, jsonify, send_file, request, render_template\nimport argparse\nimport threading\nimport uuid\nimport json\nimport time\nimport datetime\n\nargParser = argparse.ArgumentParser()\nargParser.description = 'For learning use only, DO NOT use it for illegal purposes. -- InJeCTrL'\nargParser.add_argument(\"-i\", \"--input\", help = \"Input questions file.\", required=True)\nargParser.add_argument(\"-n\", \"--nvoters\", help = \"Count of voters.\", required=True)\nargParser.add_argument(\"-p\", \"--port\", help = \"Port of server.\", required=True)\nargs = argParser.parse_args()\n\nn_Voters = int(args.nvoters)\nmanual_port = int(args.port)\nfile_input = args.input\n\nGUIDs = {}\nQuestions_Scored = []\nQuestions = []\nTotal = 0\nFull = 0\nTip = \"\"\nTip_timeout = \"\"\nTopic = \"\"\nn_Voted = 0\nn_Abstain = 0\nm_vote = threading.Lock()\n\n# Generate and export GUID list\nwith open(\"VoteGUID.txt\", \"w\", encoding='utf-8') as f:\n    for i in range(n_Voters):\n        tg = str(uuid.uuid1())\n        GUIDs[tg] = False\n        f.write(tg + \"\\n\")\n\napp = Flask(__name__)\n\n@app.route(\"/api/result\", methods = [\"GET\"])\ndef getResult():\n    return jsonify({\"total_score\": Total, \"full_score\": Full, \"rest_score\": Full - Total,\n                    \"tip\": Tip, \"tip_timeout\": Tip_timeout,\n                    \"total_vcount\": n_Voters, \"voted_vcount\": n_Voted, \"abstain_vcount\": n_Abstain,\n                    \"topic\": Topic, \"question_count\": len(Questions) ,\"data\": Questions_Scored})\n\n@app.route(\"/api/question\", methods = [\"GET\"])\ndef getQuestionList():\n    return jsonify({\"topic\": Topic, \"tip\": Tip, \"tip_timeout\": Tip_timeout, \"question_count\": len(Questions) ,\"data\": Questions})\n\n@app.route(\"/api/vote\", methods = [\"POST\"])\ndef postVote():\n    global Questions_Scored\n    global Total\n    global Full\n    global GUIDs\n    global n_Voted\n    var_request = {}\n    try:\n        v_guid = request.values.get(\"guid\")\n        v_answers = json.loads(request.values.get(\"data\"))\n        if len(v_answers) != len(Questions_Scored):\n            return jsonify({\"success\": 1, \"data\": \"Illegal Vote!\"})\n        else:\n            if not GUIDs.__contains__(v_guid):\n                return jsonify({\"success\": 1, \"data\": \"Illegal GUID!\"})\n            elif GUIDs[v_guid] == True:\n                return jsonify({\"success\": 1, \"data\": \"No duplicate votes!\"})\n            for i_question in range(len(Questions_Scored)):\n                if int(v_answers[i_question]) in list(range(len(Questions_Scored[i_question][\"Answers\"]))):\n                    var_request[i_question] = int(v_answers[i_question])\n                else:\n                    return jsonify({\"success\": 1, \"data\": \"Illegal Vote!\"})\n    except:\n        return jsonify({\"success\": 1, \"data\": \"Illegal Vote!\"})\n    with m_vote:\n        for index, question in enumerate(Questions_Scored):\n            Full += Questions_Scored[index][\"Full\"]\n            Total += Questions_Scored[index][\"Answers\"][var_request[index]][\"Score\"]\n            Questions_Scored[index][\"Answers\"][var_request[index]][\"Count\"] += 1\n        GUIDs[v_guid] = True\n        n_Voted += 1\n        return jsonify({\"success\":0, \"data\":\"Vote successfully!\"})\n\n@app.route(\"/api/abstain\", methods = [\"POST\"])\ndef postAbstain():\n    global GUIDs\n    global n_Abstain\n    try:\n        v_guid = request.values.get(\"guid\")\n      
  if not GUIDs.__contains__(v_guid):\n            return jsonify({\"success\": 1, \"data\": \"Illegal GUID!\"})\n        elif GUIDs[v_guid] == True:\n            return jsonify({\"success\": 1, \"data\": \"No duplicate votes!\"})\n    except:\n        return jsonify({\"success\": 1, \"data\": \"Illegal Vote!\"})\n    with m_vote:\n        GUIDs[v_guid] = True\n        n_Abstain += 1\n        return jsonify({\"success\":0, \"data\":\"Abstained successfully!\"})\n\n@app.route(\"/\", methods = [\"GET\"])\n@app.route(\"/<guid>\", methods = [\"GET\"])\ndef index(guid = \"\"):\n    with m_vote:\n        if GUIDs.__contains__(guid) and GUIDs[guid] == False:\n            return render_template(\"./vote.html\", GUID = guid)\n        else:\n            return send_file(\"./index.html\")\n\ndef timedout(TimeLimit):\n    global GUIDs\n    global n_Abstain\n    global Tip_timeout\n    time.sleep(TimeLimit)\n    with m_vote:\n        for guid in GUIDs:\n            if GUIDs[guid] == False:\n                GUIDs[guid] = True\n                n_Abstain += 1\n    Tip_timeout = \"The vote is over.\"\n\ndef inputParser(path):\n    global Questions\n    global Questions_Scored\n    global Topic\n    global Tip\n    global Tip_timeout\n    global timedout_task\n    with open(path, \"r\") as f:\n        content = f.read()\n        data_parsed = json.loads(content)\n        Topic = data_parsed[\"Topic\"]\n        Tip = data_parsed[\"Tip\"]\n        Tip_timeout = \"The vote will end on %s.\" % (datetime.datetime.now() + datetime.timedelta(seconds=int(data_parsed[\"TimeLimit\"]))).strftime(\"%Y-%m-%d %H:%M:%S\")\n        threading.Thread(target=timedout, daemon = True, args=(int(data_parsed[\"TimeLimit\"]),)).start()\n        Questions = data_parsed[\"Questions\"].copy()\n        Questions_Scored = Questions.copy()\n        for question in Questions_Scored:\n            question[\"Full\"] = 0\n            for answer in question[\"Answers\"]:\n                answer[\"Count\"] = 0\n                question[\"Full\"] = max(question[\"Full\"], answer[\"Score\"])\n\ninputParser(file_input)\napp.run(host='0.0.0.0', port=manual_port)\n","sub_path":"vote.py","file_name":"vote.py","file_ext":"py","file_size_in_byte":5327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
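+Assuming the server above is running locally, a voter client needs only its issued GUID and two requests; a hypothetical sketch (the base URL and GUID are placeholders):
+import json
+import requests
+
+BASE = "http://localhost:8080"                 # whatever --port was passed to the server
+guid = "00000000-0000-0000-0000-000000000000"  # one line from VoteGUID.txt
+
+questions = requests.get(BASE + "/api/question").json()
+answers = [0] * questions["question_count"]    # pick answer 0 for every question
+r = requests.post(BASE + "/api/vote",
+                  data={"guid": guid, "data": json.dumps(answers)})
+print(r.json())                                # {"success": 0, "data": "Vote successfully!"}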
+{"seq_id":"510572596","text":"expected_output  = {'Entropy_target': '256 bits',\n 'entropies': {1: {'entropy_bits': '384',\n                   'requests': 'None',\n                   'source': 'ACT-2',\n                   'status': 'Working',\n                   'type': 'HW'},\n               2: {'entropy_bits': '128(*)',\n                   'requests': 'None',\n                   'source': 'randfill',\n                   'status': 'Working',\n                   'type': 'SW'},\n               3: {'entropy_bits': '160(*)',\n                   'requests': 'None',\n                   'source': 'getrandombytes',\n                   'status': 'Working',\n                   'type': 'SW'}},\n 'entropy_actual_collection': '384 bits',\n 'entropy_collection': '60 minutes',\n 'entropy_collection_recent': '10 minutes ago'}","sub_path":"src/genie/libs/parser/iosxe/tests/ShowCryptoEntropyStatus/cli/equal/golden_output_without_requests_expected.py","file_name":"golden_output_without_requests_expected.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"155837326","text":"import Algo_test01\nimport Db\nimport pandas as pd\nclass BackTradeTest:\n    def __init__(self):\n        self.algo = Algo_test01.Algo_test01(0.5)\n        self.db = Db.StockDb()\n\n    def day_setting(self,start,end):\n        date_data = self.db.select_Date(start, end)\n        date_len = len(date_data)\n        print(len(date_data))\n        # self.profit_datas = pd.DataFrame(columns=('date', 'profit', 'total_profit'))\n        self.total_profit = 1\n        # if date_len == 0:\n        #     print(\"두 날짜 사이의 거래일이 없음\")\n        for date in range(date_len-1):\n            if (date!=0) :\n                self.total_profit *= self.day_trading(date_data,date)\n                print(\"%f %s\" % (self.total_profit, date))\n\n\n        print(\"%d일 동안의 총 수익률 : %f프로\" %(date_len,(self.total_profit-1)*100))\n\n    def day_trading(self,date_data,date):\n        day = date_data['date'][date][0:8]\n        last_day = date_data['date'][date - 1][0:8]\n        next_day = date_data['date'][date + 1][0:8]\n        self.daily_data = self.db.select_MinuteData(day)\n        self.next_day_data = self.db.select_Daily_Data(next_day)\n        self.last_day_data = self.db.select_Daily_Data(last_day)\n\n        high = self.last_day_data['high']\n        low = self.last_day_data['low']\n        price_range = int(high)-int(low)\n        start_price = self.daily_data['now'][0]\n        next_start_price = self.next_day_data['start']\n        daliy_min_len = len(self.daily_data) - 1  # 3시 20분에서 3시 30분 제외\n\n        for min in range(daliy_min_len):\n            now_price = self.daily_data['now'][min]\n            if self.algo.cal_sell(now_price,price_range,start_price):\n                #profit  = int(self.daily_data['now'][daliy_min_len]) - int(self.daily_data['now'][min])\n                profit = int(next_start_price) / int(self.daily_data['now'][min])*0.995\n                print(\"%f %s\" %(profit,day))\n                return profit\n\n        return 1\n\nif __name__ == \"__main__\":\n    main = BackTradeTest()\n    main.day_setting(\"20200217\", \"20200721\")","sub_path":"BackTradeTest.py","file_name":"BackTradeTest.py","file_ext":"py","file_size_in_byte":2070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"586116888","text":"# -*- coding: utf-8 -*-\nfrom django.conf.urls import patterns, url\nfrom django.conf import settings\n\nfrom showcase import views\nimport os\n\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\n\nurlpatterns = patterns('',\n                       # Examples:\n                       # url(r'^$', 'project.views.home', name='home'),\n                       # url(r'^blog/', include('blog.urls')),\n\n                       # main page\n                       url(r'^$', views.manufacturers, name='manufacturers'),\n\n                       # snowboards page\n                       url(r'^manufacture/(?P\\d+)/$',\n                           views.snowboards, name='snowboards'),\n\n                       # snowboards_details page\n                       url(r'^snowboard/(?P\\d+)/$',\n                           views.details,\n                           name='details'),\n\n                       # comments_view\n                       url(r'^snowboard/addcomment/(?P\\d+)/$',\n                           views.addcomment,\n                           name='addcomment'),\n\n                       # trying add media\n                       url('^media/(?P.*)$', 'django.views.static.serve',\n                           {'document_root': os.path.join(BASE_DIR, 'media')})\n                       )\n","sub_path":"showcase/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"240412402","text":"\"\"\"MazeGenerators package contains all things for making Mazes.\"\"\"\nfrom random import choice, random\n\ndef fill_maze_no_inner_walls(maze):\n\t\"\"\"Fill maze with just outer walls.\"\"\"\n\tmaze.slabs = [[False for i in range(len(maze))]\n\t\t\t\t   for j in range(len(maze)+1)]\n\n\tmaze.columns = [[False for i in range(len(maze))]\n\t\t\t\t\t for j in range(len(maze)+1)]\n\n\tmaze.slabs[0] = [True for i in range(len(maze))]\n\tmaze.slabs[-1] = [True for i in range(len(maze))]\n\n\tmaze.columns[0] = [True for i in range(len(maze))]\n\tmaze.columns[-1] = [True for i in range(len(maze))]\n\nclass Maze:\n\t\"\"\"Represents a square Maze, its cells, walls, and all.\"\"\"\n\n\tdef __init__(self, size=0, gen_func=fill_maze_no_inner_walls):\n\t\t\"\"\"\n\t\tInitialize Maze object.\n\n\t\tArgs:\n\t\t\tsize (int) -- The side length of the maze (default: 0).\n\t\t\tgen_func (method) -- The method that will be called to set\n\t\t\tthe initial state of the maze\n\t\t\t(default: fill_maze_no_inner_walls).\n\t\t\"\"\"\n\t\tself.size = size\n\t\tself.slabs = None # Represents horizontal cell borders.\n\t\tself.columns = None # Represents vertical cell borders.\n\n\t\tgen_func(self)\n\n\tdef __getitem__(self, key):\n\t\t\"\"\"\n\t\tCompute and return a dict containing info on the borders around\n\t\ta cell.\n\n\t\tArgs:\n\t\t\tkey (int, int) -- Indicates the cell location.\n\n\t\tReturns:\n\t\t\tA dictionary. For each side of the cell, True indicates the\n\t\t\tpresence of a wall and False the lack thereof.\n\t\t\"\"\"\n\t\tx = key[0]\n\t\ty = key[1]\n\t\treturn {'N':self.slabs[y][x], 'S':self.slabs[y+1][x],\n\t\t\t\t'W':self.columns[x][y], 'E':self.columns[x+1][y] }\n\n\tdef __setitem__(self, key, value):\n\t\t\"\"\"\n\t\tSet the existence of a wall to value.\n\n\t\tArgs:\n\t\t\tkey (int, int, str) -- Indicates cell location and direction\n\t\t\tof a border.\n\t\t\tvalue (bool) -- The value that the border will take on.\n\t\t\"\"\"\n\t\tx = key[0]\n\t\ty = key[1]\n\t\tletter = key[2]\n\n\t\tif letter == 'N':   self.slabs[y][x] = value\n\t\telif letter == 'S': self.slabs[y+1][x] = value\n\t\telif letter == 'W': self.columns[x][y] = value\n\t\telif letter == 'E': self.columns[x+1][y] = value\n\n\tdef __str__(self):\n\t\t\"\"\"Return a string representation of the maze.\"\"\"\n\t\tresult = []\n\n\t\tfor i in range(self.size + 1):\n\n\t\t\t# handle slabs first\n\t\t\ttext_row = []\n\t\t\tfor j in range(self.size+1):\n\t\t\t\ttext_row.append('+')\n\t\t\t\tif j < self.size:\n\t\t\t\t\tif self.slabs[i][j]:\n\t\t\t\t\t\tsymbol = '-'\n\t\t\t\t\telse:\n\t\t\t\t\t\tsymbol = ' '\n\t\t\t\t\ttext_row.append(symbol)\n\n\t\t\tresult.append(''.join(text_row))\n\n\t\t\t# handle columns second\n\t\t\ttext_row = []\n\t\t\tif i < self.size:\n\t\t\t\tfor j in range(self.size+1):\n\t\t\t\t\tif self.columns[j][i]:\n\t\t\t\t\t\tsymbol = '|'\n\t\t\t\t\telse:\n\t\t\t\t\t\tsymbol = ' '\n\t\t\t\t\ttext_row.append(symbol)\n\t\t\t\t\tif j < self.size:\n\t\t\t\t\t\ttext_row.append(' ')\n\n\t\t\tresult.append(''.join(text_row))\n\n\t\treturn '\\n'.join(result)\n\n\tdef __len__(self):\n\t\t\"\"\"Return the side length of the maze.\"\"\"\n\t\treturn self.size\n\n\tdef __repr__(self):\n\t\t\"\"\"Return a representation of the maze.\"\"\"\n\t\treturn 'Maze Object with side length ' + str(self.size)\n\ndef fill_maze_all_walls(maze):\n\t\"\"\"Fill maze with all possible walls.\"\"\"\n\tfor row in maze.slabs:\n\t\tfor i in range(len(row)):\n\t\t\trow[i] = True\n\tfor row in maze.columns:\n\t\tfor i in 
range(len(row)):\n\t\t\trow[i] = True\n\nclass Node:\n\t\"\"\"A Node represents a location with x,y coordinates.\"\"\"\n\n\tdef __init__(self, x, y):\n\t\t\"\"\"\n\t\tInitialize Node object.\n\n\t\tArgs:\n\t\t\tx (int) -- x coordinate.\n\t\t\ty (int) -- y coordinate.\n\t\t\"\"\"\n\t\tself.x = x\n\t\tself.y = y\n\n\tdef __eq__(self, other):\n\t\t\"\"\"\n\t\tEvaluate whether or not two nodes are equivalent.\n\n\t\tArgs:\n\t\t\tother (Node) -- Node to be compared.\n\n\t\tReturns:\n\t\t\tbool that indicates whether or not self and other are\n\t\t\tequivalent in their x and y values.\n\t\t\"\"\"\n\t\treturn self.x == other.x and self.y == other.y\n\n\tdef __getitem__(self, key):\n\t\t\"\"\"\n\t\tReturn x or y value of Node.\n\n\t\tArgs:\n\t\t\tkey (int) -- Indication of x or y coordinate (0 or 1).\n\n\t\tReturns:\n\t\t\tint that is the desired coordinate value.\n\t\t\"\"\"\n\t\tif key == 0: return self.x\n\t\telif key == 1: return self.y\n\t\traise KeyError(\"Key:\", key, \" not recognized. Try [0] or [1].\")\n\n\tdef __str__(self):\n\t\t\"\"\"Return string representation of Node.\"\"\"\n\t\treturn \"(\" + str(self.x) + \",\" + str(self.y) + \")\"\n\n\tdef move_to(self, direction):\n\t\t\"\"\"\n\t\tCreate and return a Node that is one space over from the\n\t\tself Node, in the direction desired.\n\n\t\tArgs:\n\t\t\tdirection (str) -- Cardinal direction that the new Node will\n\t\t\tbe created in.\n\n\t\tReturns:\n\t\t\tA Node that is one space over in the desired direction.\n\t\t\"\"\"\n\t\tx = self.x\n\t\ty = self.y\n\n\t\tif direction == 'N': y -= 1\n\t\telif direction == 'S': y += 1\n\t\telif direction == 'W': x -= 1\n\t\telif direction == 'E': x += 1\n\n\t\treturn Node(x,y)\n\n\tdef copy(self):\n\t\t\"\"\"Return copy of Node.\"\"\"\n\t\treturn Node(self.x, self.y)\n\nclass Edge:\n\t\"\"\"Represents an edge between two Nodes.\"\"\"\n\n\tdef __init__(self, first, second, weight=None):\n\t\t\"\"\"\n\t\tInitialize Edge object.\n\n\t\tArgs:\n\t\t\tfirst (Node) -- The first node in the Edge.\n\t\t\tsecond (Node) -- The second node in the Edge.\n\t\t\tweight (int) -- The weight of the edge (default: None).\n\t\t\"\"\"\n\t\tself.first = first\n\t\tself.second = second\n\t\tself.weight = weight\n\n\tdef __contains__(self, item):\n\t\t\"\"\"\n\t\tDetermine whether or not item is a member of Edge.\n\n\t\tArgs:\n\t\t\titem (Node) -- Node to search for in the Edge.\n\n\t\tReturns:\n\t\t\tbool indicating whether or not item is a member of Edge.\n\t\t\"\"\"\n\t\treturn self.first == item or self.second == item\n\n\tdef __eq__(self, other):\n\t\t\"\"\"\n\t\tDetermine whether or not self and other are equivalent edges,\n\t\twithout regard for Node order.\n\n\t\tArgs:\n\t\t\tother (Edge) -- The Edge self will be compared to.\n\n\t\tReturns:\n\t\t\tbool indicating whether or not the two edges are equivalent.\n\t\t\"\"\"\n\t\treturn (self.first == other.first and self.second == other.second) or (self.first == other.second and self.second == other.first)\n\n\tdef __lt__(self, other):\n\t\t\"\"\"\n\t\tDetermine whether or not self has a lower weight than other.\n\n\t\tArgs:\n\t\t\tother (Edge) -- Edge that will have its weight compared to that of self.\n\n\t\tReturns:\n\t\t\tbool indicating whether or not self has a lower weight than other.\n\t\t\"\"\"\n\t\treturn self.weight < other.weight\n\n\tdef __str__(self):\n\t\t\"\"\"Return string representation of Edge.\"\"\"\n\t\treturn str(self.first) + \"-->\" + str(self.second)\n\n\tdef get_direction(self):\n\t\t\"\"\"\n\t\tDetermine the cardinal direction that Edge 
points in.\n\n\t\tReturns:\n\t\t\tstr indicating the cardinal direction that Edge points in.\n\t\t\"\"\"\n\t\tresult = None\n\t\tif self.second.y > self.first.y:\n\t\t\tresult = 'S'\n\t\telif self.second.y < self.first.y:\n\t\t\tresult = 'N'\n\t\telif self.second.x > self.first.x:\n\t\t\tresult = 'E'\n\t\telif self.second.x < self.first.x:\n\t\t\tresult = 'W'\n\t\treturn result\n\ndef find_unvisited_neighbors(pos, visited):\n\t\"\"\"\n\tDetermine all unvisited neighbors for a position.\n\n\tArgs:\n\t\tpos (Node) -- The position around which neighbors will be searched for.\n\t\tvisited ([bool][bool]) -- A 2D bool table indicating visited Nodes.\n\n\tReturns:\n\t\tlist of str's indicating which cardinal directions have unvisited Nodes.\n\t\"\"\"\n\tsize = len(visited)\n\tgood_neighbors = []\n\n\t# Check for the edge cases and check if the square in that direction\n\t# has been visited or not. If not, add it to the list of good_neighbors.\n\tif pos.x > 0 and not visited[pos.x - 1][pos.y]: good_neighbors.append('W')\n\tif pos.x < size - 1 and not visited[pos.x + 1][pos.y]: good_neighbors.append('E')\n\tif pos.y > 0 and not visited[pos.x][pos.y - 1]: good_neighbors.append('N')\n\tif pos.y < size - 1 and not visited[pos.x][pos.y + 1]: good_neighbors.append('S')\n\n\treturn good_neighbors\n\nclass MGAlgorithm:\n\t\"\"\"Template class for Maze Generation Algorithms.\"\"\"\n\n\tdef __init__(self, maze):\n\t\t\"\"\"Initialize MGAlgorithm object.\"\"\"\n\t\tself.maze = maze\n\t\tfill_maze_all_walls(self.maze)\n\t\tself.visited = [[False for j in range(len(maze))]\n\t\t\t\t\t\t for i in range(len(maze))]\n\n\tdef step(self):\n\t\t\"\"\"\n\t\tRemove one wall and visit a new square on the maze.\n\n\t\tReturns:\n\t\t\tNone. Should return None or an Edge in an extended class.\n\t\t\"\"\"\n\t\treturn None # or return an edge.\n\nclass PriorityQueue:\n\t\"\"\"\n\tQueue that always pops the highest priority item in the queue.\n\n\tImplementation Description:\n\t\tThis priority queue uses a binary heap abstracted over a\n
\t\tdynamic list. Using the insert, get_min, and pop methods\n\t\tallows the queue to maintain the heap property.\n\t\"\"\"\n\n\tdef __init__(self):\n\t\t\"\"\"Initialize PriorityQueue.\"\"\"\n\t\tself.queue = [None]\n\n\tdef insert(self, item):\n\t\t\"\"\"\n\t\tInsert item into the queue and maintain the heap property.\n\n\t\tArgs:\n\t\t\titem -- any object that can be ordered with another object.\n\t\t\"\"\"\n\t\tif type(item) is not list:\n\t\t\tself.insert([item])\n\t\t\treturn\n\t\tfor element in item:\n\t\t\tself.queue.append(element)\n\t\t\tself.percolate_up(len(self))\n\n\tdef pop(self, index):\n\t\t\"\"\"\n\t\tRemove item at index, maintain heap property, and return the\n\t\titem.\n\n\t\tArgs:\n\t\t\tindex (int) -- Indicates the index of the item to remove.\n\n\t\tReturns:\n\t\t\titem of arbitrary type.\n\t\t\"\"\"\n\t\tresult = self.queue.pop(index+1)\n\t\tif len(self) - index > 1:\n\t\t\tself.queue.insert(index+1, self.queue.pop(len(self)))\n\t\t\tself.percolate_down(index+1)\n\n\t\treturn result\n\n\tdef __len__(self):\n\t\t\"\"\"Return the number of items in the queue.\"\"\"\n\t\treturn len(self.queue) - 1\n\n\tdef get_min(self):\n\t\t\"\"\"Return the highest priority item & keep heap property.\"\"\"\n\t\treturn self.pop(0)\n\n\tdef percolate_up(self, index): # TODO: change to iterative.\n\t\t\"\"\"\n\t\tCompare the item at index to its parent and switch it with\n\t\tits parent if it has higher priority, continuing to percolate\n\t\tup until it is either the highest priority item or its parent\n\t\thas a higher priority.\n\t\t\"\"\"\n\t\tif index == 1:\n\t\t\treturn\n\n\t\tcurrent = self.queue[index]\n\t\tparent = self.queue[index // 2]\n\t\tif current < parent:\n\t\t\ttemp = current\n\t\t\tself.queue[index] = parent\n\t\t\tself.queue[index // 2] = temp\n\t\t\tself.percolate_up(index // 2)\n\n\tdef percolate_down(self, index): # TODO: change to iterative.\n\t\t\"\"\"\n\t\tCompare the item at index to its children and switch it with\n\t\tthe highest priority child if that child is of higher priority\n\t\tthan the item at index, continuing until it is a\n\t\tparent that has no higher priority children.\n\t\t\"\"\"\n\t\tcurrent = self.queue[index]\n\n\t\tif index > len(self) / 2:\n\t\t\treturn\n\n\t\tchild = index * 2\n\t\tif index * 2 + 1 <= len(self):\n\t\t\tchallenger = index * 2 + 1\n\t\t\tif self.queue[challenger] < self.queue[child]: # pick the smaller (higher priority) child\n\t\t\t\tchild = challenger\n\n\t\tif self.queue[child] < current:\n\t\t\ttemp = current\n\t\t\tself.queue[index] = self.queue[child]\n\t\t\tself.queue[child] = temp\n\t\t\tself.percolate_down(child)\n\nclass DepthFirstMazeGenerator(MGAlgorithm):\n\t\"\"\"\n\tMaze generation algorithm that uses a randomized depth\n\tfirst search.\n\t\"\"\"\n\n\tdef __init__(self, maze):\n\t\t\"\"\"\n\t\tInitialize DepthFirstMazeGenerator.\n\n\t\tArgs:\n\t\t\tmaze (Maze) -- The maze object that will be turned into an\n\t\t\tactual maze by the algorithm.\n\t\t\"\"\"\n\t\tsuper().__init__(maze)\n\n\t\tself.start = Node(0,0)\n\n\t\tself.visited[self.start.x][self.start.y] = True\n\n\t\tself.move_memory = [self.start.copy()]\n\n\tdef step(self):\n\t\t\"\"\"\n\t\tDetermines a random square to move to from its current\n\t\tposition.\n\n\t\tReturns:\n\t\t\tEdge corresponding to the step that is taken.\n\t\t\"\"\"\n\t\twhile True:\n\t\t\tif len(self.move_memory) == 0:\n\t\t\t\treturn None\n\n\t\t\tcurr_pos = self.move_memory[-1]\n\n\t\t\tmove_options = find_unvisited_neighbors(curr_pos,\n\t\t\t\t\t\t\t\t\t\t\t\t\tself.visited)\n\n\t\t\tif len(move_options) == 0:\n
\t\t\t\tself.move_memory.pop()\n\t\t\t\tcontinue\n\n\t\t\tdirection = choice(move_options)\n\n\t\t\tself.maze[curr_pos.x, curr_pos.y, direction] = False\n\n\t\t\tnew_position = curr_pos.move_to(direction)\n\n\t\t\tself.move_memory.append(new_position)\n\n\t\t\tself.visited[new_position.x][new_position.y] = True\n\n\t\t\treturn Edge(curr_pos, new_position)\n\nclass BinaryTreeMazeGenerator(MGAlgorithm):\n\t\"\"\"\n\tMaze generation algorithm that creates a binary branching tree.\n\t\"\"\"\n\n\tdef __init__(self, maze):\n\t\t\"\"\"\n\t\tInitialize BinaryTreeMazeGenerator.\n\n\t\tArgs:\n\t\t\tmaze (Maze) -- The maze that will be mutated into an actual\n\t\t\tmaze by the algorithm.\n\t\t\"\"\"\n\t\tsuper().__init__(maze)\n\n\n\t\tself.root_position = Node(0, self.maze.size-1)\n\t\tself.wandering_position = self.root_position.copy()\n\t\tself.finished = False\n\t\tself.traversing = True\n\n\tdef traverse(self):\n\t\t\"\"\"Create an exit from the current position to left or top.\"\"\"\n\t\tx = self.wandering_position.x\n\t\ty = self.wandering_position.y\n\t\tif self.visited[x][y]:\n\t\t\treturn None\n\n\t\tself.visited[x][y] = True\n\n\t\toptions = ['N', 'W']\n\t\tif y == 0: options.remove('N')\n\t\tif x == 0: options.remove('W')\n\n\t\tif len(options) == 0:\n\t\t\treturn None # i.e.: top left corner has been reached.\n\n\t\tdirection = choice(options)\n\t\tself.maze[x, y, direction] = False # remove wall\n\n\t\tnew_position = self.wandering_position.move_to(direction)\n\t\tresult = Edge(self.wandering_position, new_position)\n\t\tself.wandering_position = new_position\n\n\t\treturn result\n\n\tdef increment(self):\n\t\t\"\"\"Shift the root_position by 1 and handle overflow.\"\"\"\n\t\tself.root_position.x += 1\n\t\tif self.root_position.x >= self.maze.size:\n\t\t\tself.root_position.x = 0\n\t\t\tself.root_position.y -= 1\n\n\n\tdef step(self):\n\t\t\"\"\"\n\t\tDetermine an unvisited square and create a path out of it.\n\n\t\tReturns:\n\t\t\tEdge corresponding to the step that is taken.\n\t\t\"\"\"\n\t\tif self.finished:\n\t\t\treturn None\n\n\t\twhile True:\n\t\t\tif not self.traversing:\n\t\t\t\tself.increment()\n\t\t\t\tself.wandering_position = self.root_position.copy()\n\t\t\t\tif self.root_position.y < 0:\n\t\t\t\t\tself.finished = True\n\t\t\t\t\treturn None\n\t\t\t\tself.traversing = True\n\n\t\t\tmove = self.traverse()\n\t\t\tif move is not None:\n\t\t\t\treturn move\n\n\t\t\tself.traversing = False\n\nclass PrimsAlgorithmMazeGenerator(MGAlgorithm):\n\t\"\"\"\n\tMaze generation algorithm based on Prim's (greedy) algorithm.\n\t\"\"\"\n\n\tdef __init__(self, maze):\n\t\t\"\"\"\n\t\tInitialize PrimsAlgorithmMazeGenerator.\n\n\t\tArgs:\n\t\t\tmaze (Maze) -- The maze that will be mutated into an actual\n\t\t\tmaze by the algorithm.\n\t\t\"\"\"\n\t\tsuper().__init__(maze)\n\n\n\t\tself.start = Node(self.maze.size // 2, self.maze.size // 2)\n\n\t\tmax_weight = 1000\n\t\tself.queue = PriorityQueue()\n\n\t\trandom_int = lambda n : int(random() * n) + 1\n\n\t\tself.create_weighted_edges = lambda pos : \\\n\t\t\t[Edge(pos, pos.move_to(direction), random_int(max_weight))\n\t\t\t for direction in find_unvisited_neighbors(\n                                                pos, self.visited)]\n\n\t\tself.visited[self.start.x][self.start.y] = True\n\t\tself.queue.insert(self.create_weighted_edges(self.start))\n\n\n\tdef step(self):\n\t\t\"\"\"\n\t\tDetermines the lowest weight edge and moves to it.\n\n\t\tReturns:\n\t\t\tEdge corresponding to the step that is taken.\n\t\t\"\"\"\n\t\twhile True:\n
\t\t\tif len(self.queue) == 0:\n\t\t\t\treturn None\n\t\t\tmove = self.queue.get_min()\n\t\t\tfirst = move.first\n\t\t\tsecond = move.second\n\n\t\t\tif self.visited[second.x][second.y]:\n\t\t\t\tcontinue\n\n\t\t\tself.visited[second.x][second.y] = True\n\t\t\tself.maze[first.x, first.y, move.get_direction()] = False\n\n\t\t\tneighbors = self.create_weighted_edges(second)\n\t\t\tself.queue.insert(neighbors)\n\n\t\t\treturn move\n","sub_path":"MazeGenerators.py","file_name":"MazeGenerators.py","file_ext":"py","file_size_in_byte":14712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
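+All three generators share the MGAlgorithm interface, so driving any of them to completion is the same loop; a short usage sketch (it would run if appended to this module):
+maze = Maze(8)                         # the generator's __init__ re-fills all walls
+gen = DepthFirstMazeGenerator(maze)    # or BinaryTreeMazeGenerator / PrimsAlgorithmMazeGenerator
+while gen.step() is not None:          # each step carves one wall
+    pass
+print(maze)                            # ASCII rendering via Maze.__str__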
+{"seq_id":"60188881","text":"# -*-coding:utf-8-*-\nimport datetime\nimport os\nfrom threading import Timer\nimport schedule\nimport itchat\nfrom itchat.content import *\nimport time\nimport GlobalData\nimport threading\nfrom modle import NewsModle, AutoSendModle\n\n# 工程目录\n\noutputs_path = os.path.abspath(os.path.join(os.getcwd(), \"output/\"))\n# 生成文件目录\noutputs_pictures_path = outputs_path + \"/pictures/\"\n\n\n# 群发文本\n@itchat.msg_register([TEXT], isGroupChat=True)\ndef text_reply(msg):\n    # group_list = [u'蓝师傅小号8人群', u'蓝师傅专属1']\n    # group_name = []\n    # for group in group_list:\n    #     chat = itchat.search_chatrooms(name=group)\n    #     if len(chat) > 0:\n    #         group_name.append(chat[0]['UserName'])\n    AutoSendModle.autoSendToGroupMember(msg)\n\n\n@itchat.msg_register([TEXT])\ndef text_reply(msg):\n    try:\n        # print(msg)\n        name = msg['User'].NickName\n        fromUserName = msg['FromUserName']\n        print('fromUserName = ' + fromUserName)\n        remarkName = msg['User'].RemarkName  # 备注\n        if (remarkName is not None):\n            name = remarkName\n        text = msg.text\n        print(msg)\n        print(\"文本消息--> \" + name + \" :\" + text + '  ' + str(datetime.datetime.now()))\n        AutoSendModle.autoSend(msg, text)\n    except Exception as e:\n        msg.user.send(e.args)\n        pass\n\n\n# @itchat.msg_register([MAP, CARD, NOTE, SHARING])\n# def text_reply(msg):\n#     print('%s: %s' % (msg.type, msg.text))\n#     msg.user.send('%s: %s' % (msg.type, msg.text))\n#\n#\n@itchat.msg_register([PICTURE, RECORDING, ATTACHMENT, VIDEO])\ndef download_files(msg):\n    msg.download(outputs_pictures_path + msg.fileName)\n    typeSymbol = {\n        PICTURE: 'img',\n        VIDEO: 'vid', }.get(msg.type, 'fil')\n    print('%s: %s' % (typeSymbol, msg.fileName))\n    return '@%s@%s' % (typeSymbol, outputs_pictures_path + msg.fileName)\n#\n#\n# @itchat.msg_register(FRIENDS)\n# def add_friend(msg):\n#     msg.user.verify()\n#     msg.user.send('Nice to meet you!')\n\n\n# 每个半个小时发依次信息貌似能防止掉线\ndef loop_send():\n    global count\n    text = '现在时间:' + str(datetime.datetime.now())\n    itchat.send(text, 'filehelper')\n    # itchat.send('2小时报时',toUserName=itchat.search_chatrooms(name=u'蓝师傅')[0]['UserName'])\n    count += 1\n    if count < 10000:\n        Timer(7200, loop_send).start()\n\n\ndef lc():\n    print('登录成功')\n    itchat.send('登录成功!', 'filehelper')\n    itchat.send('登录成功!', toUserName=getAdminUserName())\n\n    author = itchat.search_friends()\n    author.send('greeting, littlecoder!')\n    # 获取微信通讯录\n    # friends = itchat.get_friends()\n    # print(friends)\n\n\n    t = threading.Thread(target=threadAction)\n    t.start()\n\n\n\ndef ec():\n    print('退出登录')\n\n\"\"\"子线程开启定时任务\"\"\"\ndef threadAction():\n\n    schedule.every().day.at(\"18:20\").do(everyDayJob,\"下班啦\")\n\n    schedule.every().day.at(\"21:00\").do(everyDayJob,\"跑步时间到\")\n\n    schedule.every().day.at(\"22:30\").do(everyDayJob,\"早点洗澡\")\n\n    schedule.every().day.at(\"23:00\").do(everyDayJob,\"早点睡觉\")\n\n\n\n    while True:\n        schedule.run_pending()\n        time.sleep(10)\n    pass\n\n\ndef everyDayJob(test):\n    print('任务定时执行  everyDayJob ' + test)\n    if(\"每日一文\" == test):\n        test = NewsModle.getNewsToday()\n    elif(\"随机一文\" == test):\n        test = NewsModle.getNewsRandom()\n\n    itchat.send(test, toUserName=getAdminUserName())\n    itchat.send(test, 'filehelper')\n    pass\n\n# 获取主号UserName\ndef getAdminUserName():\n    user = 
itchat.search_friends(name='蓝师傅')\n    # 回来的数据不能用json解析,只能截取\n    userName = \"@35836a4154ddc94dcf0ea2a42db4ed18\"\n    # print(type(user[0]))  # \n    print(user[0]['UserName'])\n    userName = user[0]['UserName']\n    return userName\n\n\nif __name__ == '__main__':\n    GlobalData._init()\n    count = 0\n    Timer(10800, loop_send).start()\n    # 退出程序后暂存登陆状态\n    itchat.auto_login(enableCmdQR=2, hotReload=True, loginCallback=lc, exitCallback=ec)\n    # itchat.auto_login()\n    itchat.run()\n","sub_path":"wechat/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"603418457","text":"import tensorflow as tf\r\nimport tensorflow_datasets as tfds\r\n\r\n#import helper libraries\r\nimport math\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n#improve progress bar display\r\nimport tqdm\r\nimport tqdm.auto\r\ntqdm.tqdm = tqdm.auto.tqdm\r\n\r\nclass FMNIST():\r\n    def __init__(self):\r\n        self.dataset, self.metadata = tfds.load('fashion_mnist', as_supervised=True, with_info=True)\r\n        self.train_dataset, self.test_dataset = self.dataset['train'], self.dataset['test']\r\n        self.num_train_examples = self.metadata.splits['train'].num_examples\r\n        self.num_test_examples = self.metadata.splits['test'].num_examples\r\n\r\n    def PreProcess(self, images, labels):\r\n        images = tf.cast(images, tf.float32)\r\n        images /= 255\r\n        return images, labels\r\n\r\n    def generateModel(self):\r\n        model = tf.keras.Sequential([\r\n            tf.keras.layers.Conv2D(32, (3, 3), padding='same', activation=tf.nn.relu, input_shape=(28, 28, 1)),\r\n            tf.keras.layers.MaxPooling2D((2, 2), strides=2),\r\n            tf.keras.layers.Conv2D(32, (3, 3), padding='same', activation=tf.nn.relu, input_shape=(28, 28, 1)),\r\n            tf.keras.layers.MaxPooling2D((2, 2), strides=2),\r\n            tf.keras.layers.Flatten(),\r\n            tf.keras.layers.Dense(128, activation=tf.nn.relu),\r\n            tf.keras.layers.Dense(10, activation=tf.nn.softmax)\r\n        ])\r\n        model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])\r\n        return model\r\n\r\n    def train(self, BATCH_SIZE, model):\r\n        self.train_dataset = self.train_dataset.repeat().shuffle(self.num_train_examples).batch(BATCH_SIZE)\r\n        history = model.fit(self.train_dataset, epochs=5, steps_per_epoch=math.ceil(self.num_train_examples / BATCH_SIZE))\r\n        return history\r\n\r\n    def test(self, BATCH_SIZE, model):\r\n        self.test_dataset = self.test_dataset.batch(BATCH_SIZE)\r\n        test_loss, test_accuracy = model.evaluate(self.test_dataset, steps=math.ceil(self.num_test_examples / BATCH_SIZE))\r\n        return test_loss,test_accuracy\r\n\r\n    def display_single_img(self,index):\r\n        for image, label in self.test_dataset.take(index):\r\n            break\r\n        image = image.numpy().reshape((28, 28))\r\n        plt.figure()\r\n        plt.imshow(image, cmap=plt.cm.binary)\r\n        plt.colorbar()\r\n        plt.grid(False)\r\n        plt.show()\r\n\r\n    def plot_results(self, history):\r\n        plt.xlabel(\"Epochs numbers\")\r\n        plt.ylabel(\"Loss Magnitude\")\r\n        plt.plot(history.history['loss'])\r\n        plt.show()\r\n        plt.xlabel(\"Epochs numbers\")\r\n        plt.ylabel(\"Accuracy Magnitude\")\r\n        plt.plot(history.history['accuracy'])\r\n        plt.show()\r\n\r\n\r\n\r\n\r\ndef main():\r\n    object1 = FMNIST()\r\n    BATCH_SIZE = 32\r\n    object1.train_dataset = object1.train_dataset.map(object1.PreProcess)\r\n    object1.test_dataset = object1.test_dataset.map(object1.PreProcess)\r\n    model = object1.generateModel()\r\n    history = object1.train(BATCH_SIZE, model)\r\n    test_loss, test_accuracy = object1.test(BATCH_SIZE, model)\r\n    object1.plot_results(history)\r\n    print(\"Testing accuracy =\",test_accuracy)\r\n    print(\"Testing loss = \",  test_loss)\r\n    object1.display_single_img(10)\r\n\r\nmain()\r\n\r\n\r\n\r\n\r\n","sub_path":"TensorFlow_Tutorials/5.1 Function CNN.py","file_name":"5.1 
Function CNN.py","file_ext":"py","file_size_in_byte":3259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
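+Once trained, the model can score a batch directly; a small sketch assuming main() is modified to return object1 and model (this mirrors standard tf.keras usage rather than anything specific to this file):
+import numpy as np
+
+for images, labels in object1.test_dataset.take(1):  # one already-batched test batch
+    probs = model.predict(images)                    # shape (batch, 10) softmax outputs
+    preds = np.argmax(probs, axis=1)
+    print(preds[:10], labels.numpy()[:10])           # predicted vs. true classes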
+{"seq_id":"194867730","text":"import pymel.core as pm\r\nimport logging \r\n_logger = logging.getLogger(__name__)\r\n\r\n\r\n\r\ndef align(jnt1=None,jnt2=None,jnt3=None,viz=True,helper=None):\r\n    \"\"\" aligns any 3 joints\"\"\"\r\n\r\n    jnt1 = jnt1 or pm.selected()[0]\r\n    jnt2 = jnt2 or pm.selected()[1]\r\n    jnt3 = jnt3 or pm.selected()[2]\r\n    \r\n    if helper:\r\n        helper = pm.PyNode(helper)\r\n    elif pm.selected() and len(pm.selected()) == 4:\r\n        helper = pm.selected()[3]\r\n\r\n\r\n    jnt1par = jnt1.getParent()\r\n    jnt1Children = jnt1.getChildren()\r\n    \r\n    jnt2par = jnt2.getParent()\r\n    jnt2Children = jnt2.getChildren()\r\n        \r\n    jnt3par = jnt3.getParent()\r\n    jnt3Children = jnt3.getChildren()\r\n\r\n    jnt1.setParent(world=True)\r\n    jnt2.setParent(world=True)\r\n    jnt3.setParent(world=True)\r\n    if helper:\r\n        helper.setParent(world=True)\r\n\r\n\r\n    allChildren = jnt1Children + jnt2Children + jnt3Children\r\n    for ch in allChildren:\r\n        ch.setParent(world=True)\r\n\r\n\r\n    jnt1pos = jnt1.getTranslation()\r\n    jnt2pos = jnt2.getTranslation()\r\n    jnt3pos = jnt3.getTranslation()\r\n    if helper:\r\n        helperpos = helper.getTranslation()\r\n    \r\n    # obtain xVec\r\n    xVec = jnt3pos - jnt2pos\r\n    _logger.debug(\"xVec = %s\" % xVec)\r\n    \r\n    if viz:\r\n        vecViz(xVec,jnt2,name=\"xVec\")\r\n    \r\n    \r\n    if helper:\r\n        oppVec = jnt1pos - helperpos\r\n    else:\r\n        oppVec = jnt1pos - jnt2pos\r\n    \r\n    if viz:\r\n        vecViz(oppVec,jnt2,name=\"oppVec\")\r\n\r\n\r\n    zVec = oppVec.cross(xVec)\r\n    if viz:\r\n        vecViz(zVec,jnt2,name=\"zVec\")\r\n        \r\n    yVec = xVec.cross(zVec)\r\n    if viz:\r\n        vecViz(yVec,jnt2,name=\"yVec\")\r\n        \r\n    # SHOULDER\r\n    xVec2 = jnt2pos - jnt1pos\r\n    if viz:\r\n        vecViz(xVec2,jnt1,\"xVec\")\r\n        vecViz(zVec,jnt1,\"zVec\")\r\n    \r\n    yVec2 = xVec2.cross(zVec)\r\n    if viz:\r\n        vecViz(yVec2,jnt1,name=\"yVec\") \r\n     \r\n    # SHOULDER MATRIX \r\n    jnt1M = pm.dt.Matrix(xVec2,yVec2,zVec,jnt1pos).homogenize()\r\n        \r\n    # ELBOW MATRIX    \r\n    jnt2M = pm.dt.Matrix(xVec,yVec,zVec,jnt2pos).homogenize()\r\n    \r\n    # WRIST MATRIX\r\n    jnt3M = pm.dt.Matrix(xVec,yVec,zVec,jnt3pos).homogenize()\r\n    \r\n    \r\n    # SET JOINTS TO MATRIX\r\n    jnt1.setMatrix(jnt1M)\r\n    jnt2.setMatrix(jnt2M)\r\n    jnt3.setMatrix(jnt3M)\r\n    \r\n    \r\n    # \r\n    jnt1.setParent(jnt1par)\r\n    pm.makeIdentity(jnt1,apply=True,t=False,r=True,s=False,n=False)\r\n    \r\n    \r\n    jnt2.setParent(jnt2par)   \r\n    pm.makeIdentity(jnt2,apply=True,t=False,r=True,s=False,n=False)\r\n     \r\n    jnt3.setParent(jnt3par)\r\n    pm.makeIdentity(jnt3,apply=True,t=False,r=True,s=False,n=False)\r\n    \r\n    \r\n    for child in jnt1Children:\r\n        try:\r\n            child.setParent(jnt1)\r\n        except:\r\n            _logger.debug(\"jnt1 child not properly being parented back in: %s\" %child)\r\n    \r\n    for child in jnt2Children:\r\n        try:\r\n            child.setParent(jnt2)\r\n        except:\r\n            _logger.debug(\"jnt2 child not properly being parented back in: %s\" %child)\r\n            \r\n    for child in jnt3Children:\r\n        try:\r\n            child.setParent(jnt3)\r\n        except:\r\n            _logger.debug(\"jnt3 child not properly being parented back in: %s\" %child)\r\n    \r\n\r\n\r\n\r\n\r\ndef 
vecViz(vector,tfm,name='vectorPointer'):\r\n    \"\"\" visual aid for vectors\"\"\"\r\n        \r\n    vec = pm.dt.Vector(vector)\r\n    grp = pm.group(em=True)\r\n    loc = pm.spaceLocator()\r\n    pointer = pm.cone(name=name,esw=360,ch=1,d=1,hr=20,ut=0,ssw=0,s=3,r=0.25,tol=0.01,nsp=1,ax=(1, 0, 0))[0]\r\n    \r\n    loc.setParent(grp)\r\n    pointer.setParent(grp)\r\n    \r\n    loc.setTranslation(vec)\r\n    pm.delete(pm.aimConstraint(loc,pointer,aimVector=(1,0,0)))\r\n    \r\n    vecNorm = vec.normal()\r\n    pointer.translate.set(vecNorm *(2.5,2.5,2.5))\r\n    \r\n    pm.delete(pm.pointConstraint(tfm,grp,mo=False))\r\n    pointer.setParent(world=True)\r\n    pm.delete(grp)\r\n\r\n    # ADDED FOR CLASS\r\n    import colorMesh\r\n    reload(colorMesh)\r\n    pm.select(pointer,r=True)\r\n    \r\n    if name==\"xVec\":\r\n        color = (1,0,0)\r\n    elif name == \"yVec\":\r\n        color = (0,1,0)\r\n    elif name == \"zVec\":\r\n        color = (0,0,1)\r\n    else:\r\n        color = (0.5,0.5,0.5)    \r\n    \r\n    colorMesh.assignLambert(meshes=pm.selected(), rgb=color)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"maya/CodeReference/www_cgcircuit_com/pymel/mod05_AligningJoints/alignJoints.py","file_name":"alignJoints.py","file_ext":"py","file_size_in_byte":4254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"217581589","text":"# D. Hellfeld 4/27/17\n# Various transforms\n\n# Imports\nimport numpy as np\nimport sys; sys.dont_write_bytecode = True\n\n\ndef Cart2Sphere(xyz):\n    '''\n    Convert array of xyz coordinates to theta and phi.\n    \n    Theta goes from {0,pi}\n    Phi goes from {0, 2*pi}\n    \n    Parameters\n    -----------\n    xyz (array, size=[npts, 3]) = Array of XYZ locations.\n    \n    Output\n    ------\n    theta (array, size=[npts]) = Polar angle (from +Z)\n    phi (array, size=[npts]) = Azimuthal angle (from +X in XY plane)\n    \n    '''\n    if len(np.shape(xyz)) < 2:\n        xyz = xyz[None, :]\n\n    r = np.linalg.norm(xyz, axis=1)\n    theta = np.arccos(xyz[:, 2] / r)\n    phi = np.arctan2(xyz[:, 1], xyz[:, 0])\n    phi[phi < 0] += 2 * np.pi\n    \n    return theta, phi\n","sub_path":"Transforms/transforms.py","file_name":"transforms.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"300192623","text":"# -*- coding: utf-8 -*-\n\ndef uri():\n    valores = []\n    par = 0\n    for i in range(0 , 5):\n        valores.append(input())\n        if(float(valores[i]) % 2 == 0): par += 1\n\n    print('{} valores pares'.format(par))\n\nif __name__ == '__main__':\n    uri()\n","sub_path":"iniciante/exerc1065.py","file_name":"exerc1065.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"82818367","text":"import tornado.web\nfrom dbMysql import dbMysql\nimport config\nimport json\nfrom easyOAuth.userinfo import Token\n\n# \nclass Handler(tornado.web.RequestHandler):\n    \n    def gotoErrorPage(self,error_code) :\n        self.set_header('Access-Control-Allow-Origin','*')\n        self.redirect('/o2b/v1.0.0/error/%d'% error_code )\n        \n    def checkAppKey(self):\n        if self.request.headers.get('app-key')!=config.App_Key :\n            r = False\n        else :\n            r = True\n        return r\n        \n        \n    def tokenToUser(self):\n        token=self.request.headers.get('Authorization')\n        if token is not None  :\n            myToken=Token(config.redisConfig)\n            try :\n                user=myToken.getUser(token).decode('utf-8')\n            except:\n                user=None\n        else :\n            user=None\n        return user\n    \n    def delUserToken(self):\n        token=self.request.headers.get('Authorization')\n        if token is not None  :\n            try :\n                myToken=Token(config.redisConfig)\n                s=myToken.delUser(token) # 成功删除应返回成功受影响的记录数\n            except:\n                s=0\n        else :\n            s=None\n        return s\n    \n\n    def options(self,id=''):\n        self.set_header('Access-Control-Allow-Origin','*')\n        self.set_header('Access-Control-Allow-Methods','GET,POST,PUT,DELETE,PATCH')\n        self.set_header('Access-Control-Allow-Headers', 'app-key,authorization,Content-type')\n      \n\n    def delete(self):  # 用户logout\n\n        if not self.checkAppKey() :\n            # 601 : 未经授权的第三方应用\n            self.gotoErrorPage(601)\n            return\n        \n        token=self.request.headers.get('Authorization')\n        user=self.tokenToUser()\n        if user is None :\n            # 602 : 未经登录授权的应用\n            self.gotoErrorPage(602)\n            return        \n\n        r=self.delUserToken()\n        \n        if r>0:\n            strLogs='User:%s Token:%s [ Redisk 删除成功,r.delte()=%d ]' % (user,token,r)\n        else :\n            strLogs='User:%s Token:%s [ Redisk 删除失败,r.delete()=%d ]' % (user,token,r)\n\n        try :\n            db=dbMysql(config.dbConfig)\n        except :\n            # 701 : 数据库连接失败\n            self.gotoErrorPage(701)\n            return\n        \n        #1. 查询产品属性\n        try :\n\n            db.begin()\n            \n            #2.1 更新 tbUser 表的用户最后一次登出时间 \n            sqlSelect=\"Select user from tbUser where user='%s' for update\" % (user)\n            db.query(sqlSelect)\n            \n            sqlUpdate =\"Update tbUser set lastLogout=now() where user='%s'\" % (user)\n            db.update(sqlUpdate)                    \n            \n            #2.2 插入 tbLogs 日志库;\n            sqlInsert = (\n              \"INSERT INTO tbLogs(user,level,content,createTime) \"\n              \"VALUES (%s, %s, %s, now())\"\n            )\n            \n            strLogs+=' Logout 操作完成.'\n            addressId=db.save(sqlInsert,(user,'USE',strLogs))\n            \n            db.commit()\n            \n        except :\n            db.rollback()\n            # 702 : SQL执行失败\n            self.gotoErrorPage(702)\n            return\n        \n        #3. 
返回\n        self.set_header('Access-Control-Allow-Origin','*')\n        self.set_header('Access-Control-Expose-Headers','Authorization')\n        self.set_header('Authorization', '')     \n        # 下面需要观察,有无Header的返回\n        self.set_status(204)  # 204 操作成功,无返回\n        return\n\n ","sub_path":"easyOAuth/logout.py","file_name":"logout.py","file_ext":"py","file_size_in_byte":3630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
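+A hypothetical client call against the handler above; the host and route are placeholders, but the headers are exactly what Handler.delete() checks:
+import requests
+
+resp = requests.delete(
+    "http://localhost:8000/logout",        # the route mapping lives elsewhere in the app
+    headers={"app-key": "...",             # must equal config.App_Key, else error 601
+             "Authorization": "..."})      # must resolve to a user in Redis, else error 602
+print(resp.status_code)                    # 204 on success, with no body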
+{"seq_id":"5193066","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@Statement: Sorry for this shit code \n@Time     : 2020/4/21 11:09\n@Author   : Jarvis\n\"\"\"\nfrom main_code.sql_file import tabs, sqls\nfrom main_code.data_base import get_mysql_conn, db_info\n\n\ninsert_sql = \"insert into `tb_model_info` VALUES(%s, %s, %s, %s, %s)\"\nvalues = [('298e66d5-3420-41d3-8a0e-f4578a962769', '本体模型', '用于重训练生成新的本体预警模型', 'major', '1'),\n          ('70041dec-3fb2-4ce0-a4fd-e9bcdd87792d', '轴温模型', '轴温故障预警模型', 'shaft', '0'),\n          ('7a760093-3dac-43a3-bc5d-52d10c1640f9', '油温模型', '用于重训练生成新的油温预警模型', 'oil', '1'),\n          ('7cb1ca48-e28e-4735-bbc5-a64fae2e9d0a', '轴温模型', '用于重训练生成新的轴温预警模型', 'shaft', '1'),\n          ('87164820-d57f-4ab8-999d-ce88a8f98015', '本体模型', '本体故障预警模型', 'major', '0'),\n          ('ac740815-ec9a-4c17-8dd5-6b5dcbab3a67', '油温模型', '油温故障预警模型', 'oil', '0')]\n\n\ndef init_table(logger):\n    mysql_conn = get_mysql_conn(logger)\n    cur = mysql_conn.cursor()\n    is_exist_sql = \"select table_name from information_schema.tables where table_schema='%s' and table_name='%s'\"\n    for table in tabs:\n        if cur.execute(is_exist_sql % (db_info['db'], table)):\n            pass\n        else:\n            logger.info(f'创建表{table}')\n            sql = sqls[table]\n            cur.execute(sql)\n            if table == 'tb_model_info':\n                cur.executemany(insert_sql, values)\n            mysql_conn.commit()\n","sub_path":"main_code/initialize.py","file_name":"initialize.py","file_ext":"py","file_size_in_byte":1562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"512160541","text":"import logging, os\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nlogging.disable(logging.WARNING)\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"3\"\n\nimport argparse\nimport numpy as np\nimport tensorflow as tf\nimport time\nimport pickle\n\nimport maddpg.common.tf_util as U\nfrom maddpg.trainer.macl import MACLAgentTrainer\nimport tensorflow.contrib.layers as layers\n\n# python train_hanabi.py --num-episode 100000 --good-mic 0.001 --save-dir ./Hanabi/MACL/0p001/a1/ --plots-dir ./Hanabi/MACL/0p001/a1/ --exp-name a1_0p001\n\n# python train_hanabi.py --num-episode 100000 --good-mic 0 --save-dir ./Hanabi/DDPG/a1/ --plots-dir ./Hanabi/DDPG/a1/ --exp-name a1\n\n# python train_hanabi.py --num-episode 100000 --good-mic 0 --save-dir ./Hanabi/Test/a1/ --plots-dir ./Hanabi/Test/a1/ --exp-name test1\n\ndef parse_args():\n    parser = argparse.ArgumentParser(\"Reinforcement Learning experiments for multiagent environments\")\n    # Environment\n    parser.add_argument(\"--num-episodes\", type=int, default=100000, help=\"number of episodes\")\n    parser.add_argument(\"--good-policy\", type=str, default=\"maddpg\", help=\"policy for good agents\")\n    parser.add_argument(\"--good-mic\", type=str, default=1e-3, help=\"mutual information coefficient for good agents\")\n    parser.add_argument(\"--max-episode-len\", type=int, default=100, help=\"maximum episode length, not used to limit game length\" )\n    # Core training parameters\n    parser.add_argument(\"--lr\", type=float, default=1e-2, help=\"learning rate for Adam optimizer\")\n    parser.add_argument(\"--gamma\", type=float, default=0.99, help=\"discount factor\")\n    parser.add_argument(\"--batch-size\", type=int, default=1024, help=\"number of episodes to optimize at the same time\")\n    parser.add_argument(\"--num-units\", type=int, default=128, help=\"number of units in the mlp\")\n    parser.add_argument(\"--sleep-regimen\", action=\"store_true\", default=False, help=\"only use mic while sleeping\")\n    # Checkpointing\n    parser.add_argument(\"--exp-name\", type=str, default=\"a1\", help=\"name of the experiment\")\n    parser.add_argument(\"--save-dir\", type=str, default=\"./tmp/policy/\", help=\"directory in which training state and model should be saved\")\n    parser.add_argument(\"--save-rate\", type=int, default=1000, help=\"save model once every time this many episodes are completed\")\n    parser.add_argument(\"--load-dir\", type=str, default=\"\", help=\"directory in which training state and model are loaded\")\n    # Evaluation\n    parser.add_argument(\"--restore\", action=\"store_true\", default=False)\n    parser.add_argument(\"--display\", action=\"store_true\", default=False)\n    parser.add_argument(\"--benchmark\", action=\"store_true\", default=False)\n    parser.add_argument(\"--benchmark-iters\", type=int, default=10000, help=\"number of iterations run for benchmarking\")\n    parser.add_argument(\"--benchmark-dir\", type=str, default=\"./benchmark_files/\", help=\"directory where benchmark data is saved\")\n    parser.add_argument(\"--plots-dir\", type=str, default=\"./Hanabi/\", help=\"directory where plot data is saved\")\n    return parser.parse_args()\n\ndef mlp_model(input, num_outputs, scope, reuse=False, num_units=64, rnn_cell=None):\n    # This model takes as input an observation and returns values of all actions\n    with tf.variable_scope(scope, reuse=reuse):\n        out = input\n        out = layers.fully_connected(out, num_outputs=num_units, 
activation_fn=tf.nn.relu)\n        out = layers.fully_connected(out, num_outputs=num_units, activation_fn=tf.nn.relu)\n        out = layers.fully_connected(out, num_outputs=num_outputs, activation_fn=None)\n        return out\n\ndef make_env(arglist, benchmark=False):\n    from multiagent.scenarios.simple_hanabi import HanabiEnv\n\n    env = HanabiEnv()\n    return env\n\ndef get_trainers(env, obs_shape_n, arglist):\n    trainers = []\n    model = mlp_model\n    trainer = MACLAgentTrainer\n    for i in range(env.n):\n        act_shape_n = [env.action_space for i in range(env.n)]\n        trainers.append(trainer(\n            \"agent_%d\" % i, model, obs_shape_n, act_shape_n, i, arglist, agent_type=\"good\",\n            local_q_func=(arglist.good_policy=='ddpg')))\n    return trainers\n\ndef train(arglist):\n    with U.single_threaded_session():\n        # Create environment\n        env = make_env(arglist, arglist.benchmark)\n        \n        # Create agent trainers\n        obs_shape_n = [env.observation_space.shape for i in range(env.n)]\n        trainers = get_trainers(env, obs_shape_n, arglist)\n        print('Using good policy {}'.format(arglist.good_policy))\n\n        # Initialize\n        U.initialize()\n\n        # Load previous results, if necessary\n        if arglist.load_dir == \"\":\n            arglist.load_dir = arglist.save_dir\n        if arglist.display or arglist.restore or arglist.benchmark:\n            print('Loading previous state...')\n            U.load_state(arglist.load_dir)\n        \n        episode_rewards = [0.0]  # sum of rewards for all agents\n        agent_rewards = [[0.0] for _ in range(env.n)]  # individual agent reward\n        final_ep_rewards = []  # sum of rewards for training curve\n        final_ep_ag_rewards = []  # agent rewards for training curve\n        agent_info = [[[]]]  # placeholder for benchmarking info\n        saver = tf.train.Saver()\n        obs_n = env.reset()\n        episode_step = 0\n        train_step = 0\n        t_start = time.time()\n        done = 0\n        current_player_index = 0\n\n        no_op_actions = False \n\n        print('Starting iterations...')\n        while True:\n            # get action\n            current_player_obs = np.asarray(obs_n)\n            original_action = trainers[current_player_index].action(current_player_obs)\n\n            if(no_op_actions):\n                action = np.random.choice(np.linspace(0, env.action_space.n-1, num=env.action_space.n, dtype=int), 1, p=original_action)[0] \n                mask = env.getValidActions()\n\n                while(mask[action] == 0):\n                    action = np.random.choice(np.linspace(0, env.action_space.n-1, num=env.action_space.n, dtype=int), 1, p=original_action)[0] \n            else:\n                # get action mask\n                mask = env.getValidActions()\n                # zero out invalid options \n                masked_actions = mask * original_action\n                # normalize \n                masked_actions = masked_actions / np.nansum(masked_actions)\n                # Get action with given probability \n                if(np.isnan(masked_actions).any()):\n                    print(current_player_obs)\n                    print(masked_actions)\n                    print(np.nansum(masked_actions))\n                    print(original_action)\n                try:\n                    action = np.random.choice(np.linspace(0, env.action_space.n-1, num=env.action_space.n, dtype=int), 1, p=masked_actions)[0] \n                
except: \n                    print(\"Exception: choosing random action\")\n                    action = np.random.choice(np.linspace(0, env.action_space.n-1, num=env.action_space.n, dtype=int), 1)[0]\n\n            new_obs, rew, done, info = env.step(action)\n\n            #trainers[current_player_index].experience(current_player_obs, original_action, mask, rew, new_obs, done)\n            trainers[current_player_index].experience(current_player_obs, masked_actions, mask, rew, new_obs, done)\n\n            current_player_index += 1\n            if(current_player_index >= len(trainers)):\n                current_player_index = 0\n                \n            \n            obs_n = new_obs\n\n            episode_rewards[-1] += rew\n            agent_rewards[current_player_index][-1] += rew\n\n            if done:\n                obs_n = env.reset()\n                episode_step = 0\n                episode_rewards.append(0)\n                for a in agent_rewards:\n                    a.append(0)\n                agent_info.append([[]])\n                current_player_index = 0\n\n            # increment global step counter\n            train_step += 1\n\n            # for benchmarking learned policies\n            if arglist.benchmark:\n                for i, info in enumerate(info_n):\n                    agent_info[-1][i].append(info_n['n'])\n                if train_step > arglist.benchmark_iters and (done):\n                    file_name = arglist.benchmark_dir + arglist.exp_name + '.pkl'\n                    print('Finished benchmarking, now saving...')\n                    with open(file_name, 'wb') as fp:\n                        pickle.dump(agent_info[:-1], fp)\n                    break\n                continue\n\n            # for displaying learned policies\n            if arglist.display:\n                time.sleep(0.1)\n                env.render()\n                continue\n\n            # update all trainers, if not in display or benchmark mode\n            loss = None\n            for agent in trainers:\n                agent.preupdate()\n            for agent in trainers:\n                loss = agent.update(trainers, train_step)\n                if(loss is not None and agent.sleep_regimen and agent.agent_mic != 0 and train_step % 100 == 0): # Change sleep frequency here if desired\n                    original_policy_loss = loss[1]\n                    new_loss = agent.update(trainers, train_step, sleeping=True)[1]\n                    sleep_iteration = 0\n                    while((sleep_iteration < 10) and (new_loss < original_policy_loss * 1.05)):\n                        new_loss = agent.update(trainers, train_step, sleeping=True)[1]\n                        sleep_iteration += 1 \n                        #print(\"sleep walking\")\n\n            # save model, display training output\n            if done and  (len(episode_rewards) % arglist.save_rate == 0):\n                U.save_state(arglist.save_dir, saver=saver)\n                print(\"steps: {}, episodes: {}, mean episode reward: {}, time: {}\".format(\n                    train_step, len(episode_rewards), np.mean(episode_rewards[-arglist.save_rate:]), round(time.time()-t_start, 3)))\n                t_start = time.time()\n                # Keep track of final episode reward\n                final_ep_rewards.append(np.mean(episode_rewards[-arglist.save_rate:]))\n                for rew in agent_rewards:\n                    final_ep_ag_rewards.append(np.mean(rew[-arglist.save_rate:]))\n\n            # saves 
final episode reward for plotting training curve later\n            if len(episode_rewards) > arglist.num_episodes:\n                print(arglist.plots_dir)\n                print(arglist.exp_name)\n\n                rew_file_name = arglist.plots_dir + arglist.exp_name + '_rewards.pkl'\n                with open(rew_file_name, 'wb') as fp:\n                    pickle.dump(final_ep_rewards, fp)\n                agrew_file_name = arglist.plots_dir + arglist.exp_name + '_agrewards.pkl'\n                with open(agrew_file_name, 'wb') as fp:\n                    pickle.dump(final_ep_ag_rewards, fp)\n                print('...Finished total of {} episodes.'.format(len(episode_rewards)))\n                break\n\nif __name__ == '__main__':\n    arglist = parse_args()\n    train(arglist)\n","sub_path":"experiments/train_hanabi.py","file_name":"train_hanabi.py","file_ext":"py","file_size_in_byte":11065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"336671845","text":"\"\"\"\nThis utility script sets up aws credentials for user.\n**WARNING**: This script modifies current user's aws config.\n\"\"\"\nfrom os.path import expanduser, exists, join\nfrom os import environ, makedirs\nimport sys\n\n# Check mandatory environment variable settings\nENV_AWS_ACCESS_KEY_ID = \"AWS_ACCESS_KEY_ID\"\nENV_AWS_SECRET_ACCESS_KEY = \"AWS_SECRET_ACCESS_KEY\"\nfor v in [ENV_AWS_ACCESS_KEY_ID, ENV_AWS_SECRET_ACCESS_KEY]:\n    if v not in environ:\n        print(\"Environment variable {} not set. Aborted.\".format(v))\n        sys.exit(1)\n\nPROFILE_NAME=\"[k-eb-deploy]\"\n\ncredential_data = [\n    \"\",\n    \"# The profile {} is added by eb-app-template/deploy/setup_aws_profile.py\".format(PROFILE_NAME),\n    \"{}\".format(PROFILE_NAME),\n    \"aws_access_key_id = {}\".format(environ[ENV_AWS_ACCESS_KEY_ID]),\n    \"aws_secret_access_key = {}\".format(environ[ENV_AWS_SECRET_ACCESS_KEY])\n]\n\n\ndef main():\n    \"\"\"Add aws profile and credential for current user.\n    \"\"\"\n    # Prepare user specific aws config directory\n    user_aws_root = join(expanduser(\"~\"), \".aws\")\n    if not exists(user_aws_root):\n        makedirs(user_aws_root)\n\n    cred_file = join(user_aws_root, \"credentials\")\n    print(\"Checking if profile {} exists ...\".format(credential_data[2]))\n    cont = True\n    if exists(cred_file):\n        with open(cred_file, \"r\") as c_file:\n            if credential_data[2] in c_file.read():\n                print(\"Profile {} exists. No change to {}.\".format(credential_data[2], cred_file))\n                cont = False\n\n    if cont is True:\n        with open(cred_file, \"a\") as c_file:\n            c_file.write(\"\\n\".join(credential_data))\n        print(\"Appended profile {} to {}.\".format(credential_data[2], cred_file))\n\n\nif  __name__ ==\"__main__\": sys.exit(main())\n","sub_path":"deploy/setup_aws_profile.py","file_name":"setup_aws_profile.py","file_ext":"py","file_size_in_byte":1754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"232228243","text":"from matplotlib import pyplot as plt\nimport numpy as np\n\nepsVals = [0.05, 0.1, 0.15, 0.2]\nunivVals = [100, 1000, 10000]\ncolumns = [4, 5, 6, 7]\nqueryTypes = [\"rank\", \"quant\"]\n\n\ndef plotXYGraph(x, y, plotName, xLabel, yLabel, filepath):\n    print(x, \"VS\", y)\n    plt.title(plotName)\n    plt.xlabel(xLabel)\n    plt.ylabel(yLabel)\n    plt.plot(x, y, color=\"#ff6770\")\n    plt.savefig(filepath)\n    plt.close()\n\n\ndef plotXYGraphWithBounds(x, y, minY, maxY, plotName, xLabel, yLabel, filepath):\n    maxYval = 0\n    minYval = 1000000000000\n\n    for yVal in minY:\n        if yVal < minYval and yVal >= 0:\n            minYval = yVal\n\n    for yVal in maxY:\n        if yVal > maxYval:\n            maxYval = yVal\n\n    if minYval == maxYval:\n        maxYval += 1\n\n    plt.title(plotName)\n    plt.xlabel(xLabel)\n    plt.ylabel(yLabel)\n\n    ax = plt.gca()\n    ax.set_title(plotName, pad=20)\n\n    plt.plot(x, y, color=\"#009ddf\")\n\n    print([x[0], x[-1], minYval, maxYval])\n    plt.axis([x[0], x[-1], minYval, maxYval])\n\n    plt.fill_between(x, minY, maxY, alpha=0.2, edgecolor='#ff0084', facecolor='#ff0084',\n                     linewidth=1, linestyle='dashdot', antialiased=True)\n\n    plt.savefig(filepath)\n    plt.close()\n\n\ndef plotMemoryGraphs():\n    for qType in queryTypes:\n        for column in columns:\n            filename = f\"plotCsv/memory-time/{qType}-{column}.csv\"\n\n            for univ in univVals:\n                x = []\n                y = []\n                plotName = f\"{qType} variant with univ = {univ} for column {column}\"\n                xLabel = \"Eps\"\n                yLabel = \"Memory in bytes\"\n                filepath = f\"gkPlots/memory/{qType}-{column}/univ-{univ}.png\"\n\n                lineCount = 0\n                with open(filename) as reader:\n                    print(filename)\n                    for line in reader.readlines():\n                        if lineCount > 0:\n                            values = line.split(',')\n                            print(values[1])\n                            eps = float(values[1])\n                            univL = int(values[2])\n\n                            if univL == univ:\n                                memory = int(values[5]) / 8\n                                x.append(eps)\n                                y.append(memory)\n                        lineCount += 1\n\n                plotXYGraph(x, y, plotName, xLabel, yLabel, filepath)\n\n\ndef plotTimeGraphs():\n    for qType in queryTypes:\n        for column in columns:\n            filename = f\"plotCsv/memory-time/{qType}-{column}.csv\"\n\n            for univ in univVals:\n                x = []\n                y = []\n                plotName = f\"{qType} variant with univ = {univ} for column {column}\"\n                xLabel = \"Eps\"\n                yLabel = \"Time in seconds\"\n                filepath = f\"gkPlots/time/{qType}-{column}/univ-{univ}.png\"\n\n                lineCount = 0\n                with open(filename) as reader:\n                    for line in reader.readlines():\n                        if lineCount > 0:\n                            values = line.split(',')\n                            eps = float(values[1])\n                            univL = int(values[2])\n\n                            if univL == univ:\n                                time = float(values[4])\n                                x.append(eps)\n                                
y.append(time)\n                        lineCount += 1\n\n                plotXYGraph(x, y, plotName, xLabel, yLabel, filepath)\n\n\ndef plotQueryGraphs():\n    for qType in queryTypes:\n        for column in columns:\n            filename = f\"plotCsv/queries/{qType}-{column}.csv\"\n\n            for univ in univVals:\n                for eps in epsVals:\n                    x = []\n                    y = []\n                    yMin = []\n                    yMax = []\n\n                    plotName = f\"{qType} variant with univ = {univ} and eps = {eps} for column {column}\"\n                    yLabel = \"\"\n\n                    if qType == \"rank\":\n                        yLabel = \"Rank(x)\"\n                    else:\n                        yLabel = \"Quantile(x)\"\n\n                    xLabel = \"x (query)\"\n                    filepath = f\"gkPlots/queries/{qType}-{column}/univ-{univ}-eps-{eps}.png\"\n\n                    lineCount = 0\n                    with open(filename) as reader:\n                        for line in reader.readlines():\n                            if lineCount > 0:\n                                values = line.split(',')\n                                epsL = float(values[1])\n                                univL = int(values[2])\n\n                                if univL == univ and epsL == eps:\n                                    query = 0\n\n                                    if qType == \"rank\":\n                                        query = int(values[6])\n                                    else:\n                                        query = float(values[6])\n\n                                    estimate = 0\n\n                                    if qType == \"rank\":\n                                        estimate = int(values[7])\n                                    else:\n                                        estimate = float(values[9])\n\n                                    minAcceptable = int(values[10])\n                                    maxAcceptable = int(values[11])\n                                    x.append(query)\n                                    y.append(estimate)\n                                    yMin.append(minAcceptable)\n                                    yMax.append(maxAcceptable)\n\n                            lineCount += 1\n\n                    plotXYGraphWithBounds(\n                        x, y, yMin, yMax, plotName, xLabel, yLabel, filepath)\n\n\nplotMemoryGraphs()\nplotTimeGraphs()\nplotQueryGraphs()\n","sub_path":"Projeto_2/src/plot-gk.py","file_name":"plot-gk.py","file_ext":"py","file_size_in_byte":5833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"568787770","text":"# File  : i2c_accel.py\n# What  : Accelerometer sensor\n\nfrom i2c_base import i2c_sensor\nfrom collections import namedtuple\n\nimport math\n\nclass accel(i2c_sensor):\n    _I2C_ADDR = 0x19\n\n    _REG_CTRL_1     = 0x20\n    _REG_CTRL_4     = 0x23\n    _REG_OUT_X_L    = 0x28\n    _REG_OUT_X_H    = 0x29\n    _REG_OUT_Y_L    = 0x2a\n    _REG_OUT_Y_H    = 0x2b\n    _REG_OUT_Z_L    = 0x2c\n    _REG_OUT_Z_H    = 0x2d\n\n    # note : values recommended by ST's AppNote : 10Hz, full-scale, +-2gauss\n    _INI_REG_CTRL_1 = 0b01000111    # 50Hz, normal mode, all axis enabled\n    _INI_REG_CTRL_4 = 0b00000000    # high-res disable\n    _MASK_HI_RES    = 0b00001000\n\n    t_fscale = namedtuple('t_fscale','scale value')\n    FSCALE_2G = t_fscale(0.001,0b00000000)\n    FSCALE_4G = t_fscale(0.002,0b00010000)\n    FSCALE_8G = t_fscale(0.004,0b00100000)\n    FSCALE_16G = t_fscale(0.012,0b00110000)\n    _MASK_FSCALE = 0b00110000\n\n    # RAW sample filtering : new_sample*alpha + old_value*(1-alpha)\n    _FILTER_ALPHA = 1.0\n\n    def __init__(self,hi_res=True,fscale=FSCALE_2G,T_ms=0):\n        # instance variables\n        self.hi_res = hi_res                # hi_res 12 bit reading, low_res 10 bit reading\n        self.hi_res_factor = 2*int(hi_res)  # used by shifting operations and comp2 conversion\n        self.fscale = fscale\n        self.raw = [0,0,0]\n        self.raw_norm = [0,0,0]\n        self.angles = [0,0,0]       # roll,pitch,yaw\n\n        # superclass and sensor initialization\n        super(accel,self).__init__(accel._I2C_ADDR,T_ms=T_ms)\n\n    def i2c_init(self):\n        if(not self._check_st(ST_ERR_INI_BUS)):\n            self.write_byte(self._REG_CTRL_1,self._INI_REG_CTRL_1)\n            self.write_byte(self._REG_CTRL_4,self._INI_REG_CTRL_4)\n            # set hi-res\n            self._set_hi_res(self.hi_res)\n            # set full-scale\n            self._set_full_scale(self.fscale)\n        else:\n            pass\n            # TODO : do something else here\n\n    def _set_hi_res(self,hi_res):\n        # store new hi-res value and factor\n        self.hi_res = hi_res\n        self.hi_res_factor = 2*int(hi_res)\n        # read-modify-write\n        self.read_write_byte(self._REG_CTRL_4,self._MASK_HI_RES,self._MASK_HI_RES*int(hi_res))\n\n    def _set_full_scale(self,fscale):\n        # store new scale value\n        self.fscale = fscale\n        # read-modify-write\n        self.read_write_byte(self._REG_CTRL_4,self._MASK_FSCALE,self.fscale.value)\n\n    def _calc_angles(self):\n        \"\"\" calculate roll, pitch and yaw angles.\"\"\"\n        x_acc = self.get_x()\n        y_acc = self.get_y()\n        z_acc = self.get_z()\n        tmp = 0.0\n\n        # PITCH\n        tmp = (y_acc*y_acc)+(z_acc*z_acc)\n        self.angles[1] = -math.atan2(x_acc,math.sqrt(tmp))\n        # ROLL\n        tmp = (x_acc*x_acc)+(z_acc*z_acc)\n        self.angles[0] = math.atan2(y_acc,math.sqrt(tmp))\n\n    def i2c_read(self):\n        tmp = [0,0,0]\n        read = [0,0,0,0,0,0]\n        try:\n            read[0] = self.read_byte(self._REG_OUT_X_L)\n            read[1] = self.read_byte(self._REG_OUT_X_H)\n            read[2] = self.read_byte(self._REG_OUT_Y_L)\n            read[3] = self.read_byte(self._REG_OUT_Y_H)\n            read[4] = self.read_byte(self._REG_OUT_Z_L)\n            read[5] = self.read_byte(self._REG_OUT_Z_H)\n        except Exception as e:\n            self._set_st(ST_ERR_SAMPLE_PROCESSING)\n            return\n        else:\n            
self._clear_st(ST_ERR_SAMPLE_PROCESSING)\n            \n        try:\n            read_shift = 6-self.hi_res_factor\n            read_bits = 10+self.hi_res_factor\n            # X\n            tmp[0] = self._comp2_to_dec(((read[1]<<8)|read[0])>>read_shift,read_bits)\n            self.raw[0] = self._filter_sample(float(tmp[0])*self.fscale.scale,self.raw[0],self._FILTER_ALPHA)\n            # Y\n            tmp[1] = self._comp2_to_dec(((read[3]<<8)|read[2])>>read_shift,read_bits)\n            self.raw[1] = self._filter_sample(float(tmp[1])*self.fscale.scale,self.raw[1],self._FILTER_ALPHA)\n            # Z\n            tmp[2] = self._comp2_to_dec(((read[5]<<8)|read[4])>>read_shift,read_bits)\n            self.raw[2] = self._filter_sample(float(tmp[2])*self.fscale.scale,self.raw[2],self._FILTER_ALPHA)\n\n            # store normalized values\n            del tmp\n            tmp = float(math.sqrt(self.raw[0]*self.raw[0]+self.raw[1]*self.raw[1]+self.raw[2]*self.raw[2]))\n            if(tmp!= 0):\n                self.raw_norm[0] = (self.raw[0] / tmp)\n                self.raw_norm[1] = (self.raw[1] / tmp)\n                self.raw_norm[2] = (self.raw[2] / tmp)\n            else:\n                # do not update normalized values\n                pass\n        except Exception as e:\n            self._set_st(ST_ERR_SAMPLE_PROCESSING)\n            return\n        else:\n            self._clear_st(ST_ERR_SAMPLE_PROCESSING)\n        try:\n            # calculate roll,pitch & yaw\n            self._calc_angles()\n        except Exception as e:\n            self._set_st(ST_ERR_CALCULUS)\n        else:\n            self._clear_st(ST_ERR_CALCULUS)\n\n    def get_x(self,normalized=True):\n        return(self.raw_norm[0] if normalized else self.raw[0])\n    def get_y(self,normalized=True):\n        return(self.raw_norm[1] if normalized else self.raw[1])\n    def get_z(self,normalized=True):\n        return(self.raw_norm[2] if normalized else self.raw[2])\n\n    def get_roll(self,degrees=True):\n        return(math.degrees(self.angles[0]) if degrees else self.angles[0])\n    def get_pitch(self,degrees=True):\n        return(math.degrees(self.angles[1]) if degrees else self.angles[1])\n    def get_yaw(self,degrees=True):\n        return(math.degrees(self.angles[2]) if degrees else self.angles[2])\n","sub_path":"ulmo/sensors/i2c_accel.py","file_name":"i2c_accel.py","file_ext":"py","file_size_in_byte":5667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"527402053","text":"#!/usr/bin/env python\n\n'''\nto find four-digit vampire numbers\n\nhttp://en.wikipedia.org/wiki/Vampire_number\n'''\n\nfrom itertools import permutations\n\ndef find_vampire(num):\n\tnl = list(str(num))\n\tif len(nl) % 2 != 0:\n\t\tprint(\"length is not even!\")\n\t\treturn\n\n\tjj = permutations(nl, 4)\n\tfor cc in jj:\n\t\tn1 = int(cc[0] + cc[1])\n\t\tn2 = int(cc[2] + cc[3])\n\t\tif n1 * n2 == num:\n\t\t\tprint(str(num) + \" is a vampire number! \" + str(n1) + \" x \" + str(n2))\n\nif __name__ == '__main__':\n\tfor val in range(1000, 9999):\n\t\tfind_vampire(val)\n","sub_path":"python/vampire_number.py","file_name":"vampire_number.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"347796127","text":"\"\"\"\n* Get sidebar menus\n* Save Acl Permissions\n* To get SubSubmenu details for create add/edit/delete toolbar.\n\"\"\"\nfrom rest_framework.response import Response\nfrom rest_framework import authentication\nfrom shared.views import TimeDelayed_APIView\n\nfrom dashboard.serializers import create_aclpermissions_for_role, AclPermissionsSerializer, \\\n    SideBarMenusSerializer, SubSubMenusSerializer\nfrom dashboard.models import AclPermissions, Roles, SubMenus, SubSubMenus, UserRoles\n\nclass DashboardMenu(TimeDelayed_APIView):\n    \"\"\"\n    To get all side bar menus.\n\n    * Requires token authentication.    \n    \"\"\"\n    authentication_classes = (authentication.TokenAuthentication,)\n\n    def get(self, request, format=None):\n        role = request.GET.get('roleid', '')\n        # print('role',role)\n        if role == 'for_sidebar_menu':\n            \"For sidebar menu\"\n            try:\n                userrole = UserRoles.objects.get(user=request.user)\n                role = userrole.role\n            except UserRoles.DoesNotExist:\n                # if user doesn't have any userroles\n                role, created = Roles.objects.get_or_create(name='Anonymous')\n                if created: create_aclpermissions_for_role(role)\n\n            queryset = AclPermissions.objects.filter(role_id=role, view=True).order_by('-ordering', 'menu_text')\n            serializer = SideBarMenusSerializer(queryset, many=True)\n        else:\n            \"Get aclpermission menus, submenus and subsubmenus for display of Aclpermision edit form\"\n            queryset = AclPermissions.objects.filter(role_id=role).order_by('menu_text')\n            serializer = AclPermissionsSerializer(queryset, many=True)\n        return Response(serializer.data)\n\n    def post(self, request):\n        \"\"\"\n        Save aclpermission checkbox datas\n        \"\"\"\n        chk_value = request.data['check_box']\n        chk_type = request.data['chk_type']\n        # view_all,add_all, edit_all, row_all\n        if chk_type in ['view_all', 'all', 'add_all', 'edit_all']:\n            # if all checkbox is clicked\n            role_id = request.data['role']\n            items = AclPermissions.objects.filter(role_id=role_id)\n            if chk_type in ['view_all', 'all']:\n                items.update(view=chk_value)\n\n            for menu in items:\n                # if chk_type in ['view_all','all']:\n                # menu.submenus.all().update(view = chk_value)\n                for submenu in menu.submenus.all():\n                    if chk_type == 'all':\n                        submenu.view = chk_value\n                        submenu.save()\n                        submenu.subsubmenus.all().update(\n                            view=chk_value,\n                            add=chk_value,\n                            edit=chk_value,\n                            trash=chk_value)\n                    elif chk_type == 'view_all':\n                        submenu.view = chk_value\n                        submenu.save()\n                        submenu.subsubmenus.all().update(view=chk_value, )\n                    elif chk_type == 'add_all':\n                        submenu.subsubmenus.all().update(add=chk_value, )\n                    elif chk_type == 'edit_all':\n                        submenu.subsubmenus.all().update(edit=chk_value, )\n                    elif chk_type == 'delete_all':\n                        submenu.subsubmenus.all().update(trash=chk_value, )\n\n      
  elif 'submenus' in request.data:\n            # if main menu checkbox clicked\n            chk_id = request.data['id']\n            item = AclPermissions.objects.get(id=chk_id)\n            if chk_type == 'ordering':\n                item.ordering = chk_value\n            else:\n                item.view = chk_value\n            item.save()\n\n        elif 'subsubmenus' in request.data:\n            # if submenu checkbox is clicked\n            chk_id = request.data['id']\n            item = SubMenus.objects.get(id=chk_id)\n            if chk_type == 'ordering':\n                item.ordering = chk_value\n            else:\n                item.view = chk_value\n            item.save()\n\n        elif 'sub_menu' in request.data:\n            # if subsubmenu checkbox is clicked\n            chk_id = request.data['id']\n            item = SubSubMenus.objects.get(id=chk_id)\n            if chk_type == 'ordering':\n                item.ordering = chk_value\n            elif chk_type == 'add':\n                item.add = chk_value\n            elif chk_type == 'edit':\n                item.edit = chk_value\n            elif chk_type == 'delete':\n                item.trash = chk_value\n            elif chk_type == 'view':\n                item.view = chk_value\n            elif chk_type == 'row_change':\n                f = chk_value\n                item.view, item.add, item.edit, item.trash = (f, f, f, f)\n            item.save()\n\n        return Response('SAVED')\n\n\nclass getSubSubmenuDetails(TimeDelayed_APIView):\n    \"\"\"\n    To get SubSubmenu details for create add/edit/delete toolbar.\n    \"\"\"\n    authentication_classes = (authentication.TokenAuthentication,)\n\n    def get(self, request, format=None):\n        # print(request.data)\n        menu_id = request.GET.get('id', '')\n        if not menu_id: return Response('')\n        queryset = SubSubMenus.objects.get(id=menu_id)\n        serializer = SubSubMenusSerializer(queryset)\n        return Response(serializer.data)","sub_path":"dashboard/views/accesspermissions.py","file_name":"accesspermissions.py","file_ext":"py","file_size_in_byte":5380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"183456179","text":"import ast\n\n# Grammar:\n# expr = muldiv {(\"-\", \"+\") muldiv}\n# muldiv = power {(\"*\", \"/\") power}\n# power = brackets [\"^\" power]\n# brackets = \"(\" expr \")\" | number\n\nclass Parser:\n\n    def __init__(self, lex):\n        self.lex = lex\n\n    def match(self, tok):\n        if self.lex.curr() != tok:\n            raise ValueError(tok)\n        self.lex.eat()\n\n    def brackets(self):\n        if type(self.lex.curr()) == int:\n            number = ast.Number(self.lex.curr())\n            self.lex.eat()\n            return number\n        \n        self.match(\"(\")\n        expr = self.expr()\n        self.match(\")\")\n        return expr\n\n    def power(self):\n        value = self.brackets()\n        if self.lex.curr() == \"^\":\n            self.lex.eat()\n            topower = self.power()\n            return ast.BinOp(value, topower, ast.Power)\n        return value\n\n    def muldiv(self):\n        value = self.power()\n        if self.lex.curr() not in (\"*\", \"/\"):\n            return value\n        \n        while self.lex.curr() in (\"*\", \"/\"):\n            if self.lex.curr() == \"*\":\n                self.lex.eat()\n                value = ast.BinOp(value, self.power(), ast.Mul)\n            elif self.lex.curr() == \"/\":\n                self.lex.eat()\n                value = ast.BinOp(value, self.power(), ast.Div)\n\n        return value\n\n    def expr(self):\n        value = self.muldiv()\n        if self.lex.curr() not in (\"+\", \"-\"):\n            return value\n        \n        while self.lex.curr() in (\"+\", \"-\"):\n            if self.lex.curr() == \"+\":\n                self.lex.eat()\n                value = ast.BinOp(value, self.muldiv(), ast.Add)\n            elif self.lex.curr() == \"-\":\n                self.lex.eat()\n                value = ast.BinOp(value, self.muldiv(), ast.Sub)\n\n        return value\n\n","sub_path":"parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":1788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"237209839","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Apr 21 01:14:36 2018\n\n@author: Ayoub El khallioui\n\"\"\"\n\nimport numpy as np\nimport tensorflow as tf\n\n\nclass ANN_tf:\n    \n    def __init__(self,eta=0.001,epochs=50,size_batch=100,n_hidden=50,shuffle=True,random_state=None,dim_x=784,n_classes=10):\n        self.eta=eta\n        self.epochs=epochs\n        self.size_batch=size_batch\n        self.n_hidden=n_hidden\n        self.shuffle=shuffle\n        self.random_state=random_state\n        self.dim_x=dim_x\n        self.n_classes=n_classes\n        self.g=tf.Graph()\n        with self.g.as_default():\n            tf.set_random_seed(self.random_state)\n            self.build()\n            self.init=tf.global_variables_initializer()\n        \n    def build(self):\n        \n        self.X=tf.placeholder(dtype=tf.float32,shape=(None,self.dim_x))\n        self.y=tf.placeholder(dtype=tf.int32,shape=(None))\n        y_onehot=tf.one_hot(indices=self.y,depth=self.n_classes)\n        self.weights={'h':tf.get_variable(name='w_h',shape=(self.dim_x,self.n_hidden)),\n                      'out':tf.get_variable(name='w_out',shape=(self.n_hidden,self.n_classes))}\n        self.bais={'b_h':tf.get_variable(name='b_h',shape=(self.n_hidden)),\n                   'b_out':tf.get_variable(name='b_out',shape=(self.n_classes))}\n        z_h=tf.add(tf.matmul(self.X,self.weights['h']),self.bais['b_h'])\n        a_h=tf.nn.sigmoid(z_h)\n        self.z_out=tf.add(tf.matmul(a_h,self.weights['out']),self.bais['b_out'])\n        a_out=tf.nn.softmax(self.z_out)\n        #self.out_labels=tf.argmax(self.z_out,axis=1)\n        self.cost=tf.losses.softmax_cross_entropy(onehot_labels=y_onehot,logits=self.z_out)\n        #self.cost=tf.reduce_mean(loss)\n        sigma_out=a_out-y_onehot\n        sigma_h=tf.matmul(sigma_out,tf.transpose(self.weights['out']))*(a_h*(1-a_h))\n        gradw_out=tf.matmul(tf.transpose(a_h),sigma_out)\n        gradb_out=tf.reduce_sum(sigma_out,axis=0)\n        gradw_h=tf.matmul(tf.transpose(self.X),sigma_h)\n        gradb_h=tf.reduce_sum(sigma_h,axis=0)\n        \n        self.weights['h']=tf.assign(self.weights['h'],self.weights['h']-self.eta*gradw_h)\n        self.bais['b_h']=tf.assign(self.bais['b_h'],self.bais['b_h']-self.eta*gradb_h)\n        self.weights['out']=tf.assign(self.weights['out'],self.weights['out']-self.eta*gradw_out)\n        self.bais['b_out']=tf.assign(self.bais['b_out'],self.bais['b_out']-self.eta*gradb_out)\n        self.train=tf.group(self.weights['h'],self.bais['b_h'],self.weights['out'],self.bais['b_out'],name='train')\n        self.prediction={'classes':tf.argmax(self.z_out,axis=1),\n                    'proba':a_out}\n\nmodel=ANN_tf(random_state=10)\n\n\nmnist=np.load('D:\\\\python\\\\MNN from scratch\\\\mnist_scaled.npz')\nfiles=mnist.files    \nX_train,y_train,X_test,y_test=[mnist[f] for f in files]  \n\ndef gen_mini_batch(model,X_train,y_train,shuffle=True):\n    X_p=X_train\n    y_p=y_train\n    data=np.column_stack((X_p,y_p))\n    if shuffle:\n        np.random.shuffle(data)\n    X_p=data[:,:-1]\n    y_p=data[:,-1]\n    for i in range(1,X_train.shape[0],model.size_batch):\n        yield(X_p[i:i+model.size_batch,:],y_p[i:i+model.size_batch])\n\nepochs=model.epochs\nsess=tf.Session(graph=model.g)\nsess.run(model.init)\nfor epoch in range(epochs):\n    liste_Loss=[]\n    for X_batch,y_batch in gen_mini_batch(model,X_train,y_train,shuffle=True):\n        
_,loss=sess.run(['train',model.cost],feed_dict={model.X:X_batch,model.y:y_batch})\n        liste_Loss.append(loss)\n    print('epoch=%d => loss=%.3f'%(epoch,np.mean(liste_Loss)))\n       \ny_pred=sess.run(model.prediction['classes'],feed_dict={model.X:X_test})    \nprint('accuracy_score=%.2f'%((y_pred==y_test).sum()/len(y_test)))    \n    \n    ","sub_path":"low_TF_MNIST.py","file_name":"low_TF_MNIST.py","file_ext":"py","file_size_in_byte":3688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"513088305","text":"# COUNT THE NUMBER OF LETTERS IN AN OBJECT WORKS ALSO FOR INTS\ndef count_letters(list_letters):\n    obj = {}\n    for letter in list_letters:\n        if(obj.get(letter) == None):\n            obj[letter] = 1\n        else:\n            obj[letter] += 1\n    print(obj)\n\n\ncount_letters(['a', 'b', 'c', 'd', 'b', 'b'])\n","sub_path":"src/17_count-letters.py","file_name":"17_count-letters.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"371784519","text":"from JumpScale import j\n\ntry:\n    import regex\nexcept:    \n    pass\n\nclass REGEXTOOL():\n\n    @staticmethod       \n    def match(pattern,text):\n        m = regex.match(pattern,text)\n        if m:\n            print(\"%s %s\"%(pattern,text))\n            return True\n        else:\n            return False        \n\n    @staticmethod           \n    def matchContent(path,contentRegexIncludes=[], contentRegexExcludes=[]):\n        content=j.system.fs.fileGetContents(path)\n        if REGEXTOOL.matchMultiple(patterns=contentRegexIncludes,text=content) and not REGEXTOOL.matchMultiple(patterns=contentRegexExcludes,text=content):\n            return True\n        return False\n\n    @staticmethod       \n    def matchMultiple(patterns,text):\n        \"\"\"\n        see if any patterns matched\n        if patterns=[] then will return False\n        \"\"\"\n        if type(patterns).__name__!='list' :\n            raise RuntimeError(\"patterns has to be of type list []\")\n        if patterns==[]:\n            return True\n        for pattern in patterns:\n            pattern=REGEXTOOL._patternFix(pattern)\n            if REGEXTOOL.match(pattern,text):\n                return True\n        return False\n\n\n    @staticmethod       \n    def matchPath(path,regexIncludes=[],regexExcludes=[]):\n        if REGEXTOOL.matchMultiple(patterns=regexIncludes,text=path) and not REGEXTOOL.matchMultiple(patterns=regexExcludes,text=path):\n            return True\n        return False        \n\n    @staticmethod       \n    def _patternFix(pattern):\n        if pattern.find(\"(?m)\")==-1:\n            pattern=\"%s%s\" % (\"(?m)\",pattern)\n        return pattern        \n\n\nj.base.regex=REGEXTOOL\n","sub_path":"lib/JumpScale/base/REGEXTOOL.py","file_name":"REGEXTOOL.py","file_ext":"py","file_size_in_byte":1648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"504287259","text":"\"\"\"etesync_server URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n    https://docs.djangoproject.com/en/1.10/topics/http/urls/\nExamples:\nFunction views\n    1. Add an import:  from my_app import views\n    2. Add a URL to urlpatterns:  url(r'^$', views.home, name='home')\nClass-based views\n    1. Add an import:  from other_app.views import Home\n    2. Add a URL to urlpatterns:  url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n    1. Import the include() function: from django.conf.urls import url, include\n    2. Add a URL to urlpatterns:  url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import include\nfrom django.conf.urls import url\nfrom journal import views\nfrom rest_framework_nested import routers\n\nrouter = routers.DefaultRouter()\nrouter.register(r'journals', views.JournalViewSet)\nrouter.register(r'journal/(?P[^/]+)', views.EntryViewSet)\nrouter.register(r'user', views.UserInfoViewSet)\n\njournals_router = routers.NestedSimpleRouter(router, r'journals',\n                                             lookup='journal')\njournals_router.register(r'members', views.MembersViewSet,\n                         base_name='journal-members')\njournals_router.register(r'entries', views.EntryViewSet,\n                         base_name='journal-entries')\n\n\nurlpatterns = [\n    url(r'^api/v1/', include(router.urls)),\n    url(r'^api/v1/', include(journals_router.urls)),\n]\n\n# Adding this just for testing, this shouldn't be here normally\nurlpatterns += url(r'^reset/$', views.reset, name='reset_debug'),\n","sub_path":"tests/storage/etesync/etesync_server/etesync_server/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"73449417","text":"#-*- coding:utf-8 -*-\n# __author__ = 'xjh'\n\nimport mail_check, config_manager, data_analyze\nimport logging,time\nfrom datetime import datetime\n\nclass UpdateTask(object):\n    def __init__(self):\n        self.stop = False\n        self.first = True\n        pass\n\n    # 定时查收一次邮件作业\n    def update(self):\n        count = 6\n        last = 10\n        while self.stop == False:\n            #每过30分钟查一次作业\n            try:\n                always = True\n                print('check mail')\n                mail_check.MailCheck().check_by_user('shiliangyi@bitedu.tech','x', last, always)\n                mail_check.MailCheck().check_by_user('xujinghang@bitedu.tech','x', last, always)\n                mail_check.MailCheck().check_by_user('zhangpengwei@bitedu.tech','x', last, always)\n                #data_analyze.Data_Analyze().analyze_work_info()\n                #data_analyze.Data_Analyze().analyze_summary_work_info()\n                #data_analyze.Data_Analyze().analyze_student_info()\n\n                logging.info('check mail')\n            except BaseException as e:\n                errmsg = 'mail_check error:{}'.format(e)\n                print(errmsg)\n                logging.error(errmsg)\n            finally:\n                last = 500\n\n            count -= 1\n            if count == 0:\n                count = 6\n                last = -1\n\n            # 睡眠30分钟分钟\n            print('sleep')\n            time.sleep(60*30)\n\nif __name__ == \"__main__\":\n    config_manager.ConfigManager().initialize()\n    UpdateTask().update()","sub_path":"bittech/update_task.py","file_name":"update_task.py","file_ext":"py","file_size_in_byte":1566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"128723788","text":"from django.http import JsonResponse, HttpResponseBadRequest\nfrom django.shortcuts import render, HttpResponse, redirect, get_object_or_404\n\nfrom .models import CartItem\nfrom .decorators import check_item_owner, collect_cart_info\n\nfrom account.models import Account\n\n\n@collect_cart_info\ndef cart_page(request, products, summary_data):\n    context = {'items': products, 'summary': summary_data}\n    return render(request, 'cart/cart_page.html', context)\n\n\n@collect_cart_info\ndef get_summary_info(request, products, summary_data):\n    return JsonResponse({'summary': summary_data})\n\n\n@check_item_owner\ndef increase_quantity(request, item_id):\n    \"\"\"\n    Receives the id of a cart item and increments its value by one\n\n    :param request:\n    :param item_id:\n    :return: HttpResponse\n    \"\"\"\n    if request.method == 'POST':\n        item = CartItem.objects.get(id=item_id)\n        if item:\n            item.quantity = item.quantity + 1\n            item.save()\n            return HttpResponse({item.quantity})\n        else:\n            return HttpResponseBadRequest()\n    else:\n        return redirect('home')\n\n\n@check_item_owner\ndef decrease_quantity(request, item_id):\n    \"\"\"\n    Receives the id of a cart item and decrements its value by one\n\n    :param request:\n    :param item_id:\n    :return: HttpResponse\n    \"\"\"\n    if request.method == 'POST':\n        item = CartItem.objects.get(id=item_id)\n        if item:\n            if item.quantity == 1:\n                item.delete()\n                return HttpResponse({0})\n            else:\n                item.quantity = item.quantity - 1\n                item.save()\n                return HttpResponse({item.quantity})\n        else:\n            return HttpResponseBadRequest()\n    else:\n        return redirect('home')\n\ndef get_item_count(request):\n    \"\"\" Returns the number of cart items belonging to the user \"\"\"\n    try:\n        cart = request.user.account.cart\n    except AttributeError:\n        device = request.COOKIES['device']\n        account, created = Account.objects.get_or_create(device=device)\n        cart = account.cart\n\n    return JsonResponse({'data': cart.cartitem_set.all().count()})\n\n\ndef remove_item(request, item_id):\n    \"\"\" Removes item from the users cart if that item belongs to the request.user \"\"\"\n    if request.method == 'POST':\n        item = CartItem.objects.get(id=item_id)\n\n        try:\n            account = get_object_or_404(Account, user=request.user)\n\n        except TypeError:\n            device = request.COOKIES['device']\n            account, created = Account.objects.get_or_create(device=device)\n\n        if item:\n            if item.cart.account == account:\n                item.delete()\n        return redirect(\"cart\")\n    return redirect('cart')\n","sub_path":"cart/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"443902878","text":"from tkinter import * \r\n \r\ndef draw_pattern_in_canvas(a_canvas): \r\n\tgrid_size = 50\r\n\t# You complete this function\r\n\tcircle_colour = \"red\"\r\n\tsquare_colour = \"green\"\r\n\toutline_thickness = 2\r\n\toutline_colour = \"black\"\r\n\t\r\n\ty = 0\r\n\tfor row in range(0,7):\r\n\t\tx = 0\r\n\t\tfill_in = row % 2 == 0\r\n\t\tfor column in range(0,10):\r\n\t\t\tif fill_in:\r\n\t\t\t\ta_canvas.create_oval(x, y, x+grid_size, y+grid_size, fill = circle_colour, outline = outline_colour, width = outline_thickness)\r\n\t\t\telse:\r\n\t\t\t\ta_canvas.create_rectangle(x, y, x+grid_size, y+grid_size, fill = square_colour, outline = outline_colour, width = outline_thickness)\r\n\t\t\t\ta_canvas.create_line(x+(grid_size/2), y, x+(grid_size/2), y+grid_size, fill = outline_colour)\r\n\t\t\t\ta_canvas.create_line(x, y+(grid_size/2), x+grid_size, y+(grid_size/2), fill = outline_colour)\r\n\t\t\tx = x + grid_size\r\n\t\t\tfill_in = not fill_in\r\n\t\ty = y + grid_size\r\n\t\r\ndef draw_grid(a_canvas):\r\n\tfor row in range(50, 350, 50):\r\n\t\ta_canvas.create_line(-1, row, 501, row, fill = \"lightblue\")\r\n\tfor column in range(50, 500, 50):\r\n\t\ta_canvas.create_line(column, -1, column, 351, fill = \"lightblue\")\r\n\t\t\r\ndef main(): \r\n\twindow = Tk()  \r\n\twindow.title(\"Red and Green Pattern\")  \r\n\twindow.config(background = 'white')   \r\n\twindow.geometry(\"500x350+10+20\") \r\n\r\n\ta_canvas = Canvas(window) \r\n\ta_canvas.config(background = \"white\")   \r\n\ta_canvas.pack(fill = BOTH, expand = True) #Canvas fills the whole top level window \r\n\tdraw_grid(a_canvas)\r\n\tdraw_pattern_in_canvas(a_canvas) \r\n\twindow.mainloop()   \r\n \r\nmain()","sub_path":"COMPSCI 101/Lab09/Lab09Ex2.py","file_name":"Lab09Ex2.py","file_ext":"py","file_size_in_byte":1516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"382306384","text":"from typing import List, Tuple\n\nimport pygame as pg\nfrom pygame.math import Vector2\nfrom pygame.rect import Rect\nfrom pygame.sprite import Sprite\n\nimport model\nimport mods\nimport settings\nfrom creatures.players import Player\nfrom view.screen import ScreenAccess\nfrom tilemap import TiledMap\nfrom view import draw_utils\nfrom view import images\nfrom view.camera import Camera\nfrom view.hud import HUD\n\nNO_SELECTION = -1\n\n\nclass DungeonView(model.GroupsAccess, ScreenAccess):\n    def __init__(self) -> None:\n        super().__init__()\n\n        dim_screen = pg.Surface(self.screen.get_size()).convert_alpha()\n        dim_screen.fill((0, 0, 0, 180))\n\n        self.camera: Camera = Camera(800, 600)\n\n        self._hud = HUD()\n\n        self._draw_debug = False\n        self._night = False\n        self.draw_teleport_text = False\n\n        self._fog = pg.Surface((settings.WIDTH, settings.HEIGHT))\n        self._fog.fill(settings.NIGHT_COLOR)\n\n        # lighting effect for night mode\n        self._light_mask = images.get_image(images.LIGHT_MASK)\n        self._light_rect = self._light_mask.get_rect()\n\n        self.title_font = images.get_font(images.ZOMBIE_FONT)\n\n    def set_camera_range(self, width: int, height: int) -> None:\n        x, y = self.camera.rect.x, self.camera.rect.y\n        self.camera.rect = Rect(x, y, width, height)\n\n    def draw(self, player: Player, tile_map: TiledMap) -> None:\n\n        self.camera.update(player)\n\n        self.screen.blit(tile_map.img, self.camera.get_shifted_rect(tile_map))\n\n        for sprite in self.groups.all_sprites:\n            self._draw_sprite(sprite)\n\n        if self._draw_debug:\n            self._draw_debug_rects()\n\n        if self._night:\n            self.render_fog(player)\n\n        if self.draw_teleport_text:\n            self._draw_teleport_text()\n\n        # draw hud on top of everything\n        self._hud.draw(player)\n\n    def _draw_sprite(self, sprite: Sprite) -> None:\n        image = sprite.image\n        rect = image.get_rect().copy()\n        new_center = Vector2(sprite.pos)\n        new_center.x += self.camera.rect.topleft[0]\n        new_center.y += self.camera.rect.topleft[1]\n        rect.center = new_center\n\n        if self._rect_on_screen(rect):\n            self.screen.blit(image, rect)\n\n    def _draw_teleport_text(self) -> None:\n\n        font = images.get_font(images.ZOMBIE_FONT)\n        draw_utils.draw_text(self.screen, 'Press T to continue', font,\n                             16, settings.GREEN, 16, 8)\n\n    def _rect_on_screen(self, rect: Rect) -> bool:\n        return self.screen.get_rect().colliderect(rect)\n\n    def _draw_debug_rects(self) -> None:\n        for sprite in self.groups.all_sprites:\n            if hasattr(sprite, 'motion'):\n                rect = sprite.motion.hit_rect\n            else:\n                rect = sprite.rect\n            shifted_rect = self.camera.shift_by_topleft(rect)\n            if self._rect_on_screen(shifted_rect):\n                pg.draw.rect(self.screen, settings.CYAN, shifted_rect, 1)\n        for obstacle in self.groups.walls:\n            assert obstacle not in self.groups.all_sprites\n            shifted_rect = self.camera.shift_by_topleft(obstacle.rect)\n            if self._rect_on_screen(shifted_rect):\n                pg.draw.rect(self.screen, settings.CYAN, shifted_rect, 1)\n\n    def render_fog(self, player: Player) -> None:\n        # draw the light mask (gradient) onto fog image\n        
self._fog.fill(settings.NIGHT_COLOR)\n        self._light_rect.center = self.camera.get_shifted_rect(player).center\n        self._fog.blit(self._light_mask, self._light_rect)\n        self.screen.blit(self._fog, (0, 0), special_flags=pg.BLEND_MULT)\n\n    def toggle_debug(self) -> None:\n        self._draw_debug = not self._draw_debug\n\n    def toggle_night(self) -> None:\n        self._night = not self._night\n\n    def try_click_hud(self, pos: Tuple[int, int]) -> None:\n        self._try_click_mod(pos)\n        self._try_click_item(pos)\n\n    def _try_click_mod(self, pos: Tuple[int, int]) -> None:\n        rects = [self._hud.mod_rects[l] for l in mods.ModLocation]\n        index = self.clicked_rect_index(rects, pos)\n        # compare with the HUD attribute; self.selected_mod is a method and never equals an int\n        if index == self._hud.selected_mod:\n            self._hud.selected_mod = NO_SELECTION\n        else:\n            self._hud.selected_mod = index\n\n    def _try_click_item(self, pos: Tuple[int, int]) -> None:\n        index = self.clicked_rect_index(self._hud.backpack_rects, pos)\n        if index == self._hud.selected_item:\n            self._hud.selected_item = NO_SELECTION\n        else:\n            self._hud.selected_item = index\n\n    def clicked_rect_index(self, rects: List[pg.Rect],\n                           pos: Tuple[int, int]) -> int:\n\n        x, y = pos\n        for idx, r in enumerate(rects):\n            if r.collidepoint(x, y):\n                return idx\n        return NO_SELECTION\n\n    def hud_collide_point(self, pos: Tuple[int, int]) -> bool:\n        return self._hud.collide_point(pos)\n\n    def selected_item(self) -> int:\n        return self._hud.selected_item\n\n    def selected_mod(self) -> mods.ModLocation:\n        if self._hud.selected_mod == NO_SELECTION:\n            return NO_SELECTION\n\n        locs = [l for l in mods.ModLocation]\n        return locs[self._hud.selected_mod]\n\n    def set_selected_item(self, idx: int) -> None:\n        self._hud.selected_item = idx\n\n    def toggle_hide_backpack(self) -> None:\n        self._hud.toggle_hide_backpack()\n","sub_path":"src/view/dungeon_view.py","file_name":"dungeon_view.py","file_ext":"py","file_size_in_byte":5409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"73969889","text":"#import cv2 as cv\nimport numpy as np\nimport threading\nimport time\nfrom sharedVars import *\nimport random\nimport math\nimport sys\nimport json\n\nfrom Graphics import *\n#from ImageRecognition import *\n\n\n'''\nIn order to allow for ease of developent, concidering we have 1 set of hardware, we wrote a replacement of main.py\nthat acts as a SIMULATOR for the image recognition system.\n\nUsing sim.py as the entry point for the program allows the user to run the system without running the image recognition library and without needing a webcam plugged in\nthe board state is simulated via a file sim_data.json\nEditing sim_data.json will modify the current baord state in real time\n'''\n# manually importing the games. Writing plugin support not worth for starter\nimport games.test123.test123 as test123\nimport games.chess.chess as chess\nimport games.chess.chess1P as chess1P\nimport games.checkers.checkers as checkers\nimport games.checkers.checkers1P as checkers1P\nimport games.colorTest.colorTest as colorTest\nimport games.rock_paper_scissors.rock_paper_scissors as rps\n\nclass Sim( threading.Thread ):\n    def __init__( self, name ):\n        threading.Thread.__init__(self)\n        print_sim(\"SIMulator Started\")\n        print_sim(\"\\tThread name: \" + name )\n\n        self.recognition_delay = 0.05\n        self.image_count_max = 3\n        self.image_current_count = 0\n\n        self.local_board_states = []\n\n    def run( self ):\n        while( sharedVars.DONE ):\n            if( self.image_current_count == self.image_count_max ):\n\n                sharedVars.BOARD_STATE = mode_of_boards( self.local_board_states )\n\n                time.sleep( self.recognition_delay )\n\n                self.image_current_count = 0\n                self.local_board_states = []\n\n            else:\n                self.image_current_count += 1\n\n                #ret, frame = self.cap.read()\n                #self.local_board_states.append(Stamp.stamps( frame ))\n                with open(\"sim_data.json\") as f:\n                    data = json.load(f)\n                    new_data = []\n                    for item in data[\"data\"]:\n                        new_data_temp = []\n                        for i in reversed( range( len(item) ) ):\n                            new_data_temp.append( item[i] )\n\n                        new_data.append(new_data_temp)\n\n                    time.sleep( 0.5 )\n                    self.local_board_states.append(new_data)\n\n\n\n\n\n\ndef main():\n    image_recognition = Sim( \"thread_imgrec\" )\n    graphics = Graphics( \"thread_gphc\" )\n\n\n    graphics.addGame( chess1P.Chess(\"D Chess 1P\") )\n    graphics.addGame( chess.Chess(\"D Chess 2P\") )\n    graphics.addGame( checkers.Checkers(\"D Checkers 2P\") )\n    graphics.addGame( checkers1P.Checkers(\"D Checkers 1P\") )\n\n    graphics.addGame( rps.rockPaperScissors(\"R/P/S\") )\n    graphics.addGame( test123.test123(\"Ball Demo\") )\n    graphics.addGame( colorTest.colorTest(\"Color Test\") )\n\n\n    graphics.start()\n    image_recognition.start()\n\n    graphics.join()\n    image_recognition.join()\n\n\n\nif __name__ == '__main__': main()\n","sub_path":"src/sim.py","file_name":"sim.py","file_ext":"py","file_size_in_byte":3060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"340919682","text":"from django.contrib.auth.decorators import user_passes_test\nfrom datetime import datetime, timedelta\nimport logging\nfrom carddirector.cd_auth.signals import login_failure\nfrom carddirector.cd_auth.repos import increase_failures, reset_failures,\\\n    get_login_attempt\nfrom carddirector.cd_auth.views import lockout_response\nfrom carddirector.cd_auth.services import get_source_ip_from_request\nfrom authority.decorators import permission_required\nfrom carddirector.cd_auth import services\nimport types\nfrom django.shortcuts import redirect\n\nlogger = logging.getLogger(__name__)\n\n\n\ndef permission_required_or_access_denied(perm, *args, **kwargs):\n    kwargs['login_url'] = '/access_denied/'\n    kwargs['redirect_to_login'] = True\n    return permission_required(perm, *args, **kwargs)\n    \n\n\ndef check_permission (privilege_required_decorator = None , permission_required_decorator = None):\n    \"\"\"\n    Check permissions for groups.\n        T24 group will be checked against privilege_required_decorator\n        Reseller group will be checked against permission_required_decorator\n        \n    This decorator takes 2 decoratoras as parameters and executes the proper one.\n    \"\"\"\n    \n    if privilege_required_decorator is not None:\n            if not isinstance(privilege_required_decorator, types.FunctionType) and privilege_required_decorator.func_name is not 'privilege_required':\n                raise ValueError(\"First argument of check_permission() is not a privilege_required() function.\")\n    if permission_required_decorator is not None:\n            if not isinstance(permission_required_decorator, types.FunctionType) and permission_required_decorator.func_name is not 'permission_required_or_access_denied':\n                raise ValueError(\"First argument of check_permission() is not a permission_required_or_access_denied() function.\")\n    \n    def decorate(view_func):\n        \n         \n        def decorated(request, *args, **kwargs): \n            if request.user.is_authenticated():\n                if services.user_in_group_t24(request.user) or request.user.is_superuser:\n                    return privilege_required_decorator(view_func)(request, *args, **kwargs)\n                elif services.user_in_group_reseller(request.user):\n                    if permission_required_decorator:\n                        return permission_required_decorator(view_func)(request, *args, **kwargs)\n            return redirect('/access_denied/')\n        return decorated\n\n    return decorate\n    \n    \n\n\ndef privilege_required(*privilege_names):\n    \"\"\"Requires user membership in at least one of the groups passed in.\"\"\"\n    def has_privileges(u):\n        if u.is_authenticated():\n            if u.is_superuser:\n                return True\n            for role in u.profile.roles.all():\n                if bool(role.privileges.filter(name__in=privilege_names)):\n                    return True\n        return False\n\n    return user_passes_test(has_privileges, login_url = '/access_denied/')\n\n\n\n\ndef watch_login(func):\n    \"\"\"\nUsed to decorate the django.contrib.admin.site.login method.\nLoosely based on axes https://github.com/codekoala/django-axes\n\"\"\"\n\n    BLOCKING_TIMEOUT_MINS = 30\n\n    MAX_FAILED_ATTEMPTS = 6\n\n    def decorated_login(request, *args, **kwargs):\n\n        if request.method == 'GET' and  'username' not in request.GET:\n            # Login Form\n            response = func(request, *args, 
**kwargs)\n            return response\n\n#        if func.__name__ == 'decorated_login':\n#            # if we're dealing with this function itself, don't bother checking\n#            # for invalid login attempts.  I suppose there's a bunch of\n#            # recursion going on here that used to cause one failed login\n#            # attempt to generate 10+ failed access attempt records (with 3\n#            # failed attempts each supposedly)\n#            return response\n\n        if request.method == 'POST':\n\n            if 'username' in request.POST:\n                login_attempt = get_login_attempt(request.POST['username'])\n                now = datetime.now()\n                elapsed = now - login_attempt.last_attempt_time\n\n                if elapsed >= timedelta(minutes=BLOCKING_TIMEOUT_MINS):\n                    # Reset attempts\n                    login_attempt.failures = 0\n                else:\n                    if login_attempt.failures >= MAX_FAILED_ATTEMPTS:\n                        send_login_failure_signal(request)\n                        if login_attempt.failures == MAX_FAILED_ATTEMPTS:\n                            logger.error('Login blocked for username: ' + login_attempt.username + ' (' + str(MAX_FAILED_ATTEMPTS) + ' failed attempts in less than ' + str(BLOCKING_TIMEOUT_MINS) +  ' mins)')\n                        # ATTEMPT BLOQUED\n                        increase_failures(login_attempt)\n                        return lockout_response(request)\n\n                response = func(request, *args, **kwargs)\n\n                login_unsuccessful = (\n                    response and\n                    not response.has_header('location') and\n                    response.status_code != 302\n                    )\n\n                if login_unsuccessful:\n                    send_login_failure_signal(request)\n                    # Fail\n                    increase_failures(login_attempt)\n                    return response\n\n                # Success\n                reset_failures(login_attempt)\n\n                return response\n\n\n    return decorated_login\n\n\ndef send_login_failure_signal(request):\n    login_failure.send_robust(request, user=request.POST.get('username', 'N/A'), source_ip=get_source_ip_from_request(request), request=request)\n\n\n","sub_path":"apps/carddirector/cd_auth/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":5685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"644118092","text":"import requests\nimport json\n\n\nasync def get_professor(message):  # extract professor's name from the command\n\n    try:\n\n        command = message.content\n        arr = command.strip().split()  # strip whitespace and split the message into an array\n        first_name = arr[1]  # extract a first name\n        last_name = arr[2]  # extract a last name\n        # generate the full name of the professor\n        formatted_name = first_name + \"%20\" + last_name\n        url = \"https://coursebook-api.herokuapp.com/v1/prof/\" + \\\n            formatted_name  # use this to access the API of UTD Coursebook\n        # output professor information\n        await output_prof(message, first_name, last_name, url)\n\n    # if this fails, return error message\n    except (IndexError):\n        await message.channel.send(\"Invalid format. Please enter a name like 'John Smith'\")\n\n\nasync def output_prof(message, first_name, last_name, url):  # output professor information\n\n    try:\n        response = requests.get(url)\n        print(response.status_code)\n        # set a list equal to the json list from the API of UTD Coursebook\n        response_dict = response.json()\n        data = response_dict[\"data\"][0]  # extract dictionary from the list\n\n        output = \"```\"  # start creating output\n\n        output += \"Name: \" + data[\"name\"]  # add name to output, first!\n        output += \"\\n\"\n\n        for key in data.keys():  # look through entire dictionary\n            if key != \"name\":  # add the contents of dictionary to the output\n                output += key.title() + \": \" + data[key] + '\\n'\n\n        output += \"```\"  # end creating output\n        await message.channel.send(output)\n\n    # if this fails, return error message\n    except (IndexError, RuntimeError):\n        await message.channel.send(f\"The professor '{first_name.title()} {last_name.title()}' could not be found.\")\n","sub_path":"find.py","file_name":"find.py","file_ext":"py","file_size_in_byte":1870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"458569102","text":"import unittest\nfrom data_handler import HandleData\nimport pandas as pd\nimport numpy as np\n\nclass TestHandleData(unittest.TestCase):\n    def test_check_file_format(self):\n        hd = HandleData()\n        self.assertTrue(hd.check_file_format(\"some_file_name.csv\"))\n        self.assertTrue(hd.check_file_format(\"some_file_name.CsV\"))\n        self.assertFalse(hd.check_file_format(\"some_file_name.html\"))\n        self.assertFalse(hd.check_file_format(\"some_file_name.HTTml\"))\n\n    def test_check_file_name(self):\n        hd = HandleData()\n        self.assertTrue(hd.check_file_name(\"orders\"))\n        self.assertTrue(hd.check_file_name(\"OrdERS\"))\n        self.assertFalse(hd.check_file_name(\"wrong_file_name\"))\n        self.assertFalse(hd.check_file_name(\"WroNNg_File_name\"))\n\n    def test_write_csv_to_df(self):\n        hd = HandleData()\n        self.assertIsInstance(hd.write_csv_to_df(\"orders\"), pd.DataFrame)\n        self.assertIsInstance(hd.write_csv_to_df(\"OrdERS\"), pd.DataFrame)\n        self.assertIsNone(hd.write_csv_to_df(\"wrong_file_name\"))\n        self.assertIsNone(hd.write_csv_to_df(\"WroNNg_File_name\"))\n\n    def test_has_df_nan(self):\n        hd = HandleData()\n        df_orders = hd.write_csv_to_df(\"orders\")\n        df_order_lines = hd.write_csv_to_df(\"order_lines\")\n        self.assertTrue(hd.has_df_nan(df_orders))\n        self.assertTrue(hd.has_df_nan(df_order_lines))\n\n    def test_has_df_cols(self):\n        hd = HandleData()\n        df_orders = hd.write_csv_to_df(\"orders\")\n        df_test = pd.DataFrame(columns=['A','B','C','D','E','F','G'])\n        self.assertTrue(hd.has_df_cols(df_orders, \"id\", \"created_at\", \"vendor_id\", \"customer_id\"))\n        self.assertFalse(hd.has_df_cols(df_test, 'A','B','C','D','E','F'))\n\n    def test_validate_df(self):\n        hd = HandleData()\n        df_order_lines = hd.write_csv_to_df(\"order_lines\")\n        df_test = pd.DataFrame(columns=['A','B','C','D','E','F','G'])\n        self.assertTrue(hd.has_df_cols(df_order_lines, \"order_id\", \"product_id\", \"product_description\", \"product_price\", \"product_vat_rate\",\\\n                \"discount_rate\", \"quantity\",\"full_price_amount\", \"discounted_amount\", \"vat_amount\", \"total_amount\"))\n        self.assertFalse(hd.has_df_cols(df_test, 'A','B','C','D','E','F'))\n\n    def test_round_up(self):\n        hd = HandleData()\n        n1 = 155000171.52235325\n        d1 = 2\n        num1 = hd.round_up(n1, d1)\n        self.assertEqual(num1, 155000171.53)\n        n2 = 168091848.74244543\n        d2 = 3\n        num2 = hd.round_up(n2, d2)\n        self.assertEqual(num2, 168091848.743)\n        n3 = 185569868.96233523\n        d3 = 2\n        num3 = hd.round_up(n3, d3)\n        self.assertNotEqual(num3, 185569868.972)\n        \n\nif __name__ == \"__main__\":\n    unittest.main()","sub_path":"test_data_handler.py","file_name":"test_data_handler.py","file_ext":"py","file_size_in_byte":2759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"401337508","text":"from django.urls import path\n\nfrom webapp.views import TaskView, CreateTaskView, EditTaskView, IndexView, DeleteTaskView, StatusAddView, \\\n    EditStatusView, DeleteStatusView, TypeAddView, EditTypeView, DeleteTypeView, ProjectsView, ProjectView, \\\n    ProjectCreateView, ProjectEditView, ProjectDeleteView\n\nurlpatterns = [\n    path('', IndexView.as_view(), name='main_page'),\n    path('task//', TaskView.as_view(), name='view task'),\n    path('project//task/create/', CreateTaskView.as_view(), name='create task'),\n    path('task//edit/', EditTaskView.as_view(), name='edit task'),\n    path('task//delete/', DeleteTaskView.as_view(), name='delete task'),\n\n    path('status/add/', StatusAddView.as_view(), name='add status'),\n    path('status//edit/', EditStatusView.as_view(), name='edit status'),\n    path('status//delete/', DeleteStatusView.as_view(), name='delete status'),\n\n    path('types/add/', TypeAddView.as_view(), name='add type'),\n    path('types//edit/', EditTypeView.as_view(), name='edit type'),\n    path('types//delete/', DeleteTypeView.as_view(), name='delete type'),\n\n    path('projects/', ProjectsView.as_view(), name='view projects'),\n    path('project//', ProjectView.as_view(), name='view project'),\n    path('project/create/', ProjectCreateView.as_view(), name='create project'),\n    path('project//edit/', ProjectEditView.as_view(), name='edit project'),\n    path('project//delete/', ProjectDeleteView.as_view(), name='delete project'),\n]\n\napp_name='webapp'","sub_path":"source/webapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"381052482","text":"from uuid import UUID\n\nfrom furl import furl\nfrom rest_framework import status\nfrom rest_framework.reverse import reverse as drf_reverse\n\nfrom . import BaseAPITestCase\n\n\nclass TrackTests(BaseAPITestCase):\n    def setUp(self):\n        self.track_name = \"Last Exit\"\n        self.track_uuid = UUID(\"b3083319-47a9-40ed-a4e0-a79d050d9df7\")\n        self.album_uuid = UUID(\"b4fee0db-0c93-4470-96b3-cebd158033a0\")\n\n    def test_list_tracks(self):\n        url = drf_reverse(\"track-list\", kwargs={\"version\": self.version})\n        r = self.client.get(url)\n        self.assertEqual(r.status_code, status.HTTP_200_OK)\n        self.assertEqual(r.data[\"count\"], 3695)\n\n    def test_search_tracks(self):\n        url = drf_reverse(\"track-list\", kwargs={\"version\": self.version})\n        url = furl(url).set({\"name\": self.track_name}).url\n        r = self.client.get(url)\n        self.assertEqual(r.status_code, status.HTTP_200_OK)\n        self.assertEqual(r.data[\"count\"], 4)\n        self.assertEqual(r.data[\"results\"][0][\"uuid\"], self.track_uuid)\n\n    def test_get_track(self):\n        url = drf_reverse(\n            \"track-detail\", kwargs={\"version\": self.version, \"uuid\": self.track_uuid}\n        )\n        r = self.client.get(url)\n        self.assertEqual(r.status_code, status.HTTP_200_OK)\n        self.assertEqual(r.data[\"name\"], self.track_name)\n        self.assertEqual(r.data[\"album\"][\"uuid\"], self.album_uuid)\n","sub_path":"grunge/tests/test_tracks.py","file_name":"test_tracks.py","file_ext":"py","file_size_in_byte":1404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"405401005","text":"import logging\nimport formatting\n\nlogger_inspectParameters = logging.getLogger('inspectParameters')\nlogger_inspectParameters.setLevel(logging.DEBUG)\nFORMAT = '[$BOLD%(filename)s$RESET:%(lineno)d][%(levelname)-5s]: %(message)s '\nformatter = logging.Formatter(formatting.formatter_message(FORMAT, False))\n\n# create console handler with a higher log level\nch = logging.StreamHandler()\nch.setLevel(logging.DEBUG)\nch.setFormatter(formatter)\nlogger_inspectParameters.addHandler(ch)\n\n\nimport numpy as np\n\ndef load_model(model_path, logger=logger_inspectParameters):\n    logger.info(\"Loading stored model...\")\n\n    # restore network weights\n    with np.load(model_path) as f:\n        all_params = [f['arr_%d' % i] for i in range(len(f.files))][0]\n\n    logger.info(\"number of layers: %s\", len(all_params))\n\n    for i in range(len(all_params)):\n        layer_params = all_params[i]\n        logger.info(\"layer %s.shape: %s\", i, layer_params.shape)\n\n    import pdb;pdb.set_trace()\n\nmodel_path = '/home/matthijs/TCDTIMIT/audioSR/combined/results/BEST/2_LSTMLayer64_64_nbMFCC39_bidirectional_combined.npz'\nload_model(model_path=model_path)","sub_path":"code/report/inspectParameters.py","file_name":"inspectParameters.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"150789570","text":"i = input('your expression : ')\nf=[]\nfor x in i:\n\tf.append(x)\ntop = -1\ninfix = ['(']+f+[')']\nstack = [None]*len(infix)\nprior = {'+':0,'-':0,'/':1,'*':1,'^':2}\noutput = []\n\ndef stackpush(val):\n\tglobal top\n\tglobal stack\n\tif top >= len(stack)-1:\n\t\tprint('stack overflow')\n\telse:\n\t\ttop += 1\n\t\tstack[top] = val\n\ndef stackpop():\n\tglobal top\n\tglobal stack\n\tif top == -1:\n\t\tprint('stack overflow')\n\telse:\n\t\ta = stack[top]\n\t\tdel stack[top]\n\t\ttop -= 1\n\t\treturn a\n\nfor i in range(len(infix)):\n\tif infix[i] == '(':\n\t\tstackpush(infix[i])\n\telif infix[i] in ['+','-','/','*','^']:\n\t\tif infix[i-1] in ['+','-','/','*','^']:\n\t\t\tif prior[infix[i]]>prior[infix[i-1]]:\n\t\t\t\tstackpush(infix[i-1])\n\t\t\telse:\n\t\t\t\ta=stackpop()\n\t\t\t\toutput += [a]\n\t\t\t\tstackpush(infix[i])\n\t\telse:\n\t\t\tstackpush(infix[i])\n\n\telif infix[i] == ')':\n\t\twhile stack[top] != '(':\n\t\t\tb=stackpop()\n\t\t\toutput = output+[b]\n\t\tstackpop()\n\n\telse:\n\t\toutput += [infix[i]]\n\nprint (\"\".join(output)) \n\n\n\n\n\n\n","sub_path":"INFIX_TO_POSTFIX.py","file_name":"INFIX_TO_POSTFIX.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"298150877","text":"from django.contrib.auth import get_user_model\nfrom django.test.testcases import SimpleTestCase\nfrom djet import restframework\nfrom rest_framework.request import Request, override_method\n\nimport djoser.constants\nimport djoser.serializers\nimport djoser.signals\nimport djoser.utils\nimport djoser.views\n\n\nclass UserEmailFactoryBaseTest(SimpleTestCase):\n    def test_get_context_returns_data(self):\n        valid_data = {\n            'from_email': 'test@example.net',\n            'user': get_user_model()(),\n            'protocol': 'https',\n            'domain': 'example.net',\n            'site_name': 'example.net',\n            'arbitrary_data': 'lorem ipsum'\n\n        }\n\n        factory = djoser.utils.UserEmailFactoryBase(**valid_data)\n        self.assertIsNotNone(factory.get_context())\n\n\nclass TestDjoserViewsSupportActionAttribute(restframework.APIViewTestCase):\n    # any arbitraty view from djoser\n    view_class = djoser.views.UserView\n\n    def test_action_reflect_http_method(self):\n        request = self.factory.get()\n\n        view = self.view_class()\n        view.action_map = {'get': 'retrieve'}\n\n        # reproduce DRF wrapping\n        with override_method(view, Request(request), 'GET') as request:\n            view.dispatch(request)\n            self.assertEqual(view.action, 'retrieve')\n","sub_path":"testproject/testapp/tests/test_util.py","file_name":"test_util.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"145377837","text":"from __future__ import (absolute_import, division, print_function)\n\nfrom pyugrid.util import point_in_tri\nimport numpy as np\n\n\ndef test_point_in_tri():\n    test_datasets = [\n        {\n            'triangle': np.array([[0., 0.],[1.,0.],[0.,1.]]),\n            'points_inside': [np.array([0.1,0.1]), np.array([0.3,0.3])],\n            'points_outside': [np.array([5.,5.])],\n        },\n    ]\n\n    for dataset in test_datasets:\n        for point in dataset['points_inside']:\n            assert point_in_tri(dataset['triangle'], point)\n        for point in dataset['points_outside']:\n            assert ~point_in_tri(dataset['triangle'], point)\n","sub_path":"pyugrid/test/point_in_triangle_test.py","file_name":"point_in_triangle_test.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"650194976","text":"# -*- coding:utf-8 -*-\n\n\n# Given a string s, find the longest palindromic substring in s. You may assume that the maximum length of s is 1000.\n#\n# Example:\n#\n# Input: \"babad\"\n#\n# Output: \"bab\"\n#\n# Note: \"aba\" is also a valid answer.\n#\n#\n#\n# Example:\n#\n# Input: \"cbbd\"\n#\n# Output: \"bb\"\n\n\nclass Solution(object):\n    def longestPalindrome(self, s):\n        \"\"\"\n        :type s: str\n        :rtype: str\n        \"\"\"\n        sz = len(s)\n        if sz > 1000:\n            raise NameError(\"the len of s can't more than 1000\")\n        i, mark, max_len = 0, 0, 1\n        while i < sz and 2 * (sz - i) > max_len:\n            a = b = i\n            while b + 1 < sz and s[b] == s[b + 1]:\n                b += 1\n            i = b + 1\n            while a > 0 and b + 1 < sz and s[a - 1] == s[b + 1]:\n                a -= 1\n                b += 1\n            if b - a + 1 > max_len:\n                mark = a\n                max_len = b - a + 1\n        return s[mark:max_len + mark]\n","sub_path":"005-longest-palindromic-substring/longest-palindromic-substring.py","file_name":"longest-palindromic-substring.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"508991691","text":"import random\nimport math\nimport numpy\n\n#### Othello Shell\n#### P. White 2016-2018\n# Sophia Wang's code 2/9/18\n\n\nEMPTY, BLACK, WHITE, OUTER = '.', '@', 'o', '?'\n\n# To refer to neighbor squares we can add a direction to a square.\nN, S, E, W = -10, 10, 1, -1\nNE, SE, NW, SW = N + E, S + E, N + W, S + W\nDIRECTIONS = (N, NE, E, SE, S, SW, W, NW)\nOPP_DIRECTIONS = {N: S, S: N, E: W, W: E, SE: NW, NW: SE, SW: NE, NE: SW}\nPLAYERS = {BLACK: \"Black\", WHITE: \"White\"}\nCORNERS = [11, 18, 81, 88]\nCORNER_DIRECTIONS = {11: (E, S, SE), 18: (W, S, SW), 81: (N, E, NE), 88: (N, W, NW)}\n\n\n########## ########## ########## ########## ########## ##########\n# The strategy class for your AI\n# You must implement this class\n# and the method best_strategy\n# Do not tamper with the init method's parameters, or best_strategy's parameters\n# But you can change anything inside this you want otherwise\n#############################################################\nclass Node:\n    def __init__(self, b, m=None, s=None):\n        # self.name= tempname\n        self.board = b\n        self.children = []\n        # self.player = p\n        self.move = m\n        self.score = s\n\n    def __repr__(self):\n        # return self.board\n        return self.board\n\n    def __lt__(self, other):\n        return self.score < other.score\n\n\nclass Strategy():\n    def __init__(self):\n        pass\n\n    def get_starting_board(self):\n        \"\"\"Create a new board with the initial black and white positions filled.\"\"\"\n        board = \"?\" * 10 + (\"?\" + \".\" * 8 + \"?\") * 8 + \"?\" * 10\n        board = self.replace_square(board, WHITE, 44)\n        board = self.replace_square(board, WHITE, 55)\n        board = self.replace_square(board, BLACK, 45)\n        board = self.replace_square(board, BLACK, 54)\n\n        return board\n\n    def get_pretty_board(self, board):  # checked\n        \"\"\"Get a string representation of the board.\"\"\"\n        values = [x for x in board]\n        values = numpy.array(values).reshape(10, 10)\n        return values\n\n    def print_pretty_board(self, board):  # checked\n        board = self.get_pretty_board(board)\n        for line in board:\n            print(\"  \".join(line))\n\n    def opponent(self, player):\n        \"\"\"Get player's opponent.\"\"\"\n        if player is BLACK: return WHITE\n        if player is WHITE: return BLACK\n        return None\n\n    def find_match(self, board, player, square, direction):  # checked\n        \"\"\"\n        Find a square that forms a match with `square` for `player` in the given\n        `direction`.  
Returns None if no such square exists.\n        \"\"\"\n        position = square + direction\n        opp = self.opponent(player)\n        assert opp is not None\n        while position in range(11, 89) and board[position] is opp:\n            position = position + direction\n            if board[position] == player:\n                return position\n        return None\n\n    def is_move_valid(self, board, player, move):\n        \"\"\"Is this a legal move for the player?\"\"\"\n        pass\n\n    def replace_square(self, board, player, square):\n        return board[:square] + player + board[square + 1:]\n\n    def make_move(self, board, player, move):\n        \"\"\"Update the board to reflect the move by the specified player.\"\"\"\n        # returns a new board/string\n        for dir in DIRECTIONS:\n            match = self.find_match(board, player, move, dir)\n            if match is not None:\n                for x in range(move, match, dir):\n                    board = self.replace_square(board, player, x)\n\n        return board\n\n    def pieces_on_board(self, board, player):\n        return [x for x in range(11, 89) if board[x] is player]\n\n    def remove_corners(self, moves, corner):\n        if corner in moves and len(moves) > 1:\n            moves.remove(corner)\n        return moves\n\n    def get_valid_moves(self, board, player):  # checked\n        \"\"\"Get a list of all legal moves for player.\"\"\"\n        moves = []\n\n        for square in [x for x in range(11, 89) if board[x] is EMPTY]:\n            # if square==11: topleft= 1\n            # if square==19: topright= 2\n            # if square==81: bottomleft= 3\n            # if square==91: bottomright= 4\n            for dir in DIRECTIONS:\n                if board[square + dir] == self.opponent(player):\n                    match = self.find_match(board, player, square, dir)\n                    if match is not None and square not in moves:\n                        moves.append(square)\n        # squarecorners = {topleft: [12, 14], topright: [18, 29], bottomleft: [71, 82], bottomright: [88, 79]}\n\n        for c in [22, 28, 72, 77]:  # diagonal corners\n            self.remove_corners(moves, c)\n        # for x in [topleft, topright, bottomleft, bottomright]:\n        #     if type(x)==int:\n        #         for m in squarecorners[x]:\n        #             self.remove_corners(moves, m)\n        # for c in [12,14,18,29,71,82,88,79]:\n        #    self.remove_corners(moves,c)\n        return moves\n\n    def has_any_valid_moves(self, board, player):\n        return len(self.get_valid_moves(board, player)) > 0\n\n    def next_player(self, board, prev_player):\n        \"\"\"Which player should move next?  
Returns None if no legal moves exist.\"\"\"\n        opp = self.opponent(prev_player)\n        if self.has_any_valid_moves(board, opp): return opp\n        if self.has_any_valid_moves(board, prev_player): return prev_player\n        return None\n\n    def score(self, board, player=BLACK):\n        \"\"\"Compute player's score (number of player's pieces minus opponent's).\"\"\"\n        return len(self.pieces_on_board(board, BLACK)) - len(self.pieces_on_board(board, WHITE))\n\n    def stability(self, board, move, player, status, prev_move,\n                  prev_player):  #rewards how many levels it stays the same color\n        pass\n\n\n    def mob_stability_weighted_score(self, board, player=BLACK, prev_move=None, prev_player=BLACK):\n        matrix = [\n            0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n            0, 120, -30, 20, 5, 5, 20, -30, 120, 0,\n            0, -30, -80, -5, -5, -5, -5, -80, -30, 0,\n            0, 20, -5, 15, 3, 3, 15, -5, 20, 0,\n            0, 5, -5, 3, 3, 3, 3, -5, 5, 0,\n            0, 5, -5, 3, 3, 3, 3, -5, 5, 0,\n            0, 20, -5, 15, 3, 3, 15, -5, 20, 0,\n            0, -30, -80, -5, -5, -5, -5, -80, -30, 0,\n            0, 120, -30, 20, 5, 5, 20, -30, 120, 0,\n            0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n        ]\n        score = 0\n        board_pieces = [x for x in range(11, 89) if board[x] is not EMPTY]\n        for corner in CORNERS:\n            for dir in CORNER_DIRECTIONS:\n                if board[corner] is BLACK:\n                    matrix[corner] = 10\n                    corner += dir\n        for b in board_pieces:\n            if board[b] is BLACK:\n                score += self.stability(board, b, BLACK)\n                score += matrix[b]\n            else:\n                score -= self.stability(board, b, WHITE)\n                score -= matrix[b]\n\n        return score\n\n    def game_over(self, board, player):\n        \"\"\"Return true if player and opponent have no valid moves\"\"\"\n        return self.next_player(board, player) is None\n\n    ### Monitoring players\n\n    class IllegalMoveError(Exception):\n        def __init__(self, player, move, board):\n            self.player = player\n            self.move = move\n            self.board = board\n\n        def __str__(self):\n            return '%s cannot move to square %d' % (PLAYERS[self.player], self.move)\n\n    ################ strategies #################\n    def alphabeta_search(self, node, player, alpha, beta, depth, prev_move=None):\n        best = {BLACK: max, WHITE: min}\n        board = node.board\n        if depth == 0:\n            node.score = self.mob_stability_weighted_score(board, prev_move)\n            return node\n        my_moves = self.get_valid_moves(board, player)\n        children = []\n\n        for corner in CORNERS:\n            if corner in my_moves:\n                next_board = self.make_move(board, player, corner)\n                next_player = self.next_player(next_board, player)\n                if next_player is None:  # is winning board\n                    c = Node(next_board, corner, s=1000 * self.score(next_board))\n                    children.append(c)\n                else:\n                    c = Node(next_board, corner)\n                    c.score = self.alphabeta_search(c, next_player, alpha, beta, depth=depth - 1).score\n                    children.append(c)\n        if len(children) > 0:\n            winner = best[player](children)\n            node.score = winner.score\n            return winner\n\n        for move in 
my_moves:\n            next_board = self.make_move(board, player, move)\n            next_player = self.next_player(next_board, player)\n            if next_player is None:  # is winning board\n                c = Node(next_board, move, s=1000 * self.score(next_board))\n                children.append(c)\n            else:\n                c = Node(next_board, move)\n                c.score = self.alphabeta_search(c, next_player, alpha, beta, depth=depth - 1).score\n                children.append(c)\n            if player is BLACK:\n                alpha = max(alpha, c.score)\n            if player is WHITE:\n                beta = min(beta, c.score)\n            if alpha >= beta:\n                break\n        winner = best[player](children)\n        node.score = winner.score\n        return winner\n\n    def alphabeta_strategy(self, board, player, depth=3):\n        # calls minmax_search\n        # feel free to adjust the parameters\n        # returns an integer move\n\n        move = self.alphabeta_search(Node(board), player, -1000000, 1000000, depth=1).move\n        return move\n\n    def random_strategy(self, board, player):\n        return random.choice(self.get_valid_moves(board, player))\n\n    def best_strategy(self, board, player, best_move, still_running):\n        ## THIS IS the public function you must implement\n        ## Run your best search in a loop and update best_move.value\n        depth = 1\n        while (True):\n            board = \"\".join(board)\n            best_move.value = self.alphabeta_strategy(board, player)\n            depth += 1\n\n    standard_strategy = alphabeta_strategy\n\n\n###############################################\n# The main game-playing code\n# You can probably run this without modification\n################################################\nimport time\nfrom multiprocessing import Value, Process\nimport os, signal\n\nsilent = False\n\n\n#################################################\n# StandardPlayer runs a single game\n# it calls Strategy.standard_strategy(board, player)\n#################################################\nclass StandardPlayer():\n    def __init__(self):\n        pass\n\n    def play(self):\n        ### create 2 opponent objects and one referee to play the game\n        ### these could all be from separate files\n        ref = Strategy()\n        black = Strategy()\n        white = Strategy()\n\n        print(\"Playing Standard Game\")\n        board = ref.get_starting_board()\n        player = BLACK\n        strategy = {BLACK: black.standard_strategy, WHITE: white.minmax_strategy}\n        print(ref.get_pretty_board(board))\n        tic = time.clock()\n        while player is not None:\n            move = strategy[player](board, player)\n            print(\"Player %s chooses %i\" % (player, move))\n            board = ref.make_move(board, player, move)\n            print(ref.get_pretty_board(board))\n            player = ref.next_player(board, player)\n        toc = time.clock()\n        print(\"Time %i\" % (toc - tic))\n        print(strategy)\n        print(\"Final Score %i.\" % ref.score(board), end=\" \")\n        print(\"%s wins\" % (\"Black\" if ref.score(board) > 0 else \"White\"))\n\n\n\n\nif __name__ == \"__main__\":\n    game = ParallelPlayer(1)\n    game = StandardPlayer()\n    # game.play()\n    s = Strategy()\n    b = s.get_starting_board()\n    b = b[:35] + BLACK + b[36:]\n    b = b[:45] + BLACK + b[46:]\n    b = b[:55] + WHITE + b[56:]\n    # s.print_pretty_board(b)\n    # rint(s.find_match(b, WHITE, 
55, N))\n    sp = StandardPlayer()\n    sp.play()\n","sub_path":"PycharmProjects/Othello/debug.py","file_name":"debug.py","file_ext":"py","file_size_in_byte":12081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"30490789","text":"from heft.experiments.comparison_experiments.common.ExecutorRunner import ExecutorsFactory\nwf_name = \"Montage_250\"\ntsk_period = 10\nrepeat_count = 50\n\nsave_path = \"D:/wspace/heft/results/gaheft_ext_150/\"\ndef fnc(tsk):\n    return ExecutorsFactory.default().run_gaheftoldpop_executor(\n                                     reliability=0.95,\n                                     is_silent=True,\n                                     wf_name=wf_name,\n                                     logger=None,\n                                     key_for_save='small_run',\n                                     #task_id_to_fail=\"ID00005_000\",\n                                     task_id_to_fail=tsk,\n                                     fixed_interval_for_ga=6,\n                                     save_path=save_path,\n                                     ga_params={\n                                        \"population\": 30,\n                                        \"crossover_probability\": 0.8,\n                                        \"replacing_mutation_probability\": 0.5,\n                                        \"sweep_mutation_probability\": 0.4,\n                                        \"generations\": 100\n                                     },\n                                     check_evolution_for_stopping=False,\n                                     nodes_conf=[10, 15, 25, 30] + [10, 15, 25, 30])\n\n#==================================================\n# parallel run\n#==================================================\ntnum = int(wf_name.split(\"_\")[1])\ntasks_to_fail = [\"ID000{0}_000\".format(\"0\"+str(t) if t < 10 else str(t)) for t in range(0, tnum, tsk_period)]\n# to_exec = [t for i in range(repeat_count) for t in tasks_to_fail]\nto_exec = [\"ID00150_000\" for i in range(repeat_count)]\nif __name__ == \"__main__\":\n    # res = list(futures.map_as_completed(fnc, to_exec))\n    res = list(map(fnc, to_exec))\n    pass\n","sub_path":"heft/experiments/comparison_experiments/GaHeftOldPop.py","file_name":"GaHeftOldPop.py","file_ext":"py","file_size_in_byte":1906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"170148063","text":"# coding=utf-8\n__author__ = 'Simon Zhang'\n__date__ = '2019/9/11 23:15'\n\n\n# 给定一个排序数组和一个目标值,在数组中找到目标值,并返回其索引。如果目标值不存在于数组中,返回它将会被按顺序插入的位置。\n#\n# 你可以假设数组中无重复元素。\n#\n# 示例 1:\n#\n# 输入: [1,3,5,6], 5\n# 输出: 2\n# 示例 2:\n#\n# 输入: [1,3,5,6], 2\n# 输出: 1\n# 示例 3:\n#\n# 输入: [1,3,5,6], 7\n# 输出: 4\n# 示例 4:\n#\n# 输入: [1,3,5,6], 0\n# 输出: 0\n\ndef searchInsert(nums, target):\n    for i, v in enumerate(nums):\n        # 找到大于target的元素, 则返回当前索引\n        if v >= target:\n            return i\n    # 说明没有任何元素大于target, 则插入到数组的末尾\n    return len(nums)\n\nprint(searchInsert([1,3,5,6], 5))\nprint(searchInsert([1,3,5,6], 2))\nprint(searchInsert([1,3,5,6], 7))\nprint(searchInsert([1,3,5,6], 0))\n","sub_path":"12.py","file_name":"12.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"585071558","text":"'''\nAsynchronous DNS client\n'''\nimport asyncio\nimport os\nfrom .. import *\nfrom . import tcp, udp\nfrom ..cache import DNSMemCache\n__all__ = ['Resolver', 'ProxyResolver']\n\nA_TYPES = types.A, types.AAAA\n\nclass Resolver:\n    '''\n    Asynchronous DNS resolver.\n    '''\n    recursive = 1\n    rootdomains = ['.lan']\n\n    def __init__(self, protocol=UDP, cache=None):\n        self.futures = {}\n        if cache is None:\n            cache = DNSMemCache()\n        self.cache = cache\n        self.protocol = InternetProtocol.get(protocol)\n\n    async def query_cache(self, res, fqdn, qtype):\n        '''Returns a boolean whether a cache hit occurs.'''\n        # if cached CNAME\n        cname = list(self.cache.query(fqdn, types.CNAME))\n        if cname:\n            res.an.extend(cname)\n            if not self.recursive or qtype == types.CNAME:\n                return True\n            for rec in cname:\n                cres = await self.query(rec.data, qtype)\n                if cres is None or cres.r > 0:\n                    continue\n                res.an.extend(cres.an)\n                res.ns = cres.ns\n                res.ar = cres.ar\n            return True\n        # else\n        data = list(self.cache.query(fqdn, qtype))\n        cache_hit = False\n        if data:\n            for rec in data:\n                if rec.qtype in (types.NS,):\n                    nres = list(self.cache.query(rec.data, A_TYPES))\n                    if nres:\n                        res.ar.extend(nres)\n                        res.ns.append(rec)\n                        if rec.qtype == qtype:\n                            cache_hit = True\n                else:\n                    res.an.append(rec.copy(name=fqdn))\n                    if qtype == types.CNAME or rec.qtype != types.CNAME:\n                        cache_hit = True\n        if any(fqdn.endswith(root) for root in self.rootdomains):\n            if not cache_hit:\n                res.r = 3\n                cache_hit = True\n            # should only be added for domains that are resolved by this server\n            res.aa = 1  # Authoritative answer\n            res.ns.append(Record(name=fqdn, qtype=types.NS, data='localhost', ttl=-1))\n            res.ar.append(Record(name=fqdn, qtype=types.A, data='127.0.0.1', ttl=-1))\n        return cache_hit\n\n    def get_nameservers(self, fqdn):\n        '''Return a generator of parent domains'''\n        empty = True\n        while fqdn and empty:\n            _sub, _, fqdn = fqdn.partition('.')\n            for rec in self.cache.query(fqdn, types.NS):\n                host = rec.data\n                if address.Address(host, allow_domain=True).ip_type is None:\n                    # host is a hostname instead of IP address\n                    for res in self.cache.query(host, A_TYPES):\n                        yield address.Address(res.data, 53)\n                        empty = False\n                else:\n                    yield address.Address(host, 53)\n                    empty = False\n\n    async def request(self, qdata, addr, timeout=3.0, protocol=None):\n        '''Return response to a request.\n\n        Send DNS request data according to `protocol`.\n        '''\n        if protocol is None:\n            protocol = self.protocol\n        if protocol is TCP:\n            request = tcp.request\n        else:\n            request = udp.request\n        data = await request(qdata, addr, timeout)\n        return data\n\n    async def 
query_remote(self, res, fqdn, qtype):\n        '''Return a boolean indicating whether results are found.\n\n        No cache will be used and requests will sent to remote servers.\n        '''\n        if fqdn.endswith('.in-addr.arpa'):\n            # Reverse DNS lookup only occurs locally\n            return\n        # look up from other DNS servers\n        nameservers = address.NameServers(self.get_nameservers(fqdn))\n        cname = [fqdn]\n        req = DNSMessage.request()\n        has_result = False\n        while not has_result:\n            if not cname:\n                break\n            # seems that only one qd is supported by most NS\n            req.qd = [Record(REQUEST, cname[0], qtype)]\n            qdata = req.pack()\n            del cname[:]\n            qid = qdata[:2]\n            for addr in nameservers:\n                try:\n                    data = await self.request(qdata, addr)\n                    if not data.startswith(qid):\n                        raise DNSError(-1, 'Message id does not match!')\n                    cres = DNSMessage.parse(data)\n                    assert cres.r != 2\n                except (asyncio.TimeoutError, AssertionError):\n                    nameservers.fail(addr)\n                except DNSError:\n                    pass\n                else:\n                    break\n            else:\n                break\n            for rec in cres.an + cres.ns + cres.ar:\n                if rec.ttl > 0 and rec.qtype not in (types.SOA, types.MX):\n                    self.cache.add_host(rec)\n            for rec in cres.an:\n                res.an.append(rec)\n                if rec.qtype == types.CNAME:\n                    cname.append(rec.data)\n                if qtype == types.CNAME or rec.qtype != types.CNAME:\n                    has_result = True\n            for rec in cres.ns:\n                if not self.recursive:\n                    res.ns.append(rec)\n                    has_result = True\n                elif rec.qtype == types.SOA or qtype == types.NS:\n                    has_result = True\n            if not self.recursive:\n                res.ar.extend(cres.ar)\n            nameservers = address.NameServers(i.data for i in cres.ar if i.qtype in A_TYPES)\n            if not nameservers:\n                for ns_r in cres.ns:\n                    host = ns_r.data.mname if ns_r.qtype == types.SOA else ns_r.data\n                    try:\n                        ns_res = await self.query(host)\n                        assert ns_res\n                    except (AssertionError, asyncio.TimeoutError):\n                        pass\n                    except Exception as e:\n                        logger.error(host)\n                        logger.error(e)\n                    else:\n                        if ns_res:\n                            for ans in ns_res.an:\n                                if ans.qtype in A_TYPES:\n                                    nameservers.add(ans.data)\n            res.r = cres.r\n        return has_result\n\n    async def query(self, fqdn, qtype=types.ANY, timeout=3.0):\n        '''Return query result.\n\n        Cache queries for hostnames and types to avoid repeated requests at the same time.\n        '''\n        key = fqdn, qtype\n        future = self.futures.get(key)\n        if future is None:\n            loop = asyncio.get_event_loop()\n            future = self.futures[key] = loop.create_future()\n            asyncio.ensure_future(self.do_query(key))\n        try:\n            
res = await asyncio.wait_for(future, timeout)\n        except (AssertionError, asyncio.TimeoutError, asyncio.CancelledError):\n            pass\n        else:\n            return res\n\n    async def do_query(self, key):\n        '''\n        Starts a query asynchronously, add the future object to cache.\n        '''\n        fqdn, qtype = key\n        res = DNSMessage(ra=self.recursive)\n        res.qd.append(Record(REQUEST, name=fqdn, qtype=qtype))\n        future = self.futures[key]\n        ret = (\n            await self.query_cache(res, fqdn, qtype)\n        ) or (\n            await self.query_remote(res, fqdn, qtype)\n        )\n        if not ret and not res.r:\n            res.r = 2\n        self.futures.pop(key)\n        if not future.cancelled():\n            future.set_result(res)\n\nclass ProxyResolver(Resolver):\n    '''Proxy DNS resolver.\n    Resolve hostnames from remote proxy servers instead of root servers.\n    '''\n    DEFAULT_NAMESERVERS = [\n        '114.114.114.114',\n        '180.76.76.76',\n        '223.5.5.5',\n        '223.6.6.6',\n    ]\n    proxies = address.NameServers(DEFAULT_NAMESERVERS)\n\n    def get_nameservers(self, fdqn):\n        return self.proxies or super().get_nameservers(fdqn)\n\n    def set_proxies(self, proxies):\n        '''Set proxy servers.'''\n        self.proxies = address.NameServers(proxies)\n","sub_path":"async_dns/resolver/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":8206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"601337516","text":"# coding: utf-8\nfrom pupa.scrape import Scraper\n\nfrom utils import csv_reader, CanadianLegislator as Legislator\n\nimport re\n\nCOUNCIL_PAGE = 'https://docs.google.com/spreadsheets/d/1wqBO6Ti1GduIhuYH_UTBkYbP_rOcANIpQ1FgmrEdB5A/export?gid=0&format=csv'\n\n\nKEYS = {\n  'District Name': 'post_id',\n  'Full Name': 'name',\n  'Party Name': 'party',\n  'Gender': 'gender',\n}\nIGNORE_KEYS = set((\n  'First Name',\n  'Last Name',\n  'Email Note',\n))\nCONTACT_TYPE_KEYS = {\n  'Telephone': 'voice',\n  'Fax': 'fax',\n  'Postal Address': 'address',\n}\nLINKS_KEYS = set((\n  \"Incumbent's Legislative URL\",\n  \"Personal URL\",\n  \"Campaign Website URL\",\n  \"Photo URL\",\n))\nEXTRA_KEYS = set((\n  'Incumbent?',\n  'Twitter',\n  'Facebook',\n  'Instagram',\n  'Flickr',\n))\nPARTY_MAP = {\n  'GRN': 'Green Party of Ontario',\n  'LIB': 'Ontario Liberal Party',\n  'NDP': 'New Democratic Party of Ontario',\n  'PC': 'Progressive Conservative Party of Ontario',\n}\n\nclass OntarioPersonScraper(Scraper):\n\n  def get_people(self):\n    reader = csv_reader(COUNCIL_PAGE, header=True)\n    for row in reader:\n      kwargs = {'role': 'candidate'}\n      email = None\n      links = []\n      extra = {}\n      offices = []\n\n      for k, v in row.items():\n        v = v.strip()\n        if not v:\n          continue\n\n        k = k.strip()\n        match = re.search(r'\\AOffice (\\d): ', k)\n        if match:\n          index = int(match.group(1))\n          while index > len(offices):\n            offices.append({})\n          if k[10:] == 'Type':\n            offices[index - 1]['note'] = v\n          elif k[10:] in CONTACT_TYPE_KEYS:\n            offices[index - 1][CONTACT_TYPE_KEYS[k[10:]]] = v\n          else:\n            raise Exception(k)\n        elif k == 'Party Name':\n          kwargs['party'] = PARTY_MAP[v]\n        elif k in KEYS:\n          kwargs[KEYS[k]] = v\n        elif k == 'Email':\n          email = v\n        elif k in LINKS_KEYS:\n          links.append({'url': v, 'note': k})\n        elif k in IGNORE_KEYS:\n          continue\n        elif k in EXTRA_KEYS:\n          extra[re.sub(r'[^a-z0-9_]', '', k.lower().replace(' ', '_'))] = v\n        else:\n          raise Exception(k)\n\n      contacts = []\n      for office in offices:\n        for _, type in CONTACT_TYPE_KEYS.items():\n          if office.get(type):\n            contacts.push({'note': office['note'], type: type, 'value': office[type]})\n\n      if 'name' in kwargs:\n        p = Legislator(**kwargs)\n        p.add_source(COUNCIL_PAGE)\n        if email:\n          p.add_contact('email', email, None)\n        for link in links:\n          p.add_link(**links)\n        for contact in contacts:\n          p.add_contact(**contact)\n        for k, v in extra.items():\n          p.add_extra(k, v)\n        yield p\n","sub_path":"ca_on_candidates/people.py","file_name":"people.py","file_ext":"py","file_size_in_byte":2707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"606547661","text":"import requests\nimport os\nimport threading\nimport m3u8\nimport shutil\nfrom M3u8 import M3u8\nfrom binascii import hexlify, unhexlify\nfrom utils import log, save\nfrom threading import Lock\nfrom Crypto.Cipher import AES\nfrom config import headers\nfrom hex import read_hex_file\n\n\ncompletion_number = 0\ntotal = 0\nlo = Lock()\nmaxthreads = 30\nsema = threading.Semaphore(value=maxthreads)\nfail_urls = []\n\n\ndef create_dir(url):\n    url_list = url.split('/')\n    dirname = url_list[-2]\n    path = dirname\n    if not os.path.exists(path):\n        os.makedirs(path)\n    return dirname\n\n\ndef read_key_and_iv(dirname):\n    m3u8_path = dirname + '/' + 'm3u8.m3u8'\n    m3u8_obj = m3u8.load(m3u8_path)\n\n    uri = ''\n    iv = ''\n    for key in m3u8_obj.keys:\n        if key:  # First one could be None\n            # print(key.uri)\n            # print(key.method)\n            # print(key.iv)\n            uri = key.uri\n            iv = key.iv\n    key_file = uri[0:-3] + '.key'\n    key_file = dirname + '/' + key_file\n\n    key_bytes = read_hex_file(key_file)\n    # [key, iv]\n    # log('str(key_bytes), iv[2:]', str(key_bytes), iv[2:])\n    return [key_bytes, iv[2:]]\n\n\ndef m3u8_decode(dirname, filename):\n    [key, iv] = read_key_and_iv(dirname)\n    log('len(key), len(iv)', len(key), len(iv))\n    new_filename = '0' + filename\n    cmd = f'openssl aes-128-cbc -d -in {filename} -out {new_filename} -nosalt -iv {iv[2:]} -K {key}'\n    cmd = f'cd {dirname} && {cmd} '\n    # log(cmd)\n    os.system(cmd)\n    os.remove(f'{dirname}/{filename}')\n\n\ndef file_list(dirname):\n    for root, dirs, files in os.walk(dirname):\n        # print(root)  # 当前目录路径\n        # print(dirs)  # 当前路径下所有子目录\n        return files\n\n\n# 判断文件是否已存在\ndef check_file(url, dirname):\n    filename = url.split('/')[-1]\n    files = file_list(dirname)\n    # log('filename', filename, filename in files)\n    return filename in files\n\n\ndef thread_download(urls, dirname):\n    threads = []\n    l = len(urls)\n    for i, url in enumerate(urls):\n        # 创建线程01,不指定参数\n        progress = [str(i), str(l)]\n        if check_file(url, dirname) == False:\n            t = threading.Thread(\n                target=download, args=(url, dirname, progress))\n            # 启动线程01\n            threads.append(t)\n            t.start()\n        else:\n            add_completion_number()\n    for t in threads:\n        t.join()\n\n\ndef download(url, dirname, progress):\n    try:\n        sema.acquire()\n        response = requests.get(url, headers=headers, timeout=10)\n        status_code = response.status_code\n        # log('status_code', status_code)\n        if status_code == 200:\n            filename = url.split('/')[-1]\n            decrypt_save(dirname, filename, response.content)\n            # m3u8_decode(dirname, filename)\n            add_completion_number()\n        else:\n            raise Exception(\"not 200\")\n\n        if url in fail_urls:\n            fail_urls.remove(url)\n        sema.release()\n    except Exception as e:\n        log(' fail_url', e, url)\n\n        fail_urls.append(url)\n\n\ndef merge_m3u8(dirname, filename, sava_path):\n    if not os.path.exists(sava_path):\n        os.makedirs(sava_path)\n    ts_filenames = []\n    # Parse playlist for filenames with ending .ts and put them into the list ts_filenames\n    m3u8 = f'{dirname}/m3u8.m3u8'\n    with open(m3u8, 'r') as playlist:\n        # ts_filenames = [line.rstrip() for line in playlist\n        #             
    if line.rstrip().endswith('.ts')]\n        for line in playlist:\n            if line.rstrip().endswith('.ts'):\n                f = f'{dirname}/{line}'.rstrip()\n                ts_filenames.append(f)\n\n    # print('ts_filenames', len(ts_filenames), ts_filenames[0])\n    # open one ts_file from the list after another and append them to merged.ts\n    sava_path = f'{sava_path}/{filename}.ts'\n    print('merge m3u8 sava path', sava_path)\n    with open(sava_path, 'wb') as merged:\n        for ts_file in ts_filenames:\n            with open(ts_file, 'rb') as mergefile:\n                shutil.copyfileobj(mergefile, merged)\n\n\ndef add_completion_number():\n    global lo\n    with lo:\n        global completion_number\n        global total\n        # log('completion_number, total', completion_number, total)\n        completion_number += 1\n        percent = 'percent: {:.0%}'.format(completion_number / total)\n        log(\n            '\\r' + str(completion_number),\n            str(total),\n            percent,\n            end='',\n            flush=True)\n\n\ndef decrypt_save(dirname, filename, content):\n    [key, iv] = read_key_and_iv(dirname)\n    # key = unhexlify('c8a9ded8b41a7daa57e224968934f86f')\n    # iv = unhexlify('962ec00083ed2a46d7c1c8a8271157c3')\n    key = bytes.fromhex(key)\n    iv = bytes.fromhex(iv)\n\n    # log('len key', key, len(key))\n    # log('len iv', iv, len(iv))\n\n    decipher = AES.new(key, AES.MODE_CBC, iv)\n    pt = decipher.decrypt(content)\n    # decrypt method parameter needs to be a multiple of 16, if not, you need to add binary \"0\"\n    # while len(content) % 16 != 0:\n    #     content += b\"0\"\n    with open(f'{dirname}/{filename}', \"wb\") as file:\n        file.write(pt)\n        # print(\"save success:\" + filename)\n\n\ndef main(page_url):\n    dirname = create_dir(page_url)\n    filename = dirname\n\n    # log(filename)\n    log(dirname)\n    # 获取 m3u8 page_url 地址\n    m = M3u8(page_url, dirname)\n\n    # 下载m3u8文件\n    m.download_m3u8_file()\n\n    # 下载key文件\n    m.download_m3u8_key()\n\n    # 获取m3u8列表\n    urls = m.m3u8_url_list()\n    log('m3u8 number', len(urls))\n    global total\n    total = len(urls)\n\n    # 下载所有ts文件并解密保存\n    thread_download(urls, dirname)\n    if len(fail_urls) > 0:\n        thread_download(fail_urls, dirname)\n    fs = file_list(dirname)\n    log(f' {dirname} file count ', len(fs))\n    merge_m3u8(dirname, filename, 'videos')\n\n\nif __name__ == \"__main__\":\n    page_url = 'https://jable.tv/videos/vec-448/'\n    main(page_url)\n","sub_path":"jable.tv/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"553100998","text":"import math,os, sys,random,time,turtle,operator\nfrom pythonds.graphs import Graph, Vertex\nfrom pythonds.basic import Queue\n\nos.chdir(\"C:\\\\Users\\\\LF\\\\.atom\\\\inter\")\n\n\n\ndef knightGraph(bdSize):\n    ktGraph=Graph()\n    for row in range(bdSize):\n        for col in range(bdSize):\n            nodeId=posToNodeId(row,col,bdSize)\n            newPosition=genLegalMoves(row,col,bdSize)\n            for e in newPosition:\n                nid=posToNodeId(e[0],e[1],bdSize)\n                ktGraph.addEdge(nodeId,nid)\n    return ktGraph\n\ndef posToNodeId(row,column,board_size):\n    return (row*board_size)+column\n\ndef genLegalMoves(x,y,bdSize):\n    newMoves=[]\n    moveOffsets=[(-1,-2),(-1,2),(-2,-1),(-2,1),\n                (1,-2), (1,2),(2,-1),(2,1)]\n    for i in moveOffsets:\n        newX=x+i[0]\n        newY=y+i[1]\n        if legalCoord(newX,bdSize) and legalCoord(newY,bdSize):\n            newMoves.append((newX,newY))\n    return newMoves\n\ndef legalCoord(x,bdSize):\n    if x>=0 and x dist): #첫번째를 max로 유지\n                q.append((k,dist))\n                q=collections.deque(sorted(q,reverse=True,key=lambda x:x[1]),maxlen=self.num)\n            \n        for idx,i in enumerate(q):\n            print(idx+1,'번째 가까운 강의',i[0])\n\n    def l2_dis(self,x,y):\n        return ((x-y)**2).sum()    \n\n    def cossim(self,x,y):\n        return 1/(((x*y).sum()/(x**2).sum()**0.5/(y**2).sum()**0.5))\n\nclass preprocessing:\n\n    def __init__(self,data,tokenizer,remove_pos,start_idx):\n        self.data=data\n        self.tokenizer=tokenizer\n        self.remove_pos=remove_pos\n        self.start_index=start_idx\n        self.k=re.compile('[ㄱ-ㅎ|ㅏ-ㅣ]')\n\n    def removing(self,sent):\n        \n        sample_sentence=list(map(lambda x:self.k.sub('',x),sent))\n        sample_sentence=list(map(lambda x:re.sub('\\n','',x),sample_sentence))\n        sample_sentence=list(map(lambda x:tokenizer.pos(x),sample_sentence))\n        token=sample_sentence\n    \n        for idx,i in enumerate(sample_sentence):\n            tokenized=list(filter(lambda x:x[1][0] not in remove_pos and x[1] not in remove_pos,i))\n            tokenized=list(filter(lambda x:x[0] not in ['것','수'],tokenized))\n            tokenized=list(map(lambda x:str(x),tokenized))\n            token[idx]=' '.join(tokenized)\n        return ' '.join(token)\n\n    def prepro(self):\n        \n        self.idexes=[]\n        def removed(x):\n                       \n            if len(x)!=0 and len(x)!=1:\n                \n                self.start_index+=1\n                return x\n            else:\n                self.idexes.append(self.start_index)\n                self.start_index+=1\n                return None\n\n        lecture_sentences=list(map(lambda x:ast.literal_eval(x),self.data))\n        \n        lecture_sentences=list(filter(lambda x:removed(x),lecture_sentences)) #강의평이 없가나 1개 밖에 없는 경우 날림\n        lecture_sentences=list(map(lambda x:self.removing(x),lecture_sentences))\n        return lecture_sentences,self.idexes\n\n\n\nif __name__==\"__main__\":\n\n    parser = argparse.ArgumentParser()\n\n    parser.add_argument('--data_dir',default=r\"C:\\tensor_code\\kluebot\\data\\raw\\2018_2.csv\", help='datadir?',type=str)\n    parser.add_argument('--classtype',default=\"all\", help='type?',type=str)\n\n    args = parser.parse_args()\n\n    sem=args.data_dir.split('\\\\')[-1][:-4]\n\n    #komoran = Komoran() \n    #tokenizer=Mecab()\n    tokenizer=Okt()\n    print(tokenizer.pos(u'졸린데 수업 
끝내주세요'))\n    print(tokenizer.nouns(u'모기 물렸다'))\n\n    df=pd.read_csv(args.data_dir)\n    \n    df_art=df[df.Classification==args.classtype]\n    if args.classtype=='all':\n        df_art=df\n    \n    df_start_idx=df_art.index.values[0]\n\n    if 'raw' in args.data_dir:\n        df_art['keyword_sent']=df_art['LectureEval']\n\n    #remove_pos=['J','E','X','SF','SE','SSO','SSC','SC','SY']\n    remove_pos=['Josa','Suffix','Foreign','Punctuation']\n\n    M=preprocessing(df_art['keyword_sent'].values,tokenizer,remove_pos,df_start_idx)\n    lecture_sentences,drop_idexes=M.prepro()\n    \n    \n\n    drop_list=df_art.loc[drop_idexes]\n    df_art.drop(drop_idexes,inplace=True,axis=0)\n\n\n    df_art['pos_sent']=lecture_sentences\n\n\n\n    df_art['doc_id']=list(zip(df_art['ProfessorName'],df_art['className']))\n\n    df_extract=pd.concat([df_art['doc_id'],df_art['pos_sent']],axis=1).reset_index(drop=True)\n    \n\n\n    tfidf,vocab=tf_idf(df_extract['pos_sent']).cal_tf()\n  \n    data=pd.DataFrame(tfidf)\n \n    df_new=pd.concat([df_extract.reset_index(drop=True),data.reset_index(drop=True)],axis=1,ignore_index=True)\n    df_new.to_csv('./lecture_vector_okt_'+sem+'.csv',index=False)\n\n    with open('./voacb_okt_'+sem+'.pickle','wb') as f:\n        pickle.dump(vocab,f)\n        \n    # with open('./lec_vec_2017_1','wb') as f:\n    #     pickle.dump(lec_vec,f)\n    idx=random.randint(1,df_new.shape[0])\n    x=df_new[df_new.columns[0]].iloc[idx]\n    print(x,'와 과제 관련 가장 가까운 강의들: ')\n    recommend(x,idx,5,'l2_dis',df_new).getit()\n    #print('\\n')\n    #print(\"코사인유사도:\" ,recommend(x,5,cossim))\n","sub_path":"recommendation/tfidf/.ipynb_checkpoints/apply_tfidf-checkpoint.py","file_name":"apply_tfidf-checkpoint.py","file_ext":"py","file_size_in_byte":6269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"509418770","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nimport time\n\n# constants\nG = 1.0       # gravitational constant\ndt = 1.0e-2   # time interval for data update\n\n# Initial condition and Settings\n#################################\nALGORITHM = {\"KDK\": (1, \"KDK\"), \"DKD\": (2, \"DKD\"), \"RK4\": (3, \"RK4\")}\nuse_algorithm = ALGORITHM[\"RK4\"]  # TODO\nt = 0.0\n\n# Set up each particles initial conditions, (Mass, (x,y), (vx, vy))\n# particles = {\"0\": (2.0, (0, 0), (0, 0)),\n#              \"1\": (0.000000000001, (1.0, 0.0), (0.0, (G * 2.0 / ((1.0**2 + 0.0**2)**0.5))**0.5))}\nparticles = {\"0\": (1.0, (-0.97000436, 0.24308753), (0.4662036850, 0.4323657300)),\n             \"1\": (1.0, (0.0, 0.0), (-0.93240737, -0.86473146)),\n             \"2\": (1.0, (0.97000436, -0.24308753), (0.4662036850, 0.4323657300))}\nparticles = {\"0\": (1.0, (-0.97000436, 0.24308753), (0.4662036850, 0.4323657300)),\n             \"1\": (1.0, (0.0, 0.0), (-0.93240737, -0.86473146)),\n             \"2\": (1.0, (0.97000436, -0.24308753), (0.4662036850, 0.4323657300)),\n             \"3\": (0.5, (1.0, 1.0), (0.0, 0.0))}\n# particles = {\"0\": (1.0, (0.0, 0.0), (0.0, 0.0)),\n#              \"1\": (3.00e-6, (1.0, 0.0), (0.0, 1.0)),\n#              \"2\": (3.7e-8, (0.0, 2.56e-3), (0.01, 0.0))}\n# particles = {\"0\": (1.0, (1.0, 1.0), (0.0, 0.0)),\n#              \"1\": (1.0, (-1.0, 1.0), (0.0, 0.0)),\n#              \"2\": (1.0, (-1.0, -1.0), (0.0, 0.0)),\n#              \"3\": (1.0, (1.0, -1.0), (0.0, 0.0))}\nN = len(particles)\n\n# Assign them to array\nmass_arr = np.array([], dtype=np.float64)\nx_arr = np.array([], dtype=np.float64)\ny_arr = np.array([], dtype=np.float64)\nvx_arr = np.array([], dtype=np.float64)\nvy_arr = np.array([], dtype=np.float64)\n\nfor i in range(N):\n    mass_arr = np.append(mass_arr, particles[str(i)][0])\n    x_arr = np.append(x_arr, particles[str(i)][1][0])\n    y_arr = np.append(y_arr, particles[str(i)][1][1])\n    vx_arr = np.append(vx_arr, particles[str(i)][2][0])\n    vy_arr = np.append(vy_arr, particles[str(i)][2][1])\n\n# Calculate the Total Energy E0\nE0 = (0.5 * mass_arr * (vx_arr**2 + vy_arr**2)).sum()\nfor i in range(N):\n    for j in range(i + 1, N):\n        distance = np.sqrt((x_arr[i] - x_arr[j])**2 + (y_arr[i] - y_arr[j])**2)\n        potential = - G * mass_arr[i] * mass_arr[j] / distance\n        E0 = E0 + potential\n\n# Algorithms\n###############################\n\n\ndef DKD(t_start, t_end):\n    global t, x_arr, y_arr, vx_arr, vy_arr\n\n    while t < t_end and t >= t_start:\n\n        # Drift\n        x_arr = x_arr + vx_arr * 0.5 * dt\n        y_arr = y_arr + vy_arr * 0.5 * dt\n\n        # Kicks\n        ax_arr = np.zeros(N, dtype=np.float64)\n        ay_arr = np.zeros(N, dtype=np.float64)\n        for i in range(N):\n            for j in range(N):\n                if i == j:\n                    continue\n                # j: source / i: self\n                distance = np.sqrt((x_arr[i] - x_arr[j])**2 + (y_arr[i] - y_arr[j])**2)\n                a_abs = G * mass_arr[j] / distance**2\n                ax_arr[i] = ax_arr[i] + a_abs * (x_arr[j] - x_arr[i]) / distance\n                ay_arr[i] = ay_arr[i] + a_abs * (y_arr[j] - y_arr[i]) / distance\n        vx_arr = vx_arr + ax_arr * dt\n        vy_arr = vy_arr + ay_arr * dt\n\n        # Drift\n        x_arr = x_arr + vx_arr * 0.5 * dt\n        y_arr = y_arr + vy_arr * 0.5 * dt\n\n        t = t + dt\n\n\ndef KDK(t_start, t_end):\n    global 
t, x_arr, y_arr, vx_arr, vy_arr\n\n    while t < t_end and t >= t_start:\n\n        # Kicks\n        ax_arr = np.zeros(N, dtype=np.float64)\n        ay_arr = np.zeros(N, dtype=np.float64)\n        for i in range(N):\n            for j in range(N):\n                if i == j:\n                    continue\n                # j: source / i: self\n                distance = np.sqrt((x_arr[i] - x_arr[j])**2 + (y_arr[i] - y_arr[j])**2)\n                a_abs = G * mass_arr[j] / distance**2\n                ax_arr[i] = ax_arr[i] + a_abs * (x_arr[j] - x_arr[i]) / distance\n                ay_arr[i] = ay_arr[i] + a_abs * (y_arr[j] - y_arr[i]) / distance\n        vx_arr = vx_arr + ax_arr * 0.5 * dt\n        vy_arr = vy_arr + ay_arr * 0.5 * dt\n\n        # Drift\n        x_arr = x_arr + vx_arr * dt\n        y_arr = y_arr + vy_arr * dt\n\n        # Kicks\n        ax_arr = np.zeros(N, dtype=np.float64)\n        ay_arr = np.zeros(N, dtype=np.float64)\n        for i in range(N):\n            for j in range(N):\n                if i == j:\n                    continue\n                # j: source / i: self\n                distance = np.sqrt((x_arr[i] - x_arr[j])**2 + (y_arr[i] - y_arr[j])**2)\n                a_abs = G * mass_arr[j] / distance**2\n                ax_arr[i] = ax_arr[i] + a_abs * (x_arr[j] - x_arr[i]) / distance\n                ay_arr[i] = ay_arr[i] + a_abs * (y_arr[j] - y_arr[i]) / distance\n        vx_arr = vx_arr + ax_arr * 0.5 * dt\n        vy_arr = vy_arr + ay_arr * 0.5 * dt\n\n        t = t + dt\n\n\ndef RK4(t_start, t_end):\n    global t, x_arr, y_arr, vx_arr, vy_arr\n\n    t = t_start\n\n    while t < t_end and t >= t_start:\n\n        vx_arr_0 = np.copy(vx_arr)\n        vy_arr_0 = np.copy(vy_arr)\n        x_arr_0 = np.copy(x_arr)\n        y_arr_0 = np.copy(y_arr)\n\n        k1 = np.zeros((4, N, 2), dtype=np.float64)\n        k2 = np.zeros((4, N, 2), dtype=np.float64)\n\n        # Run through k[0], k[1], k[2]\n        for k in range(3):\n\n            # Update k for velocity\n            for i in range(N):\n                for j in range(N):\n                    if i == j:\n                        continue\n                    # j: source / i: self\n                    distance = np.sqrt((x_arr[i] - x_arr[j])**2 + (y_arr[i] - y_arr[j])**2)\n                    a_abs = G * mass_arr[j] / distance**2\n                    k1[k, i, 0] = k1[k, i, 0] + a_abs * (x_arr[j] - x_arr[i]) / distance\n                    k1[k, i, 1] = k1[k, i, 1] + a_abs * (y_arr[j] - y_arr[i]) / distance\n\n            # Update k for coordinates\n            k2[k, :, 0] = vx_arr\n            k2[k, :, 1] = vy_arr\n\n            vx_arr = vx_arr_0 + 0.5 * dt * k1[k, :, 0]\n            vy_arr = vy_arr_0 + 0.5 * dt * k1[k, :, 1]\n            x_arr = x_arr_0 + 0.5 * dt * k2[k, :, 0]\n            y_arr = y_arr_0 + 0.5 * dt * k2[k, :, 1]\n\n        # Run k[3]\n        vx_arr = vx_arr_0 + dt * k1[2, :, 0]\n        vy_arr = vy_arr_0 + dt * k1[2, :, 1]\n        x_arr = x_arr_0 + dt * k2[2, :, 0]\n        y_arr = y_arr_0 + dt * k2[2, :, 1]\n        for i in range(N):\n            for j in range(N):\n                if i == j:\n                    continue\n                # j: source / i: self\n                distance = np.sqrt((x_arr[i] - x_arr[j])**2 + (y_arr[i] - y_arr[j])**2)\n                a_abs = G * mass_arr[j] / distance**2\n                k1[3, i, 0] = k1[3, i, 0] + a_abs * (x_arr[j] - x_arr[i]) / distance\n                k1[3, i, 1] = k1[3, i, 1] + a_abs * (y_arr[j] - y_arr[i]) / distance\n\n        k2[3, :, 
0] = vx_arr\n        k2[3, :, 1] = vy_arr\n\n        # Calculate velocity and coordinate through k\n        vx_arr = vx_arr_0 + (1.0 / 6.0) * dt * (k1[0, :, 0] + 2.0 * k1[1, :, 0] + 2.0 * k1[2, :, 0] + k1[3, :, 0])\n        vy_arr = vy_arr_0 + (1.0 / 6.0) * dt * (k1[0, :, 1] + 2.0 * k1[1, :, 1] + 2.0 * k1[2, :, 1] + k1[3, :, 1])\n        x_arr = x_arr_0 + (1.0 / 6.0) * dt * (k2[0, :, 0] + 2.0 * k2[1, :, 0] + 2.0 * k2[2, :, 0] + k2[3, :, 0])\n        y_arr = y_arr_0 + (1.0 / 6.0) * dt * (k2[0, :, 1] + 2.0 * k2[1, :, 1] + 2.0 * k2[2, :, 1] + k2[3, :, 1])\n\n        t = t + dt\n\n\n# Plottings\n# ##############################\n# plotting parameters\nperiod = 10.0\nend_time = 3.0 * period\nnstep_per_image = 10\nplot_points_num = 10\npadded_percent = 0.05\n\n# create figure\nfig, ax = plt.subplots(1, 2)\nfig.suptitle(\"%d-body Simulation with \" % (N) + use_algorithm[1], fontsize=16)\n\nax[0].set_xlim(-1.5, +1.5)\nax[0].set_ylim(-1.5, +1.5)\nball, = ax[0].plot([], [], 'ro', ms=10)\ntext = ax[0].text(0.0, 1.3, '', fontsize=12, color='black', ha='center', va='center')\nax[0].set_xlabel('x')\nax[0].set_ylabel('y')\nax[0].set_aspect('equal')\nax[0].tick_params(top=True, right=True, labeltop=True, labelright=True)\n# ax[0].add_artist(plt.Circle((0.0, 0.0), r, color='b', fill=False))\n\nerror_plot, = ax[1].plot([], [], 'b.-')\nax[1].set_title(\"Error=\" + r'$\\frac{E-E0}{E0}$', fontsize=14)\nax[1].set_ylim(-0.01, 0.01)\nax[1].set_xlim(0.0, dt * plot_points_num)\nax[1].set_xlabel('time')\n\nerror_array = np.array([])\ntime_array = np.array([])\n\ntotal_time = 0.0\n\n\ndef init():\n    ball.set_data([], [])\n    text.set(text='')\n    error_plot.set_data([], [])\n\n    return ball, text\n\n\ndef update_orbit(ii):\n    global t, x_arr, y_arr, vx_arr, vy_arr, error_array, time_array, total_time\n\n    for step in range(nstep_per_image):\n        if use_algorithm[0] == 1:\n            start_time = time.time()\n            KDK(t, t + dt)\n            end_time = time.time()\n        elif use_algorithm[0] == 2:\n            start_time = time.time()\n            DKD(t, t + dt)\n            end_time = time.time()\n        else:\n            start_time = time.time()\n            RK4(t, t + dt)\n            end_time = time.time()\n\n        total_time = total_time + (end_time - start_time)\n\n        if (t >= end_time):\n            break\n\n#   calculate energy error\n    E = (0.5 * mass_arr * (vx_arr**2 + vy_arr**2)).sum()\n    for i in range(N):\n        for j in range(i + 1, N):\n            distance = np.sqrt((x_arr[i] - x_arr[j])**2 + (y_arr[i] - y_arr[j])**2)\n            potential = - G * mass_arr[i] * mass_arr[j] / distance\n            E = E + potential\n\n    err = (E - E0) / E0\n\n    error_array = np.append(error_array, err)\n    print(err)\n\n    time_array = np.append(time_array, t)\n\n#   update plot\n    ball.set_data(x_arr, y_arr)\n    text.set(text='t/T = %6.3f, error = %10.3e' % (t / period, err))\n    error_plot.set_data(time_array, error_array)\n\n#   plot error settings\n    padded = (np.max(error_array) - np.min(error_array)) * padded_percent\n    error_plot.axes.set_xlim(np.min(time_array), np.max(time_array))\n    error_plot.axes.set_ylim(np.min(error_array) - padded, np.max(error_array) + padded)\n\n#   plot simulation settings\n    # if (np.max(x_arr) - np.min(x_arr)) > (np.max(y_arr) - np.min(y_arr)):\n    #     padded = (np.max(x_arr) - np.min(x_arr)) * padded_percent\n    #     ball.axes.set_xlim(np.min(x_arr) - padded, np.max(x_arr) + padded)\n    #     
ball.axes.set_ylim(y_arr.sum() - (0.5 * (np.max(x_arr) - np.min(x_arr)) + padded), y_arr.sum() + (0.5 * (np.max(x_arr) - np.min(x_arr)) + padded))\n    # else:\n    #     padded = (np.max(y_arr) - np.min(y_arr)) * padded_percent\n    #     ball.axes.set_ylim(np.min(y_arr) - padded, np.max(y_arr) + padded)\n    #     ball.axes.set_xlim(x_arr.sum() - (0.5 * (np.max(x_arr) - np.min(x_arr)) + padded), x_arr.sum() + (0.5 * (np.max(x_arr) - np.min(x_arr)) + padded))\n\n    return ball, text, error_plot\n\n\n# create movie\nnframe = int(np.ceil(end_time / (nstep_per_image * dt)))\nanim = animation.FuncAnimation(fig, func=update_orbit, init_func=init,\n                               frames=nframe, interval=10, repeat=False)\nplt.show()\n\n# Save the error as txt file\nnp.savetxt(\"./result/error_\" + use_algorithm[1] + \".txt\", error_array)\nnp.savetxt(\"./result/time.txt\", time_array)\n\nprint(\"time used in %s = %f\" % (use_algorithm[1], total_time))\n","sub_path":"Homework/hw5/N-body.py","file_name":"N-body.py","file_ext":"py","file_size_in_byte":11198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
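The kick steps in the N-body record above accumulate the pairwise gravitational accelerations with a double Python loop, which costs O(N^2) interpreted iterations per force evaluation. A minimal vectorized sketch of the same sum with NumPy broadcasting, assuming the flat x/y/mass array layout used there (the optional eps softening parameter is an addition for illustration, not part of the original):

import numpy as np

def accelerations(x, y, mass, G=1.0, eps=0.0):
    # pairwise separations: dx[i, j] = x[j] - x[i]
    dx = x[None, :] - x[:, None]
    dy = y[None, :] - y[:, None]
    r2 = dx**2 + dy**2 + eps**2
    np.fill_diagonal(r2, 1.0)        # dummy value; the diagonal is zeroed below
    inv_r3 = r2**-1.5
    np.fill_diagonal(inv_r3, 0.0)    # a body exerts no force on itself
    # a_i = G * sum_j m_j * (r_j - r_i) / |r_j - r_i|^3, as in the loops above
    ax = G * np.sum(mass[None, :] * dx * inv_r3, axis=1)
    ay = G * np.sum(mass[None, :] * dy * inv_r3, axis=1)
    return ax, ay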
+{"seq_id":"199231288","text":"from __future__ import absolute_import, print_function\n\nimport json\nimport os\nimport logging\nimport gzip\nimport solr\nfrom elasticsearch import Elasticsearch, helpers\n\n\nfrom topik.utils import batch_concat\n\nlogging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)\n\n\ndef iter_document_json_stream(filename, field):\n    \"\"\"Iterate over a json stream of items and get the field that contains the text to process and tokenize.\n\n    Parameters\n    ----------\n    filename: string\n        The filename of the json stream.\n\n    field: string\n        The field name that contains the text that needs to be processed\n\n    $ head -n 2 ./topik/tests/data/test-data-1\n        {\"id\": 1, \"topic\": \"interstellar film review\", \"text\":\"'Interstellar' was incredible. The visuals, the score...\"}\n        {\"id\": 2, \"topic\": \"big data\", \"text\": \"Big Data are becoming a new technology focus both in science and in...\"}\n    >>> document = iter_document_json_stream('./topik/tests/test-data-1.json', \"text\")\n    >>> next(document)[1]\n    [u\"'Interstellar' was incredible. The visuals, the score, the acting, were all amazing. The plot is definitely one\n    of the most original I've seen in a while.\"]\n\n    \"\"\"\n    with open(filename, 'r') as f:\n        for n, line in enumerate(f):\n            try:\n                dictionary = json.loads(line)\n                content = dictionary.get(field)\n                id = \"%s/%s[%d]\" % (filename, field, n)\n                yield id, content\n            except ValueError:\n                logging.warning(\"Unable to process line: %s\" %\n                                str(line))\n\n\ndef iter_documents_folder(folder):\n    \"\"\"Iterate over the files in a folder to retrieve the content to process and tokenize.\n\n    Parameters\n    ----------\n    folder: string\n        The folder containing the files you want to analyze.\n\n    $ ls ./topik/tests/test-data-folder\n        doc1  doc2  doc3\n    >>> doc_text = iter_documents_folder('./topik/tests/test-data-1.json')\n    >>> fullpath, content = next(doc_text)\n    >>> content\n    [u\"'Interstellar' was incredible. The visuals, the score, the acting, were all amazing. 
The plot is definitely one\n    of the most original I've seen in a while.\"]\n\n    \"\"\"\n    for directory, subdirectories, files in os.walk(folder):\n        for file in files:\n            _open = gzip.open if file.endswith('.gz') else open\n            try:\n                fullpath = os.path.join(directory, file)\n                with _open(fullpath, 'rb') as f:\n                    yield fullpath, f.read().decode('utf-8')\n            except (ValueError, UnicodeDecodeError) as err:\n                logging.warning(\"Unable to process file: %s (%s)\" % (fullpath, err))\n\n\ndef iter_large_json(json_file, prefix_value, event_value):\n    import ijson\n\n    parser = ijson.parse(open(json_file))\n\n    for prefix, event, value in parser:\n        # For Flowdock data ('item.content', 'string')\n        if (prefix, event) == (prefix_value, event_value):\n            yield \"%s/%s\" % (prefix, event), value\n\n\ndef iter_solr_query(solr_instance, field, query=\"*:*\"):\n    s = solr.SolrConnection(solr_instance)\n    response = s.query(query)\n    return batch_concat(response, field,  content_in_list=False)\n\n\ndef iter_elastic_query(instance, index, field, subfield=None):\n    es = Elasticsearch(instance)\n\n    # initial search\n    resp = es.search(index, body={\"query\": {\"match_all\": {}}}, scroll='5m')\n\n    scroll_id = resp.get('_scroll_id')\n    if scroll_id is None:\n        return\n\n    while True:\n        for hit in resp['hits']['hits']:\n            s = hit['_source']\n            try:\n                if subfield is not None:\n                    yield \"%s/%s\" % (field, subfield), s[field][subfield]\n                else:\n                    yield field, s[field]\n            except (KeyError, ValueError):\n                    logging.warning(\"Unable to process row: %s\" %\n                                    str(hit))\n\n        scroll_id = resp.get('_scroll_id')\n        # end of scroll\n        if scroll_id is None or not resp['hits']['hits']:\n            break\n        # fetch the next batch; without this call the loop would replay the first page forever\n        resp = es.scroll(scroll_id=scroll_id, scroll='5m')\n","sub_path":"topik/readers.py","file_name":"readers.py","file_ext":"py","file_size_in_byte":4111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
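The reader record above pages through Elasticsearch results by hand; the `helpers` module it already imports ships a `scan()` generator that wraps the same scroll bookkeeping. A sketch of the simpler equivalent (the function name is made up and the `subfield` handling is omitted):

from elasticsearch import Elasticsearch, helpers

def iter_elastic_query_scan(instance, index, field):
    es = Elasticsearch(instance)
    # helpers.scan issues the initial search plus all follow-up scroll calls internally
    for hit in helpers.scan(es, index=index, query={"query": {"match_all": {}}}, scroll='5m'):
        source = hit['_source']
        if field in source:
            yield field, source[field]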
+{"seq_id":"636806309","text":"from greenlet import greenlet\nimport time\n\n\ndef work1():\n    for i in range(10):\n        print('work1.....')\n        gr2.switch(10,'ab')\n        time.sleep(0.1)\n\n\ndef work2(num,ab):\n    for i in range(num):\n        print('work2.....%s' % ab)\n        gr1.switch()\n        time.sleep(0.1)\n\n\nif __name__ == '__main__':\n    gr1 = greenlet(work1)\n    gr2 = greenlet(work2)\n    gr1.switch()\n","sub_path":"test/my_greenlet.py","file_name":"my_greenlet.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"185459495","text":"from tkinter import *\nfrom tkinter.simpledialog import *\nfrom tkinter.filedialog import *\nimport math\nimport os\nimport os.path\nfrom PIL import Image, ImageFilter, ImageEnhance, ImageOps\nimport colorsys\nimport time\nimport pymysql\n# 파일을 선택해서 메모리로 로딩하는 함수\n\n###################################################################\n# 전역변수 설정\n###################################################################\nIP_ADDR = '192.168.56.109'; USER_NAME = 'root'; USER_PASS = '1234'\nDB_NAME = 'BigData_DB'; CHAR_SET = 'utf8'\n\n\n\n####################\n# 메모리를 할당해서 리스트(참조)를 반환하는 함수\ndef malloc(h, w, initValue=0) :\n    retMemory= []\n    for _ in range(h) :\n        tmpList = []\n        for _ in range(w) :\n            tmpList.append(initValue)\n        retMemory.append(tmpList)\n    return retMemory\n\n\n# 파일을 메모리로 로딩하는 함수\ndef loadImageColor(fname) :\n    global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW\n    global photo\n    inImage = []\n    photo = Image.open(fname) # PIL 객체\n    inW = photo.width; inH=photo.height\n    ## 메모리 확보\n    for _ in range(3) :\n        inImage.append(malloc(inH, inW))\n\n    photoRGB = photo.convert('RGB')\n    for i in range(inH) :\n        for k in range(inW) :\n            r, g, b = photoRGB.getpixel((k,i))\n            inImage[R][i][k] = r\n            inImage[G][i][k] = g\n            inImage[B][i][k] = b\n\ndef openImageColor() :\n    global window, canvas, paper, filename, inImage, outImage,inH, inW, outH, outW\n    filename = askopenfilename(parent=window,\n                filetypes=((\"칼라 파일\", \"*.jpg;*.png;*.bmp;*.tif\"), (\"모든 파일\", \"*.*\")))\n    if filename == '' or filename == None :\n        return\n    loadImageColor(filename)\n    equalImageColor()\n\n    displayImageColor()\n\ndef displayImageColor() :\n    global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW\n    if canvas != None : # 예전에 실행한 적이 있다.\n        canvas.destroy()\n    VIEW_X = outW;    VIEW_Y = outH;   step = 1\n\n    window.geometry(str(int(VIEW_X*1.2)) + 'x' + str(int(VIEW_Y*1.2)))  # 벽\n    canvas = Canvas(window, height=VIEW_Y, width=VIEW_X)\n    paper = PhotoImage(height=VIEW_Y, width=VIEW_X)\n    canvas.create_image((VIEW_X // 2, VIEW_Y // 2), image=paper, state='normal')\n\n    import numpy\n    rgbStr = '' # 전체 픽셀의 문자열을 저장\n    for i in numpy.arange(0,outH, step) :\n        tmpStr = ''\n        for k in numpy.arange(0,outW, step) :\n            i = int(i); k = int(k)\n            r , g, b = outImage[R][i][k], outImage[G][i][k], outImage[B][i][k]\n            tmpStr += ' #%02x%02x%02x' % (r,g,b)\n        rgbStr += '{' + tmpStr + '} '\n    paper.put(rgbStr)\n\n    canvas.pack(expand=1, anchor=CENTER)\n    canvas.bind('', mouseClickColor)\n    canvas.bind('', mouseDropColor)\n    status.configure(text='이미지 정보:' + str(outW) + 'x' + str(outH))\n\ndef saveImagePIL():\n    global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW\n    # if outImage == None :\n    #     return\n    # saveFp = asksaveasfile(parent=window, mode='wb',\n    #                        defaultextension='*.jpg', filetypes=((\"JPG 파일\", \"*.jpg\"), (\"모든 파일\", \"*.*\")))\n    # if saveFp == '' or saveFp == None:\n    #     return\n    # outImage.save(saveFp.name)\n    # print('Save~')\n\n###############################################\n##### 컴퓨터 비전(영상처리) 알고리즘 함수 모음 #####\n###############################################\n# 동일영상 알고리즘\ndef  equalImageColor() :\n    global window, canvas, paper, 
filename, inImage, outImage, inH, inW, outH, outW\n    ## 중요! 코드. 출력영상 크기 결정 ##\n    outH = inH;  outW = inW;\n    ## 메모리 확보\n    outImage = []\n    for _ in range(3):\n        outImage.append(malloc(outH, outW))\n    ############################\n    ### 진짜 컴퓨터 비전 알고리즘 ###\n    for RGB in range(3) :\n        for i in range(inH) :\n            for k in range(inW) :\n                outImage[RGB][i][k] = inImage[RGB][i][k]\n    #############################\n    displayImageColor()\n\n\ndef addImageColor() :\n    global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW\n    ## 중요! 코드. 출력영상 크기 결정 ##\n    outH = inH;  outW = inW;\n    ## 메모리 확보\n    outImage = []\n    for _ in range(3):\n        outImage.append(malloc(outH, outW))\n    ############################\n    ### 진짜 컴퓨터 비전 알고리즘 ###\n    value = askinteger(\"밝게/어둡게\", \"값-->\", minvalue=-255, maxvalue=255)\n    for RGB in range(3) :\n        for i in range(inH) :\n            for k in range(inW) :\n                if inImage[RGB][i][k] + value > 255 :\n                    outImage[RGB][i][k] = 255\n                elif inImage[RGB][i][k] + value < 0 :\n                    outImage[RGB][i][k] = 0\n                else :\n                    outImage[RGB][i][k] = inImage[RGB][i][k] + value\n    #############################\n    displayImageColor()\n\ndef revImageColor() :\n    global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW\n    ## 중요! 코드. 출력영상 크기 결정 ##\n    outH = inH;\n    outW = inW;\n    ## 메모리 확보\n    outImage = []\n    for _ in range(3):\n        outImage.append(malloc(outH, outW))\n    ############################\n    ### 진짜 컴퓨터 비전 알고리즘 ###\n    for RGB in range(3):\n        for i in range(inH):\n            for k in range(inW):\n                outImage[RGB][i][k] = 255 - inImage[RGB][i][k]\n    #############################\n    displayImageColor()\n\n\n\n# 이진화 알고리즘\ndef  bwImageColor() :\n    global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW\n    ## 중요! 코드. 출력영상 크기 결정 ##\n    outH = inH;  outW = inW;\n    ###### 메모리 할당 ################\n    outImage = []\n    for _ in range(3):\n        outImage.append(malloc(outH, outW))\n    ####### 진짜 컴퓨터 비전 알고리즘 #####\n    ## 영상의 평균 구하기.\n    sum = []\n    for RGB in range(3):\n        sum.append(0)\n        for i in range(inH) :\n            for k in range(inW) :\n                sum[RGB] += inImage[RGB][i][k]\n    avg = [s // (inW * inH) for s in sum]\n\n    for RGB in range(3):\n        for i in range(inH) :\n            for k in range(inW) :\n                if inImage[RGB][i][k] > avg[RGB] :\n                    outImage[RGB][i][k] = 255\n                else :\n                    outImage[RGB][i][k] = 0\n\n    displayImageColor()\n\n\ndef paraImageColor() :\n    global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW\n    ## 중요! 코드. 
출력영상 크기 결정 ##\n    outH = inH;\n    outW = inW;\n    ## 메모리 확보\n    outImage = []\n    for _ in range(3):\n        outImage.append(malloc(outH, outW))\n    ############################\n    ### 진짜 컴퓨터 비전 알고리즘 ###\\\n    LUT = [0 for _ in range(256)]\n    for input in range(256):\n        LUT[input] = int(255 - 255 * math.pow(input / 128 - 1, 2))\n\n    for RGB in range(3):\n        for i in range(inH):\n            for k in range(inW):\n                outImage[RGB][i][k] = LUT[inImage[RGB][i][k]]\n    #############################\n    displayImageColor()\n\ndef morphImageColor() :\n    global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW\n    ## 중요! 코드. 출력영상 크기 결정 ##\n    outH = inH;\n    outW = inW;\n    ## 추가 영상 선택\n    filename2 = askopenfilename(parent=window,\n                               filetypes=((\"칼라 파일\", \"*.jpg;*.png;*.bmp;*.tif\"), (\"모든 파일\", \"*.*\")))\n    if filename2 == '' or filename2 == None:\n        return\n    inImage2 = []\n    photo2 = Image.open(filename2) # PIL 객체\n    inW2 = photo2.width; inH2=photo2.height\n    ## 메모리 확보\n    for _ in range(3) :\n        inImage2.append(malloc(inH2, inW2))\n\n    photoRGB2 = photo2.convert('RGB')\n    for i in range(inH2) :\n        for k in range(inW2) :\n            r, g, b = photoRGB2.getpixel((k,i))\n            inImage2[R][i][k] = r\n            inImage2[G][i][k] = g\n            inImage2[B][i][k] = b\n\n    ## 메모리 확보\n    outImage = []\n    for _ in range(3):\n        outImage.append(malloc(outH, outW))\n\n    import threading\n    import time\n    def morpFunc():\n        w1 = 1;\n        w2 = 0\n        for _ in range(20):\n            for RGB in range(3) :\n                for i in range(inH):\n                    for k in range(inW):\n                        newValue = int(inImage[RGB][i][k] * w1 + inImage2[RGB][i][k] * w2)\n                        if newValue > 255:\n                            newValue = 255\n                        elif newValue < 0:\n                            newValue = 0\n                        outImage[RGB][i][k] = newValue\n            displayImageColor()\n            w1 -= 0.05;\n            w2 += 0.05\n            time.sleep(0.5)\n\n    threading.Thread(target=morpFunc).start()\n\n\n# 상하반전 알고리즘\ndef  upDownImageColor() :\n    global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW\n    ## 중요! 코드. 출력영상 크기 결정 ##\n    outH = inH;  outW = inW;\n    ###### 메모리 할당 ################\n    outImage = [];\n    for _ in range(3):\n        outImage.append(malloc(outH, outW))\n    ####### 진짜 컴퓨터 비전 알고리즘 #####\n    for RGB in range(3) :\n        for i in range(inH) :\n            for k in range(inW) :\n                outImage[RGB][inH-i-1][k] = inImage[RGB][i][k]\n\n    displayImageColor()\n\n# 영상 축소 알고리즘\ndef  zoomOutImageColor() :\n    global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW\n    scale = askinteger(\"축소\", \"값-->\", minvalue=2, maxvalue=16)\n    ## 중요! 코드. 
출력영상 크기 결정 ##\n    outH = inH//scale;  outW = inW//scale;\n    ###### 메모리 할당 ################\n    outImage = [];\n    for _ in range(3):\n        outImage.append(malloc(outH, outW))\n    ####### 진짜 컴퓨터 비전 알고리즘 #####\n    for RGB in range(3) :\n        for i in range(outH) :\n            for k in range(outW) :\n                outImage[RGB][i][k] = inImage[RGB][i*scale][k*scale]\n\n    displayImageColor()\n\n# 영상 확대 알고리즘\ndef  zoomInImageColor() :\n    global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW\n    scale = askinteger(\"확대\", \"값-->\", minvalue=2, maxvalue=8)\n    ## 중요! 코드. 출력영상 크기 결정 ##\n    outH = inH*scale;  outW = inW*scale;\n    ###### 메모리 할당 ################\n    outImage = [];\n    for _ in range(3):\n        outImage.append(malloc(outH, outW))\n    ####### 진짜 컴퓨터 비전 알고리즘 #####\n    for RGB in range(3) :\n        for i in range(outH) :\n            for k in range(outW) :\n                outImage[RGB][i][k] = inImage[RGB][i//scale][k//scale]\n\n    displayImageColor()\n\n\n\n\n# 영상 축소 알고리즘 (평균변환)\ndef  zoomOutImage2Color() :\n    global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW\n    scale = askinteger(\"축소\", \"값-->\", minvalue=2, maxvalue=16)\n    ## 중요! 코드. 출력영상 크기 결정 ##\n    outH = inH//scale;  outW = inW//scale;\n    ###### 메모리 할당 ################\n    outImage = []\n    for _ in range(3):\n        outImage.append(malloc(outH, outW))\n    ####### 진짜 컴퓨터 비전 알고리즘 #####\n    for RGB in range(3):\n        for i in range(inH) :\n            for k in range(inW) :\n                outImage[RGB][i//scale][k//scale] += inImage[RGB][i][k]\n        for i in range(outH):\n            for k in range(outW):\n                outImage[RGB][i][k] //= (scale*scale)\n\n    displayImageColor()\n\n\n# 영상 확대 알고리즘 (양선형 보간)\ndef  zoomInImage2Color() :\n    global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW\n    scale = askinteger(\"확대\", \"값-->\", minvalue=2, maxvalue=8)\n    ## 중요! 코드. 출력영상 크기 결정 ##\n    outH = inH*scale;  outW = inW*scale;\n    ###### 메모리 할당 ################\n    outImage = []\n    for _ in range(3):\n        outImage.append(malloc(outH, outW))\n    ####### 진짜 컴퓨터 비전 알고리즘 #####\n    rH, rW, iH, iW = [0] * 4 # 실수위치 및 정수위치\n    x, y = 0, 0 # 실수와 정수의 차이값\n    C1,C2,C3,C4 = [0] * 4 # 결정할 위치(N)의 상하좌우 픽셀\n    for RGB in range(3):\n        for i in range(outH) :\n            for k in range(outW) :\n                rH = i / scale ; rW = k / scale\n                iH = int(rH) ;  iW = int(rW)\n                x = rW - iW; y = rH - iH\n                if 0 <= iH < inH-1 and 0 <= iW < inW-1 :\n                    C1 = inImage[RGB][iH][iW]\n                    C2 = inImage[RGB][iH][iW+1]\n                    C3 = inImage[RGB][iH+1][iW+1]\n                    C4 = inImage[RGB][iH+1][iW]\n                    newValue = C1*(1-y)*(1-x) + C2*(1-y)* x+ C3*y*x + C4*y*(1-x)\n                    outImage[RGB][i][k] = int(newValue)\n\n    displayImageColor()\n\n\n\n# 영상 회전 알고리즘\ndef  rotateImageColor() :\n    global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW\n    angle = askinteger(\"회전\", \"값-->\", minvalue=1, maxvalue=360)\n    ## 중요! 코드. 
출력영상 크기 결정 ##\n    outH = inH;  outW = inW;\n    ###### 메모리 할당 ################\n    outImage = []\n    for _ in range(3):\n        outImage.append(malloc(outH, outW))\n    ####### 진짜 컴퓨터 비전 알고리즘 #####\n    radian = angle * math.pi / 180\n    for RGB in range(3):\n        for i in range(inH) :\n            for k in range(inW) :\n                xs = i ; ys = k;\n                xd = int(math.cos(radian) * xs - math.sin(radian) * ys)\n                yd = int(math.sin(radian) * xs + math.cos(radian) * ys)\n                if 0<= xd < inH and 0 <= yd < inW :\n                    outImage[RGB][xd][yd] = inImage[RGB][i][k]\n\n    displayImageColor()\n\n\n\n# 영상 회전 알고리즘 - 중심, 역방향\ndef  rotateImage2Color() :\n    global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW\n    angle = askinteger(\"회전\", \"값-->\", minvalue=1, maxvalue=360)\n    ## 중요! 코드. 출력영상 크기 결정 ##\n    outH = inH;  outW = inW;\n    ###### 메모리 할당 ################\n    outImage = []\n    for _ in range(3):\n        outImage.append(malloc(outH, outW))\n    ####### 진짜 컴퓨터 비전 알고리즘 #####\n    radian = angle * math.pi / 180\n    cx = inW//2; cy = inH//2\n    for RGB in range(3):\n        for i in range(outH) :\n            for k in range(outW) :\n                xs = i ; ys = k;\n                xd = int(math.cos(radian) * (xs-cx) - math.sin(radian) * (ys-cy)) + cx\n                yd = int(math.sin(radian) * (xs-cx) + math.cos(radian) * (ys-cy)) + cy\n                if 0<= xd < outH and 0 <= yd < outW :\n                    outImage[RGB][xs][ys] = inImage[RGB][xd][yd]\n                else :\n                    outImage[RGB][xs][ys] = 255\n\n    displayImageColor()\n\n\n## 엠보싱 처리\ndef  embossImageRGB():\n    global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW\n    ## 중요! 코드. 
출력영상 크기 결정 ##\n    outH = inH;  outW = inW;\n    ###### 메모리 할당 ################\n    outImage = []\n    for _ in range(3):\n        outImage.append(malloc(outH, outW))\n    ####### 진짜 컴퓨터 비전 알고리즘 #####\n    MSIZE = 3\n    mask = [ [-1, 0, 0],\n             [ 0, 0, 0],\n             [ 0, 0, 1] ]\n    ## 임시 입력영상 메모리 확보\n    tmpInImage = []\n    tmpOutImage = []\n    for _ in range(3):\n        tmpInImage.append(malloc(inH + MSIZE - 1, inW + MSIZE - 1, 127))\n        tmpOutImage.append(malloc(outH, outW))\n    ## 원 입력 --> 임시 입력\n    for RGB in range(3):\n        for i in range(inH) :\n            for k in range(inW) :\n                tmpInImage[RGB][i+MSIZE//2][k+MSIZE//2] = inImage[RGB][i][k]\n        ## 회선연산\n        for i in range(MSIZE//2, inH + MSIZE//2) :\n            for k in range(MSIZE//2, inW + MSIZE//2) :\n                # 각 점을 처리.\n                S = 0.0\n                for m in range(0, MSIZE) :\n                    for n in range(0, MSIZE) :\n                        S += mask[m][n]*tmpInImage[RGB][i+m-MSIZE//2][k+n-MSIZE//2]\n                tmpOutImage[RGB][i-MSIZE//2][k-MSIZE//2] = S\n        ## 127 더하기 (선택)\n        for i in range(outH) :\n            for k in range(outW) :\n                tmpOutImage[RGB][i][k] += 127\n        ## 임시 출력 --> 원 출력\n        for i in range(outH):\n            for k in range(outW):\n                value = tmpOutImage[RGB][i][k]\n                if value > 255 :\n                    value = 255\n                elif value < 0 :\n                    value = 0\n                outImage[RGB][i][k] = int(value)\n\n    displayImageColor()\n\n\n# 히스토그램\nimport matplotlib.pyplot as plt\ndef  histoImageColor() :\n    global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW\n    inCountList = [[0] * 256 for _ in range(3)]\n    outCountList = [[0] * 256 for _ in range(3)]\n\n    for RGB in range(3):\n        for i in range(inH) :\n            for k in range(inW) :\n                inCountList[RGB][inImage[RGB][i][k]] += 1\n        for i in range(outH) :\n            for k in range(outW) :\n                outCountList[RGB][outImage[RGB][i][k]] += 1\n\n    plt.plot(inCountList[R], \"r-\")\n    plt.plot(inCountList[G], \"g-\")\n    plt.plot(inCountList[B], \"b-\")\n    plt.legend([\"R\", \"G\", \"B\"])\n    plt.show()\n\n\ndef  histoImage2Color() :\n    global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW\n    outCountList = [[0] * 256 for _ in range(3)]\n    normalCountList = [[0] * 256 for _ in range(3)]\n    # 빈도수 계산\n    for RGB in range(3):\n        for i in range(outH) :\n            for k in range(outW) :\n                outCountList[RGB][outImage[RGB][i][k]] += 1\n        maxVal = max(outCountList[RGB]); minVal = min(outCountList[RGB])\n        High = 256\n        # 정규화 = (카운트값 - 최소값) * High / (최대값 - 최소값)\n        for i in range(len(outCountList[RGB])) :\n            normalCountList[RGB][i] = (outCountList[RGB][i] - minVal) * High  / (maxVal-minVal)\n\n    ## 서브 윈도창 생성 후 출력\n    subWindow = Toplevel(window)\n    subWindow.geometry('%dx%d' % (256*3, 256))\n    subCanvas = Canvas(subWindow, width=256*3, height=256)\n    subPaper = PhotoImage(width=256*3, height=256)\n    subCanvas.create_image((256*3 // 2, 256 // 2), image=subPaper, state='normal')\n    for RGB in range(3):\n        for i in range(len(normalCountList[RGB])) :\n            for k in range(int(normalCountList[RGB][i])) :\n                # data= 0\n                # dataWhite = 255\n                if RGB == R:\n                    
subPaper.put('#d62719', (256*RGB + i, 255-k))\n                elif RGB == G:\n                    subPaper.put('#4fc34e', (256*RGB + i, 255-k))\n                elif RGB == B:\n                    subPaper.put('#1948b4', (256*RGB + i, 255-k))\n    subCanvas.pack(expand=1, anchor=CENTER)\n    subWindow.mainloop()\n\n\n\n# 스트레칭 알고리즘\ndef  stretchImageColor() :\n    global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW\n    ## 중요! 코드. 출력영상 크기 결정 ##\n    outH = inH;  outW = inW;\n    ###### 메모리 할당 ################\n    outImage = []\n    for _ in range(3):\n        outImage.append(malloc(outH, outW))\n    ####### 진짜 컴퓨터 비전 알고리즘 #####\n    for RGB in range(3):\n        maxVal = minVal = inImage[RGB][0][0]\n        for i in range(inH) :\n            for k in range(inW) :\n                if inImage[RGB][i][k] < minVal :\n                    minVal = inImage[RGB][i][k]\n                elif inImage[RGB][i][k] > maxVal :\n                    maxVal = inImage[RGB][i][k]\n        for i in range(inH) :\n            for k in range(inW) :\n                outImage[RGB][i][k] = int(((inImage[RGB][i][k] - minVal) / (maxVal - minVal)) * 255)\n\n    displayImageColor()\n\n\n\n# 스트레칭 알고리즘\ndef  endinImageColor() :\n    global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW\n    ## 중요! 코드. 출력영상 크기 결정 ##\n    outH = inH;  outW = inW;\n    ###### 메모리 할당 ################\n    outImage = []\n    for _ in range(3):\n        outImage.append(malloc(outH, outW))\n    ####### 진짜 컴퓨터 비전 알고리즘 #####\n    minAdd = askinteger(\"최소\", \"최소에서추가-->\", minvalue=0, maxvalue=255)\n    maxAdd = askinteger(\"최대\", \"최대에서감소-->\", minvalue=0, maxvalue=255)\n    for RGB in range(3):\n        maxVal = minVal = inImage[RGB][0][0]\n        for i in range(inH) :\n            for k in range(inW) :\n                if inImage[RGB][i][k] < minVal :\n                    minVal = inImage[RGB][i][k]\n                elif inImage[RGB][i][k] > maxVal :\n                    maxVal = inImage[RGB][i][k]\n\n        minVal += minAdd\n        maxVal -= maxAdd\n\n        for i in range(inH) :\n            for k in range(inW) :\n                value = int(((inImage[RGB][i][k] - minVal) / (maxVal - minVal)) * 255)\n                if value < 0 :\n                    value = 0\n                elif value > 255 :\n                    value = 255\n                outImage[RGB][i][k] = value\n\n    displayImageColor()\n\n\n\n# 평활화 알고리즘\ndef  equalizeImageColor() :\n    global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW\n    ## 중요! 코드. 
출력영상 크기 결정 ##\n    outH = inH;  outW = inW;\n    ###### 메모리 할당 ################\n    outImage = []\n    for _ in range(3):\n        outImage.append(malloc(outH, outW))\n    ####### 진짜 컴퓨터 비전 알고리즘 #####\n    for RGB in range(3):\n        histo = [0] * 256; sumHisto = [0]*256; normalHisto = [0] * 256\n        ## 히스토그램\n        for i in range(inH) :\n            for k in range(inW) :\n                histo[inImage[RGB][i][k]] += 1\n        ## 누적히스토그램\n        sValue = 0\n        for i in range(len(histo)) :\n            sValue += histo[i]\n            sumHisto[i] = sValue\n        ## 정규화 누적 히스토그램\n        for i in range(len(sumHisto)):\n            normalHisto[i] = int(sumHisto[i] / (inW*inH) * 255)\n        ## 영상처리\n        for i in range(inH) :\n            for k in range(inW) :\n                outImage[RGB][i][k] = normalHisto[inImage[RGB][i][k]]\n    displayImageColor()\n\n\n# 화면이동 알고리즘\ndef moveImageColor() :\n    global panYN\n    panYN = True\n    canvas.configure(cursor='mouse')\n\n\ndef mouseClickColor(event) :\n    global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW\n    global sx,sy,ex,ey, panYN\n    if panYN == False :\n        return\n    sx = event.x; sy = event.y\n\n\ndef mouseDropColor(event) :\n    global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW\n    global sx, sy, ex, ey, panYN\n    if panYN == False :\n        return\n    ex = event.x;    ey = event.y\n    ## 중요! 코드. 출력영상 크기 결정 ##\n    outH = inH;  outW = inW;\n    ###### 메모리 할당 ################\n    outImage = []\n    for _ in range(3):\n        outImage.append(malloc(outH, outW))\n    ####### 진짜 컴퓨터 비전 알고리즘 #####\n    mx = sx - ex; my = sy - ey\n    for RGB in range(3):\n        for i in range(inH) :\n            for k in range(inW) :\n                if  0 <= i-my < outH and 0 <= k-mx < outW :\n                    outImage[RGB][i-my][k-mx] = inImage[RGB][i][k]\n    panYN = False\n    displayImageColor()\n\n\n#\n# ## 임시 경로에 outImage를 저장하기.\n# import random\n# import struct\n# def saveTempImage() :\n#     global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW\n#     import tempfile\n#     saveFp = tempfile.gettempdir() + \"/\" + str(random.randint(10000, 99999)) + \".raw\"\n#     if saveFp == '' or saveFp == None :\n#         return\n#     print(saveFp)\n#     saveFp = open(saveFp, mode='wb')\n#     for i in range(outH) :\n#         for k in range(outW) :\n#             for RGB in range(3):\n#                 saveFp.write(struct.pack('B', outImage[RGB][i][k]))\n#     saveFp.close()\n#     return saveFp\n#\n#\n# def findStatColor(fname) :\n#     # 파일 열고, 읽기.\n#     fsize = os.path.getsize(fname) # 파일의 크기(바이트)\n#     inH = inW = int(math.sqrt(fsize)) # 핵심 코드\n#     ## 입력영상 메모리 확보 ##\n#     inImage = []\n#     outImage = []\n#     avg = []\n#     maxVal = []\n#     minVal = []\n#     for _ in range(3):\n#         inImage.append(malloc(inH, inH))\n#     # 파일 --> 메모리\n#     with open(fname, 'rb') as rFp:\n#         for i in range(inH) :\n#             for k in range(inW) :\n#                 for RGB in range(3):\n#                     inImage[RGB][i][k] = int(ord(rFp.read(1)))\n#     for RGB in range(3):\n#         sum = 0\n#         for i in range(inH) :\n#             for k in range(inW) :\n#                 sum += inImage[RGB][i][k]\n#         avg.append(sum // (inW * inH))\n#         maxVal.append(inImage[RGB][0][0])\n#         minVal.append(inImage[RGB][0][0])\n#         for i in range(inH):\n#             for k in range(inW):\n#          
       if inImage[RGB][i][k] < minVal[RGB]:\n#                     minVal[RGB] = inImage[RGB][i][k]\n#                 elif inImage[RGB][i][k] > maxVal[RGB]:\n#                     maxVal[RGB] = inImage[RGB][i][k]\n#     return avg, maxVal, minVal\n#\n#\n# def saveMysqlColor() :\n#     global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW\n#     con = pymysql.connect(host=IP_ADDR, user=USER_NAME, password=USER_PASS,\n#                           db=DB_NAME, charset=CHAR_SET)\n#     cur = con.cursor()\n#\n#     try:\n#         sql = '''\n#                 CREATE TABLE rawImage_TBL (\n#                 raw_id INT AUTO_INCREMENT PRIMARY KEY,\n#                 raw_fname VARCHAR(30),\n#                 raw_extname CHAR(5),\n#                 raw_height SMALLINT, raw_width SMALLINT,\n#                 raw_avgR  TINYINT UNSIGNED ,\n#                 raw_avgG  TINYINT UNSIGNED ,\n#                 raw_avgB  TINYINT UNSIGNED ,\n#                 raw_maxR  TINYINT UNSIGNED,\n#                 raw_maxG  TINYINT UNSIGNED,\n#                 raw_maxB  TINYINT UNSIGNED,\n#                 raw_minR  TINYINT UNSIGNED,\n#                 raw_minG  TINYINT UNSIGNED,\n#                 raw_minB  TINYINT UNSIGNED,\n#                 raw_dataR LONGBLOB,\n#                 raw_dataG LONGBLOB,\n#                 raw_dataB LONGBLOB);\n#             '''\n#         cur.execute(sql)\n#     except:\n#         pass\n#\n#     ## outImage를 임시 폴더에 저장하고, 이걸 fullname으로 전달.\n#     fullname = saveTempImage()\n#     fullname = fullname.name\n#     with open(fullname, 'rb') as rfp:\n#         binData = rfp.read()\n#     for i in range(0, len(binData), 3):\n#         binDataR = binData[i+R]\n#         binDataG = binData[i+G]\n#         binDataB = binData[i+B]\n#\n#     fname, extname = os.path.basename(fullname).split(\".\")\n#     fsize = os.path.getsize(fullname)\n#     height = width = int(math.sqrt(fsize))\n#     avgVal, maxVal, minVal = findStatColor(fullname)  # 평균,최대,최소\n#     avgValR, avgValG, avgValB = avgVal\n#     maxValR, maxValG, maxValB = maxVal\n#     minValR, minValG, minValB = minVal\n#     sql = \"INSERT INTO rawImage_TBL(raw_id , raw_fname,raw_extname,\"\n#     sql += \"raw_height,raw_width,raw_avg,raw_max,raw_min,raw_data) \"\n#     sql += \" VALUES(NULL,'\" + fname + \"','\" + extname + \"',\"\n#     sql += str(height) + \",\" + str(width) + \",\"\n#     sql += str(avgValR) + \",\" + str(avgValG) + \",\" + str(avgValB) + \",\"\n#     sql += str(maxValR) + \",\" + str(maxValG) + \",\" + str(maxValB) + \",\"\n#     sql += str(minValR) + \",\" + str(minValG) + \",\" + str(minValB) + \",\"\n#     sql += \"%s, %s, %s )\"\n#     tupleData = (binDataR,binDataG, binDataB)\n#     cur.execute(sql, tupleData)\n#     con.commit()\n#     cur.close()\n#     con.close()\n#     os.remove(fullname)\n#     print(\"업로드 OK -->\" + fullname)\n#\n#\n# def loadMysqlColor() :\n#     global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW\n#     con = pymysql.connect(host=IP_ADDR, user=USER_NAME, password=USER_PASS,\n#                           db=DB_NAME, charset=CHAR_SET)\n#     cur = con.cursor()\n#     sql = \"SELECT raw_id, raw_fname, raw_extname, raw_height, raw_width \"\n#     sql += \"FROM rawImage_TBL\"\n#     cur.execute(sql)\n#\n#     queryList = cur.fetchall()\n#     rowList = [ ':'.join(map(str,row)) for row in queryList]\n#     import tempfile\n#     def selectRecord( ) :\n#         global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW\n#      
   selIndex = listbox.curselection()[0]\n#         subWindow.destroy()\n#         raw_id = queryList[selIndex][0]\n#         sql = \"SELECT raw_fname, raw_extname, raw_data FROM rawImage_TBL \"\n#         sql += \"WHERE raw_id = \" + str(raw_id)\n#         cur.execute(sql)\n#         fname, extname, binData = cur.fetchone()\n#\n#         fullPath = tempfile.gettempdir() + '/' + fname + \".\" + extname\n#         with open(fullPath, 'wb') as wfp:\n#             wfp.write(binData)\n#         cur.close()\n#         con.close()\n#\n#         loadImage(fullPath)\n#         equalImage()\n#\n#     ## 서브 윈도에 목록 출력하기.\n#     subWindow = Toplevel(window)\n#     listbox = Listbox(subWindow)\n#     button = Button(subWindow, text='선택', command = selectRecord)\n#\n#     for rowStr in rowList :\n#         listbox.insert(END, rowStr)\n#\n#     listbox.pack(expand=1, anchor=CENTER)\n#     button.pack()\n#     subWindow.mainloop()\n#\n#\n#     cur.close()\n#     con.close()\n\ndef embossImagePIL():\n    global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW\n    global photo\n    photo2 = photo.copy()\n    photo2 = photo2.filter(ImageFilter.EMBOSS)\n\n    outH = inH\n    outW = inW\n\n    ###### 메모리 할당 ################\n    outImage = []\n    for _ in range(3):\n        outImage.append(malloc(outH, outW))\n\n    ## 임시 출력 --> 원 출력\n    photoRGB2 = photo2.convert('RGB')\n    for i in range(outH):\n        for k in range(outW):\n            r, g, b = photoRGB2.getpixel((k, i))\n            outImage[R][i][k] = r\n            outImage[G][i][k] = g\n            outImage[B][i][k] = b\n\n    displayImageColor()\n\n\nimport colorsys\nsx, sy, ex, ey = [0]*4\ndef embossImageHSV():\n    global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW\n    global sx, sy, ex, ey\n    ## 이벤트 바인드\n    canvas.bind('<Button-3>', rightMouseClick_embossImageHSV)\n    canvas.bind('<Button-1>', leftMouseClick)\n    canvas.bind('<B1-Motion>', leftMouseMove)\n    canvas.bind('<ButtonRelease-1>', leftMouseDrop_embossImageHSV)\n\ndef rightMouseClick_embossImageHSV(event):\n    global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW\n    global sx, sy, ex, ey\n    sx = 0; sy = 0; ex = inH - 1; ey = inW - 1\n    ###############\n    __embossImageHSV()\n    ###############\n\nboxLine = None\n\ndef leftMouseClick(event):\n    global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW\n    global sx, sy, ex, ey, boxLine\n    # 드래그 시작 좌표 저장\n    sx = event.x; sy = event.y\n\n\ndef leftMouseMove(event):\n    global sx, sy, ex, ey, boxLine\n    ex = event.x; ey = event.y\n    if boxLine:\n        canvas.delete(boxLine)\n    boxLine = canvas.create_rectangle(sx, sy, ex, ey, fill=None)\n\ndef leftMouseDrop_embossImageHSV(event):\n    global sx, sy, ex, ey, boxLine\n    ex = event.x; ey = event.y\n    if boxLine:\n        canvas.delete(boxLine)\n        boxLine = None\n    __embossImageHSV()\n\ndef __embossImageHSV():\n    global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW\n    ## 입력 RGB --> 입력 HSV\n\n    # 메모리 확보\n    inImageHSV = []\n    for _ in range(3):\n        inImageHSV.append(malloc(inH, inW))\n    # RGB -> HSV\n    for i in range(inH):\n        for k in range(inW):\n            r, g, b = inImage[R][i][k], inImage[G][i][k], inImage[B][i][k]\n            h, s, v = colorsys.rgb_to_hsv(r/255, g/255, b/255)\n            inImageHSV[0][i][k], inImageHSV[1][i][k], inImageHSV[2][i][k] = h, s, v\n\n    outH = inH;  outW = inW\n    outImage = []\n    for _ in range(3):\n        outImage.append(malloc(outH, outW))\n    ####### 진짜 컴퓨터 비전 알고리즘 #####\n    MSIZE = 3\n    mask = [[-1, 0, 0],\n            [0, 0, 0],\n            [0, 0, 1]]\n    ## 임시 입력영상 메모리 확보\n    tmpInImage = []\n    tmpOutImage = []\n    for _ in range(3):\n        tmpInImage.append(malloc(inH + MSIZE - 1, inW + MSIZE - 1, 127))\n        tmpOutImage.append(malloc(outH, outW))\n    ## 원 입력 --> 임시 입력\n    for RGB in range(3):\n        for i in range(inH):\n            for k in range(inW):\n                tmpInImage[RGB][i + MSIZE // 2][k + MSIZE // 2] = 
inImage[RGB][i][k]\n        ## 회선연산\n        for i in range(MSIZE // 2, inH + MSIZE // 2):\n            for k in range(MSIZE // 2, inW + MSIZE // 2):\n                # 각 점을 처리.\n                S = 0.0\n                for m in range(0, MSIZE):\n                    for n in range(0, MSIZE):\n                        S += mask[m][n] * tmpInImage[RGB][i + m - MSIZE // 2][k + n - MSIZE // 2]\n                tmpOutImage[RGB][i - MSIZE // 2][k - MSIZE // 2] = S\n        ## 127 더하기 (선택)\n        for i in range(outH):\n            for k in range(outW):\n                tmpOutImage[RGB][i][k] += 127\n        ## 임시 출력 --> 원 출력\n        for i in range(outH):\n            for k in range(outW):\n                value = tmpOutImage[RGB][i][k]\n                if value > 255:\n                    value = 255\n                elif value < 0:\n                    value = 0\n                outImage[RGB][i][k] = int(value)\n\n    displayImageColor()\n\n\n\ndef  blurImageRGB():\n    global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW\n    ## 중요! 코드. 출력영상 크기 결정 ##\n    outH = inH;  outW = inW;\n    ###### 메모리 할당 ################\n    outImage = []\n    for _ in range(3):\n        outImage.append(malloc(outH, outW))\n    ####### 진짜 컴퓨터 비전 알고리즘 #####\n    MSIZE = 3\n    mask = [ [1/9, 1/9, 1/9],\n             [ 1/9, 1/9, 1/9],\n             [ 1/9, 1/9, 1/9] ]\n    ## 임시 입력영상 메모리 확보\n    tmpInImage = []\n    tmpOutImage = []\n    for _ in range(3):\n        tmpInImage.append(malloc(inH + MSIZE - 1, inW + MSIZE - 1, 127))\n        tmpOutImage.append(malloc(outH, outW))\n    ## 원 입력 --> 임시 입력\n    for RGB in range(3):\n        for i in range(inH) :\n            for k in range(inW) :\n                tmpInImage[RGB][i+MSIZE//2][k+MSIZE//2] = inImage[RGB][i][k]\n        ## 회선연산\n        for i in range(MSIZE//2, inH + MSIZE//2) :\n            for k in range(MSIZE//2, inW + MSIZE//2) :\n                # 각 점을 처리.\n                S = 0.0\n                for m in range(0, MSIZE) :\n                    for n in range(0, MSIZE) :\n                        S += mask[m][n]*tmpInImage[RGB][i+m-MSIZE//2][k+n-MSIZE//2]\n                tmpOutImage[RGB][i-MSIZE//2][k-MSIZE//2] = S\n        # ## 127 더하기 (선택)\n        # for i in range(outH) :\n        #     for k in range(outW) :\n        #         tmpOutImage[RGB][i][k] += 127\n        ## 임시 출력 --> 원 출력\n        for i in range(outH):\n            for k in range(outW):\n                value = tmpOutImage[RGB][i][k]\n                if value > 255 :\n                    value = 255\n                elif value < 0 :\n                    value = 0\n                outImage[RGB][i][k] = int(value)\n\n    displayImageColor()\n####################\n#### 전역변수 선언부 ####\n####################\nR, G, B = 0, 1, 2\ninImage, outImage = [], []  # 3차원 리스트(배열)\ninH, inW, outH, outW = [0] * 4\nwindow, canvas, paper = None, None, None\nfilename = \"\"\nVIEW_X, VIEW_Y = 512, 512 # 화면에 보일 크기 (출력용)\n####################\n#### 메인 코드부 ####\n####################\nwindow = Tk()\nwindow.geometry(\"500x500\")\nwindow.title(\"컴퓨터 비전(딥러닝) ver 0.01\")\n\nstatus = Label(window, text='이미지 정보:', bd=1, relief=SUNKEN, anchor=W)\nstatus.pack(side=BOTTOM, fill=X)\n\n## 마우스 이벤트\n\nmainMenu = Menu(window)\nwindow.config(menu=mainMenu)\n\nfileMenu = Menu(mainMenu)\nmainMenu.add_cascade(label=\"파일\", menu=fileMenu)\nfileMenu.add_command(label=\"파일 열기\", command=openImageColor)\nfileMenu.add_separator()\nfileMenu.add_command(label=\"파일 저장\", command=saveImagePIL)\n\ncomVisionMenu1 = 
Menu(mainMenu)\nmainMenu.add_cascade(label=\"화소점 처리\", menu=comVisionMenu1)\ncomVisionMenu1.add_command(label=\"덧셈/뺄셈\", command=addImageColor)\ncomVisionMenu1.add_command(label=\"반전하기\", command=revImageColor)\ncomVisionMenu1.add_command(label=\"파라볼라\", command=paraImageColor)\ncomVisionMenu1.add_separator()\ncomVisionMenu1.add_command(label=\"모핑\", command=morphImageColor)\n\ncomVisionMenu2 = Menu(mainMenu)\nmainMenu.add_cascade(label=\"통계\", menu=comVisionMenu2)\ncomVisionMenu2.add_command(label=\"이진화\", command=bwImageColor)\ncomVisionMenu2.add_command(label=\"축소(평균변환)\", command=zoomOutImage2Color)\ncomVisionMenu2.add_command(label=\"확대(양선형보간)\", command=zoomInImage2Color)\ncomVisionMenu2.add_separator()\ncomVisionMenu2.add_command(label=\"히스토그램\", command=histoImageColor)\ncomVisionMenu2.add_command(label=\"히스토그램(내꺼)\", command=histoImage2Color)\ncomVisionMenu2.add_command(label=\"명암대비\", command=stretchImageColor)\ncomVisionMenu2.add_command(label=\"End-In탐색\", command=endinImageColor)\ncomVisionMenu2.add_command(label=\"평활화\", command=equalizeImageColor)\n\ncomVisionMenu3 = Menu(mainMenu)\nmainMenu.add_cascade(label=\"기하학 처리\", menu=comVisionMenu3)\ncomVisionMenu3.add_command(label=\"상하반전\", command=upDownImageColor)\ncomVisionMenu3.add_command(label=\"이동\", command=moveImageColor)\ncomVisionMenu3.add_command(label=\"축소\", command=zoomOutImageColor)\ncomVisionMenu3.add_command(label=\"확대\", command=zoomInImageColor)\ncomVisionMenu3.add_command(label=\"회전1\", command=rotateImageColor)\ncomVisionMenu3.add_command(label=\"회전2(중심,역방향)\", command=rotateImage2Color)\n\ncomVisionMenu4 = Menu(mainMenu)\nmainMenu.add_cascade(label=\"화소영역 처리\", menu=comVisionMenu4)\ncomVisionMenu4.add_command(label=\"엠보싱(RGB)\", command=embossImageRGB)\ncomVisionMenu4.add_command(label=\"엠보싱(PIL)\", command=embossImagePIL)\ncomVisionMenu4.add_command(label=\"엠보싱(HSV)\", command=embossImageHSV)\ncomVisionMenu4.add_command(label=\"블러링(RGB)\", command=blurImageRGB)\n\n# comVisionMenu5 = Menu(mainMenu)\n# mainMenu.add_cascade(label=\"기타 입출력\", menu=comVisionMenu5)\n# comVisionMenu5.add_command(label=\"MySQL에서 불러오기\", command=loadMysqlColor)\n# comVisionMenu5.add_command(label=\"MySQL에 저장하기\", command=saveMysqlColor)\n# comVisionMenu5.add_separator()\n# comVisionMenu5.add_command(label=\"CSV 열기\", command=openCSV)\n# comVisionMenu5.add_command(label=\"CSV로 저장\", command=saveCSV)\n# comVisionMenu5.add_separator()\n# comVisionMenu5.add_command(label=\"엑셀 열기\", command=openExcel)\n# comVisionMenu5.add_command(label=\"엑셀로 저장\", command=saveExcel)\n# comVisionMenu5.add_command(label=\"엑셀 아트로 저장\", command=saveExcelArt)\n\n\nwindow.mainloop()","sub_path":"3-ComputerVision/codes/Code13-01 컴퓨터 비젼(딥러닝) RGB 2 HSV 01.py","file_name":"Code13-01 컴퓨터 비젼(딥러닝) RGB 2 HSV 01.py","file_ext":"py","file_size_in_byte":38779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
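The `__embossImageHSV` routine in the record above builds an HSV copy of the image but, as of this v0.01 code, still convolves the RGB planes. For the HSV variant the filename suggests, only the V channel would be perturbed so hue and saturation survive; a single-pixel sketch of that round trip with `colorsys` (the function name and delta are illustrative, not from the original):

import colorsys

def perturb_value_channel(pixel_rgb, delta):
    # RGB (0-255) -> HSV (0-1), shift V only, convert back
    r, g, b = [c / 255 for c in pixel_rgb]
    h, s, v = colorsys.rgb_to_hsv(r, g, b)
    v = min(max(v + delta, 0.0), 1.0)
    r2, g2, b2 = colorsys.hsv_to_rgb(h, s, v)
    return tuple(int(c * 255) for c in (r2, g2, b2))

print(perturb_value_channel((200, 120, 40), 0.2))  # brighter pixel, same hue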
+{"seq_id":"581049259","text":"import glob\nimport os\n\nconfigPath = 'config'\n\n\ndef configWrite(typeCounter: int, coreList: list)->None:\n    if not os.path.exists(configPath):\n        os.makedirs(configPath)\n    try:\n        with open(os.path.join(configPath, str(typeCounter)+'.config'), 'w') as w:\n            w.write(str(typeCounter)+'\\n')\n            for i in coreList:\n                w.write(str(i) + '\\n')\n    except Exception as e:\n        print('😔[ERROR]\\t%s' % str(e))\n\n\ndef configRead(fileName: str) -> (int, list):\n    try:\n        with open(fileName, 'r') as r:\n            typeCounter = int(r.readline())\n            coreList = r.readlines()\n            for i in range(len(coreList)):\n                coreList[i] = int(coreList[i])\n            return (typeCounter, coreList)\n    except Exception as e:\n        print('😔[ERROR]\\t%s' % str(e))\n\ndef fetchConfigList() -> tuple:\n    for fileName in glob.glob(configPath+'\\*.config'):\n        print(fileName)\n\n\nif __name__ == '__main__':\n    print(configRead(os.path.join(configPath, '3.config')))\n","sub_path":"国赛训练/2019培训4/code/configIO.py","file_name":"configIO.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"237247501","text":"### L63 Runge Kutta 4 ###\nfrom pylab import *\n\ndef RK4(tstep,h,init_cond,a,r,b):\n    \n    #a is sigma\n    #b is beta\n    #r is rho\n    \n    x = zeros(tstep+1)\n    y = zeros(tstep+1)\n    z = zeros(tstep+1)\n    x[0] = init_cond[0]\n    y[0] = init_cond[1]\n    z[0] = init_cond[2]\n    \n    for i in range(tstep):\n        #time derivative of coordinates\n        xk1 = a*(y[i]-x[i])\n        yk1 = x[i]*(r-z[i]) - y[i]\n        zk1 = x[i]*y[i] - b*z[i]\n        \n        #temporary storage\n        xtemp = x[i] + (h/2)*xk1\n        ytemp = y[i] + (h/2)*yk1\n        ztemp = z[i] + (h/2)*zk1\n        \n        #find the time derivative of new coords\n        xk2 = a*(ytemp-xtemp)\n        yk2 = xtemp*(r-ztemp) - ytemp\n        zk2 = xtemp*ytemp - b*ztemp\n        \n        #temporary storage\n        xtemp = x[i] + (h/2)*xk2\n        ytemp = y[i] + (h/2)*yk2\n        ztemp = z[i] + (h/2)*zk2\n        \n        #find the time derivative of new coords\n        xk3 = a*(ytemp-xtemp)\n        yk3 = xtemp*(r-ztemp) - ytemp\n        zk3 = xtemp*ytemp - b*ztemp\n        \n        #temporary storage\n        xtemp = x[i] + h*xk3\n        ytemp = y[i] + h*yk3\n        ztemp = z[i] + h*zk3\n        \n        #find the time derivative of new coords\n        xk4 = a*(ytemp-xtemp)\n        yk4 = xtemp*(r-ztemp) - ytemp\n        zk4 = xtemp*ytemp - b*ztemp\n        \n        #propagate to the i+1 time step\n        x[i+1] = x[i] + (1.0/6.0)*h*(xk1 + 2*xk2 + 2*xk3 + xk4)\n        y[i+1] = y[i] + (1.0/6.0)*h*(yk1 + 2*yk2 + 2*yk3 + yk4)\n        z[i+1] = z[i] + (1.0/6.0)*h*(zk1 + 2*zk2 + 2*zk3 + zk4)\n        \n    integ_path = array([x,y,z])\n\n    return(integ_path)","sub_path":"old_junk/RK4L63.py","file_name":"RK4L63.py","file_ext":"py","file_size_in_byte":1626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"124287446","text":"from typing import Dict\n\nfrom starkware.cairo.lang.compiler.identifier_definition import (\n    IdentifierDefinition, MemberDefinition, OffsetReferenceDefinition, ReferenceDefinition)\nfrom starkware.cairo.lang.compiler.identifier_manager import (\n    IdentifierManager, IdentifierSearchResult)\nfrom starkware.cairo.lang.compiler.scoped_name import ScopedName\n\n\ndef get_struct_members(\n        struct_name: ScopedName,\n        identifier_manager: IdentifierManager) -> Dict[str, MemberDefinition]:\n    \"\"\"\n    Returns the member definitions of a struct sorted by offset.\n    \"\"\"\n\n    scope_items = identifier_manager.get_scope(struct_name).identifiers\n    members = (\n        (name, indentifier_def)\n        for (name, indentifier_def) in scope_items.items()\n        if isinstance(indentifier_def, MemberDefinition))\n\n    return {\n        name: indentifier_def\n        for name, indentifier_def in sorted(members, key=lambda key_value: key_value[1].offset)\n    }\n\n\ndef resolve_search_result(\n        search_result: IdentifierSearchResult,\n        identifiers: IdentifierManager) -> IdentifierDefinition:\n    \"\"\"\n    Returns a fully parsed identifier definition for the given identifier search result.\n    If search_result contains a reference with non_parsed data, returns an instance of\n    OffsetReferenceDefinition.\n    \"\"\"\n    identifier_definition = search_result.identifier_definition\n    if isinstance(identifier_definition, ReferenceDefinition) and \\\n            len(search_result.non_parsed) > 0:\n        identifier_definition = OffsetReferenceDefinition(\n            parent=identifier_definition,\n            identifier_values=identifiers.as_dict(),\n            member_path=search_result.non_parsed)\n    else:\n        search_result.assert_fully_parsed()\n\n    return identifier_definition\n","sub_path":"examples/starkex-cairo/starkware/cairo/lang/compiler/identifier_utils.py","file_name":"identifier_utils.py","file_ext":"py","file_size_in_byte":1796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"218786268","text":"from fond4ltlfpltl.PDDLparser.formula import FormulaAnd, FormulaOneOf\nfrom fond4ltlfpltl.PDDLparser.literal import Literal\nfrom fond4ltlfpltl.PDDLparser.predicate import Predicate\n\nclass Action:\n\n    def __init__(self, name, parameters, preconditions, effects):\n        self.name = name #string\n        self.parameters = parameters #list\n        self.preconditions = preconditions #formula.FormulaXXX\n        self.effects = effects #formula.FormulaXXX\n\n    def __str__(self):\n        operator_str = '{0}\\n'.format(self.name)\n        operator_str += '\\t:parameters ({0})\\n'.format(' '.join(map(str, self.parameters)))\n        operator_str += '\\t:precondition {0}\\n'.format(self.preconditions)\n        operator_str += '\\t:effect {0}\\n'.format(self.effects)\n        return operator_str\n\n    def add_to_precond(self):\n        if isinstance(self.preconditions, FormulaAnd):\n            self.preconditions.complete_domain_turn(True)\n        else:\n            old_formula = self.preconditions\n            precond_to_be_added = Literal.positive(Predicate('turnDomain'))\n            self.preconditions = FormulaAnd([old_formula,precond_to_be_added])\n\n    def add_to_effect(self):\n        if isinstance(self.effects, FormulaAnd):\n            self.effects.complete_domain_turn(False)\n        else:\n            old_formula = self.effects\n            effect_to_be_added = Literal.negative(Predicate('turnDomain'))\n            self.effects = FormulaAnd([old_formula,effect_to_be_added])\n\n    def add_turn_domain(self):\n        self.add_to_precond()\n        self.add_to_effect()\n\n    def isOneOf(self):\n        if isinstance(self.effects, FormulaOneOf):\n            return True\n        else:\n            return False\n","sub_path":"fond4ltlfpltl/PDDLparser/action.py","file_name":"action.py","file_ext":"py","file_size_in_byte":1702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"235980412","text":"from django.db import models\nfrom django.utils import timezone\nfrom django.contrib.auth.models import User\nfrom django.contrib.gis.db import models as modelsgis\nfrom taggit.managers import TaggableManager\nfrom mptt.models import MPTTModel, TreeForeignKey\nimport mptt\nfrom apps.POI.esearch import GisPOIIndex\n\n\nclass Category(MPTTModel):\n    class Meta:\n        db_table = 'category'\n\n    name = models.CharField(max_length=150)\n    slug = modelsgis.SlugField(null=True, unique=True, max_length=25)\n    parent = TreeForeignKey('self', null=True, blank=True, related_name='Category')\n\n    class MPTTMeta:\n        order_insertion_by = ['name']\n\n    def __str__(self):\n        return \"-- %s\" % (self.name)\n\n\nmptt.register(Category, )\n\n\nclass BaseGisPOI(modelsgis.Model):\n    name = modelsgis.CharField(max_length=300)\n    point = modelsgis.PointField(geography=True, null=True, blank=True)\n    addres = modelsgis.TextField(null=True, blank=True)\n    description = modelsgis.TextField()\n    create_in = modelsgis.DateTimeField(auto_now_add=True)\n    radius = modelsgis.PositiveIntegerField(default=0, blank=True)\n    image = modelsgis.ImageField(null=True, blank=True)\n    extra_data = modelsgis.TextField(null=True, blank=True)\n    tags = TaggableManager()\n    is_moderate = models.BooleanField(default=False)\n\n    class Meta:\n        abstract = True\n\n\nclass DraftGisPOI(BaseGisPOI):\n    category = modelsgis.ManyToManyField(Category, blank=True, related_name='draftpoi')\n    created_was = modelsgis.ForeignKey(User, on_delete=modelsgis.SET_NULL, null=True, blank=True)\n\n    def __str__(self):\n        return \"ID: %s\" % (self.id)\n\n\nclass GisPOI(BaseGisPOI):\n    created_was = modelsgis.ForeignKey(User, on_delete=modelsgis.SET_NULL, null=True, blank=True,\n                                       related_name='cratedpoi')\n    category = modelsgis.ManyToManyField(Category, blank=True, related_name='poi')\n    moderated_was = modelsgis.ForeignKey(User, on_delete=modelsgis.SET_NULL, null=True, blank=True)\n    moderation_on = models.DateField(null=True, blank=True, auto_now_add=True)\n\n    def __str__(self):\n        return \"ID: %s\" % (self.id)\n\n    @property\n    def anchor(self):\n        return {\"geolocation\": { \"lat\": self.point.coords[1], \"lon\": self.point.coords[0] }}\n\n    @property\n    def text(self):\n        return {\n         \"title\": self.name,\n         \"description\": self.description,\n        }\n\n    def indexing(self):\n        # print('indexing')\n        obj = GisPOIIndex(\n            meta={'id': self.id},\n            id=self.id,\n            name=self.name,\n            description=self.description,\n            date=self.create_in\n        )\n        # print('indexing POI : ', obj.name, obj.description)\n        obj.save()\n        return obj.to_dict(include_meta=True)\n","sub_path":"Artefaktor/apps/POI/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"599416277","text":"import re\n\nimport log\nimport requests\nfrom memoize import memoize\n\n\nMI_SOS_URL = \"https://webapps.sos.state.mi.us/MVIC/\"\n\n\n@memoize(timeout=60)\ndef fetch_registration_status_data(voter):\n    response = requests.get(MI_SOS_URL)\n    log.debug(response.text)\n    response.raise_for_status()\n\n    form_data = {\n        \"__EVENTVALIDATION\": find_or_abort(\n            'id=\"__EVENTVALIDATION\" value=\"(.*?)\"', response.text\n        ),\n        \"__VIEWSTATE\": find_or_abort(\n            'id=\"__VIEWSTATE\" value=\"(.*?)\"', response.text\n        ),\n        \"__VIEWSTATEGENERATOR\": find_or_abort(\n            'id=\"__VIEWSTATEGENERATOR\" value=\"(.*?)\"', response.text\n        ),\n        \"__VIEWSTATEENCRYPTED\": \"\",\n        \"ctl00$ContentPlaceHolder1$vsFname\": voter.first_name,\n        \"ctl00$ContentPlaceHolder1$vsLname\": voter.last_name,\n        \"ctl00$ContentPlaceHolder1$vsMOB2\": voter.birth_month,\n        \"ctl00$ContentPlaceHolder1$vsMOB1\": voter.birth_month,\n        \"ctl00$ContentPlaceHolder1$vsYOB2\": voter.birth_year,\n        \"ctl00$ContentPlaceHolder1$vsZip\": voter.zip_code,\n        \"ctl00$ContentPlaceHolder1$btnSearchByName\": \"Search\",\n    }\n    log.debug(form_data)\n\n    response = requests.post(\n        MI_SOS_URL,\n        headers={\"Content-Type\": \"application/x-www-form-urlencoded\"},\n        data=form_data,\n    )\n    log.debug(response.text)\n    response.raise_for_status()\n\n    registered = bool(re.search(\"Yes, You Are Registered\", response.text))\n\n    # const registered = !!body.match(/Yes\\, You Are Registered/);\n    #     if (!registered) return { registered: false };\n    #     const ret = { registered: !!body.match(/Yes\\, You Are Registered/) };\n    #     const rex = /districtCell\">[\\s\\S]*?(.*?): <\\/b>[\\s\\S]*?districtCell\">[\\s\\S]*?\">(.*?)<\\/span>/g\n    #     do {\n    #         var m = rex.exec(body);\n    #         if (m) {\n    #             ret[m[1].toLowerCase().replace(/\\s/g, '_')] = m[2];\n    #         }\n    #     } while (m);\n    #     return ret;\n\n    return {\"registered\": registered}\n\n\ndef find_or_abort(pattern, text):\n    match = re.search(pattern, text)\n    assert match, f\"Unable for match {pattern!r} to {text!r}\"\n    return match[1]\n","sub_path":"elections/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":2169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"169750200","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Author  : huxiansheng (you@example.org)\nfrom System_setting.Logger import Logger\n\nlogger = Logger(logger='common_class').getlog()\n\nclass common_class():\n\n    '''\n    参数:\n    obj 需要转换类型的对象\n    type_ 需要转换的类型\n    返回:\n    obj 转换后的对象\n    '''\n    def change_type(self,obj,type_):\n        _type = type(obj)\n        if _type ==type_:\n            return obj\n        else:\n            try:\n                obj = type_(obj)\n                return obj\n            except BaseException as e:\n                logger.error('object类型转换出错:%s'%e)\n                return obj\n\n\n\n\n\n\n\n\n","sub_path":"All_class/Common_class.py","file_name":"Common_class.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"471036357","text":"import os\r\nfrom azure.storage.blob import BlockBlobService\r\n\r\nroot_path = ''\r\ndir_name = 'images'\r\npath = f\"{root_path}/{dir_name}\"\r\nfile_names = os.listdir(path)\r\n\r\naccount_name = ''\r\naccount_key = ''\r\ncontainer_name = ''\r\n\r\nblock_blob_service = BlockBlobService(\r\n    account_name=account_name,\r\n    account_key=account_key\r\n)\r\n\r\nfor file_name in file_names:\r\n    blob_name = f\"{dir_name}/{file_name}\"\r\n    file_path = f\"{path}/{file_name}\"\r\n    block_blob_service.create_blob_from_path(container_name, blob_name, file_path)","sub_path":"850959-Gayathri.py","file_name":"850959-Gayathri.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"44050905","text":"\n##\n# Create simulation data from adaptive boxes results\n##\n\nimport matplotlib.colors as colors\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\nfrom adabox.legacy.postproc_gpu.tools import create_groups, get_xy_units\n\ncolors_list = list(colors._colors_full_map.values())\n\nin_path = \"/adabox/decomposition/samples/decomposition_n_20.csv\"  # .csv\nout_path = \"/adabox/decomposition/samples/postdata\"  # without extension\n\ndata = np.array(pd.read_csv(in_path, header=None))\nsep_value = 1           # it is a constant because adabox GPU returns partitions with this value. DONT CHANGE!\n\n# data prepros adds boundaries to json rectangles\ngroups_details, summary = create_groups(data, sep_value)\n\nfor s in summary:\n    print(s)\n#\n# # Plot Rectangles by groups\n# plt.figure()\n# for rec in groups_details:\n#     x1 = rec[0]\n#     x2 = rec[1]\n#     y1 = rec[2]\n#     y2 = rec[3]\n#\n#     p1 = np.array([x1, y1])\n#     p2 = np.array([x1, y2])\n#     p3 = np.array([x2, y1])\n#     p4 = np.array([x2, y2])\n#\n#     ps = np.array([p1, p2, p4, p3, p1])\n#     plt.plot(ps[:, 0], ps[:, 1])\n\n# Save in a csv file\nn_split_sep_value = 3\nerror_val = 0.6\ny_units, x_units = get_xy_units(groups_details, sep_value, n_split_sep_value, error_val)\n\n# Creating units\nx_unit_list = []\nfor x_unit in x_units:\n    # print(str(x_unit.group) + ' ' + str(x_unit.position))\n    x_unit_list.append([x_unit.group[0][0],\n                        x_unit.group[0][1],\n                        x_unit.position[0],\n                        x_unit.group[1][0],\n                        x_unit.group[1][1],\n                        x_unit.position[1],\n                        ])\n\ny_unit_list = []\nfor y_unit in y_units:\n    # print(str(y_unit.group) + ' ' + str(y_unit.position))\n    y_unit_list.append([y_unit.group[0][0],\n                        y_unit.group[0][1],\n                        y_unit.position[0],\n                        y_unit.group[1][0],\n                        y_unit.group[1][1],\n                        y_unit.position[1],\n                        ])\n\n# x-units and y-units\n# columns: (0: group, 1:partition, 2:interface_position) (3:group, 4:partition, 5:interface_position)\nx_unit_df = pd.DataFrame(x_unit_list)\ny_unit_df = pd.DataFrame(y_unit_list)\n\nc_names_interfaces = ['group_0',\n                      'partition_0',\n                      'interface_position_0',\n                      'group_1',\n                      'partition_1',\n                      'interface_position_1']\n\nx_unit_df.columns = c_names_interfaces\ny_unit_df.columns = c_names_interfaces\n\nx_unit_df.to_csv(out_path + \"/x_units.csv\", header=True, index=None)\ny_unit_df.to_csv(out_path + \"/y_units.csv\", header=True, index=None)\n\n# Saving summary\nsummary_groups = pd.DataFrame(summary)\nsummary_groups.iloc[:, 2:] = summary_groups.iloc[:, 2:] * n_split_sep_value\n\nsummary_groups.columns = ['n_group', 'n_partitions', 'num_div_y', 'num_div_x']\nsummary_groups.to_csv(out_path + \"/summary_groups.csv\", header=True, index=None)\n\n# saving groups details\ngroups_details_df = pd.DataFrame(groups_details)\n# header: x1 x2 y1 y2 is_checked? 
#     |-------------o(x2,y2)|\n#     |                     |\n#     |                     |\n#     |o(x1,y1)-------------|\ngroups_details_subset_df = groups_details_df[[0, 1, 2, 3, 5, 6]]\ngroups_details_subset_df.columns = ['x1', 'x2', 'y1', 'y2', 'gi', 'gj']\n# Normalizing\nx_offset = abs(groups_details_subset_df.loc[:, 'x1'].min())\ngroups_details_subset_df.loc[:, 'x1'] = n_split_sep_value * (groups_details_subset_df.loc[:, 'x1'] + x_offset)\ngroups_details_subset_df.loc[:, 'x2'] = n_split_sep_value * (groups_details_subset_df.loc[:, 'x2'] + x_offset)\n\ny_offset = abs(groups_details_subset_df.loc[:, 'y1'].min())\ngroups_details_subset_df.loc[:, 'y1'] = n_split_sep_value * (groups_details_subset_df.loc[:, 'y1'] + y_offset)\ngroups_details_subset_df.loc[:, 'y2'] = n_split_sep_value * (groups_details_subset_df.loc[:, 'y2'] + y_offset)\n\ngroups_details_subset_df.to_csv(out_path + \"/group_details.csv\", header=True, index=None)\n\n\n# Plot Rectangles by groups\nplt.figure()\nfor i in range(groups_details_subset_df.shape[0]):\n    rec = groups_details_subset_df.iloc[i, :]\n    x1 = rec[0]\n    x2 = rec[1]\n    y1 = rec[2]\n    y2 = rec[3]\n\n    p1 = np.array([x1, y1])\n    p2 = np.array([x1, y2])\n    p3 = np.array([x2, y1])\n    p4 = np.array([x2, y2])\n\n    ps = np.array([p1, p2, p4, p3, p1])\n    plt.plot(ps[:, 0], ps[:, 1])\n\n","sub_path":"adabox/legacy/postproc_gpu/post_process_csv_gpu.py","file_name":"post_process_csv_gpu.py","file_ext":"py","file_size_in_byte":4423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"481091380","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Oct 10 10:28:43 2019\n\n@author: Lucie\n\"\"\"\n\nfrom lxml import etree\nxmlfile = \"valery_ame-et-danse_1921.xml\"\ntree = etree.parse(xmlfile)\nroot = tree.getroot()\nTEI_NAMESPACE = \"http://www.tei-c.org/ns/1.0\"\nTEI = \"{%s}\" % TEI_NAMESPACE\n\n#affiche le nom de l'éditeur \nfor element in root.iter(TEI + 'edition'):\n    print(element.text)\n        \n#affiche l'url de la licence \nfor element in root.iter(TEI + 'ref'):\n    print(element.attrib.get('target'))\n\n#afficher le personnage avec le plus de réplique (@speaker)\n#et le nombre de répliques\nd={}\nfor element in root.iter(TEI + \"label\"):\n    d[element.text] = d.get(element.text,0)+1\nprint(sorted(d.items(),key=lambda x:x[1],reverse=True))\n\n#ajouter un autre  avec deux enfants nam et resp contenant du texte\nfor element in root.iter(TEI + 'editionStmt'):\n    respStmt = etree.SubElement(element,\"respStmt\")\n    name = etree.SubElement(respStmt,\"name\")\n    resp = etree.SubElement(respStmt,\"resp\")\n    name.text = \"name\"\n    resp.text = \"resp\"\n#print(etree.tostring(root, pretty_print=True))\ntree.write('xmlfile.xml',encoding = 'utf-8', pretty_print=True)\n\n#modifier la valeur de la signateur  pour afficher le texte en majuscule\nfor element in root.iter(TEI + 'signed'):\n    for child in element:\n        child.text = child.text.upper()\n#print(etree.tostring(root, pretty_print=True))\ntree.write('xmlfile.xml',encoding = 'utf-8',pretty_print=True)","sub_path":"seance_03/danse.py","file_name":"danse.py","file_ext":"py","file_size_in_byte":1473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"100593395","text":"import json\nfrom pathlib import Path\n\nENCODING='utf8'\n\ndef parseSheet(source,target):\n\n    with source.open(mode='r', encoding=ENCODING) as f:\n        sheet_data = json.load(f)\n            \n    print(\"Parsing local sheet...\")\n\n    cats = {}\n\n    for s in sheet_data:\n        feed = s['feed']\n        title = feed['title']['$t']\n        entry = feed['entry']\n        timestamp = feed['updated']['$t']\n\n        if title not in cats.keys():\n            cats.update({title:{}})\n\n        subcats = {}\n\n        for e in entry:\n            if not e['gsx$category'][\"$t\"]:\n                print(\"NOSUBCAT: \"+ e[\"gsx$header\"][\"$t\"] + \" \" +e[\"gsx$title\"][\"$t\"])\n                scat = 'undefined'\n            else:\n                scat = e['gsx$category'][\"$t\"].replace(\" \",\"_\")\n            \n\n            data = {\n                \"employer\":e[\"gsx$employer\"][\"$t\"],\n                \"dates\":e[\"gsx$dates\"][\"$t\"],\n                \"title\":e[\"gsx$title\"][\"$t\"],\n                \"location\":e[\"gsx$location\"][\"$t\"],\n                \"year\":e[\"gsx$year\"][\"$t\"],\n                \"description\":e[\"gsx$description\"][\"$t\"],\n                \"narrative\":e[\"gsx$narrative\"][\"$t\"],\n                \"url\":e[\"gsx$url\"][\"$t\"],\n                \"timestamp\" : e[\"gsx$timestamp\"][\"$t\"],\n            }\n            \n            if scat not in subcats.keys():\n                subcats.update({scat:{}})\n            \n            subcats[scat].update({\n                \"subsection\" : e[\"gsx$category\"][\"$t\"],\n            })\n            \n            if \"data\" not in subcats[scat].keys():\n                subcats[scat].update({\"data\":[]})\n            \n            subcats[scat]['data'].append(data)\n            \n            cats[title].update({\n                \"section\": e[\"gsx$header\"][\"$t\"],\n                \"timestamp\":timestamp,\n                \"subcategories\":subcats,\n            })\n\n\n    # for i in cats:\n    #     for j in cats[i]['subcategories']:\n    #         emp = {}\n    #         for k in cats[i]['subcategories'][j]['data']:\n    #             # print(k)\n    #             kk = cats[i]['subcategories'][j]['data']\n    #             empkey = k['employer'].replace(\" \",\"_\")\n    #             if k['employer'] not in kk:\n    #                 emp.update({empkey:[]})\n    #             for key,value in k.items():\n    #                 # print(key,value)\n    #                 if 'employer' not in key:\n    #                     emp[empkey].append(value)\n    #         cats[i]['subcategories'][j]['data'] = emp\n\n\n    sheet_dicts = cats\n\n\n    with target.open(mode='wb') as f:\n        f.write(json.dumps(sheet_dicts, \n            sort_keys=True,\n            indent=4,\n            separators=(',',':'),\n            ensure_ascii=False,\n            ).encode(ENCODING)\n        )\n\nif __name__ == '__main__':\n\n    source  = Path(\"../.data/.sheet_data.json\")\n    target  = Path(\"../.data/.sheet_data_parsed-test.json\")\n\n    parseSheet(source, target)\n","sub_path":"src/bibliography.py","file_name":"bibliography.py","file_ext":"py","file_size_in_byte":2914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"300032240","text":"#!/bin/python\n# encoding: utf-8\nfrom __future__ import print_function\nimport optparse\nimport itertools\nfrom collections import OrderedDict\nimport loader2\nimport torch\nimport time\n#import cPickle\nfrom torch.autograd import Variable\n#import matplotlib.pyplot as plt\nimport sys\n#import visdom\nfrom utils import *\nfrom loader2 import *\nfrom model2 import BiLSTM_CRF\nt = time.time()\nmodels_path = \"models/\"\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"3\"\n\noptparser = optparse.OptionParser()\noptparser.add_option(\n   \"-T\", \"--train\", default=\"dataset/spec_train.txt\",\n    help=\"Train set location\"\n)\noptparser.add_option(\n    \"-d\", \"--dev\", default=\"dataset/spec_example.txt\",\n    help=\"Dev set location\"\n)\noptparser.add_option(\n    \"-t\", \"--test\", default=\"dataset/spec_test.txt\",\n    help=\"Test set location\"\n)\noptparser.add_option(\n    '--test_train', default='dataset/new_dis_nature_spec_example.txt',\n    help='test train'\n)\noptparser.add_option(\n    '--score', default='evaluation/temp/score.txt',\n    help='score file location'\n)\noptparser.add_option(\n    \"-s\", \"--tag_scheme\", default=\"iobes\",\n    help=\"Tagging scheme (IOB or IOBES)\"\n)\noptparser.add_option(\n    \"-l\", \"--lower\", default=\"1\",\n    type='int', help=\"Lowercase words (this will not affect character inputs)\"\n)\noptparser.add_option(\n    \"-z\", \"--zeros\", default=\"0\",\n    type='int', help=\"Replace digits with 0\"\n)\noptparser.add_option(\n    \"-c\", \"--char_dim\", default=\"25\",\n    type='int', help=\"Char embedding dimension\"\n)\noptparser.add_option(\n    \"-C\", \"--char_lstm_dim\", default=\"25\",\n    type='int', help=\"Char LSTM hidden layer size\"\n)\noptparser.add_option(\n    \"-b\", \"--char_bidirect\", default=\"1\",\n    type='int', help=\"Use a bidirectional LSTM for chars\"\n)\noptparser.add_option(\n    \"-w\", \"--word_dim\", default=\"50\",\n    type='int', help=\"Token embedding dimension\"\n)\noptparser.add_option(\n    \"-W\", \"--word_lstm_dim\", default=\"200\",\n    type='int', help=\"Token LSTM hidden layer size\"\n)\noptparser.add_option(\n    \"-B\", \"--word_bidirect\", default=\"1\",\n    type='int', help=\"Use a bidirectional LSTM for words\"\n)\noptparser.add_option(\n    \"-p\", \"--pre_emb\", default=\"models/glove.6B.100d.txt\",\n    help=\"Location of pretrained embeddings\"\n)\noptparser.add_option(\n    \"-A\", \"--all_emb\", default=\"1\",\n    type='int', help=\"Load all embeddings\"\n)\noptparser.add_option(\n    \"-a\", \"--cap_dim\", default=\"0\",\n    type='int', help=\"Capitalization feature dimension (0 to disable)\"\n)\noptparser.add_option(\n    \"-f\", \"--crf\", default=\"1\",\n    type='int', help=\"Use CRF (0 to disable)\"\n)\noptparser.add_option(\n    \"-D\", \"--dropout\", default=\"0.5\",\n    type='float', help=\"Droupout on the input (0 = no dropout)\"\n)\noptparser.add_option(\n    \"-r\", \"--reload\", default=\"0\",\n    type='int', help=\"Reload the last saved model\"\n)\noptparser.add_option(\n    \"-g\", '--use_gpu', default='1',\n    type='int', help='whether or not to ues gpu'\n)\noptparser.add_option(\n    '--loss', default='loss.txt',\n    help='loss file location'\n)\noptparser.add_option(\n    '--name', default='spec_test',\n    help='model name'\n)\noptparser.add_option(\n    '--char_mode', choices=['CNN', 'LSTM'], default='CNN',\n    help='char_CNN or char_LSTM'\n)\nopts = optparser.parse_args()[0]\n\nparameters = 
OrderedDict()  # OrderedDict keeps entries in insertion order, so the parameters come out in a stable order\nparameters['tag_scheme'] = opts.tag_scheme\nparameters['lower'] = opts.lower == 1\nparameters['zeros'] = opts.zeros == 1\nparameters['char_dim'] = opts.char_dim\nparameters['char_lstm_dim'] = opts.char_lstm_dim\nparameters['char_bidirect'] = opts.char_bidirect == 1\nparameters['word_dim'] = opts.word_dim\nparameters['word_lstm_dim'] = opts.word_lstm_dim\nparameters['word_bidirect'] = opts.word_bidirect == 1\nparameters['pre_emb'] = opts.pre_emb\nparameters['all_emb'] = opts.all_emb == 1\nparameters['cap_dim'] = opts.cap_dim\nparameters['crf'] = opts.crf == 1\nparameters['dropout'] = opts.dropout\nparameters['reload'] = opts.reload == 1\nparameters['name'] = opts.name\nparameters['char_mode'] = opts.char_mode\n\nparameters['use_gpu'] = opts.use_gpu == 1 and torch.cuda.is_available()\nuse_gpu = parameters['use_gpu']\n\nmapping_file = 'models/mapping.pkl'\n\nname = parameters['name']\nmodel_name = models_path + name #get_name(parameters)\ntmp_model = model_name + '.tmp'\n\n\nassert os.path.isfile(opts.train)\nassert os.path.isfile(opts.dev)\nassert os.path.isfile(opts.test)\nassert parameters['char_dim'] > 0 or parameters['word_dim'] > 0\nassert 0. <= parameters['dropout'] < 1.0\nassert parameters['tag_scheme'] in ['iob', 'iobes']\nassert not parameters['all_emb'] or parameters['pre_emb']\nassert not parameters['pre_emb'] or parameters['word_dim'] > 0\n#assert not parameters['pre_emb'] or os.path.isfile(parameters['pre_emb'])\n\nif not os.path.isfile(eval_script):\n    raise Exception('CoNLL evaluation script not found at \"%s\"' % eval_script)\nif not os.path.exists(eval_temp):\n    os.makedirs(eval_temp)\nif not os.path.exists(models_path):\n    os.makedirs(models_path)\n\nlower = parameters['lower']\nzeros = parameters['zeros']\ntag_scheme = parameters['tag_scheme']\n\ntrain_sentences = loader2.load_sentences(opts.train)  ## a triply nested list: sentences -> tokens -> fields\ndev_sentences = loader2.load_sentences(opts.dev)\ntest_sentences = loader2.load_sentences(opts.test)\n\ndico_words, word_to_id, id_to_word = word_mapping(train_sentences + test_sentences, lower)\ndico_tags, tag_to_id, id_to_tag = tag_mapping(train_sentences)\n\n\ntrain_data = prepare_dataset(\n    train_sentences, word_to_id, tag_to_id, lower\n)\ndev_data = prepare_dataset(\n   dev_sentences, word_to_id, tag_to_id, lower\n)\ntest_data = prepare_dataset(\n    test_sentences, word_to_id,  tag_to_id,lower\n)\n\n\nprint(\"%i / %i sentences in train / test.\" % (\n    len(train_data),  len(test_data)))\n\n#exit()\n\ndef counts(dic):\n    vocab = set()  # collect the vocabulary in a set\n    maxlen = 0\n    for w in dic:\n        #w = w.lower()\n        vocab.add(w)\n    vocab = sorted(list(vocab))  # convert to a list and sort\n    return vocab\nvocab = counts(dico_words)\ndef _load_vocab(vocab_file):  # load the vocab from the senna_words.lst file\n    # load vocab from file\n    vocab = []\n    with open(vocab_file) as f:\n        for line in f:\n            w = line.strip()\n            vocab.append(w)\n    return vocab\ndef _load_embedding(embed_file, words_file):\n    words2id = {}\n    embed = np.load(embed_file)\n    words = _load_vocab(words_file)\n    for id, w in enumerate(words):\n        words2id[w] = id  # key: the word on that line; value: its row index in the embedding matrix\n    return embed, words2id\ndef final_embeddings(pretrain_embed_file, pretrain_words_file, vocab, word_to_id):\n    word_embeds = np.random.uniform(-np.sqrt(0.06), np.sqrt(0.06), (len(word_to_id), opts.word_dim))\n    pretrain_embed, pretrain_words2id = _load_embedding(\n        pretrain_embed_file,  # the pretrained embedding file\n
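        # assumption: rows of senna_words.lst align one-to-one with rows of embed50.senna.npy\n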
        pretrain_words_file)\n    vocab_size = len(vocab)\n    # words2id = {}\n    for w in vocab:\n        if w in pretrain_words2id:  # if the token is in the external senna_words.lst vocabulary, reuse its vector; otherwise fall back to random init\n            idx = pretrain_words2id[w]\n            word_embeds[word_to_id[w]] = pretrain_embed[idx]\n            # id = word_to_id[w]\n            # word_embed.append(pretrain_embed[id])  # take the vector from embed_file\n\n        else:\n            vec = np.random.normal(0, 0.1, parameters['word_dim'])\n            word_embeds[word_to_id[w]] = vec\n            # word_embed.append(vec)\n        # words2id[w] = idx\n    #word_embeds[''] = np.zeros(parameters['word_dim'])\n    # word_embed[0] = np.zeros(parameters['word_dim'])  # set row 0 of embed to zeros\n    return word_embeds, pretrain_words2id\npretrain_embed_file = \"models/embed50.senna.npy\"\npretrain_words_file = \"models/senna_words.lst\"\nword_embeds, pretrain_words2id = final_embeddings(pretrain_embed_file, pretrain_words_file, vocab, word_to_id)\n\nprint('Loaded %i pretrained embeddings.' % len(pretrain_words2id))\n\n\n\nwith open(mapping_file, 'wb') as f:\n    mappings = {\n        'word_to_id': word_to_id,\n        'tag_to_id': tag_to_id,\n        'parameters': parameters,\n        'word_embeds': word_embeds\n    }\n   # cPickle.dump(mappings, f)\n\nprint('word_to_id: ', len(word_to_id))\nmodel = BiLSTM_CRF(vocab_size=len(word_to_id),\n                   tag_to_ix=tag_to_id,\n                   embedding_dim=parameters['word_dim'],\n                   hidden_dim=parameters['word_lstm_dim'],\n                   use_gpu=use_gpu,\n                   pre_word_embeds=word_embeds,\n                   use_crf=True,     #parameters['crf'],\n                   ) #parameters['char_mode'])\n\t\t   # pos_embedding_dim=50,\n            #        nature_embedding_dim=20)\n                  # conNode_embedding_dim=10,\n                  # depNode_embedding_dim=10,\n                  # semroles_embedding_dim=10)\n                   # n_cap=4,\n                   # cap_embedding_dim=10)\nif parameters['reload']:\n    model.load_state_dict(torch.load(model_name))\nif use_gpu:\n    model.cuda()\nlearning_rate = 0.015\n\n#optimizer = torch.optim.Adam(model.parameters(), learning_rate)  # It has been proposed in Adam: A Method for Stochastic Optimization\noptimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, momentum=0.9)\n\nlosses = []\nloss = 0.0\nbest_dev_F = -1.0\nbest_test_F = -1.0\nbest_train_F = -1.0\n\nbest_dev_Acc = -1.0\nbest_test_Acc = -1.0\nbest_train_Acc = -1.0\nbest_dev_precision = -1.0\nbest_test_precision = -1.0\nbest_train_precision = -1.0\nbest_dev_recall = -1.0\nbest_test_recall = -1.0\nbest_train_recall = -1.0\nbest_dev_F1 = -1.0\nbest_test_F1 = -1.0\nbest_train_F1 = -1.0\nmax_epoch = 0\n\nall_F = [[0, 0, 0]]\nplot_every = 10\neval_every = 20\ncount = 0\n#vis = visdom.Visdom()\nsys.stdout.flush()\n\ndef Score_eval(prediction,datas,Score_Seq,isTest,flag=False):  # evaluation helper: computes sentence-level accuracy (PCS) plus P/R/F; entries are \"word gold_tag pred_tag\", sentences separated by ''\n    sentence_index = 1\n    if isTest:\n        f = codecs.open(\"ans_label\", 'w', 'utf-8')\n    TP_ = 0\n    FP_ = 0\n    FN_ = 0\n    score = Score_Seq\n    labels = ['A','B','O']\n    #labels = ['I','O']\n    #labels = ['B','I','O']\n    for relation in labels:\n        relation_count = 0\n        TP = 0  # prediction matches the gold label\n        FP = 0  # predicted this label but gold disagrees\n        FN = 0  # gold has this label but the prediction missed it\n        TN = 0\n        P = 0.0\n        R = 0.0\n        F = 0.0\n        for word_tag in prediction:\n            if word_tag == '':
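  # an empty entry marks a sentence boundary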
\n                continue\n            else:\n                word_tag = word_tag.split()  # split on whitespace\n                # word = word_tag[0]  # the token\n                gold_label = word_tag[1]  # gold label\n                pred_label = word_tag[2]  # predicted label\n                if relation in gold_label:\n                    relation_count += 1\n                    if gold_label == pred_label:\n                        TP += 1\n                    else:\n                        FN += 1\n                else:\n                    if relation in pred_label:\n                        FP += 1\n                    else:\n                        TN += 1\n        if (TP + FP) != 0:\n            P = 1.0 * TP / (TP + FP)  # denominator: everything predicted with this label\n        if (TP + FN) != 0:\n            R = 1.0 * TP / (TP + FN)  # denominator: everything actually labelled this way\n        if (P + R) != 0:\n            F = 2.0 * P * R / (P + R)\n        if relation != 'O':\n            TP_ += TP\n            FP_ += FP\n            FN_ += FN\n        if flag:\n            if (TP+FP) == 0: TP = 0.0000001\n            if relation_count == 0: relation_count = 0.000001\n            pp = 1.0 * TP / (TP + FP)\n            rr = 1.0 * TP / relation_count\n            sys.stderr.write(\"##%-12s samples:%-6s\\tP=%.4f\\tR=%.4f\\tF=%.4f\\t%4d,%4d,%4d,%4d,pp=%.4f\\trr=%.4f\\n\" % (\n                relation, relation_count, P, R, F, TP, FP, FN, (TP + FP), pp, rr))\n    if (TP_ + FP_) == 0: TP_ = 0.000001\n    if (TP_ + FN_) == 0: FN_ = 1.0\n    P = 1.0 * TP_ / (TP_ + FP_)\n    R = 1.0 * TP_ / (TP_ + FN_)\n    MicroF = 2.0 * P * R / (P + R)\n\n    sentence_word = []\n    senGold_Lab = []  # gold labels of every word in the current sentence\n    senPre_Lab = []  # predicted labels of every word in the current sentence\n   # score = []\n    ifAcc = 1  # assume the sentence is fully correct until a mismatch is seen\n    acc_num = 0.0\n    i = 0\n    j = 1\n    for e in prediction:\n        if e == '':\n            if ifAcc == 1:\n               acc_num += 1\n            if isTest:\n                for W,GL,PL,Score in zip(sentence_word,senGold_Lab,senPre_Lab,score):\n                    f.write('Sen: '+str(sentence_index)+ '\\t'+GL+'\\t' + PL +'\\t'+str(Score)+'\\t' +W +'\\n')\n            sentence_index += 1\n            sentence_word = []\n            senGold_Lab = []\n            senPre_Lab = []\n            score = []\n            i = 0\n            j = 1\n            ifAcc = 1\n        else:\n            e = e.split()  # split on whitespace\n            word = e[0]  # the token\n            gold_label = e[1]  # gold label\n            pred_label = e[2]  # predicted label\n            sentence_word.append(word)\n            senGold_Lab.append(gold_label)\n            senPre_Lab.append(pred_label)\n            score = Score_Seq\n            if gold_label != pred_label:\n                ifAcc = 0\n    Acc = acc_num/len(datas)\n    print(len(datas))\n    if isTest:\n        f.close()\n    return Acc,P,R,MicroF\n\n\n\ndef evaluating(model, datas, best_Acc,best_precision, best_recall, best_F1, is_Test,flag, epoch):\n    prediction = []\n    Score_Seq = []\n    prelabel_seq = []\n    str_words = []\n    ground_truth_id = []\n    save = False\n    new_F = 0.0\n    global max_epoch\n    for data in datas:\n        ground_truth_id = data['tags']\n        words = data['str_words']\n        dwords = Variable(torch.LongTensor(data['words']))\n\n        if use_gpu:\n            val, out,score_out = model(dwords.cuda())\n#, dconNode.cuda(), ddepNode.cuda(), dsemroles.cuda())#score , tag_sequence\n        else:\n            val, out,score_out = model(dwords)\n#, dconNode, ddepNode, dsemroles)\n        predicted_id = out\n        Score_Seq.extend(score_out)\n        #print(score_out)\n        #print(out)\n        
for (word, true_id, pred_id) in zip(words, ground_truth_id, predicted_id):\n            line = ' '.join([word, id_to_tag[true_id], id_to_tag[pred_id]])\n            #print(id_to_tag[pred_id])\n            prediction.append(line)\n           # confusion_matrix[true_id, pred_id,s_id] += 1\n        prediction.append('')\n    Acc,P,R,MicroF = Score_eval(prediction,datas, Score_Seq,is_Test,flag)\n    new_Acc = Acc\n    new_MicroF = MicroF\n    new_P = P\n    new_R = R\n    if new_Acc > best_Acc:\n        best_Acc = new_Acc\n        best_F1 = new_MicroF\n        best_precision = new_P\n        best_recall = new_R\n        save = True\n        if is_Test:\n            max_epoch = epoch\n\n    return best_Acc, new_Acc,best_recall,new_R,best_precision,new_P,best_F1,new_MicroF, save\n\nmodel.train(True)\nfor epoch in range(1, 70):\n    for i, index in enumerate(np.random.permutation(len(train_data))):\n        tr = time.time()\n        count += 1\n        data = train_data[index]\n\t#print(data)\n        model.zero_grad()\n\n        sentence_in = data['words']\n        sentence_in = Variable(torch.LongTensor(sentence_in))\n        tags = data['tags']\n\n        targets = torch.LongTensor(tags)\n\n        if use_gpu:\n            neg_log_likelihood = model.neg_log_likelihood(sentence_in.cuda(), targets.cuda())\n#, conNode.cuda(), depNode.cuda(), semroles.cuda())\n        else:\n            neg_log_likelihood = model.neg_log_likelihood(sentence_in, targets)\n#, conNode, depNode, semroles)\n        #print(neg_log_likelihood.data[0])\n        loss += neg_log_likelihood.data[0] / len(data['words'])\n        #print(loss)\n        neg_log_likelihood.backward()\n        torch.nn.utils.clip_grad_norm(model.parameters(), 5.0)\n        optimizer.step()\n\n        if count % plot_every == 0:\n            loss /= plot_every\n            print(count, ': ', loss)\n            if losses == []:\n                losses.append(loss)\n            losses.append(loss)\n            loss = 0.0\n        if count % 50 == 0:\n            model.train(False)\n            #best_dev_Acc, new_dev_Acc, save = evaluating(model, dev_data, best_dev_Acc, False, epoch)\n            #print('develop PCS: ' + str(new_dev_Acc))\n            best_dev_Acc, new_dev_Acc,best_dev_precision,new_dev_P,best_dev_recall,new_dev_R,best_dev_F1,new_dev_MicroF, save = evaluating(model, dev_data, best_dev_Acc, best_dev_precision, best_dev_recall, best_dev_F1,False,False, epoch)\n            print('develop Acc=%.4f\\tP=%.4f\\tR=%.4f\\tF=%.4f\\t'%(new_dev_Acc,new_dev_P,new_dev_R,new_dev_MicroF) )\n            if save:\n               torch.save(model, model_name)\n            #best_test_Acc, new_test_Acc, _ = evaluating(model, test_data, best_test_Acc, True, epoch)\n            #print('test PCS: ' + str(new_test_Acc))\n            best_test_Acc, new_test_Acc,best_test_precision,new_test_P,best_test_recall,new_test_R,best_test_F1,new_test_MicroF, _ = evaluating(model, test_data, best_test_Acc,best_test_precision, best_test_recall, best_test_F1, True,True, epoch)\n            print('test Acc=%.4f\\tP=%.4f\\tR=%.4f\\tF=%.4f\\t'% (new_test_Acc,new_test_P,new_test_R,new_test_MicroF) )\n\n            if count % 500 == 0:\n               print('epoch: ' + str(max_epoch) + '  Best test PCS: ' + str(best_test_Acc))\n               print('Bestdevelop Acc=%.4f\\tP=%.4f\\tR=%.4f\\tF=%.4f\\t ' %(best_dev_Acc,best_dev_precision,best_dev_recall,best_dev_F1))\n               print('epoch: ' + str(max_epoch) + '  Best test Acc=%.4f\\tP=%.4f\\tR=%.4f\\tF=%.4f\\t ' 
%(best_test_Acc,best_test_precision,\n                                                                         best_test_recall, best_test_F1))\n            sys.stdout.flush()\n            model.train(True)\n\n        if count % len(train_data) == 0:\n            adjust_learning_rate(optimizer, lr=learning_rate/(1+0.05*count/len(train_data)))\n\nprint('epoch: ' + str(max_epoch) + '  All Best develop Acc=%.4f\\tP=%.4f\\tR=%.4f\\tF=%.4f\\t '%(best_dev_Acc,best_dev_precision,best_dev_recall,best_dev_F1))\nprint('epoch: ' + str(max_epoch) + '  All Best test Acc=%.4f\\tP=%.4f\\tR=%.4f\\tF=%.4f\\t '%(best_test_Acc,best_test_precision,\n                                                                         best_test_recall, best_test_F1))\nprint(time.time() - t)\nfp = open(\"result.txt\",'w')\n\nfp.write('epoch: ' + str(max_epoch) + '  All Best develop Acc=%.4f\\tP=%.4f\\tR=%.4f\\tF=%.4f\\t '%(best_dev_Acc,best_dev_precision,best_dev_recall,best_dev_F1)+'\\n')\nfp.write('epoch: ' + str(max_epoch) + '  All Best test Acc=%.4f\\tP=%.4f\\tR=%.4f\\tF=%.4f\\t' %(best_test_Acc,best_test_precision,\n                                                                         best_test_recall, best_test_F1))\nfp.close()\n\n\n\n#plt.plot(losses)\n#plt.show()\n","sub_path":"train2.py","file_name":"train2.py","file_ext":"py","file_size_in_byte":18725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"625824053","text":"# Read the input\na, b, c = map(int, input().split())\n\n\n# Solve the problem\naverage = (a + b + c) / 3\n\na = int(abs(a - average))\nb = int(abs(b - average))\nc = int(abs(c - average))\n# Output the result\nprint(a, b, c)","sub_path":"Morning Problems/balance/soln/balance.py","file_name":"balance.py","file_ext":"py","file_size_in_byte":214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"518589094","text":"# -*- coding: utf-8 -*-\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom timeit import default_timer as timer\r\nfrom math import radians, cos, sin, asin, sqrt\r\nimport random\r\nimport math\r\n\r\n\r\n\r\n\r\n### gifts.csv is the original dataset containing 100k destinations and I am going\r\n### to random sample from it in order to create smaller ones\r\n\r\ngifts = pd.read_csv('gifts.csv')\r\n\r\ngifts1 = gifts.sample (n=10,random_state=1)      ### 10 - gifts dataset\r\ngifts2 = gifts.sample (n=100,random_state=2)     ### 100 - gifts dataset\r\ngifts3 = gifts.sample (n=1000,random_state=3)    ### 1000 - gifts dataset\r\n\r\n### set the gifts' Ids from 1 to N (size of the problem) for every dataset\r\n\r\ngifts1.index = pd.Series(np.arange(1,gifts1.shape[0]+1))\r\ngifts2.index = pd.Series(np.arange(1,gifts2.shape[0]+1))\r\ngifts3.index = pd.Series(np.arange(1,gifts3.shape[0]+1))\r\n\r\ngifts1.iloc[:,0] = pd.Series(np.arange(0,gifts1.shape[0]+1))\r\ngifts2.iloc[:,0] = pd.Series(np.arange(0,gifts2.shape[0]+1))\r\ngifts3.iloc[:,0] = pd.Series(np.arange(0,gifts3.shape[0]+1))\r\n\r\n\r\n\r\ngifts1.to_csv('gifts1.csv',encoding='utf-8')\r\ngifts2.to_csv('gifts2.csv',encoding='utf-8')\r\ngifts3.to_csv('gifts3.csv',encoding='utf-8')\r\n\r\n\r\n\r\n\r\n### implement the haversine distance function that calculates the distance between\r\n### two destinations given their longitude and latitude\r\n\r\ndef haversine(lat1, lon1, lat2, lon2):\r\n    \"\"\" Calculate the great-circle distance between two points on the Earth surface.\r\n    Takes 4 numbers, containing the latitude and longitude of each point in decimal degrees.\r\n\r\n    The default returned unit is kilometers.\r\n    \"\"\"\r\n    # mean earth radius - https://en.wikipedia.org/wiki/Earth_radius#Mean_radius\r\n    avg_earth_radius = 6371.0 # 6371.0088\r\n\r\n    # convert all latitudes/longitudes from decimal degrees to radians\r\n    lat1, lon1, lat2, lon2 = map(radians, (lat1, lon1, lat2, lon2))\r\n\r\n    # calculate haversine\r\n    dlon = lon2 - lon1\r\n    dlat = lat2 - lat1\r\n    d = sin(dlat * 0.5) ** 2 + cos(lat1) * cos(lat2) * sin(dlon * 0.5) ** 2\r\n    c = 2.0 * avg_earth_radius\r\n    return c  * asin(sqrt(d))\r\n\r\n\r\n\r\n\r\n\r\n\r\n####  create a function that gives us a tabular matrix, containing all the pairwise \r\n####  distances between our datasets' deliveries as well as the distances from\r\n####  and to North Pole, meaning Matrix[0,1] is the distance from North Pole to\r\n####  the first gift and Matrix[1,0] is again the distance between  the first \r\n####  gift and North Pole\r\n\r\ndef calculate_distance(gifts):\r\n    \r\n    \r\n      lats = np.array(gifts.iloc[:,1])   ### column 1 contains the latitudes of our gifts\r\n      longs = np.array(gifts.iloc[:,2])  ### column 2 contains the longitudes of our gifts\r\n      \r\n      lats = np.insert(lats,0,90)        ### we include the North Pole's longitude \r\n      longs = np.insert(longs,0,0)       ### and latitude in the distances matrix\r\n      \r\n                 \r\n      trip_distances = np.zeros(shape=(len(lats),len(longs)))\r\n      \r\n  ### we calculate every pairwise distance using the haversine function\r\n      \r\n      for i in range(len(lats)):\r\n           for j in range(len(longs)):\r\n               trip_distances[i,j]=haversine(lats[i],longs[i],lats[j],longs[j])\r\n        \r\n      return trip_distances\r\n\r\n\r\n### create the distance matrices for each of our problem 
instances\r\n\r\ndistances_problem_1 = calculate_distance(gifts1)\r\ndistances_problem_2 = calculate_distance(gifts2)\r\ndistances_problem_3 = calculate_distance(gifts3)\r\n\r\n\r\n\r\n\r\n### objective function : this function computes the total weariness of all needed trips\r\n\r\ndef weighted_reindeer_weariness(gifts):      \r\n    \r\n  \r\n    if (gifts.shape == gifts1.shape):        ### we define the distances matrix that\r\n        x = distances_problem_1              ### we are going to use according to\r\n    elif (gifts.shape == gifts2.shape):      ### the problem size\r\n        x = distances_problem_2\r\n    else:\r\n        x = distances_problem_3\r\n        \r\n    \r\n    gifts = gifts.reindex(np.random.permutation(gifts.index))     ### we perform a random permutation of the gifts\r\n    gifts = gifts.values\r\n   \r\n    dist_total = 0.0\r\n    weight = np.sum(gifts[:,3])             ### the total weight of gifts that need to\r\n                                            ### be delivered\r\n    \r\n    while  (weight > 0.001):                ### by checking the total weight we ensure that all gifts are delivered\r\n        extracted_gifts = None              ### python cannot recognise absolut 0 because of the many decimal points                     \r\n        extracted_gifts = np.compress(np.cumsum(gifts[:,3])<=330.0,gifts,axis=0)\r\n                                            ### we define the maximum weight limit for each trip as 330kg\r\n                                            ### as well as a subset of the initial dataset that is going to be delivered\r\n        sleigh_weight = 10                     \r\n        dist = 0.0\r\n        \r\n        \r\n        prev_weight = np.sum(extracted_gifts[:,3]) + sleigh_weight\r\n    \r\n        \r\n        dist += x[0,extracted_gifts[0,0].astype(int)]*prev_weight \r\n        prev_weight -= extracted_gifts[0,3]\r\n        \r\n        for delivery in range(len(extracted_gifts[:,0])-1):\r\n            \r\n        \r\n        \r\n           dist +=  x[extracted_gifts[delivery,0].astype(int),extracted_gifts[delivery+1,0].astype(int)]* prev_weight\r\n           prev_weight -= extracted_gifts[delivery+1,3]\r\n        \r\n    \r\n        \r\n        dist +=  x[extracted_gifts[-1,0].astype(int),0]* sleigh_weight\r\n\r\n        gifts = np.delete(gifts,np.s_[:len(extracted_gifts)],0)   ### we delete the subset of the\r\n        weight -= np.sum(extracted_gifts[:,3])                    ### catalogue that has been delivered\r\n        \r\n        dist_total += dist\r\n     \r\n        \r\n    \r\n    return dist_total\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n### this is another variant of the objective function according to the metaheuristic \r\n### that is going to be used\r\n\r\ndef weighted_reindeer_weariness_2(gifts):    ### we use a different function in order\r\n                                             ### to estimate total weariness because now\r\n                                             ### we need to obtain the initial sequence of deliveries\r\n                                             ### as well as the total number of trips. 
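    # the shape comparison below picks which precomputed distance matrix to use\r\n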
    if (gifts.shape == gifts1.shape):        ### use this function at the beginning in order\r\n        x = distances_problem_1              ### to obtain our initial solution\r\n    elif (gifts.shape == gifts2.shape):      \r\n        x = distances_problem_2\r\n    else:\r\n        x = distances_problem_3\r\n        \r\n    counter_trips = np.array([1])                    ### counter of trips\r\n    wght = np.array([0],dtype=float)                 ### sleigh's weight per trip\r\n    trip = np.empty(shape=[0],dtype=int)             ### an array containing every gift's TripId !!!!!\r\n   \r\n    \r\n    gifts = gifts.values                             \r\n    for i in range(1,len(gifts)+1):\r\n        \r\n        if (wght + gifts[i-1,3]) <= 330.0:           ### we ensure that our weight limit is not exceeded\r\n            trip = np.append(trip,counter_trips)     ### the fourth column is the weight column\r\n            wght += gifts[i-1,3]                     ### the gift is loaded in the sleigh\r\n            \r\n        else:                                        ### the sleigh is full and we need to define a new trip\r\n            \r\n            wght = np.array([0],dtype=float)\r\n            counter_trips += 1\r\n            trip = np.append(trip,counter_trips)\r\n            wght += gifts[i-1,3]   \r\n    \r\n    gifts = np.insert(gifts,4,trip,axis=1)           ### insert column with trip IDs !!!!!!\r\n  \r\n    \r\n  \r\n    distol = 0.0\r\n    for t in range(1,np.max(gifts[:,4].astype(int))+1):              ### for the total of trips\r\n        gifts_new = np.empty(shape=[np.count_nonzero(trip == t),5])  ### how many gifts in trip t\r\n        gifts_new = np.array(gifts[gifts[:,4]== t])                  ### gifts_new is a submatrix for every trip\r\n        sleigh_weight = 10\r\n        dist = 0.0\r\n    \r\n        prev_weight = np.sum(gifts_new[:,3]) + sleigh_weight\r\n    \r\n    \r\n        dist += x[0,gifts_new[0,0].astype(int)]*prev_weight \r\n        prev_weight -= gifts_new[0,3]\r\n    \r\n        for delivery in range(len(gifts_new[:,0])-1):\r\n    \r\n        \r\n           dist +=  x[gifts_new[delivery,0].astype(int),gifts_new[delivery+1,0].astype(int)]* prev_weight\r\n           prev_weight -= gifts_new[delivery+1,3]\r\n        \r\n    \r\n    \r\n        dist +=  x[gifts_new[-1,0].astype(int),0]* sleigh_weight\r\n        \r\n        distol += dist\r\n        \r\n    return int(distol),gifts               ### returns total weariness as well as\r\n                                           ### the dataset as 2D numpy array\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n                                                     ### we will use this function in order to obtain\r\n                                                     ### our 1000xN solution evaluations\r\ndef  weighted_reindeer_weariness_3(gifts,x):         ### the input arguments of this function are the gifts dataset - \r\n                                                     ### after the swaps as a 2D np array as well as the distances matrix\r\n                                                         \r\n    trip = gifts[:,4].astype(int)                    ### the fifth column contains the TripIds     \r\n    distol = 0.0\r\n    \r\n    for t in range(1,np.max(trip)+1):                                 \r\n        gifts_new = np.empty(shape=[np.count_nonzero(trip == t),5])   \r\n        gifts_new = np.array(gifts[gifts[:,4]== t])                   \r\n        sleigh_weight = 10\r\n        dist = 0.0
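\r\n        # weariness for each leg = haversine distance x weight still on the sleigh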
\r\n    \r\n        prev_weight = np.sum(gifts_new[:,3]) + sleigh_weight\r\n    \r\n    \r\n        dist += x[0,gifts_new[0,0].astype(int)]*prev_weight \r\n        prev_weight -= gifts_new[0,3]\r\n    \r\n        for delivery in range(len(gifts_new[:,0])-1):\r\n    \r\n        \r\n           dist +=  x[gifts_new[delivery,0].astype(int),gifts_new[delivery+1,0].astype(int)]* prev_weight\r\n           prev_weight -= gifts_new[delivery+1,3]\r\n        \r\n    \r\n    \r\n        dist +=  x[gifts_new[-1,0].astype(int),0]* sleigh_weight\r\n        \r\n        distol += dist\r\n        \r\n    return int(distol)\r\n\r\n","sub_path":"Fundamental_Calculation_Functions.py","file_name":"Fundamental_Calculation_Functions.py","file_ext":"py","file_size_in_byte":10274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"88896563","text":"\r\nf1 = lambda x,y,z: (30-y+5*z)/20\r\nf2 = lambda x,y,z: (-18-4*x+z)/20\r\nf3 = lambda x,y,z: (25-1*x+7*y)/20\r\n\r\nx0 = 0\r\ny0 = 0\r\nz0 = 0\r\ncount = 1\r\n\r\ne = float(input('Error tolerable: '))\r\n\r\n\r\nprint('\\nCount\\tx\\ty\\tz\\n')\r\n\r\ncondition = True\r\n\r\nwhile condition:\r\n    x1 = f1(x0,y0,z0)\r\n    y1 = f2(x0,y0,z0)\r\n    z1 = f3(x0,y0,z0)\r\n    print('%d\\t%0.4f\\t%0.4f\\t%0.4f\\n' %(count, x1,y1,z1))\r\n    e1 = abs(x0-x1);\r\n    e2 = abs(y0-y1);\r\n    e3 = abs(z0-z1);\r\n    \r\n    count += 1\r\n    x0 = x1\r\n    y0 = y1\r\n    z0 = z1\r\n    \r\n    condition = e1>e and e2>e and e3>e\r\n\r\nprint('\\nSolucion: x=%0.3f, y=%0.3f and z = %0.3f\\n'% (x1,y1,z1))","sub_path":"GabrielValenciaArana/jacobi.py","file_name":"jacobi.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"287554462","text":"#!/usr/bin/env python\n\n\"\"\"\nBase class for converters\n\"\"\"\n\nfrom typing import Union\nfrom slyr.parser.symbol_parser import (\n    Symbol,\n    SymbolLayer,\n    FillSymbol,\n    FillSymbolLayer,\n    LineSymbol,\n    LineSymbolLayer,\n    MarkerSymbol,\n    MarkerSymbolLayer\n)\n\n\nclass NotImplementedException(Exception):\n    \"\"\"\n    Raised when a symbol type or symbol layer type is not yet implemented in the converter\n    \"\"\"\n    pass\n\n\nclass Converter:\n    \"\"\"\n    Base class for symbol Converters\n    \"\"\"\n\n    def convert_symbol(self, symbol: Symbol):\n        \"\"\"\n        Converts a symbol.\n        :param symbol: symbol to convert\n        \"\"\"\n        if issubclass(symbol.__class__, (FillSymbolLayer, FillSymbol)):\n            self.convert_fill_symbol(symbol)\n        elif issubclass(symbol.__class__, (LineSymbolLayer, LineSymbol)):\n            self.convert_line_symbol(symbol)\n        elif issubclass(symbol.__class__, (MarkerSymbolLayer, MarkerSymbol)):\n            self.convert_marker_symbol(symbol)\n        else:\n            raise NotImplementedException(str(symbol.__class__))\n\n    def convert_fill_symbol(self, symbol: Union[SymbolLayer, FillSymbol]):  # pylint: disable=unused-argument\n        \"\"\"\n        Converts a FillSymbol\n        \"\"\"\n        raise NotImplementedException('Fill symbol conversion not implemented')\n\n    def convert_line_symbol(self, symbol: Union[SymbolLayer, LineSymbol]):  # pylint: disable=unused-argument\n        \"\"\"\n        Converts a LineSymbol\n        \"\"\"\n        raise NotImplementedException('Line symbol conversion not implemented')\n\n    def convert_marker_symbol(self, symbol: Union[SymbolLayer, MarkerSymbol]):  # pylint: disable=unused-argument\n        \"\"\"\n        Converts a MarkerSymbol\n        \"\"\"\n        raise NotImplementedException('Marker symbol conversion not implemented')\n","sub_path":"slyr/converters/converter.py","file_name":"converter.py","file_ext":"py","file_size_in_byte":1822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"281831985","text":"from django.core.management.base import BaseCommand, CommandError\n\nfrom reservation_system.models import PlatformSettings\n\nclass Command(BaseCommand):\n    help = \"Inserts into the DB the default settings\"\n\n    def handle(self, *args, **options):\n        self.stdout.write(\"Inserting into the DB the default settings\")\n\n        self.stdout.write(\"Inserting default os setting (settingId: default_os)\")\n        defaultOs = PlatformSettings(settingId=\"default_os\",\n                                    value=\"ubuntu/xenial:xenial:amd64/ga-16.04\",\n                                    note=\"name_full:distro_series:hwe_kernel\")\n        defaultOs.save()\n\n        self.stdout.write(\"Done\")\n","sub_path":"maas_reservation_system/reservation_system/management/commands/setupDefaultSettings.py","file_name":"setupDefaultSettings.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"600003241","text":"from flask import Flask, render_template, request\r\nimport numpy as np\r\nimport os\r\nfrom newspaper import fulltext\r\nimport requests\r\nfrom summarizer import Summarizer,TransformerSummarizer\r\nfrom transformers import T5Tokenizer, T5ForConditionalGeneration, T5Config\r\nimport torch\r\n\r\napp = Flask(__name__)\r\n@app.route(\"/\", methods=['GET'])\r\ndef home():\r\n    options = ['Bert','XLNET','GPT-2','T5-Transformer']\r\n    return render_template('index.html',options=options)\r\n\r\n@app.route(\"/predict\", methods=['GET', 'POST'])\r\ndef predict():\r\n    if request.method == 'POST':\r\n        article_url = request.form['url']\r\n        model_name = request.form['options']\r\n        article = fulltext(requests.get(article_url).text)\r\n        if model_name=='Bert':\r\n            model = Summarizer()\r\n            summary = ''.join(model(article, min_length=60))\r\n        elif model_name=='XLNET':\r\n            model = TransformerSummarizer(transformer_type=\"XLNet\",transformer_model_key=\"xlnet-base-cased\")\r\n            summary = ''.join(model(article, min_length=60))\r\n        elif model_name=='GPT-2':\r\n            model = TransformerSummarizer(transformer_type=\"GPT2\",transformer_model_key=\"gpt2-medium\")\r\n            summary = ''.join(model(article, min_length=60))\r\n        else:\r\n            model = T5ForConditionalGeneration.from_pretrained('t5-small')\r\n            tokenizer = T5Tokenizer.from_pretrained('t5-small')\r\n            device = torch.device('cpu')\r\n            preprocess_text = article.strip().replace(\"\\n\",\"\")\r\n            t5_prepared_Text = \"summarize: \"+preprocess_text\r\n\r\n            tokenized_text = tokenizer.encode(t5_prepared_Text, return_tensors=\"pt\").to(device)\r\n    \r\n            summary_ids = model.generate(tokenized_text,\r\n                                    num_beams=4,\r\n                                    no_repeat_ngram_size=2,\r\n                                    min_length=500,\r\n                                    max_length=2000,\r\n                                    early_stopping=True)\r\n\r\n            summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True)\r\n\r\n        f = open(\"Web-app/templates/summary.txt\", \"w\")\r\n        f.write(summary)\r\n        f.close()\r\n        return render_template('sec.html', pred_output=summary)#, user_image=\"../static/user uploaded/\"+filename)\r\n\r\nif __name__ == \"__main__\":\r\n    app.run(threaded=True)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"149826927","text":"from __future__ import print_function, absolute_import, division #makes KratosMultiphysics backward compatible with python 2.6 and 2.7\nimport time\n\nfrom KratosMultiphysics import *\nfrom KratosMultiphysics.GPUSolversApplication import *\n\nspace_utils = UblasSparseSpace()\n\npA = space_utils.CreateEmptyMatrixPointer()\npb = space_utils.CreateEmptyVectorPointer()\n\nA = (pA).GetReference()\nb = (pb).GetReference()\n\n\nmatrix_filename = \"mat8.mm\"\n# matrix_filename = \"A_0.01005_.mm\"\n# matrix_filename = \"toTest/mat80.mm\"\n\nReadMatrixMarketMatrix(matrix_filename, A)\nprint(\"finished reading matrix\")\n\nvector_filename = \"vecb8.mm\"\n# vector_filename = \"b_0.01005_.mm\"\n# vector_filename = \"toTest/vecb80.mm\"\n\nReadMatrixMarketVector(vector_filename, b)\nprint(\"finished reading vector\")\n\nprint(\"reading finished\")\n\npreSweeps = Vector(5)\npostSweeps = Vector(5)\n\npreSweeps[0] = 1\npostSweeps[0] = 1\npreSweeps[1] = postSweeps[1] = 1\npreSweeps[2] = postSweeps[2] = 1\npreSweeps[3] = postSweeps[3] = 1\npreSweeps[4] = postSweeps[4] = 1\n\nW = 4.0 / 3.0\n\n# TESTING IF CG AND BICGSTAB CAN ITERATE INDEFINITIVELY\nx = Vector(len(b))\n\nspace_utils.SetToZeroVector(x)\n\nprint(\"AMG standalone\")\nt1 = time.time()\nlinear_solver2 = AMGSolver(1e-9, 5000, W, 2, False, 5, 100, preSweeps, postSweeps)\nlinear_solver2.Solve(A, x, b)\nprint(linear_solver2)\nprint(\"\\n\\n\")\nt2 = time.time()\nprint('Solve time: %0.3f ms' % ((t2 - t1) * 1000.0))\n\nexit(0)\n\nspace_utils.SetToZeroVector(x)\n\nprint(\"GPU CG + SA-AMG\")\nt1 = time.time()\nprecond1 = KratosAMGPreconditioner(W, 2, True, 5, 1000, preSweeps, postSweeps, True)\nlinear_solver1 = GPUCGSolver(1e-9, 5000, precond1)\nlinear_solver1.Solve(A, x, b)\nprint(linear_solver1)\nprint(\"\\n\\n\")\nt2 = time.time()\nprint('Solve time: %0.3f ms' % ((t2 - t1) * 1000.0))\n# linear_solver=0\n\n\nspace_utils.SetToZeroVector(x)\n\nprint(\"GPU CG + diag precond\")\nt1 = time.time()\nprecond2 = GPUDiagonalPreconditioner()\nlinear_solver2 = GPUCGSolver(1e-9, 5000, precond2)\nlinear_solver2.Solve(A, x, b)\nprint(linear_solver2)\nprint(\"\\n\\n\")\nt2 = time.time()\nprint('Solve time: %0.3f ms' % ((t2 - t1) * 1000.0))\n# linear_solver=0\n\nspace_utils.SetToZeroVector(x)\n\nprint(\"GPU CG\")\nt1 = time.time()\nlinear_solver2 = GPUCGSolver(1e-9, 5000)\nlinear_solver2.Solve(A, x, b)\nprint(linear_solver2)\nprint(\"\\n\\n\")\nt2 = time.time()\nprint('Solve time: %0.3f ms' % ((t2 - t1) * 1000.0))\n# linear_solver=0\n\n\nspace_utils.SetToZeroVector(x)\n\nprint(\"CPU CG + diag precond\")\nt1 = time.time()\nprecond3 = DiagonalPreconditioner()\nlinear_solver3 = CGSolver(1e-9, 5000, precond3)\nlinear_solver3.Solve(A, x, b)\nprint(linear_solver3)\nprint(\"\\n\\n\")\nt2 = time.time()\nprint('Solve time: %0.3f ms' % ((t2 - t1) * 1000.0))\n# linear_solver3=0\n\nspace_utils.SetToZeroVector(x)\n\nprint(\"GPU BICG + diag precond\")\nt1 = time.time()\nprecond2 = GPUDiagonalPreconditioner()\nlinear_solver2 = GPUBICGSTABSolver(1e-9, 5000, precond2)\nlinear_solver2.Solve(A, x, b)\nprint(linear_solver2)\nprint(\"\\n\\n\")\nt2 = time.time()\nprint('Solve time: %0.3f ms' % ((t2 - t1) * 1000.0))\n# linear_solver=0\n\nspace_utils.SetToZeroVector(x)\n\nprint(\"GPU BICG\")\nt1 = time.time()\nlinear_solver2 = GPUBICGSTABSolver(1e-9, 5000)\nlinear_solver2.Solve(A, x, b)\nprint(linear_solver2)\nprint(\"\\n\\n\")\nt2 = time.time()\nprint('Solve time: %0.3f ms' % ((t2 - t1) * 1000.0))\n# linear_solver=0\n\nspace_utils.SetToZeroVector(x)\n\nprint(\"CPU 
t1 = time.time()\nprecond2 = DiagonalPreconditioner()\nlinear_solver2 = BICGSTABSolver(1e-9, 5000, precond2)\nlinear_solver2.Solve(A, x, b)\nprint(linear_solver2)\nprint(\"\\n\\n\")\nt2 = time.time()\nprint('Solve time: %0.3f ms' % ((t2 - t1) * 1000.0))\n# linear_solver=0\n\n\n# linear_solver=0\n\nexit(0)\n","sub_path":"kratos/applications/gpu_solvers_application/test_examples/LinearSolversTesting/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"11750688","text":"'''\nThis is a rock , paper,scissors game where two players play and one wins\nIn this game, one of the below happens\nRock beats scissors\nScissors beat paper\nPaper beats Rock\n'''\nimport random\nGame_list = ['Rock', 'scissors', 'paper']\nplayer1_score = 0\nplayer2_score = 0\nround_num = 1\n#Game_On = True\nMAX_POINTS = int(input('for how many points, you guys want to play the Game: '))\n\n'''function to deide the winner'''\ndef game_func(player1, player2):\n    if player1 == player2:\n\t    return \"Tie Match\"\n    elif player1 == 'Rock' and player2 == 'scissors':\n\t    return 'Player1 wins'\n    elif player1 == 'Rock' and player2 == 'paper':\n        return 'Player2 wins'\n    elif player1 == 'scissors' and player2 == 'Rock':\n        return 'Player2 wins'\n    elif player1 == 'scissors' and player2 == 'paper':\n        return 'Player1 wins'\n    elif player1 == 'paper' and player2 == 'Rock':\n        return 'Player1 wins'\n    elif player1 == 'paper' and player2 == 'scissors':\n        return 'Player2 wins'\n\nwhile player1_score < MAX_POINTS and player2_score < MAX_POINTS:\n\trandom.shuffle(Game_list)\n\tprint(f' Round Number {round_num} : ')\n\tchoice1 = int(input('Player1, please choose b/w (0,1,2): '))\n\tchoice2 = int(input('Player2, please choose b/w (0,1,2): '))\n\tplayer1 = Game_list[choice1]\n\tprint(f'player1 value is: {player1}')\n\tplayer2 = Game_list[choice2]\n\tprint(f'player2 value is: {player2}')\n\t'''\n\tCall the game function defined above\n\t'''\n\t#print('Executing the game function')\n\t\n\tresult = game_func(player1, player2)\n\tprint(result)\n\tif result == 'Player1 wins':\n\t\tplayer1_score += 1\n\telif result == 'Player2 wins':\n\t\tplayer2_score += 1\n\tround_num += 1\n\tprint(f'Player1 score is : {player1_score}')\n\tprint(f'Player2 score is : {player2_score}')\n\tprint('\\n')\n\n\"\"\"\nBased on the final score deciding the winner of the Game\n\"\"\"\nif player1_score > player2_score:\n\tprint('Final Winner is PLAYER-1')\nelse:\n\tprint('Final Winner is PLAYER-2')\n\n\t\n\n\t#ANSWER = input('do you want to play again: ')\n\t#if ANSWER not in ['YES', 'Yes', 'yes', 'Y', 'y']:\n\t#\tGame_On = False\n\n\n\n\n\n\n\t\n","sub_path":"Rock_paper_scisor.py","file_name":"Rock_paper_scisor.py","file_ext":"py","file_size_in_byte":2066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"135083179","text":"#Time Complexity : O(N) where N is number of elemeents\r\n#Space Complexity :O(1)\r\nclass Solution:\r\n    def canJump(self, nums: List[int]) -> bool:\r\n        if (nums == None or len(nums) < 2):\r\n            return True\r\n        destination = len(nums) - 1\r\n        for i in range(destination - 1, -1, -1):\r\n            if (i + nums[i] >= destination):\r\n                destination = i     \r\n        return (destination == 0) ","sub_path":"jump1.py","file_name":"jump1.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"121091808","text":"from Crypto.Random import get_random_bytes\nfrom Crypto.Cipher import DES, AES\nfrom Crypto.Util.Padding import pad,unpad\nfrom Crypto.Util import Counter\n\nclass AES_CIPHER:\n\n    BLOCK_SIZE_AES = 16 # Bloque de 128 bits\n\n    def __init__(self,key):\n        \"\"\"Inicializa las variables locales\"\"\"\n        self.key = key\n\n   # Cifrado AES con los distintos modos de operación\n\n    def cifrarECB(self, cadena, IV):\n         cipherECB = AES.new(self.key, AES.MODE_ECB)\n         ciphertextECB = cipherECB.encrypt(pad(cadena.encode(\"utf-8\"),self.BLOCK_SIZE_AES))\n         print(\"Texto cifrado en modo ECB: \")\n         print(ciphertextECB)\n         return ciphertextECB\n\n    def cifrarCTR(self, cadena, IV):\n        ctr = Counter.new(128) #Bloques de 128 bits\n        cipherCTR = AES.new(self.key, AES.MODE_CTR, counter = ctr)\n        ciphertextCTR = cipherCTR.encrypt(pad(cadena.encode(\"utf-8\"),self.BLOCK_SIZE_AES))\n        print(\"Texto cifrado en modo CTR: \")\n        print(ciphertextCTR)\n        return ciphertextCTR\n\n    def cifrarOFB(self, cadena, IV):\n        cipherOFB = AES.new(self.key, AES.MODE_OFB, IV)\n        ciphertextOFB = cipherOFB.encrypt(pad(cadena.encode(\"utf-8\"),self.BLOCK_SIZE_AES))\n        print(\"Texto cifrado en modo OFB: \")\n        print(ciphertextOFB)\n        return ciphertextOFB\n\n    def cifrarCFB(self, cadena, IV):\n        cipherCFB = AES.new(self.key, AES.MODE_CFB, IV)\n        ciphertextCFB = cipherCFB.encrypt(pad(cadena.encode(\"utf-8\"),self.BLOCK_SIZE_AES))\n        print(\"Texto cifrado en modo CFB: \")\n        print(ciphertextCFB)\n        return ciphertextCFB\n\n    # Descifrado AES con los distintos modos de operación\n\n    def descifrarECB(self, cifrado, IV):\n        decipher_aesECB = AES.new(key, AES.MODE_ECB)\n        new_dataECB = unpad(decipher_aesECB.decrypt(cifrado), self.BLOCK_SIZE_AES).decode(\"utf-8\", \"ignore\")\n        print(\"Texto descifrado en modo ECB: \")\n        print(new_dataECB)\n        return new_dataECB\n\n    def descifrarCTR(self, cifrado, IV):\n        ctr = Counter.new(128) # Bloques de 128 bits\n        decipher_aesCTR = AES.new(key, AES.MODE_CTR, counter = ctr)\n        new_dataCTR = unpad(decipher_aesCTR.decrypt(cifrado), self.BLOCK_SIZE_AES).decode(\"utf-8\", \"ignore\")\n        print(\"Texto descifrado en modo CTR: \")\n        print(new_dataCTR)\n        return new_dataCTR\n\n    def descifrarOFB(self, cifrado, IV):\n        decipher_aesOFB = AES.new(key, AES.MODE_OFB, IV)\n        new_dataOFB = unpad(decipher_aesOFB.decrypt(cifrado), self.BLOCK_SIZE_AES).decode(\"utf-8\", \"ignore\")\n        print(\"Texto descifrado en modo OFB: \")\n        print(new_dataOFB)\n        return new_dataOFB\n\n    def descifrarCFB(self, cifrado, IV):\n        decipher_aesCFB = AES.new(key, AES.MODE_CFB, IV)\n        new_dataCFB= unpad(decipher_aesCFB.decrypt(cifrado), self.BLOCK_SIZE_AES).decode(\"utf-8\", \"ignore\")\n        print(\"Texto descifrado en modo CFB: \")\n        print(new_dataCFB)\n        return new_dataCFB\n\n\n# Datos necesarios\nkey = get_random_bytes(16) # Clave aleatoria de 128, 192 o 256 bits (16, 24 o 32 bytes)\nIV = get_random_bytes(16)  # IV aleatorio de 128 bits\ndata = \"Hola Amigos de Seguridad\" # Datos a cifrar\nprint(data)\na = AES_CIPHER(key)\ncifradoECB = a.cifrarECB(data,IV)\ncifradoCTR = a.cifrarCTR(data,IV)\ncifradoOFB = a.cifrarOFB(data,IV)\ncifradoCFB = a.cifrarCFB(data,IV)\ndescifradoECB = a.descifrarECB(cifradoECB, IV)\ndescifradoCTR 
= a.descifrarCTR(cifradoCTR, IV)\ndescifradoOFB = a.descifrarOFB(cifradoOFB, IV)\ndescifradoCFB = a.descifrarCFB(cifradoCFB, IV)\n\n\n","sub_path":"Seguridad de la Información/Práctica 2 - Seguridad /Ej3.py","file_name":"Ej3.py","file_ext":"py","file_size_in_byte":3519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"291194608","text":"from sqlalchemy import Column, String, Boolean, Integer, DateTime\nfrom sqlalchemy import ForeignKey\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import relationship\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.ext.declarative import declarative_base\n\n\n# declare a base for database\nBase = declarative_base()\n\n# declare engine\nengine = create_engine('\\\npostgres+psycopg2:///test_chema',  pool_use_lifo=True, pool_pre_ping=True)\n\nSession = sessionmaker(bind=engine)\n\n\nclass Server(Base):\n    __tablename__ = 'server'\n    name = Column(String, primary_key=True)\n    enabled = Column(Boolean)\n    info = relationship(\"ServerInfo\", back_populates=\"server\")\n    owners = relationship(\"ServerOwner\", back_populates=\"servers\")\n\n    def __repr__(self):\n        return 'Server' % (self.name, self.enabled)\n\n\nclass ServerInfo(Base):\n    __tablename__ = \"server_info\"\n    server_name = Column(String, ForeignKey('server.name'), primary_key=True)\n    ram = Column(Integer)\n    cpu = Column(Integer)\n    server = relationship(\"Server\", back_populates=\"info\")\n\n    def __repr__(self):\n        return \"ServerInfo\" % (self.server_name, self.ram, self.cpu)\n\n\nclass ServerOwner(Base):\n    __tablename__ = \"server_owner\"\n    id = Column(Integer, primary_key=True)\n    server_name = Column(String, ForeignKey('server.name'))\n    owner_name = Column(String)\n    servers = relationship(\"Server\", back_populates=\"owners\")\n\n    def __repr__(self):\n        return \"ServerOwner\" %(self.server_name, self.owner_name)\n\n\nclass Escalate(Base):\n    __tablename__ = \"escalate\"\n    id = Column(Integer, primary_key=True)\n    time = Column(DateTime)\n    reporter = Column(String)\n    type = Column(String)\n\n    def __repr__(self):\n        return \"Escalate\" % (self.time, self.reporter, self.type)\n\n\nclass ContactInfo(Base):\n    __tablename__ = 'contact_info'\n    id = Column(Integer, primary_key=True)\n    start_time = Column(DateTime, nullable=False)\n    end_time = Column(DateTime, nullable=False)\n    owner_name = Column(String, nullable=False)\n    tel = Column(String, nullable=False)\n\n    def __repr__(self):\n        return \"ContactInfo\" % (self.owner_name, self.tel, self.start_time, self.end_time)\n\n\nBase.metadata.create_all(engine)\n\n\n","sub_path":"db/connect_db.py","file_name":"connect_db.py","file_ext":"py","file_size_in_byte":2378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"395546699","text":"from setuptools import setup, find_packages\n\n# Enforce python 3\nimport sys\nif sys.version_info < (3,0):\n    sys.exit('Sorry, Python < 3.0 is not supported')\n\n# Read requirements.txt as canonical dependencies\nwith open(\"requirements.txt\") as data:\n    install_requires = [line for line in filter(lambda x: x, data.read().split(\"\\n\"))]\n\n# To link in these files (for development) directly just run \"pip install -e .\"\nsetup(name=\"sourcerer\",\n    version=\"0.1\",\n    description=\"Source control for your source controlled folders\",\n    author=\"Steve Armstrong\",\n    author_email=\"steve@horsefire.com\",\n    url=\"https://github.com/stevearm/sourcerer.git\",\n    licence=\"Apache Software License\",\n    packages=find_packages(),\n    install_requires=install_requires,\n    entry_points={\n        \"console_scripts\": [\n            \"srcr = sourcerer.cmd:main\",\n        ],\n    },\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"283127230","text":"def task1(string):\n    return 'Input string \"{}\"\\nOutput string \"{}\"\\n' \\\n           '\"BC\" pairs count = {}, \"DE\" pairs count = {}'.format(\n            string, string.replace(',A', ','), string.count('BC'), string.count('DE'))\n\n\ndef task2(string):\n    words = string.split()\n    count = 0\n    for word in words:\n        if 'П' in word:\n            count += 1\n    return 'Input string \"{}\"\\nWords\\' with \"П\" count is equal to {}'.format(string, count)\n\n\ndef task3(string):\n    words = string.split()\n    rev_words = []\n    for word in words:\n        if word == word[-1:: -1]:\n            rev_words.append(word)\n    return 'Input string \"{}\"\\nChangeling words: {}'.format(string, rev_words)\n\n\ndef task4(string):\n    alpha_dict = dict()\n    for let in string:\n        if let.isalpha():\n            if let.upper() in alpha_dict:\n                alpha_dict[let.upper()] += 1\n            else:\n                alpha_dict[let.upper()] = 1\n    count_dict = dict()\n    for key, value in alpha_dict.items():\n        if value in count_dict:\n            count_dict[value].append(key)\n        else:\n            count_dict[value] = [key]\n    return 'Input string \"{}\"\\nThe most common letters: {}'.format(string, sorted(count_dict[max(count_dict.keys())]))\n\n\ndef find_max_len_words(string):\n    words = string.split()\n    words.sort(key=lambda word: len(word))\n    return sorted([word for word in words if len(word) == len(words[-1])])\n\n\ndef task5(string):\n    return 'Input string \"{}\"\\n' \\\n           'The biggest word \"{}\"'.format(string, find_max_len_words(string)[0])\n\n\ndef task6(string, a):\n    max_words = ' '.join(find_max_len_words(string))\n    return 'Input string \"{}\"\\n' \\\n           'The letter {} occurs {} times in words of maximum length'.format(string, a, max_words.count(a))\n\n\ntask7 = task5\n\n\ndef task8(string, first_word):\n    count = len(first_word)\n    word = first_word\n    while count < len(string):\n        new_word = ''\n        for let in word:\n            if let == 'A':\n                new_word += 'BAB'\n            else:\n                new_word += 'A'\n        count += len(new_word)\n        word = new_word\n    return 'Input string \"{}\"\\n' \\\n           'The last word of string \"{}\"'.format(string, word)\n","sub_path":"course2/term1/kpiyap/lab7/lab7.py","file_name":"lab7.py","file_ext":"py","file_size_in_byte":2222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"150512362","text":"assignments = []\r\n\r\ndef assign_value(values, box, value):\r\n    \"\"\"\r\n    Please use this function to update your values dictionary!\r\n    Assigns a value to a given box. If it updates the board record it.\r\n    \"\"\"\r\n\r\n    # Don't waste memory appending actions that don't actually change any values\r\n    if values[box] == value:\r\n        return values\r\n\r\n    values[box] = value\r\n    if len(value) == 1:\r\n        assignments.append(values.copy())\r\n    return values\r\n\r\ndef solve(grid):\r\n    \"\"\"\r\n    Find the solution to a Sudoku grid.\r\n    Args:\r\n        grid(string): a string representing a sudoku grid.\r\n            Example: '2.............62....1....7...6..8...3...9...7...6..4...4....8....52.............3'\r\n    Returns:\r\n        The dictionary representation of the final sudoku grid. False if no solution exists.\r\n    \"\"\"\r\n    global rows\r\n    rows = 'ABCDEFGHI'\r\n    global cols\r\n    cols = '123456789'\r\n    global dan\r\n    dan = cross(rows,cols)\r\n    row_units = [cross(r, cols) for r in rows]\r\n    column_units = [cross(rows, c) for c in cols]\r\n    square_units = [cross(rs, cs) for rs in ('ABC','DEF','GHI') for cs in ('123','456','789')]\r\n    diagonal_units = [['A1','B2','C3','D4','E5','F6','G7','H8','I9'],['A9','B8','C7','D6','E5','F4','G3','H2','I1']]\r\n    global unitlist\r\n    da = []\r\n    w = 0\r\n    for i in range(9):\r\n        if grid[w] != '.':\r\n            da.append(grid[w])\r\n        else:\r\n            da = da\r\n        w = w + 10\r\n\r\n\r\n\r\n    e = 0\r\n    for i in da:\r\n        e = 0\r\n        for p in da:\r\n            if p == i:\r\n                e = e +1\r\n            else:\r\n                e = e\r\n        if e == 1:\r\n            unitlist = row_units + column_units + square_units + diagonal_units\r\n           \r\n        else:\r\n            unitlist = row_units + column_units + square_units\r\n            break\r\n\r\n    db = []\r\n    v = 8\r\n    for i in range(9):\r\n        if grid[v] != '.':\r\n            db.append(grid[v])\r\n        else:\r\n            db = db\r\n        v = v + 8\r\n\r\n\r\n    g = 0\r\n    for i in db:\r\n        g = 0\r\n        for p in db:\r\n            \r\n            if p == i:\r\n                g = g +1\r\n            else:\r\n                g = g\r\n        if g == 1:\r\n            unitlist = row_units + column_units + square_units + diagonal_units\r\n\r\n        else:\r\n            unitlist = row_units + column_units + square_units\r\n            break\r\n\r\n    global units\r\n    units= dict((s, [u for u in unitlist if s in u]) for s in dan)\r\n    global peers\r\n    peers= dict((s, set(sum(units[s],[]))-set([s])) for s in dan)\r\n\r\n        \r\n    values = grid_values(dan, grid)\r\n    values = search(values)\r\n    #return values\r\n    print (display(values))\r\n\r\ndef cross(A, B):\r\n    return [s+t for s in A for t in B]\r\n    \"Cross product of elements in A and elements in B.\"\r\n\r\n    \r\ndef grid_values(dan , grid):\r\n    \"\"\"\r\n    Convert grid into a dict of {square: char} with '123456789' for empties.\r\n    Args:\r\n        grid(string) - A grid in string form.\r\n    Returns:\r\n        A grid in dictionary form\r\n            Keys: The boxes, e.g., 'A1'\r\n            Values: The value in each box, e.g., '8'. 
If the box has no value, then the value will be '123456789'.\r\n    \"\"\"\r\n    a = []\r\n    for i in grid:\r\n        if i == '.':\r\n            a.append( '123456789')\r\n        else:\r\n            a.append(i)\r\n    assert len(grid) == 81, \"Input grid must be a string of length 81 (9x9)\"\r\n    return dict(zip(dan, a))\r\n    return values\r\n\r\n    pass\r\n\r\n\r\ndef eliminate(values):\r\n    '''eliminates digits from other boxes in a box's peers once the digit have been assigned to a box'''\r\n\r\n    p = []\r\n    for i in values:\r\n        if len(values[i]) == 1:\r\n            p.append(i)\r\n    o = ''\r\n    for i in p:\r\n        o = values[i]\r\n        for e in peers[i]:\r\n            #assign_value(values, e, values[e].replace(o,''))\r\n            values[e] = values[e].replace(o,'')\r\n    return values\r\ndef only_choice(values):\r\n    '''Assigns a digit to a particular box once it's determined it belongs to that box'''\r\n    for unit in unitlist:\r\n        for i in '123456789':\r\n            s = [e for e in unit if i in values[e]]\r\n            \r\n            if len(s) == 1:\r\n                assign_value(values, s[0], i)\r\n                values[s[0]] = i\r\n    return values\r\n    pass\r\n\r\ndef naked_twins(values):\r\n    \"\"\"Eliminate values using the naked twins strategy.\r\n    Args:\r\n        values(dict): a dictionary of the form {'box_name': '123456789', ...}\r\n\r\n    Returns:\r\n        the values dictionary with the naked twins eliminated from peers.\r\n    \"\"\"\r\n\r\n    # Find all instances of naked twins\r\n    # Eliminate the naked twins as possibilities for their peers\r\n    big =[] #set of all keys with length 2\r\n    k = []\r\n    g = values.keys()\r\n    p = values.values()\r\n    j = len(values)\r\n    for i in g:\r\n        if len (values[i]) == 2:\r\n            big.append(i)\r\n        else:\r\n            big = big\r\n            \r\n            \r\n    for i in big:\r\n        d = units[i]\r\n        for e in d:\r\n            if counter(values, i, e) == 'yes':\r\n                remove(values, i, e)\r\n            else:\r\n                big == big\r\n    return values\r\n    \r\n\r\ndef remove(values, i, e):\r\n    s = []\r\n    for a in values[i]:\r\n        s.append(a)\r\n    for o in e:\r\n        k = []\r\n        for h in values[o]:\r\n            k.append(h)\r\n        if len(values[o])>1 and s != k :\r\n            t = []\r\n            for y in values[o]:\r\n                t.append(y)\r\n            for r in s:\r\n                if r in t:\r\n                    t.remove(r)\r\n                else:\r\n                    t = t\r\n            p = ''\r\n            for i in t:\r\n                p =  p+i\r\n            values[o] = p\r\n            #assign_value(values, o, p)\r\n        \r\n   \r\n        else:\r\n            values[o] = values[o]\r\n    return values\r\n\r\n            \r\n\r\ndef counter(values,i,e):\r\n    u = 0\r\n    for f in e:\r\n        if values[i] == values[f]:\r\n            u = u + 1\r\n        else:\r\n            u = u\r\n    if u == 2:\r\n        return 'yes'\r\n    else:\r\n        return 'no'\r\n\r\n\r\n\r\n\r\ndef reduce_puzzle(values):\r\n    '''uses all the functions in loops to reduce the puzzle'''\r\n    stalled = False\r\n    while not stalled:\r\n        # Check how many boxes have a determined value\r\n        solved_values_before = len([box for box in values.keys() if len(values[box]) == 1])\r\n\r\n        # Your code here: Use the Eliminate Strategy\r\n        values 
= eliminate(values)\r\n\r\n        # Your code here: Use the Only Choice Strategy\r\n        values = only_choice(values)\r\n        #Your code here: Use the naked twins strategy\r\n        values = (naked_twins(values))\r\n        # Check how many boxes have a determined value, to compare\r\n        solved_values_after = len([box for box in values.keys() if len(values[box]) == 1])\r\n        # If no new values were added, stop the loop.\r\n        stalled = solved_values_before == solved_values_after\r\n        # Sanity check, return False if there is a box with zero available values:\r\n        if len([box for box in values.keys() if len(values[box]) == 0]):\r\n            return False\r\n    return values\r\n\r\n    pass\r\n\r\n\r\ndef search(values):\r\n    \"Using depth-first search and propagation, create a search tree and solve the sudoku.\"\r\n    # First, reduce the puzzle using the previous function\r\n    values = (reduce_puzzle(values))\r\n    if values is False:\r\n        return False\r\n    if all(len(values[s]) == 1 for s in dan): \r\n        return values\r\n    # Choose one of the unfilled squares with the fewest possibilities\r\n    n,s = min((len(values[s]), s) for s in dan if len(values[s]) > 1)\r\n    # Now use recursion to solve each one of the resulting sudokus, and if one returns a value (not False), return that answer!\r\n    for value in values[s]:\r\n        new_sudoku = values.copy()\r\n        new_sudoku[s] = value\r\n        attempt = search(new_sudoku)\r\n        if attempt:\r\n            return attempt\r\n    pass\r\n\r\n\r\n\r\ndef display(values):\r\n    \"\"\"\r\n    Display the values as a 2-D grid.\r\n    Args:\r\n        values(dict): The sudoku in dictionary form\r\n    \"\"\"\r\n    width = 1+max(len(values[s]) for s in dan)\r\n    line = '+'.join(['-'*(width*3)]*3)\r\n    for r in rows:\r\n        print(''.join(values[r+c].center(width)+('|' if c in '36' else '')\r\n                      for c in cols))\r\n        if r in 'CF': print(line)\r\n    return\r\n\r\n    #pass\r\n\r\n\r\n\r\n\r\nsolve('8..........36......7..9.2...5...7.......457.....1...3...1....68..85...1..9....4..')\r\n\r\n\r\n#print(assignments)\r\n","sub_path":"AIND-Sudoku-master/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":8464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"186631449","text":"import re\n\ndef word_count(s):\n    l = ['\"', ':', ';', ',', '.', '-', '+', '=', '/', '\\\\', '|',\n         '[', ']', '{', '}', '(', ')', '*', '^', '&']\n    wordList = re.sub(\"[^\\w]\", \" \",  s).split()\n    d = {}\n    if len(wordList) == 0:\n        return d\n    for i in l:\n        s = s.replace(i, \"\")\n    lst = s.split(' ')\n    for i in range(len(lst)):\n        lst[i] = lst[i].replace(' ', '')\n        # lst[i] = lst[i].replace('\"', '')\n        # lst[i] = lst[i].replace(',', '')\n    lst = list(filter(('').__ne__, lst))\n    lst = [n.lower() for n in lst]\n    lst1 = list(set(lst))\n    for i in range(len(lst1)):\n        d[lst1[i]] = lst.count(lst1[i])\n    return d\n\n\n\n\nif __name__ == \"__main__\":\n    print(word_count(\"\"))\n    print(word_count(\"Hello\"))\n    print(word_count('Hello, my cat. And my cat doesn\\'t say \"hello\" back.'))\n    print(word_count('This is a test of the emergency broadcast network. This is only a test.'))","sub_path":"applications/word_count/word_count.py","file_name":"word_count.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"503857142","text":"from collections import OrderedDict\n\nfrom SharedMethods import get_hash_type, uni, urlsafe\nfrom Config.ResourceType import ResourceType\nfrom IndicatorObject import IndicatorObject\n\n\nclass AddressIndicatorObject(IndicatorObject):\n    __slots__ = ()\n\n    def __init__(self, obj_from=None):\n        super(AddressIndicatorObject, self).__init__()\n        if obj_from is not None:\n            self.copy_slots(obj_from)\n        self._set_resource_type(ResourceType.ADDRESSES)\n\n    # def set_indicator(self, data, resource_type=None, update=True):\n    #     self._ip = uni(data)\n    #     self._reference_indicator = urlsafe(self._ip)\n    #\n    #     # additional resource type specific attributes\n    #     self._properties['_ip'] = {\n    #         'api_field': 'ip',\n    #         'method': 'set_indicator',\n    #         'required': True,\n    #     }\n\n    #\n    #   Read-Only\n    #\n    @property\n    def indicator(self):\n        return self._ip\n\n\nclass EmailAddressIndicatorObject(IndicatorObject):\n    __slots__ = ()\n\n    def __init__(self, obj_from=None):\n        super(EmailAddressIndicatorObject, self).__init__()\n        if obj_from is not None:\n            self.copy_slots(obj_from)\n        self._set_resource_type(ResourceType.EMAIL_ADDRESSES)\n    #\n    # def set_indicator(self, data, resource_type=None, update=True):\n    #     self._address = uni(data)\n    #     self._reference_indicator = urlsafe(self._address)\n    #\n    #     # additional resource type specific attributes\n    #     self._properties['_address'] = {\n    #         'api_field': 'address',\n    #         'method': 'set_indicator',\n    #         'required': True,\n    #     }\n\n    @property\n    def indicator(self):\n        return self._address\n\n\nclass FileIndicatorObject(IndicatorObject):\n    __slots__ = ()\n\n    def __init__(self, obj_from=None):\n        super(FileIndicatorObject, self).__init__()\n        if obj_from is not None:\n            self.copy_slots(obj_from)\n        self._set_resource_type(ResourceType.FILES)\n\n    # def set_indicator(self, data, resource_type=None, update=True):\n    #     # handle different hash type\n    #     hash_type = get_hash_type(data)\n    #     if hash_type == 'MD5':\n    #         self._md5 = data\n    #         if self._reference_indicator is None:  # reference indicator for attr, tag, etc adds\n    #             self._reference_indicator = urlsafe(self._md5)\n    #     elif hash_type == 'SHA1':\n    #         self._sha1 = data\n    #         if self._reference_indicator is None:  # reference indicator for attr, tag, etc adds\n    #             self._reference_indicator = urlsafe(self._sha1)\n    #     elif hash_type == 'SHA256':\n    #         self._sha256 = data\n    #         if self._reference_indicator is None:  # reference indicator for attr, tag, etc adds\n    #             self._reference_indicator = urlsafe(self._sha256)\n    #\n    #     self._properties['_md5'] = {\n    #         'api_field': 'md5',\n    #         'method': 'set_indicator',\n    #         'required': True,\n    #     }\n    #     self._properties['_sha1'] = {\n    #         'api_field': 'sha1',\n    #         'method': 'set_indicator',\n    #         'required': True,\n    #     }\n    #     self._properties['_sha256'] = {\n    #         'api_field': 'sha256',\n    #         'method': 'set_indicator',\n    #         'required': True,\n    #     }\n    #     self._properties['_size'] = {\n    #         'api_field': 'size',\n    #    
     'method': 'set_size',\n    #         'required': False,\n    #     }\n    #\n    #     if update and self._phase == 0:\n    #         self._phase = 2\n\n    @property\n    def indicator(self):\n        return {\n            'md5': self._md5,\n            'sha1': self._sha1,\n            'sha256': self._sha256\n        }\n\n\nclass HostIndicatorObject(IndicatorObject):\n    __slots__ = ()\n\n    def __init__(self, obj_from=None):\n        super(HostIndicatorObject, self).__init__()\n        if obj_from is not None:\n            self.copy_slots(obj_from)\n        self._set_resource_type(ResourceType.HOSTS)\n\n    # def set_indicator(self, data, resource_type=None, update=True):\n    #     self._hostname = uni(data)\n    #     self._reference_indicator = urlsafe(self._hostname)\n    #\n    #     # additional resource type specific attributes\n    #     self._properties['_hostname'] = {\n    #         'api_field': 'hostName',\n    #         'method': 'set_indicator',\n    #         'required': True,\n    #     }\n    #     self._properties['_dns_active'] = {\n    #         'api_field': 'dnsActive',\n    #         'method': 'set_dns_active',\n    #         'required': False,\n    #     }\n    #     self._properties['_whois_active'] = {\n    #         'api_field': 'whoisActive',\n    #         'method': 'set_whois_active',\n    #         'required': False,\n    #     }\n\n    @property\n    def indicator(self):\n        return self._hostname\n\n\nclass UrlIndicatorObject(IndicatorObject):\n    __slots__ = ()\n\n    def __init__(self, obj_from=None):\n        super(UrlIndicatorObject, self).__init__()\n        if obj_from is not None:\n            self.copy_slots(obj_from)\n        self._set_resource_type(ResourceType.URLS)\n\n    # def set_indicator(self, data, resource_type=None, update=True):\n    #     self._text = uni(data)\n    #     self._reference_indicator = urlsafe(self._text)\n    #\n    #     # additional resource type specific attributes\n    #     self._properties['_text'] = {\n    #         'api_field': 'text',\n    #         'method': 'set_indicator',\n    #         'required': True,\n    #     }\n\n    @property\n    def indicator(self):\n        return self._text\n\n\nclass CustomIndicatorObject(IndicatorObject):\n    __slots__ = ()\n\n    def __init__(self, obj_from=None):\n        super(CustomIndicatorObject, self).__init__()\n        if obj_from is not None:\n            self.copy_slots(obj_from)\n        self._set_resource_type(ResourceType.CUSTOM_INDICATORS)\n\n    # def set_indicator(self, data, resource_type=None, update=True, field_names=None):\n    #     # make sure they're in the right order\n    #     if not isinstance(data, OrderedDict):\n    #         raise AttributeError(\"Custom Indicator must be an OrderedDict\")\n    #\n    #     self._custom_fields = uni(data)\n    #     self._reference_indicator = urlsafe(' : '.join(self._custom_fields.values()))\n    #\n    #     # additional resource type specific attributes\n    #     self._properties['_custom_fields'] = {\n    #         'api_field': self.api_entity,\n    #         'method': 'set_indicator',\n    #         'required': True,\n    #     }\n\n    @property\n    def indicator(self):\n        \"\"\"\n        returns custom indicator as an OrderedDict of 1-3 fields\n        which when delimited represent the indicator\n        \"\"\"\n\n        return 
self._custom_fields\n\n","sub_path":"threatconnect/IndicatorObjectTyped.py","file_name":"IndicatorObjectTyped.py","file_ext":"py","file_size_in_byte":6723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"484360708","text":"import json\n\nif ('si' == input('Desea crear un archivo?: ')):\n    dicc = {}\n    for x in range(2):\n        usuario = input(\"Ingrese un nombre de usuario: \")\n        dicc[usuario] = {\"Puntaje\": float(input(\"Puntaje: \")),\n                         \"Nivel\": int(input(\"Nivel Alcanzado: \")),\n                         \"Tiempo\": float(input(\"Tiempo: \"))\n                         }\n\n    with open('Eje Jugadores.txt', 'w') as f:\n        json.dump(dicc, f)\n\n# ---------------------------------------------------------------------------\n\n\ndef modifico_datos(archivo):\n    nombre = str(input('Ingrese el nombre del jugador que desea modificar: '))\n    if nombre in archivo.keys():\n        print('Se realizara la modificacion de un archivo existente')\n    else:\n        print('Se creara un nuevo jugador')\n    archivo[nombre] = {\"Puntaje\": float(input(\"Puntaje: \")),\n                       \"Nivel\": int(input(\"Nivel Alcanzado: \")),\n                       \"Tiempo\": float(input(\"Tiempo: \"))\n                       }\n    return archivo\n\n\nwith open('Eje Jugadores.txt') as f:\n    jugadores = json.load(f)\n\nwith open('Eje Jugadores.txt', 'w') as f:\n    json.dump(modifico_datos(jugadores), f)\n","sub_path":"Practica3/Ej03.py","file_name":"Ej03.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"404364293","text":"print(\"Zenith Bank Transfer System\")\nreal_balance = 50000\ndef transaction(real_user_id, real_balance, valid_account_number):\n  real_user_id = \"6664\"\n  valid_account_number = \"99574\"\n  user_id = input(\"enter user ID:\")\n  maximum_overdraft_allowed = 0.5 * real_balance\n  \n  while real_user_id != user_id:\n    print(\"wrong account id\")\n    exit(1)\n\n\n    transfer_amount = float(input(\"enter the amount you wish to transfer: \"))   \n    \n    recipient_account_number = input(\"enter the recipient account number: \")\n\n    while transfer_amount <= 0:\n        print(\"invalid transfer amount\")\n        exit(2)\n    \n    charges = 0.05 * transfer_amount\n    total_deducted_charges = transfer_amount\n    overdraft = total_deducted_charges - real_balance\n\n    print(\"applicable charges:\", charges)\n    print(\"transfer_amount:\", transfer_amount)\n    print(\"total debit amount:\", total_deducted_charges)\n    if real_balance < transfer_amount:\n        print(\"overdraft:\", overdraft)\n\n    if real_balance < transfer_amount and overdraft > maximum_overdraft_allowed:\n        print(\"dear customer you are not eligible to withdraw an overdraft thank you!\")\n        input(\"do want to perform another trannsaction :\" )\n        exit(3)\n\n    elif recipient_account_number != valid_account_number:\n        print(\"invalid account number:\", recipient_account_number)\n        exit(4)\n\n    print(\"transfer sucessful\")\n    real_balance -= total_deducted_charges\n    print(f\"current balance: {real_balance:2f}\")","sub_path":"bankaccount2.py","file_name":"bankaccount2.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"219906239","text":"from django.shortcuts import render_to_response, redirect, render\nfrom django.template import RequestContext\nfrom signups.forms import *\nimport post.settings as settings\nfrom mailsnake import MailSnake\nimport json\nfrom django.http import HttpResponse, HttpResponseRedirect\nimport datetime\n\ndef connect(request):\n\n  if request.method == 'POST':\n    ms = MailSnake(settings.MAILCHIMP_API_KEY)\n    lists = ms.lists()\n    form = ConnectForm(request.POST)\n    message = 'the sky is falling'\n    if form.is_valid():\n      ms.listSubscribe(\n          id=lists['data'][0]['id'],\n          email_address=(form.cleaned_data['email_signup']),\n          update_existing=True,\n          double_optin=False,\n      )\n\n      if request.is_ajax():  # success with js\n        message = 'success!'\n        status = True\n        return HttpResponse(json.dumps({'message': message, 'status': status}), 'application/json')\n      else:  # success with no js\n        return redirect('success')\n    else:\n      if request.is_ajax():  # error with js\n        message = 'Invalid email address'\n        status = False\n        return HttpResponse(json.dumps({'message': message, 'status': status}), 'application/json')\n      else:  # error with no js\n        form.addError('Invalid email address')\n  else:\n    form = ConnectForm()\n\n  return render_to_response(\n      'signups/connect.html',\n      {\n          'form': form,\n\n      },\n      context_instance=RequestContext(request)\n  )\n\n\ndef success(request):\n  return render_to_response('signups/success.html', context_instance=RequestContext(request))\n","sub_path":"signups/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"387168638","text":"import pandas as pd\nimport numpy as np\nimport os, re, collections\nfrom collections import namedtuple\nfrom functools import partial\nfrom sklearn.metrics import mean_squared_error\nfrom env import FX_DATASET_DIR\n\ndef rmse_rel_std(y_true, y_predict):\n    return np.sqrt(mean_squared_error(y_true, y_predict))/y_true.std()\n\ndef split_dataset(ds, *sizes):\n    \"\"\" splits a dataframe into parts of relative size given by: sizes.\"\"\"\n\n    total_size = sum(sizes)\n    total_items = len(ds)\n\n    n_items = [round(total_items * s / total_size) for s in sizes]\n\n    # compensate for rounding by adjusting the largest item\n    largest_idx = n_items.index(max(n_items))\n    n_items[largest_idx] += total_items - sum(n_items)\n\n    assert sum(n_items) == total_items\n\n    results = []\n    last_idx = 0\n    for n in n_items:\n        idx = int(last_idx + n)\n        results.append(ds[last_idx:idx])\n        last_idx = idx\n\n    return results\n\n\n\ndef load_stock_data(sym):\n    df = pd.read_csv('stockData/{sym}.csv'.format(sym=sym), index_col=0, parse_dates=True)\n    df['px'] = df['Adjusted Close']\n    del df['Adjusted Close']\n    return df\n\n\ndef random_price_series(samples, std):\n    px = pd.Series(((np.random.randn(samples) * std) + 1).cumprod(), name='px')\n    return pd.DataFrame(px)\n\n\nDataSet = namedtuple('DataSet', ['name', 'X_train', 'Y_train', 'X_dev', 'Y_dev', 'X_test', 'Y_test'])\n\n\ndef fx_1minute_bar_catalog():\n    res = collections.defaultdict(list)\n    files = os.listdir(FX_DATASET_DIR)\n    for f in files:\n        m = re.search('DAT_ASCII_(\\w+)_M1_(\\d+).csv',f)\n        if m:\n            sym, date = m.groups()\n            res[sym].append(date)\n    return res\n\ndef load_1minute_fx_bars(sym, date):\n    filename = os.path.join(FX_DATASET_DIR, 'DAT_ASCII_{sym}_M1_{date}.csv'.format(sym=sym, date=date))\n    df = pd.read_csv(filename, header=None, sep=';',\n                     names = ['ts', 'open','high', 'low','close','volume']\n                    )\n    df['ts'] = pd.to_datetime(df['ts'])\n    df.set_index('ts', inplace=True, verify_integrity=True)\n    return df\n\ndef load_stock_datasets(features_and_targets_fn, train_frac=75, dev_frac=15, test_frac=15,\n                        sym_filter_fn=lambda x: True):\n    res = {}\n    files = os.listdir('stockData')\n    for f in files:\n        sym = f.split('.')[0]\n        if sym_filter_fn(sym):\n            raw = load_stock_data(sym)\n            train, dev, test = split_dataset(raw, train_frac, dev_frac, test_frac)\n\n            X_train, Y_train = features_and_targets_fn(train)\n            X_dev, Y_dev = features_and_targets_fn(dev)\n            X_test, Y_test = features_and_targets_fn(test)\n\n            ds = DataSet(sym, X_train, Y_train, X_dev, Y_dev, X_test, Y_test)\n            res[sym] = ds\n    return res\n\n\ndef create_random_datasets(features_and_targets_fn,\n                           samples=6000, train_frac=75, dev_frac=15, test_frac=15,\n                           syms_to_std_map={'S1': 0.015}):\n    res = {}\n    for sym in syms_to_std_map.keys():\n        std = syms_to_std_map[sym]\n        raw = random_price_series(samples=samples, std=std)\n        train, dev, test = split_dataset(raw, train_frac, dev_frac, test_frac)\n\n        X_train, Y_train = features_and_targets_fn(train)\n        X_dev, Y_dev = features_and_targets_fn(dev)\n        X_test, Y_test = features_and_targets_fn(test)\n\n        ds = DataSet(sym, X_train, Y_train, X_dev, Y_dev, 
X_test, Y_test)\n        res[sym] = ds\n    return res\n\n\ndef logreturn(px_latest, px_prev):\n    return np.log(px_latest / px_prev)\n\n\ndef FT_logreturn_vs_logreturn(df, return_lookbacks=[1], target_lookaheads=[1]):\n    \"\"\" features and targets function:\n        features: - log returns with various lookbacks.\n        targets: - log return\n    \"\"\"\n    results = pd.DataFrame(index=df.index)\n    feature_cols = []\n    target_cols = []\n    for lb in return_lookbacks:\n        col = 'lret-' + str(lb)\n        results[col] = logreturn(df['px'], df['px'].shift(lb))\n        feature_cols.append(col)\n\n    # add target feature to predict\n    for la in target_lookaheads:\n        col = 'target-' + str(la)\n        results[col] = logreturn(df['px'].shift(-la), df['px'])\n        target_cols.append(col)\n\n    results = results.dropna()  # so that features and targets are all complete, and have aligned samples\n    return results[feature_cols], results[target_cols]\n\n\ndef FT_ma_ewma_logreturns_vs_abs_logreturn(df, ma_windows=[10], ewma_halflifes=[10], lret_lookbacks=[]):\n    \"\"\" features and targets function:\n        features:\n        - moving average of abs log return with various lookbacks.\n        - ewma of abs log return with various lookbacks\n        targets:\n        - abs log return\n    \"\"\"\n    results = pd.DataFrame(index=df.index)\n    feature_cols = []\n    target_cols = []\n\n    lret = logreturn(df['px'], df['px'].shift(1))\n    vol = lret.abs()\n    results['vol'] = vol\n    feature_cols.append('vol')\n    future_vol = vol.shift(-1)\n    for ma_win in ma_windows:\n        col = 'ma-' + str(ma_win)\n        ma = vol.rolling(ma_win).mean()\n        results[col] = ma\n        feature_cols.append(col)\n\n    for ewma_hl in ewma_halflifes:\n        col = 'ewma-' + str(ewma_hl)\n        ewma = vol.ewm(halflife=ewma_hl).mean()\n        results[col] = ewma\n        feature_cols.append(col)\n\n    for lb in lret_lookbacks:\n        col = 'lret-' + str(lb)\n        lret_lb = lret.shift(lb)\n        results[col] = lret_lb\n        feature_cols.append(col)\n\n    # add target feature to predict\n    results['target-1'] = future_vol\n    target_cols.append('target-1')\n\n    results = results.dropna()  # so that features and targets are all complete, and have aligned samples\n    return results[feature_cols], results[target_cols]\n\n\ndef lret(px): return logreturn(px, px.shift(1))\n\n\ndef FT2_ma_ewma_logreturns_vs_abs_logreturn(ma_windows=[10], ewma_halflifes=[10], lret_lookbacks=[]):\n    \"\"\" features and targets function:\n        features:\n        - moving average of abs log return with various lookbacks.\n        - ewma of abs log return with various lookbacks\n        - log return\n        targets:\n        - abs log return\n    \"\"\"\n    features = {}\n    targets = {}\n\n    def vol(px):\n        return lret(px).abs()\n\n    features['vol'] = vol\n    targets['target-1'] = lambda x: vol(x).shift(-1)\n\n    # bind the loop variable via a default argument so each lambda keeps its\n    # own window instead of the final value of the loop variable\n    for ma_win in ma_windows:\n        features['ma-' + str(ma_win)] = lambda x, w=ma_win: vol(x).rolling(w).mean()\n\n    for ewma_hl in ewma_halflifes:\n        features['ewma-' + str(ewma_hl)] = lambda x, hl=ewma_hl: vol(x).ewm(halflife=hl).mean()\n\n    for lb in lret_lookbacks:\n        features['lret-' + str(lb)] = lambda x, lb=lb: lret(x).shift(lb)\n\n    return features, targets\n\n\ndef create_features_and_targets(series, feature_defs, target_defs):\n    results = pd.DataFrame(index=series.index)\n    for col, func in feature_defs.items():\n        results[col] = func(series)\n\n    for col, func in target_defs.items():\n        results[col] = func(series)\n\n    results = results.dropna()  # so that features and targets are all complete, and have aligned samples\n    return results[list(feature_defs.keys())], results[list(target_defs.keys())]\n\n\ndef load_ds1():\n    features_and_targets = partial(FT_logreturn_vs_logreturn, return_lookbacks=np.arange(40) + 1, target_lookaheads=[1])\n    return load_stock_datasets(features_and_targets)\n\n\ndef load_ds2():\n    features_and_targets = partial(FT_ma_ewma_logreturns_vs_abs_logreturn, ma_windows=np.arange(40) + 1,\n                                   ewma_halflifes=np.arange(40) + 1)\n    return load_stock_datasets(features_and_targets)\n\n\ndef load_ds2_rand(syms_to_std_map={'S1': 0.03}):\n    features_and_targets = partial(FT_ma_ewma_logreturns_vs_abs_logreturn, ma_windows=np.arange(40) + 1,\n                                   ewma_halflifes=np.arange(40) + 1)\n    return create_random_datasets(features_and_targets, syms_to_std_map=syms_to_std_map)\n\n\ndef load_ds3():\n    features_and_targets = partial(\n        create_features_and_targets,\n        feature_defs={'vol': lambda x: lret(x)},\n        target_defs={'target-1': lambda x: lret(x).abs().shift(-1)})\n    return create_random_datasets(features_and_targets)\n\ndef preview(*series, **kwargs):\n    n = kwargs.get('n',4)\n    return pd.concat([ser.head(n) for ser in series], axis=1)\n\n\ndef concatenate_datasets(data, name=\"combined\"):\n    def remove_index(x):\n        return x.reset_index()[[c for c in x.columns if not 'index' == c]]\n\n    X_train = pd.concat([remove_index(ds.X_train) for ds in data.values()])\n    Y_train = pd.concat([remove_index(ds.Y_train) for ds in data.values()])\n    X_dev = pd.concat([remove_index(ds.X_dev) for ds in data.values()])\n    Y_dev = pd.concat([remove_index(ds.Y_dev) for ds in data.values()])\n    X_test = pd.concat([remove_index(ds.X_test) for ds in data.values()])\n    Y_test = pd.concat([remove_index(ds.Y_test) for ds in data.values()])\n\n    ds = DataSet(name, X_train, Y_train, X_dev, Y_dev, X_test, Y_test)\n    return ds\n","sub_path":"returnprediction/src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"402118537","text":"\n\n#calss header\nclass _NACRE():\n\tdef __init__(self,): \n\t\tself.name = \"NACRE\"\n\t\tself.definitions = [u'a smooth, hard substance forming a layer inside the shells of some sea creatures. It is white but also seems to shine with different colours, and is used to make buttons and for decoration.']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_nacre.py","file_name":"_nacre.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"550718111","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\nimport math\n\nimport rospy\nimport actionlib\nimport moveit_commander\n\nfrom geometry_msgs.msg import Quaternion, PoseStamped\nfrom move_base_msgs.msg import MoveBaseAction, MoveBaseGoal\nfrom tf.transformations import quaternion_from_euler\n\nSLEEP_TIME = 1.5\n\n# configuration for moveit\nrobot = moveit_commander.RobotCommander()\nscene = moveit_commander.PlanningSceneInterface()\n\n# group name\nupper = moveit_commander.MoveGroupCommander(\"upper_body\")\nrarm_waist = moveit_commander.MoveGroupCommander(\"rarm_with_waist\")\nlifter = moveit_commander.MoveGroupCommander(\"lifter\")\n\nlifter.set_pose_reference_frame(\"base_link\")\nupper.set_pose_reference_frame(\"base_link\")\nrarm_waist.set_pose_reference_frame(\"base_link\")\n\ndef lifter_down():\n\n\t## reset pose\n\tupper_goal = upper.get_current_joint_values()\n\tfor i in range(len(upper_goal)):\n\t\tupper_goal[i] = 0\n\tupper_goal[6] = -3\n\tupper_goal[16] = -3\n\tupper.go(upper_goal, wait=True)\n\n\tlifter_goal = lifter.get_current_joint_values()\n\tprint(\"lifter get current state :=\", lifter_goal)\n\tfor i in range(len(lifter_goal)):\n\t\tlifter_goal[i] = 0\n\tlifter_goal[0] = 1.570131778717041\n\tlifter_goal[1] = -1.570131778717041\n\tlifter_goal[2] = -1.5704814195632935\n\tlifter_goal[3] = 1.5704814195632935\n\tlifter.set_max_velocity_scaling_factor(1.0)\n\tlifter.go(lifter_goal, wait=True)\n\n\tprint(\"robot group:\", robot.get_group_names())\n\tprint (\"\")\n\t# print(\"robot current state:\", robot.get_current_state())\n\t# print (\"\")\n\t#print(\"robot joint name:\", robot.get_joint_names(\"upper\") )\n\n\trospy.sleep(SLEEP_TIME)\n\ndef lifter_up():\n\t## reset pose\n\tupper_goal = upper.get_current_joint_values()\n\tfor i in range(len(upper_goal)):\n\t\tupper_goal[i] = 0\n\tupper_goal[6] = -3\n\tupper_goal[16] = -3\n\tupper.go(upper_goal, wait=True)\n\n\tlifter_goal = lifter.get_current_joint_values()\n\tprint(\"lifter get current state :=\", lifter_goal)\n\tfor i in range(len(lifter_goal)):\n\t\tlifter_goal[i] = 0\n\tlifter.set_max_velocity_scaling_factor(0.5)\n\tlifter.go(lifter_goal, wait=True)\n\n\t# print(\"robot group:\", robot.get_group_names())\n\t# print (\"\")\n\t# print(\"robot current state:\", robot.get_current_state())\n\t# print (\"\")\n\t# print(\"robot joint name:\", robot.get_joint_names(\"lifter\") )\n\n\trospy.sleep(SLEEP_TIME)\n\n\n\ndef main():\n\t\n\trospy.init_node(\"lifter\")\n\n\ti = 0\n\tcount = 0\n\twhile True:\n\t\tlifter_down()\n\t\tlifter_up()\n\t\tcount += 1\n\t\tprint(\"count :=\", count)\n\t\n\t\t\n\n\nif __name__ == \"__main__\":\n\ttry:\n\t\tmain()\n\texcept rospy.ROSInterruptException:\n\t\texit()\n","sub_path":"seed_noid/test_lifter.py","file_name":"test_lifter.py","file_ext":"py","file_size_in_byte":2490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"269000414","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport json\nimport logging.config\nimport os\n\n\ndef setup_logging(\n        default_path='logging.json',\n        default_level=logging.INFO,\n        env_key='LOG_CFG'):\n\n    path = default_path\n    value = os.getenv(env_key, None)\n    if value:\n        path = value\n    if os.path.exists(path):\n        with open(path, 'rt') as f:\n            config = json.load(f)\n        logging.config.dictConfig(config)\n    else:\n        logging.basicConfig(level=default_level)\n\n\ndef basic_config():\n    logging.basicConfig(level=logging.DEBUG,\n                        format='%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s')","sub_path":"log_init.py","file_name":"log_init.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"67242597","text":"import zmq\nimport json\n\ndef to_openbts(result=None, **kw):\n\n    for key, value in result.items():\n        if key == \"_id\":\n            _id = value\n\n        elif key == \"clid\":\n            clid = value\n\n        elif key == \"imsi\":\n            imsi = value\n\n    request =  {\n                \"command\":\"subscribers\",\n                \"action\":\"create\",\n                \"fields\":{\n                        \"name\": str(_id),\n                        \"imsi\":\"IMSI\" + str(imsi),\n                        \"msisdn\":str(clid) ,\n                        \"ki\":\"\"\n                        }\n                }\n\n\n    context = zmq.Context()\n    socket = context.socket(zmq.REQ)\n    socket.connect(\"tcp://127.0.0.1:45064\")\n\n    socket.send_string(json.dumps(request),encoding='utf-8')\n","sub_path":"openbts.py","file_name":"openbts.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"542179969","text":"class Solution(object):\n  def searchRange(self, nums, target):\n    \"\"\"\n        :type nums: List[int]\n        :type target: int\n        :rtype: List[int]\n        \"\"\"\n    lo = bisect.bisect_left(nums, target)\n    if lo == len(nums) or nums[lo] != target:\n      return [-1, -1]\n    else:\n      hi = bisect.bisect_right(nums, target, lo, len(nums))\n      return [lo, hi-1]\n\n","sub_path":"py/34.py","file_name":"34.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"414249426","text":"#!/usr/bin/python\n\nimport Adafruit_BBIO.ADC as ADC\nimport time\n\nADC.setup()\n\nfile = open(\"teste5.txt\",\"w+\") # abre arquivo de texto chamado teste.txt\n\nfim = time.time() + 1 # fim = 5 s\n\nwhile time.time() < fim:\n  valor = ADC.read(\"P9_39\") # BBB le o valor no Analog Input 0 no pino P9_39\n  tensao = valor*1.8 # converte o valor de tensao lido para a escala de volts\n  file.write(\"tensao %.3f\\n\" %(tensao)) # escreve o valor de tensao no arquivo de texto\n\nfile.close() # fecha o arquivo de texto\n","sub_path":"serie-testes/teste1/leValor.py","file_name":"leValor.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"27515924","text":"# -*- coding: utf-8 -*-\nimport scrapy\nimport json\nimport datetime\nfrom TwitterSpider.items import TweetItem, CommentItem\nfrom urllib.parse import quote\n\n\nclass PersonaltitterSpider(scrapy.Spider):\n    name = 'personaltitter'\n    url_api = \"https://twitter.com/i/api/graphql/jMaTS-_Ea8vh9rpKggJbCQ/UserByScreenName?variables=%7B%22screen_name%22%3A%22{screen_name}%22%2C%22withHighlightedLabel%22%3Atrue%7D\"\n    comment_api = \"https://twitter.com/i/api/2/timeline/conversation/{id_str}.json?include_profile_interstitial_type=1&include_blocking=1&include_blocked_by=1&include_followed_by=1&include_want_retweets=1&include_mute_edge=1&include_can_dm=1&include_can_media_tag=1&skip_status=1&cards_platform=Web-12&include_cards=1&include_ext_alt_text=true&include_quote_count=true&include_reply_count=1&tweet_mode=extended&include_entities=true&include_user_entities=true&include_ext_media_color=true&include_ext_media_availability=true&send_error_codes=true&simple_quoted_tweet=true&count=20&include_ext_has_birdwatch_notes=false&ext=mediaStats%2ChighlightedLabel\"\n    next_comment_api = \"https://api.twitter.com/2/timeline/conversation/{id_str}.json?include_profile_interstitial_type=1&include_blocking=1&include_blocked_by=1&include_followed_by=1&include_want_retweets=1&include_mute_edge=1&include_can_dm=1&include_can_media_tag=1&skip_status=1&cards_platform=Web-12&include_cards=1&include_ext_alt_text=true&include_quote_count=true&include_reply_count=1&tweet_mode=extended&include_entities=true&include_user_entities=true&include_ext_media_color=true&include_ext_media_availability=true&send_error_codes=true&simple_quoted_tweet=true&referrer=tweet&controller_data=DAACDAAFDAABDAABDAABCgABAAAAAAAAAAAAAAwAAgoAAQAAAAAAAAABCgACALz4GwCn3T4LAAMAAAAIZWxlY3Rpb24AAAAAAA%3D%3D&count=20&cursor={cursor}&include_ext_has_birdwatch_notes=false&ext=mediaStats%2ChighlightedLabel\"\n\n    first_url = \"https://twitter.com/i/api/2/timeline/profile/{user_id}.json?include_profile_interstitial_type=1&include_blocking=1&include_blocked_by=1&include_followed_by=1&include_want_retweets=1&include_mute_edge=1&include_can_dm=1&include_can_media_tag=1&skip_status=1&cards_platform=Web-12&include_cards=1&include_ext_alt_text=true&include_quote_count=true&include_reply_count=1&tweet_mode=extended&include_entities=true&include_user_entities=true&include_ext_media_color=true&include_ext_media_availability=true&send_error_codes=true&simple_quoted_tweet=true&include_tweet_replies=false&count=20&userId={user_id}&ext=mediaStats%2ChighlightedLabel\"\n    next_url = \"https://twitter.com/i/api/2/timeline/profile/{user_id}.json?include_profile_interstitial_type=1&include_blocking=1&include_blocked_by=1&include_followed_by=1&include_want_retweets=1&include_mute_edge=1&include_can_dm=1&include_can_media_tag=1&skip_status=1&cards_platform=Web-12&include_cards=1&include_ext_alt_text=true&include_quote_count=true&include_reply_count=1&tweet_mode=extended&include_entities=true&include_user_entities=true&include_ext_media_color=true&include_ext_media_availability=true&send_error_codes=true&simple_quoted_tweet=true&include_tweet_replies=false&count=20&cursor={cursor}&userId={user_id}&ext=mediaStats%2ChighlightedLabel\"\n\n    #这里填写作者详情页链接,可以填写多个作者\n    user_urls = [\"https://twitter.com/JustinAaronUH91\"]\n\n    #设置爬取几天内的数据,如爬取近7天的数据\n    day = 7\n\n\n    def start_requests(self):\n        \"\"\"\n        根据user_urls中的链接获取user_id,使用user_id才能抓取数据\n        \"\"\"\n        headers = self.settings['TWITTER_HEADERS']\n    
    for url in self.user_urls:\n            screen_name = url.split(\"com/\")[-1]\n            author_url = self.url_api.format(screen_name=screen_name)\n            yield scrapy.Request(author_url,\n                                  headers=headers,\n                                  meta={\"headers\":headers,'name':screen_name},\n                                  dont_filter=True)\n\n    def parse(self, response):\n        \"\"\"\n        获取到user_id,构造first_url,发起请求\n        \"\"\"\n        headers = response.meta['headers']\n        name = response.meta['name']\n        datas = json.loads(response.text)\n        user_id = datas['data']['user']['rest_id']\n        first_url = self.first_url.format(user_id=user_id)\n        yield scrapy.Request(first_url,callback=self.parse_post_list,\n                              headers=headers,\n                              meta={\"user_id\":user_id,\"headers\":headers,\"page\":1,\"stop\":False,'name':name},\n                              dont_filter=True)\n\n\n    def parse_post_list(self, response):\n        \"\"\"\n        获取到个人主页下面的帖子信息,解析数据\n        \"\"\"\n        page = response.meta['page']\n        name = response.meta['name']\n        stop = response.meta['stop']\n        user_id = response.meta['user_id']\n        headers = response.meta['headers']\n        tweet_info = json.loads(response.body.decode())\n        try:\n            tweets = tweet_info['globalObjects']['tweets']\n        except:\n            print(\"token失效,换一个x-guest-token参数再试一下\")\n            return\n        user_datas = tweet_info['globalObjects']['users']\n        for post_id,status in tweets.items():\n            created_time = self.format_time(status['created_at'])\n            #如果发帖时间小于设定的时间,就不再往下抓了\n            if created_time <= (datetime.datetime.now() - datetime.timedelta(days=self.day)).strftime('%Y-%m-%d %H:%M:%S'):\n                stop = True\n                break\n            item = TweetItem()\n            user_id1 = status['user_id_str']\n            item['user'] = user_datas[user_id1]['name']\n            item['user_id'] = user_id1\n            item['post_time'] = created_time\n            item['content'] = status['full_text']\n            comment_num = status['reply_count']\n            item['comment_num'] = comment_num\n            item['repost_num'] = status['retweet_count']\n            item['like_num'] = status['favorite_count']\n            id_str = status['id_str']\n            item['id_str'] = id_str\n            url = f'https://twitter.com/{user_datas[user_id][\"screen_name\"]}/status/{status[\"id_str\"]}'\n            item['url'] = url\n            item['name'] = name\n            yield item\n\n            #如果该帖子有评论就去抓取评论\n            if int(comment_num):\n                comment_url = self.comment_api.format(id_str=id_str)\n                yield scrapy.Request(comment_url,\n                                     callback=self.parse_comment,\n                                     headers=headers,\n                                     meta={\"url\":url,\"id_str\":id_str,\"comment_num\":comment_num,\n                                           \"page\":1,\"headers\": headers}, dont_filter=True)\n\n        tweet_post_nums = user_datas[user_id]['statuses_count'] #帖子总数\n        pages = int(tweet_post_nums) // 20 +1  #获取帖子页数,然后遍历\n        if page < pages and not stop: #如果还在需要爬取的时间内且总页数没有翻完,就继续翻页\n            page+=1\n            cursor = tweet_info['timeline']['instructions'][0]['addEntries']['entries'][-1]['content']['operation']['cursor']['value']\n    
        next_url = self.next_url.format(user_id=user_id,cursor=quote(cursor))\n            yield scrapy.Request(next_url,callback=self.parse_post_list,\n                                  headers=headers,\n                                  meta={\"user_id\":user_id,\"headers\":headers,\"page\":page,'name':name},\n                                  dont_filter=True)\n\n\n    def parse_comment(self, response):\n        \"\"\"\n        解析提取评论\n        \"\"\"\n        headers = response.meta['headers']\n        comment_num = response.meta['comment_num']\n        page = response.meta['page']\n        id_str = response.meta['id_str']\n        tweet_info = json.loads(response.body.decode())\n        try:\n            tweets = tweet_info['globalObjects']['tweets']\n        except:\n            print(tweet_info)\n            return\n        user_datas = tweet_info['globalObjects']['users']\n        for post_id,status in tweets.items():\n            item = CommentItem()\n            user_id = status['user_id_str']\n            item['comment_id'] = status['id_str']\n            item['conversation_id'] = id_str\n            item['user'] = user_datas[user_id]['name']\n            item['user_id'] = user_id\n            item['reply_time_dt'] = self.format_time(status['created_at'])\n            item['content'] = status['full_text']\n            item['reply_num'] = status['retweet_count']\n            item['repost_num'] = status['retweet_count']\n            item['like_num'] = status['favorite_count']\n            item['ts'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n            item['tweet_id'] = id_str\n            yield item\n        pages = int(comment_num) // 20 + 1 #获取评论页数\n        print(\"当前页:\", page)\n        if page < pages:\n            page += 1\n            cursor = \\\n            tweet_info['timeline']['instructions'][0]['addEntries']['entries'][-1]['content']['operation']['cursor'][\n                'value']\n            next_url = self.next_comment_api.format(id_str=id_str, cursor=quote(cursor))\n            yield scrapy.Request(next_url, callback=self.parse_comment,\n                                 headers=headers,\n                                 meta={\"headers\": headers, \"page\": page,\n                                       \"id_str\": id_str, \"comment_num\": comment_num},\n                                 dont_filter=True)\n\n\n    def format_time(self, dt):\n        dt_obj = datetime.datetime.strptime(dt, '%a %b %d %H:%M:%S %z %Y').astimezone(tz=None)\n        return dt_obj.strftime('%Y-%m-%d %H:%M:%S')","sub_path":"twitterspider/TwitterSpider/spiders/personaltitter.py","file_name":"personaltitter.py","file_ext":"py","file_size_in_byte":9789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"164115528","text":"import layers as ly\ntry:\n    import cPickle as pickle\nexcept ImportError:\n    import pickle\n\n\nclass NET:\n    def __init__(self, learning_rate, input_shape, BS):#input_shape example: [BS,1,28,28]\n        self.lr = learning_rate\n\n        self.conv2d_1 = ly.conv2d(input_shape,[5,5,1,32],[1,1])\n        self.relu_1 = ly.relu()\n        self.max_pool_1 = ly.max_pooling(self.conv2d_1.output_shape, filter_shape=[2,2], strides=[2,2])\n\n        self.conv2d_2 = ly.conv2d(self.max_pool_1.output_shape,[5,5,32,64],[1,1])\n        self.relu_2 = ly.relu()\n        self.max_pool_2 = ly.max_pooling(self.conv2d_2.output_shape, filter_shape=[2,2], strides=[2,2])\n\n        self.flatter = ly.flatter()\n\n        self.full_connect_1 = ly.full_connect(input_len=7*7*64,output_len=1024)\n        self.relu_3 = ly.relu()\n        self.dropout_1 = ly.dropout(1024)\n\n        self.full_connect_2 = ly.full_connect(input_len=1024,output_len=10)\n        self.loss_func = ly.softmax_cross_entropy_error()\n\n\n    def forward_propagate(self,input, one_hot_labels, keep_prob):\n        z_conv1 = self.conv2d_1.forward_propagate(input)\n        a_conv1 = self.relu_1.forward_propagate(z_conv1)\n        p_conv1 = self.max_pool_1.forward_propagate(a_conv1)\n\n        z_conv2 = self.conv2d_2.forward_propagate(p_conv1)\n        a_conv2 = self.relu_2.forward_propagate(z_conv2)\n        p_conv2 = self.max_pool_2.forward_propagate(a_conv2)\n\n        flatten_p_conv2 = self.flatter.flat(p_conv2)\n\n        z_fc1 = self.full_connect_1.forward_propagate(flatten_p_conv2)\n        a_fc1 = self.relu_3.forward_propagate(z_fc1)\n        drop_fc1 = self.dropout_1.forward_propagate(a_fc1,keep_prob=keep_prob)\n\n        z_fc2 = self.full_connect_2.forward_propagate(drop_fc1)\n\n        loss, prob = self.loss_func.forward_propagate(z_fc2,one_hot_labels)\n        #print(loss)\n        return prob\n\n\n    def back_propagate(self):\n        dout_z_fc2 = self.loss_func.back_propagate()\n        dout_drop_fc1 = self.full_connect_2.back_propagate(dout_z_fc2)\n\n        dout_a_fc1 = self.dropout_1.back_propagate(dout_drop_fc1)\n        dout_z_fc1 = self.relu_3.back_propagate(dout_a_fc1)\n        dout_p_conv2_flatten = self.full_connect_1.back_propagate(dout_z_fc1)\n\n        dout_p_conv2 = self.flatter.de_flat(dout_p_conv2_flatten)\n\n        dout_a_conv2 = self.max_pool_2.back_propagate(dout_p_conv2)\n        dout_z_conv2 = self.relu_2.back_propagate(dout_a_conv2)\n        dout_p_conv1 = self.conv2d_2.back_propagate(dout_z_conv2)\n\n        dout_a_conv1 = self.max_pool_1.back_propagate(dout_p_conv1)\n        dout_z_conv1 = self.relu_1.back_propagate(dout_a_conv1)\n        din_conv1 = self.conv2d_1.back_propagate(dout_z_conv1)\n\n\n    def optimize(self):\n        self.conv2d_1.optimize(self.lr)\n        self.conv2d_2.optimize(self.lr)\n        self.full_connect_1.optimize(self.lr)\n        self.full_connect_2.optimize(self.lr)\n\n\nclass MODEL:\n    def save(self,net_object, step, dir='model/'):\n        print('save model')\n        txt_file = open(dir+str(step)+'_net1.txt', 'wb')\n        pickle.dump(net_object, txt_file)\n        txt_file.close()\n\n    def restore(self, step, dir='model/'):\n        print('load model')\n        txt_file = open(dir+str(int(step))+'_net1.txt', 'wb')\n        net_object = pickle.load(txt_file)\n        txt_file.close()\n        return 
net_object","sub_path":"network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":3302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"255916692","text":"from matplotlib import pyplot as plt\nfrom scipy.integrate import odeint\nimport numpy as np\nfrom matplotlib.animation import FuncAnimation\ndsdt=[]\ndef f(s,t):\n    l1=1\n    l2=1\n    m1=2\n    m2=2\n    dsdt[0] = s[1]\n    dsdt[1] = (-(9.81*(m1+m2)*np.sin(s[0]))-(m2*l2*np.sin(s[0]-s[2])*s[3]**2)-(m2*l2*dsdt[3]*np.cos(s[0]-s[2])))/((m1+m2)*l1)\n    dsdt[2] = s[3]\n    dsdt[3] = (-(m2*9.81*np.sin(s[2]))+(m2*l1*np.sin(s[0]-s[2])*s[1]**2)-(m2*l1*dsdt[1]*np.cos(s[0]-s[1])))/(m2*l2)\n    return dsdt\n\ntheeta0=[0,0,np.pi/3,0]\n\nt = np.linspace(0,30,600)\n\ntheeta=odeint(f,theeta0,t)\n\nfig = plt.figure()\n\npoint1, = plt.plot([0],[0],\"o\")\nline1, = plt.plot([0,0],[0,0])\n\npoint2, = plt.plot([0],[0],\"o\")\nline2, = plt.plot([0,0],[0,0])\n\n#trace, = plt.plot([0,0],[0,0])\n\nplt.axis(\"scaled\")\nplt.xlim(-3,3)\nplt.ylim(-3,3)\n\nx1=np.sin(theeta[:,0])\ny1=-np.cos(theeta[:,0])\n\nx2=x1[len(x1)-1]+np.sin(theeta[:,2])\ny2=y2[len(y2)-1]-np.cos(theeta[:,2])\n\ndef animate(time):\n    time=time-1\n    point1.set_data(1*np.sin(theeta[time,0]),-1*np.cos(theeta[time,0]))\n    point2.set_data(x2[time],y[time])\n    \n    line1.set_data([0 , 2*np.sin(theeta[time,0])],[0 ,-2*np.cos(theeta[time,0])])\n    line2.set_data([x1[len(x1)-1],y1[len(y1)-1]],[x1[len(x1)-1]+x2[time],y1[len(y1)-1]+y2[time]])\n    \n    return point1, line1, point2, line2\n\nanim = FuncAnimation(fig,animate,frames=len(t),interval=0.05*1000)\nanim.save(\"Simplependulum_trace.mp4\",fps=20)\nplt.show()\n","sub_path":"DoublePendulum.py","file_name":"DoublePendulum.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"46899638","text":"from django import forms\nfrom django.contrib.auth.forms import UserCreationForm, User\nfrom .models import Student\n\n\nclass StudentForm(forms.ModelForm):\n    full_name = forms.CharField(widget=forms.TextInput(attrs={'class':'form-control','placeholder':'Enter your Full Name'}),label='Full Name')\n    dob = forms.DateField(widget=forms.DateInput(attrs={'class':'form-control','placeholder':'YYYY-MM-DD'}),label='Date of Birth')\n    profile_photo = forms.ImageField()\n    address = forms.CharField(widget=forms.Textarea(attrs={'class':'form-control','placeholder':'Enter your Address'}),label='Address')\n    contactno = forms.IntegerField(widget=forms.TextInput(attrs={'class':'form-control','placeholder':'Enter Contact No.'}),label='Contact No.')\n    class Meta:\n        model = Student\n        fields = ['full_name','dob', 'profile_photo','address', 'contactno']\n\n","sub_path":"OnlineTestApplication/student/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"395073387","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Jan 29 20:18:46 2020\r\n\r\n@author: COM\r\n\"\"\"\r\n\r\nimport sys\r\nimport pygame\r\nfrom pygame.locals import * # QUIT\r\n\r\npygame.init()\r\nSURFACE = pygame.display.set_mode((400,300))\r\npygame.display.set_caption(\"pygame\")\r\n\r\ndef main(): # main routine\r\n    while True:\r\n        SURFACE.fill((255,255,255))\r\n        \r\n        for event in pygame.event.get():\r\n            if event.type == QUIT:\r\n                pygame.quit()\r\n                sys.exit()\r\n        \r\n        pygame.display.update()\r\n            \r\nif __name__ == '__main__':\r\n    main()\r\n        \r\n\r\n\r\n\r\n","sub_path":"python game 01 pygame/pygame.py","file_name":"pygame.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"267336279","text":"from threading import current_thread\nfrom flask import Flask, render_template, session, redirect, url_for, flash\nfrom flask_bootstrap import Bootstrap\nfrom flask_moment import Moment\nfrom datetime import datetime\nfrom flask_wtf import Form\nfrom flask_wtf.recaptcha import validators\nfrom wtforms import StringField, SubmitField\nfrom wtforms.validators import Required, ValidationError\n\nclass NameForm(Form):\n    name = StringField('What is your name?', validators=[Required()])\n    email = StringField('What is your UofT Email address?', validators=[Required()])\n\n    def validate_email(form, field):\n        if \"@\" not in field.data:\n            raise ValidationError(\"Please inlclude an '@' in the email address. '%s' is missing an '@'.\" % field.data)\n    submit = SubmitField('Submit')\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'sdbfudsbfjskedfnsedfnjksdfnksdfel'\n\nbootstrap=Bootstrap(app)\nmoment = Moment(app)\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n    form = NameForm()\n    if form.validate_on_submit():\n        old_name = session.get('name')\n        old_email = session.get('email')\n        if old_name is not None and old_name != form.name.data:\n            flash('Looks like you have changed your name!')\n        if old_email is not None and old_email != form.email.data:\n            flash('Looks like you have changed your email!')\n        session['name'] = form.name.data\n        session['email'] = form.email.data\n        return redirect(url_for('index'))\n    return render_template('index.html', form=form, name=session.get('name'), email=session.get('email'), current_time=datetime.utcnow())\n\n@app.route('/user/')\ndef user(name):\n    return render_template('user.html', name=name)\n\n@app.errorhandler(404)\ndef page_not_found(e):\n    return render_template('404.html'), 404\n\n@app.errorhandler(500)\ndef internal_server_error(e):\n    return render_template('505.html'), 500\n\nif __name__ == '__main__':\n    app.run(debug=True)","sub_path":"flasky/hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":1964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"516282731","text":"import linecache\nimport sys\n\nargs = sys.argv\n\nFILENAME1 = args[1]\nFILENAME2 = args[2]\nFILENAME3 = args[3]\n\nv = linecache.getline(FILENAME1, 2)[:-1]\nv2 = linecache.getline(FILENAME2, 2)[:-1]\nv3 = linecache.getline(FILENAME3, 2)[:-1]\nprint(v2,v3)\n\nsumofv = linecache.getline(FILENAME1, 3)[:-1]\ntmp = []\ndegree = []\ndegree_id = [[0 for j in range(2)] for i in range(int(v))]\n\nwith open(FILENAME1) as f:\n    data = f.readlines()[5:int(v)+5]\nfor item in data:\n    tmp.append(item[:-1])\ntmp.append(sumofv)\n\nfor i in range(len(tmp)-1):\n    degree.append(int(tmp[int(i)+1]) - int(tmp[int(i)]))\nfor i in range(len(degree)):\n    degree_id[i][0] = degree[i]\n    degree_id[i][1] = i\ndegree_id.sort(reverse = True)\n\n\n#ハイパーエッジに含まれる頂点を出す\noh = []\neh = []\ntmp = []\n\nif(int(v2) <= int(v3)):\n    #ハイパーエッジに含まれる頂点を出す\n    start = int(linecache.getline(FILENAME3, 2)[:-1]) + int(linecache.getline(FILENAME3, 3)[:-1])\n    e = linecache.getline(FILENAME3, 4)[:-1]\n    sumofe = linecache.getline(FILENAME3, 5)[:-1]\n\n    with open(FILENAME3) as f:\n        data = f.readlines()[int(start)+5 : int(start)+int(e)+5]\n    for item in data:\n        oh.append(item[:-1])\n    oh.append(sumofe)\n\n    with open(FILENAME3) as f:\n        data = f.readlines()[int(start)+int(e)+5 : int(start)+int(e)+int(sumofe)+5]\n    for item in data:\n        eh.append(item[:-1])\n\n    count = [[0 for j in range(2)] for i in range(int(v3))]\n    for i in range(int(len(oh))-1):\n        count[i][0] = i\n        for j in eh[int(oh[i]):int(oh[i+1])]:\n            if(int(j) < int(v2)):\n                count[i][1] = 1\n        for j in eh[int(oh[i]):int(oh[i+1])]:\n            if(0 < int(count[i][1])):\n                tmp.append(j)\n\nif(int(v3) < int(v2)):\n    #ハイパーエッジに含まれる頂点を出す\n    start = int(linecache.getline(FILENAME2, 2)[:-1]) + int(linecache.getline(FILENAME2, 3)[:-1])\n    e = linecache.getline(FILENAME2, 4)[:-1]\n    sumofe = linecache.getline(FILENAME2, 5)[:-1]\n\n    with open(FILENAME2) as f:\n        data = f.readlines()[int(start)+5 : int(start)+int(e)+5]\n    for item in data:\n        oh.append(item[:-1])\n    oh.append(sumofe)\n\n    with open(FILENAME2) as f:\n        data = f.readlines()[int(start)+int(e)+5 : int(start)+int(e)+int(sumofe)+5]\n    for item in data:\n        eh.append(item[:-1])\n\n    count = [[0 for j in range(2)] for i in range(int(v2))]\n    for i in range(int(len(oh))-1):\n        count[i][0] = i\n        for j in eh[int(oh[i]):int(oh[i+1])]:\n            if(int(j) < int(v3)):\n                count[i][1] = 1\n        for j in eh[int(oh[i]):int(oh[i+1])]:\n            if(0 < int(count[i][1])):\n                tmp.append(j)\n\nprint(len(degree_id))\nwith open(args[1] + \"_result\", \"w\") as f:\n    for i in range(len(degree)):\n        if(0 < tmp.count(str(degree_id[i][1]))):\n            f.write('d: ')\n            f.write(str(degree_id[i][0]))\n            f.write('\\t')\n            f.write('ID: ')\n            f.write(str(degree_id[i][1]))\n            f.write('\\n')\n","sub_path":"heya/degreeranking_result.py","file_name":"degreeranking_result.py","file_ext":"py","file_size_in_byte":3037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"438094672","text":"from typing import List\n\ndef bubbleSort(nums: List[int]):\n    n = len(nums)\n    for i in range(n-1):\n        flag = True\n        for j in range(n-i-1):\n            if nums[j] > nums[j+1]:\n                nums[j], nums[j+1], nums[j+1], nums[j]\n                flag = False\n        if flag: break\n    \ndef insertionSort(nums: List[int]):\n    n = len(nums)\n    for i in range(1, n):\n        value = nums[i]\n        j = i - 1\n        while j >= 0:\n            if nums[j] > value:\n                nums[j+1] = nums[j]\n                j -= 1\n            else:\n                break\n        nums[j+1] = value \n\ndef selectionSort(nums: List[int]):\n    n = len(nums)\n    for i in range(n-1):\n        idx = i\n        for j in range(i+1, n):\n            if nums[j] < nums[idx]:\n                idx = j\n        if i != idx:\n            nums[i], nums[idx] = nums[idx], nums[i]\n        \ndef mergeSort(nums: List[int]):\n    def _merge(nums: List[int], p: int, q: int, r: int):\n        tmp = [None]*(r-p)\n        i, j, k = p, q, 0\n        while i < q and j < r:\n            if nums[i] <= nums[j]:\n                tmp[k] = nums[i]\n                k += 1\n                i += 1\n            else:\n                tmp[k] = nums[j]\n                k += 1\n                j +=1\n        \n        start, end = i, q \n        if j < r: start, end = j, r\n        tmp[k:] = nums[start:end]\n        nums[p:r] = tmp[:]\n        \n    def _mergeSort(nums: List[int], p: int, r: int):\n        if r - p <= 1: return\n        q = p + ((r - p) >> 1)\n        _mergeSort(nums, p, q)\n        _mergeSort(nums, q, r)\n        _merge(nums, p, q, r)\n    \n    _mergeSort(nums, 0, len(nums))\n\ndef quickSort(nums: List[int]):\n    def _partition(nums: List[int], p: int, r: int) -> int:\n        pivot = nums[r-1]\n        i = p\n        for j in range(p, r-1):\n            if nums[j] < pivot:\n                nums[i], nums[j] = nums[j], nums[i]\n                i += 1\n        nums[i], nums[r-1] = nums[r-1], nums[i]\n        return i\n\n    def _quickSort(nums: List[int], p: int, r: int):\n        if r - p <= 1: return\n        q = _partition(nums, p, r)\n        _quickSort(nums, p, q)\n        _quickSort(nums, q, r)\n\n    _quickSort(nums, 0, len(nums))\n\ndef countingSort(nums: List[int]):\n    n = len(nums)\n    if n <= 1: return\n    max_num = max(nums)\n    counts = [0] * (max_num + 1)\n    for i in range(n):\n        counts[nums[i]] += 1\n    \n    for i in range(1, max_num+1):\n        counts[i] = counts[i-1]+counts[i]\n\n    tmp = [None]*n\n    for i in range(n):\n        idx = counts[nums[i]] - 1\n        tmp[idx] = nums[i]\n        counts[nums[i]] -= 1\n\n    nums[:] = tmp[:]\n\ndef kthElement(nums: List[int], k: int):\n    def _partition(nums: List[int], p: int, r: int):\n        pivot = nums[r-1]\n        i = p\n        for j in range(p, r-1):\n            if nums[j] < pivot:\n                nums[i], nums[j] = nums[j], nums[i]\n                i += 1\n        nums[i], nums[r-1] = nums[r-1], nums[i]\n\n        return i\n\n    def _kthElement(nums: List[int], p: int, r: int, k: int):\n        q = _partition(nums, p, r)\n        if q == k: return nums[k]\n        elif q < k: return _kthElement(nums, q, r, k)\n        else: return _kthElement(nums, p, q, k)\n        \n    assert(k >= 0 and k < len(nums))\n    return _kthElement(nums, 0, len(nums), k)\n\nif __name__ == '__main__':\n    import random\n    nums = [2, 5, 1, 3]\n    \n    random.shuffle(nums)\n    
bubbleSort(nums)\n    print(nums)\n\n    random.shuffle(nums)\n    insertionSort(nums)\n    print(nums)\n\n    random.shuffle(nums)\n    selectionSort(nums)\n    print(nums)\n\n    random.shuffle(nums)\n    mergeSort(nums)\n    print(nums)\n\n    random.shuffle(nums)\n    quickSort(nums)\n    print(nums)\n\n    random.shuffle(nums)\n    countingSort(nums)\n    print(nums)\n\n    random.shuffle(nums)\n    val = kthElement(nums, 2)\n    print(val)\n","sub_path":"others/sort.py","file_name":"sort.py","file_ext":"py","file_size_in_byte":3813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"490446321","text":"import os\nfrom PIL import Image\nimport cv2\nfrom collections import Counter\nimport pickle\n\nroot = './105_classes_pins_dataset/'\nresized_root = './pins_resized/'\ncleansed_root = './pins_cleansed/'\n\ndef resizer():\n    for dir in os.listdir(root):\n        name_dir = root + dir\n        img_list = os.listdir(name_dir)\n\n        for idx, i in enumerate(img_list):\n            img = Image.open(os.path.join(name_dir, i))\n            img_resize = img.resize((256, 256), Image.ANTIALIAS)\n            resized_name_dir = resized_root + dir\n            try:\n                img_resize.save(os.path.join(resized_name_dir, '{}.png'.format(i[:-4])), quality = 95)\n            except:\n                os.mkdir(resized_name_dir)\n                img_resize.save(os.path.join(resized_name_dir, '{}.png'.format(i[:-4])), quality = 95)\n\ndef get_tops():\n    counter = []\n    try:\n        with open('scores.pickle', 'rb') as pck:\n            scores = pickle.load(pck)\n    except:\n        d = {}\n        for dir in [f for f in os.listdir(resized_root) if not f.startswith(\".\")]:\n            name_dir = resized_root + dir\n            img_list = [f for f in os.listdir(name_dir) if not f.startswith(\".\")]\n            d[dir] = []\n\n            for idx, i in enumerate(img_list):\n                img = cv2.imread(os.path.join(name_dir, i))\n                gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n                fm = cv2.Laplacian(gray,cv2.CV_64F).var()\n                thres = 200\n                if fm >= thres:\n                    d[dir].append(i)\n                    counter.append(dir)\n            # tops.append((dir, len(d[dir])))\n\n        tops = Counter(counter).most_common()\n        scores = [d, tops]\n        with open('scores.pickle', 'wb') as pck:\n            pickle.dump(scores, pck, protocol=pickle.HIGHEST_PROTOCOL)\n\n    # tops = Counter(counter).most_common()\n\n    return scores[0], scores[1]\n\ndef cleansing():\n    d, tops = get_tops()\n    names = []\n    # for (a, b) in tops:\n    #     names.selected.append(a)\n\n    for i, (k, vs) in enumerate(d.items()):\n        # if k in names:\n        cleansed_name_dir = cleansed_root + k\n        for v in vs:\n            img = Image.open(os.path.join(resized_root, k, v))\n            try:\n                img.save(os.path.join(cleansed_name_dir, v), quality = 95)\n                print(\"{} successfully saved\".format(v))\n            except:\n                os.mkdir(cleansed_name_dir)\n                img.save(os.path.join(cleansed_name_dir, v),  quality = 95)\n                print(\"{} successfully saved\".format(v))\n        # else:\n        #     continue\n\n# s\n\n","sub_path":"scripts/resize_cleansing.py","file_name":"resize_cleansing.py","file_ext":"py","file_size_in_byte":2594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"272290782","text":"import sys\nread = sys.stdin.readline\n\nr, c = map(int, read().split())\nmax_score = 0\n\nvisited = {}\nboard = []\n\nfor _ in range(r):\n\tboard.append(read().rstrip())\n\nfor row in board:\n\tfor char in row:\n\t\tif char in visited: pass\n\t\telse: visited[char] = False\n\n\ndef search(idx_tuple, steps, vis_dict):\n\tglobal r, c\n\trnum, cnum = idx_tuple\n\tvis_dict[board[rnum][cnum]] = True\n\tdef score():\n\t\tglobal max_score\n\t\tif steps > max_score: max_score = steps\n\n\tif rnum > 0 and not vis_dict[board[rnum-1][cnum]]:\n\t\tsearch((rnum-1, cnum), steps + 1, vis_dict.copy())\n\tif rnum < r - 1 and not vis_dict[board[rnum+1][cnum]]:\n\t\tsearch((rnum+1, cnum), steps + 1, vis_dict.copy())\n\tif cnum > 0 and not vis_dict[board[rnum][cnum-1]]:\n\t\tsearch((rnum, cnum-1), steps + 1, vis_dict.copy())\n\tif cnum < c - 1 and not vis_dict[board[rnum][cnum+1]]:\n\t\tsearch((rnum, cnum+1), steps + 1, vis_dict.copy())\n\n\tscore()\n\nsearch((0,0),1, visited.copy())\nprint(max_score)\n\n","sub_path":"1000/01987_baekjoon.py","file_name":"01987_baekjoon.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"49368844","text":"# coding:utf-8\n# **\n# Tag : Backpack DP\nimport sys\nMAX_INT = sys.maxint\n\ndef max_duffel_bag_value(cake_tuples, weight_capacity):\n    max_values_at_capacits = [0] * (weight_capacity+1)\n\n    for current_capacity in range(weight_capacity+1):\n        current_max = 0\n        for cake_weight, cake_value in cake_tuples:\n            # Handle the edge case\n            if cake_weight == 0 and cake_value != 0:\n                return MAX_INT\n\n            # if current cake could be hold in the bag\n            if cake_weight <= current_capacity:\n                current_max = max(current_max, max_values_at_capacits[current_capacity - cake_weight] + cake_value)\n\n        max_values_at_capacits[current_capacity] = current_max\n\n    return max_values_at_capacits[weight_capacity]\n","sub_path":"InterviewCake/cake-thief.py","file_name":"cake-thief.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"607207912","text":"import dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output\n\nfrom enum import Enum\n\nfrom analysis import global_vars\n\nfrom components.UserReviewComponent import UserReviewComponent\nfrom components.NewsClassificationComponent import NewsClassificationComponent\n\nexternal_stylesheets = [\n    'https://cdnjs.cloudflare.com/ajax/libs/semantic-ui/2.4.1/semantic.min.css',\n    '/assets/main.css',\n]\n\nexternal_scripts = [\n    'https://code.jquery.com/jquery-3.4.1.min.js',\n    'https://cdnjs.cloudflare.com/ajax/libs/semantic-ui/2.4.1/semantic.min.js',\n    '/assets/main.js',\n    '/assets/autosize.min.js',\n]\n\nclass Page(Enum):\n    \"\"\"\n    Page and Tab are the same here. Click on a tab will also change the page.\n    \"\"\"\n    UserReview = 'review'\n    NewsClassification = 'news'\n\n    @property\n    def title(self):\n        if self == Page.UserReview:\n            return '1. User Review'\n        elif self == Page.NewsClassification:\n            return '2. News Classification'\n        else:\n            raise ValueError('Invalid value')\n\n    @property\n    def content(self):\n        if self == Page.UserReview:\n            return UserReviewComponent().render()\n        elif self == Page.NewsClassification:\n            return NewsClassificationComponent().render()\n        else:\n            raise ValueError('Invalid value')\n\n\npages = [Page.UserReview, Page.NewsClassification]\n\n# Main app\napp = dash.Dash(\n    __name__, \n    external_stylesheets=external_stylesheets,\n    external_scripts=external_scripts,\n    static_folder='assets',\n    meta_tags=[\n        {\"name\": \"viewport\", \"content\": \"width=device-width, initial-scale=1\"}\n    ],\n)\n\napp.config['suppress_callback_exceptions'] = True\n\nglobal_vars.initialize_global_vars_for_user_review_section()\nglobal_vars.initialize_global_vars_for_news_section()\n\n# Root of all views\nroot_layout = html.Div([\n    # dcc.Location(id='url', refresh=False),\n    dcc.Markdown(\n\"\"\"\n# CSE 256: Explanation of Classifier\n- Wirawit Rueopas (A53277204)\n- Saideep Reddy Pakkeer (A53269319)\n\"\"\",\n        id='app-header-text'\n    ),\n\n    html.Div(dcc.Tabs(id=\"tabs\", value=Page.UserReview.value, children=[\n        dcc.Tab(label=page.title, value=page.value) for page in pages        \n    ])),\n\n    html.Div(id='page-content', style={'padding-top': '30px'}),\n])\n\n# Link Tab to URL & Content (page content, url's pathname)\n@app.callback(\n    Output('page-content', 'children'), \n    [Input('tabs', 'value')]\n)\ndef display_page(tab_value):\n    try:\n        page = Page(tab_value)\n    except ValueError as e:\n        # Default page\n        page = Page.UserReview\n        print(\"Invalid Page/Tab Encountered:\", e, \"...Redirect to UserReview\")\n    return page.content\n\n# Here we try to map url to tab, but result in circular dependency ... 
So just don't support other url path for now.\n# @app.callback(\n#     Output('tabs', 'value'),\n#     [Input('url', 'pathname')]\n# )\n# def display_url(pathname):\n#     # Default path\n#     if pathname == '/':    \n#         tab_value = Page.UserReview.value\n#     else:\n#         pathname = pathname[1:] # exclude '/'\n#         tab_value = pathname\n#     return tab_value\n\napp.layout = root_layout\napp.title = 'CSE 256 - Model Explanation'\n\n# Register all page's callbacks\nUserReviewComponent().register_callbacks(app)\nNewsClassificationComponent().register_callbacks(app)\n\n# Server when deploy* (see `Procfile`)\nserver = app.server\nPORT = 3000\n\nif __name__ == '__main__':\n    app.run_server(debug=True, port=PORT)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"530344808","text":"# coding: UTF-8\r\nfrom copy import deepcopy\r\nfrom pyrogram import Client, Filters, KeyboardButton, ReplyKeyboardMarkup, Message\r\nimport json\r\nimport datetime as dt\r\nimport requests\r\nfrom math import *\r\nfrom random import choice\r\n\r\nxizmat_narxi = {}\r\ntime = dt.datetime.now()\r\ninfo = {}\r\nkomment = {}\r\nbot_token = \"961191162:AAFCjhCFTcA2ZQir7fjqggDGk20esrV3v9Y\"\r\napi_hash = \"9f8cd674a86791512c2baebef59a3a09\"\r\napi_id = 394876\r\nphone_numbers = {}\r\nbuyurtmalar = {}\r\nmanzil = {}\r\ngeolokatsiya = {}\r\nfakt_kanal_id = -1001354904206\r\nguruh_id = -1001229948049\r\n# komandalar\r\n\r\nqoidalar = \"📑 Qoidalar\"\r\nortga = \"🔙 Ortga\"\r\nmenu = \"🏠 Asosiy menyu\"\r\nbuyurtma = \"🛵 Buyurtma Berish\"\r\nkorzina = \"🛒 Savat\"\r\nha = \"✅ Ha, buyurtma beraman\"\r\nyuq = \"❌ Yo'q, tozalansin\"\r\nqol = \"🛒 Savatda qolsin\"\r\ntozalash = \"🔄 Tozalash\"\r\nboglanish = \"👨‍💻 Bog'lanish\"\r\nmanzil_ = \"📍 Manzil\"\r\nshikoyat = \"🖍 Shikoyat qilmoqchiman\"\r\ntaklif = \"✏️ Taklifim bor\"\r\nxatolik = \"⚠️ Botda hatolik topdim\"\r\nshunchaki = \"🔖 Shunchaki...\"\r\nsozlamalar = \"⚙ Sozlamalar\"\r\ntelefon = \"🔘 Telefon raqamni almashtirish\"\r\nfikr = {}\r\n# bo'limlar\r\ntaomlar = {\r\n    \"1⃣ Suyuq Taomlar\": [],\r\n    \"2⃣ Quyuq Taomlar\": [],\r\n    \"🍜 Uyg'ur Taomlar\": [],\r\n    \"🍢 Kaboblar\": [],\r\n    \"🥗 Salatlar\": [],\r\n    \"🍻 BAR\": [],\r\n\r\n}\r\nfaktlar = deepcopy(taomlar)\r\ntezt = deepcopy(taomlar)\r\n\r\nbosh_shablon = \"\"\"{} {}  {}° {}\r\n\r\n🔴 Yangiyo'l Shahar va Tuman  \r\n\r\n👇 Kerakli taomlar bo'limni tanlang:\r\n\"\"\"\r\nbosh = \"\"\r\nob_havo = \"\", \"\"\r\nhafta = [\"Dushanba\", \"Seshanba\", \"Chorshanba\", \"Payshanba\", \"Juma\", \"Shanba\", \"Yakshanba\"]\r\nicon = {\r\n    \"01d\": \"☀️\",\r\n    \"01n\": \"🌑\",\r\n    \"02d\": \"🌤\",\r\n    \"02n\": \"☁️\",\r\n    \"03d\": \"⛅️\",\r\n    \"03n\": \"☁️\",\r\n    \"04d\": \"☁️\",\r\n    \"04n\": \"☁️\",\r\n    \"09d\": \"🌧\",\r\n    \"09n\": \"🌧\",\r\n    \"10d\": \"🌦\",\r\n    \"10n\": \"🌧\",\r\n    \"11d\": \"⛈\",\r\n    \"11n\": \"⛈\",\r\n    \"13d\": \"❄️\",\r\n    \"13n\": \"❄️\",\r\n    \"50d\": \"🌫\",\r\n    \"50n\": \"🌫\",\r\n}\r\n\r\n# bo'lim nomi #taom nomi #soni\r\ntemp = {}\r\nbot = Client(\"EZAZ\", bot_token=bot_token, api_hash=api_hash, api_id=api_id)\r\n\r\n\r\ndef distance(lat1,long1, lat2,  long2):\r\n    lat1 = lat1 / 180 * pi\r\n    lat2 = lat2 / 180 * pi\r\n    long1 = long1 / 180 * pi\r\n    long2 = long2 / 180 * pi\r\n    r = 6371\r\n    dlon = long1 - long2\r\n    dlat = lat1 - lat2\r\n    a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2\r\n    c = 2 * asin(sqrt(a))\r\n    return c * r\r\n\r\n\r\ndef save():\r\n    f = open(\"phone_number.json\", \"w\")\r\n    f.write(json.dumps(phone_numbers))\r\n    f.close()\r\n\r\n\r\ndef load():\r\n    global phone_numbers\r\n    try:\r\n        f = open(\"phone_number.json\", 'r')\r\n    except:\r\n        return\r\n    phone_numbers = json.loads(f.read())\r\n\r\n\r\ndef rasmlilar(m_list):\r\n    n_list = []\r\n    for x in m_list:\r\n        try:\r\n            a = x.caption\r\n            if a:\r\n                n_list.append(x)\r\n        except:\r\n            continue\r\n    return n_list\r\n\r\n\r\ndef fakt(m_list):\r\n    global faktlar\r\n    for x in m_list:\r\n        try:\r\n            if x.text:  # and not faktlar.get(x.text.split('\\n')[0].strip(), 0):\r\n                faktlar[x.text.split('\\n')[0].strip()].append('\\n'.join(x.text.split('\\n')[1:]))\r\n 
       except:\r\n            pass\r\n\r\n\r\ndef yangilash():\r\n    global taomlar\r\n    taomlar = deepcopy(tezt)\r\n    taom = rasmlilar(bot.get_messages(-1001149535338, range(337, 437)))\r\n    fakt(bot.get_messages(fakt_kanal_id, range(100)))\r\n    #    taom += rasmlilar(bot.get_messages(-1001149535338, range(337, 437)))\r\n    for t in taom:\r\n        x = t.caption.split('\\n')\r\n        x = [i for i in x if i]\r\n        bolim = x[0].strip()\r\n\r\n        nomi = ''\r\n        try:\r\n            nomi = x[1].strip()\r\n\r\n            narxi = float(x[2])\r\n        except:\r\n            narxi = 0\r\n        rasm = t.photo.file_id\r\n        if len(x) >= 5:\r\n            variant1 = x[3]\r\n            variant2 = x[4]\r\n            if len(x) > 5:\r\n                description = '\\n'.join(x[5:])\r\n            else:\r\n                description = \"\"\r\n        else:\r\n            variant1 = variant2 = None\r\n            description = ''\r\n        taomlar[bolim].append([rasm, nomi, narxi, variant1, variant2, description])\r\n\r\n\r\ndef get_weather():\r\n    try:\r\n        a = requests.get(\r\n            \"http://api.openweathermap.org/data/2.5/weather?lat=41.35&lon=69.29&appid=9993d5f60ccd41752f2ba809ea2682b3\")\r\n        x = json.loads(a.text)\r\n        return \"{0:+}\".format(x[\"main\"][\"temp\"] - 273.15), icon[x[\"weather\"][0][\"icon\"]]\r\n    except:\r\n        return\r\n\r\n\r\ndef edit_home():\r\n    global bosh, ob_havo\r\n    if time.minute == dt.datetime.now().minute:\r\n        ob_havo = get_weather() or ob_havo\r\n        bosh = bosh_shablon.format(hafta[dt.date.today().weekday()], dt.date.today(), *ob_havo)\r\n\r\n\r\nedit_home()\r\nbolimlar = ReplyKeyboardMarkup([\r\n    [\"1⃣ Suyuq Taomlar\", \"2⃣ Quyuq Taomlar\"],\r\n    [\"🍜 Uyg'ur Taomlar\", \"🍢 Kaboblar\", ],\r\n    [\"🍻 BAR\", \"🥗 Salatlar\"],\r\n    [korzina,  # tayyor\r\n     buyurtma],\r\n    [sozlamalar, qoidalar ],\r\n    [boglanish,  # tayyor\r\n     manzil_  # tayyor\r\n     ],\r\n\r\n], resize_keyboard=True)\r\nraqamlar = ReplyKeyboardMarkup([\r\n    [\"1\", \"2\", \"3\"],\r\n    [\"4\", \"5\", \"6\"],\r\n    [\"7\", \"8\", \"9\"],\r\n    [ortga, menu],\r\n    [korzina, ]\r\n], resize_keyboard=True)\r\n\r\n\r\n@bot.on_message(Filters.command([\"start\"]))\r\ndef handle_start(c, m):\r\n    edit_home()\r\n    yangilash()\r\n    buyurtmalar[m.chat.id] = []\r\n    bot.send_message(m.chat.id,\"\"\"Assalomu Alaykum,\r\n\r\nEZAZ | Milliy Taomlar botimizga xush kelibsiz.\r\n\r\nIltimos, avval siz bilan bog'lanishimiz uchun o'z raqamingizni 👤 Kontakt ko'rinishda jo'nating yoki pastdagi \"📲 Telefon raqamni jo'natish\" tugmasini bosing:\"\"\",\r\n                     reply_markup=ReplyKeyboardMarkup(\r\n                         [[KeyboardButton(\"📱 Telefon raqamni jo'natish\", request_contact=True), ]],\r\n                         resize_keyboard=True))\r\n\r\n\r\n@bot.on_message(Filters.contact)\r\ndef handle_contact(c, m):\r\n    edit_home()\r\n    phone_numbers[m.chat.id] = m.contact.phone_number\r\n    save()\r\n    bot.send_message(m.chat.id, \"\"\"\r\nBOT V2.1.0 TEST\r\n\r\nRahmat!\r\nEslatib o'tamiz: BOT test rejimida qayta ishlanmoqda.Yangi o'zgarishlarga o'tish uchun iltimos /start kommandasini tez-tez jo'natib turing ana shunda o'zgarishlar sizda paydo bo'ladi.\r\n\"\"\")\r\n    bot.send_message(m.chat.id, bosh, reply_markup=bolimlar)\r\n\r\n\r\n@bot.on_message(Filters.text)\r\ndef handle_text(c, m: Message):\r\n    edit_home()\r\n    global buyurtmalar, temp, info\r\n    if 
m.text.lower() == \"yangila\":\r\n        yangilash()\r\n        return\r\n    if m.chat.id == -1001149535338:\r\n        yangilash()\r\n        return\r\n    if m.chat.id == guruh_id:\r\n        if m.text.lower().startswith(\"info\"):\r\n            text = \"Bugun:\\n\"\r\n            for k, v in info.items():\r\n                text += \"{}ta {};\\n\".format(v, k)\r\n            text += \"sotildi\"\r\n            bot.send_message(m.chat.id, text)\r\n            return\r\n        elif m.text.lower().startswith(\"tozala\"):\r\n            info = {}\r\n            m.reply(\"Ma'lumotlar to'liq tozalandi!\")\r\n            return\r\n        elif m.text.lower().startswith(\"javob\"):\r\n            if m.text.split(\"\\n\")[1] == \"hammaga\":\r\n                for i in phone_numbers.keys():\r\n                    try:\r\n                        bot.send_message(i, '\\n'.join(m.text.split('\\n')[2:]))\r\n                    except:\r\n                        pass\r\n            else:\r\n                try:\r\n                    bot.send_message(m.text.split('\\n')[1], '\\n'.join(m.text.split('\\n')[2:]))\r\n                except:\r\n                    pass\r\n            return\r\n    elif m.text == ortga:\r\n        fikr[m.chat.id] = None\r\n        if not temp.get(m.chat.id) or len(temp.get(m.chat.id, [])) <= 1:\r\n            temp[m.chat.id] = None\r\n            bot.send_message(m.chat.id, bosh, reply_markup=bolimlar)\r\n            return\r\n        else:\r\n            # print(1)\r\n            temp[m.chat.id] = [temp[m.chat.id][0]]\r\n            x = []\r\n            try:\r\n                bot.send_message(m.chat.id, choice(faktlar[temp[m.chat.id][0]]))\r\n            except:\r\n                pass\r\n            for i, t in enumerate(taomlar[temp[m.chat.id][0]]):\r\n                a, b = i // 2, i % 2\r\n                if b == 0:\r\n                    x.append([])\r\n                x[a].append(t[1])\r\n            x.append([ortga, korzina])\r\n            x.append([buyurtma])\r\n            # print(2)\r\n            x = ReplyKeyboardMarkup(x, resize_keyboard=True)\r\n            bot.send_message(m.chat.id, \"{} :\".format(temp[m.chat.id][0]), reply_markup=x)\r\n            return\r\n    elif fikr.get(m.chat.id, 0):\r\n        text = \"{} `{}` mavzu: {}\\n\\n\".format(m.from_user.first_name, m.from_user.id, fikr[m.chat.id])\r\n        text += m.text\r\n        bot.send_message(guruh_id, text)\r\n        bot.send_message(m.chat.id, \"Rahmat adminstratorlar albatta siz bilan bo'glanadi.\", reply_markup=bolimlar)\r\n        fikr[m.chat.id] = None\r\n    elif m.text == manzil_:\r\n        bot.send_location(m.chat.id, 41.120233069494155, 69.07184731700151)\r\n        bot.send_message(m.chat.id, \"\"\"🍴 Ezaz Milliy Taomlar:\r\n\r\n🇺🇿 Toshkent viloyati,Yangiyo'l shahar,Toshkent shox. 34-uy.  
\r\n\"\"\")\r\n        return\r\n    elif m.text == menu:\r\n        bot.send_message(m.chat.id, bosh, reply_markup=bolimlar)\r\n        temp[m.chat.id] = None\r\n        return\r\n    elif m.text == boglanish:\r\n        x = ReplyKeyboardMarkup([[shikoyat],\r\n                                 [taklif],\r\n                                 [xatolik],\r\n                                 [shunchaki],\r\n                                 [ortga, ]], resize_keyboard=True)\r\n        bot.send_message(m.chat.id, \"\"\"✉️ Telegram orqali: @EzazAdmin \r\n\r\n📞 Telefon orqali: +998951466616\r\n\r\n📧 Elektron Pochta: \r\nezazsprt@gmail.com\r\n\r\n📷 Instagram: \r\nhttp://instagram.com/ezazmilliytaomlar\r\n\r\n✍ Marhamat: qay tarzda bog'lanmoqchisiz ?\"\"\", reply_markup=x)\r\n        return\r\n    elif m.text == qoidalar:\r\n        bot.send_message(m.chat.id, \"\"\"\r\n        👤 Hurmatli mijoz,siz tanlagan taomlar uchun doimiy ravishda \"Bir martalik\" idish qo'shib oboriladi.\r\nshuning uchun BOT avtomatik tarzda Quyuq taom uchun: quyuq idish(1000so'm) suyuq taom uchun: suyuq idish(1500so'm) hisoblaydi.\r\n\r\n🛵 Yetkazib berish xizmatimiz Yangiyo'l shahar va Tumanida amal qiladi.Yetkazib berish xizmatimiz 3kmdan uzoq masofaga km hisobida 1000so'mdan qo'shib boriladi.Bu ishni BOTning o'zi avtomatik tarzda bajaradi.\r\n\r\n⚠️Bundan tashqari albatta mahsulot yetkazib berilgandan so'ng siz mahsulotlar nomi yozilgan mahsus chekni talab qilishingiz mumkun bo'ladi.\r\n\r\n👨‍💻Shikoyat yoki qandaydir taklif murojaatlar bo'lsa \"Bog'lanish\" bo'limiga o'tishingiz mumkin,Adminstratorlar albatta siz bilan bog'lanadi.\r\n\r\n👤 KONTAKTLAR:\r\n@EZAZSUPPORTBOT\r\n+998951466616\r\n+998903366807\r\n\"\"\")\r\n    elif m.text == korzina:\r\n        try:\r\n            if not buyurtmalar[m.chat.id]:\r\n                bot.send_message(m.chat.id,\r\n                                 \"🧐 Siz hali taom tanlamadingiz \\\"🔙 Ortga\\\" qaytib taom tanlang va Savatga yig'ib \"\r\n                                 \"boring.🛒\", reply_markup=ReplyKeyboardMarkup([[ortga]], resize_keyboard=True))\r\n                return\r\n        except KeyError:\r\n            buyurtmalar[m.chat.id] = []\r\n            bot.send_message(m.chat.id,\r\n                             '🧐 Siz hali taom tanlamadingiz \"🔙 Ortga\" qaytib taom tanlang va Savatga yig\\'ib boring.🛒',\r\n                             reply_markup=ReplyKeyboardMarkup([[ortga]], resize_keyboard=True))\r\n            return\r\n        else:\r\n            x = []\r\n            text = \"👇 Siz tanlagan taomlar:\\n\\n\"\r\n            for i in buyurtmalar[m.chat.id]:\r\n                # print(i)\r\n                text += \"{}x{}: {}00\\n\".format(i[2], i[0], i[1] * i[2])\r\n                x.append([KeyboardButton(\"❌ {}ni bekor qilish\".format(i[0]), request_contact=False)])\r\n            x.append([buyurtma])\r\n            x.append([tozalash])\r\n            x.append([ortga])\r\n            x = ReplyKeyboardMarkup(x, resize_keyboard=True)\r\n            bot.send_message(m.chat.id, text, reply_markup=x)\r\n            return\r\n    elif m.text == sozlamalar:\r\n        bot.send_message(m.chat.id,\r\n                         \"Sozlamalar orqali siz bilan bog'lanishimiz mumkin bo'lgan raqamingizni o'zgartirishingiz mumkin:\",\r\n                         reply_markup=ReplyKeyboardMarkup(\r\n                             [[KeyboardButton(telefon, request_contact=True)], [ortga]], resize_keyboard=True))\r\n        return\r\n    elif m.text == tozalash:\r\n        buyurtmalar[m.chat.id] 
= []\r\n        bot.send_message(m.chat.id, \"\"\"Siz tanlagan barcha taomlar to'liq tozalandi.\r\n       \r\n        👇 Kerakli taomlar bo'limni tanlang:\"\"\",\r\n                         reply_markup=ReplyKeyboardMarkup([[ortga, menu]], resize_keyboard=True))\r\n        return\r\n    elif m.text == shikoyat:\r\n        fikr[m.chat.id] = 'shikoyat'\r\n        bot.send_message(m.chat.id, \"\"\"Qanday shikoyat qilmoqchisiz ?\r\n\r\n🔸 O'z fikringizni yozib qoldiring albatta inobatga olamiz:\"\"\",\r\n                         reply_markup=ReplyKeyboardMarkup([[ortga]], resize_keyboard=True))\r\n        return\r\n    elif m.text == taklif:\r\n        fikr[m.chat.id] = 'taklif'\r\n        bot.send_message(m.chat.id, \"\"\"Qanday taklifingiz bor ?\r\n\r\n🔸 O'z fikringizni yozib qoldiring albatta inobatga olamiz:\"\"\",\r\n                         reply_markup=ReplyKeyboardMarkup([[ortga]], resize_keyboard=True))\r\n        return\r\n    elif m.text == xatolik:\r\n        fikr[m.chat.id] = 'xatolik'\r\n        bot.send_message(m.chat.id, \"\"\"Qanday xatolik topdingiz ?\r\n\r\n🔸 O'z fikringizni yozib qoldiring albatta inobatga olamiz:\"\"\",\r\n                         reply_markup=ReplyKeyboardMarkup([[ortga]], resize_keyboard=True))\r\n        return\r\n    elif m.text == shunchaki:\r\n        fikr[m.chat.id] = 'shunchaki'\r\n        bot.send_message(m.chat.id, \"\"\"🔸 O'z fikringizni yozib qoldiring albatta inobatga olamiz:\"\"\",\r\n                         reply_markup=ReplyKeyboardMarkup([[ortga]], resize_keyboard=True))\r\n        return\r\n\r\n    elif m.text == buyurtma:\r\n        x = buyurtmalar.get(m.chat.id, None)\r\n        if x is None:\r\n            buyurtmalar[m.chat.id] = []\r\n        if not x:\r\n            bot.send_message(m.chat.id,\r\n                             '🧐 Siz hali taom tanlamadingiz \"🔙 Ortga\" qaytib taom tanlang va Savatga yig\\'ib boring.🛒',\r\n                             reply_markup=ReplyKeyboardMarkup([[ortga, menu]], resize_keyboard=True))\r\n            return\r\n\r\n        jami = 0.0\r\n        text = \"👇 Siz tanlagan taomlar: \\n\\n\"\r\n        for i in buyurtmalar[m.chat.id]:\r\n            # print(i)\r\n            text += \"{} x {}: {}00\\n\".format(i[2], i[0], i[1]*i[2])\r\n            jami += i[1] * i[2]\r\n        bot.send_message(m.chat.id, text)\r\n        x = ReplyKeyboardMarkup([[ha], [yuq], [qol]], resize_keyboard=True)\r\n        bot.send_message(m.chat.id, \"Tasdiqlaysizmi ❓\", reply_markup=x)\r\n        return\r\n    elif m.text == ha and not xizmat_narxi.get(m.chat.id, 0):\r\n        manzil[m.chat.id] = True\r\n        bot.send_message(m.chat.id, \"\"\"Iltimos oldin manzilni aniqlash uchun \"📍Geolokatsiya\" tugmasini bosib manzilni jo'nating:\"\"\",\r\n                         reply_markup=ReplyKeyboardMarkup([[KeyboardButton(\"📍 Geolokatsiya\", request_location=True)]],\r\n                                                          resize_keyboard=True))\r\n\r\n        return\r\n    elif m.text == ha:\r\n        komment[m.chat.id] = True\r\n        bot.send_message(m.chat.id, \"💬 Istasangiz, buyurtmangiz haqida izoh kommentariya yozib qoldirishingiz mumkin.Biz uni albatta inobatga olamiz.\",\r\n                         reply_markup= ReplyKeyboardMarkup([[\"Shart emas\"]], resize_keyboard=True))\r\n        return\r\n    elif m.text == yuq:\r\n        buyurtmalar[m.chat.id] = []\r\n        bot.send_message(m.chat.id, \"\"\"Siz tanlagan taomlaringiz to'liq tozalandi \r\n        \r\n        👇 Kerakli taomlar bo'limni tanlang:\"\"\",\r\n   
                      reply_markup=ReplyKeyboardMarkup([[ortga, menu]], resize_keyboard=True))\r\n        return\r\n    elif m.text == qol:\r\n        bot.send_message(m.chat.id, \"\"\"Taomlaringiz savatda saqlandi \r\n        \r\n        👇 Kerakli taomlar bo'limni tanlang:\"\"\", reply_markup=ReplyKeyboardMarkup([[ortga, menu]],\r\n                                                                                  resize_keyboard=True))\r\n    elif m.text in taomlar.keys():\r\n        x = []\r\n        try:\r\n            bot.send_message(m.chat.id, choice(faktlar[m.text]))\r\n        except:\r\n            pass\r\n        for i, t in enumerate(taomlar[m.text]):\r\n            a, b = i // 2, i % 2\r\n            if b == 0:\r\n                x.append([])\r\n            x[a].append(t[1])\r\n        x.append([ortga, korzina])\r\n        x = ReplyKeyboardMarkup(x, resize_keyboard=True)\r\n        temp[m.chat.id] = [m.text]\r\n        bot.send_message(m.chat.id, \"{} :\".format(m.text), reply_markup=x)\r\n        return\r\n    elif manzil.get(m.chat.id, 0) and komment.get(m.chat.id, 0):\r\n\r\n        text = \"{} (`{}`) {} buyurtma berdi:\\n\".format(m.from_user.first_name, m.chat.id,\r\n                                                       m.from_user.last_name or \"\")\r\n        idishlar = 0.0\r\n        jami = 0.0\r\n        for n, i in enumerate(buyurtmalar[m.chat.id]):\r\n            text += \"{}) {} x {} {}00\\n\".format(n+1, i[0], i[2], i[1] * i[2])\r\n            jami += i[1] * i[2]\r\n            if \"suyuq\" in i[-1].lower() or \"uyg'ur\" in i[-1].lower():\r\n                idishlar += 1.5*i[2]\r\n            if \"quyuq\" in i[-1].lower() or \"salatlar\" in i[-1].lower():\r\n                idishlar += 1.0*i[2]\r\n        if idishlar:\r\n            text += \"🍽 Bir martalik idishlar: {}00\\n\".format(idishlar)\r\n        text += \"🛵 Yetkazib berish hizmati: {}00\\n\".format(xizmat_narxi[m.chat.id])\r\n        jami += xizmat_narxi[m.chat.id]\r\n        text += \"🔖 Jami: {0}00\\n\\n\".format(jami+idishlar)\r\n        text += \"📞 Tel: {}\\n📍 Manzil: {}\".format(phone_numbers[m.chat.id], manzil[m.chat.id])\r\n        text += \"\\n💬 Komment: {}\".format(m.text)\r\n        bot.send_message(guruh_id, text)\r\n        bot.send_location(guruh_id, *geolokatsiya[m.chat.id])\r\n        for i in buyurtmalar[m.chat.id]:\r\n            a = info.get(i[-1], 0) + i[2]\r\n            info[i[-1]] = a\r\n        temp[m.chat.id] = None\r\n        manzil[m.chat.id] = None\r\n        komment[m.chat.id] = None\r\n        buyurtmalar[m.chat.id] = []\r\n        bot.send_message(m.chat.id, \"Rahmat,tez orada adminstratorlarimiz albatta siz bilan bog'lanishadi \"\r\n\r\n\r\n                                    \"👇 Kerakli taomlar bo'limini tanlang:\", reply_markup=bolimlar)\r\n        return\r\n    elif \"bekor qilish\" in m.text:\r\n        import re\r\n        s = re.search(r\"(\\w*) bekor\", m.text)\r\n        s = s.groups()[0][:-2]\r\n        # print(s)\r\n        for i in buyurtmalar[m.chat.id]:\r\n            if s in i[0]:\r\n                buyurtmalar[m.chat.id].remove(i)\r\n                break\r\n        x = []\r\n        jami = 6.0\r\n        text = \"👇 Siz tanlagan taomlar:\\n\\n\"\r\n        for i in buyurtmalar[m.chat.id]:\r\n            # print(i)\r\n            text += \"{}x{}: {}00\\n\".format(i[2], i[0], i[1] * i[2])\r\n            jami += i[1] * i[2]\r\n            x.append([KeyboardButton(\"❌ {}ni bekor qilish\".format(i[0]), request_contact=False)])\r\n        x.append([buyurtma])\r\n      
  x.append([tozalash])\r\n        x.append([ortga])\r\n        text += \"\\nYetkazib berish: 6.000\\n\\nJami: {}00\".format(jami)\r\n        x = ReplyKeyboardMarkup(x, resize_keyboard=True)\r\n        bot.send_message(m.chat.id, text, reply_markup=x)\r\n        return\r\n\r\n    elif temp.get(m.chat.id, 0) and len(temp[m.chat.id]) == 1:\r\n        bol = temp[m.chat.id][0]\r\n        x = None\r\n        for i in taomlar[bol]:\r\n            if m.text in i:\r\n                x = i\r\n                break\r\n        if x is None:\r\n            return\r\n        # print(x)\r\n        if \"ro'yxat\" in m.text.lower():\r\n            bot.send_photo(m.chat.id, x[0])\r\n            return\r\n        temp[m.chat.id].append(m.text)\r\n        temp[m.chat.id].append(x[2])\r\n\r\n        if x[3]:\r\n            bot.send_photo(m.chat.id, x[0], caption=\"\"\"{}\\nNarxi: {}00so'm\"\"\".format(x[1], x[2]),\r\n                           reply_markup=ReplyKeyboardMarkup([[x[3]], [x[4]]], resize_keyboard=True))\r\n            if x[-1]:\r\n                bot.send_message(m.chat.id, x[-1])\r\n        else:\r\n            bot.send_photo(m.chat.id, x[0], caption=\"\"\"{}\\nNarxi: {}00so'm\"\"\".format(x[1], x[2]), reply_markup=raqamlar)\r\n            if x[-1]:\r\n                bot.send_message(m.chat.id, x[-1])\r\n            bot.send_message(m.chat.id, \"👇 Miqdorni kiriting yoki o'zingiz yozing:\")\r\n\r\n        return\r\n    elif temp.get(m.chat.id, 0) and len(temp[m.chat.id]) == 3:\r\n        if len(m.text) <= 2:\r\n            soni = int(m.text)\r\n            temp[m.chat.id].append(soni)\r\n\r\n            try:  # nomi     #narxi         #soni\r\n                buyurtmalar[m.chat.id].append((temp[m.chat.id][1], temp[m.chat.id][2], soni, temp[m.chat.id][0]))\r\n            except KeyError:\r\n                buyurtmalar[m.chat.id] = []\r\n                buyurtmalar[m.chat.id].append((temp[m.chat.id][1], temp[m.chat.id][2], soni, temp[m.chat.id][0]))\r\n            bot.send_message(m.chat.id,\r\n                             \"🛒 Siz tanlagan taomlar savatga qo'shildi  Buyurtma Berish bo'limiga kirib buyurtma qilishingiz mumkin!\"\r\n                             \"\", reply_markup=bolimlar)\r\n            return\r\n        else:\r\n            temp[m.chat.id][1] += \"  {}\".format(m.text)\r\n            bot.send_message(m.chat.id, \"👇 Miqdorni kiriting yoki o'zingiz yozing:\", reply_markup=raqamlar)\r\n    elif m.text == \"🔙 Ortga\":\r\n        bot.send_message(m.chat.id, \"👇 Kerakli taomlar bo'limini tanlang:\", reply_markup=bolimlar)\r\n        temp[m.chat.id] = None\r\n        return\r\n    elif m.text == \"👨‍💻 Bog'lanish\":\r\n        bot.send_message(m.chat.id, \"\"\"✉️ Telegram orqali: @EzazAdmin \r\n\r\n📞 Telefon orqali: +998951466616\r\n\r\n📧 Elektron Pochta: \r\nezazsprt@gmail.com\r\n\r\n📷 Instagram: \r\nhttp://instagram.com/ezazmilliytaomlar \"\"\")\r\n        return\r\n\r\n\r\n@bot.on_message(Filters.location)\r\ndef handle_location(c, m: Message):\r\n    if manzil.get(m.chat.id, None):\r\n        manzil[m.chat.id] = \"Geolokatsiya\"\r\n        geolokatsiya[m.chat.id] = (m.location.latitude, m.location.longitude)\r\n        a = distance( 41.120233069494155, 69.07184731700151, m.location.latitude, m.location.longitude)\r\n        xizmat_narxi[m.chat.id] = 6.0\r\n        if a>=3:\r\n            xizmat_narxi[m.chat.id] += ceil(a-3)\r\n        jami = 0.0\r\n        text = \"\"\"👤 Hurmatli mijoz,siz tanlagan taomlar uchun doimiy ravishda \"Bir martalik\" idish qo'shib oboriladi.\r\nshuning uchun 
BOT avtomatik tarzda Quyuq taom uchun: quyuq idish(1000so'm) suyuq taom uchun: suyuq idish(1500so'm) hisoblaydi.\r\n\r\n🛵 Yetkazib berish xizmatimiz Yangiyo'l shahar va Tumanida amal qiladi.Yetkazib berish xizmatimiz 3kmdan uzoq masofaga km hisobida 1000so'mdan qo'shib boriladi.Bu ishni BOTning o'zi avtomatik tarzda bajaradi.\r\n\r\n👇 Siz tanlagan taomlar:\\n\r\n--------------\"\"\"\r\n        idishlar = 0.0\r\n        for i in buyurtmalar[m.chat.id]:\r\n            if \"suyuq\" in i[-1].lower() or \"uyg'ur\" in i[-1].lower():\r\n                idishlar += 1.5*i[2]\r\n            if \"quyuq\" in i[-1].lower() or \"salatlar\" in i[-1].lower():\r\n                idishlar += 1.0*i[2]\r\n            text += \"{} x {}: {}00\\n\".format(i[2], i[0], i[1]*i[2])\r\n            jami += i[1] * i[2]\r\n        if idishlar:\r\n            text += \"🍽 Bir martalik idishlar: {}00\\n\".format(idishlar)\r\n            jami += idishlar\r\n        text += \"🛵 Yetkazib berish hizmati: {}00\\n\".format(xizmat_narxi[m.chat.id])\r\n        jami += xizmat_narxi[m.chat.id]\r\n        text += \"📋 Jami: {}00\\n\" \\\r\n                \"--------------\".format(jami)\r\n        bot.send_message(m.chat.id, text, reply_markup=ReplyKeyboardMarkup(\r\n            [[ha], [yuq], [qol]], resize_keyboard=True\r\n        ))\r\n        return\r\n\r\n\r\n@bot.on_message(Filters.media)\r\ndef ls(c, m: Message):\r\n    if m.chat.id == guruh_id:\r\n        for i in phone_numbers.keys():\r\n            try:\r\n                m.forward(i, as_copy=True)\r\n            except:\r\n                pass\r\n\r\n\r\n@bot.on_message()\r\ndef handle_default(c, m):\r\n    print(m)\r\n\r\n\r\nload()\r\nif __name__ == '__main__':\r\n    bot.run()\r\n","sub_path":".github/workflows/Ezaz_v1_org.py","file_name":"Ezaz_v1_org.py","file_ext":"py","file_size_in_byte":24485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"474136233","text":"# Copyright 2012 Locaweb.\n# All Rights Reserved.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\");\n#    you may not use this file except in compliance with the License.\n#    You may obtain a copy of the License at\n#\n#        http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS,\n#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#    See the License for the specific language governing permissions and\n#    limitations under the License.\n#\n# @author: Juliano Martinez (ncode), Locaweb.\n# @author: Luiz Ozaki, Locaweb.\n\nfrom sqlalchemy.orm import sessionmaker\nfrom simplenet.db import models\nfrom simplenet.common.config import get_logger\n\nlogger = get_logger()\n\n_engine = models.engine\n_maker = None\n\ndef get_database_session(autocommit=True, expire_on_commit=True):\n    global _maker, _engine\n    if not _maker:\n        assert _engine\n        _maker = sessionmaker(bind=_engine,\n                              autocommit=autocommit,\n                              expire_on_commit=expire_on_commit)\n    return _maker()\n\ndef unregister_database_models(base):\n    global _engine\n    assert _engine\n    base.metadata.drop_all(_engine)\n","sub_path":"src/simplenet/db/db_utils.py","file_name":"db_utils.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"363944486","text":"import subprocess\nimport json\nimport time\nimport sys\nimport os\nfrom datetime import datetime\n\npath_to_key = '/Users/maxim/maxims-mb-us-west-2.pem'\ndata_volume = '073f54c95f233748f'\n\nkey_name = path_to_key.split('/')[-1].split('.')[0]\n\n## Request ec2 instance\nbatcmd = 'aws ec2 request-spot-instances --instance-count 1 --type one-time --launch-specification file://spot-instance.json'\nrequest_spot = subprocess.check_output(batcmd, shell=True).decode('utf8')\n\nprint('submitted request, waiting 60 seconds')\ntime.sleep(60)\n\n#Obtain instance id of request\nbatcmd = 'aws ec2 describe-spot-instance-requests'\n\nspot_request_info = subprocess.check_output(batcmd, shell=True).decode('utf8')\nspot_request_info = json.loads(spot_request_info)\n\ndef return_instance_id():\n\twhile True:\n\t\tfor idx, request in enumerate(spot_request_info['SpotInstanceRequests']):\n\t\t\tif (request['State'] == 'active') & (request['LaunchSpecification']['KeyName'] == key_name):\n\t\t\t\tinstance_id = spot_request_info['SpotInstanceRequests'][idx]['InstanceId']\n\t\t\t\treturn(instance_id)\n\t\t\t\ttime.sleep(10)\n\t\t\telif request['LaunchSpecification']['KeyName'] == key_name:\n\t\t\t\tprint('request state is {}'.format(request['State']))\n\t\t\telse:\n\t\t\t\tpass\n\ninstance_id = return_instance_id()\n\n#Obtain public dns name of instance\nbatcmd = 'aws ec2 describe-instances --instance-ids {}'.format(instance_id)\n\nspot_instance = subprocess.check_output(batcmd, shell=True).decode('utf8')\nspot_instance = json.loads(spot_instance)\npublic_dns = spot_instance['Reservations'][0]['Instances'][0]['PublicDnsName']\nspot_instance_state = spot_instance['Reservations'][0]['Instances'][0]['State']['Name']\n\nprint('public dns = {}'.format(public_dns))\n\nwhile True:\n\tif spot_instance_state == 'running':\n\t\tprint('instance running, waiting 5 minutes to set up server')\n\t\ttime.sleep(60*5)\n\t\t# Attach EBS Volume\n\t\tbatcmd = 'aws ec2 attach-volume --volume-id vol-{} --instance-id {} --device /dev/xvdf'.format(data_volume, instance_id)\n\t\tsubprocess.check_output(batcmd, shell=True)\n\t\tprint('Attached Volume')\n\t\t## Ssh in and configure server\n\t\tprint('building jupyter notebook bridge')\n\t\tsubprocess.call('ssh -i ' + path_to_key + ' -N -f -L 8157:127.0.0.1:8888 ec2-user@{}'.format(public_dns), shell=True)\n\t\tprint('opening interactive shell')\n\t\tsubprocess.call('ssh -i ' + path_to_key + ' ec2-user@{}'.format(public_dns), shell=True)\n\t\tbreak\n\telse:\n\t\tprint('state of instance id {}: {}'.format(instance_id, spot_instance_state))\n\t\ttime.sleep(30)","sub_path":"aws_mgmt/aws_script.py","file_name":"aws_script.py","file_ext":"py","file_size_in_byte":2475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"42664543","text":"import math\n# Ex.1\nr = 5\nv = 4/3*math.pi*r**3\nprint('V of sphere which r = 5:', v)\n# Ex.2\nprice = 24.95\ndiscount = 0.6  # 40% discount\nnumber = 60\nshipping = 3 + 0.75*(number-1)\nprint('Price (with shipping) for 60 books:', number*price + shipping)\n# Ex.3\ntime_out = 6*60**2+52*60  # 6:52\nprint('Started run at 6:52:0')\nslow_pace = 8*60+15 # mile in 8 minutes and 15 sec\nfast_pace = 7*60+12 # mile in 7 minutes and 12 sec\ntime_back = time_out + 2*slow_pace + 3*fast_pace\nhours = time_back//60**2\nminutes = (time_back - hours*60**2)//60\nseconds = time_back - hours*60**2 - minutes*60\nprint('Came back for breakfast at ', hours, ':', minutes, ':', seconds, sep='')\n","sub_path":"BookTasks/Chapter2.py","file_name":"Chapter2.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"376748661","text":"# Author: Ricardo Baptista and Matthias Poloczek\n# Date:   June 2018\n#\n# See LICENSE.md for copyright information\n#\n\nimport numpy as np\nfrom .utils import Cat_LinReg, Cat_simulated_annealing, Cat_sample_inputs\nimport time\n\nfrom copy import deepcopy\n\n\ndef Cat_BOCS_suggest(X, Y, n_Cats, order=2, SABudget=100,\n\t\t\t\t\t           SA_reruns=50, gibbs_time_limit=30):\n\t# Cat_BOCS_suggest: makes a suggestion about where to evaluate next\n\t#\n\t# ARGS\n\t#  X: N*categories integer matrix\n\t#  Y: N real values\n\t#  n_Cats: upper bounds of each categorical/integer argument\n\t#  order: the highest order polynomial term in regression model\n\t#  SABudget: iterations of (and SA) BOCS to perform\n\t#  SA_reruns: number of restarts for SA\n\t#  iteration_time_limit: integer, seconds per iteration\n\t#\n\t# RETURNS\n\t#  x_new: a integer/categorical vector\n\n\tx_vals = deepcopy(X)\n\ty_vals = deepcopy(Y)\n\n\t# Rescale y-values to [0, 1]\n\ty_vals = y_vals - y_vals.min()\n\tif y_vals.max()==0:\n\t\tymax=0.01\n\telse:\n\t\tymax=y_vals.max()\n\ty_vals = y_vals/ymax\n\n\tassert all([all(xi=0) for xi in x_vals]), \"x_vals cannot be negative\"\n\tassert x_vals.shape[0]==len(y_vals), \"x_vals and y_vals must be same length\"\n\n\t# start the while loop with an invalid x_new\n\tx_new = x_vals[0,:]\n\n\t# to avoid getting stuck\n\tstart_time = time.time()\n\n\tpenalize = lambda x: np.any(np.all(x_vals == x, axis=1))* np.max(y_vals)\n\n\t# keep thompson sampling until x_new is not in past data\n\twhile np.any(np.all(x_vals == x_new, axis=1)):\n\n\t\t# if time.time() - start_time > iteration_time_limit:\n\t\t# \tprint(\"Hit time limit, random sampling x_new instead\")\n\t\t#\n\t\t# \t# keep random sampling until x_new is not in past data\n\t\t# \twhile np.any(np.all(x_vals == x_new, axis=1)):\n\t\t# \t\tx_new = Cat_sample_inputs(1, n_Cats).reshape((-1,))\n\t\t#\n\t\t# else:\n\t\t# train linear model\n\t\tLR = Cat_LinReg(n_Cats, order)\n\t\tLR.train(x_vals, y_vals, gibbs_time_limit)\n\n\t\t# define aqcuitistion function\n\t\tstat_model = lambda x: LR.surrogate_model(x, LR.alpha) + penalize(x)\n\n\t\t# optimize acquisition function\n\t\tSA_model = np.zeros((SA_reruns, len(n_Cats)))\n\t\tSA_obj\t = np.zeros(SA_reruns)\n\t\tfor j in range(SA_reruns):\n\t\t\t(optModel, objVals) = Cat_simulated_annealing(stat_model, n_Cats, SABudget)\n\t\t\tSA_model[j,:] = optModel[-1,:]\n\t\t\tSA_obj[j]\t  = objVals[-1]\n\n\t\tmin_idx = np.argmin(SA_obj)\n\t\tx_new = SA_model[min_idx,:]\n\n\treturn x_new.astype(int)\n\n\ndef Cat_BOCS1(fnoisy,f, n_Cats, n_init=None, n_evals=20, verbose=False,knownmax=0, **kwargs):\n\t# Miimizes f over the constrained search space of integers\n\t#\n\t# ARGS\n\t#  f: objective function to be minimized\n\t#  n_Cats: a vector of the number of categories for each input to f\n\t#  n_init: number of (x, f(x)) to warm start the optimization\n\t#  n_evals: total number of calls to f(x)\n\t#  verbose: boolean, execute print statements\n\t#  **kwargs: passed to Cat_BOCS_suggest()\n\t#\n\t# RETURNS\n\t#  dict: x, y, min_x, min_y\n\t#\n##\tAmmended by Paul Kent to return an array of the value of the best solution at iteration\n\tARRAY2=[]\n\tTimerArray=[]\n\tTrueVals=np.zeros((0))\n##\n\tx_vals = Cat_sample_inputs(n_init, n_Cats)\n\ty_vals = np.zeros((0))\n\n\tfor xi in x_vals:\n\t\ty_new = fnoisy(xi)\n\t\ty_vals  = np.append(y_vals, y_new)\n\t\tTrueVals = np.append(TrueVals,-f(x_vals[np.argmin(y_vals),:]))\n\n##\n\t\tARRAY2 = 
np.append(ARRAY2,-np.min(y_vals))\n##\n\t\tif(verbose):\n\t\t\t\tprint(\"Initial sampling\", len(y_vals),\". x_new:\", xi, \", y_new:\", -y_new, \", best x:\", x_vals[np.argmin(y_vals),:], \", best y:\", -np.min(y_vals))\n\n\twhile x_vals.shape[0] < n_evals:\n##\n\t\tStart = time.time()\n##\n\t\tx_new = Cat_BOCS_suggest(X=x_vals, Y=y_vals, n_Cats=n_Cats, **kwargs)\n\n\t\ty_new = fnoisy(x_new)\n\n\t\tx_vals = np.vstack([x_vals, x_new])\n\t\ty_vals = np.append(y_vals, y_new)\n##\n\t\tFinish = time.time()\n\t\tARRAY2 = np.append(ARRAY2,-np.min(y_vals))\n\t\tTrueVals = np.append(TrueVals,-f(x_vals[np.argmin(y_vals),:]))\n\t\tTimerArray = np.append(TimerArray,(Finish-Start))\n##\n\t\tif verbose:\n\t\t\tprint(x_vals.shape[0], \". x_new:\", x_new, \", y_new:\", -y_new,\n\t\t\t\t\t\t\", best x:\", x_vals[np.argmin(y_vals),:],\", best y:\", -np.min(y_vals))\n\t\tif knownmax!=0:\n\t\t\tif np.min(y_vals)==-knownmax:\n\t\t\t\tprint(n_evals)\n\t\t\t\tprint(x_vals.shape[0])\n\t\t\t\tfor j in range(n_evals-x_vals.shape[0]):\n\t\t\t\t\tARRAY2 = np.append(ARRAY2,-np.min(y_vals))\n\t\t\t\t\tTimerArray = np.append(TimerArray,0)\n\t\t\t\t#ARRAY2=ARRAY2.reshape(1,n_evals)\n\t\t\t\t#print(ARRAY2.shape)\n\t\t\t\treturn(ARRAY2,TimerArray)\n\t#print (TrueVals)\n\treturn (ARRAY2, TimerArray,TrueVals)\n\t#'x': x_vals,\n\t#'y': y_vals,\n\t#'min_x':x_vals[np.argmin(y_vals),:],\n\t#'min_y':np.min(y_vals)}\n\n\n\ndef MO_Cat_BOCS_suggest(X, Y, **kwargs):\n    # Takes in past observations and returns a new x value.\n    # Y may have more than one dimension, i.e. multi-objective and\n    # a random projection is used.\n    #\n    # ARGS\n    #  X: N*x_dims matrix of inputs\n    #  Y: N*y_dims matrix of outputs\n    #  **kwargs: passed to Cat_BOCS_suggest()\n    #\n    # RETURNS\n    #  x_new: an x_dims array of integers to evaluate the objective next\n\n\n    x_vals = np.array(deepcopy(X))\n    y_vals = np.array(deepcopy(Y))\n\n    if len(y_vals.shape)==1:\n        y_vals = y_vals.reshape((-1,1))\n\n    assert x_vals.shape[0]==y_vals.shape[0], \"x and y must have equal entries\"\n\n\n    # normalize y values to unit square\n    y_vals = y_vals - y_vals.min(axis=0)\n    y_vals = y_vals/y_vals.max(axis=0)\n\n    # pick random direction in positive quadrant\n    n_objs = y_vals.shape[1]\n    w = np.abs(np.random.normal(size = (n_objs, 1)))\n    w = w / np.sum(w*w)\n\n    # project y values onto random direction\n    scalar_y_vals = np.matmul(y_vals, w).reshape(-1,)\n\n    x_new = Cat_BOCS_suggest(x_vals, scalar_y_vals, **kwargs)\n\n    return(x_new)\n\ndef MO_Cat_BOCS(f, n_Cats, n_init=None, n_evals=20, verbose=False, **kwargs):\n    # Minimizes f over the constrained search space of integers\n    #\n    # ARGS\n    #  f: objective function to be minimized\n    #  n_Cats: a vector of the number of categories for each input to f\n    #  n_init: number of (x, f(x)) to warm start the optimization\n    #  n_evals: total number of calls to f(x)\n    #  verbose: boolean, print each iteration\n    #  **kwargs: passed to Multi_Cat_BOCS_suggest\n    #\n    # RETURNS\n    #  dict: x, y, x_pareto, y_pareto\n    #\n\n    if n_init is None:\n        n_init = np.max(n_Cats)\n\n    x_vals = Cat_sample_inputs(n_init, n_Cats)\n    y_vals = f(x_vals[0,:])\n\n    for xi in x_vals[1:]:\n        y_new = f(xi)\n        y_vals  = np.vstack([y_vals, y_new])\n        if(verbose):\n                print(\"Initial sampling\", len(y_vals),\". 
x_new:\", xi, \", y_new:\", y_new)\n\n    while x_vals.shape[0] < n_evals:\n        x_new = MO_Cat_BOCS_suggest(X=x_vals, Y=y_vals, n_Cats=n_Cats, **kwargs)\n        y_new = f(x_new)\n\n        x_vals = np.vstack([x_vals, x_new])\n        y_vals = np.vstack([y_vals, y_new])\n\n        if verbose:\n            print(x_vals.shape[0], \". x_new:\", x_new, \", y_new:\", y_new)\n\n\n    # get the non-dominated points and sort them by first column\n    dominated = [np.any(np.all(y_vals result:\r\n                min_df = result\r\n        #print(\"Minimum Signal Length: \", min_df+1)\r\n        for i in range(len(ctg)):\r\n            #Df without merge\r\n            #df.append(ctg[i].iloc[:min_df+1])\r\n            #Merge 2 ann and ctg\r\n            df.append(pd.merge(ctg[i].iloc[:min_df+1], ann, on=\"Name\"))\r\n        #print(df[1])\r\n        return df, ann, min_df+1\r\n\r\n    def Normalization(self,feature,n):\r\n        feature = feature.reshape((len(feature), 1))\r\n        scaler = MinMaxScaler(feature_range=(0, 1))\r\n\r\n        scaler_fit = scaler.fit(feature)\r\n        #print(\"Frame: \", n)\r\n        #print('Min: %f, Max: %f' % (scaler.data_min_, scaler.data_max_))\r\n        return scaler.transform(feature)\r\n\r\n    def Normalize_X(self,X, name):\r\n        #X = X.replace(0, 1)\r\n        n = name\r\n        for i in range(1,3,1):\r\n            if i ==1 :\r\n                j= 1;\r\n            else:\r\n                j = 0.01\r\n\r\n            X.iloc[:,i] = X.iloc[:,i].replace(0, j)\r\n            feature = X.iloc[:,i].values\r\n            X.iloc[:,i] = self.Normalization(feature,n)\r\n        \r\n        return X\r\n\r\n    def Normalize_y(self,y, name):\r\n        n = name\r\n        #feature = y.iloc[:,:].values\r\n        #y.iloc[:,:] = self.Normalization(feature,n)\r\n        feature = y.values\r\n        y = self.Normalization(feature,n)\r\n        return y\r\n\r\n    def iterate_train(self,batch_size=16):\r\n        total_seqs = self.X_train.shape[1]\r\n        permutation = np.random.permutation(total_seqs)\r\n        total_batches = total_seqs // batch_size\r\n\r\n        for i in range(total_batches):\r\n            start = i*batch_size\r\n            end = start + batch_size\r\n            batch_x = self.X_train[:,permutation[start:end]]\r\n            batch_y = self.y_train[:,permutation[start:end]]\r\n            yield (batch_x,batch_y)\r\n\r\n    def batching_X(self, X,signal_length,window):\r\n        #print(\"X rows: \",X.shape[0])\r\n        sn = signal_length\r\n        w = window\r\n        X = X.to_numpy()\r\n        X = X.reshape(X.shape[0]//w,w, X.shape[1])\r\n        return X\r\n\r\n    def batching_y(self, y, signal_length, window):\r\n        #print(\"y rows: \", y.shape[0])\r\n        sn = signal_length\r\n        w = window\r\n        y = y.to_numpy()\r\n        y = np.where(y <= 7, 1, y)\r\n        y = np.where(y != 1, 0, y)\r\n        y = y.reshape(y.shape[0]//w,w)\r\n        return y\r\n        #print(\"Y rows: \",y.shape[0])\r\n        #y = y.to_numpy()\r\n        #y = y.reshape(1,y.shape[0])\r\n        #return y\r\n    \r\n    def __init__(self,w):\r\n        #CTG Set initialize\r\n        ctg = []\r\n        ctg, ann, signal_length = self.data_frame()\r\n        self.window = w\r\n        #Split \r\n        train, test_valid = train_test_split(ctg, test_size = 0.3, random_state=42)\r\n        test, valid = train_test_split(test_valid, test_size = 0.66, random_state=42)\r\n        #print(len(train),len(test),len(valid),len(ctg))\r\n\r\n    
    #Without Merge\r\n        #y = pd.DataFrame(ann.iloc[:,0:1])\r\n        X=[]\r\n        y=[]\r\n        for i in ctg:\r\n            X.append(i.iloc[:,1:4])\r\n            y.append(i.iloc[:,4])\r\n        #Split train, test and validation\r\n        X_train, X_test_valid, y_train, y_test_valid = train_test_split(X, y, test_size=0.3, random_state=42)\r\n        X_test, X_valid, y_test, y_valid = train_test_split(X_test_valid, y_test_valid, test_size=0.66, random_state=42)\r\n        #Concat\r\n        X_train = pd.concat(X_train)\r\n        X_test = pd.concat(X_test)\r\n        X_valid = pd.concat(X_valid)\r\n        y_train = pd.concat(y_train)\r\n        y_test = pd.concat(y_test)\r\n        y_valid = pd.concat(y_valid)\r\n        #Normalize X\r\n        self.X_train = self.Normalize_X(X_train,'X_train')\r\n        self.X_test = self.Normalize_X(X_test,'X_test')\r\n        self.X_valid = self.Normalize_X(X_valid,'X_valid')\r\n        #Normalize Y\r\n        #self.y_train = self.Normalize_y(y_train,'y_train')\r\n        #self.y_test = self.Normalize_y(y_test,'y_test')\r\n        #self.y_valid = self.Normalize_y(y_valid,'y_valid')\r\n        #Batching X\r\n        self.X_train = self.batching_X(X_train,signal_length,self.window)\r\n        self.X_test = self.batching_X(X_test,signal_length,self.window)\r\n        self.X_valid = self.batching_X(X_valid,signal_length,self.window)\r\n        #Batching Y without Merge\r\n        #self.y = self.batching_y(y)\r\n        #self.y_train = self.batching_y(y_train)\r\n        #self.y_test = self.batching_y(y_test)\r\n        #self.y_valid = self.batching_y(y_valid)\r\n\r\n        #Batching Y with Merge\r\n        self.y_train = self.batching_y(y_train,signal_length,self.window)\r\n        self.y_test = self.batching_y(y_test,signal_length,self.window)\r\n        self.y_valid = self.batching_y(y_valid,signal_length,self.window)\r\n\r\n\r\nclass TrainingModel:\r\n    #Similar - Person\r\n    #Similar loss, acc - Binary Cross Entropy, Reduce Mean\r\n    #Learning Rate: 0.01-0.02 for LTC, 0.001 for all other models.\r\n    def __init__(self,window, model_type,model_size,sparsity_level=0.0,learning_rate = 0.001):\r\n        self.model_type = model_type\r\n        self.window = window\r\n        self.constrain_op = []\r\n        self.sparsity_level = sparsity_level\r\n        self.X = tf.placeholder(dtype=tf.float32,shape=[None,None,3])\r\n        self.target_y = tf.placeholder(dtype=tf.int32,shape=[None,None])\r\n\r\n        self.model_size = model_size\r\n        head = self.X\r\n        \r\n        #Print Shape 1\r\n        print(\"Head Shape 1\",head.shape)\r\n        if(model_type == \"lstm\"):\r\n            #unstacked_signal = tf.unstack(self.X,axis=0)\r\n            self.fused_cell = tf.nn.rnn_cell.LSTMCell(model_size)\r\n\r\n            head,_ = tf.nn.dynamic_rnn(self.fused_cell,head,dtype=tf.float32,time_major=True)\r\n        elif(model_type.startswith(\"ltc\")):\r\n            learning_rate = 0.01 # LTC needs a higher learning rate\r\n            self.wm = ltc.LTCCell(model_size)\r\n            if(model_type.endswith(\"_rk\")):\r\n                self.wm._solver = ltc.ODESolver.RungeKutta\r\n            elif(model_type.endswith(\"_ex\")):\r\n                self.wm._solver = ltc.ODESolver.Explicit\r\n            else:\r\n                self.wm._solver = ltc.ODESolver.SemiImplicit\r\n\r\n            head,_ = tf.nn.dynamic_rnn(self.wm,head,dtype=tf.float32,time_major=True)\r\n            
self.constrain_op.extend(self.wm.get_param_constrain_op())\r\n        elif(model_type == \"node\"):\r\n            self.fused_cell = NODE(model_size,cell_clip=-1)\r\n            head,_ = tf.nn.dynamic_rnn(self.fused_cell,head,dtype=tf.float32,time_major=True)\r\n        elif(model_type == \"ctgru\"):\r\n            self.fused_cell = CTGRU(model_size,cell_clip=-1)\r\n            head,_ = tf.nn.dynamic_rnn(self.fused_cell,head,dtype=tf.float32,time_major=True)\r\n        elif(model_type == \"ctrnn\"):\r\n            self.fused_cell = CTRNN(model_size,cell_clip=-1,global_feedback=True)\r\n            head,_ = tf.nn.dynamic_rnn(self.fused_cell,head,dtype=tf.float32,time_major=True)\r\n        else:\r\n            raise ValueError(\"Unknown model type '{}'\".format(model_type))\r\n        target_y = tf.expand_dims(self.target_y,axis=-1)\r\n        print(target_y.shape)\r\n        \r\n        #Print Shape 2\r\n        print(\"Head Shape 2\",head.shape)\r\n        if(self.sparsity_level > 0):\r\n            self.constrain_op.extend(self.get_sparsity_ops())\r\n        #Change Logit shape\r\n        self.y = tf.layers.Dense(2,activation=None)(head)\r\n        print(\"logit shape: \",str(self.y.shape))\r\n        self.loss = tf.reduce_mean(tf.losses.sparse_softmax_cross_entropy(\r\n            labels = self.target_y,\r\n            logits = self.y,\r\n        ))\r\n        optimizer = tf.train.AdamOptimizer(learning_rate)\r\n        self.train_step = optimizer.minimize(self.loss)\r\n\r\n        model_prediction = tf.argmax(input=self.y, axis=2)\r\n        self.accuracy = tf.reduce_mean(tf.cast(tf.equal(model_prediction, tf.cast(self.target_y,tf.int64)), tf.float32))\r\n\r\n        self.sess = tf.InteractiveSession()\r\n        self.sess.run(tf.global_variables_initializer())\r\n\r\n        # self.result_file = os.path.join(\"results\",\"ctg\",\"{}_{}_{:02d}.csv\".format(model_type,model_size,int(100*self.sparsity_level)))\r\n        self.result_file = os.path.join(\"results\",\"ctg_class\",\"{}_{}.csv\".format(model_type,model_size))\r\n        if(not os.path.exists(\"results/ctg_class\")):\r\n            os.makedirs(\"results/ctg_class\")\r\n        if(not os.path.isfile(self.result_file)):\r\n            with open(self.result_file,\"w\") as f:\r\n                f.write(\"window size, best epoch, train loss, train accuracy, valid loss, valid accuracy, test loss, test accuracy\\n\")\r\n\r\n        self.checkpoint_path = os.path.join(\"tf_sessions\",\"ctg_class\",\"{}\".format(model_type))\r\n        if(not os.path.exists(\"tf_sessions/ctg_class\")):\r\n            os.makedirs(\"tf_sessions/ctg_class\")\r\n            \r\n        self.saver = tf.train.Saver()\r\n\r\n    def get_sparsity_ops(self):\r\n        tf_vars = tf.trainable_variables()\r\n        op_list = []\r\n        for v in tf_vars:\r\n            # print(\"Variable {}\".format(str(v)))\r\n            if(v.name.startswith(\"rnn\")):\r\n                if(len(v.shape)<2):\r\n                    # Don't sparsify biases\r\n                    continue\r\n                if(\"ltc\" in v.name and (not \"W:0\" in v.name)):\r\n                    # LTC can be sparsified by only setting w[i,j] to 0\r\n                    # both input and recurrent matrix will be sparsified\r\n                    continue\r\n                op_list.append(self.sparse_var(v,self.sparsity_level))\r\n                \r\n        return op_list\r\n        \r\n    def sparse_var(self,v,sparsity_level):\r\n        mask = np.random.choice([0, 1], size=v.shape, 
p=[sparsity_level,1-sparsity_level]).astype(np.float32)\r\n        v_assign_op = tf.assign(v,v*mask)\r\n        print(\"Var[{}] will be sparsified with {:0.2f} sparsity level\".format(\r\n            v.name,sparsity_level\r\n        ))\r\n        return v_assign_op\r\n\r\n    def save(self):\r\n        self.saver.save(self.sess, self.checkpoint_path)\r\n\r\n    def restore(self):\r\n        self.saver.restore(self.sess, self.checkpoint_path)\r\n\r\n\r\n    def fit(self,ctg_data,epochs,verbose=True,log_period=50):\r\n\r\n        best_valid_accuracy = 0\r\n        best_valid_stats = (0,0,0,0,0,0,0)\r\n        self.save()\r\n        print(\"Entering training loop\")\r\n        #print(\"self.X: \",self.X.shape)\r\n        #print(\"ctg_data.X_test: \",ctg_data.X_test.shape)\r\n        #print(\"self.target_y\",self.target_y.shape)\r\n        #print(\"ctg_data.y_test\",ctg_data.y_test.shape)\r\n        #print(\"self.accuracy\",self.accuracy)\r\n        #print(\"self.loss\",self.loss)\r\n        for e in range(epochs):\r\n            if(verbose and e%log_period == 0):\r\n                test_acc,test_loss = self.sess.run([self.accuracy,self.loss],{self.X:ctg_data.X_test,self.target_y: ctg_data.y_test})\r\n                valid_acc,valid_loss = self.sess.run([self.accuracy,self.loss],{self.X:ctg_data.X_valid,self.target_y: ctg_data.y_valid})\r\n                if(valid_acc > best_valid_accuracy and e > 0):\r\n                    best_valid_accuracy = valid_acc\r\n                    best_valid_stats = (\r\n                        e,\r\n                        np.mean(losses),np.mean(accs)*100,\r\n                        valid_loss,valid_acc*100,\r\n                        test_loss,test_acc*100\r\n                    )\r\n                    self.save()\r\n\r\n            #Training\r\n            print(\"Epoch: \",e)\r\n            losses = []\r\n            accs = []\r\n            for batch_x,batch_y in ctg_data.iterate_train(batch_size=32):\r\n                acc,loss,_ = self.sess.run([self.accuracy,self.loss,self.train_step],{self.X:batch_x,self.target_y: batch_y})\r\n                if(len(self.constrain_op) > 0):\r\n                    self.sess.run(self.constrain_op)\r\n\r\n                losses.append(loss)\r\n                accs.append(acc)\r\n                #print(\"loss: \" + str(loss))\r\n                #print(\"acc: \" + str(acc))\r\n\r\n            if(verbose and e%log_period == 0):\r\n                print(\"Epochs {:03d}, train loss: {:0.2f}, train accuracy: {:0.2f}%, valid loss: {:0.2f}, valid accuracy: {:0.2f}%, test loss: {:0.2f}, test accuracy: {:0.2f}%\".format(\r\n                    e,\r\n                    np.mean(losses),np.mean(accs)*100,\r\n                    valid_loss,valid_acc*100,\r\n                    test_loss,test_acc*100\r\n                ))\r\n            if(e > 0 and (not np.isfinite(np.mean(losses)))):\r\n                break\r\n        self.restore()\r\n        best_epoch,train_loss,train_acc,valid_loss,valid_acc,test_loss,test_acc = best_valid_stats\r\n        print(\"Best epoch {:03d}, train loss: {:0.2f}, train accuracy: {:0.2f}%, valid loss: {:0.2f}, valid accuracy: {:0.2f}%, test loss: {:0.2f}, test accuracy: {:0.2f}%\".format(\r\n            best_epoch,\r\n            train_loss,train_acc,\r\n            valid_loss,valid_acc,\r\n            test_loss,test_acc\r\n        ))\r\n        with open(self.result_file,\"a\") as f:\r\n            f.write(\"{:03d}, {:03d}, {:0.2f}, {:0.2f}, {:0.2f}, {:0.2f}, {:0.2f}, {:0.2f}\\n\".format(\r\n            
self.window,\r\n            best_epoch,\r\n            train_loss,train_acc,\r\n            valid_loss,valid_acc,\r\n            test_loss,test_acc\r\n        ))\r\n\r\n# NOTE: the original script referenced an undefined loop variable i here;\r\n# the window size below is an assumed example value.\r\ni = 32\r\nctg_data = CtgData(i)\r\nprint(\"Window: \", i)\r\ntf.reset_default_graph()\r\nmodel = TrainingModel(window=ctg_data.window, model_type = \"lstm\", model_size=32, sparsity_level=0.0)\r\n\r\nmodel.fit(ctg_data ,epochs=200,log_period=1)\r\n","sub_path":"CTG_Workspace/ctg_classify_pH.py","file_name":"ctg_classify_pH.py","file_ext":"py","file_size_in_byte":15217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"627770043","text":"from io import StringIO\nimport pandas as pd\nimport time\nimport datetime\nimport numpy as np\nimport boto3\nimport io\nimport requests\nimport os\n# import s3fs\n\n\ndef readxls_rawprod_convertcsv(bucket, fileName, dateTimeObj):\n    s3 = boto3.client('s3')\n    obj = s3.get_object(Bucket=bucket, Key=fileName)\n    file_obj = io.BytesIO(obj['Body'].read())\n\n    newdata = None\n    cols_skip = []\n    datarawcheck = None\n\n    ###Check excel file\n    try:\n        datarawcheck = pd.read_excel(file_obj, sheet_name='input', header=None)\n        print(\"bucket:\" + bucket + \" filename:\" + fileName)\n        print(\"Excel Openned\")\n    except Exception as e:\n        print(\"Can't open the file. Please check the S3 bucket\")\n        print(str(e))\n        return\n\n    #checking is there any blank column we'll tag the position and skip them in next uploading\n    for i in range(len(datarawcheck.columns)):\n        if (((pd.isna(datarawcheck[i])).nunique()) == 1) & (((\n            (pd.isna(datarawcheck[i])).unique())[0]) == True):\n            cols_skip.append(i)\n        else:\n            break\n\n    #checking is there any blank rows for each rows we'll skip it\n    rows_skip = []\n    for i in range(len(datarawcheck.iloc[i])):\n        if (((pd.isna(datarawcheck.iloc[i])).nunique()) == 1) & (((\n            (pd.isna(datarawcheck.iloc[i])).unique())[0]) == True):\n            rows_skip.append(i)\n        else:\n            break\n\n    # define unblank columns and rows\n    cols = [i for i in range(len(datarawcheck.columns)) if i not in cols_skip]\n    rows = len(rows_skip)\n\n    del datarawcheck\n\n    newdata = pd.read_excel(file_obj,\n                            sheet_name='input',\n                            skiprows=rows,\n                            usecols=cols)\n    # newdata= pd.read_excel(file_obj, sheet_name='input')\n    newdata.reset_index()\n    newdata = newdata.dropna(subset=['site'])\n    newdata['date'] = pd.to_datetime(newdata['date'])\n    print(newdata.head())\n    if (newdata is not None or not newdata.empty):\n        # newchecking_date = min(newdata['date']) #just to check if user upload file with previous date data\n        # flag with timeupload and user upload\n        a = str(dateTimeObj)\n        b = fileName\n        print(a)\n        print(b)\n        newdata['timesupload'] = a\n        newdata['userupload'] = b\n\n        # create new filename\n        yr = str(dateTimeObj.year)\n        mo = str(dateTimeObj.month)\n        day = str(dateTimeObj.day)\n        hr = str(dateTimeObj.hour+7)\n        mn = str(dateTimeObj.minute)\n        sc = str(dateTimeObj.second)\n        up_filename = yr + mo + day + hr + mn + sc + '.csv'\n\n        target_bucket = 'abm-data-platform-s3-raw'\n        target_object = 'rsw/raw/rawcsv_rsw_ds_rawtest1/' + up_filename\n        csv_buffer = StringIO()\n        newdata.to_csv(csv_buffer, index=False)\n        # s3_resource = boto3.resource('s3',aws_access_key_id=aws_id, aws_secret_access_key=aws_secret)\n        s3_resource = boto3.resource('s3')\n        s3_resource.Object(target_bucket,\n                           target_object).put(Body=csv_buffer.getvalue())\n        client = boto3.client('glue')\n        response = client.start_workflow_run(\n            Name='abm-rsw-dataingestion-rawtest1'\n        )  #groupname-subgroup-dataingestion-table_name\n        print('Lambda function is DONE')\n\n    else:\n        print(\"No new data inserted\")\n\n    
return\n","sub_path":"Python-for-Data-Analysis/Pandas with AWS S3 and Lambda/.ipynb_checkpoints/rawprod_to_csv-checkpoint.py","file_name":"rawprod_to_csv-checkpoint.py","file_ext":"py","file_size_in_byte":3385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"399957370","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport random\nfrom NeuralNet import *\n\n# 1. mpg:           continuous\n# 2. cylinders:     multi-valued discrete\n# 3. displacement:  continuous\n# 4. horsepower:    continuous\n# 5. weight:        continuous\n# 6. acceleration:  continuous\n# 7. model year:    multi-valued discrete\n# 8. origin:        multi-valued discrete\n# 9. car name:      string (unique for each instance)\n\nnames =  ['mpg','cylinders','displacement','horsepower','weight',\n          'acceleration','year','origin']\n\ndef plotPredictions(targets, predictions):\n    plt.subplot(2, 1, 1)\n    plt.plot(targets[:, 0], 'o-')\n    plt.plot(predictions[:, 0], 'o-')\n    plt.legend((\"Testing MPG\", \"Model\"), 'upper right')\n\n    plt.subplot(2, 1, 2)\n    plt.plot(targets[:, 1], 'x-')\n    plt.plot(predictions[:, 1], 'x-')\n    plt.legend((\"Testing HP\", \"Model\"), 'upper right')\n\ndef missingIsNan(s):\n    if s == '?':\n        return np.nan\n    else:\n        return float(s)\n\ndef partitionSet(data):\n\n    original_ind_data = np.concatenate(((data[:,1:3]), (data[:,4:])), axis=1)\n    original_dep_data = np.concatenate(((data[:,0:3]), (data[:,3:4])), axis=1)\n\n    train_pct = 0.60\n    test_pct = 0.20\n    valid_pct = 0.20\n\n    # indices for all data rows\n    nrow = np.shape(original_ind_data)[0]\n    all_indices = xrange(nrow)\n\n    training_size = int(round(nrow*train_pct))\n    testing_size = int(round(nrow*test_pct))\n    valid_size = int(round(nrow*valid_pct))\n\n    # number of training samples\n    training_set_size = int(round(nrow*train_pct))\n\n    remaining_set_size = nrow - training_set_size\n\n    # row indices for training samples\n    training_set_indices = list(set(random.sample(all_indices,training_set_size))) \n\n    # row indices of remaining samples\n    remaining_indices = list(set(all_indices).difference(set(training_set_indices)))\n \n    # adjusted testing set sample percentage for the remainder data\n    adj_pct = ((test_pct*nrow)/(nrow-training_size))\n\n    # row indices of testing samples\n    testing_set_indices = list(set(random.sample(remaining_indices,\\\n                                   int(round(len(remaining_indices)*adj_pct)))))                      \n\n    # remaining belong to validation set\n    validation_set_indices = list(set(remaining_indices).difference(set(testing_set_indices)))\n\n    # create independent data\n    training_set_ind = original_ind_data[training_set_indices,:]\n    testing_set_ind = original_ind_data[testing_set_indices,:]\n    valid_set_ind = original_ind_data[validation_set_indices,:]\n\n    # create dependent data\n    training_set_dep = original_dep_data[training_set_indices,:]\n    testing_set_dep = original_dep_data[testing_set_indices,:]\n    valid_set_dep = original_dep_data[validation_set_indices,:]\n\n    return(training_set_ind, training_set_dep, testing_set_ind, testing_set_dep,\\\n           valid_set_ind, valid_set_dep)         \n\ndataOriginal = np.loadtxt('data/auto-mpg.data',usecols=range(8),\n                          converters={3: missingIsNan})\n\nnotNans = np.isnan(dataOriginal) == False\ngoodRowsMask = notNans.all(axis=1)\ndata = dataOriginal[goodRowsMask,:]\n(Xtrain, Ttrain, Xtest, Ttest, Xvalid, Tvalid) = partitionSet(data)\n\nnnet = NeuralNet(Xtrain,Ttrain,10,nIterations=100,errorPrecision=1.e-4,weightPrecision=1.e-4)\npredictions = nnet.use(Xtrain)\n\n# Plot 1: RMSE for training data versus scg 
epochs\n#plt.figure(1)\n#plt.clf()\n#nnet.plotError()\n#plt.show()\n\n\n# loop over 5-10 different hidden weight number values using the validation data\nhiddenUnits = np.array([1,2,3,4,5])\n#hiddenUnits = np.array([1,2,3,4,5,6,7,8,9,10])\n#plt.figure(1)\n#plt.clf()\n\nerrors = np.zeros((len(hiddenUnits), 2))\nerrors[:] = np.nan\ni = 0\nfor hidden in hiddenUnits:\n\n    # Construct and train a neural network with different hidden units to approximately \n    # fit Xtrain and Ttrain\n    nnet = NeuralNet(Xtrain,Ttrain,hidden,nIterations=1000,errorPrecision=0,weightPrecision=0)\n\n    # run the network on training data set\n    stdTrain = nnet.standardize(Xtrain)\n    TrainPredictions = nnet.use(stdTrain)\n\n    # now run it on validation data set\n    stdValid = nnet.standardize(Xvalid)\n    ValidPredictions = nnet.use(stdValid)\n    \n    errors[i, 0] = np.sqrt(np.mean(((TrainPredictions - Ttrain)**2).flat))\n    errors[i, 1] = np.sqrt(np.mean(((ValidPredictions - Tvalid)**2).flat))\n    i = i + 1\n    \n    #plt.figure(i)\n    #plt.clf()\n    #plotPredictions(Tvalid, ValidPredictions)\n\n#plt.show()\n\n# now determine the best hidden unit value to use from the errors\nvalidErrors=errors[:,1]\nlow=999\nindex=0\nfor j in range(len(validErrors)):\n    if (validErrors[j] < low):\n        low = validErrors[j]\n        index = j\n\n# get the best hidden unit value\nbest = hiddenUnits[index]\n\n\n# use the network with the best number of hidden units\n\nnet2 = NeuralNet(Xtrain, Ttrain, best, weightPrecision=0, errorPrecision=0, nIterations=1000)\n#use the test data in the neural network\nstdTest = net2.standardize(Xtest)\nTestPredictions = net2.use(stdTest)\n#plot the predictions versus actuals for test data\n\nplt.figure(1)\nplt.clf()\nplotPredictions(Ttest, TestPredictions)\n#plotPredVsAct(Ttest, predictionsTest, 0, 1, 'Testing MPG', 'Model MPG')\n#plotPredVsAct(Ttest, predictionsTest, 1, 2, 'Testing Horsepower', 'Model Horsepower')\n\t\n#plot the RMSE values\nplt.ion()\nplt.figure(2)\nplt.clf()\nplt.plot(hiddenUnits, errors)\nplt.legend((\"Train RMSE\", \"Validation RMSE\"), loc='upper right')\nplt.show()\n\n\n","sub_path":"Neural Networks/Gradient Descent/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":5415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"439234608","text":"from __future__ import print_function\nimport sys,os\nfrom time import sleep\nimport traceback\nimport etcd\nimport requests\nimport fleet.v1 as fleet\n\nETCDHOST = \"172.17.42.1\"\nETCDPORT = 2379\n\nETCDKEY = os.getenv('ETCDKEY', \"/services/satellite/\")\n\nFLEET = 'http+unix://%2Fvar%2Frun%2Ffleet.sock'\n\nSATELLITEHOST = os.getenv('SATELLITE_PORT_7020_TCP_ADDR',None)\nSATELLITEPORT = os.getenv('SATELLITE_PORT_7020_TCP_PORT',None)\n\nCOREOS_PRIVATE_IPV4 = os.getenv('COREOS_PRIVATE_IPV4',None)\n\n\n\nif SATELLITEHOST is None or SATELLITEPORT is None or COREOS_PRIVATE_IPV4 is None:\n    print(\"Satellite container not linked or COREOS host ip not in env\")\n    sys.exit(1)\n\nwhile True:\n    try:\n        r = requests.get(\"http://\" + SATELLITEHOST+\":\"+SATELLITEPORT+\"/v1/satellite/\", timeout=10)\n        #We don't care what state the API is in, just that it's there\n        etc = etcd.Client(host=ETCDHOST, port=ETCDPORT)\n        fleet_client = fleet.Client(FLEET)\n        hostid = [m.id for m in fleet_client.list_machines() if m.primaryIP == COREOS_PRIVATE_IPV4][0]\n        key = ETCDKEY + hostid\n        value = COREOS_PRIVATE_IPV4 + \":\" + SATELLITEPORT\n        print(\"Writing value '\" + value + \"' to key '\" + key + \"'\")\n        etc.write(key, value, ttl=30)\n    except Exception as e:\n        print(e)\n        traceback.print_exc(file=sys.stdout)\n    finally:\n        sleep(20)","sub_path":"Docker/satellite-discovery/discovery.py","file_name":"discovery.py","file_ext":"py","file_size_in_byte":1361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"512114119","text":"#! /usr/bin/env python\nimport os.path\nimport sys\nimport sferes\n\nprint(sys.path[0])\n\n# # Adding Module paths (to take additional waf_tools from subdirs into account)\n# MODULES_PATH = os.path.abspath(os.path.join(sys.path[0], os.pardir, os.pardir, 'modules'))\n# for specific_module_folder in os.listdir(MODULES_PATH):\n#     sys.path.append(os.path.join(MODULES_PATH, specific_module_folder))\n# sys.path.insert(0, os.path.abspath(os.path.join(sys.path[0], os.pardir)))\n\nfrom waflib.Configure import conf\n\nimport sferes\n\nprint(sys.path[0])\nsys.path.insert(0, sys.path[0]+'/waf_tools')\nimport boost\nimport eigen\nimport corrade\nimport magnum\nimport magnum_integration\nimport magnum_plugins\n\nPROJECT_NAME = \"imagesd\"\n\n\ndef get_relative_path(waf_tool_name):\n    return PROJECT_NAME + '.' + 'waf_tools' + '.' + waf_tool_name\n\n\ndef options(opt):\n    opt.load('corrade')\n    opt.load('magnum')\n    opt.load('magnum_integration')\n    opt.load('magnum_plugins')\n    opt.load('robox2d')\n\n\n# @conf\ndef configure(conf):\n    print('conf exp:')\n    conf.load('corrade')\n    conf.load('magnum')\n    conf.load('magnum_integration')\n    conf.load('magnum_plugins')\n    conf.load('robox2d')\n\n    conf.check_corrade(components='Utility PluginManager', required=False)\n    conf.env['magnum_dep_libs'] = 'MeshTools Primitives Shaders SceneGraph GlfwApplication'\n    if conf.env['DEST_OS'] == 'darwin':\n        conf.env['magnum_dep_libs'] += ' WindowlessCglApplication'\n    else:\n        conf.env['magnum_dep_libs'] += ' WindowlessGlxApplication'\n    conf.check_magnum(components=conf.env['magnum_dep_libs'], required=False)\n    conf.check_magnum_plugins(components='AssimpImporter', required=False)\n    \n    conf.get_env()['BUILD_MAGNUM'] = True\n    conf.env['magnum_libs'] = magnum.get_magnum_dependency_libs(conf, conf.env['magnum_dep_libs'])\n    conf.check_robox2d()\n    \n    print('done')\n    conf.env.append_unique('LINKFLAGS', '-Wl,--no-as-needed')\n\n\ndef build(bld):\n    bld.env.LIBPATH_PYTORCH = '/workspace/lib/torch/'\n    bld.env.LIB_PYTORCH = 'torch_cpu torch_cuda torch_global_deps shm torch c10 c10_cuda'.split(' ')\n    bld.env.INCLUDES_PYTORCH = ['/workspace/include/torch', '/workspace/include/torch/torch/csrc/api/include']\n\n    bld.env.LIBPATH_PYTHON = '/usr/lib/x86_64-linux-gnu/'\n    bld.env.LIB_PYTHON = ['python3.7m']\n    bld.env.INCLUDES_PYTHON = '/usr/include/python3.7m'\n\n\n    bld.env.INCLUDES_KDTREE = ['/workspace/include']\n\n\n    print(bld.env['magnum_libs'])\n    sferes.create_variants(bld,\n                           source = 'src/imagesd.cpp',\n                           includes='./src . ../../',\n                           uselib='TBB BOOST EIGEN PTHREAD MPI'\n                                + 'PYTHON PYTORCH KDTREE SDL ROBOX2D BOX2D' + bld.env['magnum_libs'],\n                           use = 'sferes2',\n                           target = 'imagesd',\n                           variants = ['VAE', 'AURORA'])\n","sub_path":"airhockey/wscript","file_name":"wscript","file_ext":"","file_size_in_byte":2915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"196189363","text":"# -*- coding:utf-8 -*-\n__author__ = 'IanChen'\nimport requests\nimport time\n\n# 文件下载,主要下载训练集\ndef download_pics(pic_name):\n    url = 'http://smart.gzeis.edu.cn:8081/Content/AuthCode.aspx'\n    res = requests.get(url, stream=True)\n\n    with open(u'J:/数据分析学习/python/机器学习之验证码识别/pics/%s.jpg' % (pic_name), 'wb') as f:\n        for chunk in res.iter_content(chunk_size=1024):\n            if chunk:\n                f.write(chunk)\n                f.flush()\n        f.close()\n\n\nif __name__ == '__main__':\n    for i in range(100):\n        pic_name = int(time.time() * 1000000)\n        download_pics(pic_name)","sub_path":"excercise-master/captcha_test/test_set.py","file_name":"test_set.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"79759254","text":"#===================================================================================\r\n#\r\n#   Code (đã cái tiến, đã đóng gói) cho PP chia đôi. \r\n#       * Input: f(x) trong pt f(x) = 0; khoảng cách li ban đầu (a, b); sai số epsilon\r\n#       * Output: Nghiệm PT f(x) = 0;\r\n#       * Hạn chế: Chưa có gói tìm khoảng cách ly nghiệm\r\n#       * Cải tiến: Giảm khối lượng tính toán - tính f(a) 1 lần + thêm SymPy\r\n#       \r\n#===================================================================================\r\nfrom sympy import *\r\nfrom math import *\r\nimport sys\r\n\r\n\r\n#===================================================================================\r\n# Phần thuật toán chính\r\nclass bisection_oop:\r\n#{\r\n    def __init__(self, a_0, b_0, eps, expr):\r\n    #{\r\n        self.symf = sympify(expr)\r\n        self.f = lambdify(symbols(\"x\"), self.symf, \"math\")\r\n        self.a_0 = a_0\r\n        self.b_0 = b_0\r\n        self.eps = eps\r\n    #}\r\n\r\n\r\n    def __checkInputValidity(self):\r\n    #{\r\n        L = self.a_0\r\n        R = self.b_0\r\n\r\n        # Corner case: f(L) = 0 or f(R) = 0\r\n        if(self.f(L) == 0 or self.f(R) == 0): return 1\r\n\r\n        # Check if a < b\r\n        if(L > R or (L == R and self.f(L) != 0)): return 0\r\n\r\n        # Check if f(a) * f(b) < 0\r\n        if(self.f(L) * self.f(R) >= 0): return 0\r\n        \r\n        return 1\r\n    #}\r\n    def __bisectionMethod(self):\r\n    #{\r\n        # Internal function \r\n        # Return root of f(x) = 0 which f(x), eps and range [a_0, b_0] are given.\r\n        # Assign [a, b] and eps\r\n        nIterations = 0\r\n        left    = self.a_0\r\n        right   = self.b_0\r\n        epsilon = self.eps\r\n\r\n        # Special case: f(a) = 0 or f(b) = 0\r\n        if(self.f(left) == 0): return left\r\n        if(self.f(right) == 0): return right\r\n\r\n        # Evaluation phase\r\n        mid   = (left + right) / 2\r\n        lft_sign = 1 if self.f(left) >= 0 else -1\r\n        while abs(right - left) >= epsilon:\r\n        #{\r\n            mid = (left + right) / 2\r\n            val = self.f(mid)\r\n\r\n            # print(left, mid, right, sep=',', file=sys.stderr)\r\n\r\n            if(val == 0): return mid\r\n            if(val * lft_sign < 0):\r\n                right = mid\r\n            else:\r\n                left = mid\r\n\r\n            nIterations = nIterations + 1\r\n        #}\r\n        \r\n        # print(left, mid, right, sep=',', file=sys.stderr)\r\n        print(f\"Phương pháp chia đôi kết thúc sau {nIterations} lần lặp\")\r\n        return mid\r\n    #}\r\n\r\n\r\n    def Solve(self):\r\n    #{\r\n        if(self.__checkInputValidity() == 0):\r\n        #{\r\n            print(\"Invalid input. The program will now exit\", file=sys.stderr)\r\n            return float(\"NaN\")\r\n        #}\r\n        return self.__bisectionMethod()\r\n    #}\r\n#}","sub_path":"Topic 1 - Find solution for single-variable functions/02.Chia_doi/lib_bisection.py","file_name":"lib_bisection.py","file_ext":"py","file_size_in_byte":2802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"478351873","text":"from django.urls import path, re_path\nfrom . import views\n\n# 注意 re_path 和 path 的区别\n# path 只能 具体值匹配\napp_name = 'polls'\nurlpatterns = [\n    re_path(r'^$', views.index, name='index'),\n    re_path(r'^(?P\\w)/$', views.detail, name='detail'),\n    re_path(r'^(?P\\w)/results/$', views.results, name='results'),\n    re_path(r'^(?P\\w)/vote/$', views.vote, name=\"vote\"),\n    # 带路径可以访问\n    path('hello/',views.index),\n    # 根目录可以访问\n    path('', views.index),\n]","sub_path":"Django/FirstDjango/polls/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"119131637","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Nov 19 11:03:16 2020\r\n\r\n@author: charlie.henry\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\n# Trip data from Austin Open Data Portal:\r\nfile_name=\"Austin_MetroBike_Trips.csv\"\r\n\r\ndata = pd.read_csv(file_name)\r\n\r\n# Kiosk data from Austin Open Data Portal:\r\nkiosks = pd.read_csv(\"Austin_MetroBike_Kiosk_Locations.csv\")\r\n\r\ndata= data.dropna(subset=['Checkout Kiosk ID','Return Kiosk ID','Checkout Date'])\r\n\r\n#Group by date and Checkout/Return Kiosk ID\r\ncheckouts = data.groupby(['Checkout Date','Checkout Kiosk ID'],as_index = False).count()\r\nreturns = data.groupby(['Checkout Date','Return Kiosk ID'],as_index = False).count()\r\n\r\n# Name field for joining later\r\ncheckouts['name'] = checkouts['Checkout Date'] + \" \" + checkouts['Checkout Kiosk ID'].astype(str)\r\nreturns['name'] = returns['Checkout Date'] + \" \" + returns['Return Kiosk ID'].astype(str)\r\n\r\n# Outer join of the checkouts and returns\r\ntrips = pd.merge(checkouts, returns, left_on='name', right_on='name',how = 'outer')\r\n\r\n# Creating Kiosk ID and Date fields \r\ntrips[\"Kiosk ID\"] = \"\"\r\ntrips[\"Date\"] = \"\"\r\nfor index, row in trips.iterrows():\r\n    if np.isnan(row[\"Checkout Kiosk ID_x\"]):\r\n        trips.at[index,\"Kiosk ID\"] = row[\"Return Kiosk ID_y\"]\r\n        trips.at[index,\"Date\"] = row[\"Checkout Date_y\"]\r\n    else:\r\n        trips.at[index,\"Kiosk ID\"] = row[\"Checkout Kiosk ID_x\"]\r\n        trips.at[index,\"Date\"] = row[\"Checkout Date_x\"]\r\n\r\n# Formatting Columns\r\nexport = trips[['Kiosk ID','Date','Trip ID_x','Trip ID_y']].copy()\r\nexport = export.rename(columns={'Trip ID_x' : 'Checkout Count','Trip ID_y' : 'Return Count'})\r\n\r\n# Merge back with Kiosks data\r\nexport = pd.merge(export, kiosks, left_on='Kiosk ID', right_on='Kiosk ID', how = 'left')\r\n\r\n# More formatting, missing joins are zero trips\r\nexport['Checkout Count'] = export['Checkout Count'].fillna(0)\r\nexport['Return Count'] = export['Return Count'].fillna(0)\r\n\r\n# Date formatting and sorting\r\nexport['Date'] = pd.to_datetime(export.Date)\r\nexport = export.sort_values(by='Date')\r\n\r\n# export to csv\r\nexport.to_csv('MetroBike table.csv',index=False)\r\n","sub_path":"code/metrobike tables.py","file_name":"metrobike tables.py","file_ext":"py","file_size_in_byte":2120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"163146403","text":"import os\nimport traceback\n\nimport torch\nimport torch.nn as nn\nimport torch.backends.cudnn as cudnn\nimport torch.optim as optim\nfrom torch.autograd import Variable\n\nfrom torchvision import transforms, datasets\nfrom torchvision import utils\nfrom torchvision.utils import save_image\nimport torchvision.utils as vutils\n\nfrom net import Generator, Discriminator\n\nimage_size = 64\ndevice = 'cuda'\nnz = 100 # z vector size\nngf = 64\nndf = 64\nnc = 3 # Channel\n\ndef create_transform():\n    mean = (0.5, 0.5, 0.5)\n    std = (0.5, 0.5, 0.5)\n\n    return  transforms.Compose([\n        transforms.Resize((image_size, image_size)),\n        transforms.ToTensor(),\n        transforms.Normalize(mean, std)\n    ])\n\n\ndef set_dataloader(dataset_path, transform, batch_size):\n    dataset = datasets.ImageFolder(root=dataset_path, transform=transform)\n    return torch.utils.data.DataLoader(dataset, batch_size=batch_size,\n                                       shuffle=True, num_workers=4)\n\n\ndef weights_init(m):\n    classname = m.__class__.__name__\n    if classname.find('Conv') != -1:\n        m.weight.data.normal_(0.0, 0.02)\n    elif classname.find('BatchNorm') != -1:\n        m.weight.data.normal_(1.0, 0.02)\n        m.bias.data.fill_(0)\n\n\ndef training_with_real(D, criterion, label, real_image):\n    # 勾配の更新\n    D.zero_grad()\n    real_image_output = D(real_image)\n\n    error_real_image = criterion(real_image_output,\n                                 label)\n    error_real_image.backward()\n    D_x = real_image_output.mean().item()\n\n    return error_real_image, D_x\n\ndef training_with_fake(G, D, criterion, batch_size, label):\n    fake_label = 0\n    noise = torch.randn(batch_size, nz, 1, 1, device=device)\n    fake = G(noise)\n    label.fill_(fake_label)\n    fake_image = D(fake.detach())\n    error_discriminator = criterion(fake_image, label)\n    error_discriminator.backward()\n    D_G_z1 = fake_image.mean().item()\n\n    return error_discriminator, fake, D_G_z1\n\ndef update_network(D, G, criterion, fake, real_label, label):\n    # Update Network\n    G.zero_grad()\n    label.fill_(real_label) \n    output = D(fake) # 鑑定を行う\n    errorG = criterion(output, label)\n    errorG.backward()\n    D_G_z2 = output.mean().item()\n\n    return errorG, D_G_z2\n\n\ndef run_train(netD, netG, dataloader, options):\n    netG.train()\n    netG.apply(weights_init)\n    netD.train()\n    netD.apply(weights_init)\n\n    criterion = nn.BCELoss()\n\n    print(netG)\n    print(netD)\n    \n    fixed_noise = torch.randn(options[\"batch_size\"], nz, 1, 1, device=device)\n\n    # Setup Optimizer\n    optimizerD = optim.Adam(netD.parameters(),\n                             lr=options[\"lr\"],\n                             betas=(0.5, 0.999))\n\n    optimizerG = optim.Adam(netG.parameters(),\n                             lr=options[\"lr\"],\n                             betas=(0.5, 0.999))\n\n    real_label = 1\n    errorG = 0\n\n    for epoch in range(options['epoch']):\n        print(f'{epoch + 1}')\n        for i, data in enumerate(dataloader):\n            raw_image, raw_label = data\n            real_image = raw_image.to(device)\n            batch_size = real_image.size(0)\n            label = torch.full((batch_size,), real_label, device=device)\n\n            # Train with Real\n            error_real_image, D_x = training_with_real(netD, criterion,\n                                                       label, real_image)\n\n            # # train with fake\n     
       error_discriminator, fake, D_G_z1 = training_with_fake(netG, netD, criterion, \n                                                                   batch_size, label)\n            optimizerD.step()\n\n            error_discriminator = error_real_image + error_discriminator\n\n            errorG, D_G_z2 = update_network(netD, netG, criterion,\n                                            fake, real_label, label)\n            optimizerG.step()\n\n            print(f'[{epoch}/{options[\"epoch\"]}][{i}/{len(dataloader)}] \\\n                     Loss_D: {error_discriminator.item()} Loss_G: {errorG.item()} \\\n                     D(x): {D_x} D(G(z)): {D_G_z1} / {D_G_z2}')\n\n            print(\"Save\")\n            vutils.save_image(real_image,\n                              f'result_image/real_{epoch}_samples.png',\n                              normalize=False)\n            \n            fake_image = netG(fixed_noise)\n            vutils.save_image(fake_image.detach(),\n                                f'result_image/fake_{epoch}_samples.png',\n                                normalize=True)\n            print(\"END Save\")\n\n            torch.save(netD.state_dict(), f'result_pth/real_{epoch}.pth')\n            torch.save(netG.state_dict(), f'result_pth/fake_{epoch}.pth')\n\n\ndef main():\n    train_dataset_path = 'D:\\project\\dcgan2\\dataset'\n    # train_dataset_path = 'D:\\project\\dataset\\\\food'\n    options = {\n        'batch_size': 128,\n        'epoch': 1500,\n        'lr': 1e-5\n    }\n\n    data_transform = create_transform()\n    data_loader = set_dataloader(train_dataset_path, data_transform,\n                                options['batch_size'])\n\n    G = Generator().to('cuda')\n    D = Discriminator().to('cuda')\n\n    run_train(D, G, data_loader, options)\n\nmain() if __name__ == '__main__' else None","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"116428615","text":"\r\nimport sys\r\nimport codecs\r\n\r\n\"\"\"\r\nScript for extracting category labels and higher-order OR concepts from \r\nAmazon product metadata available at http://snap.stanford.edu/data/amazon-meta.html\r\n\r\nUsage: cat amazon-meta-data.txt | python extract-amazon-data.py\r\n\"\"\"\r\n\r\nif __name__ == \"__main__\":\r\n\r\n\treload(sys)  \r\n\tsys.setdefaultencoding('utf8')\r\n\r\n\tlabelIdPairs = set()\r\n\torConcepts = set()\r\n\r\n\tdef getId(line): \r\n\t\treturn int(line.split(' ')[-1].strip())\r\n\r\n\tdef extractLabel(line, id):\r\n\t\t label = \":\".join(line.split(':')[1:]).strip().replace (' ', '_')\r\n\t\t labelIdPairs.add((label, id))\r\n\r\n\tdef extractConcepts(line, id):\r\n\r\n\t\tdef getLabelId(c):\r\n\t\t\tp = c[:-1].split('[')\r\n\t\t\treturn (p[0].replace (' ', '_'), int(p[-1]))\r\n\r\n\t\tcategorySequence = line[1:].split('|')\t\t\r\n\t\tlabelPairs = [getLabelId(c) for c in categorySequence]\r\n\t\tfor p in labelPairs: labelIdPairs.add(p)\r\n\t\tids = [i for (l, i) in labelPairs]\r\n\t\tfor (i, j) in zip(ids[:-1], ids[1:]): orConcepts.add((j, i))\r\n\t\torConcepts.add((id, ids[-1]))\r\n\r\n\tfor line in sys.stdin:\r\n\t\tl = line.strip()\r\n\t\tif l.startswith(\"Id\"): id = getId(l)\r\n\t\telif l.startswith(\"title\"): extractLabel(l, id)\r\n\t\telif l.startswith(\"|\"): extractConcepts(l, id)\r\n\r\n\tlabelsFile = open(\"amazon-labels.txt\", 'w')\r\n\tconceptsFile = open(\"amazon-or-concepts.txt\", 'w')\r\n\r\n\tfor (l, i) in labelIdPairs: labelsFile.write(l + \" \" + str(i) + \"\\n\")\r\n\tfor (i, j) in orConcepts: conceptsFile.write(str(i) + \" \" + str(j) + \"\\n\")","sub_path":"concepts-master/code/scripts/extract-amazon-data.py","file_name":"extract-amazon-data.py","file_ext":"py","file_size_in_byte":1449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"604355946","text":"import json\nfrom watson_developer_cloud import LanguageTranslatorV3\n\nlanguage_translator = LanguageTranslatorV3(\n    version='2018-05-01',\n    iam_api_key='nJaQPa0o-NFEoMDp28vtgBb5vQNIpjlxH1301ruyk45U')\n\n\ndef convert(text,language):\n    translation = language_translator.translate(text=text, model_id=language)\n    data = (json.dumps(translation, indent=2, ensure_ascii=False))\n    data = json.loads(data)\n    data = data['translations'][0]['translation']\n#    print(data['translations'])\n    print(data)\n    return data\n\n#output= convert('Dies ist eine gute Art zu essen','de-en')\n#print (output)\n\n\n\n\n#models = language_translator.list_models()\n#print(json.dumps(models, indent=2))\n","sub_path":"translate.py","file_name":"translate.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"109030314","text":"import os\nimport pyHook\nimport pythoncom\nimport socket\nimport sys\nimport threading\nclass KeyBoardHook():\n\n    def onKeyboardEvent(self, event):\n##        channel.send(event.WindowName)\n##        sys.stdout.write(event.WindowName+'\\n')\n##        channel.send(str(event.KeyID))\n##        sys.stdout.write(str(event.KeyID)+'\\n')\n##        channel.send(str(event.Ascii))\n##        sys.stdout.write(str(event.Ascii)+'\\n')\n        channel.send(event.WindowName+'\\n'+str(event.KeyID)+'\\n'+str(event.Ascii)+'\\n')\n        return True\n\nclass ClientThread(threading.Thread):\n\n    def run(self):\n        hm = pyHook.HookManager()\n        hm.KeyDown = KeyBoardHook().onKeyboardEvent\n        hm.HookKeyboard()\n        pythoncom.PumpMessages()\n\n    def __init__(self):\n        threading.Thread.__init__(self)\n\n\nserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nserver.bind(('192.168.0.100', 3000))\nserver.listen(1)\n\nwhile True:\n    channel, details = server.accept()\n    ClientThread().start()\n","sub_path":"Python/Python scripts/hooks/usingKeyID.pyw","file_name":"usingKeyID.pyw","file_ext":"pyw","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"82324684","text":"import os\nfrom dataclasses import dataclass, field\nfrom enum import Enum\nfrom pathlib import Path\nfrom typing import List, Optional\n\nfrom monorepo_builder.configuration import ConfigurationManager, Configuration\n\n\nclass ProjectType(Enum):\n    Library = (1,)\n    Standard = (2,)\n\n\n@dataclass(frozen=True)\nclass File:\n    file: str\n    last_changed_time: int\n\n    @staticmethod\n    def file_factory(file: Path):\n        return File(str(file), file.stat().st_mtime)\n\n\nclass ProjectFileListBuilder:\n    def build(self, path: Path) -> List[File]:\n        files: List[File] = []\n        configuration = ConfigurationManager.get()\n        for file in path.iterdir():\n            if not self.process_file(file, configuration):\n                continue\n            if file.is_dir():\n                files.extend(self.build(file))\n            else:\n                files.append(File.file_factory(file))\n        return files\n\n    def process_file(self, file_path: Path, configuration: Configuration) -> bool:\n        if file_path.name in configuration.filenames_to_skip:\n            return False\n        if configuration.skip_hidden_folders:\n            if file_path.is_dir() and file_path.name.startswith(\".\"):\n                return False\n        if configuration.skip_hidden_files:\n            if file_path.is_file() and file_path.name.startswith(\".\"):\n                return False\n        if file_path.suffix in configuration.extensions_to_skip:\n            return False\n        return True\n\n\n@dataclass\nclass Project:\n    project_path: str\n    file_list: List[File] = field(default_factory=list)\n    needs_build: bool = field(default=False)\n\n    @property\n    def path(self) -> Path:\n        return Path(self.project_path)\n\n    @property\n    def name(self) -> str:\n        path = self.path\n        return path.name.replace(\"_\", \"-\")\n\n    @property\n    def project_type(self) -> ProjectType:\n        if ConfigurationManager().get().library_folder_name in self.path.parts:\n            return ProjectType.Library\n        return ProjectType.Standard\n\n    def set_needs_build(self):\n        self.needs_build = True\n\n    def set_needs_build_due_to_file_changes(\n        self, project_from_last_run: \"Optional[Project]\"\n    ):\n        self.needs_build = self._did_files_change_from_last_run(project_from_last_run)\n\n    def set_needs_build_due_to_updated_library_reference(\n        self, updated_library_names: List[str]\n    ):\n        if self.project_references_updated_library(updated_library_names):\n            self.set_needs_build()\n\n    def _did_files_change_from_last_run(\n        self, project_from_last_run: \"Optional[Project]\"\n    ) -> bool:\n        if not project_from_last_run:\n            return True\n        if len(self.file_list) != len(project_from_last_run.file_list):\n            return True\n        sorted_current_file_list = sorted(self.file_list, key=lambda x: x.file)\n        sorted_last_file_list = sorted(\n            project_from_last_run.file_list, key=lambda x: x.file\n        )\n        for current, previous in zip(sorted_current_file_list, sorted_last_file_list):\n            if current.file != previous.file:\n                return True\n            if current.last_changed_time != previous.last_changed_time:\n                return True\n        return False\n\n    def project_references_updated_library(\n        self, library_project_names: List[str]\n    ) -> bool:\n        ## Should be 
updated to parse the requirements to match against entire project names.\n        ## For example, a library project name of \"thing\" will match to a requirements of \"something\".\n        requirements = self.read_requirements_file()\n        for library_project_name in library_project_names:\n            if library_project_name in requirements:\n                return True\n        return False\n\n    def read_requirements_file(self) -> str:\n        requirement_filenames = [\n            os.path.join(self.project_path, \"requirements.txt\"),\n            os.path.join(self.project_path, \"package.json\"),\n        ]\n        for requirement_filename in requirement_filenames:\n            requirement_file_path = Path(requirement_filename)\n            if requirement_file_path.exists():\n                return requirement_file_path.read_text()\n        raise RequirementsFileNotFoundException(self)\n\n\nclass RequirementsFileNotFoundException(Exception):\n    def __init__(self, project: Project):\n        super().__init__(f\"Requirements file not found for {project.name}\")\n","sub_path":"monorepo_builder/projects.py","file_name":"projects.py","file_ext":"py","file_size_in_byte":4460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
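+# Hedged sketch of the improvement flagged in the comment above: match library
+# names against whole requirement tokens instead of substrings, so "thing" no
+# longer matches a requirement line for "something". Names are illustrative.
+import re
+def references_library(requirements_text: str, library_names) -> bool:
+    tokens = set()
+    for line in requirements_text.splitlines():
+        # take the package name before any version specifier or extras marker
+        name = re.split(r"[=<>!~\[ ]", line.strip(), maxsplit=1)[0]
+        if name:
+            tokens.add(name.lower())
+    return any(n.lower() in tokens for n in library_names)
+assert references_library("something==1.0", ["thing"]) is False
+assert references_library("thing>=2.0", ["thing"]) is True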
+{"seq_id":"639062035","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html\nfrom fendi.tasks import add_scrap_item\nfrom scrapy import signals\n\n\nclass ScrapFendiPipeline(object):\n    item_list = []\n\n    def process_item(self, item, spider):\n        self.item_list.append(dict(item))\n        if len(self.item_list) == 20:\n            add_scrap_item.delay(self.item_list)\n            self.item_list = []\n        return item\n\n    @classmethod\n    def from_crawler(cls, crawler, *args, **kwargs):\n        pipeline = cls()\n        crawler.signals.connect(pipeline.spider_idle,\n                                signal=signals.spider_idle)\n        return pipeline\n\n    def spider_idle(self):\n        if len(self.item_list) > 0:\n            add_scrap_item.delay(self.item_list, end=True)\n            print('Sending last {} items'.format(len(self.item_list)))\n            self.item_list = []\n","sub_path":"fendi_shop/scrap_fendi/scrap_fendi/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"290108659","text":"N = int(input())\n\nBA = []\nfor i in range(N):\n    a, b = list(map(int, input().split()))\n    BA.append([b, a])\n\nBA.sort()\n\nans = 0\nlast = 0\nfor b, a in BA:\n    if last < a:\n        ans += 1\n        last = b\n\nprint(ans)\n","sub_path":"PAST/typical_algorithm_b.py","file_name":"typical_algorithm_b.py","file_ext":"py","file_size_in_byte":218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"262451113","text":"import os\nimport requests\nimport time\nimport threading\n\n# 进度条模块函数\ndef progressbar(url, path):\n\n    if not os.path.exists(path):  # 看是否有该文件夹,没有则创建文件夹\n        os.mkdir(path)\n\n    start = time.time()  # 下载开始时间\n    response = requests.get(url, stream = True)\n    size = 0  # 初始化已下载大小\n    chunk_size = 1  # 每次下载的数据大小\n    content_size = int(response.headers['content-length'])  # 下载文件总大小\n    try:\n        if response.status_code == 200:  # 判断是否响应成功\n            print('Start download,[File size]:{size:.2f} MB'.format(\n                size = content_size / chunk_size / 1024))  # 开始下载,显示下载文件大小\n            filepath = path + '\\Pikachu.jpg'  # 设置图片name,注:必须加上扩展名\n\n            with open(filepath, 'wb') as file:  # 显示进度条\n                for data in response.iter_content(chunk_size=chunk_size):\n                    file.write(data)\n                    size += len(data)\n                    print('\\r' + '[下载进度]:%s%.2f%%' % (\n                    '>' * int(size * 50 / content_size), float(size / content_size * 100)), end=' ')\n        end = time.time()  # 下载结束时间\n        print('Download completed!,times: %.2f秒' % (end - start))  # 输出下载用时时间\n    except:\n        print('Error!')\n\n\n# 自定义线程类\nclass MyThread(threading.Thread):\n    def __init__(self, n):\n        super(MyThread, self).__init__()\n        self.n = n\n\n    def run(self):\n        url = 'https://img.moegirl.org/common/thumb/e/ed/%E7%9A%AE%E5%8D%A1%E4%B8%98%E4%B9%8B%E6%AD%8C.jpg/1200px-%E7%9A%AE%E5%8D%A1%E4%B8%98%E4%B9%8B%E6%AD%8C.jpg'\n        path = r'E:\\123'  # 设置下载到本地的地址\n        progressbar(url, path)\n\n\n# 主函数\nif __name__ == '__main__':\n    t = MyThread('download')\n    t.start()","sub_path":"Hand training procedure/Download Progress.py","file_name":"Download Progress.py","file_ext":"py","file_size_in_byte":1894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"271866586","text":"'''\nCreated on 16 dec. 2014\n\n@author: Narcis2007\n'''\nfrom sortari.generic_sort import SortareGenerica\n\n\nclass MergeSort(SortareGenerica):\n    def __init__(self,l,key,reverse):\n        '''\n        super() initializeaza toate clasele mostenite?\n        '''\n        super().__init__(l,key,reverse)\n        \n    def sort(self):\n        \n        self.get_lista()[:]=self.__merge_sort(self.get_lista())\n        \n    def __merge_sort(self, l):\n        if len(l)<=1:\n            return l\n        mijloc=len(l)//2\n        a=self.__merge_sort(l[:mijloc])\n        b=self.__merge_sort(l[mijloc:])\n        return self.__merge(a,b)\n    \n    def __merge(self,left,right):\n        '''\n        returneaza concatenarea listelor a si b sortate crescator\n        \n        l=[]\n        i=0\n        j=0\n        while i=b[j]:\n                    l.append(b[j])\n                    j=j+1\n        while i 0 and len(right) > 0:\n            if self.get_key()(left[0]) < self.get_key()(right[0]):\n                rez.append(left.pop(0))\n            else:\n                rez.append(right.pop(0))\n        rez.extend(left + right)\n        return rez\n","sub_path":"src/sortari/algoritmi/merge_sort.py","file_name":"merge_sort.py","file_ext":"py","file_size_in_byte":1609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"547541470","text":"import urllib.request\nimport re\nimport os\nimport sys\nimport shutil\n\nclass DribbbleImageSpider(object):\n    def __init__(self):\n        self.searchName = sys.argv[1]\n        self.url = \"https://dribbble.com/search?q=\" + self.searchName\n        self.downloadDirectory = \"/Users/yuxi/Desktop/\" + self.searchName + \"/\"\n\n    def makeDirectory(self):\n        try:\n            shutil.rmtree(self.downloadDirectory)\n        except:\n            pass\n        os.mkdir(self.downloadDirectory)\n\n    def getHTML(self):\n        print(\"getting HTML ......\")\n        try:\n            resultHTML = urllib.request.urlopen(self.url).read().decode(\"utf-8\")\n        except:\n            print(\"ERROR\")\n\n#        htmlFile = open(\"/Users/yuxi/Desktop/html.txt\", \"w\")\n#        htmlFile.write(resultHTML)\n#        htmlFile.close()\n\n        return resultHTML\n\n    def getImageUrls(self):\n        print(\"getting image urls ......\")\n        imageUrls = re.findall(r'', self.getHTML())\n        print(\"got \" + str(len(imageUrls)) + \" imageUrls\")\n        return imageUrls\n\n    def downloadImages(self):        \n        self.makeDirectory()\n        imageUrls = self.getImageUrls() \n        currentNum = 1 \n        for imageUrl in imageUrls:\n            print(\"downloading image: \" + str(currentNum) + \"/\" + str(len(imageUrls)))\n            imageDirectory = self.downloadDirectory + self.searchName + \"_\" + str(currentNum) + \".jpg\"\n            urllib.request.urlretrieve(imageUrl, imageDirectory, None)\n            currentNum += 1\n        print(\"\\nfinished!\\nall images saved at: \" + self.downloadDirectory + \"\\n\")\n\ndef main():\n    spider = DribbbleImageSpider()\n    spider.downloadImages()\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"DribbbleImageSpider.py","file_name":"DribbbleImageSpider.py","file_ext":"py","file_size_in_byte":1737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"582151260","text":"import numpy as np\nimport tensorflow as tf\nfrom tensorflow.python.client import device_lib\n\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nimport keras\nfrom keras.callbacks import ModelCheckpoint, LearningRateScheduler\nfrom keras.callbacks import ReduceLROnPlateau\nfrom keras import backend as K\nfrom keras import applications\n\n\n\n#Test Gpu\nconfig = tf.ConfigProto( device_count = {'GPU': 1 , 'CPU': 4} ) \nsess = tf.Session(config=config) \nkeras.backend.set_session(sess)\nsess = tf.Session(config=tf.ConfigProto(log_device_placement=True))\nprint(device_lib.list_local_devices())\nK.tensorflow_backend._get_available_gpus()\n\n\n#Load data\nx_train = np.load('../Data/train_data_64.npy')\ny_train = np.load('../Data/train_lbl_64.npy')\nx_test = np.load('../Data/test_data_64.npy')\ny_test = np.load('../Data/test_lbl_64.npy')\nclasses = 5\n\n\n#model initilize\nmodel = applications.ResNet50(include_top=True, weights=None, input_shape=(64,64,3),classes=classes)\nmodel.compile(loss='categorical_crossentropy',\noptimizer='rmsprop',\nmetrics=['accuracy'])\nmodel.summary()\n\n\n\n#Set checkpoint\ncheckpoint = ModelCheckpoint(filepath='Result/resnet50_64_ckpt.hdf5',monitor='val_acc',\n                             verbose=1,save_best_only=True)\ndef lr_sch(epoch):\n    if epoch <30:\n        return 1e-3\n    if 30<=epoch<70:\n        return 1e-4\n    if epoch>=70:\n        return 1e-5\nlr_scheduler = LearningRateScheduler(lr_sch)\nlr_reducer = ReduceLROnPlateau(monitor='val_acc',factor=0.2,patience=5,\n                               mode='max',min_lr=1e-3)\ncallbacks = [checkpoint,lr_scheduler,lr_reducer]\n\n\n#Train\nhistory = model.fit(x_train,y_train,batch_size=64,epochs=100,validation_split=0.3,validation_data=None,verbose=1,callbacks=callbacks)\nmodel.save('Result/ResNet50_64_model.h5')\n\n\n#Test\nscores = model.evaluate(x_test,y_test,verbose=1)\nprint('Test loss:',scores[0])\nprint('Test accuracy:',scores[1])\n\n#Confustion matrix\ny_pred=model.predict(x_test)\ny_test = np.argmax(y_test,axis = 1)\ny_pred = np.argmax(y_pred,axis = 1)\n\nfrom sklearn.metrics import confusion_matrix\ncm = confusion_matrix(y_test, y_pred)\ncm= cm.astype('float')/cm.sum(axis=1)[:, np.newaxis]\nprint(cm)\nfig, ax = plt.subplots()\nim = ax.imshow(cm, interpolation='nearest')\nax.figure.colorbar(im, ax=ax)\nplt.savefig('Result_plot/ResNet50_64_confusion.png')\nplt.show()\n\n\n#Save history\n# convert the history.history dict to a pandas DataFrame:     \nhist_df = pd.DataFrame(history.history) \n\n# or save to csv: \nhist_csv_file = 'Result/history_ResNet50_64.csv'\nwith open(hist_csv_file, mode='w') as f:\n    hist_df.to_csv(f)\n\n\n","sub_path":"Classification/dog-breed-classification-GPU-ResNet50_64.py","file_name":"dog-breed-classification-GPU-ResNet50_64.py","file_ext":"py","file_size_in_byte":2579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"339956818","text":"# -*- coding=utf-8 -*-\nfrom __future__ import absolute_import, unicode_literals\n\nimport errno\nimport os\nimport sys\nimport warnings\n\nfrom tempfile import mkdtemp\n\nimport six\n\n\n__all__ = [\n    \"Path\",\n    \"get_terminal_size\",\n    \"finalize\",\n    \"partialmethod\",\n    \"JSONDecodeError\",\n    \"FileNotFoundError\",\n    \"ResourceWarning\",\n    \"FileNotFoundError\",\n    \"PermissionError\",\n    \"IsADirectoryError\",\n    \"fs_str\",\n    \"lru_cache\",\n    \"TemporaryDirectory\",\n    \"NamedTemporaryFile\",\n    \"to_native_string\",\n]\n\nif sys.version_info >= (3, 5):\n    from pathlib import Path\n    from functools import lru_cache\nelse:\n    from pathlib2 import Path\n    from pipenv.vendor.backports.functools_lru_cache import lru_cache\n\nfrom .backports.tempfile import NamedTemporaryFile as _NamedTemporaryFile\nif sys.version_info < (3, 3):\n    from pipenv.vendor.backports.shutil_get_terminal_size import get_terminal_size\n    NamedTemporaryFile = _NamedTemporaryFile\nelse:\n    from tempfile import NamedTemporaryFile\n    from shutil import get_terminal_size\n\ntry:\n    from weakref import finalize\nexcept ImportError:\n    from pipenv.vendor.backports.weakref import finalize\n\ntry:\n    from functools import partialmethod\nexcept Exception:\n    from .backports.functools import partialmethod\n\ntry:\n    from json import JSONDecodeError\nexcept ImportError:  # Old Pythons.\n    JSONDecodeError = ValueError\n\nif six.PY2:\n\n    class ResourceWarning(Warning):\n        pass\n\n    class FileNotFoundError(IOError):\n        \"\"\"No such file or directory\"\"\"\n\n        def __init__(self, *args, **kwargs):\n            self.errno = errno.ENOENT\n            super(FileNotFoundError, self).__init__(*args, **kwargs)\n\n    class PermissionError(OSError):\n        def __init__(self, *args, **kwargs):\n            self.errno = errno.EACCES\n            super(PermissionError, self).__init__(*args, **kwargs)\n\n    class IsADirectoryError(OSError):\n        \"\"\"The command does not work on directories\"\"\"\n        pass\n\nelse:\n    from builtins import ResourceWarning, FileNotFoundError, PermissionError, IsADirectoryError\n\nsix.add_move(six.MovedAttribute(\"Iterable\", \"collections\", \"collections.abc\"))\nfrom six.moves import Iterable\n\n\nif not sys.warnoptions:\n    warnings.simplefilter(\"default\", ResourceWarning)\n\n\nclass TemporaryDirectory(object):\n    \"\"\"Create and return a temporary directory.  This has the same\n    behavior as mkdtemp but can be used as a context manager.  
For\n    example:\n\n        with TemporaryDirectory() as tmpdir:\n            ...\n\n    Upon exiting the context, the directory and everything contained\n    in it are removed.\n    \"\"\"\n\n    def __init__(self, suffix=\"\", prefix=None, dir=None):\n        if \"RAM_DISK\" in os.environ:\n            import uuid\n\n            name = uuid.uuid4().hex\n            dir_name = os.path.join(os.environ[\"RAM_DISK\"].strip(), name)\n            os.mkdir(dir_name)\n            self.name = dir_name\n        else:\n            suffix = suffix if suffix else \"\"\n            if not prefix:\n                self.name = mkdtemp(suffix=suffix, dir=dir)\n            else:\n                self.name = mkdtemp(suffix, prefix, dir)\n        self._finalizer = finalize(\n            self,\n            self._cleanup,\n            self.name,\n            warn_message=\"Implicitly cleaning up {!r}\".format(self),\n        )\n\n    @classmethod\n    def _rmtree(cls, name):\n        from .path import rmtree\n\n        def onerror(func, path, exc_info):\n            if issubclass(exc_info[0], (PermissionError, OSError)):\n                try:\n                    try:\n                        if path != name:\n                            os.chflags(os.path.dirname(path), 0)\n                        os.chflags(path, 0)\n                    except AttributeError:\n                        pass\n                    if path != name:\n                        os.chmod(os.path.dirname(path), 0o700)\n                    os.chmod(path, 0o700)\n\n                    try:\n                        os.unlink(path)\n                    # PermissionError is raised on FreeBSD for directories\n                    except (IsADirectoryError, PermissionError, OSError):\n                        cls._rmtree(path)\n                except FileNotFoundError:\n                    pass\n            elif issubclass(exc_info[0], FileNotFoundError):\n                pass\n            else:\n                raise\n\n        rmtree(name, onerror=onerror)\n\n    @classmethod\n    def _cleanup(cls, name, warn_message):\n        cls._rmtree(name)\n        warnings.warn(warn_message, ResourceWarning)\n\n    def __repr__(self):\n        return \"<{} {!r}>\".format(self.__class__.__name__, self.name)\n\n    def __enter__(self):\n        return self\n\n    def __exit__(self, exc, value, tb):\n        self.cleanup()\n\n    def cleanup(self):\n        if self._finalizer.detach():\n            self._rmtree(self.name)\n\n\ndef fs_str(string):\n    \"\"\"Encodes a string into the proper filesystem encoding\n\n    Borrowed from pip-tools\n    \"\"\"\n    if isinstance(string, str):\n        return string\n    assert not isinstance(string, bytes)\n    return string.encode(_fs_encoding)\n\n\n_fs_encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()\n\n\ndef to_native_string(string):\n    from .misc import to_text, to_bytes\n    if six.PY2:\n        return to_bytes(string)\n    return to_text(string)\n","sub_path":"weatherenv/Lib/site-packages/pipenv/vendor/vistir/compat.py","file_name":"compat.py","file_ext":"py","file_size_in_byte":5313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"40876891","text":"class Solution:\r\n    def isValid(self, s):\r\n        \"\"\"\r\n        判断字符串中的括号是否匹配\r\n        :param s:给定字符串\r\n        :return: True是 False否\r\n        \"\"\"\r\n        s_map = {\"(\": \")\", \"[\": \"]\", \"{\": \"}\"}\r\n\r\n        stack = []\r\n\r\n        for i in s:\r\n            if i in s_map.keys():\r\n                stack.append(i)\r\n            else:\r\n                if len(stack) == 0:\r\n                    return False\r\n                j = stack.pop()\r\n                if s_map[j] != i:\r\n                    return False\r\n        if len(stack) != 0:\r\n            return False\r\n        return True\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    print(Solution().isValid(\"([]{}\"))\r\n","sub_path":"LeetCode_Python/Test_20_isValid.py","file_name":"Test_20_isValid.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"522244560","text":"# Copyright 2020, Google, Inc.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#    http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport datetime\nimport time\nimport uuid\n\nfrom google.api_core import exceptions\n\nimport pytest\n\nimport create_config\nimport create_deployment\nimport create_realm\nimport delete_config\nimport delete_deployment\nimport delete_realm\nimport get_config\nimport get_deployment\nimport get_rollout\nimport list_configs\nimport list_deployments\nimport update_deployment\nimport update_rollout_default\nimport update_rollout_override\nimport update_rollout_remove_default\nimport update_rollout_remove_override\n\nPROJECT_ID = \"python-docs-samples-tests\"\nCONFIG_ID = \"my-game-server-config\"\nREALM_LOCATION = \"global\"\n\n# The format of realm ID. This is used in the unit tests and cleanup below.\nrealm_id_format = 'test-realm-{}-{}'\n\n# The format of deployment ID. This is used in the unit tests and cleanup below.\ndeployment_id_format = 'test-deployment-{}-{}'\n\n\n@pytest.fixture(scope=\"session\", autouse=True)\ndef clean_up_old_deployments():\n    all_deployments = list_deployments.list_deployments(PROJECT_ID)\n    for deployment in all_deployments:\n        deployment_name = deployment.name\n        deployment_id = deployment_name[deployment_name.rfind('/') + 1: len(deployment_name)]\n        if deployment_id.find('test-deployment-') == 0:\n            time_str = deployment_id[deployment_id.rfind('-') + 1: len(deployment_id)]\n            test_date = datetime.datetime.utcfromtimestamp(int(time_str))\n            now_date = datetime.datetime.utcfromtimestamp(int(time.time()))\n            difftime = now_date - test_date\n\n            # *NOTE* Restrict to deployments used in the tests older than 2 days\n            #        to prevent thrashing in the case of async tests\n            if (difftime.days > 2):\n                print(f\"Cleaning up old deployment {deployment_id} and its configs, difftime: {difftime}\")\n                clean_up_deployment_and_configs(deployment_id)\n\n\n@pytest.fixture(scope=\"function\")\ndef test_deployment():\n    deployment_id = deployment_id_format.format(uuid.uuid4().hex, int(time.time()))\n\n    print(f\"Creating deployment {deployment_id} in project {PROJECT_ID}\")\n    create_deployment.create_deployment(PROJECT_ID, deployment_id)\n\n    yield deployment_id\n\n    print(f\"Cleaning up deployment {deployment_id} in teardown\")\n    clean_up_deployment(deployment_id)\n\n\ndef clean_up_deployment(deployment_id):\n    # Delete the deployment if it still exists.\n    print(f\"Deleting deployment: {deployment_id}\")\n    try:\n        delete_deployment.delete_deployment(PROJECT_ID, deployment_id)\n    except exceptions.NotFound:  # Ignore the non-existent deployment\n        return\n\n\n@pytest.fixture(scope=\"function\")\ndef test_deployment_with_config():\n    deployment_id = deployment_id_format.format(uuid.uuid4().hex, int(time.time()))\n\n    print(f\"Creating deployment {deployment_id} in project {PROJECT_ID}\")\n    
create_deployment.create_deployment(PROJECT_ID, deployment_id)\n\n    print(f\"Creating config {CONFIG_ID} in deployment {deployment_id} in project {PROJECT_ID}\")\n    create_config.create_config(PROJECT_ID, deployment_id, CONFIG_ID)\n\n    yield deployment_id\n\n    print(f\"Cleaning up deployment {deployment_id} in teardown\")\n    clean_up_deployment_and_configs(deployment_id)\n\n\ndef clean_up_deployment_and_configs(deployment_id):\n    # Delete the deployment and the game server configs in the deployment.\n    try:\n        get_deployment.get_deployment(PROJECT_ID, deployment_id)\n    except exceptions.NotFound:  # Ignore the non-existent deployment\n        return\n\n    try:\n        update_rollout_remove_default.update_rollout_remove_default(PROJECT_ID, deployment_id)\n    except exceptions.NotFound:  # Ignore the non-existent deployment\n        return\n\n    try:\n        update_rollout_remove_override.update_rollout_remove_override(PROJECT_ID, deployment_id)\n    except exceptions.NotFound:  # Ignore the non-existent deployment\n        return\n\n    configs = list_configs.list_configs(PROJECT_ID, deployment_id)\n    for config in configs:\n        config_id = config.name.rsplit('/', 1)[-1]\n        print(f\"Deleting config {config_id} in deployment {deployment_id}\")\n        try:\n            delete_config.delete_config(PROJECT_ID, deployment_id, config_id)\n        except exceptions.NotFound:  # Ignore the non-existent config\n            return\n\n    print(f\"Deleting deployment: {deployment_id}\")\n    try:\n        delete_deployment.delete_deployment(PROJECT_ID, deployment_id)\n    except exceptions.NotFound:  # Ignore the non-existent deployment\n        return\n\n\n@pytest.fixture(scope=\"function\")\ndef test_realm():\n    realm_id = realm_id_format.format(uuid.uuid4().hex, int(time.time()))\n\n    print(f\"Creating realm {realm_id} in location {REALM_LOCATION} in project {PROJECT_ID}\")\n    create_realm.create_realm(PROJECT_ID, REALM_LOCATION, realm_id)\n\n    yield realm_id\n\n    print(f\"Cleaning up realm {realm_id} in teardown\")\n    clean_up_realm(realm_id)\n\n\ndef clean_up_realm(realm_id):\n    # Delete the realm if it still exists.\n    print(f\"Deleting realm: {realm_id}\")\n    try:\n        delete_realm.delete_realm(PROJECT_ID, REALM_LOCATION, realm_id)\n    except exceptions.NotFound:  # Ignore the non-existent realm\n        return\n\n\ndef test_create_deployment(test_deployment):\n    print(f\"Created deployment {test_deployment} in project {PROJECT_ID}\")\n\n\ndef test_get_deployment(test_deployment):\n    deployment = get_deployment.get_deployment(PROJECT_ID, test_deployment)\n    assert deployment.name == f\"projects/{PROJECT_ID}/locations/global/gameServerDeployments/{test_deployment}\"\n\n\ndef test_list_deployments(test_deployment):\n    deployments = list_deployments.list_deployments(PROJECT_ID)\n\n    deployment_name_list = []\n    for deployment in deployments:\n        deployment_name_list.append(deployment.name)\n\n    deployment_name = f\"projects/{PROJECT_ID}/locations/global/gameServerDeployments/{test_deployment}\"\n    assert deployment_name in deployment_name_list\n\n\ndef test_update_deployment(test_deployment):\n    update_deployment.update_deployment(PROJECT_ID, test_deployment)\n    deployment = get_deployment.get_deployment(PROJECT_ID, test_deployment)\n    assert deployment.labels == {\"label-key-1\": \"label-value-1\", \"label-key-2\": \"label-value-2\"}\n\n\ndef test_delete_deployment(test_deployment):\n    
delete_deployment.delete_deployment(PROJECT_ID, test_deployment)\n    with pytest.raises(exceptions.NotFound):\n        get_deployment.get_deployment(PROJECT_ID, test_deployment)\n\n\ndef test_get_rollout(test_deployment):\n    rollout = get_rollout.get_rollout(PROJECT_ID, test_deployment)\n    assert rollout.name == f\"projects/{PROJECT_ID}/locations/global/gameServerDeployments/{test_deployment}/rollout\"\n\n\ndef test_update_rollout_default(test_deployment_with_config):\n    update_rollout_default.update_rollout_default(PROJECT_ID, test_deployment_with_config, CONFIG_ID)\n    rollout = get_rollout.get_rollout(PROJECT_ID, test_deployment_with_config)\n    assert rollout.default_game_server_config == f\"projects/{PROJECT_ID}/locations/global/gameServerDeployments/{test_deployment_with_config}/configs/{CONFIG_ID}\"\n\n\ndef test_update_rollout_override(test_realm, test_deployment_with_config):\n    update_rollout_override.update_rollout_override(PROJECT_ID, test_deployment_with_config, CONFIG_ID, REALM_LOCATION, test_realm)\n    rollout = get_rollout.get_rollout(PROJECT_ID, test_deployment_with_config)\n    assert len(rollout.game_server_config_overrides) == 1\n    assert rollout.game_server_config_overrides[0].config_version == f\"projects/{PROJECT_ID}/locations/global/gameServerDeployments/{test_deployment_with_config}/configs/{CONFIG_ID}\"\n    assert rollout.game_server_config_overrides[0].realms_selector.realms == [f\"projects/{PROJECT_ID}/locations/{REALM_LOCATION}/realms/{test_realm}\"]\n\n\ndef test_update_rollout_remove_default(test_deployment):\n    update_rollout_remove_default.update_rollout_remove_default(PROJECT_ID, test_deployment)\n    rollout = get_rollout.get_rollout(PROJECT_ID, test_deployment)\n    assert rollout.default_game_server_config == \"\"\n\n\ndef test_update_rollout_remove_override(test_deployment):\n    update_rollout_remove_override.update_rollout_remove_override(PROJECT_ID, test_deployment)\n    rollout = get_rollout.get_rollout(PROJECT_ID, test_deployment)\n    assert len(rollout.game_server_config_overrides) == 0\n\n\ndef test_create_config(test_deployment_with_config):\n    print(f\"Created config {CONFIG_ID} in deployment {test_deployment_with_config} in project {PROJECT_ID}\")\n\n\ndef test_get_config(test_deployment_with_config):\n    config = get_config.get_config(PROJECT_ID, test_deployment_with_config, CONFIG_ID)\n    assert config.name == f\"projects/{PROJECT_ID}/locations/global/gameServerDeployments/{test_deployment_with_config}/configs/{CONFIG_ID}\"\n\n\ndef test_list_configs(test_deployment_with_config):\n    configs = list_configs.list_configs(PROJECT_ID, test_deployment_with_config)\n\n    config_name_list = []\n    for config in configs:\n        config_name_list.append(config.name)\n\n    config_name = f\"projects/{PROJECT_ID}/locations/global/gameServerDeployments/{test_deployment_with_config}/configs/{CONFIG_ID}\"\n    assert config_name in config_name_list\n\n\ndef test_delete_config(test_deployment_with_config):\n    delete_config.delete_config(PROJECT_ID, test_deployment_with_config, CONFIG_ID)\n    with pytest.raises(exceptions.NotFound):\n        get_config.get_config(PROJECT_ID, test_deployment_with_config, CONFIG_ID)\n","sub_path":"samples/snippets/deployment_and_config_test.py","file_name":"deployment_and_config_test.py","file_ext":"py","file_size_in_byte":9959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"509935484","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# 等差数列生成器\n# 第一版\nclass ArithmeticProgression1:\n    def __init__(self, begin, step, end=None):\n        self.begin = begin\n        self.step = step\n        self.end = end  # None ->无穷数列\n\n    def __iter__(self):\n        # 先做加法,根据结果强制类型转换begin\n        result = type(self.begin + self.step)(self.begin)\n        forever = self.end is None\n        index = 0\n        while forever or result < self.end:\n            yield result\n            index += 1\n            result = self.begin + self.step * index\n","sub_path":"program/Fluent-Python/ArithmeticProgression.py","file_name":"ArithmeticProgression.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"405091926","text":"import sys\nimport os\nimport Forms.Base\n\nfrom PySide.QtGui import *\n\n\nclass App:\n    @staticmethod\n    def Run(argv=[], form=None):\n        if hasattr(form, '__base__'):\n            if form.__base__ == QMainWindow:\n                try:\n                    app = QApplication(argv)\n                    app.setStyleSheet(open(\"{0}\\\\{1}\".format(os.getcwd(), \"App.css\"), \"r\").read())\n                    base = form()\n                    base.show()\n                    return app.exec_()\n                except Exception as error:\n                    return error.args\n\n\nif __name__ == '__main__':\n    sys.exit(App.Run(sys.argv, Forms.Base.Base))","sub_path":"Demo/App.py","file_name":"App.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"47454709","text":"from vpython import *\nimport random\n\nclass Star():\n\tdef __init__(self, mass, radius, pos, color):\n\t\tself.pos = pos\n\t\tself.obj = sphere(pos = self.pos, radius = radius, color = color)\n\t\tself.mass = mass\n\t\tself.radius = radius\n\t\tself.velocity = vector(0,0,0)\n\t\tself.acceleration = vector(0,0,0)\n\t\tself.color = color\n\t\tself.theta = 0\n\t\tself.distance = 0\n\t\tself.force = 0\n\n\t\n","sub_path":"star.py","file_name":"star.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"183459691","text":"import os\nimport cv2\nimport numpy as np\nimport tensorflow as tf\nfrom plyfile import PlyData, PlyElement\nfrom utils import getDensity, drawDensityImage\nimport pdb\n\n\ndef _bytes_feature(value):\n    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n\n\ndef _int64_feature(value):\n    return tf.train.Feature(int64_list=tf.train.Int64List(value=value))\n\n\ndef _float_feature(value):\n    return tf.train.Feature(float_list=tf.train.FloatList(value=value))\n\n\ndef read_scene_pc(file_path):\n    with open(file_path, 'rb') as f:\n        plydata = PlyData.read(f)\n        dtype = plydata['vertex'].data.dtype\n    print('dtype of file{}: {}'.format(file_path, dtype))\n\n    points_data = np.array(plydata['vertex'].data.tolist())\n\n    return points_data\n\n\ndef write_scene_pc(points, output_path):\n    vertex = np.array([tuple(x) for x in points],\n                      dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4'), ('red', 'u1'), ('green', 'u1'), ('blue', 'u1'), ])\n    vertex_el = PlyElement.describe(vertex, 'vertex')\n    PlyData([vertex_el]).write(output_path)  # write the new ply file\n\n\nclass RecordWriter:\n    def __init__(self, num_points, base_dir, phase, im_size, max_num_corners=300):\n        self.num_points = num_points\n        self.base_dir = base_dir\n        self.phase = phase\n        self.im_size = im_size  # HEIGHT, WIDTH = SIZE\n        self.max_num_corners = max_num_corners\n\n        self.file_paths = self.get_pc_filepaths()\n\n        self.writer = tf.python_io.TFRecordWriter(self.base_dir + '_' + self.phase + '.tfrecords')\n\n    def get_pc_filepaths(self):\n        filenames = os.listdir(self.base_dir)\n        file_paths = [os.path.join(self.base_dir, filename) for filename in filenames]\n        return file_paths\n\n    def write(self):\n        for file_path in self.file_paths:\n            self.write_example(file_path)\n\n    def write_example(self, file_path):\n        points = read_scene_pc(file_path)\n\n        axis_trans_mat = np.array([[0, 0, 1, 0],\n                                   [-1, 0, 0, 0],\n                                   [0, -1, 0, 0],\n                                   [0, 0, 0, 1]])\n\n        xyz = points[:, :3]\n        xyz = np.concatenate([xyz, np.ones([xyz.shape[0], 1])], axis=1)\n\n        transformed_xyz = np.matmul(axis_trans_mat, xyz.transpose([1, 0])).transpose([1, 0])\n        transformed_xyz = transformed_xyz[:, :3]\n\n        mins = transformed_xyz.min(0, keepdims=True)\n        maxs = transformed_xyz.max(0, keepdims=True)\n\n        max_range = (maxs - mins)[:, :2].max()\n        padding = max_range * 0.05\n        mins = (maxs + mins) / 2 - max_range / 2\n        mins -= padding\n        max_range += padding * 2\n        transformed_xyz = (transformed_xyz - mins) / max_range\n\n        new_points = np.concatenate([transformed_xyz, points[:, 3:6]], axis=1)\n        points = new_points\n\n        if points.shape[0] < self.num_points:\n            indices = np.arange(points.shape[0])\n            points = np.concatenate([points, points[np.random.choice(indices, self.num_points - points.shape[0])]], axis=0)\n        else:\n            sampled_indices = np.arange(points.shape[0])\n            np.random.shuffle(sampled_indices)\n            points = points[sampled_indices[:self.num_points]]\n\n        # For testing purpose: draw the density image to check the quality\n        # write_scene_pc(points, './test.ply')\n        # density_img = 
drawDensityImage(getDensity(points=points))\n        # cv2.imwrite('./test_density.png', density_img)\n\n        points[:, 3:] = points[:, 3:] / 255 - 0.5\n\n        coordinates = np.clip(np.round(points[:, :2] * self.im_size).astype(np.int32), 0, self.im_size - 1)\n\n        points_indices = self.get_projection_indices(coordinates)\n\n        # prepare other g.t. related inputs to be zeros for now\n\n        corner_gt = np.zeros([self.max_num_corners, 3], dtype=np.int64)\n\n        num_corners = 0\n\n        icon_segmentation = np.zeros((self.im_size, self.im_size), dtype=np.uint8)\n\n        room_segmentation = np.zeros((self.im_size, self.im_size), dtype=np.uint8)\n\n        flags = np.zeros(2, np.int64)\n        flags[0] = 1\n        flags[1] = 0\n\n        example = tf.train.Example(features=tf.train.Features(feature={\n            'image_path': _bytes_feature(file_path),\n            'points': _float_feature(points.reshape(-1)),\n            'point_indices': _int64_feature(points_indices.reshape(-1)),\n            'corner': _int64_feature(corner_gt.reshape(-1)),\n            'num_corners': _int64_feature([num_corners]),\n            'icon': _bytes_feature(icon_segmentation.tostring()),\n            'room': _bytes_feature(room_segmentation.tostring()),\n            'flags': _int64_feature(flags),\n        }))\n\n        self.writer.write(example.SerializeToString())\n\n    def get_projection_indices(self, coordinates):\n        indices_map = np.zeros([self.num_points], dtype=np.int64)\n        for i, coord in enumerate(coordinates):\n            x, y = coord\n            indices_map[i] = y * self.im_size + x\n        return indices_map\n\n\nif __name__ == '__main__':\n    base_dir = '/local-scratch/cjc/FloorNet/data/Lianjia-samples'\n    record_writer = RecordWriter(num_points=50000, base_dir=base_dir, phase='test', im_size=256)\n    record_writer.write()\n","sub_path":"RecordWriterLianjia.py","file_name":"RecordWriterLianjia.py","file_ext":"py","file_size_in_byte":5235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"25347687","text":"from sys import stdin\n\nwordsAll = stdin.read().split()\nwordsDict = dict()\nfor word in wordsAll:\n    wordsDict[word] = wordsDict.get(word, 0) + 1\nlistWordAll = []\nfor wordList in wordsDict:\n    listWordAll.append([wordsDict[wordList] * -1, wordList])\nfinalValue = []\nfor wordd in sorted(listWordAll):\n    finalValue.append(wordd[1])\nprint('\\n'.join(finalValue))\n","sub_path":"coursera/7 week/descFreq.py","file_name":"descFreq.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"431076304","text":"\"\"\"\r\n\r\nPython Interchangeable Virtual Instrument Library\r\n\r\nCopyright (c) 2016-2017 Alex Forencich\r\n\r\nPermission is hereby granted, free of charge, to any person obtaining a copy\r\nof this software and associated documentation files (the \"Software\"), to deal\r\nin the Software without restriction, including without limitation the rights\r\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\r\ncopies of the Software, and to permit persons to whom the Software is\r\nfurnished to do so, subject to the following conditions:\r\n\r\nThe above copyright notice and this permission notice shall be included in\r\nall copies or substantial portions of the Software.\r\n\r\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY\r\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\r\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\r\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\r\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\r\nTHE SOFTWARE.\r\n\r\n\"\"\"\r\n\r\nfrom .tektronixMDO4000 import *\r\nfrom .tektronixMDOAFG import *\r\nimport re\r\n\r\n\r\nclass tektronixMDO4104C(tektronixMDO4000, tektronixMDOAFG):\r\n    \"Tektronix MDO4104C IVI oscilloscope driver\"\r\n\r\n    def __init__(self, *args, **kwargs):\r\n        self.__dict__.setdefault(\"_instrument_id\", \"MDO4104C\")\r\n\r\n        super().__init__(*args, **kwargs)\r\n\r\n        self._analog_channel_count = 4\r\n        self._digital_channel_count = 16\r\n        self._bandwidth = 1e9\r\n\r\n        # AFG option\r\n        self._output_count = 1\r\n\r\n        # Add fetch isf\r\n        self._add_method(\r\n            \"channels[].measurement.fetch_isf\",\r\n            self._measurement_fetch_isf,\r\n            ivi.Doc(\r\n                \"\"\"\r\n                This function calls self._ask_raw(b':WAVFrm?') and format the raw bytearray as\r\n                ISF file format, and return the raw byte again.\r\n                \"\"\"\r\n            ),\r\n        )\r\n\r\n        self._init_channels()\r\n        self._init_outputs()\r\n\r\n    def _measurement_fetch_isf(self, index):\r\n        index = ivi.get_index(self._channel_name, index)\r\n\r\n        if self._driver_operation_simulate:\r\n            return b\"\"\r\n\r\n        self._write(\":data:source %s\" % self._channel_name[index])\r\n        self._write(\":data:encdg fastest\")\r\n        self._write(\":data:width 2\")\r\n        self._write(\":data:start 1\")\r\n        self._write(\":data:stop 1e10\")\r\n        self._write(\":VERBose ON\")\r\n        self._write(\":HEADer ON\")\r\n        # check if the channel is valid\r\n        if \"NR_P\" not in self._ask(\":WFMOutpre?\"):\r\n            raise Exception(f\"Channel {self._channel_name[index]} has no waveform data\")\r\n        # Read whole thing\r\n        isf_unformatted = b\"\"\r\n        try:\r\n            isf_unformatted = self._ask_raw(b\":WAVFrm?\")\r\n        except Exception as e:\r\n            print(e)\r\n        finally:\r\n            # reset the verbosity\r\n            self._write(\":VERBose OFF\")\r\n            self._write(\":HEADer OFF\")\r\n\r\n        return isf_unformatted\r\n\r\n    def __del__(self):\r\n        
self.close()\r\n\r\n","sub_path":"ivi/tektronix/tektronixMDO4104C.py","file_name":"tektronixMDO4104C.py","file_ext":"py","file_size_in_byte":3189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"63966829","text":"# -*- coding: utf-8 -*-\nimport constant as const\nfrom bson import ObjectId\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\n\ndef get_applications(db,mdb,aids=[]):\n    application_name = []\n    # devide the aids into email reusme or noemail resume.\n    resid = []\n    # use no-injection method.and fix one aid error\n    resumes_info = db.query(\" select id app_id,resume_id from job_application where  id in %(app_id)s \",\n                            app_id=tuple(aids))\n    for res in resumes_info:\n        if res.get('resume_id', ''):\n            resid.append({'_id': ObjectId(res.get('resume_id',''))})\n    mresumes = list(mdb.application.find({'$or': resid}, {'_id': 1, 'basicinfo': 1, 'file': 1, 'filename': 1, 'source': 1}))\n    # contact application id with resume id.\n    for res in resumes_info:\n        for mresume in mresumes:\n            if res.get('resume_id') == str(mresume.get('_id','')):\n                mresume.update({'app_id': res.get('app_id', 0)})\n    return mresumes\n\n\ndef get_resume_name(mapps):\n    resumes = []\n    for mapp in mapps:\n        r = {}\n        if str(mapp.get('source', 0)) == str(const.RESUME_EMAIL):\n            r.update({'source': 13})\n            r.update({'file': mapp.get('file')})\n            r.update({'filename': mapp.get('filename')})\n            r.update({'download_name': mapp.get('filename')})\n        else:\n            mobile = mapp.get('basicinfo', {}).get('mobile','')\n            name = mapp.get('basicinfo', {}).get('name','')\n            r.update({'source': mapp.get('source',0)})\n            r.update({'name': name})\n            r.update({'mobile': mobile})\n            show_mobile = \"({m})\".format(m=mobile) if mobile else ''\n            dname = \"{name}{mobile}的简历.pdf\".format(name=name, mobile=show_mobile)\n            filename = \"{app_id}.pdf\".format(app_id=mapp.get('app_id', ''))\n            r.update({\"file\": filename.replace(\" \", \"_\")})\n            r.update({'download_name': dname})\n        resumes.append(r)\n    return resumes\n\n\n","sub_path":"src/dao/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":2007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"199339120","text":"import cv2\nimport numpy as np\nimport time\nfrom imutils.video import FPS, FileVideoStream, WebcamVideoStream\nfrom multiprocessing import Process, Queue\nimport multi_tracker as MT\n\nmultiTracker = MT.MultiTracker() # Initialize Mutli-Tracker\n\ndef showMe(frameQueue):\n    while True:\n        frame, total, fps, vw, vh, line_thick = frameQueue.get()\n        if frame is not None:            \n            counter_text = 'Total Fish: {}'.format(total)\n            counter_text_size, counter_text_baseline = cv2.getTextSize(counter_text, cv2.FONT_HERSHEY_PLAIN, 1, 1)\n            frame = cv2.rectangle(frame, (5, 0), (counter_text_size[0] + 5, 20), (0,0,255), -1)\n            frame = cv2.putText(frame, counter_text, (5, 15), cv2.FONT_HERSHEY_PLAIN, 1, (0,0,0), 1)\n\n            fps_text = 'FPS: {}'.format(round(fps.fps(), 2))\n            fps_text_size, fps_text_baseline = cv2.getTextSize(fps_text, cv2.FONT_HERSHEY_PLAIN, 1, 1)    \n            frame = cv2.putText(frame, fps_text, (vw - fps_text_size[0], 15), cv2.FONT_HERSHEY_PLAIN, 1, (0, 0, 0), 1)\n\n            cv2.imshow('ttt', frame)\n            cv2.waitKey(30)\n\ndef main():        \n\n    # Initialize background substractor\n    # Currently CV2 provide 2 types, \"createBackgroundSubtractorKNN\" and \"createBackgroundSubtractorMOG2\"\n    # 1. \"createBackgroundSubtractorKNN\" \n    #   - subtraction is alot cleaner if background color more consistence\n    # 2. \"createBackgroundSubtractorMOG2\" \n    #   - substraction based on Machine Learning (ML), more cleaner if background have too many color for KNN to work.\n    #   - Result of substraction is not as complete compare to KNN\n    backSub = cv2.createBackgroundSubtractorKNN(history=450, dist2Threshold=150.0, detectShadows=True)\n\n    # cap = cv2.VideoCapture('fish4.mp4')\n    #cap = FileVideoStream('fish4.mp4').start() #initialize file video reader\n    cap = WebcamVideoStream(0)\n    cap.start()\n    \n    time.sleep(1.0) # Block for 1 sec, to let \"cap\" buffer frames\n    frame = cap.read() # Get current frame    \n    vh, vw = frame.shape[:2] # Get current frame height and width, [0]: height, [1]: width\n    vh = int(vh / 2)\n    vw = int(vw / 2)\n    videWriter = cv2.VideoWriter('out.avi', cv2.VideoWriter_fourcc('M','J','P','G'), 30, (vw, vh)) #Initialize video writer\n\n    # Set default center line\n    line_thickness = 5 # default line thickness to 5px\n    line_center = int((vw / 2)) # divide 2 with video width (vw) to get video center pixel\n\n    fishes = [] # store fish id & tracker\n    counter = 0 # fish counter\n\n    fps = FPS().start() #Initialize FPS counter\n\n    ttQ = Queue()\n    tt = Process(target=showMe, args=(ttQ,))\n    tt.daemon = True\n    tt.start()\n\n    # While cap has more frame continue\n    while True:            \n        frame = cv2.resize(frame, (vw, vh), cv2.INTER_AREA)\n        blank_frame = frame.copy() # Create a copy of original image for filtering    \n        trackers = multiTracker.update(frame)\n        if len(trackers) > 0:\n            for tracker in trackers:\n                c, bbox = tracker # c = id\n                #success, bbox = tracker.update(frame) # run tracker update based on current frame\n                \n                # Should tracker success = false, removed it from array to prevent future processes\n                #if not success:\n                    # fishes.pop(idx)\n                    # continue\n                \n                # Calculate & Draw\n                offset 
= 0\n                xy1 = (int(bbox[0]) - offset, int(bbox[1]) - offset) # staring X and Y bounding box\n                xy2 = (int(bbox[2]) + offset, int(bbox[3]) + offset) # Width and Height bounding box\n\n                # Note:\n                # OpenCV Tracker return Region of Interest (ROI) which consist\n                # 1. starting point X and Y coordinate in pixel\n                # 2. width and height of the bounding box in pixel \n                # OpenCV rectangle function however is draw using 2 set of coordinate in pixel\n                # starting point (Top Left) and end point (Bottom Right)\n                # \"xy1\" is the starting coordinate (Top Left), as such using\n                #     ((xy1[0] + xy2[0]), (xy1[1] + xy2[1]))\n                # we can calculate the 2nd set coordinate (Bottom Right)            \n                # Bottom = starting X point + bounding box width        \n                # Right = starting Y point + bounding box height    \n                cv2.rectangle(frame, xy1, ((xy1[0] + xy2[0]), (xy1[1] + xy2[1])), (0,0,255), 1)\n                cv2.putText(frame, 'id: {}'.format(c), (xy1[0], xy1[1] - 10), cv2.FONT_HERSHEY_PLAIN, 1, (0,0,0), 1, cv2.LINE_AA)\n                cv2.rectangle(blank_frame, xy1, ((xy1[0] + xy2[0]), (xy1[1] + xy2[1])), (255, 255, 255), -1)\n\n        # Note:\n        # OpenCV \"findContours\" function only work with image in gray color    \n        gFrame = cv2.cvtColor(blank_frame, cv2.COLOR_RGB2GRAY) # Convert image to gray\n        fMask = backSub.apply(gFrame) # Apply background seperation algorythm\n        fMask = cv2.morphologyEx(fMask, cv2.MORPH_OPEN, np.ones((5,5), np.uint8), iterations=2) # Fix deform contour\n        fMask = cv2.morphologyEx(fMask, cv2.MORPH_CLOSE, np.ones((5,5), np.uint8), iterations=2) # Fix deform contour    \n        fMask = cv2.bitwise_and(gFrame, gFrame, mask=fMask) # combine targeted frame with mask    \n        fMask = cv2.GaussianBlur(fMask, (5,5), 0) # add blur to further reduce pixel deform\n        ret, thresh = cv2.threshold(fMask, 50, 255, cv2.THRESH_BINARY) # Create threshold algorythm\n        contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)  # find contours\n\n        # Loop through all found contours\n        # Should any contour found and is within \"center_line\" tag and track it\n        for contour in contours:\n            x, y, w, h = cv2.boundingRect(contour)        \n            if x + w >= int(line_center) and x + w <= int(line_center + line_thickness):\n                counter += 1\n                # tracker = cv2.TrackerCSRT_create()\n                # tracker.init(frame, (int(x), int(y), int(w), int(h)))\n                # fishes.append((counter, tracker))\n\n                multiTracker.add(counter, frame, (int(x), int(y), int(w), int(h)))\n                # cv2.rectangle(frame, (int(x), int(y)), (int(x + w), int(y + h)), (0,0,255), 1)\n\n        # Draw \"line_center\"\n        frame = cv2.rectangle(frame, (line_center, vh), (line_center + line_thickness, 0), (0,0,0), -1)\n        # Calculate, Generate and Draw \"Total Fish\" text\n        \n        # Calculate, Generate and Draw \"FPS\" text\n        fps.update()\n        fps.stop()\n        \n        tt = (\n            frame,\n            counter,\n            fps,\n            vw,\n            vh,\n            line_thickness\n        )\n        ttQ.put(tt)\n\n        # Display the final combine of the orignal frame including tracked item\n        # cv2.imshow('frame', 
frame)\n        # cv2.imshow('mask', fMask)\n        # Display frame is being refresh every 30ms, change to 0 if manual forward required \n        key = cv2.waitKey(30)\n        # should \"Q\" is press, stop loop\n        if key == ord('q'):\n            break\n\n        # videWriter.write(frame) # Write frame to video output\n        # ok, frame = cap.read()\n        frame = cap.read()\n\n\nif __name__ == '__main__':\n    main()","sub_path":"run4.py","file_name":"run4.py","file_ext":"py","file_size_in_byte":7328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"530764907","text":"import re, sys, os, shutil, operator\r\nfrom collections import OrderedDict\r\n\t\r\ndef freq_patterns(inputFolder, outputFolder):\r\n\r\n\tfor k in range(0,10):\r\n\t\r\n\t\tfileName = 'R'+str(k)\r\n\t\t\r\n\t\tf = open(inputFolder+\"/\"+fileName+\".txt\",\"r\")\r\n\t\r\n\t\t#temporary lists for storing files and blocks\r\n\t\tfilesList = []\r\n\t\tfinalFilesList = []\r\n\t\ttempBlocksList = []\r\n\t\tfinalBlocksList = []\r\n\t\ttempFileBlockList=[]\r\n\t\ttempFileBlockFinalList=[]\r\n\t\ttempFileBlockFinalList2=[]\r\n\t\tdataList = []\r\n\t\tfinalDataList = []\r\n\t\tfileSupport={}\r\n\t\tblockSupport={}\r\n\t\tfileForPattern=[]\r\n\t\ttempDictBlockList=[]\r\n\t\tfileBlockDict={}\r\n\t\tforpattern=[]\r\n\t\tforpattern2=[]\r\n\t\ttempfeqele=[]\r\n\t\ttempfeqele2=[]\r\n\t\tpattern=\"\"\r\n\t\tpattern2=\"\"\r\n\t\tpattern3=\"\"\r\n\t\tfinalPattern=[]\r\n\t\tfinalPattern2=[]\r\n\t\tfinalPattern3=[]\r\n\t\tfileSupport2={}\r\n\t\tfile=[]\r\n\t\tfilesListPattern=[]\r\n\t\t\r\n\t\tprint(\"Scanning Files for \"+fileName)\r\n\t\tfor x in f:\r\n\t\t\tdataList.extend(re.findall('D[0-9]*[0-9]',x))   #Datasets list\r\n\t\t\r\n\t\tprint(\"Initiating the process \"+fileName)\r\n\t\t#print(\"Caclulating File support, do not close the terminal\\n\")\r\n\t\t\t\t\r\n\t\tfor x in dataList:\r\n\t\t\tif not x in finalDataList:\r\n\t\t\t\tfinalDataList.append(x)\t #Deleting duplicate datasets for getting the dataset list\r\n\t\t\r\n\t\tprint(\"Creating temporary files \"+fileName)\r\n\t\t#creating split logs datasets wise\r\n\t\tfor y in finalDataList:\t\r\n\t\t\tf7 = open(inputFolder+\"/\"+fileName+\".txt\",\"r\")\r\n\t\t\tif not os.path.exists(\"temp/\"+fileName.rstrip(\".txt\")+\"_temp/\"+fileName.rstrip(\".txt\")):\r\n\t\t\t\tos.makedirs(\"temp/\"+fileName.rstrip(\".txt\")+\"_temp/\"+fileName.rstrip(\".txt\"))\t\r\n\t\t\tfor x in f7:\t\t\t\r\n\t\t\t\tf8 = open(\"temp/\"+fileName.rstrip(\".txt\")+\"_temp/\"+fileName.rstrip(\".txt\")+\"/\"+y+\".txt\",\"a+\")\r\n\t\t\t\tif y in x:\r\n\t\t\t\t\tf8.writelines(x)\r\n\t\t\tf7.close()\r\n\t\t\tf8.close()\r\n\t\t# ...\r\n\t\t\r\n\t\tf2 = open(outputFolder+\"/\"+fileName.rstrip('.txt')+\"-BlockSupport.txt\",\"a+\")\r\n\t\t\r\n\t\tprint(\"Caclulating Block support, do not close the terminal \"+fileName)\r\n\t\t\r\n\t\t#seggregating blocks files wise and storing in temporary files\r\n\t\tfor j in finalDataList:\r\n\t\t\t\r\n\t\t\tif not os.path.exists(\"temp/\"+fileName.rstrip(\".txt\")+\"_temp/\"+j):\r\n\t\t\t\tos.makedirs(\"temp/\"+fileName.rstrip(\".txt\")+\"_temp/\"+j)\r\n\t\t\t\r\n\t\t\tf6=open(\"temp/\"+fileName.rstrip(\".txt\")+\"_temp/\"+fileName.rstrip(\".txt\")+\"/\"+j+\".txt\",\"r\")\r\n\t\t\t\r\n\t\t\tfor x in f6:\r\n\t\t\t\tfilesList.extend(re.findall('F[0-9]*[0-9]',x)) #files list\r\n\t\t\t\r\n\t\t\tfor x in filesList:\r\n\t\t\t\tif not x in finalFilesList:\r\n\t\t\t\t\tfinalFilesList.append(x) #Deleting duplicate files for getting the files list\r\n\t\t\t\t\tl=filesList.count(x) #Calculating file support\r\n\t\t\t\t\tfileSupport[x]=l #Stroring file support in a dictonary\r\n\t\t\t\t\tfileSupport2[fileName.rstrip(\".txt\")+\" \"+j+\" \"+x]=l\r\n\t\t\t\t\r\n\t\t\t\r\n\t\t\t\t\r\n\t\t\tfor y in finalFilesList:\r\n\t\t\t\tf4 = open(\"temp/\"+fileName.rstrip(\".txt\")+\"_temp/\"+fileName.rstrip(\".txt\")+\"/\"+j+\".txt\",\"r\")\r\n\t\t\t\tfor x in f4:\r\n\t\t\t\t\tif re.search(y+\" \",x):\r\n\t\t\t\t\t\ttempFileBlockList.extend(re.findall('B[0-9]*[0-9]',x))\r\n\t\t\t\t\t\tfor k in tempFileBlockList:\r\n\t\t\t\t\t\t\tif not k in 
tempFileBlockFinalList:\r\n\t\t\t\t\t\t\t\ttempFileBlockFinalList.append(k)\r\n\t\t\t\t\t\t\t\t\r\n\t\t\t\t\t\ttempFileBlockFinalList2.extend(tempFileBlockFinalList)\r\n\t\t\t\t\tdel tempFileBlockFinalList[:]\r\n\t\t\t\t\tdel tempFileBlockList[:]\r\n\t\t\t\t\r\n\t\t\t\tf3 = open(\"temp/\"+fileName.rstrip(\".txt\")+\"_temp/\"+j+\"/\"+fileName.rstrip(\".txt\")+\" \"+j+\" \"+y+\".txt\",\"a+\")\r\n\t\t\t\tfor z in tempFileBlockFinalList2:\r\n\t\t\t\t\tf3.write(z + \"\\n\")\r\n\t\t\t\tf3.close()\r\n\t\t\t\r\n\t\t\t\tdel tempFileBlockFinalList[:]\r\n\t\t\t\tdel tempFileBlockFinalList2[:]\r\n\t\t\t\tf4.close()\r\n\t\t\r\n\t\t\t\r\n\t\t\t\r\n\t\t\t#Calculating File-Block support\t\t\t\r\n\t\t\tfor x in finalFilesList:\r\n\t\t\t\tf5= open(\"temp/\"+fileName.rstrip(\".txt\")+\"_temp/\"+j+\"/\"+fileName.rstrip(\".txt\")+\" \"+j+\" \"+x+\".txt\",\"r\")\r\n\t\t\t\tfor y in f5:\r\n\t\t\t\t\tr=y.rstrip('\\n')\r\n\t\t\t\t\ttempBlocksList.append(str(r))\r\n\t\t\t\r\n\t\t\t\tfor z in tempBlocksList:\r\n\t\t\t\t\tif not z in finalBlocksList:\r\n\t\t\t\t\t\tfinalBlocksList.append(z)\r\n\t\t\t\t\t\t\t\r\n\t\t\t\tfor b in finalBlocksList:\r\n\t\t\t\t\tq=float(tempBlocksList.count(b))\r\n\t\t\t\t\tw=float(fileSupport[x])\t\r\n\t\t\t\t\tsupport2=q/w\r\n\t\t\t\t\tsupport=round(support2,2)\r\n\t\t\t\t\tif support >= 0.6:\r\n\t\t\t\t\t\tblockSupport[fileName.rstrip('.txt')+j+\" \"+x+b+\" \"]=str(support) \r\n\t\t\t\t\t\ttempDictBlockList.extend(b)\r\n\t\t\t\t\t\tforpattern.append(fileName.rstrip('.txt')+\" \"+j+\" \"+x)\r\n\t\t\t\t\t\tf9=open(\"temp/\"+fileName.rstrip(\".txt\")+\"_temp/FrequentPatterns/\"+fileName.rstrip('.txt')+\" \"+j+\" \"+x+\".txt\",\"a+\")\r\n\t\t\t\t\t\tf9.writelines(b+\"\\n\")\r\n\t\t\t\t\t\tf9.close()\r\n\t\t\t\tfileBlockDict[x]=tempDictBlockList\t\r\n\t\t\t\tdel tempBlocksList[:]\r\n\t\t\t\tdel finalBlocksList[:]\r\n\t\t\t\tdel tempDictBlockList[:]\r\n\t\t\tfileBlockDict.clear()\r\n\t\t\tfileSupport.clear()\r\n\t\t\tdel filesList[:]\r\n\t\t\tdel finalFilesList[:]\r\n\t\t\t\r\n\t\t\tblockSupportSorted = sorted(blockSupport, key=blockSupport.get, reverse=True)\r\n\t\t\t\t\r\n\t\t\tfor x in blockSupportSorted:\r\n\t\t\t\tf2.writelines(x+\" \"+blockSupport[x]+\"\\n\")\r\n\t\t\t\r\n\t\t\tdel blockSupportSorted[:]\r\n\t\t\tblockSupport.clear()\r\n\t\t\t\r\n\t\tfor z in forpattern:\r\n\t\t\tif not z in forpattern2:\r\n\t\t\t\tforpattern2.append(z)\t\r\n\t\t\t\r\n\t\tf8= open(outputFolder+\"/\"+fileName.rstrip('.txt')+\"-FrequentPattern.txt\",\"a+\")\r\n\t\t\r\n\t\t###\r\n\t\tprint(\"Creating Frequent Patterns \"+fileName)\r\n\t\tfor x in forpattern2:\r\n\t\t\tf10=open(inputFolder+\"/\"+fileName+\".txt\",\"r\")\r\n\t\t\tfor y in f10:\r\n\t\t\t\tif re.search(x+\" \",y):\r\n\t\t\t\t\tf11=open(\"temp/\"+fileName.rstrip(\".txt\")+\"_temp/FrequentPatterns/\"+x+\".txt\",\"r\")\r\n\t\t\t\t\t\r\n\t\t\t\t\tfor z in f11:\r\n\t\t\t\t\t\tif z.rstrip('\\n')+\"\" in y:\r\n\t\t\t\t\t\t\ttempfeqele.append(z)\r\n\t\t\t\t\t\r\n\t\t\t\t\tfor p in tempfeqele:\r\n\t\t\t\t\t\tif not p in tempfeqele2:\r\n\t\t\t\t\t\t\ttempfeqele2.append(p)\t\r\n\t\t\t\t\t\r\n\t\t\t\t\tdel tempfeqele[:]\t\r\n\t\t\t\t\t\r\n\t\t\t\t\tif len(tempfeqele2) > 0:\r\n\t\t\t\t\t\tfor d in tempfeqele2:\t\t\r\n\t\t\t\t\t\t\tpattern=pattern+d.rstrip('\\n')+\" \"\r\n\t\t\t\t\t\r\n\t\t\t\t\t\tpattern2=x+\" \"+pattern\r\n\t\t\t\t\t\r\n\t\t\t\t\t\tfinalPattern.append(pattern2)\r\n\t\t\t\t\t\r\n\t\t\t\t\t\tpattern=\"\"\r\n\t\t\t\t\t\tpattern2=\"\"\r\n\t\t\t\t\tf11.close() \r\n\t\t\t\t\tdel tempfeqele2[:]\r\n\t\t\r\n\t\t\tdel tempfeqele[:]\r\n\t\t\tdel 
tempfeqele2[:]\r\n\t\t\tf10.close()\r\n\t\t\t\r\n\t\t\t\r\n\t\t\t\r\n\t\tfor x in finalPattern:\r\n\t\t\tif not x in finalPattern2:\r\n\t\t\t\tfinalPattern2.append(x)\r\n\t\t\t\t\r\n\t\tfor x in finalPattern:\t\r\n\t\t\tfilesListPattern.extend(re.findall('F[0-9]*[0-9]',x))\t\r\n\t\t\r\n\t\tfor x in finalPattern2:\r\n\t\t\tfile.append(re.findall('F[0-9]*[0-9]',x))\r\n\t\t\ta=float(finalPattern.count(x))\r\n\t\t\tb=float(filesListPattern.count(file[0][0]))\r\n\t\t\tsup=a/b\r\n\t\t\tsup2=round(sup,2)\r\n\t\t\tf8.writelines(x+\" \"+str(sup2)+\"\\n\")\r\n\t\t\tdel file[:]\r\n\t\t###\r\n\t\t\r\n\t\t#f.close(); f2.close(); f4.close(); f5.close()    (re.findall('R[0-9]+ D[0-9]*[0-9]+ F[0-9]*[0-9]',x))\r\n\t\t\r\n\t\t#removing all temporary files created\t\r\n\t\t#shutil.rmtree('temp')\r\n\t\t\r\n\t\tprint(\"Sucessfully done!!! \\nCheck the output folder\")\r\n\t\t\r\n#freq_patterns(\"D:\\input\",\"D:\\op\")\t\t","sub_path":"localSupport.py","file_name":"localSupport.py","file_ext":"py","file_size_in_byte":6549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"99198586","text":"import os\n\nfrom Loadsmart import CreateAssociation\nfrom Loadsmart import OptimalMapping\nfrom Parser import ParseFile\n\n\n# Main Entry Point\ndef main():\n    # For simplicity, no exception error handling is being implemented\n    \n    # The 'csv' data file names must be; 'cargo.csv' and 'trucks.csv'\n    # The data files must reside in the same directory as the executable\n    \n    # Get the application directory\n    path = os.path.dirname(os.path.abspath(__file__))\n    \n    cargo = path + r'/data/cargo.csv'\n    truck = path + r'/data/trucks.csv'\n    \n    # Parse each file according to its type\n    cargos = ParseFile(cargo, 'Cargo')\n    trucks = ParseFile(truck, 'Trucks')\n    \n    # Create the collection containing the 'Cargos-Trucks' association\n    mmp = CreateAssociation(cargos, trucks)\n    \n    # Discover and store the optimal truck for each possible cargo delivery\n    optimal = OptimalMapping(cargos, mmp)\n    \n    # Display the result\n    for item in sorted(optimal.items()):\n        # Unpack first tuple\n        product, val = item\n        # Unpack second tuple\n        truck, distance = val\n        print('{:<40} {:<40} {:10.2f}'.format(product, truck, distance))\n\n\n# Execute Main Entry Point\nif __name__ == \"__main__\":\n    main()\n","sub_path":"Windows/Loadsmart/PyLoadsmart/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"502894898","text":"#printing all number in same order\nn=int(input())\ncount=0\na=[]\ntemp=n\nwhile temp>0:\n    letter=temp%10\n    a.append(letter)\n    temp=int(temp/10)\nprint((a[::-1]))\n","sub_path":"Printingallnuminsameor.py","file_name":"Printingallnuminsameor.py","file_ext":"py","file_size_in_byte":163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"473545222","text":"'''\ncollection of neurons with weights for inputs\n'''\n\nfrom random import random\nfrom math import exp\nclass Layer(object):\n    '''\n    this represents a layer of an artificial neural network\n    '''\n    def __init__(self, number_of_nodes, number_of_inputs, activation, activation_derivative):\n        self.activation = activation\n        self.activation_derivative = activation_derivative\n        self.number_of_nodes = number_of_nodes\n        self.number_of_inputs = number_of_inputs\n        self.weights = [[random() for i in range(number_of_inputs + 1)] for j in range(number_of_nodes)]\n        self.errors = [0.0 for i in range(number_of_nodes)]\n        self.results = [0.0 for i in range(number_of_nodes)]\n        self.inputs = None\n\n    def forward(self, set_of_inputs):\n        '''\n        calculates the output of a layer to a set of given inputs\n        '''\n        self.inputs = set_of_inputs\n        if not len(set_of_inputs) == self.number_of_inputs:\n            print('wrong amount of inputs given. expected {} but got {}'.\n                  format(self.number_of_inputs, len(set_of_inputs)))\n        for i in range(self.number_of_nodes):\n            weighted_sum = self.weights[i][-1]\n            for j, value in enumerate(set_of_inputs):\n                weighted_sum += value * self.weights[i][j]\n            self.results[i] = self.activation(weighted_sum)\n        return self.results[:]\n\n    def calculate_errors_output(self, set_of_expected_outputs):\n        '''\n        calculates the errors in this output layer\n        '''\n        if not len(set_of_expected_outputs) == self.number_of_nodes:\n            print('wrong amount of expected outputs given. expected {} but got {}'.\n                  format(self.number_of_nodes, len(set_of_expected_outputs)))\n        for i, t in enumerate(set_of_expected_outputs):\n            o = self.results[i]\n            self.errors[i] = (t-o) * self.activation_derivative(o)\n\n    def calculate_errors_hidden(self, next_layer):\n        '''\n        calculates the errors in this hidden layer using the layer next to it\n        '''\n        if not next_layer.number_of_inputs == self.number_of_nodes:\n            print('wrong amount of expected outputs given. expected {} but got {}'.\n                  format(self.number_of_nodes, next_layer.number_of_inputs))\n        for j in range(self.number_of_nodes):\n            o = self.results[j]\n            downstream_weighted_errors = sum([next_layer.weights[k][j]*next_layer.errors[k] for k in range(next_layer.number_of_nodes)])\n            self.errors[j] = downstream_weighted_errors * self.activation_derivative(o)\n\n    def update_weights(self, learning_rate):\n        '''\n        updates weights of all neurons in a layer by a given learning rate\n        '''\n        for node in range(self.number_of_nodes):\n            learning_rate_error_product = learning_rate * self.errors[node]\n            for input_value in range(self.number_of_inputs):\n                delta = learning_rate_error_product * self.inputs[input_value]\n                self.weights[node][input_value] += delta\n            self.weights[node][-1] += learning_rate_error_product\n\n    def __str__(self):\n        return str(self.weights)\n","sub_path":"Assignments/Assignment 4/layer.py","file_name":"layer.py","file_ext":"py","file_size_in_byte":3205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"389950714","text":"# coding=utf-8\nimport json\nimport logging\nimport random\nfrom typing import Any, Dict\n\nfrom discord import Embed\nfrom discord.ext.commands import AutoShardedBot, Context, command\n\nlog = logging.getLogger(__name__)\n\n\nclass Snakes:\n    \"\"\"\n    Snake-related commands\n    \"\"\"\n\n    def __init__(self, bot: AutoShardedBot):\n        self.bot = bot\n\n    async def get_snek(self, name: str = None) -> Dict[str, Any]:\n        \"\"\"\n        Go online and fetch information about a snake\n\n        The information includes the name of the snake, a picture of the snake, and various other pieces of info.\n        What information you get for the snake is up to you. Be creative!\n\n        If \"python\" is given as the snake name, you should return information about the programming language, but with\n        all the information you'd provide for a real snake. Try to have some fun with this!\n\n        :param name: Optional, the name of the snake to get information for - omit for a random snake\n        :return: A dict containing information on a snake\n        \"\"\"\n        with open('bot/db/snakes.json', 'r') as file:\n                snakes_dict = json.load(file)\n\n        if not name:\n            _, snake = random.choice(list(snakes_dict.items()))\n\n        elif name.lower() not in snakes_dict:\n            snake = \"Not Found\"\n\n        else:\n            snake = snakes_dict[name.lower()]\n            if snake['name'] == \"python\":\n                snake = {\n                    'name': snake['name'],\n                    'description': snake['description'],\n                    'creator': snake['creator'],\n                    'created': snake['created'],\n                    'image': snake['image']\n                }\n\n        return snake\n\n    @command(name='get')\n    async def get(self, ctx: Context, name: str = None):\n        \"\"\"\n        Go online and fetch information about a snake\n\n        This should make use of your `get_snek` method, using it to get information about a snake. This information\n        should be sent back to Discord in an embed.\n\n        :param ctx: Context object passed from discord.py\n        :param name: Optional, the name of the snake to get information for - omit for a random snake\n        \"\"\"\n        snake = await self.get_snek(name)\n\n        if snake != \"Not Found\":\n            embed = Embed(\n                title=snake['name'].title(),\n                description=snake['description']\n            )\n\n            if snake['name'] != \"python\":\n                embed.add_field(name=\"Where can you find them?\", value=snake['location'])\n                embed.add_field(name=\"Are they venomous?\", value=snake['venomous'])\n                embed.set_image(url=snake['image'])\n            else:\n                embed.add_field(name=\"Who created it?\", value=snake['creator'])\n                embed.add_field(name=\"When was it created?\", value=snake['created'])\n                embed.set_thumbnail(url=snake['image'])\n        else:\n            embed = Embed(\n                title=\"Snake Not Found\",\n                description=\"The snake you entered was not found.\"\n            )\n\n        await ctx.send(embed=embed)\n\n    @command(name='movies')\n    async def movies(self, ctx: Context, movie_name: str = None):\n        \"\"\"\n        Shows 5 snake movies. 
Warning: They are all pretty bad.\n        \"\"\"\n\n        with open('bot/db/movies.json', 'r') as file:\n            movies_dict = json.load(file)\n\n        if not movie_name:\n            embed = Embed(\n                title=\"Snake Movies\",\n                description=\"A list of snake movies.\",\n            )\n\n            for movie in movies_dict.values():\n                embed.add_field(name=movie['title'].title(), value=f\"bot.movies('{movie['title'].title()}')\\n\\n\")\n\n            embed.set_thumbnail(url=\"https://i.imgur.com/dB38NwN.png\")\n\n        else:\n            movie_name = movie_name.lower()\n            if movie_name in movies_dict:\n                embed = Embed(\n                    title=movies_dict[movie_name]['title'].title(),\n                    description=movies_dict[movie_name]['description']\n                )\n\n                embed.add_field(name=\"Director\", value=movies_dict[movie_name]['director'])\n                embed.add_field(name=\"Release Date\", value=movies_dict[movie_name]['released'])\n                embed.set_image(url=movies_dict[movie_name]['image'])\n            else:\n                embed = Embed(\n                    title=\"Movie Not Found\",\n                    description=\"The movie you entered was not found.\"\n                )\n\n        await ctx.send(embed=embed)\n\n\ndef setup(bot):\n    bot.add_cog(Snakes(bot))\n    log.info(\"Cog loaded: Snakes\")\n","sub_path":"bot/cogs/snakes.py","file_name":"snakes.py","file_ext":"py","file_size_in_byte":4660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"476045625","text":"# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nfrom collections import OrderedDict\nimport os\nimport time\nimport warnings\nfrom itertools import islice\n\nimport numpy as np\nimport torch.nn.functional as F\n\ntry:\n    from apex import amp\n    AMP_AVAILABLE = True\nexcept ModuleNotFoundError:\n    AMP_AVAILABLE = False\nimport torch\nimport torch.cuda as cuda\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\n\nfrom torchvision.models.video.resnet import VideoResNet\n\nfrom .load_model import *\nfrom .module_versions import  *\n\nimport torchvision\n\n# from vu.utils import Config\n# from vu.data import (\n#     DEFAULT_MEAN,\n#     DEFAULT_STD,\n#     show_batch as _show_batch,\n#     VideoDataset,\n# )\n#\n# from vu.utils.metrics import accuracy, AverageMeter, retrieve_gt_pd, epoch_accuracy_fscore\n\n# From https://github.com/moabitcoin/ig65m-pytorch\nTORCH_R2PLUS1D = \"moabitcoin/ig65m-pytorch\"\nMODELS = {\n    # model: output classes\n    'r2plus1d_34_32_ig65m': 359,\n    'r2plus1d_34_32_kinetics': 400,\n    'r2plus1d_34_8_ig65m': 487,\n    'r2plus1d_34_8_kinetics': 400,\n}\n\nclass R2Plus1D(object):\n    def __init__(self, cfgs):\n        self.configs = cfgs\n\n        self.model = self.init_model(\n            self.configs.sample_length,\n            self.configs.base_model,\n            self.configs.num_classes,\n            self.configs.is_treble,\n            self.configs.extended_version,\n            self.configs.is_features\n        )\n        self.model_name = \"r2plus1d_34_{}_{}\".format(self.configs.sample_length, self.configs.base_model)\n\n\n    @staticmethod\n    def init_model(sample_length, base_model, num_classes=None, is_treble=False, extended_version = False, is_features = False):\n        '''if sample_length not in (8, 32):\n            raise ValueError(\n                \"Not supported input frame length {}. Should be 8 or 32\"\n                .format(sample_length)\n            )'''\n        if base_model not in ('ig65m', 'kinetics'):\n            raise ValueError(\n                \"Not supported model {}. 
Should be 'ig65m' or 'kinetics'\"\n                    .format(base_model)\n            )\n\n        model_name = \"r2plus1d_34_{}_{}\".format(32, base_model)\n\n        print(\"Loading {} model\".format(model_name))\n\n        model = torch.hub.load(\n            TORCH_R2PLUS1D, model_name, num_classes=MODELS[model_name], pretrained=True\n        )\n\n        print('nombre de classe {:d}'.format(num_classes))\n\n        # model.replace_logits(num_classes)\n\n        # Replace head\n        if num_classes is not None:\n            model.fc = nn.Linear(model.fc.in_features, num_classes)\n\n        #model_feat = MyModel(model)\n        if is_treble:\n            if not extended_version:\n                model = MyTwoStreamModel(model)\n            else:\n                model = MyTwoStreamModelExtended(model)\n        else:\n            model = MyModel(model)\n\n\n\n        return model\n\n\n\n    def freeze(self):\n        \"\"\"Freeze model except the last layer\"\"\"\n        self._set_requires_grad(False)\n        for param in self.model.fc.parameters():\n            param.requires_grad = True\n\n    def unfreeze(self):\n        self._set_requires_grad(True)\n\n    def _set_requires_grad(self, requires_grad=True):\n        for param in self.model.parameters():\n            param.requires_grad = requires_grad\n\n    def get_fc(self):\n        return self.model.fc\n\n","sub_path":"models/backbones/r2p1d/r2p1d.py","file_name":"r2p1d.py","file_ext":"py","file_size_in_byte":3416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"400005315","text":"import json\n\ndata = json.load(open('geocoded.json'))\n\nfixture = []\nfor i,datum in enumerate(data):\n    model = {}\n    datum.pop('sid', None)\n    model['model'] = 'maplist.film'\n    model['pk'] = i\n    model['fields'] = datum\n    fixture.append(model)\nf = open('geocoded-fixture.json','w')\nf.write(json.dumps(fixture))\nf.close()\n","sub_path":"_geocode-data/fixture.py","file_name":"fixture.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"576185710","text":"import unittest\nimport importlib\nimport sys\nimport inspect\nimport imp\n\nclass Get_result_of_tests:\n    def __init__(self, filename,package):\n        self.module = package+'.'+filename\n        self.mod = importlib.import_module(self.module)\n        importlib.reload(self.mod)\n        self.filename = filename\n        self.package = package\n        self.classes = []\n        for name, obj in inspect.getmembers(sys.modules[self.module]):\n            if inspect.isclass(obj):\n               self.classes.append(name)\n        self.clas= \"MyTest\"\n    def get_result(self):\n        loader = unittest.TestLoader()\n        mod = getattr(self.mod,self.clas)\n        suite = unittest.TestSuite((\n            loader.loadTestsFromTestCase(mod)\n        ))\n        print(suite)\n\n       # runner = unittest.TextTestRunner(verbosity=2)\n        list_of_result = list()\n        for i in suite:\n            t = i.run()\n            list_of_result.append(1-len(t.failures))\n        print(\"lista wyników unittestów dla wszystkich testów\",list_of_result)\n\n\n\n\n# g = Get_result_of_tests(\"unit\",\"test_package\")\n# g.get_result()\n# #\n# g = Get_result_of_tests(\"tests1\",\"test_package\")\n# g.get_result()\n\n","sub_path":"get_result.py","file_name":"get_result.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"234855055","text":"#!env python3\n# -*- coding: utf-8 -*-\n\ntitles  =  ['Creature of Habit', 'Crewel Fate']\nplots = ['A nun turns into a monster', 'A haunted yarn shop']\nprint({x:y for x, y in zip(titles, plots)})\n\n\n'''\nclass OopsException(Exception):\n    pass\n\na = 1\nb = 0\ntry:\n    raise(OopsException('panic'))\nexcept OopsException as exc:\n    print('Caught an oops')\n\n''\ndef test(func):\n    def new_function(*args, **kwargs):\n        print('start')\n        result = func(*args, **kwargs)\n        print('end')\n        return(result)\n    return(new_function)\n\n@test\ndef add_inits(x, y):\n    return(x + y)\n\nadd_inits(1, 2)\n\n''\ndef get_odds():\n    return((x for x in range(10) if (x % 2) == 1))\n\nfor number in get_odds():\n    print(number)\n\n''\ndef good():\n    return(['Harry', 'Ron', 'Hermione'])\n\nprint(good())\n\n''\nnumber_thing = ('Got ' + str(number) for number in range(10))\nfor number in number_thing:\n    print(number)\n\n''\nodds = {x for x in range(10) if x % 2 == 1}\nprint(odds)\n''\nsquares = {x:x**2 for x in range(10)}\nprint(squares)\n\n''\neven = [x for x in range(10) if x % 2 ==0]\nprint(even)\n''\nfor i in [3, 2, 1, 0]:\n    print(i)\n\n''\nguess_me = 7\nstart = 1\nwhile start <= guess_me:\n    if start < guess_me:\n        print('too low')\n    elif start == guess_me:\n        print('found it')\n    else:\n        print('oops')\n        break\n    start += 1\n\n''\nguess_me = 7\nif guess_me < 7:\n    print('too low')\nelif guess_me > 7:\n    print('too high')\nelse:\n    print('just right')\n\n'''\n","sub_path":"python/books/Introduciing_Python/ch04/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"364257009","text":"from django.contrib import admin\n\nfrom .models import Media, MediaType, Name\n\n\nclass NameInline(admin.TabularInline):\n    model = Name\n\n\nclass MediaAdmin(admin.ModelAdmin):\n    list_display = [\n        'default_name',\n        'slug',\n        'type',\n        'start_date',\n    ]\n\n    search_fields = [\n        'default_name',\n        'name__name',\n    ]\n\n    readonly_fields = [\n        'default_name',\n    ]\n\n    list_filter = [\n        'type'\n    ]\n\n    inlines = [\n        NameInline,\n    ]\n\n\nadmin.site.register(Media, MediaAdmin)\nadmin.site.register(MediaType)\n","sub_path":"media/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"232966415","text":"#!/usr/bin/env python\nimport math,sys\nifile = sys.argv[1]\nf = open(ifile)\nlines = f.readlines()\nl = len(lines)\ntr = int(math.ceil(0.6*l)) #first 60% to the training set\nvl = int(math.ceil(0.8*l)) #next till 80% to the validation set\n#rest to the test set\nftr = open(ifile+\".train\",\"w\")\n[ftr.write(line) for line in lines[0:tr]]\nfvl = open(ifile+\".valid\",\"w\")\n[fvl.write(line) for line in lines[tr:vl]]\nfts = open(ifile+\".test\",\"w\")\n[fts.write(line) for line in lines[vl:]]\n","sub_path":"split.py","file_name":"split.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"57399936","text":"import numpy as np\nfrom tqdm import tqdm\nfrom tensorflow.python.keras.layers import LSTM, Dense\nfrom tensorflow.python.keras.models import Sequential\nfrom tensorflow.python.keras.optimizers import Adam\nfrom sklearn.metrics import log_loss\n\nclass Model(object):\n\tdef __init__(self):\n\t\tsuper(Model, self).__init__()\n\t\tself.model = None\n\t\n\tdef fit(self, X_train, y_train):\n\t\traise NotImplementedError\n\t\n\tdef transform(self, X_test):\n\t\traise NotImplementedError\n\nclass RecurrentModel(Model):\n\tdef __init__(self, epochs=500, steps_per_epoch=10):\n\t\tsuper(Model, self).__init__()\n\t\tself.epochs = epochs\n\t\tself.steps_per_epoch = steps_per_epoch\n\t\tself.model = None\n\t\n\tdef get_max_sequence_length(self, X_train):\n\t\tmax_len = 0\n\t\tfor sample in X_train:\n\t\t\tif len(sample) > max_len:\n\t\t\t\tmax_len = len(sample)\n\t\treturn max_len\n\t\n\tdef build_model(self, features_len):\n\t\tmodel = Sequential()\n\t\tmodel.add(LSTM(32, input_shape=(None, features_len)))\n\t\tmodel.add(Dense(1, activation=\"sigmoid\"))\n\t\tmodel.compile(\n\t\t\tloss='binary_crossentropy',\n\t\t\toptimizer=Adam())\n\t\treturn model\n\t\n\tdef data_iterator(self, X, y, max_len):\n\t\t\"\"\"\n\t\t\tThe method is defined as an iterator\n\t\t\tto generate the necessary data to\n\t\t\tfit the model.\n\n\t\t\t@args\n\t\t\t\tX : list[list[word] list[case], \n\t\t\t\t\tlist[numpy array(chars x embedding size)]]\n\t\t\t\ty : list[list[int]]\n\t\t\t\tmax_len : int -> maximum len of sentences\n\t\t\t\t\tin the training examples.\n\t\t\"\"\"\n\t\twhile True:\n\t\t\tfor l in range(1, max_len):\n\t\t\t\tX_len = []\n\t\t\t\ty_len = []\n\t\t\t\tfor features, label in zip(X, y):\n\t\t\t\t\tif len(features) == l:\n\t\t\t\t\t\tX_len.append(features)\n\t\t\t\t\t\tif l == 1:\n\t\t\t\t\t\t\ty_len.append([label])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\ty_len.append(label)\n\t\t\t\t\n\t\t\t\t# Preprocess the Data to feed the model\n\t\t\t\tX_train, y_train = self.transform_into_model_data(X_len, y_len)\n\n\t\t\t\t# Check if there are examples with length == l\n\t\t\t\tif y_train.shape[0] == 0:\n\t\t\t\t\tcontinue\n\t\t\t\telse:\n\t\t\t\t\tyield X_train, y_train\n\t\n\tdef transform_into_model_data(self, X, y=None):\n\t\t# Transform the data into trainable data (numpy arrays)\n\t\tX = np.asarray(X)\n\t\tif y is not None:\n\t\t\ty = np.asarray(y)\n\t\treturn X, y\n\t\n\tdef fit(self, X_train, y_train, X_val=None, y_val=None):\n\t\t# get max sequence length inside the training samples\n\t\t# self.max_sequence_length = self.get_max_sequence_length(X_train)\n\t\tself.max_sequence_length = 5\n\n\t\tprint(\"MAX SEQUENCE LENGTH : \"+str(self.max_sequence_length))\n\t\t\n\t\t# build the model architecture in case of None\n\t\tif self.model == None:\n\t\t\tprint(\"FEATURES DIM : \", len(X_train[0][0]))\n\t\t\tself.model = self.build_model(len(X_train[0][0]))\n\t\t\t\t\n\t\t# Fit the model\n\t\tprint(\"TRAINING MODEL..\")\n\t\tif X_val is not None:\n\t\t\t# fit the model\n\t\t\thistory = self.model.fit_generator(\n\t\t\t\tself.data_iterator(X_train, y_train, self.max_sequence_length),\n\t\t\t\tepochs=self.epochs,\n\t\t\t\tvalidation_data=self.data_iterator(X_val, y_val, self.max_sequence_length),\n\t\t\t\tvalidation_steps=self.steps_per_epoch,\n\t\t\t\tsteps_per_epoch=self.steps_per_epoch,\n\t\t\t\tshuffle=True,\n\t\t\t\tverbose=1\n\t\t\t\t)\n\t\telse:\n\t\t\thistory = self.model.fit_generator(\n\t\t\t\t\tself.data_iterator(X_train, y_train, 
self.max_sequence_length),\n\t\t\t\t\tepochs=self.epochs,\n\t\t\t\t\tsteps_per_epoch=self.steps_per_epoch,\n\t\t\t\t\tshuffle=True,\n\t\t\t\t\tverbose=1\n\t\t\t\t\t)\n\t\tprint(\"MODEL TRAINED ON {} SAMPLES. \".format(len(X_train)))\n\t\treturn history\n\t\n\tdef evaluate(self, X_test, y_test):\n\t\ty_pred = self.transform(X_test)\n\t\tscore = log_loss(y_test, y_pred)\n\t\tprint(\"LOG LOSS : \", score)\n\t\treturn\n\t\n\tdef predict(self, X):\n\t\tpredictions = []\n\t\tfor sample in tqdm(X):\n\t\t\tif len(sample) == 1:\n\t\t\t\tsample, _ = self.transform_into_model_data([[sample]])\n\t\t\telse:\n\t\t\t\tsample, _ = self.transform_into_model_data([sample])\n\t\t\tprediction = self.model.predict_proba(sample)\n\t\t\tprediction = np.squeeze(prediction)\n\t\t\tpredictions.append(prediction)\n\t\treturn predictions\n\t\n\tdef transform(self, X):\n\t\tpredictions = []\n\t\tfor sample in X:\n\t\t\tif len(sample) == 1:\n\t\t\t\tsample, _ = self.transform_into_model_data([[sample]])\n\t\t\telse:\n\t\t\t\tsample, _ = self.transform_into_model_data([sample])\n\t\t\tprediction = self.model.predict_proba(sample)\n\t\t\tprediction = np.squeeze(prediction)\n\t\t\tpredictions.append([1-prediction, prediction])\n\t\treturn predictions\n","sub_path":"dsg/models/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":4170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"294304230","text":"from pygoridge import create_relay, Worker\nfrom pygoridge.json import json_loads\n\n\nif __name__ == \"__main__\":\n    rl = create_relay(\"pipes\")\n    worker = Worker(rl)\n\n    while True:\n        context, body = worker.receive()\n        context = json_loads(context.tobytes())\n        remote_addr = context[\"remote\"]\n        context[\"worker\"] = \"python\"\n        worker.send(body, context)\n","sub_path":"examples/roadrunner/tcp_echo_server/worker.py","file_name":"worker.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"376079123","text":"# re模块的常用函数\n\n'''\nre.match() 函数\n    从头开始匹配,如果第一个就符合要求,那么匹配成功,\n    如果第一个不符合规则,返回None\n    匹配成功后返回Match对象,\n    成功后可以使用group()和span()方法获取数据和下标区间\n\nre.search() 函数\n    从字符串的开头开始进行搜索式的匹配\n    匹配成功则返回Match对象,匹配失败则返回None\n    成功后可以使用group()和span()方法获取数据和下标区间\n\nre.findall()\n\nre.finditer()\n\nre.sub()\n\nre.split()\n'''\n\nimport re\n# 定义的字符串\nvarstr = 'iloveyou521tosimiloveda'\n# 定义正则表达式\nreg = 'love'\n\n# re.findall函数,按照正则表达式的规则去字符串中进行搜索匹配所有符合规则的元素,结果返回一个列表,如果没有找到,则返回空列表\n# res = re.findall(reg,varstr)\n\n\n# re.finditer()函数 和findall是一样的搜索匹配规则,但是结果返回由Match对象组成的迭代器\n# res = re.finditer(reg,varstr)\n# for i in res:\n#     print(i.group())\n\n\n# re.sub() 搜索替换\n'''\n按照正则表达式的规则去搜索匹配要替换的字符串,完成字符串的替换\npattern 正则表达式的规则,匹配需要被替换的字符\nrepl:  替换后的字符\nstring: 原始字符串\n'''\n# res = re.sub(reg,'live',varstr)\n# print(res)\n\n# re.split() 按照指定的正则规则,进行数据切割\nvarstr = 'hello1my2name3is4chuange'\n# res = re.split('\\d',varstr)\n# print(res)\n\n\n# compile() 可以直接将正则表达式定义为 正则对象,使用正则对象直接操作\n\narr = [\n    'i love 123 you',\n    'i love 234 you',\n    'i love 456 you',\n    'i love 789 you',\n]\n\nreg = re.compile('\\d{3}')\n# reg = '\\d{3}'\n\nfor i in arr:\n    # res = re.search(reg,i)\n    # print(res.group())\n    res = reg.search(i).group()\n    print(res)\n\n","sub_path":"kaikeba/AIPython基础/3.re模块的常用函数.py","file_name":"3.re模块的常用函数.py","file_ext":"py","file_size_in_byte":1863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"109282988","text":"#! /usr/bin/python3\n\nimport sys\n\n\ndef kpiCount(pathLogFile):\n    with open(pathLogFile) as f:\n        lineas = f.readlines()  # lista con cada linea del archivo log\n        totalOk = len([x.strip() for x in lineas if 'TRANSOK' in x])  # numero lineas sin errores\n        errort = sum(map(lambda x: 'integrity' in x, lineas))\n        errorn = sum(map(lambda x: 'Nonce' in x, lineas))\n    return \"Total transferencias iniciadas: {:n} \\n\" \\\n           \"Transferencias con errores de integridad; {:n}\\n\" \\\n           \"Transferencias con nonce caducado o inválido: {:n}\\n\" \\\n           \"Porcentaje de transferencias correctas: {:%}\" \\\n           \"\".format(len(lineas), errort, errorn, (totalOk / len(lineas)))\n\n\nif __name__ == '__main__':\n    if len(sys.argv) > 1:\n        if len(sys.argv) > 2:\n            sys.exit('Demasiados argumentos')\n        print(kpiCount(sys.argv[1]))\n    else:\n        sys.exit('Se requiere el nombre del archivo')\n","sub_path":"test/kpi.py","file_name":"kpi.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"93991641","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.8 (3401)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /local/hd1/home1/data/acue/rd/p-open-deploy/namedtupledefs/components/namedtupledefs3/tests/namedtupledefs_tests/CallCase.py\n# Compiled at: 2019-06-19 10:51:09\n# Size of source mod 2**32: 448 bytes\nfrom __future__ import absolute_import\n__author__ = 'Arno-Can Uestuensoez'\n__license__ = 'Artistic-License-2.0 + Forced-Fairplay-Constraints'\n__copyright__ = 'Copyright (C) 2015-2016 Arno-Can Uestuensoez @Ingenieurbuero Arno-Can Uestuensoez'\n__version__ = '0.1.0'\n__uuid__ = '19683f50-48f2-4e1e-953f-640455e97340'\nimport unittest\nif __name__ == '__main__':\n    unittest.main()","sub_path":"pycfiles/namedtupledefs-0.1.21.tar/CallCase.cpython-38.py","file_name":"CallCase.cpython-38.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"604319561","text":"# -*- coding: utf-8 -*-\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom FireFoxOption import FireFoxOption\nfrom sava_data import json_file\nimport re\nclass Album:\n     ablum_list = []\n     @classmethod\n     def get_ablum_list_by_id(cls,id):\n         cls.ablum_list=[]\n         driver = FireFoxOption.get_album_url_by_id(id)\n         cls.get_id_by_it(driver, id)\n         #print(cls.ablum_list)\n         driver.quit()\n         return cls.ablum_list\n\n\n     @classmethod\n     def get_id_by_it(cls, driver, id):\n        try:\n          locator = (By.NAME, \"contentFrame\")\n          try:\n              WebDriverWait(driver, 20).until(EC.presence_of_element_located(locator))\n          except:\n              print(\"ablum__failed\"+id)\n              cls.add_album_error(id)\n              return cls.ablum_list\n          else:\n              driver.switch_to_frame(\"contentFrame\")\n              locator = (By.ID, \"m-song-module\")\n              try:\n               WebDriverWait(driver, 20).until(EC.presence_of_element_located(locator))\n              except:\n               print(\"ablum__failed\" + id)\n               cls.add_album_error(id)\n\n               return cls.ablum_list\n              else:\n               lis = driver.find_element_by_id(\"m-song-module\").find_elements_by_tag_name(\"li\")\n               for li in lis:\n                  title = li.find_element_by_tag_name(\"div\").get_attribute(\"title\")\n                  link = li.find_element_by_tag_name(\"a\").get_attribute(\"href\")\n                  aid = FireFoxOption.get_id_by_url(link)\n                  cls.ablum_list.append({\"ablumId\": aid, \"title\": title, \"singerId\": id})\n              #print(cls.ablum_list)\n               locator = (By.CLASS_NAME, \"u-page\")\n               try:\n                  WebDriverWait(driver, 20).until(EC.presence_of_element_located(locator))\n               except:\n                  print(\"ablum__failed:\" + id)\n                  cls.add_album_error(id)\n\n                  return cls.ablum_list\n               else:\n\n                  nextPage=driver.find_element_by_class_name(\"u-page\").find_element_by_class_name(\"znxt\")\n                  href=nextPage.get_attribute(\"href\")\n                  pattern = re.compile(\"javascript\")\n                  x = re.search(pattern, href)\n              #print(x == None)\n                  if(x==None):\n                   ####存在下一页\n                   driver.get(href)\n                   cls.get_id_by_it(driver,id)\n        except:\n         print(Exception)\n         cls.add_album_error(id)\n\n         return cls.ablum_list\n\n\n     @classmethod\n     def add_album_error(cls,id):\n         map={\"albumId\":id}\n         json_file.add_error(map)\n\n","sub_path":"album.py","file_name":"album.py","file_ext":"py","file_size_in_byte":2792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"215192833","text":"########################################################################################\r\n# Davi Frossard, 2016                                                                  #\r\n# VGG16 implementation in TensorFlow                                                   #\r\n# Details:                                                                             #\r\n# http://www.cs.toronto.edu/~frossard/post/vgg16/                                      #\r\n#                                                                                      #\r\n# Model from https://gist.github.com/ksimonyan/211839e770f7b538e2d8#file-readme-md     #\r\n# Weights from Caffe converted using https://github.com/ethereon/caffe-tensorflow      #\r\n########################################################################################\r\n\r\nimport tensorflow as tf\r\nimport numpy as np\r\nfrom scipy.misc import imread, imresize\r\n#from imagenet_classes import class_names\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\n###############################CLASS###########################################################\r\nclass vgg16:\r\n    def __init__(self, imgs, weights=None, sess=None):  # Intitiate network graph and load weight\r\n        self.imgs = imgs\r\n        self.convlayers()  # build convolutional layers of vgg network\r\n\r\n        if weights is not None and sess is not None:\r\n            self.load_weights(weights, sess)  # Load network weight\r\n\r\n    def convlayers(self):  # build convolutional layers of vgg network\r\n        self.parameters = []\r\n\r\n        # zero-mean input\r\n        with tf.name_scope('preprocess') as scope:\r\n            mean = tf.constant([123.68, 116.779, 103.939], dtype=tf.float32, shape=[1, 1, 1, 3], name='img_mean')\r\n            images = self.imgs - mean\r\n\r\n        # conv1_1\r\n        with tf.name_scope('conv1_1') as scope:\r\n            kernel = tf.Variable(tf.truncated_normal([3, 3, 3, 64], dtype=tf.float32,\r\n                                                     stddev=1e-1), name='weights')\r\n            conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')\r\n            biases = tf.Variable(tf.constant(0.0, shape=[64], dtype=tf.float32),\r\n                                 trainable=True, name='biases')\r\n            out = tf.nn.bias_add(conv, biases)\r\n            self.conv1_1 = tf.nn.relu(out, name=scope)\r\n            self.parameters += [kernel, biases]\r\n\r\n        # conv1_2\r\n        with tf.name_scope('conv1_2') as scope:\r\n            kernel = tf.Variable(tf.truncated_normal([3, 3, 64, 64], dtype=tf.float32,\r\n                                                     stddev=1e-1), name='weights')\r\n            conv = tf.nn.conv2d(self.conv1_1, kernel, [1, 1, 1, 1], padding='SAME')\r\n            biases = tf.Variable(tf.constant(0.0, shape=[64], dtype=tf.float32),\r\n                                 trainable=True, name='biases')\r\n            out = tf.nn.bias_add(conv, biases)\r\n            self.conv1_2 = tf.nn.relu(out, name=scope)\r\n            self.parameters += [kernel, biases]\r\n\r\n        # pool1\r\n        self.pool1 = tf.nn.max_pool(self.conv1_2,\r\n                                    ksize=[1, 2, 2, 1],\r\n                                    strides=[1, 2, 2, 1],\r\n                                    padding='SAME',\r\n                                    name='pool1')\r\n\r\n        # conv2_1\r\n        with tf.name_scope('conv2_1') as scope:\r\n            kernel = 
tf.Variable(tf.truncated_normal([3, 3, 64, 128], dtype=tf.float32,\r\n                                                     stddev=1e-1), name='weights')\r\n            conv = tf.nn.conv2d(self.pool1, kernel, [1, 1, 1, 1], padding='SAME')\r\n            biases = tf.Variable(tf.constant(0.0, shape=[128], dtype=tf.float32),\r\n                                 trainable=True, name='biases')\r\n            out = tf.nn.bias_add(conv, biases)\r\n            self.conv2_1 = tf.nn.relu(out, name=scope)\r\n            self.parameters += [kernel, biases]\r\n\r\n        # conv2_2\r\n        with tf.name_scope('conv2_2') as scope:\r\n            kernel = tf.Variable(tf.truncated_normal([3, 3, 128, 128], dtype=tf.float32,\r\n                                                     stddev=1e-1), name='weights')\r\n            conv = tf.nn.conv2d(self.conv2_1, kernel, [1, 1, 1, 1], padding='SAME')\r\n            biases = tf.Variable(tf.constant(0.0, shape=[128], dtype=tf.float32),\r\n                                 trainable=True, name='biases')\r\n            out = tf.nn.bias_add(conv, biases)\r\n            self.conv2_2 = tf.nn.relu(out, name=scope)\r\n            self.parameters += [kernel, biases]\r\n\r\n        # pool2\r\n        self.pool2 = tf.nn.max_pool(self.conv2_2,\r\n                                    ksize=[1, 2, 2, 1],\r\n                                    strides=[1, 2, 2, 1],\r\n                                    padding='SAME',\r\n                                    name='pool2')\r\n\r\n        # conv3_1\r\n        with tf.name_scope('conv3_1') as scope:\r\n            kernel = tf.Variable(tf.truncated_normal([3, 3, 128, 256], dtype=tf.float32,\r\n                                                     stddev=1e-1), name='weights')\r\n            conv = tf.nn.conv2d(self.pool2, kernel, [1, 1, 1, 1], padding='SAME')\r\n            biases = tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32),\r\n                                 trainable=True, name='biases')\r\n            out = tf.nn.bias_add(conv, biases)\r\n            self.conv3_1 = tf.nn.relu(out, name=scope)\r\n            self.parameters += [kernel, biases]\r\n\r\n        # conv3_2\r\n        with tf.name_scope('conv3_2') as scope:\r\n            kernel = tf.Variable(tf.truncated_normal([3, 3, 256, 256], dtype=tf.float32,\r\n                                                     stddev=1e-1), name='weights')\r\n            conv = tf.nn.conv2d(self.conv3_1, kernel, [1, 1, 1, 1], padding='SAME')\r\n            biases = tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32),\r\n                                 trainable=True, name='biases')\r\n            out = tf.nn.bias_add(conv, biases)\r\n            self.conv3_2 = tf.nn.relu(out, name=scope)\r\n            self.parameters += [kernel, biases]\r\n\r\n        # conv3_3\r\n        with tf.name_scope('conv3_3') as scope:\r\n            kernel = tf.Variable(tf.truncated_normal([3, 3, 256, 256], dtype=tf.float32,\r\n                                                     stddev=1e-1), name='weights')\r\n            conv = tf.nn.conv2d(self.conv3_2, kernel, [1, 1, 1, 1], padding='SAME')\r\n            biases = tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32),\r\n                                 trainable=True, name='biases')\r\n            out = tf.nn.bias_add(conv, biases)\r\n            self.conv3_3 = tf.nn.relu(out, name=scope)\r\n            self.parameters += [kernel, biases]\r\n\r\n        # pool3\r\n        self.pool3 = tf.nn.max_pool(self.conv3_3,\r\n                 
                   ksize=[1, 2, 2, 1],\r\n                                    strides=[1, 2, 2, 1],\r\n                                    padding='SAME',\r\n                                    name='pool3')\r\n\r\n        # conv4_1\r\n        with tf.name_scope('conv4_1') as scope:\r\n            kernel = tf.Variable(tf.truncated_normal([3, 3, 256, 512], dtype=tf.float32,\r\n                                                     stddev=1e-1), name='weights')\r\n            conv = tf.nn.conv2d(self.pool3, kernel, [1, 1, 1, 1], padding='SAME')\r\n            biases = tf.Variable(tf.constant(0.0, shape=[512], dtype=tf.float32),\r\n                                 trainable=True, name='biases')\r\n            out = tf.nn.bias_add(conv, biases)\r\n            self.conv4_1 = tf.nn.relu(out, name=scope)\r\n            self.parameters += [kernel, biases]\r\n\r\n        # conv4_2\r\n        with tf.name_scope('conv4_2') as scope:\r\n            kernel = tf.Variable(tf.truncated_normal([3, 3, 512, 512], dtype=tf.float32,\r\n                                                     stddev=1e-1), name='weights')\r\n            conv = tf.nn.conv2d(self.conv4_1, kernel, [1, 1, 1, 1], padding='SAME')\r\n            biases = tf.Variable(tf.constant(0.0, shape=[512], dtype=tf.float32),\r\n                                 trainable=True, name='biases')\r\n            out = tf.nn.bias_add(conv, biases)\r\n            self.conv4_2 = tf.nn.relu(out, name=scope)\r\n            self.parameters += [kernel, biases]\r\n\r\n        # conv4_3\r\n        with tf.name_scope('conv4_3') as scope:\r\n            kernel = tf.Variable(tf.truncated_normal([3, 3, 512, 512], dtype=tf.float32,\r\n                                                     stddev=1e-1), name='weights')\r\n            conv = tf.nn.conv2d(self.conv4_2, kernel, [1, 1, 1, 1], padding='SAME')\r\n            biases = tf.Variable(tf.constant(0.0, shape=[512], dtype=tf.float32),\r\n                                 trainable=True, name='biases')\r\n            out = tf.nn.bias_add(conv, biases)\r\n            self.conv4_3 = tf.nn.relu(out, name=scope)\r\n            self.parameters += [kernel, biases]\r\n\r\n        # pool4\r\n        self.pool4 = tf.nn.max_pool(self.conv4_3,\r\n                                    ksize=[1, 2, 2, 1],\r\n                                    strides=[1, 2, 2, 1],\r\n                                    padding='SAME',\r\n                                    name='pool4')\r\n\r\n        # conv5_1\r\n        with tf.name_scope('conv5_1') as scope:\r\n            kernel = tf.Variable(tf.truncated_normal([3, 3, 512, 512], dtype=tf.float32,\r\n                                                     stddev=1e-1), name='weights')\r\n            conv = tf.nn.conv2d(self.pool4, kernel, [1, 1, 1, 1], padding='SAME')\r\n            biases = tf.Variable(tf.constant(0.0, shape=[512], dtype=tf.float32),\r\n                                 trainable=True, name='biases')\r\n            out = tf.nn.bias_add(conv, biases)\r\n            self.conv5_1 = tf.nn.relu(out, name=scope)\r\n            self.parameters += [kernel, biases]\r\n\r\n        # conv5_2\r\n        with tf.name_scope('conv5_2') as scope:\r\n            kernel = tf.Variable(tf.truncated_normal([3, 3, 512, 512], dtype=tf.float32,\r\n                                                     stddev=1e-1), name='weights')\r\n            conv = tf.nn.conv2d(self.conv5_1, kernel, [1, 1, 1, 1], padding='SAME')\r\n            biases = tf.Variable(tf.constant(0.0, shape=[512], dtype=tf.float32),\r\n          
                       trainable=True, name='biases')\r\n            out = tf.nn.bias_add(conv, biases)\r\n            self.conv5_2 = tf.nn.relu(out, name=scope)\r\n            self.parameters += [kernel, biases]\r\n\r\n        # conv5_3\r\n        with tf.name_scope('conv5_3') as scope:\r\n            kernel = tf.Variable(tf.truncated_normal([3, 3, 512, 512], dtype=tf.float32,\r\n                                                     stddev=1e-1), name='weights')\r\n            conv = tf.nn.conv2d(self.conv5_2, kernel, [1, 1, 1, 1], padding='SAME')\r\n            biases = tf.Variable(tf.constant(0.0, shape=[512], dtype=tf.float32),\r\n                                 trainable=True, name='biases')\r\n            out = tf.nn.bias_add(conv, biases)\r\n            self.conv5_3 = tf.nn.relu(out, name=scope)\r\n            self.parameters += [kernel, biases]\r\n\r\n        # pool5\r\n        self.pool5 = tf.nn.max_pool(self.conv5_3,\r\n                                    ksize=[1, 2, 2, 1],\r\n                                    strides=[1, 2, 2, 1],\r\n                                    padding='SAME',\r\n                                    name='pool5')\r\n\r\n    def load_weights(self, weight_file, sess):  # Load network weights\r\n        weights = np.load(weight_file)\r\n        keys = sorted(weights.keys())\r\n        for i, k in enumerate(keys):\r\n            if (i < len(self.parameters)):\r\n                print(i, k, np.shape(weights[k]), len(self.parameters));\r\n                sess.run(self.parameters[i].assign(weights[k]))\r\n\r\n\r\nsess = tf.Session()\r\nimg1 = imread('roof5.jpg', mode='RGB')  # Load image\r\nSy, Sx, dpt = img1.shape  # Get image shape\r\nimg1 = imresize(img1, (300, int(300 / Sy * Sx)))\r\nSy, Sx, dpt = img1.shape  # Get image shape\r\nimgs = tf.placeholder(tf.float32, [None, Sy, Sx, 3])\r\nvgg = vgg16(imgs, 'vgg16_weights.npz', sess)  # Initiate neural network\r\n\r\nconv1_1, conv1_2, conv2_1, conv2_2, conv3_1, conv3_2, conv3_3, conv4_1, conv4_2, conv4_3, conv5_1, conv5_2, conv5_3 = sess.run(\r\n    [vgg.conv1_1, vgg.conv1_2, vgg.conv2_1, vgg.conv2_2, vgg.conv3_1, vgg.conv3_2, vgg.conv3_3, vgg.conv4_1,\r\n     vgg.conv4_2, vgg.conv4_3, vgg.conv5_1, vgg.conv5_2, vgg.conv5_3], feed_dict={vgg.imgs: [img1]})\r\n\r\nfrom scipy import signal\r\nfrom scipy import misc\r\n\r\n\r\ndef DeepSobel(\r\n        im):  # Apply the Sobel operator to the response map of a specific layer in the net to get that layer's total gradient/edge map\r\n    im = im.squeeze()  # Remove excess dimension\r\n    im = np.swapaxes(im, 0, 2)  # Swap axes to fit those of a standard image (x,y,d)\r\n    im = np.swapaxes(im, 1, 2)\r\n    Gx = [[1, 2, 1], [0, 0, 0], [-1, -2, -1]]  # Build sobel x,y gradient filters\r\n    Gy = np.swapaxes(Gx, 0, 1)  # Build sobel x,y gradient filters\r\n    ndim = im[:, 1, 1].shape[0]  # Get the depth (number of filters in the layer)\r\n    TotGrad = np.zeros(im[1, :, :].shape)  # The average gradient map of the image, to be filled later\r\n\r\n    for ii in range(ndim):  # Go over all dimensions (filters)\r\n        # print(ii);\r\n        gradx = signal.convolve2d(im[ii, :, :], Gx, boundary='symm', mode='same');  # Get x sobel response of ii layer\r\n        grady = signal.convolve2d(im[ii, :, :], Gy, boundary='symm', mode='same');  # Get y sobel response of ii layer\r\n        grad = np.sqrt(np.power(gradx, 2) + np.power(grady, 2));  # Get total sobel response of ii layer\r\n        TotGrad += grad  # Add it to the layer average gradient/edge map\r\n    TotGrad /= ndim  # Get layer sobel gradient map\r\n    return TotGrad\r\n\r\n\r\ndef SSobel(im, sp):\r\n    TotGrad = DeepSobel(im)\r\n    NewGrad = misc.imresize(TotGrad, sp, interp='bicubic')  # ('nearest', 'bilinear', 'bicubic' or 'cubic')\r\n    print(\"New Size\", NewGrad.shape)\r\n    return NewGrad\r\n\r\n\r\ndef to_rgb1(im):\r\n    # I think this will be slow\r\n    w, h = im.shape\r\n    ret = np.empty((w, h, 3), dtype=np.uint8)\r\n    ret[:, :, 0] = im\r\n    ret[:, :, 1] = im\r\n    ret[:, :, 2] = im\r\n    return ret\r\n\r\n\r\nfrom scipy import signal\r\nfrom scipy import misc\r\n\r\n\r\ndef WriteSobelResultsforAllFilters(im, LayerName,\r\n                                   in_im):  # Apply the Sobel operator to every filter's response map in a given layer and display the results\r\n    sp = in_im[:, :, 1].shape\r\n\r\n    sp = list(sp)\r\n    sp[0] *= 2\r\n    im = im.squeeze()  # Remove excess dimension\r\n    im = np.swapaxes(im, 0, 2)  # Swap axes to fit those of a standard image (x,y,d)\r\n    im = np.swapaxes(im, 1, 2)\r\n    Gx = [[1, 2, 1], [0, 0, 0], [-1, -2, -1]]  # Build sobel x,y gradient filters\r\n    Gy = np.swapaxes(Gx, 0, 1)  # Build sobel x,y gradient filters\r\n    ndim = im[:, 1, 1].shape[0]  # Get the depth (number of filters in the layer)\r\n    TotGrad = np.zeros(im[1, :, :].shape)  # The average gradient map of the image, to be filled later\r\n\r\n\r\n    for ii in range(ndim):  # Go over all dimensions (filters)\r\n        print(LayerName, '    Filter: ', ii + 1);\r\n        gradx = signal.convolve2d(im[ii, :, :], Gx, boundary='symm', mode='same');  # Get x sobel response of ii layer\r\n        grady = signal.convolve2d(im[ii, :, :], Gy, boundary='symm', mode='same');  # Get y sobel response of ii layer\r\n        grad = np.sqrt(np.power(gradx, 2) + np.power(grady, 2));  # Get total sobel response of ii layer\r\n        FileName = LayerName + \" Filter \" + str(ii) + \".png\";\r\n        fim = np.concatenate((im[ii, :, :], grad), axis=0)\r\n\r\n        fim = misc.imresize(fim, sp, interp='bicubic')  # ('nearest', 'bilinear', 'bicubic' or 'cubic')\r\n        fim = to_rgb1(fim);\r\n        fim = np.concatenate((in_im, fim), axis=0)\r\n\r\n        plt.imshow(fim)  # Display results for filter\r\n        plt.gray()\r\n        plt.show()\r\n        plt.imsave(FileName, fim)\r\n\r\n\r\n\r\nprint('Display sobel gradient map for all filters in all layers')\r\n# ................Original image........................................................\r\nprint('Input image')\r\nplt.imshow(img1)\r\nplt.gray()\r\nplt.show()\r\n#plt.show(TotGrad)\r\n# ....................11................................................................\r\nprint('Layer Name: conv1_1')\r\nWriteSobelResultsforAllFilters(conv1_1, 'F:', img1)\r\n#C:/Users/Sagi Eppel/Desktop/VGG_RESULTS/conv1_1\r\n\r\n# ....................12................................................................\r\nprint('Layer Name: conv1_2')\r\nWriteSobelResultsforAllFilters(conv1_2, 'F:', img1)\r\n# ....................21................................................................\r\nprint('Layer Name: conv2_1')\r\nWriteSobelResultsforAllFilters(conv2_1, 'F:', img1)\r\n# ....................22................................................................\r\nprint('Layer Name: conv2_2')\r\nWriteSobelResultsforAllFilters(conv2_2, 'F:', img1)\r\n# ....................31................................................................\r\nprint('Layer Name: conv3_1')\r\nWriteSobelResultsforAllFilters(conv3_1, 'F:', img1)\r\n# 
....................32................................................................\r\nprint('Layer Name: conv3_2')\r\nWriteSobelResultsforAllFilters(conv3_2, 'F:', img1)\r\n# ....................33................................................................\r\nprint('Layer Name: conv3_3')\r\nWriteSobelResultsforAllFilters(conv3_3, 'F:', img1)\r\n# ....................41................................................................\r\nprint('Layer Name: conv4_1')\r\nWriteSobelResultsforAllFilters(conv4_1, 'F:', img1)\r\n# ....................42................................................................\r\nprint('Layer Name: conv4_2')\r\nWriteSobelResultsforAllFilters(conv4_2, 'F:', img1)\r\n# ....................43................................................................\r\nprint('Layer Name: conv4_3')\r\nWriteSobelResultsforAllFilters(conv4_3, 'F:', img1)\r\n# ....................51................................................................\r\nprint('Layer Name: conv5_1')\r\nWriteSobelResultsforAllFilters(conv5_1, 'F:', img1)\r\n# ....................52................................................................\r\nprint('Layer Name: conv5_2')\r\nWriteSobelResultsforAllFilters(conv5_2, 'F:', img1)\r\n# ....................53................................................................\r\nprint('Layer Name: conv5_3')\r\nWriteSobelResultsforAllFilters(conv5_3, 'F:', img1)","sub_path":"AllFiltersResponse.py","file_name":"AllFiltersResponse.py","file_ext":"py","file_size_in_byte":18457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"326856186","text":"#!/usr/bin/python\n\nimport math\n\ndef recipe_batches(recipe, ingredients, batches=0):\n  batch = batches\n  pendingBatch = []\n  for ingredient in recipe:\n    if ingredient in ingredients: #check if it has ingredient at all\n      if recipe.get(ingredient) <= ingredients.get(ingredient): #check if it has enough ingredient\n        pendingBatch.append(1) #if so, add a tally to pending\n        ingredients[ingredient] -= recipe.get(ingredient) #decrement from ingredients\n        if len(pendingBatch) == len(recipe): #check if the pending batch is finished\n          batch += 1 #if it is, increment batch\n          pendingBatch = [] #and reset pending\n          recipe_batches(recipe, ingredients, batch) #try again to make another batch\n        else:\n          continue #otherwise keep going until a full batch is made\n      else: #if you can't keep going, return how many batches were made thus far\n        print (\"not enough!\")\n        print (batch)\n        return batch\n    else:\n      return 0\nprint(recipe_batches({ 'milk': 100, 'butter': 50, 'cheese': 10 }, { 'milk': 198, 'butter': 52, 'cheese': 10 }))\n\n# if __name__ == '__main__':\n  # Change the entries of these dictionaries to test \n  # your implementation with different inputs\n  # recipe = { 'milk': 100, 'butter': 50, 'flour': 5 }\n  # ingredients = { 'milk': 132, 'butter': 48, 'flour': 51 }\n  # print(\"{batches} batches can be made from the available ingredients: {ingredients}.\".format(batches=recipe_batches(recipe, ingredients), ingredients=ingredients))","sub_path":"recipe_batches/recipe_batches.py","file_name":"recipe_batches.py","file_ext":"py","file_size_in_byte":1517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"151182106","text":"# Mike Roylance - roylance@uw.edu\nimport unittest\nimport nltk\nfrom nltk.corpus import wordnet as wn\nimport resnik\n\nclass WordNetInterface(unittest.TestCase):\n\tdef test_first(self):\n\t\t# arrange\n\t\t# act\n\t\tres = wn.synsets('dog')\n\n\t\t# assert\n\t\tself.assertTrue(len(res) == 8)\n\n\tdef test_second(self):\n\t\t# arrange\n\t\t# act\n\t\tres = wn.synset('dog.n.01')\n\t\texamples = res.examples\n\t\tlemmas = res.lemmas\n\n\t\t# assert\n\t\tself.assertTrue(res.definition == 'a member of the genus Canis (probably descended from the common wolf) that has been domesticated by man since prehistoric times; occurs in many breeds')\n\t\t\n\t\tself.assertTrue(len(examples) == 1)\n\t\tself.assertTrue(examples[0] == 'the dog barked all night')\n\t\tself.assertTrue(len(lemmas) == 3)\n\nclass Reznik(unittest.TestCase):\n\tdef test_simple(self):\n\t\t# arrange\n\t\tic = nltk.corpus.wordnet_ic.ic('ic-brown-resnik-add1.dat')\n\t\tresnikInstance = resnik.Resnik(ic)\n\n\t\t# act\n\t\titems = list(resnikInstance.processLine(\"tie\tjacket,suit\"))\n\n\t\t# assert\n\t\tself.assertTrue(len(items) == 4)\n\ndef main():\n    unittest.main()\n\nif __name__ == '__main__':\n\tmain()","sub_path":"hw6/source/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"623001745","text":"import asyncio\nimport mock\nfrom concurrent.futures import ThreadPoolExecutor\nfrom pathlib import Path\n\nfrom juju.client.client import ConfigValue, ApplicationFacade\nfrom juju.model import Model, ModelObserver\nfrom juju.utils import block_until, run_with_interrupt\n\nimport pytest\n\nfrom .. import base\n\n\nMB = 1\nGB = 1024\nSSH_KEY = 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCsYMJGNGG74HAJha3n2CFmWYsOOaORnJK6VqNy86pj0MIpvRXBzFzVy09uPQ66GOQhTEoJHEqE77VMui7+62AcMXT+GG7cFHcnU8XVQsGM6UirCcNyWNysfiEMoAdZScJf/GvoY87tMEszhZIUV37z8PUBx6twIqMdr31W1J0IaPa+sV6FEDadeLaNTvancDcHK1zuKsL39jzAg7+LYjKJfEfrsQP+lj/EQcjtKqlhVS5kzsJVfx8ZEd0xhW5G7N6bCdKNalS8mKCMaBXJpijNQ82AiyqCIDCRrre2To0/i7pTjRiL0U9f9mV3S4NJaQaokR050w/ZLySFf6F7joJT mathijs@Qrama-Mathijs'  # noqa\n\n\n@base.bootstrapped\n@pytest.mark.asyncio\nasync def test_deploy_local_bundle(event_loop):\n    from pathlib import Path\n    tests_dir = Path(__file__).absolute().parent.parent\n    bundle_path = tests_dir / 'bundle'\n    mini_bundle_file_path = bundle_path / 'mini-bundle.yaml'\n\n    async with base.CleanModel() as model:\n        await model.deploy(str(bundle_path))\n        await model.deploy(str(mini_bundle_file_path))\n\n        for app in ('wordpress', 'mysql', 'myapp'):\n            assert app in model.applications\n\n\n@base.bootstrapped\n@pytest.mark.asyncio\nasync def test_deploy_local_charm(event_loop):\n    from pathlib import Path\n    tests_dir = Path(__file__).absolute().parent.parent\n    charm_path = tests_dir / 'charm'\n\n    async with base.CleanModel() as model:\n        await model.deploy(str(charm_path))\n        assert 'charm' in model.applications\n\n\n@base.bootstrapped\n@pytest.mark.asyncio\nasync def test_deploy_bundle(event_loop):\n    async with base.CleanModel() as model:\n        await model.deploy('bundle/wiki-simple')\n\n        for app in ('wiki', 'mysql'):\n            assert app in model.applications\n\n\n@base.bootstrapped\n@pytest.mark.asyncio\nasync def test_deploy_channels_revs(event_loop):\n    async with base.CleanModel() as model:\n        charm = 'cs:~johnsca/libjuju-test'\n        stable = await model.deploy(charm, 'a1')\n        edge = await model.deploy(charm, 'a2', channel='edge')\n        rev = await model.deploy(charm + '-2', 'a3')\n\n        assert [a.charm_url for a in (stable, edge, rev)] == [\n            'cs:~johnsca/libjuju-test-1',\n            'cs:~johnsca/libjuju-test-2',\n            'cs:~johnsca/libjuju-test-2',\n        ]\n\n\n@base.bootstrapped\n@pytest.mark.asyncio\nasync def test_add_machine(event_loop):\n    from juju.machine import Machine\n\n    async with base.CleanModel() as model:\n        # add a new default machine\n        machine1 = await model.add_machine()\n\n        # add a machine with constraints, disks, and series\n        machine2 = await model.add_machine(\n            constraints={\n                'mem': 256 * MB,\n            },\n            disks=[{\n                'pool': 'rootfs',\n                'size': 10 * GB,\n                'count': 1,\n            }],\n            series='xenial',\n        )\n\n        # add a lxd container to machine2\n        machine3 = await model.add_machine(\n            'lxd:{}'.format(machine2.id))\n\n        for m in (machine1, machine2, machine3):\n            assert isinstance(m, Machine)\n\n        assert len(model.machines) == 3\n\n        await machine3.destroy(force=True)\n        await machine2.destroy(force=True)\n        res = await machine1.destroy(force=True)\n\n        assert 
res is None\n        assert len(model.machines) == 0\n\n\n@base.bootstrapped\n@pytest.mark.asyncio\nasync def test_relate(event_loop):\n    from juju.relation import Relation\n\n    async with base.CleanModel() as model:\n        await model.deploy(\n            'ubuntu',\n            application_name='ubuntu',\n            series='trusty',\n            channel='stable',\n        )\n        await model.deploy(\n            'nrpe',\n            application_name='nrpe',\n            series='trusty',\n            channel='stable',\n            # subordinates must be deployed without units\n            num_units=0,\n        )\n\n        relation_added = asyncio.Event()\n        timeout = asyncio.Event()\n\n        class TestObserver(ModelObserver):\n            async def on_relation_add(self, delta, old, new, model):\n                if set(new.key.split()) == {'nrpe:general-info',\n                                            'ubuntu:juju-info'}:\n                    relation_added.set()\n                    event_loop.call_later(2, timeout.set)\n\n        model.add_observer(TestObserver())\n\n        real_app_facade = ApplicationFacade.from_connection(model.connection())\n        mock_app_facade = mock.MagicMock()\n\n        async def mock_AddRelation(*args):\n            # force response delay from AddRelation to test race condition\n            # (see https://github.com/juju/python-libjuju/issues/191)\n            result = await real_app_facade.AddRelation(*args)\n            await relation_added.wait()\n            return result\n\n        mock_app_facade.AddRelation = mock_AddRelation\n\n        with mock.patch.object(ApplicationFacade, 'from_connection',\n                               return_value=mock_app_facade):\n            my_relation = await run_with_interrupt(model.add_relation(\n                'ubuntu',\n                'nrpe',\n            ), timeout, event_loop)\n\n        assert isinstance(my_relation, Relation)\n\n\nasync def _deploy_in_loop(new_loop, model_name, jujudata):\n    new_model = Model(new_loop, jujudata=jujudata)\n    await new_model.connect(model_name)\n    try:\n        await new_model.deploy('cs:xenial/ubuntu')\n        assert 'ubuntu' in new_model.applications\n    finally:\n        await new_model.disconnect()\n\n\n@base.bootstrapped\n@pytest.mark.asyncio\nasync def test_explicit_loop_threaded(event_loop):\n    async with base.CleanModel() as model:\n        model_name = model.info.name\n        new_loop = asyncio.new_event_loop()\n        with ThreadPoolExecutor(1) as executor:\n            f = executor.submit(\n                new_loop.run_until_complete,\n                _deploy_in_loop(new_loop, model_name, model._connector.jujudata))\n            f.result()\n        await model._wait_for_new('application', 'ubuntu')\n        assert 'ubuntu' in model.applications\n\n\n@base.bootstrapped\n@pytest.mark.asyncio\nasync def test_store_resources_charm(event_loop):\n    async with base.CleanModel() as model:\n        ghost = await model.deploy('cs:ghost-19')\n        assert 'ghost' in model.applications\n        terminal_statuses = ('active', 'error', 'blocked')\n        await model.block_until(\n            lambda: (\n                len(ghost.units) > 0 and\n                ghost.units[0].workload_status in terminal_statuses)\n        )\n        # ghost will go in to blocked (or error, for older\n        # charm revs) if the resource is missing\n        assert ghost.units[0].workload_status == 'active'\n\n\n@base.bootstrapped\n@pytest.mark.asyncio\nasync def 
test_store_resources_bundle(event_loop):\n    async with base.CleanModel() as model:\n        bundle = str(Path(__file__).parent / 'bundle')\n        await model.deploy(bundle)\n        assert 'ghost' in model.applications\n        ghost = model.applications['ghost']\n        terminal_statuses = ('active', 'error', 'blocked')\n        await model.block_until(\n            lambda: (\n                len(ghost.units) > 0 and\n                ghost.units[0].workload_status in terminal_statuses)\n        )\n        # ghost will go in to blocked (or error, for older\n        # charm revs) if the resource is missing\n        assert ghost.units[0].workload_status == 'active'\n\n\n@base.bootstrapped\n@pytest.mark.asyncio\nasync def test_ssh_key(event_loop):\n    async with base.CleanModel() as model:\n        await model.add_ssh_key('admin', SSH_KEY)\n        result = await model.get_ssh_key(True)\n        result = result.serialize()['results'][0].serialize()['result']\n        assert SSH_KEY in result\n        await model.remove_ssh_key('admin', SSH_KEY)\n        result = await model.get_ssh_key(True)\n        result = result.serialize()['results'][0].serialize()['result']\n        assert result is None\n\n\n@base.bootstrapped\n@pytest.mark.asyncio\nasync def test_get_machines(event_loop):\n    async with base.CleanModel() as model:\n        result = await model.get_machines()\n        assert isinstance(result, list)\n\n\n@base.bootstrapped\n@pytest.mark.asyncio\nasync def test_watcher_reconnect(event_loop):\n    async with base.CleanModel() as model:\n        await model.connection().ws.close()\n        await block_until(model.is_connected, timeout=3)\n\n\n@base.bootstrapped\n@pytest.mark.asyncio\nasync def test_config(event_loop):\n    async with base.CleanModel() as model:\n        await model.set_config({\n            'extra-info': 'booyah',\n            'test-mode': ConfigValue(value=True),\n        })\n        result = await model.get_config()\n        assert 'extra-info' in result\n        assert result['extra-info'].source == 'model'\n        assert result['extra-info'].value == 'booyah'\n\n# @base.bootstrapped\n# @pytest.mark.asyncio\n# async def test_grant(event_loop)\n#    async with base.CleanController() as controller:\n#        await controller.add_user('test-model-grant')\n#        await controller.grant('test-model-grant', 'superuser')\n#    async with base.CleanModel() as model:\n#        await model.grant('test-model-grant', 'admin')\n#        assert model.get_user('test-model-grant')['access'] == 'admin'\n#        await model.grant('test-model-grant', 'login')\n#        assert model.get_user('test-model-grant')['access'] == 'login'\n","sub_path":"tests/integration/test_model.py","file_name":"test_model.py","file_ext":"py","file_size_in_byte":9487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"124607392","text":"from OWDTestToolkit.global_imports import *\n\t\nclass main(GaiaTestCase):\n\n    def selectAddContactButton(self):\n        #\n        # Taps the 'add contact' button and switches to the\n        # correct 'contacts' frame.
\n # Returns the \"src\" of the original iframe.\n #\n x = self.UTILS.getElement(DOM.Messages.add_contact_button, \"Add contact button\")\n x.tap()\n \n time.sleep(2)\n \n #\n # Switch to the contacts frame.\n #\n orig_iframe = self.UTILS.currentIframe()\n self.UTILS.switchToFrame(*DOM.Contacts.frame_locator)\n \n time.sleep(2)\n return orig_iframe\n \n","sub_path":"OWDTestToolkit/apps/Messages/selectAddContactButton.py","file_name":"selectAddContactButton.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"472608440","text":"from datetime import datetime\nimport time\n\nimport numpy as np\nimport pytest\n\nimport labrad\nimport labrad.types as T\nimport labrad.util.hydrant as hydrant\n\n# use the same path for all datasets in a given run of the tests in this module\n_path = None\n\ndef _test_path():\n \"\"\"Path where we'll put test datasets in the data vault\"\"\"\n global _path\n if _path is None:\n _path = ['test', datetime.utcnow().strftime('%Y%m%d')]\n return _path\n\ndef setup_dv(cxn):\n dv = cxn.data_vault\n dv.cd(_test_path(), True)\n return dv\n\n@pytest.yield_fixture\ndef dv():\n with labrad.connect() as cxn:\n dv = setup_dv(cxn)\n yield dv\n\ndef test_create_dataset(dv):\n \"\"\"Create a simple dataset, add some data and read it back\"\"\"\n _path, _name = dv.new('test', ['x', 'y'], ['z'])\n\n data = []\n for x in xrange(10):\n for y in xrange(10):\n data.append([x/10., y/10., x*y])\n\n for row in data:\n dv.add(row)\n\n stored = dv.get()\n assert np.equal(data, stored).all()\n\ndef test_read_dataset():\n \"\"\"Create a simple dataset and read it back while still open and after closed\"\"\"\n data = []\n for x in xrange(10):\n for y in xrange(10):\n data.append([x/10., y/10., x*y])\n\n with labrad.connect() as cxn:\n dv = setup_dv(cxn)\n\n path, name = dv.new('test', ['x', 'y'], ['z'])\n\n for row in data:\n dv.add(row)\n\n # read in new connection while the dataset is still open\n with labrad.connect() as cxn2:\n dv2 = cxn2.data_vault\n dv2.cd(path)\n dv2.open(name)\n stored = dv2.get()\n assert np.equal(data, stored).all()\n\n # add more data and ensure that we get it\n dv.add([1, 1, 100])\n\n row = dv2.get()\n assert np.equal(row, [1, 1, 100]).all()\n\n # read in new connection after dataset has been closed\n with labrad.connect() as cxn:\n dv = cxn.data_vault\n dv.cd(path)\n dv.open(name)\n stored = dv.get(len(data)) # get only up to the last extra row\n assert np.equal(data, stored).all()\n\n\ndef test_parameters(dv):\n \"\"\"Create a dataset with parameters\"\"\"\n dv.new('test', ['x', 'y'], ['z'])\n for i in xrange(100):\n t = hydrant.randType(noneOkay=False)\n a = hydrant.randValue(t)\n name = 'param{}'.format(i)\n dv.add_parameter(name, a)\n b = dv.get_parameter(name)\n sa, ta = T.flatten(a)\n sb, tb = T.flatten(b)\n assert ta == tb\n assert sa == sb\n\n\n# Test asynchronous notification signals.\n# These signals are used by the grapher to do\n# efficient UI updates without polling.\n\ndef test_signal_new_dir(dv):\n \"\"\"Check messages sent when a new directory is created.\"\"\"\n dirname = 'msg_test_dir' + str(time.time())\n\n msg_id = 123\n dv.signal__new_dir(msg_id)\n\n messages = []\n def on_message(ctx, msg):\n messages.append((ctx, msg))\n\n p = dv._cxn._backend.cxn\n p.addListener(on_message, source=dv.ID, ID=msg_id)\n\n dv.mkdir(dirname)\n time.sleep(0.5)\n\n assert len(messages) == 1\n\ndef test_signal_new_dataset(dv):\n \"\"\"Check messages sent when a new 
dataset is created.\"\"\"\n name = 'msg_test_dataset'\n\n msg_id = 123\n dv.signal__new_dataset(msg_id)\n\n messages = []\n def on_message(ctx, msg):\n messages.append((ctx, msg))\n\n p = dv._cxn._backend.cxn\n p.addListener(on_message, source=dv.ID, ID=msg_id)\n\n dv.new(name, ['x'], ['y'])\n time.sleep(0.5)\n\n assert len(messages) == 1\n\ndef test_signal_tags_updated(dv):\n \"\"\"Check messages sent when tags on directories or datasets are updated.\"\"\"\n dirname = 'msg_test_dir' + str(time.time())\n\n msg_id = 123\n dv.signal__tags_updated(msg_id)\n\n messages = []\n def on_message(ctx, msg):\n messages.append((ctx, msg))\n\n p = dv._cxn._backend.cxn\n p.addListener(on_message, source=dv.ID, ID=msg_id)\n\n dv.mkdir(dirname)\n dv.update_tags('test', [dirname], [])\n time.sleep(0.5)\n\n assert len(messages) == 1\n\ndef test_signal_data_available(dv):\n \"\"\"Check that we get messages when new parameters are added to a data set.\"\"\"\n msg_id = 123\n\n messages = []\n def on_message(ctx, msg):\n messages.append((ctx, msg))\n\n path, name = dv.new('test', ['x'], ['y'])\n\n # open a second connection which we'll use to read data added by the other\n with labrad.connect() as cxn:\n reader = setup_dv(cxn)\n reader.signal__data_available(msg_id)\n\n p = reader._cxn._backend.cxn\n p.addListener(on_message, source=reader.ID, ID=msg_id)\n\n reader.cd(path)\n reader.open(name)\n\n dv.add([1, 2])\n time.sleep(0.1)\n assert len(messages) == 1\n\n dv.add([3, 4])\n time.sleep(0.1)\n assert len(messages) == 1 # we should not get another message until we get the data\n\n data = reader.get()\n time.sleep(0.1)\n\n dv.add([5, 6])\n time.sleep(0.1)\n assert len(messages) == 2 # now we get a new message\n\ndef test_signal_new_parameter(dv):\n \"\"\"Check messages sent when parameter is added to a dataset.\"\"\"\n msg_id = 123\n\n messages = []\n def on_message(ctx, msg):\n messages.append((ctx, msg))\n\n path, name = dv.new('test', ['x'], ['y'])\n\n # open a second connection which we'll use to read params added by the other\n with labrad.connect() as cxn:\n reader = setup_dv(cxn)\n reader.signal__new_parameter(msg_id)\n\n p = reader._cxn._backend.cxn\n p.addListener(on_message, source=reader.ID, ID=msg_id)\n\n reader.cd(path)\n reader.open(name)\n\n reader.parameters() # get the list of parameters to signal our interest\n\n dv.add_parameter('a', 1)\n time.sleep(0.1)\n assert len(messages) == 1\n\n dv.add_parameter('b', 2)\n time.sleep(0.1)\n assert len(messages) == 1 # no new message until we get parameters\n\n params = reader.get_parameters()\n time.sleep(0.1)\n\n dv.add_parameters((('c', 3), ('d', 4)))\n time.sleep(0.1)\n assert len(messages) == 2 # just one message from multiple parameters\n\n\nif __name__ == \"__main__\":\n pytest.main(['-v', __file__])\n","sub_path":"tests/test_datavault.py","file_name":"test_datavault.py","file_ext":"py","file_size_in_byte":6225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"610357369","text":"import csv\nimport datetime\nimport openpyxl\nimport clipboard\nimport requests\nimport hashlib, hmac, base64, requests, time, os\nimport urllib.request, json\nfrom urllib.request import urlopen, Request\nimport random\nimport shutil\nfrom openpyxl import load_workbook\nfrom openpyxl.chart import BarChart, LineChart, Reference, Series\nfrom openpyxl.styles import Border, Alignment, PatternFill\nfrom openpyxl.utils import get_column_letter\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import 
Keys\nfrom selenium.webdriver.common.action_chains import ActionChains\nimport zipfile\nimport os, time, numpy as np, pandas as pd\nimport xlsxwriter\nfrom bs4 import BeautifulSoup #BeautifulSoup import\nimport time\n\ndef find_file(fname):\n file_flist=os.listdir(FILE_FOLDER)\n for file_f in file_flist:\n if file_f.find(fname)>-1:\n return file_f\n return \"False\"\n\ndef createFolder(directory):\n try:\n if not os.path.exists(directory):\n os.makedirs(directory)\n except OSError:\n print ('Error: Creating directory. ' + directory)\n\ndef test():\n while True:\n clk=input(\"c or e or any~~\")\n try:\n print(clk)\n if clk==\"c\":\n key=input(\"key\")\n driver.find_element_by_xpath(key).click()\n elif clk==\"e\":\n break\n elif clk==\"m\":\n key=input(\"key\")\n posi_m=driver.find_element_by_xpath(key)\n action = ActionChains(driver)\n action.move_to_element(posi_m).perform()\n else:\n key=input(\"key\")\n print(driver.find_element_by_xpath(key).get_attribute(\"innerHTML\"))\n except Exception as e:\n print(\"Error:\" + str(e))\n\ndef generate(timestamp, method, uri, secret_key):\n message = \"{}.{}.{}\".format(timestamp, method, uri)\n# hash = hmac.new(bytes(secret_key, \"utf-8\"), bytes(message, \"utf-8\"), hashlib.sha256)\n hash = hmac.new(secret_key.encode(\"utf-8\"), message.encode(\"utf-8\"), hashlib.sha256)\n hash.hexdigest()\n return base64.b64encode(hash.digest())\n\ndef get_header(method, uri, api_key, secret_key, customer_id):\n timestamp = str(int(time.time() * 1000))\n signature = generate(timestamp, method, uri, SECRET_KEY)\n return {'Content-Type': 'application/json; charset=UTF-8', 'X-Timestamp': timestamp, 'X-API-KEY': API_KEY, 'X-Customer': str(CUSTOMER_ID), 'X-Signature': signature}\n\ndef call_RelKwd(_kwds_string):\n global BASE_URL,CUSTOMER_ID,API_KEY,SECRET_KEY\n BASE_URL = 'https://api.naver.com'\n CUSTOMER_ID = '392590'\n API_KEY = '01000000008fa2a584355277148cf5b1792f0a1650becf66b049812186ce69dbfb7cbf4ec3'\n SECRET_KEY = 'AQAAAACPoqWENVJ3FIz1sXkvChZQySFY24NP0GvvyT3R14cXaQ=='\n\n uri = '/keywordstool'\n method = 'GET'\n prm = {'hintKeywords' : _kwds_string , 'showDetail':1}\n # ManageCustomerLink Usage Sample\n returnData = None\n df = pd.DataFrame()\n # print(_kwds_string)\n repeat_time=0.5\n wait_time=3\n while returnData is None:\n try:\n r = requests.get(BASE_URL + uri, params=prm, headers=get_header(method, uri, API_KEY, SECRET_KEY, CUSTOMER_ID))\n returnData = r.json()\n df = pd.DataFrame(returnData['keywordList'])\n except Exception as e:\n if 'code' in returnData:\n time.sleep(wait_time)\n wait_time=wait_time+1\n print(_kwds_string)\n print(wait_time)\n time.sleep(repeat_time)\n r = requests.get(BASE_URL + uri, params=prm, headers=get_header(method, uri, API_KEY, SECRET_KEY, CUSTOMER_ID))\n returnData = r.json()\n df = pd.DataFrame(returnData['keywordList'])\n pass\n if not 'keywordList' in returnData:\n print(_kwds_string)\n print(returnData)\n return False\n\n # df.to_csv(FILE_FOLDER+\"/sdfasdf.csv\", encoding='utf-8-sig')\n df['총검색']=df['monthlyMobileQcCnt'].astype(str).replace(\"< 10\",0).astype(float)+df['monthlyPcQcCnt'].astype(str).replace(\"< 10\",0).astype(float)\n df['총클릭']=df['monthlyAveMobileClkCnt'].astype(float)+df['monthlyAvePcClkCnt'].astype(float)\n df.rename({'compIdx':'경쟁도', 'monthlyAveMobileClkCnt':'평균클릭(폰)', 'monthlyAveMobileCtr':'평균클릭률(폰)',\n 'monthlyAvePcClkCnt':'평균클릭(PC)', 'monthlyAvePcCtr':'평클릭률(PC)', 'monthlyMobileQcCnt':'검색(폰)',\n 'monthlyPcQcCnt': '검색(PC)', 'plAvgDepth':'노출광고수', 'relKeyword':'연관키워드'},axis=1,inplace=True)\n 
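# Keep only high-traffic keywords: rows above the 95th percentile for both total searches and total clicks; if nothing qualifies, fall back to a clicks-only cut and relax the quantile in 5% steps.\n    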
rate=0.95\n if len(df)>1:\n tmp_df=df[(df['총검색'] > df['총검색'].quantile(rate)) & (df['총클릭'] > df['총클릭'].quantile(rate))]\n while len(tmp_df)==0:\n tmp_df=df[(df['총클릭'] > df['총클릭'].quantile(rate))]\n rate=rate-0.05\n df=tmp_df\n df=df.sort_values(by=\"총클릭\", ascending=False)\n return df\n\ndef make_new_name(old_name,df_rel):\n key_arr=old_name.split(\" \")\n new_arr=[]\n for tmp_key in key_arr:\n tmp_rel=df_rel[df_rel[\"노출키워드\"]==tmp_key]\n tmp_rel=tmp_rel[\"연관키워드\"].copy().drop_duplicates().values\n if not tmp_key in tmp_rel:\n tmp_rel=np.append(tmp_rel,tmp_key)\n if len(tmp_rel)>0:\n rel_idx=random.randint(0, len(tmp_rel)-1)\n new_arr.append(tmp_rel[rel_idx])\n return \" \".join(new_arr)\n\ndef get_cate_key(cate,arr_pro):\n path = 'https://datalab.naver.com/shoppingInsight/sCategory.naver'\n driver_execute(path)\n cate_arr=cate.split(\">>\")\n # 기기별 전체 선택\n driver.find_element_by_xpath('//*[@id=\"18_device_0\"]').click()\n # 성별 전체 선택\n driver.find_element_by_xpath('//*[@id=\"19_gender_0\"]').click()\n # 연령별 전체 선택\n driver.find_element_by_xpath('//*[@id=\"20_age_0\"]').click()\n # 분류 & 기간 선택\n for i in range(0, len(cate_arr)):\n driver.find_element_by_xpath(\"(//span[contains(@class,'select_btn')])[\"+str(i+1)+\"]\").click()\n driver.find_element_by_xpath(\"(//a[text()='\"+cate_arr[i]+\"'])\").click()\n # 조회하기 클릭\n driver.find_element_by_xpath('//*[@id=\"content\"]/div[2]/div/div[1]/div/a').click()\n time.sleep(1)\n for p in range(0, 25):\n # 인기검색어 가져오기\n for i in range(1, 21):\n keyword_path = '//*[@id=\"content\"]/div[2]/div/div[2]/div[2]/div/div/div[1]/ul/li[{}]/a'.format(i)\n key_num=driver.find_element_by_xpath(keyword_path).text.split(\"\\n\")[0]\n key_word=driver.find_element_by_xpath(keyword_path).text.split(\"\\n\")[1].replace(\" \",\"\")\n while(int(key_num)!=p*20+i):\n time.sleep(0.1)\n keyword_path = '//*[@id=\"content\"]/div[2]/div/div[2]/div[2]/div/div/div[1]/ul/li[{}]/a'.format(i)\n key_num=driver.find_element_by_xpath(keyword_path).text.split(\"\\n\")[0]\n key_word=driver.find_element_by_xpath(keyword_path).text.split(\"\\n\")[1]\n if len(arr_pro)\":\n driver.find_element_by_xpath('//button[@class=\"btn btn-sm btn-default btn-icon\"]').click()\n time.sleep(5)\n tmp=driver.find_element_by_xpath(\"(//tbody[@class='has-data ng-star-inserted']/tr[1]/td[11]/elena-mass-result)\").get_attribute(\"innerHTML\")\n driver.find_element_by_xpath(\"(//tbody[@class='has-data ng-star-inserted']/tr[1]/td[11]/elena-mass-result)\").click()\n time.sleep(1)\n\ndef get_data_file():\n today_graph = nowDate + \"_광고대비매출(주단위).csv\"\n if find_file(today_graph)==\"False\":\n arr_pro=[]\n get_pr_report(\"https://sell.smartstore.naver.com/#/bizadvisor/marketing\",arr_pro)\n df = pd.DataFrame(arr_pro)\n df.columns = ['주간', '광고비', '매출건수', '매출액']\n df.to_csv(FILE_FOLDER+\"/\"+today_graph, encoding='utf-8-sig')\n\n tmp_file=find_file(\"Product_\")\n if tmp_file==\"False\" and not nowDate in tmp_file:\n get_pr_keyword(\"https://sell.smartstore.naver.com/#/products/origin-list\")\n tmp_file=find_file(\"Product_\")\n chg_file_arr=tmp_file.split(\"_\")\n os.rename(FILE_FOLDER+\"/\"+tmp_file, FILE_FOLDER+\"/\"+chg_file_arr[1]+\"_\"+chg_file_arr[0]+\"_\"+chg_file_arr[2])\n\n arr_pro=[]\n arr_cate=[]\n tmp_file=find_file(nowDate + \"_Product\")\n if find_file(tmp_file)!=\"False\":\n tmp_file=os.path.join(FILE_FOLDER,tmp_file)\n df=pd.read_csv(tmp_file, encoding='utf-8-sig')[[\"상품명\",\"상품번호(스마트스토어)\",\"할인가(PC)\",\"대분류\",\"중분류\",\"소분류\",\"세분류\",\"대표이미지 URL\"]]\n df[\"카테고리명\"]=\"\"\n 
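# Build the full category path (대분류>>중분류>>소분류), appending 세분류 when it is present.\n        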
df.loc[(df['세분류']==\"\"),\"카테고리명\"]=df['대분류']+\">>\"+df['중분류']+\">>\"+df['소분류']\n df.loc[(df['세분류']!=\"\"),\"카테고리명\"]=df['대분류']+\">>\"+df['중분류']+\">>\"+df['소분류']+\">>\"+df['세분류']\n df2=df[[\"상품명\",\"상품번호(스마트스토어)\"]].copy()\n df2[\"추가홍보문구1\"]=\"\"\n df2[\"추가홍보문구2\"]=\"\"\n df2[\"제외키워드\"]=\"\"\n tmp_file2=find_file(\"※상품별 부가정보\")\n if find_file(tmp_file2)!=\"False\":\n tmp_file2=os.path.join(FILE_FOLDER,tmp_file2)\n df2=pd.read_csv(tmp_file2, encoding='utf-8-sig')\n df=pd.merge(df,df2,how=\"left\")\n df.to_csv(FILE_FOLDER + \"/\" + nowDate + \"_상품 리스트_판매중.csv\", encoding='utf-8-sig' ,index = False)\n df2.to_csv(FILE_FOLDER + \"/※상품별 부가정보.csv\", encoding='utf-8-sig' ,index = False)\n\n today_cate = \"☆카테고리별 인기검색어.csv\"\n if find_file(today_cate)==\"False\":\n arr_pro=[]\n arr_cate=arr_cate[1:]\n arr_pro.append(arr_cate)\n time.sleep(1)\n for cate_it in arr_cate:\n get_cate_key(cate_it,arr_pro)\n df = pd.DataFrame(arr_pro)\n columnNames = df.iloc[0]\n df = df[1:]\n df.columns = columnNames\n df.to_csv(FILE_FOLDER+'/'+startDate+'~'+nowDate+'_'+today_cate, encoding='utf-8-sig' ,index = False)\n\n zp_fname=find_file(\"mas\")\n if zp_fname==\"False\":\n # 검색어\n get_ad_report(\"https://manage.searchad.naver.com/customers/392590/reports/rtt-a001-000000000451507\")\n # 스토어팜\n get_ad_report(\"https://manage.searchad.naver.com/customers/392590/reports/rtt-a001-000000000451508\")\n # 광고소재리스트\n get_ad_mass(\"https://manage.searchad.naver.com/customers/392590/tool/mass\")\n zp_fname=find_file(\"mas\")\n os.chdir(FILE_FOLDER)\n zipfile.ZipFile(zp_fname).extractall()\n os.remove(zp_fname)\n\n na_list=os.listdir(FILE_FOLDER)\n for na in na_list:\n if not nowDate in na and na != \"import\":\n os.rename(FILE_FOLDER+\"/\"+na, FILE_FOLDER+\"/\"+nowDate+\"_광고시스템-\"+na.replace(\"+\",\"-\"))\n\ndef colnum_string(n):\n string = \"\"\n while n > 0:\n n, remainder = divmod(n - 1, 26)\n string = chr(65 + remainder) + string\n return string\n\ndef make_chart(title,minR,maxR,cP1,cP2,wid):\n cats = Reference(ws, min_col=2, max_col=2, min_row=minR, max_row=maxR)\n value_ad = Reference(ws, min_col=3, max_col=3, min_row=minR, max_row=maxR)\n value_cnt = Reference(ws, min_col=4, max_col=4, min_row=minR, max_row=maxR)\n value_price = Reference(ws, min_col=5, max_col=5, min_row=minR, max_row=maxR)\n\n chart_ad = LineChart()\n chart_ad.add_data(value_ad)\n chart_ad.set_categories(cats)\n chart_ad.y_axis.axId = 200\n chart_ad.y_axis.title = '광고비'\n chart_ad.y_axis.majorGridlines = None\n s = chart_ad.series[0]\n s.graphicalProperties.line.solidFill = \"0070C0\"\n\n chart_cnt = LineChart()\n chart_cnt.add_data(value_cnt)\n chart_cnt.y_axis.title = \"매출건\"\n\n chart_cnt.title = title+\"_매출건\"\n chart_cnt.width = wid\n chart_cnt.height = 6\n chart_cnt.y_axis.crosses = \"max\"\n chart_cnt.legend.tagname=\"b\"\n s = chart_cnt.series[0]\n s.graphicalProperties.line.solidFill = \"F4B084\"\n chart_cnt += chart_ad\n chart_cnt.legend=None\n\n chart_price = LineChart()\n chart_price.add_data(value_price)\n chart_price.y_axis.title = \"매출액\"\n\n chart_price.title = title+\"_매출액\"\n chart_price.width = wid+1\n chart_price.height = 6\n chart_price.y_axis.crosses = \"max\"\n s = chart_price.series[0]\n s.graphicalProperties.line.solidFill = \"FFC000\"\n chart_price += chart_ad\n chart_price.legend=None\n\n ws.add_chart(chart_cnt, cP1)\n ws.add_chart(chart_price, cP2)\n\ndef set_data_to_report():\n# 엑셀데이터 가져오기\n global wb\n global ws\n\n wb = load_workbook(THIS_FOLDER+'\\마케팅보고서.xlsx')\n ws = wb['보고서']\n for row in ws['L7:AC22']:\n for cell in 
row:\n cell.value = None\n\n # 유입키워드 컷트라인\n in_keyword_per_cut=ws['L5'].value\n in_keyword_cnt_cut=ws['L6'].value\n\n # 인기키워드 top 10\n arr_key_rank=[]\n arr_key_cnt=[]\n for row in ws['K13:K22']:\n for cell in row:\n tmp=int(cell.value.replace(\"~\",\"\"))\n if not tmp in arr_key_rank:\n arr_key_rank.append(tmp)\n arr_key_cnt.append(1)\n else:\n arr_key_cnt[len(arr_key_cnt)-1]=arr_key_cnt[len(arr_key_cnt)-1]+1\n # 제외키워드\n arr_key_exc=[]\n for i in range(25,27):\n cnt=11\n tmp_v=ws.cell(i,cnt).value\n while tmp_v!=None:\n arr_key_exc.append(tmp_v)\n cnt=cnt+1\n tmp_v=ws.cell(i,cnt).value\n arr_exc=(\"|\").join(arr_key_exc)\n\n# 유입 csv 데이터 추출\n tmp_file=find_file(\"검색어-보고서\")\n tmp_file=os.path.join(FILE_FOLDER,tmp_file)\n df=pd.read_csv(tmp_file, encoding='utf-8-sig', skiprows=1)\n df=df[1:][[\"검색어\",\"노출수\",\"클릭수\",\"전환수\"]]\n columnNames = df.columns\n for i in range(1,4):\n col=columnNames[i]\n df[col] = df[col].astype(str).str.replace(',', '')\n df=df.astype({col: int})\n df=df.sort_values(by=col, ascending=False)\n df2=df[df[col] > df[col].quantile((100-in_keyword_per_cut)/100)]\n df2=df2.head(in_keyword_cnt_cut)\n ws.cell(6+i,11).value=col\n for j in range(0,df2.value_counts().size):\n ws.cell(6+i,12+j).value=(df2.iloc[j,0]+\"\\r\\n\"+str(df2.iloc[j,i]))\n ws.cell(6+i,12+j).alignment = Alignment(wrapText=True)\n\n# 인기 csv 데이터 추출\n tmp_file=find_file(\"카테고리\")\n tmp_file=os.path.join(FILE_FOLDER,tmp_file)\n df=pd.read_csv(tmp_file, encoding='utf-8-sig')\n for i in range(1,len(df.columns)):\n col=df.columns[i]\n tmp_col=col.split(\">>\")\n ws.cell(11,11+i).value=(\"\\r\\n\").join(tmp_col[0:len(tmp_col)-1])\n ws.cell(11,11+i).alignment = Alignment(wrapText=True)\n ws.cell(12,11+i).value=tmp_col[len(tmp_col)-1]\n ws.column_dimensions[get_column_letter(11+i)].width = 10\n for x in range(1,len(df.columns)):\n pre=0\n pre_cnt=0\n for i in range(0,len(arr_key_rank)):\n if i>0:\n pre=arr_key_rank[i-1]\n pre_cnt=pre_cnt+arr_key_cnt[i-1]\n ws.column_dimensions[get_column_letter(12+x)].width = 10\n df2=df.iloc[pre:arr_key_rank[i],[0,x]]\n df2=df2.sample(arr_key_cnt[i])\n while (df2.iloc[:,1].str.contains(arr_exc)).any():\n df2=df.iloc[pre:arr_key_rank[i],[0,x]]\n df2=df2.sample(arr_key_cnt[i])\n df2=df2.sort_index()\n for y in range(0,df2.value_counts().size):\n mer_t=str(df2.iloc[y,1])+\" > \"+str(df2.iloc[y,0])\n ws.cell(13+y+pre_cnt,11+x).value=mer_t\n\n# 광고대비 매출 csv 추출\n tmp_file=find_file(\"광고대비매출\")\n tmp_file=os.path.join(FILE_FOLDER,tmp_file)\n df=pd.read_csv(tmp_file, encoding='utf-8-sig')\n\n for i in range(1,len(df.columns)):\n col=df.columns[i]\n if i==2 or i==4:\n df[col] = df[col].str.replace(',', '')\n df=df.astype({col: float})\n for x in range(1,len(df.columns)):\n for y in range(0,df.value_counts().size):\n ws.cell(6+y,1+x).value=df.iloc[y,x]\n make_chart(\"주간\",df.value_counts().size+2,df.value_counts().size+5,\"B6\",\"F6\",8)\n make_chart(\"전체\",6,df.value_counts().size+5,\"J28\",\"J39\",35)\n\n tail_df_a=[96,48,16]\n detail_df_a=[[\"반기\",\"B36\",\"F36\",8],[\"분기\",\"B25\",\"F25\",8],[\"월간\",\"B14\",\"F14\",8]]\n idx=0\n last_y=y+1\n for tl in tail_df_a:\n df=df.tail(tl)\n del_tail=tail_df_a[idx]/4\n arr_tmp=[]\n for y in range(0,df.value_counts().size):\n # print(df.iloc[y])\n tmp_idx=(int(y//del_tail))\n if len(arr_tmp) < tmp_idx+1:\n arr_tmp.append([df.iloc[y,1],df.iloc[y,2],df.iloc[y,3],df.iloc[y,4]])\n else:\n for j in range(1,4):\n arr_tmp[tmp_idx][j]=arr_tmp[tmp_idx][j]+df.iloc[y,j+1]\n df_tmp=df\n ws.cell(7+last_y,2).value=detail_df_a[idx][0]+\" 부분합\"\n for x in 
range(0,len(arr_tmp[0])):\n for y in range(0,len(arr_tmp)):\n ws.cell(8+last_y+y,2+x).value=arr_tmp[y][x]\n make_chart(detail_df_a[idx][0],8+last_y,8+last_y+y,detail_df_a[idx][1],detail_df_a[idx][2],detail_df_a[idx][3])\n last_y=last_y+y+3\n idx=idx+1\n\n max_ad=df.tail(4).iloc[:,2].max()\n next_month=nowDate[4:6]\n ws.cell(1,2).value=str(int(next_month)+1)+\"월\"\n ws.cell(2,3).value=int(max_ad)*4\n\n wb.save(FILE_FOLDER+'/'+startDate+'~'+nowDate+'_☆마케팅보고서.xlsx')\n\ndef check_warngg(p_id,g_id,s_id,s_name):\n path=\"https://manage.searchad.naver.com/customers/\"+str(c_id)+\"/adgroups/\"+str(g_id)\n # print(driver.current_url)\n driver_execute(path)\n\n rslt_flg=\"False\"\n if driver.find_element_by_xpath('(//button[@class=\"btn-sm btn-toggle dropdown-toggle btn btn-default\"])').text!=\"행 표시: 200\":\n driver.find_element_by_xpath('//button[@class=\"btn-sm btn-toggle dropdown-toggle btn btn-default\"]').click()\n driver.find_element_by_xpath(\"(//button[text()=200])\").click()\n # 행표시 200개 / 다음페이지 이동 / 노출가능 클릭\n row_posi = driver.find_element_by_xpath('//td[@data-value=\"'+s_id+'\"]/preceding-sibling::td[1]/span/a')\n row_text=row_posi.text\n\n if row_text==\"소재 연동제한\":\n action = ActionChains(driver)\n action.move_to_element(driver.find_element_by_xpath('//div[@class=\"inner-left\"]')).perform()\n driver.find_element_by_xpath('//td[@data-value=\"'+s_id+'\"]/preceding-sibling::td[1]/preceding-sibling::td[1]/preceding-sibling::td[1]/input').click()\n action.move_to_element(driver.find_element_by_xpath('//div[@class=\"inner-left\"]')).perform()\n driver.find_element_by_xpath('//button[@class=\"ml-2 btn btn-default btn-sm\"]').click()\n driver.find_element_by_xpath('//button[@class=\"btn btn-primary Confirm_modal-button__1Kwk6 btn btn-secondary\"]').click()\n elif row_text==\"노출가능\":\n if stillNew:\n warn_arr=[p_id,g_id,s_id,s_name,wait_val,\"-\"]\n else:\n warn_arr=[p_id,g_id,s_id,s_name,badgg_val,\"-\"]\n rslt_flg=pd.DataFrame([warn_arr],columns=['상품 ID','광고그룹 ID','소재 ID','노출상품명','제한상태','제한사유'])\n else:\n action = ActionChains(driver)\n try:\n action.move_to_element(row_posi).perform()\n row_posi.click()\n except Exception as e:\n row_posi_parent=driver.find_element_by_xpath('//td[@data-value=\"'+s_id+'\"]/parent::tr/preceding-sibling::tr[1]/td[1]')\n action.move_to_element(row_posi_parent).perform()\n row_posi.click()\n row_detail=[]\n for row_detail_each in driver.find_elements_by_xpath('//div[@class=\"list-dot\"]'):\n row_detail.append(row_detail_each.text.replace(\"더 알아보기\",\"\"))\n row_detail_img = driver.find_element_by_xpath('//td[@data-value=\"'+s_id+'\"]/following-sibling::td/div/div/div/div[@class=\"image-preview\"]').get_attribute(\"style\")\n img_link=row_detail_img.split('\"')[1]\n tmp_img_link=img_link.split(\".\")\n urllib.request.urlretrieve(img_link, OUTPUT_FOLDER+\"\\\\\"+s_id+\".\"+tmp_img_link[len(tmp_img_link)-1])\n warn_arr=[p_id,g_id,s_id,s_name,row_text,\"\\r\\n\".join(row_detail)]\n rslt_flg=pd.DataFrame([warn_arr],columns=['상품 ID','광고그룹 ID','소재 ID','노출상품명','제한상태','제한사유'])\n driver.find_element_by_xpath('//button[@class=\"modal-button btn btn-default\"]').click()\n return rslt_flg\n\ndef set_data_to_ad():\n# 엑셀데이터 가져오기\n global wb\n global ws\n global ws3\n global c_id\n global wait_val\n global badgg_val\n\n wb = load_workbook(THIS_FOLDER+'\\광고관리.xlsx')\n ws = wb['검색광고']\n ws3 = wb['추가홍보문구']\n\n # 노출 상태 종류\n goodgg_val=ws['O2'].value\n goodgg_col=ws['O2'].fill.start_color.index\n badgg_val=ws['P2'].value\n badgg_col=ws['P2'].fill.start_color.index\n 
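# The remaining status labels and colors follow; a few fill colors are hard-coded ARGB strings, with some of the original cell-fill lookups left commented out.\n    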
chapri_val=ws['Q2'].value\n chapri_col=ws['Q2'].fill.start_color.index\n wait_val=ws['R2'].value\n wait_col=\"FFC65911\"\n newgg_val=ws['O3'].value\n newgg_col=ws['O3'].fill.start_color.index\n offgg_val=ws['P3'].value\n # offgg_col=ws['P3'].fill.start_color.index\n offgg_col=\"FFAEAAAA\"\n warngg_val=ws['Q3'].value\n warngg_col=ws['Q3'].fill.start_color.index\n emptygg_val=ws['R3'].value\n # emptygg_col=ws['R3'].fill.start_color.index\n emptygg_col=\"FFFFFFFF\"\n\n gg_val=[goodgg_val,badgg_val,chapri_val,wait_val,newgg_val,offgg_val,warngg_val,emptygg_val]\n gg_col=[goodgg_col,badgg_col,chapri_col,wait_col,newgg_col,offgg_col,warngg_col,emptygg_col]\n\n # 양호소재 컷트라인\n cut_rank=ws['C3'].value\n cut_show=ws['D3'].value\n cut_clk=ws['E3'].value\n cut_buy=ws['F3'].value\n cut_price_u=ws['G3'].value\n cut_price_d=ws['H3'].value\n\n # 입찰가 조절라인\n price_dwn_cut=ws['C5'].value\n price_dwn_val=ws['C6'].value\n price_up_cut=ws['D5'].value\n price_up_val=ws['D6'].value\n\n price_default=ws['F5'].value\n\n# 상품 csv 데이터 추출\n pro_file=find_file(\"상품 리스트\")\n pro_file=os.path.join(FILE_FOLDER,pro_file)\n df_pro=pd.read_csv(pro_file, encoding='utf-8-sig')\n df_pro.rename(columns = {'상품번호(스마트스토어)' : '상품 ID', '할인가(PC)' : '판매가'}, inplace = True)\n\n mas_file=find_file(\"mas\")\n mas_file=os.path.join(FILE_FOLDER,mas_file)\n df_mas=pd.read_csv(mas_file, encoding='utf-8-sig', skiprows=1)\n df_mas.rename(columns = {'쇼핑몰 상품ID' : '상품 ID'}, inplace = True)\n df_mas['노출상품명']=df_mas['노출상품명'].fillna(df_mas['기본상품명'])\n c_id=df_mas[\"CUST_ID\"].iat[0]\n\n report_file=find_file(\"스토어팜-보고서\")\n report_file=os.path.join(FILE_FOLDER,report_file)\n df_report=pd.read_csv(report_file, encoding='utf-8-sig', skiprows=1)\n df_report.rename(columns = {'소재' : '소재 ID', '총비용(VAT포함,원)': '광고비용', '전환매출액(원)': '전환액', '전환율(%)': '전환율' ,'광고그룹': '광고그룹 이름'}, inplace = True)\n sum_col_val=['노출수','클릭수','광고비용','전환수', '전환율', '전환액']\n report_col_val=['노출상태', '노출상품명', '광고그룹 이름', '평균노출순위', '노출수', '클릭수', '전환수', '전환율', '광고비용', '전환액', '소재 상태', '소재 입찰가', '소재 ID', '광고그룹 ID', '상품 ID']\n report_col_val_del=['\\r\\n', '(', ')\\r\\n', '/', '/', '/', '/', '/', '/', '\\r\\n', '/', \"\\r\\n\", '/', '/', \"\"]\n total_col_val=['제외키워드','상품명','상품 ID','대표이미지 URL','판매가','대분류','중분류','소분류','세분류','노출수','클릭수','광고비용','전환수','전환율','전환액']\n\n x=10\n for sum_val in sum_col_val:\n df_report[sum_val] = df_report[sum_val].astype(str).str.replace(',', '')\n df_report=df_report.astype({sum_val: float})\n if sum_val==\"전환율\":\n tmp_sum=(df_report[\"전환수\"].sum()*10000/df_report[\"클릭수\"].sum()//1)/100\n ws.cell(10,x).value=int(tmp_sum)\n else:\n tmp_sum=df_report[sum_val].sum()\n ws.cell(10,x).value=int(tmp_sum)\n x=x+1\n\n df_mas=pd.merge(df_mas,df_report,on=[\"소재 ID\",\"광고그룹 이름\"],how=\"outer\")\n df_mas=df_mas.sort_values(by=['소재 상태','상품 ID','클릭수','광고비용','노출수','광고그룹 이름'], ascending=[False,True,False,True,False,False])\n\n df_mas_sum=df_mas.groupby('상품 ID').sum().reset_index()[['상품 ID','노출수','클릭수','광고비용','전환수','전환액','전환율']].copy()\n df_mas_sum=df_mas_sum.sort_values(by=['노출수','클릭수','광고비용'], ascending=[False,False,True])\n df_mas_sum[\"전환율\"]=(df_mas_sum[\"전환수\"]*10000/df_mas_sum[\"클릭수\"]//1)/100\n\n df_grp_arr=df_mas.groupby('광고그룹 이름').sum().reset_index().copy()['광고그룹 이름'].values\n\n df_mas_each=df_mas.copy()\n # df_mas_each=df_mas[df_mas[\"상품 ID\"].notna()].copy()\n df_mas_each[\"노출상태\"]=goodgg_val\n df_mas_each=df_mas_each[report_col_val]\n df_mas_each.loc[(df_mas_each['평균노출순위']>=price_dwn_cut),\"노출상태\"]=chapri_val+\"인상 \" + df_mas_each[\"소재 입찰가\"].astype(str) + \">\" + 
(df_mas_each[\"소재 입찰가\"]+price_dwn_val).astype(str) + \"\"\n df_mas_each.loc[(df_mas_each['평균노출순위']<=price_up_cut),\"노출상태\"]=chapri_val+\"인하 \" + df_mas_each[\"소재 입찰가\"].astype(str) + \">\" + (df_mas_each[\"소재 입찰가\"]+price_up_val).astype(str) + \"\"\n df_mas_each.loc[(df_mas_each['평균노출순위']<=cut_rank) & (df_mas_each['노출수']<=cut_show) & (df_mas_each['클릭수']<=cut_clk) & (df_mas_each['전환수']<=cut_buy) & ~((cut_price_d <= df_mas_each['소재 입찰가']) & (df_mas_each['소재 입찰가'] <= cut_price_u)),\"노출상태\"]=badgg_val\n df_mas_each.loc[(pd.isnull(df_mas_each['노출수'])),\"노출상태\"]=warngg_val\n\n df_pid_arr=df_mas_each['상품 ID'].copy().drop_duplicates().values\n for df_pid in df_pid_arr:\n tmp_df=df_mas_each[df_mas_each['상품 ID']==df_pid]\n df_grp_arr_each=tmp_df['광고그룹 이름'].copy().drop_duplicates().values\n grp_minus=np.setdiff1d(df_grp_arr,df_grp_arr_each)\n for x in grp_minus:\n next_each=len(df_mas_each)\n df_mas_each.loc[next_each,\"노출상태\"]=emptygg_val\n df_mas_each.loc[next_each,\"광고그룹 이름\"]=x\n df_mas_each.loc[next_each,\"광고그룹 ID\"]=df_mas[df_mas['광고그룹 이름']==x]['광고그룹 ID'].iloc[0]\n df_mas_each.loc[next_each,\"상품 ID\"]=df_pid\n df_mas_each=df_mas_each.sort_values(by=['상품 ID','노출수','클릭수','광고비용'], ascending=[False,False,False,True])\n df_mas_each=df_mas_each.reset_index(drop=True)\n\n df_pid_arr=df_mas_each['상품 ID'].copy().drop_duplicates().values\n for df_pid in df_pid_arr:\n tmp_df=df_mas_each[df_mas_each['상품 ID']==df_pid].copy()\n tmp_df_idx=df_mas_each.index[df_mas_each['상품 ID']==df_pid].copy()\n cnt_bad=0\n for sub_idx in range(0,len(tmp_df)):\n each_idx=tmp_df_idx[sub_idx]\n if sub_idx<10:\n tmp_what_show=tmp_df.iloc[sub_idx][\"노출상태\"]\n if badgg_val in tmp_what_show:\n cnt_bad=cnt_bad+1\n elif offgg_val in tmp_what_show or emptygg_val in tmp_what_show:\n if not tmp_df['노출상태'].iat[0] in [badgg_val,warngg_val]:\n df_mas_each['노출상태'].iat[each_idx]=newgg_val\n df_mas_each['상품 ID'].iat[each_idx]=tmp_df.iloc[0]['상품 ID']\n df_mas_each['소재 상태'].iat[each_idx]=\"on\"\n df_mas_each['소재 입찰가'].iat[each_idx]=price_default\n elif tmp_df['노출상태'].iat[sub_idx]!=emptygg_val:\n if cnt_bad==0:\n df_mas_each['노출상태'].iat[each_idx]=offgg_val\n else:\n df_mas_each['노출상태'].iat[each_idx]=newgg_val\n cnt_bad=cnt_bad-1\n df_mas_each=df_mas_each.sort_values(by=['상품 ID','노출수','클릭수','광고비용'], ascending=[False,False,False,True])\n\n warn_each_file=find_file(\"☆점검소재리스트\")\n if warn_each_file!=\"False\":\n warn_each_file=os.path.join(FILE_FOLDER,warn_each_file)\n df_warn_detail=pd.read_csv(warn_each_file, encoding='utf-8-sig')\n else:\n df_warn_detail=pd.DataFrame(columns=['광고그룹 ID','소재 ID','노출상품명','제한상태','제한사유'])\n df_warn_sid_arr=df_warn_detail['소재 ID'].values\n for g_name in df_grp_arr:\n g_id=df_mas_each[df_mas_each['광고그룹 이름']==g_name]['광고그룹 ID'].iloc[0]\n df_sid=df_mas_each[(df_mas_each['노출상태']==warngg_val) & (df_mas_each['광고그룹 이름']==g_name) & (df_mas_each['소재 상태']==\"on\")].copy()\n df_sid_arr=df_sid['소재 ID'].values\n df_sname_arr=df_sid['노출상품명'].values\n s_idx=0\n for s_id in df_sid_arr:\n if not s_id in df_warn_sid_arr:\n s_name=df_sname_arr[s_idx]\n p_id=df_mas_each[df_mas_each['소재 ID']==s_id]['상품 ID'].iloc[0]\n tmp_df=check_warngg(p_id,g_id,s_id,s_name)\n if isinstance(tmp_df, pd.DataFrame):\n df_warn_detail=df_warn_detail.append(tmp_df, ignore_index = True)\n s_idx=s_idx+1\n df_warn_detail=df_warn_detail.sort_values(by=['제한사유','상품 ID','제한상태'], ascending=[False,False,False])\n df_warn_detail.to_csv(OUTPUT_FOLDER+'/'+startDate+'~'+nowDate+'_☆점검소재리스트.csv' , encoding='utf-8-sig',index = False)\n\n# todo 신규소재 키워드 부여\n df_rel_key=\"\"\n 
rel_key_file=find_file(\"☆상품 연관키워드\")\n if rel_key_file!=\"False\":\n rel_key_file=os.path.join(FILE_FOLDER,rel_key_file)\n df_rel_key=pd.read_csv(rel_key_file, encoding='utf-8-sig')\n df_p_id=df_mas_each[df_mas_each['노출상태']==newgg_val]['상품 ID'].drop_duplicates().values\n df_show_name=df_mas_each[(df_mas_each['상품 ID'].isin(df_p_id))]['노출상품명'].drop_duplicates().values\n\n if not isinstance(df_rel_key, pd.DataFrame):\n df_rel_key=pd.DataFrame(columns=(call_RelKwd(\"test\").columns.insert(0,\"노출키워드\")))\n df_rel_key_arr=df_rel_key['노출키워드'].drop_duplicates().values\n for each_show in df_show_name:\n tmp_each_show=str(each_show).split(\" \")\n for each_show_2 in tmp_each_show:\n if not each_show_2 in df_rel_key_arr and not each_show_2 in [\",\", \".\", \"/\", \"(\", \")\", \"\", \"-\"]:\n df_rel_key_each=call_RelKwd(str(each_show_2))\n if isinstance(df_rel_key_each, pd.DataFrame):\n df_rel_key_each['노출키워드']=each_show_2\n df_rel_key=df_rel_key.append(df_rel_key_each, ignore_index = True)\n df_rel_key=df_rel_key.drop_duplicates(subset=['노출키워드','연관키워드'])\n# 적당히 자동화때 삭제\n df_rel_key.to_csv(OUTPUT_FOLDER+'/'+startDate+'~'+nowDate+'_☆상품 연관키워드.csv', encoding='utf-8-sig' ,index = False)\n\n df_rel_key.to_csv(FILE_FOLDER+'/'+startDate+'~'+nowDate+'_☆상품 연관키워드.csv', encoding='utf-8-sig',index = False)\n df_rel_key.to_csv(OUTPUT_FOLDER+'/'+startDate+'~'+nowDate+'_☆상품 연관키워드.csv', encoding='utf-8-sig' ,index = False)\n\n df_pid_arr=df_mas_each['상품 ID'].copy().drop_duplicates().values\n for df_pid in df_pid_arr:\n tmp_df=df_mas_each[df_mas_each['상품 ID']==df_pid].copy()\n tmp_df_idx=df_mas_each.index[df_mas_each['상품 ID']==df_pid].copy()\n for sub_idx in range(0,len(tmp_df)):\n each_idx=tmp_df_idx[sub_idx]\n if newgg_val in tmp_df['노출상태'].iat[sub_idx]:\n prev_name = tmp_df['노출상품명'].iat[0]\n new_name=make_new_name(prev_name,df_rel_key)\n df_mas_each['노출상품명'].iat[each_idx]=new_name\n elif warngg_val in tmp_df['노출상태'].iat[sub_idx]:\n tmp_warn=df_warn_detail[df_warn_detail['소재 ID']==tmp_df['소재 ID'].iat[sub_idx]]\n if tmp_warn['제한상태'].iloc[0]!=\"소재 노출제한\":\n df_mas_each['노출상태'].iat[each_idx]=tmp_warn['제한상태'].iloc[0]\n\n df_mas_each.to_csv(FILE_FOLDER+\"/\"+nowDate+\"_광고소재리스트.csv\", encoding='utf-8-sig',index = False)\n df_mas_each.to_csv(OUTPUT_FOLDER+\"/\"+nowDate+\"_광고소재리스트.csv\", encoding='utf-8-sig',index = False)\n\n # todo 상품등록\n df_product_add=df_mas_each[df_mas_each[\"노출상태\"]==newgg_val][[\"광고그룹 ID\",\"상품 ID\",\"소재 입찰가\",\"노출상품명\"]].copy()\n df_product_add.to_csv(OUTPUT_FOLDER+\"/\"+nowDate+\"_신규소재등록.csv\", encoding='utf-8-sig',index = False)\n # todo 입찰가 변경\n df_product_chgpri=df_mas_each[df_mas_each[\"노출상태\"].str.contains(chapri_val)].copy()\n df_product_chgpri[\"소재 입찰가\"]=df_product_chgpri[\"노출상태\"].str.split(\">\").str[1]\n df_product_chgpri=df_product_chgpri[[\"광고그룹 ID\",\"소재 ID\",\"소재 입찰가\"]]\n df_product_chgpri.to_csv(OUTPUT_FOLDER+\"/\"+nowDate+\"_입찰가변경.csv\", encoding='utf-8-sig',index = False)\n # todo on/off 소재 일괄 변경\n df_product_2on = df_mas_each[df_mas_each[\"노출상태\"]==offgg_val][[\"광고그룹 ID\",\"소재 ID\",\"소재 상태\"]].copy()\n df_product_2on[\"소재 상태\"]=\"off\"\n df_product_2off= df_mas_each[df_mas_each[\"노출상태\"]!=offgg_val][[\"광고그룹 ID\",\"소재 ID\",\"소재 상태\"]].copy()\n df_product_2off[\"소재 상태\"]=\"on\"\n df_product_2on.append(df_product_2off)\n df_product_2on.to_csv(OUTPUT_FOLDER+\"/\"+nowDate+\"_활성상태변경.csv\", encoding='utf-8-sig',index = False)\n\n for df_idx in range(0,len(df_mas_sum)):\n this_pro_id=df_mas_sum.iloc[df_idx]['상품 ID']\n tmp_df=df_mas_each[df_mas_each['상품 
ID'].astype(str)==str(this_pro_id.astype(float))].copy()\n this_pro_gid=df_mas_each[df_mas_each['상품 ID'].astype(str)==str(this_pro_id)].copy()\n for sub_idx in range(0,len(tmp_df)):\n if not \"소재\"+str(sub_idx+1) in df_mas_sum.columns:\n total_col_val.append(\"소재\"+str(sub_idx+1))\n df_mas_sum[\"소재\"+str(sub_idx+1)]=\"\"\n tmp_df_val=\"\"\n v_idx=0\n for each_col in report_col_val:\n tmp_df_val = tmp_df_val + str(tmp_df.iloc[sub_idx][each_col]).replace(\".0\",\"\")\n tmp_df_val = tmp_df_val + report_col_val_del[v_idx]\n v_idx=v_idx+1\n df_mas_sum[\"소재\"+str(sub_idx+1)].iat[df_idx]=str(tmp_df_val)\n df_all=pd.merge(df_pro,df_mas_sum,on=\"상품 ID\",how=\"outer\")[total_col_val]\n df_all=df_all.sort_values(by=['노출수','클릭수','광고비용'], ascending=[False,False,True])\n df_all['상품명']=df_all['상품명'].fillna(\"삭제상품\")\n df_all.to_csv(FILE_FOLDER+\"/\"+nowDate+\"_광고소재부분합리스트.csv\", encoding='utf-8-sig',index = False)\n\n rate=0.9\n tmp_df=df_all[(df_all['노출수'] >= df_all['노출수'].quantile(rate)) & (df_all['전환수'] <= df_all['전환수'].quantile(rate))]\n tmp_df=tmp_df.sort_values(by=\"클릭수\", ascending=True)\n tmp_df2=df_warn_detail.drop_duplicates(\"상품 ID\")\n tmp_df=pd.merge(tmp_df,tmp_df2,how=\"left\")\n tmp_df=tmp_df.head(4)\n tmp_df.to_csv(FILE_FOLDER+\"/\"+nowDate+\"연습.csv\", encoding='utf-8-sig',index = False)\n\n y=12\n for df_y in range(0,len(df_all)):\n x=2\n for df_x in range(0,len(df_all.iloc[df_y])):\n ws.cell(y,x).value=df_all.iloc[df_y,df_x]\n if \"소재\" in total_col_val[df_x]:\n ws.column_dimensions[colnum_string(x)].width = 30\n ws.row_dimensions[y].height = 48\n ws.cell(y,x).alignment = Alignment(wrapText=True)\n delim=str(ws.cell(y,x).value).split(\"\\r\\n\")[0]\n for gg in gg_val:\n gg_idx=gg_val.index(gg)\n if gg in delim:\n ws.cell(y,x).fill=PatternFill(\"solid\", fgColor=gg_col[gg_idx])\n x=x+1\n y=y+1\n\n x=2\n for total_val in total_col_val:\n ws.cell(11,x).value=total_val\n if \"소재1\" == total_val:\n v_idx=0\n tmp_df_val=\"\"\n for each_col in report_col_val:\n tmp_df_val = tmp_df_val + each_col\n tmp_df_val = tmp_df_val + report_col_val_del[v_idx]\n v_idx=v_idx+1\n ws.cell(8,x).value=tmp_df_val+\"\\r\\n(노출▶클릭순▶)\"\n ws.cell(8,x).alignment = Alignment(wrapText=True)\n ws.row_dimensions[8].height = 115\n x=x+1\n\n wb.save(FILE_FOLDER+'/'+startDate+'~'+nowDate+'_☆광고관리.xlsx')\n\ndef init():\n global THIS_FOLDER\n global FILE_FOLDER\n global OUTPUT_FOLDER\n global nowDate\n global startDate\n global stillNew\n global driver_exist\n\n driver_exist=False\n now = datetime.datetime.now()\n nowDate = now.strftime('%Y%m%d')\n startDate = nowDate\n stillNew = True\n THIS_FOLDER = os.path.dirname(os.path.abspath(__file__))\n FILE_FOLDER = THIS_FOLDER+'\\자료'\n createFolder(FILE_FOLDER)\n filedel_list=os.listdir(FILE_FOLDER)\n day_check=7\n for filedel in filedel_list:\n if not \"※\" in filedel:\n if \"☆\" in filedel:\n startD = datetime.datetime.strptime(filedel.split(\"_\")[0].split(\"~\")[0], \"%Y%m%d\")\n date_diff = now - startD\n if date_diff.days>day_check-2:\n stillNew = False\n if date_diff.days>day_check:\n del_fname=os.path.join(FILE_FOLDER,filedel)\n os.remove(del_fname)\n del_foldname=os.path.join(FILE_FOLDER,\"import\")\n shutil.rmtree(del_foldname)\n elif not \"import\" in filedel:\n before_fname=os.path.join(FILE_FOLDER,filedel)\n after_fname=startD.strftime('%Y%m%d')+\"~\"+nowDate+\"_\"+filedel.split(\"_\")[1]\n os.rename(before_fname, FILE_FOLDER+\"/\"+after_fname)\n startDate = startD.strftime('%Y%m%d')\n elif not nowDate in filedel and not \"import\" in filedel:\n 
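# Files from a previous day that are not part of the import set are deleted outright.\n            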
del_fname=os.path.join(FILE_FOLDER,filedel)\n os.remove(del_fname)\n OUTPUT_FOLDER = FILE_FOLDER+'\\import'\n createFolder(OUTPUT_FOLDER)\n\ndef driver_execute(path):\n global driver_exist\n if not driver_exist:\n driver_init()\n driver_exist=True\n time.sleep(1)\n pre_login(\"https://sell.smartstore.naver.com/#/bizadvisor/marketing\")\n time.sleep(1)\n pre_ad_login(\"https://manage.searchad.naver.com/customers/392590/reports/rtt-a001-000000000451507\")\n if driver.current_url!=path:\n time.sleep(1)\n driver.get(path)\n\ndef driver_init():\n global driver\n\n g_name=os.path.join(THIS_FOLDER,\"chromedriver\")\n options = webdriver.ChromeOptions()\n # 창 숨기는 옵션 추가\n # options.add_argument(\"headless\")\n prefs = {\n \"download.default_directory\": FILE_FOLDER,\n \"download.prompt_for_download\": False,\n \"download.directory_upgrade\": True,\n \"safebrowsing.enabled\": True\n }\n options.add_experimental_option('prefs', prefs)\n\n driver = webdriver.Chrome(g_name, options=options)\n driver.implicitly_wait(20)\n\ndef timer_start():\n global start_t\n start_t = time.time()\n\ndef timer_chk():\n print(\"time :\", time.time() - start_t)\n\ntimer_start()\ninit()\n\nget_data_file()\nset_data_to_report()\nset_data_to_ad()\n\nif driver_exist:\n driver.close()\ntimer_chk()\n","sub_path":"f_key_table.py","file_name":"f_key_table.py","file_ext":"py","file_size_in_byte":44683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"23697642","text":"from decimal import *\nfrom django.db.models import Model\nfrom django.utils import timezone\nfrom django_filters.rest_framework import DjangoFilterBackend\nfrom journalize.enums import JournalEntryTypes\nfrom journalize.models import JournalEntry, Transaction\nfrom rest_framework import viewsets\nfrom rest_framework.decorators import detail_route, list_route\nfrom rest_framework.filters import SearchFilter, OrderingFilter\nfrom rest_framework.permissions import AllowAny, DjangoModelPermissions\nfrom rest_framework.response import Response\nfrom project.utils import format_currency, format_percent\nfrom .enums import AccountCategories, AccountClassifications\nfrom .models import Account, AccountType\nfrom .permissions import LAAccountsClosingPermission\nfrom .serializers import AccountSerializer, AccountTypeSerializer, RetrieveAccountSerializer, RetrieveAccountTypeSerializer, LedgerAccountSerializer\n\nclass AccountTypeViewSet(viewsets.ReadOnlyModelViewSet):\n queryset = AccountType.objects.all()\n serializer_class = AccountTypeSerializer\n\n def get_serializer_class(self):\n if self.request.method != 'GET':\n return super(AccountTypeViewSet, self).get_serializer_class()\n\n return RetrieveAccountTypeSerializer\n\n\nclass AccountViewSet(viewsets.ModelViewSet):\n queryset = Account.objects.all()\n filter_backends = (SearchFilter, DjangoFilterBackend, OrderingFilter,)\n search_fields = ('name', 'description', 'account_type__name',)\n filter_fields = {\n 'name': ['icontains'],\n 'description': ['icontains'],\n 'account_type__category': ['exact'],\n 'account_type__name': ['icontains'],\n 'is_active': ['exact']\n }\n ordering_fields = ('name', 'account_type__name', 'account_type__category', 'account_type__order', 'order',)\n serializer_class = AccountSerializer\n permission_classes = (DjangoModelPermissions,)\n\n def get_serializer_class(self):\n if self.request.method != 'GET':\n return super(AccountViewSet, self).get_serializer_class()\n\n return RetrieveAccountSerializer\n\n @detail_route(methods=['get'])\n def 
ledger(self, request, pk=None):\n serializer = LedgerAccountSerializer(Account.objects.get(pk=pk))\n return Response(serializer.data)\n\n #Current Ratio is the current total of assets divided by the current total of liabilities\n @list_route(methods=['get'])\n def current_ratio(self, request):\n cr = {\n \"status\" : \"\",\n \"ratio\" : 0\n }\n\n accounts = Account.objects.filter(is_active=True, account_type__classification=AccountClassifications.CURRENT,\n account_type__category__in=[\n AccountCategories.ASSET,\n AccountCategories.LIABILITY\n ])\n total_assets = 0\n total_liabilities = 0\n\n for account in accounts:\n if account.account_type.category == AccountCategories.ASSET:\n total_assets += account.get_balance()\n elif account.account_type.category == AccountCategories.LIABILITY:\n total_liabilities += account.get_balance()\n\n cr[\"ratio\"] = 0\n if (total_liabilities != 0):\n cr[\"ratio\"] = Decimal(total_assets / total_liabilities)\n\n if cr[\"ratio\"] < 0.02:\n cr[\"status\"] = \"red\"\n elif cr[\"ratio\"] >= 0.02 and cr[\"ratio\"] <= 0.05:\n cr[\"status\"] = \"yellow\"\n else:\n cr[\"status\"] = \"green\"\n\n cr[\"ratio\"] = format_percent(cr[\"ratio\"] * 100)\n\n return Response(cr)\n\n @list_route(methods=['get'])\n def return_on_assets(self, request):\n ratio = 0\n status = \"\"\n net_profit = 0\n total_assets = 0\n accounts = Account.objects.filter(is_active=True, account_type__category__in=[\n AccountCategories.ASSET,\n AccountCategories.REVENUE,\n AccountCategories.EXPENSE\n ])\n\n for account in accounts:\n account_balance = account.get_balance()\n if account.account_type.category == AccountCategories.ASSET:\n total_assets += account_balance\n elif account.account_type.category == AccountCategories.REVENUE:\n net_profit += account_balance\n elif account.account_type.category == AccountCategories.EXPENSE:\n net_profit -= account_balance\n\n output = 0\n if total_assets != 0:\n output = Decimal(net_profit / total_assets)\n\n if output < 0.05:\n status = \"red\"\n elif output >= 0.05 and output < 0.1:\n status = \"yellow\"\n else:\n status = \"green\"\n\n output = format_percent(output * 100)\n\n return Response({\n 'ratio': output,\n 'status': status\n })\n\n @list_route(methods=['get'])\n def return_on_equity(self, request):\n ratio = 0\n status = \"\"\n net_profit = 0\n total_equity = 0\n accounts = Account.objects.filter(is_active=True, account_type__category__in=[\n AccountCategories.EQUITY,\n AccountCategories.REVENUE,\n AccountCategories.EXPENSE\n ])\n\n for account in accounts:\n account_balance = account.get_balance()\n if account.account_type.category == AccountCategories.EQUITY:\n total_equity += account_balance\n elif account.account_type.category == AccountCategories.REVENUE:\n net_profit += account_balance\n elif account.account_type.category == AccountCategories.EXPENSE:\n net_profit -= account_balance\n\n output = 0\n if total_equity != 0:\n output = Decimal(net_profit / total_equity)\n\n if output < 0.05:\n status = \"red\"\n elif output >= 0.05 and output < 0.1:\n status = \"yellow\"\n else:\n status = \"green\"\n\n output = format_percent(output * 100)\n\n return Response({\n 'ratio': output,\n 'status': status\n })\n\n @list_route(methods=['get'])\n def net_profit_margin(self, request):\n ratio = 0\n status = \"\"\n net_profit = 0\n total_sales = 0\n accounts = Account.objects.filter(is_active=True, account_type__category__in=[\n AccountCategories.REVENUE,\n AccountCategories.EXPENSE\n ])\n\n for account in accounts:\n account_balance = 
account.get_balance()\n if account.account_type.category == AccountCategories.REVENUE: # TODO: make sure it is correct to use all revenues for this\n total_sales += account_balance\n net_profit += account_balance\n elif account.account_type.category == AccountCategories.EXPENSE:\n net_profit -= account_balance\n\n output = 0\n if total_sales != 0:\n output = Decimal(net_profit / total_sales)\n\n if output < 0.05:\n status = \"red\"\n elif output >= 0.05 and output < 0.1:\n status = \"yellow\"\n else:\n status = \"green\"\n\n output = format_percent(output * 100)\n\n return Response({\n 'ratio': output,\n 'status': status\n })\n\n @list_route(methods=['get'])\n def asset_turnover(self, request):\n ratio = 0\n status = \"\"\n total_assets = 0\n total_sales = 0\n accounts = Account.objects.filter(is_active=True, account_type__category__in=[\n AccountCategories.ASSET,\n AccountCategories.REVENUE\n ])\n\n for account in accounts:\n account_balance = account.get_balance()\n if account.account_type.category == AccountCategories.ASSET:\n total_assets += account_balance\n elif account.account_type.category == AccountCategories.REVENUE: # TODO: make sure it is correct to use all revenues for this\n total_sales += account_balance\n\n output = 0\n if total_assets != 0:\n output = Decimal(total_sales / total_assets)\n\n if output < 0.03:\n status = \"red\"\n elif output >= 0.03 and output < 0.07:\n status = \"yellow\"\n else:\n status = \"green\"\n\n output = format_percent(output * 100)\n\n return Response({\n 'ratio': output,\n 'status': status\n })\n\n @list_route(methods=['get'])\n def quick_ratio(self, request):\n ratio = 0\n status = \"\"\n total_assets = 0\n total_liabilities = 0\n total_inventory = 0\n accounts = Account.objects.filter(is_active=True, account_type__classification=AccountClassifications.CURRENT,\n account_type__category__in=[\n AccountCategories.ASSET,\n AccountCategories.LIABILITY\n ])\n\n for account in accounts:\n account_balance = account.get_balance()\n if account.account_type.category == AccountCategories.ASSET:\n total_assets += account_balance\n if account.account_type.name == \"Inventories\":\n total_inventory += account_balance\n\n elif account.account_type.category == AccountCategories.LIABILITY:\n total_liabilities += account_balance\n\n output = 0\n if total_liabilities != 0:\n output = Decimal((total_assets - total_inventory) / total_liabilities)\n\n if output < 0.02:\n status = \"red\"\n elif output >= 0.02 and output < 0.04:\n status = \"yellow\"\n else:\n status = \"green\"\n\n output = format_percent(output * 100)\n\n return Response({\n 'ratio': output,\n 'status': status\n })\n\n @list_route(methods=['get'])\n def trial_balance(self, request):\n active_accounts = Account.objects.filter(is_active=True)\n nonzero_accounts = []\n debit_total = 0\n credit_total = 0\n\n for account in active_accounts:\n account_balance = account.get_balance()\n\n if account_balance != 0:\n nonzero_accounts.append({\n 'account_id': account.pk,\n 'account_number': account.account_number(),\n 'account_name': account.name,\n 'balance': format_currency(account_balance),\n 'is_debit': account.is_debit(),\n })\n\n if account.is_debit():\n debit_total += account_balance\n else:\n credit_total += account_balance\n\n return Response({\n 'accounts': nonzero_accounts,\n 'debit_total': format_currency(debit_total),\n 'credit_total': format_currency(credit_total),\n 'as_of_date': timezone.now()\n })\n\n @list_route(methods=['get'])\n def income_statement(self, request):\n accounts = 
Account.objects.filter(is_active=True, account_type__category__in=[\n AccountCategories.REVENUE,\n AccountCategories.EXPENSE\n ])\n\n expenses = []\n revenues = []\n expenses_total = 0\n revenues_total = 0\n\n for account in accounts:\n account_balance = account.get_balance()\n if account_balance != 0:\n account_summary = {\n 'account_id': account.pk,\n 'account_number': account.account_number(),\n 'account_name': account.name,\n 'balance': format_currency(account_balance),\n 'is_debit': account.is_debit()\n }\n\n if account.account_type.category == AccountCategories.REVENUE:\n revenues.append(account_summary)\n revenues_total += account_balance\n elif account.account_type.category == AccountCategories.EXPENSE:\n expenses.append(account_summary)\n expenses_total += account_balance\n\n return Response({\n 'expenses': expenses,\n 'revenues': revenues,\n 'expenses_total': format_currency(expenses_total * -1),\n 'revenues_total': format_currency(revenues_total),\n 'net_profit': format_currency(revenues_total - expenses_total),\n 'as_of_date': timezone.now()\n })\n\n @list_route(methods=['get'])\n def retained_earnings(self, request):\n accounts = Account.objects.filter(is_active=True, account_type__category__in=[\n AccountCategories.EQUITY,\n AccountCategories.REVENUE,\n AccountCategories.EXPENSE\n ])\n\n retained_earnings_beginning = 0\n net_profit = 0\n dividends_total = 0\n\n for account in accounts:\n account_balance = account.get_balance()\n\n if account.account_type.category == AccountCategories.EQUITY:\n if account.name == 'Retained Earnings':\n retained_earnings_beginning = account_balance\n elif 'Drawing' in account.name:\n # or account.name == \"Paid in Capital in Excess of Par/Stated Value--Common Stock\" \\\n # or account.name == 'Paid in Capital in Excess of Par/Stated Value--Preferred Stock' \\\n # or account.name == 'Paid in Capital from Sale of Treasury Stock':\n # NOTE: We are only accounting for business owner equity here, not shareholders equity.\n dividends_total += account_balance\n elif account.account_type.category == AccountCategories.REVENUE:\n net_profit += account_balance\n elif account.account_type.category == AccountCategories.EXPENSE:\n net_profit -= account_balance\n\n return Response({\n 'retained_earnings_beginning': format_currency(retained_earnings_beginning),\n 'net_profit': format_currency(net_profit),\n 'dividends_paid': format_currency(dividends_total),\n 'retained_earnings_ending': format_currency(retained_earnings_beginning + net_profit - dividends_total),\n 'as_of_date': timezone.now()\n })\n\n @list_route(methods=['get'])\n def balance_sheet(self, request):\n active_accounts = Account.objects.filter(is_active=True)\n current_assets = []\n current_assets_total = 0\n noncurrent_assets = []\n noncurrent_assets_total = 0\n current_liabilities = []\n current_liabilities_total = 0\n noncurrent_liabilities = []\n noncurrent_liabilities_total = 0\n\n expenses_total = 0\n revenues_total = 0\n equity = []\n equity_total = 0\n for account in active_accounts:\n account_balance = account.get_balance()\n\n if account.is_contra:\n # Since we are calculating total valuation for each category/classification\n # of account, we need to make sure that the balances of any contra\n # accounts are subtracted from the totals.\n account_balance = account_balance * -1\n\n if account_balance != 0:\n account_summary = {\n 'account_id': account.pk,\n 'account_number': account.account_number(),\n 'account_name': account.name,\n 'balance': format_currency(account_balance),\n }\n if 
account.account_type.category == AccountCategories.ASSET:\n                    if account.account_type.classification == AccountClassifications.CURRENT:\n                        current_assets.append(account_summary)\n                        current_assets_total += account_balance\n                    else:\n                        noncurrent_assets.append(account_summary)\n                        noncurrent_assets_total += account_balance\n\n                elif account.account_type.category == AccountCategories.LIABILITY:\n                    if account.account_type.classification == AccountClassifications.CURRENT:\n                        current_liabilities.append(account_summary)\n                        current_liabilities_total += account_balance\n                    else:\n                        noncurrent_liabilities.append(account_summary)\n                        noncurrent_liabilities_total += account_balance\n\n                elif account.account_type.category == AccountCategories.EQUITY:\n                    equity.append(account_summary)\n                    equity_total += account_balance\n\n                elif account.account_type.category == AccountCategories.REVENUE:\n                    revenues_total += account_balance\n\n                elif account.account_type.category == AccountCategories.EXPENSE:\n                    expenses_total += account_balance\n\n        #Part of Cheaty Method\n        # FIXME\n        if revenues_total - expenses_total != 0:\n            equity.append({\n                'account_id': 0,\n                'account_number': 0,\n                'account_name': 'Income Estimation',\n                'balance': format_currency(revenues_total - expenses_total),\n            })\n        #####################\n        hacky_equity_total = equity_total + revenues_total - expenses_total # FIXME: THIS IS A HACKY SOLUTION DO NOT TRUST\n        asset_total = current_assets_total + noncurrent_assets_total\n        liability_total = equity_total + current_liabilities_total + noncurrent_liabilities_total + revenues_total - expenses_total\n        response = {\n            'current_assets': current_assets,\n            'current_liabilities': current_liabilities,\n            'noncurrent_assets': noncurrent_assets,\n            'noncurrent_liabilities': noncurrent_liabilities,\n            'equity': equity,\n            'current_assets_total': format_currency(current_assets_total) if current_assets_total != 0 else None,\n            'noncurrent_assets_total': format_currency(noncurrent_assets_total) if noncurrent_assets_total != 0 else None,\n            'current_liabilities_total': format_currency(current_liabilities_total) if current_liabilities_total != 0 else None,\n            'noncurrent_liabilities_total': format_currency(noncurrent_liabilities_total) if noncurrent_liabilities_total != 0 else None,\n            'equity_total': format_currency(hacky_equity_total) if hacky_equity_total != 0 else None,\n            'asset_total': format_currency(asset_total),\n            'liability_total': format_currency(liability_total),\n            'as_of_date': timezone.now()\n        }\n\n        return Response(response)\n\n    @list_route(methods=['post'], permission_classes=[LAAccountsClosingPermission])\n    def close_accounts(self, request):\n        accounts = Account.objects.filter(is_active=True, account_type__category__in=[\n            AccountCategories.EQUITY,\n            AccountCategories.REVENUE,\n            AccountCategories.EXPENSE\n        ])\n\n        income_value = 0\n        debits = []\n        credits = []\n        has_income_adjustment = False\n\n        closing_journal = JournalEntry(date=timezone.now(), creator=request.user, description=\"Auto-generated closing journal\",\n                                       entry_type=JournalEntryTypes.CLOSING, is_approved=None)\n        closing_journal.save()\n\n        for account in accounts:\n            balance = account.get_balance()\n            if balance == 0:\n                # No need to close an account that does not have a balance\n                continue\n\n            closer = Transaction(affected_account=account, journal_entry=closing_journal, is_debit=not account.is_debit(),\n                                 value=abs(balance))\n\n            if account.account_type.category == AccountCategories.REVENUE or account.account_type.category == AccountCategories.EXPENSE:\n                
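# Revenue and expense closers accumulate into income_value and are settled against Retained Earnings below.\n                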
                has_income_adjustment = True\n\n                if closer.is_debit:\n                    debits.append(closer)\n                else:\n                    credits.append(closer)\n\n                income_value += closer.get_value()\n\n            elif account.account_type.category == AccountCategories.EQUITY and \"Drawing\" in account.name:\n                # NOTE: We are only accounting for business owner equity here, not shareholders' equity.\n                try:\n                    equity_adjuster = Account.objects.get_by_natural_key(account.name.replace(\"Drawing\", \"Capital\"))\n\n                    if not equity_adjuster.is_active:\n                        equity_adjuster.is_active = True\n                        equity_adjuster.save()\n\n                except Account.DoesNotExist:\n                    closing_journal.delete()\n\n                    return Response({\n                        'message': 'The Drawing account \"%s\" does not have a corresponding Capital account to adjust.' % \\\n                                   account.name }, status=403)\n\n                debits.append(Transaction(affected_account=equity_adjuster, journal_entry=closing_journal,\n                                          value=abs(balance), is_debit=True))\n                credits.append(closer)\n\n        if not debits and not credits:\n            closing_journal.delete()\n            return Response({ 'message': 'Accounts have already been closed.' }, status=200)\n\n        if has_income_adjustment:\n            try:\n                income_account = Account.objects.get_by_natural_key('Retained Earnings')\n\n                if not income_account.is_active:\n                    income_account.is_active = True\n                    income_account.save()\n\n            except Account.DoesNotExist:\n                closing_journal.delete()\n\n                return Response({\n                    'message': 'There is no account named \"Retained Earnings\".'\n                }, status=403)\n\n            income_adjuster = Transaction(affected_account=income_account, journal_entry=closing_journal, value=abs(income_value))\n            income_adjuster.is_debit = income_value < 0  # a net loss debits Retained Earnings; a net income credits it\n\n            if income_adjuster.is_debit:\n                debits.append(income_adjuster)\n            else:\n                credits.append(income_adjuster)\n\n        transaction_list = debits + credits\n        for transaction in transaction_list:\n            transaction.save()\n\n        return Response({'message': 'Closing Entry has been created.'}, status=200)\n","sub_path":"server/project/accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":22710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"80699137","text":"import os\n
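# Builds a text string, then writes slices of it with a context manager and\n# reads the file back to print it (comments added for clarity).\n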
texto = \"Cientista de dados é a profissão que mais tem crescido no mundo. \n\"\ntexto = texto + \"Esses profissionais precisam saber estatistica, programação e ML, \"\ntexto += \"e claro, Big Data!!\"\n# #\n# # print(texto)\n# #\n# arq = open(os.path.join('files/cientista2.txt'),'w')\n#\n# # split breaks the string at each whitespace it finds (default)\n#\n# for palavra in texto.split():\n#     arq.write(palavra+' + ')\n#\n# arq.close()\n\n# arquivo = open('files/cientista2.txt')\n# conteudo = arquivo.read()\n# arquivo.close()\n#\n# print(conteudo)\n\n\n# USING WITH + slicing\n\nwith open('./files/cientista2', 'w') as arquivo:\n    arquivo.write(texto[:21])\n    arquivo.write('\\n')\n    arquivo.write(texto[::-1])\n\narquivo = open('files/cientista2','r')\nconteudo = arquivo.read()\narquivo.close()\n\nprint(conteudo)\n","sub_path":"python_dc/arquivos/manipulando_txt.py","file_name":"manipulando_txt.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"558475976","text":"from shutil import copyfile\nimport os\n\n\ntimit_dir_path = 'ToolsAndData\\\\TIMIT\\\\'\ntrain_dir_path = timit_dir_path+'TRAIN\\\\'\nwaves_dir_path = 'TimitNistData\\\\'\n\ndef extract_waves():\n    if not os.path.exists(train_dir_path):\n        print('Error: directory not found '+train_dir_path)\n        print('Current directory '+os.path.dirname(os.path.realpath(__file__)))\n        exit(1)\n\n    os.makedirs(waves_dir_path, exist_ok=True)  # make sure the output folder exists\n\n    folder = []\n    for f in os.walk(train_dir_path):\n        folder.append(f)\n\n    for address, dirs, files in folder:\n        for file in files:\n            if os.path.splitext(file)[1] == '.WAV':\n                file_name = os.path.splitext(file)[0]\n                old_file_path = address+'\\\\'+file\n                new_file_path = waves_dir_path+file_name+'.NIST'\n                if not os.path.exists(new_file_path):\n                    copyfile(old_file_path, new_file_path)\n                    print(new_file_path)\n\nextract_waves()","sub_path":"timitwavfind.py","file_name":"timitwavfind.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"635577200","text":"'''\n30 BOX BOUNCE PROGRAM\n--------------------\nYou will want to incorporate lists to modify the\nBall Bounce Program to create the following:\n\n1.) Screen size 600 x 600\n2.) Draw four 30px wide side rails on all four sides of the window\n3.) Make each side rail a different color.\n4.) Draw 30 black boxes(squares) of random size from 10-50 pixels\n5.) Animate them starting at random speeds from -300 to +300 pixels/second. \n6.) All boxes must be moving.\n7.) Start all boxes in random positions between the rails.\n8.) Bounce boxes off of the side rails when the box edge hits the side rail.\n9.) When the box bounces change its color to the rail it just hit.\n10.)Title the window 30 Boxes\n\nHelpful Hints:\n1.) When you initialize the MyGame class create an empty list called self.boxlist=[] to hold all of your boxes.\n2.) Then use a for i in range(30): list to instantiate boxes and append them to the list.\n3.) In the on_draw section use: for box in self.boxlist: box.draw_box()\n4.) Also in the on_draw section draw the side rails.\n5.) In the on_update section use: for box in self.boxlist: box.update_box()\n'''\n\nimport arcade\nimport random\n\n
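# Note: the rails are 30 px thick, so a box of side s bounces when its center\n# comes within 30 + s/2 pixels of a window edge, and it takes that rail's color.\n\n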
SW = 600\nSH = 600\nbox_num = 30\n\nclass Box:\n    def __init__(self, x_pos, y_pos, dx, dy, side, color):\n        self.x_pos = x_pos\n        self.y_pos = y_pos\n        self.dx = dx\n        self.dy = dy\n        self.side = side\n        self.color = color\n\n    def draw_box(self):\n        arcade.draw_rectangle_filled(self.x_pos, self.y_pos, self.side, self.side, self.color)\n\n    def update_box(self):\n        self.y_pos += self.dy\n        self.x_pos += self.dx\n\n        # Bouncing the boxes off the left rail\n        if self.x_pos < 30 + self.side/2:\n            self.dx *= -1\n            self.color = arcade.color.RED\n\n        # Bouncing the boxes off the right rail\n        if self.x_pos > SW - 30 - self.side/2:\n            self.dx *= -1\n            self.color = arcade.color.YELLOW\n\n        # Bouncing the boxes off the bottom rail\n        if self.y_pos < 30 + self.side/2:\n            self.dy *= -1\n            self.color = arcade.color.GREEN\n\n        # Bouncing the boxes off the top rail\n        if self.y_pos > SH - 30 - self.side/2:\n            self.dy *= -1\n            self.color = arcade.color.BLUE\n\nclass MyGame(arcade.Window):\n    def __init__(self, width, height, title):\n        super().__init__(width, height, title)\n        arcade.set_background_color(arcade.color.WHITE)\n        self.box_list = []\n        for i in range(box_num):\n            dy = random.randint(-5, 5)\n            dx = random.randint(-5, 5)\n            side = random.randint(10, 50)\n            x = random.randrange(30 + int(side/2), SW - 30 - int(side/2))\n            y = random.randrange(30 + int(side / 2), SH - 30 - int(side / 2))\n            color = arcade.color.BLACK\n\n            if dx == 0 and dy == 0:\n                # every box must move; give stationary boxes a small speed\n                dx = 3\n                dy = 3\n\n            box = Box(x, y, dx, dy, side, color)\n            self.box_list.append(box)\n\n    def on_draw(self):\n        arcade.start_render()\n        for box in self.box_list:\n            box.draw_box()\n        # RAILS\n        arcade.draw_rectangle_filled(15, SH/2, 30, SH-60, arcade.color.RED)  # LEFT\n        arcade.draw_rectangle_filled(SW-15, SH/2, 30, SH-60, arcade.color.YELLOW)  # RIGHT\n        arcade.draw_rectangle_filled(SW/2, 15, SW-60, 30, arcade.color.GREEN)  # BOTTOM\n        arcade.draw_rectangle_filled(SW/2, SH-15, SW-60, 30, arcade.color.BLUE)  # TOP\n\n    def on_update(self, dt):\n        for box in self.box_list:\n            box.update_box()\n\ndef main():\n    window = MyGame(SW, SH, \"30 Boxes\")\n    arcade.run()\n\nif __name__==\"__main__\":\n    main()","sub_path":"12.1_Thirty_Box_Bounce.py","file_name":"12.1_Thirty_Box_Bounce.py","file_ext":"py","file_size_in_byte":3614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"595050923","text":"import sys\r\nimport string\r\n\r\nif __name__ == \"__main__\":\r\n    input = sys.stdin.readlines()\r\n    case = int(input[0])\r\n    for i in range(case):\r\n        input[i * 2 + 1] = list(map(int, input[i * 2 + 1].split()))\r\n        action = input[i * 2 + 1][0]\r\n        before = input[i * 2 + 1][1]\r\n        actions = input[i * 2 + 2]\r\n        h = actions.rfind('H')\r\n        k = 0\r\n        for j in range(h, action + 1):\r\n            if actions[j] == 'L':\r\n                k = k + 1\r\n            elif actions[j] == 'R':\r\n                k = k - 1\r\n        print(int(k))\r\n\r\n\r\n\r\n","sub_path":"judge/sessions/2018Team/hongliyu/PA_02.py","file_name":"PA_02.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"309955620","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n__author__ = 'MFC'\n__time__ = '2019-09-07 22:32'\n\n\"\"\"\n5067. Count Substrings with Only One Distinct Letter (Easy)\n\nGiven a string S, return the number of substrings that contain only a single letter.\n\nInput: \"aaaba\"\nOutput: 8\nExplanation:\nThe single-letter substrings are \"aaa\", \"aa\", \"a\", \"b\".\n\"aaa\" occurs 1 time.\n\"aa\" occurs 2 times.\n\"a\" occurs 4 times.\n\"b\" occurs 1 time.\nSo the answer is 1 + 2 + 4 + 1 = 8.\n\nInput: \"aaaaaaaaaa\"\nOutput: 55\n\"\"\"\n\nfrom itertools import accumulate\n\n# S = \"aaaaaaaaaa\"\nS = \"aaaba\"\n\n\ndef all_sub_string(a_string):\n    # all substrings = the prefixes of every suffix\n    if len(a_string) == 1:\n        return [a_string]\n    else:\n        return list(accumulate(a_string)) + all_sub_string(a_string[1:])\n\n\nall_sub_strs = all_sub_string(S)\nprint(all_sub_strs)\n\n# brute force: count the substrings made of a single distinct letter\nret = sum(1 for s in all_sub_strs if len(set(s)) == 1)\n\nprint(ret)\n
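\n# A sketch of the standard O(n) alternative (added, not part of the original\n# solution): each maximal run of L equal letters contributes L * (L + 1) // 2\n# single-letter substrings.\nfrom itertools import groupby\n\ndef count_single_letter_substrings(s):\n    total = 0\n    for _, group in groupby(s):\n        run = len(list(group))  # length of this run of equal letters\n        total += run * (run + 1) // 2\n    return total\n\nprint(count_single_letter_substrings(S))  # 8 for \"aaaba\", 55 for \"a\" * 10\n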
","sub_path":"leetcode/week/5067.py","file_name":"5067.py","file_ext":"py","file_size_in_byte":1357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"509331910","text":"\"\"\"Matasano Crypto Challenges - Challenge 2 'Fixed XOR'\n\nPython Version\n---------------\n3.5.1\n\nNotes\n------\no All strings in python 3.x are Unicode characters\no binascii.a2b_hex - only accepts Unicode strings containing ASCII characters\n\n\"\"\"\n# a2b_hex: ascii code points in unicode hex string --> binary ascii\n# character representation\nfrom binascii import a2b_hex, b2a_hex\n\n# make the given hex strings explicitly Unicode strings for python 2.x\n# compatibility using u''\nSTRING_1 = '1c0111001f010100061a024b53535009181c'\nSTRING_2 = '686974207468652062756c6c277320657965'\n# expected hex string, must be bytes object because b2a_hex returns a\n# bytes object\nEXPECTED = b'746865206b696420646f6e277420706c6179'\n\n# decode the hex strings to their ASCII binary representations b''\n# 	Python 2.x\n# 	string_1_decoded = STRING_1.decode('hex')\nbuffer_1 = a2b_hex(STRING_1)\nbuffer_2 = a2b_hex(STRING_2)\n\n\n# for each byte in the buffers, bitwise xor (^) and return the integer\n# that represents the xor resultant byte, chr takes the integer\n# from an ascii/unicode code point to its character representation\n# returns string object\nxor_decoded = \"\".join(chr(i ^ j) for i, j in zip(buffer_1, buffer_2))\n# bytes() returns a bytes object, b2a_hex takes a bytes object and\n# returns the ascii code point in hex as a bytes object\nxor_encoded = b2a_hex(bytes(xor_decoded, 'utf-8'))\n\nprint('xor decoded:\\n' + xor_decoded + '\\n')\nprint('xor encoded:\\n' + str(xor_encoded) + '\\n')\nprint('expected string:\\n' + str(EXPECTED) + '\\n')\n\nif xor_encoded == EXPECTED:\n\tprint('[+] Worked!')\nelse:\n\tprint('[-] Lame Crypto...')\n","sub_path":"Set1/Challenge2.py","file_name":"Challenge2.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"179458871","text":"import pandas as pd\nfrom numpy import *\nimport tensorflow as tf\nfrom tensorflow.contrib.learn.python.learn.estimators import SKCompat\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import metrics\nimport matplotlib.pyplot as plt\nfrom itertools import cycle\nfrom sklearn.metrics import roc_curve, auc, roc_auc_score,precision_recall_curve, average_precision_score\nimport operator\nimport re\nimport datetime\nfrom dateutil.relativedelta import relativedelta\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.preprocessing import OneHotEncoder\nimport time\n\n\n
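# Added note: MaxMinNorm below rescales a column to [0, 1]. A quick sanity\n# check, assuming a small throwaway DataFrame:\n#   df = pd.DataFrame({'x': [1.0, 2.0, 3.0]}); MaxMinNorm(df, 'x')\n#   df['x'].tolist()  ->  [0.0, 0.5, 1.0]\n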
#normalize the features using max-min to convert the values into [0,1] interval\ndef MaxMinNorm(df,col):\n    ma, mi = max(df[col]), min(df[col])\n    rangeVal = ma - mi\n    if rangeVal == 0:\n        print (col)\n        return  # constant column: skip it instead of dividing by zero\n    df[col] = df[col].map(lambda x:(x-mi)*1.0/rangeVal)\n\n\ndef CareerYear(x):\n    if not x==x:\n        return -1\n    # convert the employment length to a number of years\n    #if x.find('n/a') > -1:\n        #return -1\n    elif x.find(\"10+\")>-1: # map \"10+ years\" to 11\n        return 11\n    elif x.find('< 1') > -1: # map \"< 1 year\" to 0\n        return 0\n    else:\n        return int(re.sub(\"\\D\", \"\", x)) # otherwise drop \"years\" and convert to an integer\n\n\ndef DescExisting(x):\n    # map the desc variable to two states: with a record and without one\n    if type(x).__name__ == 'float':\n        return 'no desc'\n    else:\n        return 'desc'\n\n\ndef ConvertDateStr(x):\n    mth_dict = {'Jan': 1, 'Feb': 2, 'Mar': 3, 'Apr': 4, 'May': 5, 'Jun': 6, 'Jul': 7, 'Aug': 8, 'Sep': 9, 'Oct': 10,\n                'Nov': 11, 'Dec': 12}\n    if str(x) == 'nan':\n        return datetime.datetime.fromtimestamp(time.mktime(time.strptime('9900-1','%Y-%m')))\n        # time.mktime cannot handle dates before 1970\n    else:\n        yr = int(x[4:6])\n        if yr <=17:\n            yr = 2000+yr\n        else:\n            yr = 1900 + yr\n        mth = mth_dict[x[:3]]\n        return datetime.datetime(yr,mth,1)\n\n\ndef MonthGap(earlyDate, lateDate):\n    if lateDate > earlyDate:\n        gap = relativedelta(lateDate,earlyDate)\n        yr = gap.years\n        mth = gap.months\n        return yr*12+mth\n    else:\n        return 0\n\n\ndef MakeupMissing(x):\n    if not x==x:\n        return -1\n    else:\n        return x\n\n\n\n'''\nStep 1: data preparation\n'''\nfolderOfData = '/Users/andpay/PycharmProjects/sklearn/GBDT'\nallData = pd.read_excel(folderOfData + '/application.xlsx',header = 0, encoding = 'latin1')\nallData['term'] = allData['term'].apply(lambda x: int(x.replace(' months','')))\n# build the label: Fully Paid means a good customer; Charged Off means a defaulted customer\nallData['y'] = allData['loan_status'].map(lambda x: int(x == 'Charged Off'))\n\n'''\nBecause loans have different terms, the default probability estimated by an\napplication scorecard must refer to a single horizon, and it should not be\ntoo long, so we keep only the samples with term = 36 months.\n'''\nallData1 = allData.loc[allData.term == 36]\ntrainData, testData = train_test_split(allData1,test_size=0.4)\n\n\n\n'''\nStep 2: data preprocessing\n'''\n# turn the percentage strings into floats\n#trainData['int_rate_clean'] = trainData['int_rate'].map(lambda x: float(x.replace('%',''))/100)\n# convert the employment length; otherwise sorting is affected\ntrainData['emp_length_clean'] = trainData['emp_length'].map(CareerYear)\n# treat a missing desc as one state and a present desc as another\ntrainData['desc_clean'] = trainData['desc'].map(DescExisting)\n# handle dates: earliest_cr_line is inconsistently formatted and needs converting to python dates\n#trainData['app_date_clean'] = trainData['issue_d'].map(lambda x: ConvertDateStr(x))\n#trainData['earliest_cr_line_clean'] = trainData['earliest_cr_line'].map(lambda x: ConvertDateStr(x))\n# handle mths_since_last_delinq: the raw values contain 0, so use -1 for missing\ntrainData['mths_since_last_delinq_clean'] = trainData['mths_since_last_delinq'].map(lambda x:MakeupMissing(x))\ntrainData['mths_since_last_record_clean'] = trainData['mths_since_last_record'].map(lambda x:MakeupMissing(x))\ntrainData['pub_rec_bankruptcies_clean'] = trainData['pub_rec_bankruptcies'].map(lambda x:MakeupMissing(x))\n\n'''\nStep 3: feature derivation\n'''\n# the ratio of the requested amount to income\ntrainData['limit_income'] = trainData.apply(lambda x: x.loan_amnt / x.annual_inc, axis = 1)\n# the span from earliest_cr_line to the application date, in months\ntrainData['earliest_cr_to_app'] = trainData.apply(lambda x: MonthGap(x['earliest_cr_line'],x['issue_d']), axis = 1)\n\n\n'''\nCategorical variables need one-hot encoding before the model is trained\n'''\nnum_features = ['int_rate','emp_length_clean','annual_inc', 'dti', 'delinq_2yrs', 'earliest_cr_to_app','inq_last_6mths', \\\n                'mths_since_last_record_clean', 'mths_since_last_delinq_clean','open_acc','pub_rec','total_acc','limit_income']\n
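# Added note: DictVectorizer below does the one-hot encoding; illustratively,\n# v.fit_transform([{'grade': 'A'}, {'grade': 'B'}]) would return a 2x2 0/1\n# matrix with one column per observed category ('grade' is a made-up column).\n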
cat_features = ['home_ownership', 'verification_status','desc_clean', 'purpose', 'zip_code','addr_state','pub_rec_bankruptcies_clean']\n\nv = DictVectorizer(sparse=False)\nX1 = v.fit_transform(trainData[cat_features].to_dict('records'))\n# put the one-hot encodings and the numeric variables together for model training\nX2 = matrix(trainData[num_features])\nX = hstack([X1,X2])\nY = trainData['y']\n\nx_train,x_test, y_train, y_test = train_test_split(X,Y,test_size = 0.3,random_state = 0)\n\n#number of input layer nodes: dimension\n#number of hidden layers & number of nodes in them: hidden_units\n#dropout: the probability of dropping a unit; None or 0 means a fully connected net\n#activation function: activation_fn. By default it is relu\n#learning rate:\n\n#Example: select the best number of units in the 1-layer hidden layer\n#model_dir = path can make the next iteration start from the last termination\n#define the DNN with 1 hidden layer\n\n'''Use a neural network with one hidden layer'''\nno_hidden_units_selection = {}\nfeature_columns = [tf.contrib.layers.real_valued_column(\"\", dimension = x_train.shape[1])]\nfor no_hidden_units in range(10,51,10):\n    print(\"the current choice of hidden units number is {}\".format(no_hidden_units))\n    clf0 = tf.contrib.learn.DNNClassifier(feature_columns = feature_columns,hidden_units=[no_hidden_units, no_hidden_units+10],n_classes=2,dropout = 0.5)\n    clf = SKCompat(clf0)\n    clf.fit(x_train, y_train, batch_size=256,steps = 1000)\n    #monitor the performance of the model using the AUC score\n    #clf_pred = clf._estimator.predict(x_test)\n    #y_pred = [i for i in clf_pred]\n    clf_pred_proba = clf._estimator.predict_proba(x_test)\n    pred_proba = [i[1] for i in clf_pred_proba]\n    auc_score = roc_auc_score(y_test,pred_proba)\n    no_hidden_units_selection[no_hidden_units] = auc_score\nbest_hidden_units = max(no_hidden_units_selection.items(), key=lambda x: x[1])[0]\n\n\n#Example: check the dropout effect\ndropout_selection = {}\nfeature_columns = [tf.contrib.layers.real_valued_column(\"\", dimension = x_train.shape[1])]\nfor dropout_prob in linspace(0,0.99,100):\n    print(\"the current choice of drop out rate is {}\".format(dropout_prob))\n    clf0 = tf.contrib.learn.DNNClassifier(feature_columns = feature_columns,\n                                          hidden_units = [best_hidden_units],\n                                          n_classes=2,\n                                          dropout = dropout_prob\n                                          #optimizer=tf.train.ProximalAdagradOptimizer(learning_rate=0.1,l1_regularization_strength=0.001\n                                          #model_dir = path\n                                          #learning_rate=0.1\n                                          )\n    clf = SKCompat(clf0)\n    clf.fit(x_train, y_train, batch_size=256,steps = 10000)\n\n    #monitor the performance of the model using the AUC score\n    #clf_pred = clf._estimator.predict(x_test)\n    #y_pred = [i for i in clf_pred]\n\n\n    clf_pred_proba = clf._estimator.predict_proba(x_test)\n    pred_proba = [i[1] for i in clf_pred_proba]\n    auc_score = roc_auc_score(y_test,pred_proba)\n    dropout_selection[dropout_prob] = auc_score\n\nbest_dropout_prob = max(dropout_selection.items(), key=operator.itemgetter(1))[0]\n","sub_path":"deep_learning/DNN.py","file_name":"DNN.py","file_ext":"py","file_size_in_byte":8018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"293673198","text":"from bs4 import BeautifulSoup\nimport urllib.request\nimport re\nimport os\nimport time\n#import psycopg2\n\nimg_format = \"https://i.kinja-img.com/gawker-media/image/upload/{}.{}\"\n\nembed_providers = {\n    'instagram': \"https://www.instagram.com/p/{}/\",\n    'twitter': 'https://twitter.com/statuses/{}',\n    'youtube': 
\"https://www.youtube.com/watch?v={}\",\n 'vimeo': \"https://vimeo.com/{}\"\n}\n\n## For videos, we're looking for something that looks like:\n##
\n## \n##