diff --git "a/2349.jsonl" "b/2349.jsonl" new file mode 100644--- /dev/null +++ "b/2349.jsonl" @@ -0,0 +1,640 @@ +{"seq_id":"228350614","text":"#\n# Save on a new file with the same name but with \"small_\" prefix\n# on high quality jpeg format.\n#\n# If the script is in /images/ and the files are in /images/2012-1-1-pics\n# call with: python resize.py 2012-1-1-pics\n\nfrom PIL import Image\nimport os\nimport sys\n\ndirectory = 'E:\\\\NETFLIX\\\\static\\\\home\\\\shows\\\\css\\\\images\\\\'\nfor file_name in os.listdir(directory):\n\tif file_name.endswith(\".jpg\") or file_name.endswith(\".png\"):\n\t\tprint(\"Processing %s\" % file_name)\n\t\timage = Image.open(os.path.join(directory, file_name))\n\n\t\tx,y = image.size\n\t\tnew_dimensions = (1800, 2666) #dimension set here\n\t\toutput = image.resize(new_dimensions, Image.ANTIALIAS)\n\n\t\toutput_file_name = os.path.join(directory,file_name)\n\t\toutput.save(output_file_name, \"JPEG\", quality = 95)\n\nprint(\"All done\")","sub_path":"static/home/shows/css/images/resize.py","file_name":"resize.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"90414297","text":"\"\"\"\nSome scipy.optimize.minimize methods require a Jacobian\n(or gradient of chi^2) to run. Here is an example of how one would\nimplement such a minimization method using MulensModel.\n\nSimilar to example_02_fitting.py except using the 'Newton-CG\" method to\nminimize the function (and now has a \"Minimizer\" class).\n\"\"\"\nimport sys\nimport os\nimport numpy as np\nimport scipy.optimize as op\nimport matplotlib.pyplot as plt\n\nimport MulensModel as mm\n\n\nclass Minimizer(object):\n \"\"\"\n An object to link an Event to the functions necessary to minimize chi2.\n \"\"\"\n\n def __init__(self, event, parameters_to_fit):\n self.event = event\n self.parameters_to_fit = parameters_to_fit\n\n def set_parameters(self, theta):\n \"\"\"for given event set attributes from parameters_to_fit (list of str)\n to values from theta list\"\"\"\n for (key, val) in enumerate(self.parameters_to_fit):\n setattr(self.event.model.parameters, val, theta[key])\n\n def chi2_fun(self, theta):\n \"\"\"for a given set of parameters (theta), return the chi2\"\"\"\n self.set_parameters(theta)\n return self.event.get_chi2()\n\n def chi2_gradient(self, theta):\n \"\"\"\n for a given set of parameters (theta), return the gradient of chi^2\n \"\"\"\n self.set_parameters(theta) # might be redundant, but probably safer\n return self.event.get_chi2_gradient(self.parameters_to_fit)\n\n\n# Read in the data file\nSAMPLE_FILE_01 = os.path.join(\n mm.DATA_PATH, \"photometry_files\", \"OB08092\",\n \"phot_ob08092_O4.dat\")\ndata = mm.MulensData(file_name=SAMPLE_FILE_01)\n\n# Initialize the fit\nparameters_to_fit = [\"t_0\", \"u_0\", \"t_E\"]\nt_0 = 5380.\nu_0 = 0.2\nt_E = 18.\nmodel = mm.Model({'t_0': t_0, 'u_0': u_0, 't_E': t_E})\n\n# Link the data and the model\nev = mm.Event(datasets=data, model=model)\nprint('Initial Trial\\n{0}'.format(ev.model.parameters))\n\n# Create an object to hold the various minimization routines\nminimizer = Minimizer(ev, parameters_to_fit)\n\n# Find the best-fit parameters\ninitial_guess = [t_0, u_0, t_E]\nresult = op.minimize(\n minimizer.chi2_fun, x0=initial_guess, method='Newton-CG',\n jac=minimizer.chi2_gradient, tol=1e-3)\n\nprint(result.x)\n(fit_t_0, fit_u_0, fit_t_E) = result.x\n\n# Save the best-fit parameters\nchi2 = minimizer.chi2_fun(result.x)\n\n# Output the fit 
parameters\nmsg = 'Best Fit: t_0 = {0:12.5f}, u_0 = {1:6.4f}, t_E = {2:8.3f}'\nprint(msg.format(fit_t_0, fit_u_0, fit_t_E))\nprint('Chi2 = {0:12.2f}'.format(chi2))\nprint('scipy.optimize.minimize result:')\nprint(result)\n\n# Plot and compare the two models\ninit_model = mm.Model(dict(t_0=t_0, u_0=u_0, t_E=t_E))\ninit_event = mm.Event(model=init_model, datasets=data)\nplt.figure()\ninit_event.plot_model(label='Initial Trial')\nev.plot_model(label='Final Model')\nplt.title('Difference b/w Input and Fitted Model')\nplt.legend(loc='best')\n\n# Plot the fitted model with the data\nplt.figure()\nev.plot_model()\nev.plot_data()\nplt.title('Data and Fitted Model')\n\nplt.show()\n","sub_path":"examples/use_cases/use_case_24_chi2_gradient.py","file_name":"use_case_24_chi2_gradient.py","file_ext":"py","file_size_in_byte":2985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"91738072","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# #first Equation \n# \n# Let us now implement the following equation:\n# $$ y = x^2$$\n# \n# where $x = 2$ \n\n# In[1]:\n\n\nx = 2\ny = x**2\nprint(y)\n\n","sub_path":"01-01/By Yaminingsih.py","file_name":"By Yaminingsih.py","file_ext":"py","file_size_in_byte":180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"630448596","text":"#!/user/bin/python\r\n\r\n# Program: Print below pattern\r\n# *\r\n# **\r\n# ***\r\n# ****\r\n\r\ndef pattern1(no):\r\n for i in range(1,no+1):\r\n for j in range(1,i+1):\r\n print ('*\\t',end='')\r\n print (\"\\n\")\r\n\t\t\r\ndef main():\r\n no=eval(input(\"Enter Number to for pattern1:\"))\r\n pattern1(no)\r\n\t\r\nif __name__=='__main__':\r\n main()\r\n \r\n","sub_path":"Pattern1.py","file_name":"Pattern1.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"74529713","text":"import os\nimport time\nimport numpy as np\nimport pandas as pd\nfrom collections import defaultdict\nfrom tqdm import tqdm\nimport matplotlib.pyplot as plt\n\n# Turn interactive plotting off\nplt.ioff()\n\nfrom city_residents import *\nfrom utilities import *\nfrom transportation import *\nfrom disease import *\n\ndef simulate_transportations_with_infections(init_transmitters_num, remote_workers, responsible_people,\n timer_min, timer_max, transmission_time, neighbourhood_radius,\n infect_prob, death_prob, radius, spread_radius,\n quarantine_zone_size, transmitters_test_quota, others_test_quota,\n epochs, debug=False, plot_disease_matrix=None):\n \"\"\" Simulate people transportation and disease spread in a square grid\n\n :param init_transmitters_num: Initial infected people number\n :param remote_workers: Fraction of remote workers\n :param responsible_people: Fraction of responsible people (which have lower probability of getting ill)\n :param timer_min: Min steps (epochs) until infected person can transmit a disease (exception: initial group)\n :param timer_max: Max steps (epochs) until infected person can transmit a disease (exception: initial group)\n :param transmission_time: Disease lifespan\n :param neighbourhood_radius: Maximum distance allowed to travel for each person from his initial location\n :param infect_prob: Base probability for disease to transmit\n :param death_prob: Death probability after disease\n :param radius: Maximum radius for person to travel in single epoch\n :param spread_radius: Disease spreading radius\n :param 
quarantine_zone_size: Quarantine zone's capacity\n :param transmitters_test_quota: Number of tests for visible transmitters to have possibility to move to quarantine zone with less death rate\n :param others_test_quota: Number of tests for others to have possibility to move to quarantine zone with less death rate\n :param epochs: Steps to perform during each people 1) travel and 2) spread the disease\n :param debug: Debug mode to output functions run times\n :param plot_disease_matrix: Path to save plot of disease (exposure) matrix before transmitting a disease in each epoch\n :return: Number of ill (visible + invisible) people for each epoch, number of ill (visible) people that can transmit a disease for each epoch\n \"\"\"\n\n ###########################\n # Init variables #\n ###########################\n\n cities_list = []\n\n msk = CityResidents(city_num=0, city_code='msk', x_size=506, y_size=506, residents_num=126781,\n init_transmitters_num=init_transmitters_num,\n remote_workers=remote_workers, responsible_people=responsible_people, timer_min=timer_min,\n timer_max=timer_max, transmission_time=transmission_time)\n\n khi = CityResidents(city_num=1, city_code='khi', x_size=105, y_size=105, residents_num=2596,\n init_transmitters_num=0,\n remote_workers=remote_workers, responsible_people=responsible_people, timer_min=timer_min,\n timer_max=timer_max, transmission_time=transmission_time)\n\n kra = CityResidents(city_num=2, city_code='kra', x_size=51, y_size=51, residents_num=1756, init_transmitters_num=0,\n remote_workers=remote_workers, responsible_people=responsible_people, timer_min=timer_min,\n timer_max=timer_max, transmission_time=transmission_time)\n\n odi = CityResidents(city_num=3, city_code='odi', x_size=44, y_size=44, residents_num=1355, init_transmitters_num=0,\n remote_workers=remote_workers, responsible_people=responsible_people, timer_min=timer_min,\n timer_max=timer_max, transmission_time=transmission_time)\n\n dom = CityResidents(city_num=4, city_code='dom', x_size=126, y_size=126, residents_num=1372,\n init_transmitters_num=0,\n remote_workers=remote_workers, responsible_people=responsible_people, timer_min=timer_min,\n timer_max=timer_max, transmission_time=transmission_time)\n\n pod = CityResidents(city_num=5, city_code='pod', x_size=64, y_size=64, residents_num=3081, init_transmitters_num=0,\n remote_workers=remote_workers, responsible_people=responsible_people, timer_min=timer_min,\n timer_max=timer_max, transmission_time=transmission_time)\n\n lub = CityResidents(city_num=6, city_code='lub', x_size=36, y_size=36, residents_num=2053, init_transmitters_num=0,\n remote_workers=remote_workers, responsible_people=responsible_people, timer_min=timer_min,\n timer_max=timer_max, transmission_time=transmission_time)\n\n sho = CityResidents(city_num=7, city_code='sho', x_size=72, y_size=72, residents_num=1261, init_transmitters_num=0,\n remote_workers=remote_workers, responsible_people=responsible_people, timer_min=timer_min,\n timer_max=timer_max, transmission_time=transmission_time)\n\n bal = CityResidents(city_num=8, city_code='bal', x_size=79, y_size=79, residents_num=5074, init_transmitters_num=0,\n remote_workers=remote_workers, responsible_people=responsible_people, timer_min=timer_min,\n timer_max=timer_max, transmission_time=transmission_time)\n\n myt = CityResidents(city_num=9, city_code='myt', x_size=59, y_size=59, residents_num=2355, init_transmitters_num=0,\n remote_workers=remote_workers, responsible_people=responsible_people, timer_min=timer_min,\n 
timer_max=timer_max, transmission_time=transmission_time)\n\n cities_list.append(msk)\n cities_list.append(khi)\n cities_list.append(kra)\n cities_list.append(odi)\n cities_list.append(dom)\n cities_list.append(pod)\n cities_list.append(lub)\n cities_list.append(sho)\n cities_list.append(bal)\n cities_list.append(myt)\n\n # Add work location for msk residents\n msk.add_work_location(city_num=khi.city_num, x_size=khi.x_size, y_size=khi.y_size, workers_num=42)\n msk.add_work_location(city_num=kra.city_num, x_size=kra.x_size, y_size=kra.y_size, workers_num=29)\n msk.add_work_location(city_num=odi.city_num, x_size=odi.x_size, y_size=odi.y_size, workers_num=22)\n msk.add_work_location(city_num=dom.city_num, x_size=dom.x_size, y_size=dom.y_size, workers_num=22)\n msk.add_work_location(city_num=pod.city_num, x_size=pod.x_size, y_size=pod.y_size, workers_num=50)\n msk.add_work_location(city_num=lub.city_num, x_size=lub.x_size, y_size=lub.y_size, workers_num=33)\n msk.add_work_location(city_num=sho.city_num, x_size=sho.x_size, y_size=sho.y_size, workers_num=20)\n msk.add_work_location(city_num=bal.city_num, x_size=bal.x_size, y_size=bal.y_size, workers_num=82)\n msk.add_work_location(city_num=myt.city_num, x_size=myt.x_size, y_size=myt.y_size, workers_num=38)\n\n # Add work location for residents of other cities\n khi.add_work_location(city_num=msk.city_num, x_size=msk.x_size, y_size=msk.y_size, workers_num=695)\n kra.add_work_location(city_num=msk.city_num, x_size=msk.x_size, y_size=msk.y_size, workers_num=470)\n odi.add_work_location(city_num=msk.city_num, x_size=msk.x_size, y_size=msk.y_size, workers_num=363)\n dom.add_work_location(city_num=msk.city_num, x_size=msk.x_size, y_size=msk.y_size, workers_num=367)\n pod.add_work_location(city_num=msk.city_num, x_size=msk.x_size, y_size=msk.y_size, workers_num=825)\n lub.add_work_location(city_num=msk.city_num, x_size=msk.x_size, y_size=msk.y_size, workers_num=550)\n sho.add_work_location(city_num=msk.city_num, x_size=msk.x_size, y_size=msk.y_size, workers_num=338)\n bal.add_work_location(city_num=msk.city_num, x_size=msk.x_size, y_size=msk.y_size, workers_num=1359)\n myt.add_work_location(city_num=msk.city_num, x_size=msk.x_size, y_size=msk.y_size, workers_num=631)\n\n ###########################\n # Run simulations #\n ###########################\n\n timer_dict = defaultdict(list)\n\n healthy_tracker = []\n infected_tracker = []\n invisible_transmitters_tracker = []\n transmitters_tracker = []\n cured_tracker = []\n dead_tracker = []\n quarantine_tracker = []\n\n hour = 1\n\n for i in tqdm(range(1, epochs + 1)):\n\n # 3 hours in a day\n if hour > 3:\n hour -= 3\n\n # Make disease visible (and transmittable) & finish disease\n if i > 1:\n start_time = time.time()\n decrement_timers(cities_list, transmission_time, death_prob)\n end_time = time.time()\n timer_dict['decrement_timers'].append(end_time - start_time)\n if debug:\n print('\\tdecrement_timers()\\t\\t{:.2f} sec.'.format(end_time - start_time))\n\n # Transport people to work\n elif hour == 2:\n start_time = time.time()\n transport_to_work(cities_list, amount='rest')\n end_time = time.time()\n timer_dict['transport_to_work'].append(end_time - start_time)\n if debug:\n print('\\ttransport_to_work()\\t\\t{:.2f} sec.'.format(end_time - start_time))\n\n # # Transport one-third of the people to work\n # if (hour % 7 == 0) & (hour % 2 != 0) & (hour % 3 != 0):\n # start_time = time.time()\n # transport_to_work(cities_list, amount='third')\n # end_time = time.time()\n # 
timer_dict['transport_to_work'].append(end_time - start_time)\n # if debug:\n # print('\\ttransport_to_work()\\t\\t{:.2f} sec.'.format(end_time - start_time))\n\n # # Transport one-third of the people to work\n # elif (hour % 8 == 0) & (hour % 2 != 0) & (hour % 3 != 0):\n # start_time = time.time()\n # transport_to_work(cities_list, amount='third')\n # end_time = time.time()\n # timer_dict['transport_to_work'].append(end_time - start_time)\n # if debug:\n # print('\\ttransport_to_work()\\t\\t{:.2f} sec.'.format(end_time - start_time))\n\n # # Transport rest (one-third) of the people to work\n # elif (hour % 9 == 0) & (hour % 2 != 0):\n # start_time = time.time()\n # transport_to_work(cities_list, amount='rest')\n # end_time = time.time()\n # timer_dict['transport_to_work'].append(end_time - start_time)\n # if debug:\n # print('\\ttransport_to_work()\\t\\t{:.2f} sec.'.format(end_time - start_time))\n\n # Transport rest people from work\n elif hour == 3:\n start_time = time.time()\n transport_to_home(cities_list, amount='rest')\n end_time = time.time()\n timer_dict['transport_to_home'].append(end_time - start_time)\n if debug:\n print('\\ttransport_to_home()\\t\\t{:.2f} sec.'.format(end_time - start_time))\n\n # # Transport one-third of the people from work\n # elif hour % 19 == 0:\n # start_time = time.time()\n # transport_to_home(cities_list, amount='third')\n # end_time = time.time()\n # timer_dict['transport_to_home'].append(end_time - start_time)\n # if debug:\n # print('\\ttransport_to_home()\\t\\t{:.2f} sec.'.format(end_time - start_time))\n\n # # Transport one-third of the people from work\n # elif hour % 20 == 0:\n # start_time = time.time()\n # transport_to_home(cities_list, amount='third')\n # end_time = time.time()\n # timer_dict['transport_to_home'].append(end_time - start_time)\n # if debug:\n # print('\\ttransport_to_home()\\t\\t{:.2f} sec.'.format(end_time - start_time))\n\n # # Transport rest (one-third) of the people from work\n # elif hour % 21 == 0:\n # start_time = time.time()\n # transport_to_home(cities_list, amount='rest')\n # end_time = time.time()\n # timer_dict['transport_to_home'].append(end_time - start_time)\n # if debug:\n # print('\\ttransport_to_home()\\t\\t{:.2f} sec.'.format(end_time - start_time))\n\n # Walk peaple that are near their home\n start_time = time.time()\n walk_iter(cities_list, radius, neighbourhood_radius)\n end_time = time.time()\n timer_dict['walk_iter'].append(end_time - start_time)\n if debug:\n print('\\twalk_iter()\\t\\t\\t{:.2f} sec.'.format(end_time - start_time))\n\n # Observe disease maps\n start_time = time.time()\n disease_mat_list = make_disease_matrices(cities_list, spread_radius)\n end_time = time.time()\n timer_dict['make_disease_matrices'].append(end_time - start_time)\n if debug:\n print('\\tmake_disease_matrices()\\t\\t{:.2f} sec.'.format(end_time - start_time))\n\n # Plot & save disease exposure map (cities)\n if plot_disease_matrix is not None:\n\n start_time = time.time()\n\n # Plot & save all cities on single figure\n # plot_disease_exposures(cities_list, spread_radius, epoch=i, path=plot_disease_matrix)\n\n # Plot & save all cities on multiple figures\n for city_idx in range(len(cities_list)):\n plot_disease_exposure(cities_list, city_idx, spread_radius, epoch=i, path=plot_disease_matrix)\n\n # Plot & save Moscow (Hub city)\n # plot_disease_exposure(cities_list, msk.city_num, spread_radius, epoch=i, path=plot_disease_matrix)\n\n end_time = time.time()\n timer_dict['plot_disease_exposure'].append(end_time - 
start_time)\n if debug:\n print('\\tplot_disease_exposure()\\t\\t{:.2f} sec.'.format(end_time - start_time))\n\n # Spread disease (based on the maps above)\n start_time = time.time()\n spread_disease(disease_mat_list, cities_list, timer_min, timer_max, transmission_time, infect_prob)\n end_time = time.time()\n timer_dict['spread_disease'].append(end_time - start_time)\n if debug:\n print('\\tspread_disease()\\t\\t{:.2f} sec.'.format(end_time - start_time))\n\n # Screen population to detect disease\n start_time = time.time()\n screen_for_disease(cities_list, quarantine_zone_size, transmitters_test_quota, others_test_quota)\n end_time = time.time()\n timer_dict['screen_for_disease'].append(end_time - start_time)\n if debug:\n print('\\tscreen_for_disease()\\t\\t{:.2f} sec.'.format(end_time - start_time))\n\n # Track real stats\n start_time = time.time()\n track_stats(cities_list, healthy_tracker, infected_tracker, invisible_transmitters_tracker,\n transmitters_tracker, cured_tracker, dead_tracker, quarantine_tracker)\n end_time = time.time()\n timer_dict['track_stats'].append(end_time - start_time)\n if debug:\n print('\\ttrack_stats()\\t\\t\\t{:.2f} sec.'.format(end_time - start_time))\n\n hour += 1\n\n # Debug\n print('[epoch={}]\\tinfected={}\\ttransmitters(visible+invisible)={}'.format(i, infected_tracker[-1],\n invisible_transmitters_tracker[-1] +\n transmitters_tracker[-1]))\n if debug:\n print('\\n')\n\n return timer_dict, \\\n np.array(healthy_tracker), np.array(infected_tracker), np.array(invisible_transmitters_tracker), \\\n np.array(transmitters_tracker), np.array(cured_tracker), np.array(dead_tracker), np.array(quarantine_tracker)","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":16448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"513495113","text":"import hmac\nimport requests\nimport uuid\nfrom django.conf import settings\n\n\nACCOUNT_ID_KEY_NAME = 'bepay_account_id'\n\ndef hash_256(string):\n return hmac.new(settings.BEPAY_SECRET_KEY.encode(), msg=string.encode(), digestmod='sha256').hexdigest()\n\n\nclass Gateway:\n def raise_if_error(self, response):\n if 'error' in response:\n raise Exception(response['error']['description'])\n return response\n\n def create_account(self, account):\n data = {\n 'externalIdentifier': str(account.pk),\n 'sharedAccount': False,\n 'client': {\n 'name': account.user.name,\n 'socialName': account.user.name,\n 'taxIdentifier': {\n 'taxId': account.user.document_1,\n 'country': 'BRA'\n },\n 'mobilePhone': {\n 'phoneNumber': account.user.mobile_phone,\n 'country': 'BRA'\n },\n 'email': account.user.email\n }\n }\n\n tx_hash = hash_256(str(account.pk) + account.user.document_1)\n headers = {'Api-Access-Key': settings.BEPAY_API_ACCESS_KEY, 'Transaction-Hash': tx_hash}\n response = requests.post(settings.BEPAY_SERVER_URL + '/accounts', json=data, headers=headers).json()\n self.raise_if_error(response)\n\n user = account.user\n account_id = response['data']['account']['accountId']\n user.profile[ACCOUNT_ID_KEY_NAME] = account_id\n user.save()\n\n return account_id\n\n def get_account_id(self, account):\n user = account.user\n if not ACCOUNT_ID_KEY_NAME in user.profile:\n return self.create_account(account)\n return user.profile[ACCOUNT_ID_KEY_NAME]\n\n def get_account(self, account):\n account_id = self.get_account_id(account)\n headers = {'Api-Access-Key': settings.BEPAY_API_ACCESS_KEY}\n response = requests.get(settings.BEPAY_SERVER_URL + 
f'/accounts/{account_id}', headers=headers).json()\n return self.raise_if_error(response)\n\n def get_balance(self, account):\n account_id = self.get_account_id(account)\n tx_hash = hash_256(account_id)\n headers = {'Api-Access-Key': settings.BEPAY_API_ACCESS_KEY, 'Transaction-Hash': tx_hash}\n response = requests.get(settings.BEPAY_SERVER_URL + f'/accounts/{account_id}/balance', headers=headers).json()\n return self.raise_if_error(response)\n\n def get_statement(self, account):\n account_id = self.get_account_id(account)\n tx_hash = hash_256(account_id)\n headers = {'Api-Access-Key': settings.BEPAY_API_ACCESS_KEY, 'Transaction-Hash': tx_hash}\n response = requests.get(settings.BEPAY_SERVER_URL + f'/accounts/{account_id}/statement', headers=headers).json()\n return self.raise_if_error(response)\n\n def transfer_money(self, account, amount):\n account_id = self.get_account_id(account)\n data = {'totalAmount': amount,\n 'currency': 'BRL',\n 'externalIdentifier': uuid.uuid4().hex,\n 'sender': {'account': {'accountId': settings.BEPAY_MASTER_ACCOUNT_ID}},\n 'paymentInfo':{'transactionType': 'InternalTransfer'},\n 'recipients': [{'account':{'accountId': f'{account_id}'},\n 'amount': amount,\n 'currency': 'BRL',\n 'senderComment': 'Transfer for bank withdraw',\n 'recipientComment': 'Receive transfer for bank withdraw'\n }]\n }\n\n tx_hash = hash_256(settings.BEPAY_MASTER_ACCOUNT_ID + str(amount) + account_id + str(amount))\n headers = {'Api-Access-Key': settings.BEPAY_API_ACCESS_KEY, 'Transaction-Hash': tx_hash}\n response = requests.post(settings.BEPAY_SERVER_URL + '/payments', json=data, headers=headers).json()\n return self.raise_if_error(response)\n\n\n def can_deposit(self, account, data):\n pass\n\n def to_withdraw(self, withdraw):\n amount = str(int(withdraw.net_amount))\n external_identifier = str(withdraw.pk)\n account_type = 1 if withdraw.account_type == 'checking' else 2\n\n data = {\n 'totalAmount': amount,\n 'currency': 'BRL',\n 'withdrawInfo': {\n 'withdrawType': 'BankTransfer',\n 'bankTransfer': {\n 'bankDestination': withdraw.bank,\n 'branchDestination': withdraw.agency,\n 'accountDestination': withdraw.account_number,\n 'taxIdentifier': {\n 'taxId': withdraw.account.user.document_1,\n 'country': 'BRA'\n },\n 'personType': 'PERSON',\n 'name': withdraw.account.user.name,\n 'accountTypeDestination': str(account_type)\n }\n },\n 'externalIdentifier': external_identifier\n }\n\n account_id = self.get_account_id(withdraw.account)\n tx_hash = hash_256(amount + account_id + withdraw.bank + withdraw.agency + withdraw.account_number)\n headers = {'Api-Access-Key': settings.BEPAY_API_ACCESS_KEY, 'Transaction-Hash': tx_hash}\n response = requests.post(settings.BEPAY_SERVER_URL + f'accounts/{account_id}/withdraw', json=data, headers=headers).json()\n valid_response = self.raise_if_error(response)\n return valid_response['data']['transactionId']\n","sub_path":"exchange_payments/gateways/bepay.py","file_name":"bepay.py","file_ext":"py","file_size_in_byte":5458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"55924584","text":"'''\nCreated on Feb 5, 2016\n\n@author: yumilceh\n'''\nfrom numpy import linspace\nfrom numpy import random as np_rnd\n\nif __name__ == '__main__':\n \n \n ## Adding the projects folder to the path##\n import os,sys,random\n sys.path.append(os.getcwd())\n\n ## Adding libraries##\n from SensorimotorSystems.Sinus import Constrained_Sinus as System\n from Algorithm.Algorithm_Random import Algorithm_Random as Algorithm\n 
from Algorithm.Algorithm_Random import MODELS \n from Models.GMM_SM import GMM_SM\n from Models.GMM_SS import GMM_SS\n from Algorithm.ModelEvaluation import SM_ModelEvaluation\n from DataManager.PlotTools import *\n\n\n ## Simulation Parameters ##\n n_initialization=20\n n_evaluation_samples=100\n n_experiments=30\n random_seed=1234\n \n k_sm = 5\n sm_step=30\n alpha_sm=0.1\n \n k_ss = 5\n ss_step=30\n alpha_ss=0.1\n \n ## To guarantee reproductible experiments##\n random.seed(random_seed)\n np_rnd.seed(random_seed)\n\n ## Creating Agent ##\n system=System()\n \n ## Creating models ##\n models=MODELS()\n \n models.f_sm = GMM_SM(system,k_sm, \n sm_step = sm_step, \n alpha = alpha_sm)\n models.f_ss = GMM_SS(system,k_ss, \n ss_step = ss_step, \n alpha = alpha_ss)\n\n ## Creating Simulation object, running simulation and plotting experiments##\n file_prefix='Sinus_GMM_'\n simulation1=Algorithm(system,\n models,\n file_prefix=file_prefix,\n n_experiments = n_experiments,\n random_babbling='sensor'\n )\n \n\n simulation1.runNonProprioceptiveAlgorithm(n_motor_initialization=k_sm*3)\n \n initialization_data_sm_ss=simulation1.data.initialization_data_sm_ss\n simulation_data=simulation1.data.simulation_data\n\n '''\n fig,ax=initializeFigure();\n fig,ax=initialization_data_sm_ss.plot_2D(fig,ax,'art', 0, 'sensor', 0,\"or\")\n '''\n\n fig1,ax1=initializeFigure();\n fig1,ax1=simulation_data.plot_2D(fig1, ax1, 'art', 0, 'sensor', 0, \"or\")\n \n \n ## Validation of the model ##\n evaluation=SM_ModelEvaluation(system,\n n_evaluation_samples,\n simulation1.models.f_sm,\n file_prefix=file_prefix)\n evaluation.setValidationEvaluationSets()\n \n validation_valSet_data = evaluation.evaluate(save_data=True)\n \n fig1,ax1=validation_valSet_data.plot_2D(fig1, ax1, 'art', 0, 'sensor', 0, \"ob\")\n fig1,ax1=validation_valSet_data.plot_2D(fig1, ax1, 'art', 0, 'sensor_goal', 0, \"ok\")\n fig1,ax1 = simulation1.models.f_sm.model.plot_gmm_projection(fig1, ax1, 0, 1)\n ax1.relim()\n ax1.autoscale_view()\n \n fig2,ax2=initializeFigure();\n fig2,ax2=validation_valSet_data.plot_time_series(fig2, ax2, 'competence', 0, \"r\", moving_average=0)\n \n \n \n \n \n plt.show();","sub_path":"exploration/executables/Trash/sinus_constrained/learnSinus_GMMpbd_sensorBabbling.py","file_name":"learnSinus_GMMpbd_sensorBabbling.py","file_ext":"py","file_size_in_byte":3028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"427417327","text":"# -*- coding: utf-8 -*-\nfrom sklearn.model_selection import train_test_split\nimport sys\nimport pandas as pd\n\nINPUT_PATH = sys.argv[1]\nOUTPUT_PATH1 = sys.argv[2]\nOUTPUT_PATH2 = sys.argv[3]\nSPLIT_STYLE = sys.argv[4]\nSPLIT_PERCENT = sys.argv[5]\nRANDOM_STATE = sys.argv[6]\n\n# define the configures\ninput = {\"input_path\": INPUT_PATH}\noutput = {\"output_path1\": OUTPUT_PATH1,\n \"output_path2\": OUTPUT_PATH2}\nparameter = {\"setting\": {\"split_style\": SPLIT_STYLE,\n \"split_percent\": SPLIT_PERCENT,\n \"random_state\": RANDOM_STATE}}\n\n\n# run the module\ninput_data = pd.read_csv(input[\"input_path\"])\nX_train, X_test = train_test_split(input_data,\n test_size=parameter[\"setting\"].get(\"split_percent\", 0.5),\n random_state=parameter[\"setting\"].get(\"random_state\", 
1))\n\nX_train.to_csv(output[\"output_path1\"])\nX_test.to_csv(output[\"output_path2\"])\n","sub_path":"alg_platform/feature/data_split.py","file_name":"data_split.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"359155042","text":"'''\nCreated on Oct 10, 2013\n\n@package: ally documentation\n@copyright: 2011 Sourcefabric o.p.s.\n@license: http://www.gnu.org/licenses/gpl-3.0.txt\n@author: Gabriel Nistor\n\nProcessor that provides invoker definition descriptions indexing.\n'''\n\nfrom collections import OrderedDict\n\nfrom ally.container.ioc import injected\nfrom ally.core.spec.definition import IVerifier\nfrom ally.design.processor.attribute import requires\nfrom ally.design.processor.context import Context\nfrom ally.design.processor.handler import HandlerProcessor\nfrom ally.design.processor.resolvers import solve\n\nfrom ..definition import resolversForDescriptions, indexDefinition\n\n\n# --------------------------------------------------------------------\nclass Document(Context):\n '''\n The introspect context.\n '''\n # ---------------------------------------------------------------- Required\n invokerData = requires(list)\n\nclass Invoker(Context):\n '''\n The invoker context.\n '''\n # ---------------------------------------------------------------- Required\n definitions = requires(list)\n \n# --------------------------------------------------------------------\n\n@injected\nclass DefinitionInvokerHandler(HandlerProcessor):\n '''\n Handler that provides the definition indexing for invoker definitions.\n '''\n \n name = str\n # The name to publish the definitions in data.\n verifier = IVerifier\n # The verifier used on the register definition in order to extract the headers.\n descriptions = list\n # The descriptions (list[tuple(IVerifier, tuple(string), dictionary{string: object})]) used in constructing the error.\n \n def __init__(self):\n assert isinstance(self.name, str), 'Invalid name %s' % self.name\n assert isinstance(self.verifier, IVerifier), 'Invalid verifier %s' % self.verifier\n assert isinstance(self.descriptions, list), 'Invalid descriptions %s' % self.descriptions\n \n resolvers = resolversForDescriptions(self.descriptions)\n self.verifier.prepare(resolvers)\n solve(resolvers, dict(Invoker=Invoker))\n \n super().__init__(**resolvers)\n \n def process(self, chain, document:Document, **keyargs):\n '''\n @see: HandlerProcessor.process\n \n Index the regiter definition data to be documented.\n '''\n assert isinstance(document, Document), 'Invalid document %s' % document\n if not document.invokerData: return # No invoker data to process.\n\n for invoker, data in document.invokerData:\n assert isinstance(invoker, Invoker), 'Invalid invoker %s' % invoker\n if not invoker.definitions: continue\n \n for defin in invoker.definitions:\n if not self.verifier.isValid(defin): continue\n ddata = data.get(self.name)\n if ddata is None: ddata = data[self.name] = OrderedDict()\n indexDefinition(defin, ddata, self.descriptions)\n","sub_path":"components/ally-documentation/ally/documentation/core/impl/processor/definition_invoker.py","file_name":"definition_invoker.py","file_ext":"py","file_size_in_byte":2972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"124275100","text":"#!/usr/bin/python\n\n# This script tests that no changes to simulation output were introduced between two MarDyn binaries\n# E.g. 
we change the structure of the cache-s to SoA-s and we want that the program gives the same\n# physical output as before.\n#\n# I'm starting a first basic version, many options will be added as this script is turned into a\n# more general tool which Jenkins automatically tests.\n#\n# For the moment, this will test the output of a ResultWriter MarDyn output-plugin.\n#\n# Author: Nikola Tchipev\n# 8. Mai 2016\n\n\nfrom getopt import getopt\nfrom sys import argv\nfrom subprocess import call\nfrom glob import glob\nimport os\nimport ntpath\nimport cmd\nfrom subprocess import Popen, PIPE\nfrom shlex import split\nimport compareHelpers\nimport time\n\n# from twisted.internet.defer import returnValue\n\nmpi = '-1'\nnewMarDyn = ''\noldMarDyn = '-1'\nxmlFilename = ''\nadditionalFilenames = []\ncomparePlugins = ['ResultWriter', 'GammaWriter', 'RDF']\ndisabledPlugins = []\nnumIterations = '25'\nbaseisnormal = 0\nremote = ''\nremoteprefix = '/scratch'\n# shortopts: if they have an argument, then add : after shortcut\noptions, remainder = getopt(argv[1:], 'M:m:n:o:c:p:I:hbr:R:LB:AS',\n ['mpicmd=',\n 'mpi=',\n 'newMarDyn=',\n 'oldMarDyn=',\n 'xmlFilename=',\n 'plugin=',\n 'numIterations=',\n 'help',\n 'baseisnormal',\n 'remote=',\n 'remoteprefix=',\n 'baseIsLocal',\n 'baseRemote=',\n 'allMPI',\n 'legacy-cell-processor',\n 'srunFix',\n 'disablePlugin='\n ])\nnonDefaultPlugins = False\nbaseIsLocal = False\nlegacyCellProcessor = False\n\nallMPI = False\nMPI_START = 'mpirun' # e.g. I need to set it to mpirun.mpich locally\nprint(options)\nbaseRemote = \"\"\nfor opt, arg in options:\n if opt in ('-n', '--newMarDyn'):\n newMarDyn = arg\n elif opt in ('-o', '--oldMarDyn'):\n oldMarDyn = arg\n elif opt in ('-c', '--xmlFilename'):\n xmlFilename = arg\n elif opt in ('-m', '--mpi'):\n mpi = arg\n elif opt in ('-M', '--mpicmd'):\n MPI_START = arg\n elif opt in ('-p', '--plugin'):\n if (not nonDefaultPlugins): # first encounter of \"-p\" -> clear plugin list\n nonDefaultPlugins = True\n comparePlugins = []\n comparePlugins.append(arg)\n elif opt in ('--disablePlugin'):\n disabledPlugins.append(arg)\n elif opt in ('-I', '--numIterations'):\n numIterations = arg\n elif opt in ('-h', '--help'):\n print(\"Make sure two versions of mardyn produce identical simulation results. Sample usage:\")\n print(\" multiple -p are possible. Currently ResultWriter, GammaWriter and RDF are supported.\")\n print(\n \"\"\" ./vr -m 4 -n MarDyn.PAR_RELEASE_AVX2 -o MarDyn.PAR_RELEASE_AOS -c ../examples/surface-tension_LRC/C6H12_500/C6H12_500_1R.xml -p GammaWriter -I 10 \"\"\")\n print(\"All files in the same directory as the xml are automatically copied to the testing directories.\")\n print(\" -b specifies, that the base (old file) is assumed to be sequential\")\n print(\" -r specifies the remote host \")\n print(\n \" --remoteprefix changes the prefix of the directory, that is used on the remote host. 
as relative path to $HOME or absolute path if path starts with /\")\n exit(1)\n elif opt in ('-b', '--baseisnormal'):\n baseisnormal = 1\n elif opt in ('-r', '--remote'):\n remote = arg\n print(\"remote\", remote)\n elif opt in ('-R', '--remoteprefix'):\n remoteprefix = arg\n elif opt in ('--baseIsLocal'):\n baseIsLocal = True\n elif opt in ('-B', '--baseRemote'):\n baseRemote = arg\n elif opt in ('-A', '--allMPI'):\n allMPI = True\n elif opt in ('', '--legacy-cell-processor'):\n legacyCellProcessor = True\n elif opt in ('-S', '--srunFix'):\n os.environ[\"I_MPI_PMI_LIBRARY\"] = \"/usr/lib64/libpmi.so\"\n else:\n print(\"unknown option: \" + opt)\n exit(1)\n\nif xmlFilename == \"\":\n print(\"xmlFilename is required, specify using -c or --xmlFilename\")\n exit(1)\n\nif baseIsLocal and baseRemote:\n print(\"defined baseIsLocal and defined a base remote host. this contradicts itself. exiting...\")\n exit(1)\n\n# disable disabled plugins:\nfor dPlugin in disabledPlugins:\n comparePlugins.remove(dPlugin)\n\nSEQ = (mpi == '-1')\nPAR = not SEQ\n\nnoReferenceRun = (oldMarDyn == '-1')\ndoReferenceRun = not noReferenceRun\n\ncomparePostfixes = []\nfor i in range(len(comparePlugins)):\n comparePlugin = comparePlugins[i]\n if comparePlugin == 'Resultwriter' or comparePlugin == 'ResultWriter':\n comparePlugins[i] = comparePlugin = 'ResultWriter'\n comparePostfixes.append('.res')\n elif comparePlugin == 'GammaWriter':\n comparePostfixes.append('.gamma')\n elif comparePlugin == 'RDF':\n comparePostfixes.append('.rdf')\n else:\n print(\"Plugin \" + comparePlugin + \" not supported yet.\")\n print(\"Have a look whether you can add it yourself.\")\n exit(1)\n\nif noReferenceRun:\n print(\"no old version given. Will try to reuse existing output, by not erasing it at start.\")\n\n# JUMP to validationRuns - extract path to validationRuns from argv[0]!\npathToValidationRuns = ntpath.dirname(os.path.realpath(__file__))\npathToValidationRuns = os.path.realpath(pathToValidationRuns)\npathToInput = pathToValidationRuns + '/input'\npathToNew = pathToValidationRuns + '/new'\npathToReference = pathToValidationRuns + '/reference'\n\nprint(pathToValidationRuns)\n\n# first clean all the folders\ncleanUpCommand = ['rm', \"-rf\", \"--preserve-root\"]\ncleanUpCommand.extend([pathToInput])\ncleanUpCommand.extend(glob(pathToNew + '/*'))\nif doReferenceRun:\n # this shouldn't be cleared if no reference run is done, as we will reuse previous results.\n cleanUpCommand.extend(glob(pathToReference + '/*'))\nprint(cleanUpCommand)\np = Popen(cleanUpCommand, stdout=PIPE, stderr=PIPE)\np.communicate() # suppresses possible errors if nothing there yet, as we don't want them for rm\n\n# get the basename and the directory of the xml file.\noriginalInpDir = ntpath.dirname(xmlFilename)\nxmlBase = ntpath.basename(xmlFilename)\n\n# copy input\ncall(['cp', '-r', originalInpDir, pathToInput])\n\n# copy executables!\ncall(['mkdir', '-p', pathToNew])\ncall(['cp', newMarDyn, pathToNew])\nif doReferenceRun:\n call(['mkdir', '-p', pathToReference])\n call(['cp', oldMarDyn, pathToReference])\n\n# go there\nos.chdir(pathToValidationRuns)\n\n# gets the file names (after last '/')\noldMarDynBase = ntpath.basename(oldMarDyn)\nnewMarDynBase = ntpath.basename(newMarDyn)\n\n# print \"append ComparisonWriter here\"\n# print \"append ComparisonWriter here\"\nwith open(pathToInput + \"/\" + xmlBase, \"r\") as prev_file:\n with open(\"tmp.xml\", \"w\") as new_file:\n contents = prev_file.readlines()\n # Now contents is a list of strings and you may add the new 
line to this list at any position\n # contents.insert(4, \"\\n This is a new line \\n \")\n\n for i in range(len(contents)):\n line = contents[i]\n if 'RDF' in comparePlugins and line.find(\"\") != -1:\n contents.insert(i + 1, \"\"\"0\\n\"\"\")\n i += 1\n continue\n\n if line.find(\"\") != -1:\n for comparePlugin in comparePlugins:\n if comparePlugin == 'RDF': # configuring RDF within the xml is different...\n contents.insert(i + 1, \"\"\"\n \n 10\n val.comparison\n 0.003\n 1000\n \\n\"\"\")\n i += 1\n # myfile.write(\"initStatistics 0\\nRDF 0.003 1000\\nRDFOutputTimesteps 10\\nRDFOutputPrefix val.comparison\\n\")\n else:\n contents.insert(i + 1, \"\"\"\n \n 1\n val.comparison\n \"\"\")\n i += 1\n # myfile.write(\"output \" + comparePlugin + \" 1 val.comparison\\n\")\n new_file.write(\"\".join(contents))\ncall(['mv', 'tmp.xml', pathToInput + \"/\" + xmlBase])\n\n# copy files to new and reference\n\ncall(['cp', '-r', pathToInput, pathToNew + \"/input\"])\ncall(['cp', '-r', pathToInput, pathToReference + \"/input\"])\n\ncomparisonFilenames = []\nfor comparePostfix in comparePostfixes:\n comparisonFilenames.append('val.comparison' + comparePostfix)\n\n\ndef doRun(directory, MardynExe):\n # first run\n if baseRemote and directory == \"reference\":\n localRemote = baseRemote\n else:\n localRemote = remote\n os.chdir(directory)\n call(['chmod', '+x', MardynExe])\n cmd = []\n\n doRemote = localRemote and (directory == 'new' or not baseIsLocal)\n\n if doRemote:\n rsyncremote = localRemote\n if localRemote.endswith('-mic0') or localRemote.endswith('-mic1'):\n rsyncremote = localRemote[:-5]\n command = \"mkdir -p \" + remoteprefix\n mkdircmd = []\n mkdircmd.extend(['ssh', rsyncremote, command])\n p = Popen(mkdircmd, stdout=PIPE, stderr=PIPE)\n out, err = p.communicate()\n if p.returncode:\n print(\"error on mkdir -p:\")\n print(out, err)\n exit(1)\n remotedirectory = remoteprefix + \"/\" + directory\n command = \"rsync --delete-before -r ../\" + directory + \" \" + rsyncremote + \":\" + remoteprefix\n print(command)\n p = Popen(split(command))\n p.wait()\n if p.returncode:\n print(\"error on rsync\")\n exit(1)\n command = \"cd \" + remotedirectory + \" && pwd && \"\n cmd.extend(['ssh', localRemote, command])\n\n if allMPI:\n cmd.extend(split(MPI_START))\n if directory == 'new' or not baseisnormal:\n cmd.extend(['-n', str(mpi)])\n else:\n cmd.extend(['-n', '1'])\n else:\n if PAR and (directory == 'new' or not baseisnormal):\n cmd.extend(split(MPI_START))\n cmd.extend(['-n', str(mpi)])\n\n if legacyCellProcessor and directory == \"new\":\n cmd.extend(\n ['./' + MardynExe, \"--legacy-cell-processor\", \"--final-checkpoint=0\", \"input/\" + xmlBase, \"--steps\",\n numIterations])\n else:\n cmd.extend(['./' + MardynExe, \"--final-checkpoint=0\", \"input/\" + xmlBase, \"--steps\", numIterations])\n # cmd.extend(['/work_fast/tchipevn/SDE/sde-external-7.41.0-2016-03-03-lin/sde64', '-knl', '--', './' + MardynExe, \"--final-checkpoint=0\", xmlBase, numIterations]);\n print(cmd)\n print(\"================\")\n t = time.time()\n while True:\n # repeatedly try this if srun was not working\n p = Popen(cmd, stdout=PIPE, stderr=PIPE)\n out, err = p.communicate()\n if p.returncode == 1 and (\n \"Job violates accounting/QOS policy\" in err or \"Socket timed out on send/recv\" in err):\n print(\"srun submit limit reached or socket timed out error, trying again in 60s\")\n time.sleep(60)\n continue\n break\n t = time.time() - t\n print(\"elapsed time:\", t)\n if p.returncode:\n print(\"error while executing 
program:\")\n print(out, err)\n exit(1)\n print(out, err)\n if doRemote: # sync back\n command = \"rsync \" + rsyncremote + \":\" + remotedirectory + \"/* ./\"\n print(command)\n p = Popen(split(command))\n p.wait()\n\n if \"RDF\" in comparePlugins:\n p = Popen(['ls', '-r'] + glob(\"val.comparison*.rdf\"), stdout=PIPE, stderr=PIPE)\n out, err = p.communicate()\n p = Popen(split(\"cp \" + split(out)[0] + \" val.comparison.rdf\"))\n # Copy newest rdf file to val.comparison.rdf\n p.wait()\n for comparisonFilename in comparisonFilenames:\n # possible switch/if statements if other comparison plugins require different output.\n p = Popen(split(\n \"sed -i.bak '/^#/d; s/[[:blank:]]*$//; /^$/d' \" + comparisonFilename)) # deletes lines starting with #.\n # These are the lines containing timestamps, and have to be removed for proper comparison.\n p.wait()\n os.chdir('..')\n\n\nprint(\"new run:\")\n# first run\ndoRun('new', newMarDynBase)\n\n# second run\nif doReferenceRun:\n print(\"reference run:\")\n doRun('reference', oldMarDynBase)\nreturnValue = 0\n# call(['diff' 'reference/val.comparison.res' 'new/val.comparison.res'])\nprint(\"\")\nfor i in range(len(comparePlugins)):\n localReturn = compareHelpers.compareFiles(\"reference/\" + comparisonFilenames[i], \"new/\" + comparisonFilenames[i])\n returnValue += localReturn\n if localReturn == 0:\n print(\"Identical values! for \", comparePlugins[i])\n else:\n print(\"mismatches for \", comparePlugins[i])\n\nif returnValue == 0:\n print(\"\")\n print(\"Identical values!\")\n print(\"\")\n exit(0)\nelse:\n print(\"\")\n print(\"mismatches\")\n print(\"\")\n exit(1)\n","sub_path":"validation/validationRun/validationRun.py","file_name":"validationRun.py","file_ext":"py","file_size_in_byte":13465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"29294907","text":"import os\nimport numpy\nimport matplotlib.pylab as plt\n\nif __name__ == '__main__':\n loadpath = 'E:/ProjectData_SpeechRecognition/Transform/IEMOCAP-Tran-LA-3-Punishment-1000-Result/'\n for gender in ['Female', 'Male']:\n for session in range(1, 6):\n if not os.path.exists(os.path.join(loadpath, 'Session%d-%s' % (session, gender))):\n print()\n continue\n uaList, waList = [], []\n for CHOOSED in ['Decode', 'Logits', 'SoftMax']:\n for episode in range(200):\n if not os.path.exists(\n os.path.join(loadpath, 'Session%d-%s' % (session, gender),\n '%s-%04d.csv' % (CHOOSED, episode))): continue\n data = numpy.genfromtxt(\n fname=os.path.join(loadpath, 'Session%d-%s' % (session, gender),\n '%s-%04d.csv' % (CHOOSED, episode)),\n dtype=int, delimiter=',')\n\n uaCounter, waCounter = 0, 0\n for index in range(len(data)):\n uaCounter += data[index][index] / sum(data[index])\n waCounter += data[index][index]\n uaCounter /= len(data)\n waCounter /= sum(sum(data))\n # print(uaCounter, waCounter)\n uaList.append(uaCounter)\n waList.append(waCounter)\n # plt.plot(uaList, label='UA')\n # plt.plot(waList, label='WA')\n # plt.legend()\n # plt.show()\n print(max(uaList), '\\t', max(waList))\n","sub_path":"MultiModalTest/Test/Transform/ResultAnalysis.py","file_name":"ResultAnalysis.py","file_ext":"py","file_size_in_byte":1673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"21217734","text":"from stacks import Stack, is_balanced\n\n\nclass Postfix():\n def __init__(self, s):\n self.oplist = [\n \"+\",\n \"*\",\n \"=\",\n ]\n self.result = self.load_expression(s)\n\n # загружает выражение из строки в 2 стека 
- операнды и операции\n # операнды и операции отделены друг от друга пробелами\n def load_expression(self, s):\n # проверка баланса открывающих и закрывающих строк\n if not(is_balanced(s)):\n raise ValueError('Brackets are not balanced in \"{1}\"'.format(s))\n\n s1 = Stack()\n s2 = Stack()\n # получаем список операндов и операций, выполняем простую проверку, что их минимум 3\n lst = s.split(\" \")\n if len(lst) < 3:\n raise ValueError(\n 'Unsufficient operands or operators in \"{0}\"'.format(s))\n # кладем все элементы в первый стек\n for elem in lst:\n s1.push(elem)\n\n # забираем по одному элементы\n elem = s1.pop()\n while elem is not None:\n # если число - кладем во второй стек\n if elem.isnumeric():\n s2.push(elem)\n else:\n if elem in self.oplist: # если оператор\n # если оператор =, то возвращаем результат вычислений\n if elem == \"=\":\n return s2.stack[0]\n # в остальных случаях забираем 2 верхних числа из стека и выполняем над ними операцию, хранящуюся в elem\n operand1 = s2.pop()\n operand2 = s2.pop()\n if (operand1 is not None and operand2 is not None):\n result = eval(\n \"\".join([str(operand1), elem, str(operand2)]))\n s2.push(result)\n else:\n # если во втором стеке не оказалось достаточно операторов, значит порядок в строке был неправильный -\n # выбрасываем исключение\n raise ValueError(\n 'Wrong order near \"{0}\" in \"{1}\"'.format(elem, s))\n else:\n # если встретили элемент в первом стеке, который не число и не из списка разрешенных операторов,\n # то выбрасываем исключение\n raise ValueError(\n 'Unknown expression \"{0}\" in \"{1}\"'.format(elem, s))\n elem = s1.pop() # извлекаем следующий элемент из первого стека\n # дошли до конца строки, но сивола равно в ней не оказалось - все равно возвращаем результат\n return s2.stack[0]\n","sub_path":"t5/postfix.py","file_name":"postfix.py","file_ext":"py","file_size_in_byte":3361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"528230608","text":"import ppscore as pps\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nsns.set(style=\"darkgrid\")\n\ndef find_predictive_power_score(data):\n \n print(\"Predictive Power Score - Heatmap\")\n \n # Predictive Power Score - PPS Matrix\n pscore = pps.matrix(data)\n \n return pscore.style.background_gradient(cmap='coolwarm', axis=None).set_precision(2)\n\ndef scatterplot_between_numerical_features(data, x_num_column, y_num_column, cat_column):\n \n print(\"Analyse Relationships - Scatterplot\")\n \n # Show Scatterplot between two features\n sns.set(rc={'figure.figsize':(20,10)})\n sns.scatterplot(x=x_num_column, y=y_num_column, hue=cat_column, data=data)\n plt.show()\n\ndef violinplot_of_categorical_with_numerical_feature(data, cat_column, num_column):\n \n print(\"Compare Category - Violinplot\")\n \n # Show vilionplots to compare a categorical feature with Numerical feature\n fig = sns.catplot(x=cat_column, y=num_column, kind='violin', data=data, aspect=3)\n fig.set_xticklabels(rotation=90)\n plt.show()\n\ndef pivot_data(data, cat_index, cat_columns, num_values, agg_method):\n \n print(\"Pivot View\")\n \n # Generate Pivot Tables\n data_out = data.pivot_table(index=cat_index, columns=cat_columns, \n values=num_values, aggfunc=agg_method).round(2)\n \n return data_out\n\n#=================================================================================================\n\ndef eda_numcat(data, x, y, method=\"pps\", hue=None, values=None, aggfunc=\"mean\"):\n \n try:\n \n # Default method\n if method==\"pps\":\n return 
find_predictive_power_score(data)\n \n if method==\"relationship\":\n scatterplot_between_numerical_features(data, x, y, hue)\n \n if method==\"comparison\":\n violinplot_of_categorical_with_numerical_feature(data, x, y)\n \n if method==\"pivot\":\n return pivot_data(data, x, y, values, aggfunc)\n \n except Exception as e:\n print(e)","sub_path":"build/lib/quickda/explore_numeric_categoric.py","file_name":"explore_numeric_categoric.py","file_ext":"py","file_size_in_byte":2040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"604670629","text":"from flask import Flask, render_template, request, redirect\nfrom google.cloud import datastore\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nimport datetime\nimport requests\nfrom flask_table import Table, Col\n\ndatastore_client = datastore.Client()\n\napp = Flask(__name__)\n#def fetch_times(limit):\n# query = datastore_client.query(kind='visit')\n# query.order = ['-timestamp']\n#\n# times = query.fetch(limit=limit)\n#\n# return times\n# [END gae_python37_datastore_store_and_fetch_times]\n\nclass ItemTable(Table):\n Name = Col('Name')\n Trophies = Col('Trophies')\n Role = Col('Role')\n Dontaions = Col('Dontaions')\n Battles_war_1 = Col('Battles_war_1')\n Wins_war_1 = Col('Wins_war_1')\n Cards_war_1 = Col('Cards_war_1')\n Battles_war_2 = Col('Battles_war_2')\n Wins_war_2 = Col('Wins_war_2')\n Cards_war_2 = Col('Cards_war_2')\n Battles_war_3 = Col('Battles_war_3')\n Wins_war_3 = Col('Wins_war_3')\n Cards_war_3 = Col('Cards_war_3')\n Battles_war_4 = Col('Battles_war_4')\n Wins_war_4 = Col('Wins_war_4')\n Cards_war_4 = Col('Cards_war_4')\n\n\ndef Getlist_clanmembers(limit):\n datastore_client = datastore.Client()\n\n query = datastore_client.query(kind='Clan')\n query.add_filter('Battles_war_1', '=', 0)\n query.add_filter('Battles_war_2', '=', 0)\n query.add_filter('Battles_war_3', '=', 0)\n query.add_filter('Battles_war_4', '=', 0)\n query.add_filter('Role', '=', 'Member')\n query.order = ['Trophies']\n names = list(query.fetch(limit=limit))\n\n\n return names\n\ndef get_stats_f():\n URL = 'https://statsroyale.com/clan/222JU0P2'\n requests.get(URL + '/refresh')\n r = requests.get(URL)\n soup = BeautifulSoup(r.text, 'html.parser')\n players = [a.text for a in soup.find_all('a', {'class':'ui__blueLink'})]\n players_link = [a.get('href') for a in soup.find_all('a', {'class':'ui__blueLink'})]\n trophies = [int(a.text) for a in soup.find_all('div', {'class':'clan__cup'})]\n donations = [int(a.text) for a in soup.find_all('div', {'class':'clan__donation'})]\n roles = [a.text for a in soup.find_all('div', {'class':'clan__memberRoleInner'})]\n clan_table = pd.DataFrame()\n clan_table['players'] = players\n clan_table['players_link'] = players_link\n clan_table['trophies'] = trophies\n clan_table['donations'] = donations\n clan_table['roles'] = roles\n\n URL_wars = URL + '/war/history'\n r = requests.get(URL_wars)\n soup = BeautifulSoup(r.text, 'html.parser')\n wars = soup.find_all('div', {'class':'clanWar__table'})\n for i, war in enumerate(wars):\n players_of_war = [a.find('a').text for a in war.find_all('div', {'class':'clanParticipants__rowContainer'})]\n players_link_of_war = [a.find('a').get('href') for a in war.find_all('div', {'class':'clanParticipants__rowContainer'})]\n battles_of_war = [int(a.get('data-battles')) for a in war.find_all('div', {'class':'clanParticipants__rowContainer'})]\n wins_of_war = [int(a.get('data-wins')) for a in war.find_all('div', 
{'class':'clanParticipants__rowContainer'})]\n cards_of_war = [int(a.get('data-cards')) for a in war.find_all('div', {'class':'clanParticipants__rowContainer'})]\n\n dw = pd.DataFrame()\n dw['players_link'] = players_link_of_war\n dw['battles_of_war' + '_' + str(i)] = battles_of_war\n dw['wins_of_war' + '_' + str(i)] = wins_of_war\n dw['cards_of_war' + '_' + str(i)] = cards_of_war\n clan_table = clan_table.merge(dw, how='left', left_on='players_link', right_on='players_link', copy=False)\n\n clan_table = clan_table.fillna(0)\n def splt(n):\n return n.split('/')[-1]\n clan_table['players_link'] = clan_table['players_link'].apply(splt)\n datastore_client = datastore.Client()\n for i in range(0,clan_table.shape[0]):\n kind = 'Clan'\n code = clan_table['players_link'][i]\n Clan_key = datastore_client.key(kind, code)\n Clan = datastore.Entity(key=Clan_key)\n Clan['Code'] = code\n Clan['Name'] = clan_table['players'][i]\n Clan['Trophies'] = int(clan_table['trophies'][i])\n Clan['Role'] = clan_table['roles'][i]\n Clan['Dontaions'] = int(clan_table['donations'][i])\n Clan['Battles_war_1'] = int(clan_table['battles_of_war_0'][i])\n Clan['Wins_war_1'] = int(clan_table['wins_of_war_0'][i])\n Clan['Cards_war_1'] = int(clan_table['cards_of_war_0'][i])\n Clan['Battles_war_2'] = int(clan_table['battles_of_war_1'][i])\n Clan['Wins_war_2'] = int(clan_table['wins_of_war_1'][i])\n Clan['Cards_war_2'] = int(clan_table['cards_of_war_1'][i])\n Clan['Battles_war_3'] = int(clan_table['battles_of_war_2'][i])\n Clan['Wins_war_3'] = int(clan_table['wins_of_war_2'][i])\n Clan['Cards_war_3'] = int(clan_table['cards_of_war_2'][i])\n Clan['Battles_war_4'] = int(clan_table['battles_of_war_3'][i])\n Clan['Wins_war_4'] = int(clan_table['wins_of_war_3'][i])\n Clan['Cards_war_4'] = int(clan_table['cards_of_war_3'][i])\n datastore_client.put(Clan)\n\n# [START gae_python37_datastore_render_times]\n@app.route('/')\ndef root():\n names = Getlist_clanmembers(50)\n table = ItemTable(names)\n return render_template('index.html', table=table)\n# [END gae_python37_datastore_render_times]\n\n@app.route('/get_stats', methods=['POST', 'GET'])\ndef get_stats():\n if request.method == 'GET':\n get_stats_f()\n return redirect(\"/\", code=302)\n\nif __name__ == '__main__':\n # This is used when running locally only. When deploying to Google App\n # Engine, a webserver process such as Gunicorn will serve the app. This\n # can be configured by adding an `entrypoint` to app.yaml.\n\n # Flask's development server will automatically serve static files in\n # the \"static\" directory. See:\n # http://flask.pocoo.org/docs/1.0/quickstart/#static-files. 
Once deployed,\n    # App Engine itself will serve those files as configured in app.yaml.\n    app.run(host='127.0.0.1', port=8080, debug=True)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"422195419","text":"import time\nimport random\nfrom queue import Queue\nfrom threading import Thread\n\n\nclass Producer(Thread):\n    def __init__(self, queue):\n        super().__init__()\n        self.queue = queue\n\n    def run(self):\n        while True:\n            a = random.randint(0, 10)\n            b = random.randint(80, 100)\n            print('Producer produced {0}, {1}'.format(a, b))\n            self.queue.put((a, b))\n            time.sleep(2)\n\n\nclass Consumer(Thread):\n\n    def __init__(self, queue):\n        super().__init__()\n        self.queue = queue\n\n    def run(self):\n        while True:\n            num_tuple = self.queue.get(block=True)\n            sum_a_b = sum(num_tuple)\n            print('Consumer bought the numbers {0}+{1}={2}'.format(num_tuple[0], num_tuple[1], sum_a_b))\n            time.sleep(random.randint(0, 10))\n\n\nqueue = Queue()\nproducer = Producer(queue)\nconsumer = Consumer(queue)\n\nproducer.start()\nconsumer.start()\nwhile True:\n    time.sleep(1)","sub_path":"redis_test/redis_01.py","file_name":"redis_01.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"2449657","text":"from abc import ABC\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn.modules.loss import _Loss\n\nfrom utils import Parameters\n\n\nclass _BaseLoss(ABC):\n\n    def __init__(self, config: Parameters):\n\n        config_loss = config.get([\"training\", \"loss_function\"])\n        self._setup_loss_function(config_loss)\n\n    def _setup_loss_function(self, loss_name: str) -> None:\n        \"\"\"instantiate torch loss function\"\"\"\n        self.loss_function: _Loss\n        if loss_name == 'mse':\n            self.loss_function = nn.MSELoss()\n        elif loss_name == 'bce':\n            self.loss_function = nn.BCELoss()\n        elif loss_name == 'l1':\n            self.loss_function = nn.L1Loss()\n        elif loss_name == 'smooth_l1':\n            self.loss_function = nn.SmoothL1Loss()\n        else:\n            raise NotImplementedError(\"{} is not currently supported, \\\n                please use mse loss or cross_entropy \\\n                    for mnist\".format(loss_name))\n\n    def compute_loss(self, prediction: torch.Tensor, target: torch.Tensor) -> torch.Tensor:\n        \"\"\"\n        Calculates loss of prediction of student vs.
target from teacher\n Loss function determined by configuration\n\n :param prediction: prediction made by student network on given input\n :param target: target - teacher output on same input\n\n :return loss: loss between target (from teacher) and prediction\n (from student)\n \"\"\"\n loss = 0.5 * self.loss_function(prediction, target)\n return loss\n","sub_path":"components/loss_modules/base_loss.py","file_name":"base_loss.py","file_ext":"py","file_size_in_byte":1521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"167303931","text":"import json\nimport requests\nimport re\nfrom flask_script import Command\n\nfrom app import db\nfrom models import ingredients_juices, Ingredient, Juice\n\n\nclass Seed(Command):\n \"\"\"Seeds the database from the nutritionix api\"\"\"\n\n def create_payload(self, offset):\n payload = {\n \"appId\": \"f5de3947\",\n \"appKey\": \"60fdc2f75e388c8018641eaa2d6f9e91\",\n \"query\": \"Juicy Juice\",\n \"fields\": [\n \"item_name\",\n \"brand_name\",\n \"nf_ingredient_statement\",\n \"nf_calories\",\n \"nf_servings_per_container\",\n \"nf_serving_size_qty\",\n \"nf_serving_size_unit\"\n ],\n \"offset\": offset,\n \"limit\": 50,\n \"sort\": {\n \"field\": \"item_name.sortable_na\",\n \"order\": \"desc\"\n },\n \"filters\": {\n \"brand_id\": \"51db37d0176fe9790a899db2\"\n }\n }\n return payload\n\n def create_sample_database(self, offset):\n url = \"https://api.nutritionix.com/v1_1/search/\"\n headers = {'content-type': 'application/json'}\n\n res = requests.post(url, json=self.create_payload(offset)).json()\n\n for x in res['hits']:\n item_id = x['_id']\n item_name = x['fields']['item_name']\n calories = x['fields']['nf_calories']\n spc = x['fields']['nf_servings_per_container']\n ssq = x['fields']['nf_serving_size_qty']\n ssu = x['fields']['nf_serving_size_unit']\n ing_statement = x['fields']['nf_ingredient_statement']\n\n juice = None\n if Juice.query.filter_by(item_id=item_id).first() is None:\n juice = Juice(\n item_id,\n item_name,\n calories,\n spc,\n ssq,\n ssu\n )\n db.session.add(juice)\n else:\n juice = Juice.query.filter_by(item_id=item_id).first()\n\n if ing_statement is not None:\n ing_list = re.split(r',\\s*(?![^()]*\\))', ing_statement)\n for ing in ing_list:\n ing_entry = None\n if Ingredient.query.filter_by(name=ing).first() is None:\n ing_entry = Ingredient(ing)\n db.session.add(ing_entry)\n db.session.commit()\n juice.ingredients.append(ing_entry)\n else:\n ing_entry = Ingredient.query.filter_by(name=ing).first()\n juice.ingredients.append(ing_entry)\n\n db.session.commit()\n return res\n\n def build_database_entries(self):\n offset = 0\n total = {}\n url = \"https://api.nutritionix.com/v1_1/search/\"\n headers = {'content-type': 'application/json'}\n\n res = requests.post(url, json=self.create_payload(0)).json()\n total[\"total\"] = res['total']\n\n while offset < total[\"total\"]:\n self.create_sample_database(offset)\n offset += 50\n\n def run(self):\n self.build_database_entries()\n","sub_path":"seed_db.py","file_name":"seed_db.py","file_ext":"py","file_size_in_byte":3227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"504295159","text":"\"\"\"\nExercise 1: convert between imperial inches and metric centimeters.\n\"\"\"\ndef convert_unit(a):\n v = float(a[:len(a)-2])\n if a[len(a)-2:] in ['英寸','in']:\n print('You entered {0:.2f} inches, which converts to {1:.2f} centimeters'.format(v,v * 2.54))\n elif a[len(a)-2:] in ['厘米','cm']:\n print('You entered {0:.2f} centimeters, which converts to {1:.2f} inches'.format(v ,v / 2.54))\n else:\n
print('Invalid input format; the value must end with in or cm')\n\n\n# Call the exercise 1 function\n# value = input('Enter the value to convert, ending with in or cm\\n')\n# convert_unit(value)\n\n\n\"\"\"Exercise 2: convert a percentage score to a letter grade.\"\"\"\ndef convert_score_format(score):\n score = float(score)\n if 90 < score <= 100:\n print(\"A\")\n elif 80 < score <= 90:\n print(\"B\")\n elif 70 < score <= 80:\n print(\"C\")\n elif 60 <= score <= 70:\n print(\"D\")\n elif 0 <= score < 60:\n print(\"E\")\n else:\n print('Invalid input format')\n\n\n# Call the exercise 2 function\n# score = input(\"Enter the score to convert\\n\")\n# convert_score_format(score)\n\n\n\"\"\"Exercise 3: read three side lengths and, if they form a triangle, compute its perimeter and area.\"\"\"\ndef cal_triangle(a,b,c):\n a = float(a)\n b = float(b)\n c = float(c)\n if a+b>c and a+c>b and b+c>a:\n perimeter = a + b + c\n p = perimeter / 2\n area = pow(p*(p-a)*(p-b)*(p-c),0.5)\n print(\"The triangle's perimeter is {0:.2f} and its area is {1:.2f}\".format(perimeter,area))\n else:\n print('The given side lengths cannot form a triangle')\n\n\n# Call the exercise 3 function\na = input(\"Enter the first side length: \")\nb = input(\"Enter the second side length: \")\nc = input(\"Enter the third side length: \")\ncal_triangle(a,b,c)","sub_path":"003.py","file_name":"003.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"335331787","text":"import ugfx\nimport pyb\nimport buttons\n\nbuttons.init()\n\nCOLORS = [ugfx.RED, ugfx.GREEN, ugfx.YELLOW, ugfx.WHITE ,ugfx.BLUE]\ni = 0\n\nwhile not buttons.is_triggered(\"BTN_MENU\"):\n\ti = (i + 1) % len(COLORS)\n\tugfx.area(0,0,320,240, COLORS[i])\n\tpyb.delay(60)\n\npyb.hard_reset()","sub_path":"apps/test_app2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"400574366","text":"from copy import deepcopy\nfrom prints import printTable\nfrom tests import mrv_addTest,finalTest,mrv_recover\nfrom forward_BTS import forward_checking\nfrom degree_forward import degree_init\nfrom lcv_BTS import new_node\ndef all_BTS1(table,variables,hints):\n # printTable(table)\n iteration=0\n newTable=deepcopy(table)\n path=[]\n unsigned=deepcopy(variables)\n stack=[]\n degree=degree_init(newTable,hints,variables)\n newNode,result=find_min(newTable,hints,unsigned)\n if result==-1:\n newNode=find_max(newTable,degree,unsigned,variables)\n if newNode!=-1:\n stack.append([newNode,0,-1])\n stack.append([newNode,1,0])\n else:\n newNode,result=new_node(newTable,hints,unsigned[0])\n if result==-1:\n stack.append([newNode,0,-1])\n stack.append([newNode,1,0])\n else:\n #print(newNode)\n stack.append([newNode,1,-1])\n stack.append([newNode,0,0])\n else:\n #print(newNode)\n stack.append([newNode,result,-1])\n while stack:\n current=stack.pop()\n iteration+=1\n path.append(current)\n # print(current)\n unsigned.remove(current[0])\n # test=forward_checking(newTable,current,hints,unsigned,path)\n test=mrv_addTest(newTable,current,hints) #forward checking or not\n # if current[0]==[3,5]:\n # print(test)\n if(test==False):\n while path:\n delete=path.pop()\n unsigned.append(delete[0])\n mrv_recover(newTable,delete,hints)\n if delete[2]==0:\n break\n continue\n else:\n if len(unsigned)==0:\n test=finalTest(newTable,table,hints)\n if(test==False):\n while path:\n delete=path.pop()\n unsigned.append(delete[0])\n mrv_recover(newTable,delete,hints)\n if delete[2]==0:\n break\n continue\n else:\n break\n else:\n newNode,result=find_min(newTable,hints,unsigned)\n if result==-1:\n newNode=find_max(newTable,degree,unsigned,variables)\n if newNode!=-1:\n stack.append([newNode,0,-1])\n stack.append([newNode,1,0])\n else:\n newNode,result=new_node(newTable,hints,unsigned[0])\n
if result==-1:\n stack.append([newNode,0,-1])\n stack.append([newNode,1,0])\n else:\n #print(newNode)\n stack.append([newNode,1,-1])\n stack.append([newNode,0,0])\n else:\n #print(newNode)\n stack.append([newNode,result,-1])\n for hint in hints:\n newTable[hint[0]][hint[1]]=table[hint[0]][hint[1]]\n # print(stack)\n printTable(newTable,iteration)\n print(iteration)\n\ndef find_min(newTable,hints,unsigned):\n # printTable(newTable)\n # print(hints)\n for hint in hints:\n if newTable[hint[0]][hint[1]]==0:\n for i in range(-1,2):\n for j in range(-1,2):\n if(hint[0]+i>=0 and hint[0]+i=0 and hint[1]+j=0 and hint[0]+i=0 and hint[1]+j q_target)\n row_index = np.arange(0, self.batch_size)\n column_index = batch_memory[:, self.n_feature].astype(np.int)\n index = list(zip(row_index, column_index))\n\n ## y_true\n # 真实值\n # DQN直接通过targetnet进行学习,目前通过eval_net进行选择\n q_next = self.target_net.predict(batch_memory[:, -self.n_feature:])\n q_eval4next = self.eval_net.predict(batch_memory[:, -self.n_feature:])\n max_act4next = np.argmax(q_eval4next, axis=1) # the action that brings the highest value is evaluated by q_eval\n\n selected_q_next = q_next[row_index, max_act4next] # Double DQN, select q_next depending on above actions\n\n rewards = batch_memory[:, self.n_feature + 1]\n q_target = rewards + self.gamma * selected_q_next\n\n return q_target, index\n\n def choose_action(self, observation):\n observation = observation[np.newaxis, :]\n\n if np.random.rand() > self.epsilon:\n action = np.random.randint(0, self.n_action)\n else:\n q_eval = self.eval_net.predict(observation)\n action = np.argmax(q_eval)\n return action\n\n @tf.function\n def train_model(self, batch_memory, q_target, index):\n with tf.GradientTape() as tape:\n # q预测值 通过上个状态查表\n q_eval = self.eval_net(batch_memory[:, :self.n_feature])\n q_eval = tf.gather_nd(q_eval, index)\n loss = self.loss(q_target, q_eval)\n\n ## optimize\n # 每次更新预测eval\n gradients = tape.gradient(loss, self.eval_net.trainable_variables)\n self.optimizer.apply_gradients(zip(gradients, self.eval_net.trainable_variables))\n\n def learn(self):\n if self.learning_counter % self.replace_target_iter == 0:\n self.replace_parameters()\n\n ## sample batch\n if self.memory_counter < self.memory_size:\n index = np.random.choice(self.memory_counter, size=self.batch_size)\n else:\n index = np.random.choice(self.memory_size, size=self.batch_size)\n batch_memory = self.memory[index, :]\n ## training model\n q_target, index = self.get_q_target(batch_memory)\n batch_memory, q_target, index = tf.convert_to_tensor(batch_memory), \\\n tf.convert_to_tensor(q_target), \\\n tf.convert_to_tensor(index)\n self.train_model(batch_memory, q_target, index)\n\n self.epsilon = self.epsilon + self.epsilon_increment if self.epsilon < self.epsilon_max else self.epsilon_max\n self.learning_counter += 1\n","sub_path":"PR_DQN/DeepQN.py","file_name":"DeepQN.py","file_ext":"py","file_size_in_byte":5487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"314223295","text":"# _*_ coding:utf-8 _*_\r\n\r\nimport socket\r\nimport time\r\nfrom commons.liveServiceMonitor.kodec import msg_type_pb2, logical_pb2\r\nfrom commons.liveServiceMonitor.public import IPConver\r\nimport struct\r\nimport random\r\n\r\n#### 互动课件类\r\nclass CoursewareClass(object):\r\n def __init__(self, userId):\r\n self.userId = userId\r\n self.courseware_id = \"\"\r\n self.courseware_exercise_id = \"\"\r\n self.current_used_config_info = list()\r\n\r\n # 互动题逻辑函数\r\n def 
coursewareExerciselogic(self, recData):\r\n if recData.result_frame.code == 0:\r\n # 收到获取互动课件配置信息响应\r\n if recData.head_frame.msg_type == msg_type_pb2.COURSEWARE_CONFIG_RES:\r\n courseware_config_res = recData.logical_frame.courseware_config_res\r\n for courseware_config in courseware_config_res.courseware_config:\r\n if courseware_config.is_current_used == True:\r\n # print(\"courseware_config:\", courseware_config)\r\n self.current_used_config_info.append(courseware_config)\r\n return 1631\r\n # 收到推题成功响应\r\n elif recData.head_frame.msg_type == msg_type_pb2.COURSEWARE_EXERCISE_START_RES:\r\n print(\"发布互动题成功:\",recData.logical_frame.courseware_exercise_start_res.courseware_exercise_id)\r\n self.courseware_exercise_id = recData.logical_frame.courseware_exercise_start_res.courseware_exercise_id\r\n return 1641\r\n # 结束互动课件响应\r\n elif recData.head_frame.msg_type == msg_type_pb2.COURSEWARE_EXERCISE_STOP_RES:\r\n print(\"停止互动课件推题:\", self.courseware_exercise_id)\r\n return 1661\r\n else:\r\n print (\"互动题处理报错:\", recData.result_frame.code, recData.result_frame.msg)\r\n\r\n # 获取互动课件配置信息\r\n def pack_coursewareConfigReq(self, token):\r\n reqPack = logical_pb2.RequestPackage()\r\n reqCommFrame = reqPack.head_frame\r\n reqCommFrame.msg_type = msg_type_pb2.COURSEWARE_CONFIG_REQ\r\n reqCommFrame.msg_no = 'wk_tt_' + str(random.randint(1, 999999)) # 采用随机数\r\n reqCommFrame.msg_from_user_id = self.userId\r\n reqCommFrame.msg_to_user_id = \"\"\r\n reqCommFrame.device_type = 0 ## 设备类型,0 pc 1 ios 2 android 3 手机网页 4 pc网页\r\n reqCommFrame.version = 101000012\r\n # reqCommFrame.timestamp = int(time.time() * 1000)\r\n reqCommFrame.ip = IPConver.ip2int(socket.gethostbyname(socket.gethostname()))\r\n reqCommFrame.client_info.os_name = \"windows\"\r\n reqCommFrame.client_info.client_version = \"wkai2133\"\r\n reqCommFrame.extended_fields['from'] = 'multiuser_test'\r\n\r\n # 构造请求逻辑帧\r\n req_message = logical_pb2.RequestMessage()\r\n req_message.token = token\r\n reqBody = req_message.courseware_config_req\r\n\r\n # 对请求数据包进行序列化\r\n reqPack.logical_frame = req_message.SerializeToString()\r\n Msg_flag = int('0x0000', 16)\r\n # 计算请求封包的长度\r\n Msg_len = reqPack.ByteSize() + 2\r\n configMessage = struct.pack('!IH', Msg_len, Msg_flag) + reqPack.SerializeToString()\r\n return configMessage\r\n\r\n # 互动课件老师推题\r\n def pack_CoursewareExerciseStart(self, token, courseware_id,courseware_url,questions_count,\r\n online_user_count,courseware_questions,page_num,countdown_seconds):\r\n reqPack = logical_pb2.RequestPackage()\r\n reqCommFrame = reqPack.head_frame\r\n reqCommFrame.msg_type = msg_type_pb2.COURSEWARE_EXERCISE_START_REQ\r\n reqCommFrame.msg_no = 'wk_tt_' + str(random.randint(1, 99999)) # 采用随机数\r\n reqCommFrame.msg_from_user_id = self.userId\r\n reqCommFrame.msg_to_user_id = \"\"\r\n reqCommFrame.device_type = 0 ## 设备类型,0 pc 1 ios 2 android 3 手机网页 4 pc网页\r\n reqCommFrame.version = 101000012\r\n reqCommFrame.timestamp = int(time.time() * 1000)\r\n reqCommFrame.ip = IPConver.ip2int(socket.gethostbyname(socket.gethostname()))\r\n\r\n # 构造查询请求逻辑帧\r\n req_message = logical_pb2.RequestMessage()\r\n req_message.token = token\r\n reqBody = req_message.courseware_exercise_start_req\r\n reqBody.courseware_id = courseware_id\r\n reqBody.courseware_url = courseware_url\r\n reqBody.questions_count = questions_count\r\n reqBody.online_user_count = online_user_count\r\n for question in courseware_questions:\r\n # 对复合类型(message),调用add方法初始化新实例,再对该实例中的每一个元素进行赋值\r\n new_data = reqBody.courseware_questions.add()\r\n for answer in 
question.right_answer:\r\n new_data.right_answer.append(answer)\r\n new_data.question_type = question.question_type\r\n new_data.submit_type = question.submit_type\r\n\r\n reqBody.page_num = page_num\r\n reqBody.countdown_seconds = countdown_seconds\r\n\r\n # 对请求数据包进行序列化\r\n reqPack.logical_frame = req_message.SerializeToString()\r\n\r\n Msg_flag = int('0x0000', 16)\r\n # 计算请求封包的长度\r\n Msg_len = reqPack.ByteSize() + 2\r\n startMessage = struct.pack('!IH', Msg_len, Msg_flag) + reqPack.SerializeToString()\r\n return startMessage\r\n\r\n # 互动课件老师停止推题\r\n def pack_CoursewareExerciseStop(self, token, courseware_exercise_id):\r\n reqPack = logical_pb2.RequestPackage()\r\n reqCommFrame = reqPack.head_frame\r\n reqCommFrame.msg_type = msg_type_pb2.COURSEWARE_EXERCISE_STOP_REQ\r\n reqCommFrame.msg_no = 'wk_tt_' + str(random.randint(1, 99999)) # 采用随机数\r\n reqCommFrame.msg_from_user_id = self.userId\r\n reqCommFrame.msg_to_user_id = \"\"\r\n reqCommFrame.device_type = 0 ## 设备类型,0 pc 1 ios 2 android 3 手机网页 4 pc网页\r\n reqCommFrame.version = 101000012\r\n reqCommFrame.timestamp = int(time.time() * 1000)\r\n reqCommFrame.ip = IPConver.ip2int(socket.gethostbyname(socket.gethostname()))\r\n\r\n # 构造查询请求逻辑帧\r\n req_message = logical_pb2.RequestMessage()\r\n req_message.token = token\r\n reqBody = req_message.courseware_exercise_stop_req\r\n reqBody.courseware_exercise_id = courseware_exercise_id\r\n\r\n # 对请求数据包进行序列化\r\n reqPack.logical_frame = req_message.SerializeToString()\r\n Msg_flag = int('0x0000', 16)\r\n # 计算请求封包的长度\r\n Msg_len = reqPack.ByteSize() + 2\r\n exerciseStopMessage = struct.pack('!IH', Msg_len, Msg_flag) + reqPack.SerializeToString()\r\n return exerciseStopMessage\r\n","sub_path":"liveTest/simulateSever/liveServiceMonitor/logical_teach/TCoursewareClass.py","file_name":"TCoursewareClass.py","file_ext":"py","file_size_in_byte":6894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"47545383","text":"from django.db import models\nfrom cms.models.pluginmodel import CMSPlugin\nfrom datetime import date\nfrom filer.fields.image import FilerImageField\n\n\nclass Sprint(CMSPlugin):\n RL = \"Real Life Sprint\"\n One = \"1\"\n Two = \"2\"\n Three = \"3\"\n Four = \"4\"\n Five = \"5\"\n Six = \"6\"\n Seven = \"7\"\n Eight = \"8\"\n Nine = \"9\"\n sprint_number_choices = (\n (One, \"1\"),\n (Two, \"2\"),\n (Three, \"3\"),\n (Four, \"4\"),\n (Five, \"5\"),\n (Six, \"6\"),\n (Seven, \"7\"),\n (Eight, \"8\"),\n (Nine, \"9\"),\n (RL, \"Real Life Sprint\"),\n )\n sprint_number = models.CharField(max_length=16, choices=sprint_number_choices, default=\"1\")\n starting_date = models.DateField(default=date.today)\n ending_date = models.DateField(default=date.today)\n description = models.TextField(null=True, blank=True)\n\n def get_absolute_url(self):\n\n return reverse('sprint-view', kwargs={'pk': self.id})\n\n\nclass Tester(CMSPlugin):\n first_name = models.CharField(max_length=32, null=True, blank=True)\n last_name = models.CharField(max_length=32, null=True, blank=True)\n notes = models.TextField()\n\n\nclass Process(CMSPlugin):\n \"\"\"\n Process plugin is designed to describe one company process and the state\n of the test cases. 
A process can have multiple testers.\n \"\"\"\n TS = \"To Start\"\n ID = \"In development\"\n BR = \"Being reviewed\"\n RFT = \"Ready for test\"\n T = \"Being tested\"\n TF = \"Tested, findings in progress\"\n TFD = \"Tested, findings solved\"\n fase_test_cases_choices = (\n (TS, \"To Start\"),\n (ID, \"In development\"),\n (BR, \"Being reviewed\"),\n (RFT, \"Ready for test\"),\n (T, \"Being tested\"),\n (TF, \"Tested, findings in progress\"),\n (TFD, \"Tested, findings solved\"),\n )\n name = models.CharField(max_length=128, null=True, blank=True)\n number = models.CharField(max_length=12, null=True, blank=True)\n fase_test_cases = models.CharField(max_length=24, choices=fase_test_cases_choices, default=\"To Start\")\n testers = models.ManyToManyField(Tester)\n process_model = FilerImageField(null=True, blank=True, related_name=\"process_model\")\n notes = models.TextField()\n","sub_path":"home/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"307356274","text":"import argparse\nimport logging\nimport os\n\nimport mxnet as mx\n\nfrom rcnn.data_iter import ROIIter\nfrom rcnn.detector import Detector\nfrom rcnn.symbol import get_vgg_rcnn_test\nfrom rcnn.tester import pred_eval\nfrom utils.load_data import load_test_rpn_roidb\nfrom utils.load_model import load_param\n\n\ndef test_net(imageset, year, root_path, devkit_path, prefix, epoch, ctx, vis):\n # set up logger\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n\n # load testing data\n voc, roidb = load_test_rpn_roidb(imageset, year, root_path, devkit_path)\n test_data = ROIIter(roidb, batch_size=1, shuffle=False, mode='test')\n\n # load model\n args, auxs = load_param(prefix, epoch, convert=True, ctx=ctx)\n\n # load symbol\n sym = get_vgg_rcnn_test()\n\n # detect\n detector = Detector(sym, ctx, args, auxs)\n pred_eval(detector, test_data, voc, vis=vis)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Test a Fast R-CNN network')\n parser.add_argument('--image_set', dest='image_set', help='can be test',\n default='test', type=str)\n parser.add_argument('--year', dest='year', help='can be 2007, 2010, 2012',\n default='2007', type=str)\n parser.add_argument('--root_path', dest='root_path', help='output data folder',\n default=os.path.join(os.getcwd(), 'data'), type=str)\n parser.add_argument('--devkit_path', dest='devkit_path', help='VOCdevkit path',\n default=os.path.join(os.getcwd(), 'data', 'VOCdevkit'), type=str)\n parser.add_argument('--prefix', dest='prefix', help='model to test with', type=str)\n parser.add_argument('--epoch', dest='epoch', help='model to test with',\n default=8, type=int)\n parser.add_argument('--gpu', dest='gpu_id', help='GPU device to test with',\n default=0, type=int)\n parser.add_argument('--vis', dest='vis', help='turn on visualization', action='store_true')\n args = parser.parse_args()\n return args\n\nif __name__ == '__main__':\n args = parse_args()\n ctx = mx.gpu(args.gpu_id)\n test_net(args.image_set, args.year, args.root_path, args.devkit_path, args.prefix, args.epoch, ctx, args.vis)\n","sub_path":"tools/test_rcnn.py","file_name":"test_rcnn.py","file_ext":"py","file_size_in_byte":2243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"74235321","text":"#!/usr/bin/python3\n\nimport numpy as np\n\ndef desplazarIzq(vector,fin):\n if fin < len(vector):\n for i in range(fin):\n vector[i] = 
vector[i+1]\n\nclass Cola:\n def __init__(self, tam, dtype = int):\n self.cola = np.zeros(shape = (tam), dtype=dtype)\n self.fin = None\n\n def isEmpty(self):\n return self.fin == None\n\n def isFull(self):\n return self.fin == len(self.cola)-1\n\n def queue(self, data):\n if self.isEmpty():\n self.fin = -1\n if not self.isFull():\n self.fin += 1\n self.cola[self.fin] = data\n else:\n raise Exception(\"Could not enqueue element \"+str(data)+\"; the queue is full.\")\n\n def dequeue(self):\n data = None\n if not self.isEmpty():\n data = self.cola[0]\n if self.fin == 0:\n self.fin = None\n else:\n desplazarIzq(self.cola,self.fin)\n self.fin -= 1\n else:\n raise Exception(\"Cannot get an element from the queue. The queue is empty.\")\n return data\n\n def top(self):\n data = None\n if not self.isEmpty():\n data = self.cola[0]\n else:\n raise Exception(\"Cannot get an element from the queue. The queue is empty.\")\n return data\n\n def clone(self):\n nueva = Cola(len(self.cola),self.cola.dtype)\n if not self.isEmpty():\n for i in range(self.fin+1):\n nueva.cola[i] = self.cola[i]\n nueva.fin = self.fin\n return nueva\n\n def getLen(self):\n clen = 0\n if not self.isEmpty():\n clen = self.fin + 1\n return clen\n\n def __repr__(self):\n outrepr = \"\"\n if not self.isEmpty():\n outrepr = ','.join([str(x) for x in self.cola[:self.fin+1]])\n else:\n outrepr = \"The queue is empty.\"\n return outrepr\n\n def mostrar(self):\n mout = \"\"\n if not self.isEmpty():\n for i in range(self.fin+1):\n mout = mout + \",\" + str(self.cola[i])\n mout = mout[1:]\n else:\n mout = \"The queue is empty.\"\n print(mout)\n","sub_path":"cola.py","file_name":"cola.py","file_ext":"py","file_size_in_byte":2197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"103838960","text":"##############################################################################\n#\n# Copyright (c) 2002-2006 Zope Foundation and Contributors.\n# All Rights Reserved.\n# \n# This software is subject to the provisions of the Zope Public License,\n# Version 2.1 (ZPL).
A copy of the ZPL should accompany this distribution.\n# THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL EXPRESS OR IMPLIED\n# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS\n# FOR A PARTICULAR PURPOSE.\n# \n##############################################################################\n\n__doc__='''Package wrapper for Queued Catalogs\n\n$Id$'''\n__version__='$$'[11:-2]\n\n# Placeholder for Zope Product data\nmisc_ = {}\n\nfrom Globals import DTMLFile\nfrom QueueCatalog import QueueCatalog\n\nmanage_addQueueCatalogForm = DTMLFile('dtml/add', globals())\n\ndef manage_addQueueCatalog(self, id, title='', location=None, REQUEST=None):\n \"Add a Catalog Queue\"\n ob = QueueCatalog()\n ob.id = id\n ob.manage_edit(title, location, immediate_removal=1)\n self._setObject(id, ob)\n\n if REQUEST is not None:\n try:\n u = self.DestinationURL()\n except AttributeError:\n u = REQUEST['URL1']\n\n REQUEST.RESPONSE.redirect(u+'/manage_main')\n\ndef initialize(context):\n context.registerClass(\n QueueCatalog,\n permission='Add ZCatalogs',\n constructors=(manage_addQueueCatalogForm, manage_addQueueCatalog, ),\n icon='www/QueueCatalog.gif',\n )\n\n context.registerHelp()\n context.registerHelpTitle('Zope Help')\n","sub_path":"Products.QueueCatalog/trunk/Products/QueueCatalog/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"488091448","text":"#!/usr/bin/env python\n\nimport rospy\nfrom phantomx_gazebo.phantomx import PhantomX\n\nfrom sensor_msgs.msg import LaserScan\n\n\nclass Follow_wall():\n def __init__(self, K = 1):\n rospy.Subscriber('/scan', LaserScan, self._callback_scan)\n self.ranges = []\n self.K = K\n\n\n def _callback_scan(self, msg): \n self.ranges = msg.ranges\n\n def follow_wall(self):\n d_mur = self.ranges[90]\n d_consigne = 0.8\n z = self.K*(d_mur - d_consigne)\n if z > 1 :\n z = 1\n if z < -1 :\n z = -1\n return z\n\n\nif __name__ == '__main__':\n rospy.init_node('follow_wall')\n robot = PhantomX()\n follow = Follow_wall()\n rospy.sleep(1)\n\n\n while not rospy.is_shutdown():\n z = follow.follow_wall()\n robot.set_walk_velocity(0.5, 0, z)\n print(z)\n rospy.sleep(0.1)\n","sub_path":"workspaceRos/src/phantomx/phantomx_gazebo/scripts/follow_wall.py","file_name":"follow_wall.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"504128876","text":"import gino\n\nfrom tgbot.data import config\nimport asyncio\n\nfrom tgbot.utils.db_api.db_gino import db\nfrom tgbot.utils.db_api.schemas.goods import Subcategory, Category, Product\n\n\nasync def get_parent_child(): # get child model with children attribute\n query = Subcategory.outerjoin(Category).select()\n parent = await query.gino.load(Category.distinct(Category.id).load(add_child=Subcategory)).all()\n return parent\n\n\nasync def get_child_parent(category_id: int):\n async with db.transaction():\n query = Subcategory.load(parent=Category).where(Category.id == category_id)\n result = await query.gino.all()\n return result\n\n\nasync def show_all_products(subcategory_id: int):\n async with db.transaction():\n query = Product.load(parent=Subcategory).where(Subcategory.id == subcategory_id)\n result = await query.gino.all()\n return result\n\n\nasync def show_product():\n products = await Product.query.gino.all()\n return 
products\n\n\nasync def test():\n engine = await gino.create_engine(config.POSTGRES_URI)\n db.bind = engine\n result = await show_all_products(2)\n for i in result:\n print(i)\n\n\nif __name__ == \"__main__\":\n loop = asyncio.get_event_loop()\n loop.run_until_complete(test())\n","sub_path":"tgbot/utils/db_api/quick_commands.py","file_name":"quick_commands.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"211747171","text":"\n\n\n\n\ndef bestseq(subseqS, protoP):\n bestStart = 0 \n bestMis = len(subseqS) \n print()\n for start in range(0, len(protoP) - len(subseqS)):\n mismatch = 0\n for i in range (0, len(subseqS)):\n if subseqS[i] != protoP[i+start]:\n mismatch = mismatch + 1\n if mismatch < bestMis:\n bestStart = start\n bestMis = mismatch\n print(\"beststart=\", bestStart)\n print(\"bestMis=\", bestMis)\n print()\n \n\ndef main():\n inputFile = open(\"test.txt\",\"r\")\n x = inputFile.readlines() \n protoP = x[0] \n print(protoP)\n \n for line in x[1:]: \n line = line.strip()\n print(line[:], end=\"\")\n subseqS = line \n bestseq(subseqS, protoP) \n \n \n \nmain()","sub_path":"testFiles/match/match64.py","file_name":"match64.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"168892006","text":"# Copyright 2013 Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n#\n\nimport os\n\nfrom oslo_log import log as logging\n\nfrom trove.common import cfg\nfrom trove.common import instance as trove_instance\nfrom trove.common.notification import EndNotification\nfrom trove.guestagent import backup\nfrom trove.guestagent.common import operating_system\nfrom trove.guestagent.datastore.experimental.cassandra import service\nfrom trove.guestagent.datastore import manager\nfrom trove.guestagent import guest_log\nfrom trove.guestagent import volume\n\n\nLOG = logging.getLogger(__name__)\nCONF = cfg.CONF\n\n\nclass Manager(manager.Manager):\n\n GUEST_LOG_DEFS_SYSTEM_LABEL = 'system'\n\n def __init__(self, manager_name='cassandra'):\n super(Manager, self).__init__(manager_name)\n self._app = None\n self._admin = None\n\n @property\n def status(self):\n return self.app.status\n\n @property\n def app(self):\n if self._app is None:\n self._app = self.build_app()\n return self._app\n\n def build_app(self):\n return service.CassandraApp()\n\n @property\n def admin(self):\n if self._admin is None:\n self._admin = self.app.build_admin()\n return self._admin\n\n @property\n def configuration_manager(self):\n return self.app.configuration_manager\n\n @property\n def datastore_log_defs(self):\n system_log_file = self.validate_log_file(\n self.app.cassandra_system_log_file, self.app.cassandra_owner)\n return {\n self.GUEST_LOG_DEFS_SYSTEM_LABEL: {\n self.GUEST_LOG_TYPE_LABEL: guest_log.LogType.USER,\n self.GUEST_LOG_USER_LABEL: self.app.cassandra_owner,\n self.GUEST_LOG_FILE_LABEL: system_log_file\n }\n }\n\n def guest_log_enable(self, context, log_name, disable):\n if disable:\n LOG.debug(\"Disabling system log.\")\n self.app.set_logging_level('OFF')\n else:\n log_level = CONF.get(self.manager_name).get('system_log_level')\n LOG.debug(\"Enabling system log with logging level: %s\", log_level)\n self.app.set_logging_level(log_level)\n\n return False\n\n def restart(self, context):\n self.app.restart()\n\n def start_db_with_conf_changes(self, context, config_contents):\n self.app.start_db_with_conf_changes(config_contents)\n\n def stop_db(self, context, do_not_start_on_reboot=False):\n self.app.stop_db(do_not_start_on_reboot=do_not_start_on_reboot)\n\n def reset_configuration(self, context, configuration):\n self.app.reset_configuration(configuration)\n\n def do_prepare(self, context, packages, databases, memory_mb, users,\n device_path, mount_point, backup_info,\n config_contents, root_password, overrides,\n cluster_config, snapshot):\n \"\"\"This is called from prepare in the base class.\"\"\"\n self.app.install_if_needed(packages)\n self.app.init_storage_structure(mount_point)\n\n if config_contents or device_path or backup_info:\n\n # FIXME(pmalik) Once the cassandra bug\n # https://issues.apache.org/jira/browse/CASSANDRA-2356\n # is fixed, this code may have to be revisited.\n #\n # Cassandra generates system keyspaces on the first start.\n # The stored properties include the 'cluster_name', which once\n # saved cannot be easily changed without removing the system\n # tables. It is crucial that the service does not boot up in\n # the middle of the configuration procedure.\n # We wait here for the service to come up, stop it properly and\n # remove the generated keyspaces before proceeding with\n # configuration. 
If it does not start up within the time limit\n # we assume it is not going to and proceed with configuration\n # right away.\n LOG.debug(\"Waiting for database first boot.\")\n if (self.app.status.wait_for_real_status_to_change_to(\n trove_instance.ServiceStatuses.RUNNING,\n CONF.state_change_wait_time,\n False)):\n LOG.debug(\"Stopping database prior to initial configuration.\")\n self.app.stop_db()\n self.app._remove_system_tables()\n\n LOG.debug(\"Starting initial configuration.\")\n if config_contents:\n LOG.debug(\"Applying configuration.\")\n self.app.configuration_manager.save_configuration(\n config_contents)\n cluster_name = None\n if cluster_config:\n cluster_name = cluster_config.get('id', None)\n self.app.apply_initial_guestagent_configuration(\n cluster_name=cluster_name)\n\n if cluster_config:\n self.app.write_cluster_topology(\n cluster_config['dc'], cluster_config['rack'],\n prefer_local=True)\n\n if device_path:\n LOG.debug(\"Preparing data volume.\")\n device = volume.VolumeDevice(device_path)\n # unmount if device is already mounted\n device.unmount_device(device_path)\n device.format()\n if os.path.exists(mount_point):\n # rsync exiting data\n LOG.debug(\"Migrating existing data.\")\n device.migrate_data(mount_point)\n # mount the volume\n LOG.debug(\"Mounting new volume.\")\n device.mount(mount_point)\n\n if not cluster_config:\n if backup_info:\n self._perform_restore(backup_info, context, mount_point)\n\n LOG.debug(\"Starting database with configuration changes.\")\n self.app.start_db(update_db=False)\n\n if not self.app.has_user_config():\n LOG.debug(\"Securing superuser access.\")\n self.app.secure()\n self.app.restart()\n\n self._admin = self.app.build_admin()\n\n if not cluster_config and self.is_root_enabled(context):\n self.status.report_root(context)\n\n def pre_upgrade(self, context):\n data_dir = self.app.cassandra_data_dir\n mount_point, _data = os.path.split(data_dir)\n save_etc_dir = \"%s/etc\" % mount_point\n home_save = \"%s/trove_user\" % mount_point\n\n self.app.status.begin_restart()\n self.app.drain()\n self.app.stop_db()\n\n operating_system.copy(\"%s/.\" % self.app.cassandra_conf_dir,\n save_etc_dir,\n preserve=True, as_root=True)\n operating_system.copy(\"%s/.\" % os.path.expanduser('~'), home_save,\n preserve=True, as_root=True)\n\n self.unmount_volume(context, mount_point=mount_point)\n\n return {\n 'mount_point': mount_point,\n 'save_etc_dir': save_etc_dir,\n 'home_save': home_save\n }\n\n def post_upgrade(self, context, upgrade_info):\n self.app.stop_db()\n\n if 'device' in upgrade_info:\n self.mount_volume(context, mount_point=upgrade_info['mount_point'],\n device_path=upgrade_info['device'],\n write_to_fstab=True)\n operating_system.chown(path=upgrade_info['mount_point'],\n user=self.app.cassandra_owner,\n group=self.app.cassandra_owner,\n recursive=True,\n as_root=True)\n\n self._restore_home_directory(upgrade_info['home_save'])\n self._restore_directory(upgrade_info['save_etc_dir'],\n self.app.cassandra_conf_dir)\n\n self._reset_app()\n self.app.start_db()\n self.app.upgrade_sstables()\n self.app.status.end_restart()\n\n def change_passwords(self, context, users):\n with EndNotification(context):\n self.admin.change_passwords(context, users)\n\n def update_attributes(self, context, username, hostname, user_attrs):\n with EndNotification(context):\n self.admin.update_attributes(context, username, hostname,\n user_attrs)\n\n def create_database(self, context, databases):\n with EndNotification(context):\n 
self.admin.create_database(context, databases)\n\n def create_user(self, context, users):\n with EndNotification(context):\n self.admin.create_user(context, users)\n\n def delete_database(self, context, database):\n with EndNotification(context):\n self.admin.delete_database(context, database)\n\n def delete_user(self, context, user):\n with EndNotification(context):\n self.admin.delete_user(context, user)\n\n def get_user(self, context, username, hostname):\n return self.admin.get_user(context, username, hostname)\n\n def grant_access(self, context, username, hostname, databases):\n self.admin.grant_access(context, username, hostname, databases)\n\n def revoke_access(self, context, username, hostname, database):\n self.admin.revoke_access(context, username, hostname, database)\n\n def list_access(self, context, username, hostname):\n return self.admin.list_access(context, username, hostname)\n\n def list_databases(self, context, limit=None, marker=None,\n include_marker=False):\n return self.admin.list_databases(context, limit, marker,\n include_marker)\n\n def list_users(self, context, limit=None, marker=None,\n include_marker=False):\n return self.admin.list_users(context, limit, marker, include_marker)\n\n def enable_root(self, context):\n return self.app.enable_root()\n\n def enable_root_with_password(self, context, root_password=None):\n return self.app.enable_root(root_password=root_password)\n\n def disable_root(self, context):\n self.app.enable_root(root_password=None)\n\n def is_root_enabled(self, context):\n return self.app.is_root_enabled()\n\n def _perform_restore(self, backup_info, context, restore_location):\n LOG.info(\"Restoring database from backup %s.\", backup_info['id'])\n try:\n backup.restore(context, backup_info, restore_location)\n self.app._apply_post_restore_updates(backup_info)\n except Exception as e:\n LOG.error(e)\n LOG.error(\"Error performing restore from backup %s.\",\n backup_info['id'])\n self.app.status.set_status(trove_instance.ServiceStatuses.FAILED)\n raise\n LOG.info(\"Restored database successfully.\")\n\n def create_backup(self, context, backup_info):\n \"\"\"\n Entry point for initiating a backup for this instance.\n The call currently blocks guestagent until the backup is finished.\n\n :param backup_info: a dictionary containing the db instance id of the\n backup task, location, type, and other data.\n \"\"\"\n\n with EndNotification(context):\n backup.backup(context, backup_info)\n\n def update_overrides(self, context, overrides, remove=False):\n LOG.debug(\"Updating overrides.\")\n if remove:\n self.app.remove_overrides()\n else:\n self.app.update_overrides(context, overrides, remove)\n\n def apply_overrides(self, context, overrides):\n \"\"\"Configuration changes are made in the config YAML file and\n require restart, so this is a no-op.\n \"\"\"\n pass\n\n def get_data_center(self, context):\n return self.app.get_data_center()\n\n def get_rack(self, context):\n return self.app.get_rack()\n\n def set_seeds(self, context, seeds):\n self.app.set_seeds(seeds)\n\n def get_seeds(self, context):\n return self.app.get_seeds()\n\n def set_auto_bootstrap(self, context, enabled):\n self.app.set_auto_bootstrap(enabled)\n\n def node_cleanup_begin(self, context):\n self.app.node_cleanup_begin()\n\n def node_cleanup(self, context):\n self.app.node_cleanup()\n\n def node_decommission(self, context):\n self.app.node_decommission()\n\n def cluster_secure(self, context, password):\n os_admin = self.app.cluster_secure(password)\n self._admin = 
self.app.build_admin()\n return os_admin\n\n def get_admin_credentials(self, context):\n return self.app.get_admin_credentials()\n\n def store_admin_credentials(self, context, admin_credentials):\n self.app.store_admin_credentials(admin_credentials)\n self._admin = self.app.build_admin()\n\n def _reset_app(self):\n \"\"\"\n A function for reseting app and admin properties.\n It is useful when we want to force reload application.\n Possible usages: loading new configuration files, loading new\n datastore password\n \"\"\"\n self._app = None\n self._admin = None\n","sub_path":"trove/guestagent/datastore/experimental/cassandra/manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":13765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"110008672","text":"#!/usr/bin/env python\n\"\"\"\nExamples:\n\t%s -i folderRun/Scaffold301_splitVCF_unit1.vcf -o folderRun/Scaffold301_splitVCF_unit1.info.vcf\n\t\n\t%s \n\t\n\t%s \n\t\nDescription:\n\t2013.07.10 program that adds description of info fields/tags that appear in sites but not in VCF header, into VCF header.\n\t\tAt this moment, it just addes description of these fields into VCF header, and does NOT check missing info tags.\n\tLDAF\n\tERATE\n\tAVGPOST\n\tAF_Orig\n\tAC_Orig\n\tAN_Orig\n\n\"\"\"\nimport sys, os, math\n__doc__ = __doc__%(sys.argv[0], sys.argv[0], sys.argv[0])\n\nsys.path.insert(0, os.path.expanduser('~/lib/python'))\nsys.path.insert(0, os.path.join(os.path.expanduser('~/script')))\n\nimport cStringIO, re, csv\nfrom pymodule import ProcessOptions, figureOutDelimiter\nfrom pymodule.utils import sortCMPBySecondTupleValue\nfrom pymodule.yhio.VCFFile import VCFFile\nfrom pymodule.pegasus.mapper.AbstractVCFMapper import AbstractVCFMapper\n\nparentClass = AbstractVCFMapper\nclass AddMissingInfoDescriptionToVCFHeader(parentClass):\n\t__doc__ = __doc__\n\toption_default_dict = parentClass.option_default_dict.copy()\n\toption_default_dict.update({\n\t\t\t\t\t\t}\n\t\t\t\t\t\t)\n\tknownInfoTag2DescriptionLine = {\"LDAF\": \"\"\"##INFO=\\n\"\"\",\\\n\t\t\t\t\"ERATE\": \"\"\"##INFO=\\n\"\"\",\\\n\t\t\t\t\"AVGPOST\": \"\"\"##INFO=\\n\"\"\",\\\n\t\t\t\t\"RSQ\": \"\"\"##INFO=\\n\"\"\",\\\n\t\t\t\t\"THETA\": \"\"\"##INFO=\\n\"\"\",\\\n\t\t\t\t\"AC_Orig\": \"\"\"##INFO=\\n\"\"\",\\\n\t\t\t\t\"AF_Orig\": \"\"\"##INFO=\\n\"\"\",\\\n\t\t\t\t\"AN_Orig\": \"\"\"##INFO=\\n\"\"\",\\\n\t\t\t\t}\n\t\n\tdef __init__(self, inputFnameLs=None, **keywords):\n\t\t\"\"\"\n\t\t\"\"\"\n\t\tparentClass.__init__(self, inputFnameLs=inputFnameLs, **keywords)\n\t\n\t\n\tdef getAllInfoTags(self, inputFname=None, **keywords):\n\t\t\"\"\"\n\t\t2013.07.10\n\t\t\tnot used right now.\n\t\t\"\"\"\n\t\tsys.stderr.write(\"Extracting info tags from VCF %s ...\"%(inputFname))\n\t\tvcfFile = VCFFile(inputFname=inputFname)\n\t\t\n\t\tinfo_tag_set = set()\n\t\tcounter = 0\n\t\treal_counter = 0\n\t\tfor vcfRecord in vcfFile:\n\t\t\tfor info_tag in vcfRecord.info_tag2value:\n\t\t\t\tinfo_tag_set.add(info_tag)\n\t\t\tcounter += 1\n\t\tvcfFile.close()\n\t\t\n\t\tsys.stderr.write(\"%s unique info tags.\\n\"%(len(info_tag_set)))\n\t\treturn info_tag_set\n\t\n\tdef run(self):\n\t\tif self.debug:\n\t\t\timport pdb\n\t\t\tpdb.set_trace()\n\t\t\tdebug = True\n\t\telse:\n\t\t\tdebug =False\n\t\t\n\t\t\n\t\t\n\t\toutputDir = os.path.split(self.outputFname)[0]\n\t\tif outputDir and not os.path.isdir(outputDir):\n\t\t\tos.makedirs(outputDir)\n\t\t\n\t\t\n\t\tself.reader = VCFFile(inputFname=self.inputFname)\n\t\t\n\t\tself.writer 
= VCFFile(outputFname=self.outputFname, openMode='w')\n\t\tself.writer.metaInfoLs = self.reader.metaInfoLs\n\t\tfor info_tag, description in self.knownInfoTag2DescriptionLine.iteritems():\n\t\t\tself.writer.metaInfoLs.append(description)\n\t\tself.writer.header = self.reader.header\n\t\tself.writer.writeMetaAndHeader()\n\t\t\n\t\tcounter = 0\n\t\tfor vcfRecord in self.reader:\n\t\t\tcounter += 1\n\t\t\tself.writer.writeVCFRecord(vcfRecord)\n\t\t\n\t\tself.reader.close()\n\t\tself.writer.close()\n\t\t\n\nif __name__ == '__main__':\n\tmain_class = AddMissingInfoDescriptionToVCFHeader\n\tpo = ProcessOptions(sys.argv, main_class.option_default_dict, error_doc=main_class.__doc__)\n\tinstance = main_class(**po.long_option2value)\n\tinstance.run()","sub_path":"pymodule/pegasus/mapper/modifier/AddMissingInfoDescriptionToVCFHeader.py","file_name":"AddMissingInfoDescriptionToVCFHeader.py","file_ext":"py","file_size_in_byte":3771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"362528479","text":"\"\"\"\nPython makes performing file I/O simple. Take a look\nat how to read and write to files here:\n\nhttps://docs.python.org/3/tutorial/inputoutput.html#reading-and-writing-files\n\"\"\"\n\n# Open up the \"foo.txt\" file (which already exists) for reading\n# Print all the contents of the file, then close the file\n# Note: pay close attention to your current directory when trying to open \"foo.txt\"\n\nf = open(\"foo.txt\", \"r\")\nprint(f.read())\nf.close()\n\n# Open up a file called \"bar.txt\" (which doesn't exist yet) for\n# writing. Write three lines of arbitrary content to that file,\n# then close the file. Open up \"bar.txt\" and inspect it to make\n# sure that it contains what you expect it to contain\n\nn = open(\"bar.txt\", \"w\")\nn.write(\n \"\"\"\n Get up, stand up, stand up for your rights!\n Get up, stand up, don't give up the fight!\n\n Most people think,\n Great God will come from the skies\n Take away everything\n And make everybody feel high\n But if you know what life is worth\n You will look for yours on earth\n And now you see the light\n You stand up for your rights. 
Jah!\n\n Get up, stand up!\n Stand up for your rights!\n \"\"\")\n\nn.close()","sub_path":"src/13_file_io.py","file_name":"13_file_io.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"591132353","text":"import sys\nsys.path.append('grammar-vae')\nimport nltk\nfrom src import reactions_grammar\nimport numpy as np\nimport h5py\nfrom tqdm import tqdm\n\nMAX_LEN = 300\nNCHARS = len(reactions_grammar.GCFG.productions())\n\n\ndef get_reactions_tokenizer(cfg):\n long_tokens = list(filter(lambda a: len(a) > 1, cfg._lexical_index.keys()))\n replacements = ['$', '%', '^', '&'] # ,'&']\n assert len(long_tokens) == len(replacements)\n for token in replacements:\n assert token not in cfg._lexical_index\n\n def tokenize(smiles):\n for i, token in enumerate(long_tokens):\n smiles = smiles.replace(token, replacements[i])\n tokens = []\n for token in smiles:\n try:\n ix = replacements.index(token)\n tokens.append(long_tokens[ix])\n except:\n tokens.append(token)\n return tokens\n\n return tokenize\n\n\ndef to_one_hot(smiles):\n \"\"\" Encode a list of smiles strings to one-hot vectors \"\"\"\n assert type(smiles) == list\n prod_map = {}\n for ix, prod in enumerate(reactions_grammar.GCFG.productions()):\n prod_map[prod] = ix\n tokenize = get_reactions_tokenizer(reactions_grammar.GCFG)\n tokens = list(map(tokenize, smiles))\n parser = nltk.ChartParser(reactions_grammar.GCFG)\n parse_trees = []\n for t in tokens:\n try:\n parse_trees.append(list(parser.parse(t))[0])\n except (ValueError, StopIteration, IndexError) as e:\n pass\n productions_seq = [tree.productions() for tree in parse_trees]\n indices = [np.array([prod_map[prod] for prod in entry], dtype=int)\n for entry in productions_seq if len(entry) <= MAX_LEN]\n one_hot = np.zeros((len(indices), MAX_LEN, NCHARS), dtype=np.float32)\n for i in range(len(indices)):\n num_productions = len(indices[i])\n one_hot[i][np.arange(num_productions), indices[i]] = 1.\n one_hot[i][np.arange(num_productions, MAX_LEN), -1] = 1.\n return one_hot\n\n\ndef grammar_dataset(dataset_len=10000):\n f = open('data/biocad_reactions_dataset.smi', 'r')\n L = []\n\n for line in f:\n line = line.strip()\n L.append(line)\n f.close()\n\n step = 100\n max_len = min(len(L), dataset_len)\n print(f'Trying to save {max_len}/{len(L)} examples')\n OH = None\n for i in tqdm(range(0, max_len, step)):\n # print('Processing: i=[' + str(i) + ':' + str(i+100) + ']')\n onehot = to_one_hot(L[i:i + step])\n if OH is None:\n OH = onehot\n else:\n OH = np.concatenate((OH, onehot), axis=0)\n\n h5f = h5py.File('data/biocad_reactions_grammar_dataset.h5', 'w')\n h5f.create_dataset('data', data=OH)\n h5f.close()\n print(f'Saved {OH.shape[0]}/{len(L)} examples, {max_len - OH.shape[0]} lost')\n\n\nif __name__ == '__main__':\n grammar_dataset()\n","sub_path":"src/make_grammar_dataset.py","file_name":"make_grammar_dataset.py","file_ext":"py","file_size_in_byte":2833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"588442824","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\nimport sys\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n\n\nclass Example(QtWidgets.QMainWindow):\n def __init__(self, parent=None):\n super().__init__()\n self.initUI(self)\n\n def initUI(self, MainWindow):\n # centralwidget\n MainWindow.resize(346, 193)\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n # The Action to quit\n self.toolb_action_Exit = QtWidgets.QAction(\n 
QtGui.QIcon(\"exit.png\"), \"Exit\", self\n )\n self.toolb_action_Exit.setShortcut(\"Ctrl+Q\")\n self.toolb_action_Exit.triggered.connect(self.close)\n\n # The Button\n self.btn_prt = QtWidgets.QPushButton(self.centralwidget)\n self.btn_prt.setGeometry(QtCore.QRect(120, 20, 89, 25))\n self.btn_prt.clicked.connect(lambda: self.doPrint())\n self.btn_quit = QtWidgets.QPushButton(self.centralwidget)\n self.btn_quit.setGeometry(QtCore.QRect(220, 20, 89, 25))\n self.btn_quit.clicked.connect(lambda: self.close())\n\n # The textEdit\n self.textEdit = QtWidgets.QTextEdit(self.centralwidget)\n self.textEdit.setGeometry(QtCore.QRect(10, 60, 321, 81))\n\n # Show the frame\n MainWindow.setCentralWidget(self.centralwidget)\n self.show()\n\n def doPrint(self):\n print(\"TEST doPrint\")\n\n def closeEvent(self, event):\n # Ask a question before to quit.\n replyClosing = QtWidgets.QMessageBox.question(\n self,\n \"Message\",\n \"Are you sure to quit?\",\n QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No,\n QtWidgets.QMessageBox.No,\n )\n\n if replyClosing == QtWidgets.QMessageBox.Yes:\n event.accept()\n else:\n event.ignore()\n\n\ndef main_GUI():\n print(\"start\")\n app = QtWidgets.QApplication(sys.argv)\n imageViewer = Example()\n return app, imageViewer\n\n\nif __name__ == \"__main__\":\n app, imageViewer = main_GUI()\n rc = app.exec_()\n print(\"App end is exit code {}\".format(rc))\n sys.exit(rc)\n","sub_path":"GUI.py","file_name":"GUI.py","file_ext":"py","file_size_in_byte":2079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"432120582","text":"#!/usr/bin/env python3\n# coding=utf-8\nimport socket\nfrom os import listdir\nfrom threading import Thread\nimport base64\n\nIP_ADDR = \"172.16.104.6\"\nPORT = 8081\n# max size 10kb\nMAX_FILE_SIZE = 10000\nextension = []\nclient = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\n\ndef RecvFromServer():\n while True:\n msg = \"\"\n flag = False\n serv = client.recvfrom(MAX_FILE_SIZE + 1)\n msg = serv[0].decode()\n if len(msg) > MAX_FILE_SIZE:\n print(\"\\nFile cant be downloaded since it exceeds file size limit of 10kb\")\n continue\n if msg == \"no such file exists\":\n print(\"\\nThe requested file doesnt exist in the server\\nClient> \", end=\"\")\n continue\n print(\"\\nImage received succesfully\\nClient> \", end=\"\")\n file = open(\n \"received_image_{}.{}\".format(len(listdir(\".\")), extension[0]), \"wb+\"\n )\n img = base64.b64decode(msg)\n file.write(img)\n file.close()\n\n\ndef SendToServer():\n while True:\n msg = input(\"Client> \")\n pos = -1\n for x, i in enumerate(msg):\n if i == \".\":\n pos = x + 1\n break\n if pos == -1:\n print(\"*****Enter the filename as msg, like so :- img1.png*****\")\n else:\n if len(extension) == 0:\n extension.append(msg[pos:])\n else:\n extension[0] = msg[pos:]\n client.sendto(msg.encode(), (IP_ADDR, PORT))\n\n\ndef main():\n print(\"UDP Client Running...\")\n try:\n t1 = Thread(target=RecvFromServer, args=())\n t2 = Thread(target=SendToServer, args=())\n t1.start()\n t2.start()\n except KeyboardInterrupt:\n print(\"Closing connection...\")\n client.close()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"s6/networks_lab/image_transfer/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"615619255","text":"# 
Solving this in Python is simple, but the last test case times out and I don't know how to optimize it, so switch to C++\na=input().split()\nn=int(a[0])\ndel(a[0])\nsum=0\nk=int(input())\nl1=[]\nd1={}\nfor i in range(k):\n t1,t2=input().split()\n l1.append(t1)\n d1[t1]=int(t2)\n sum+=int(t2)\nsum=sum/k\nl2=[]\nfor i in l1:\n if i not in a and d1[i]>sum:\n l2.append(i)\nl2.sort()\nfor i in l2:\n print(i,end=' ')\n","sub_path":"PTA/GPLT/l2019.py","file_name":"l2019.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"458496798","text":"import unittest\nfrom unittest.mock import patch\nimport camel\n\nclass TestCamelCase(unittest.TestCase):\n\n def test_capitalize(self):\n\n input_words = ['abc', 'ABC', 'aBC', 'ABc']\n capitalized = 'Abc'\n\n for word in input_words:\n self.assertEqual(camel.capitalize(word), capitalized)\n\n\n def test_lower(self):\n # this isn't really needed, since we can assume that Python's library functions work correctly :)\n input_words = ['abc', 'ABC', 'aBC', 'ABc']\n lower = 'abc'\n\n for word in input_words:\n self.assertEqual(camel.lowercase(word), lower)\n\n\n def test_camel_case(self):\n\n input_and_expected_outputs = {\n '' : '' ,\n 'hello' : 'hello',\n 'Hello' : 'hello',\n 'Hello world' : 'helloWorld',\n 'HELLO WORLD' : 'helloWorld',\n 'hELLO wORLD' : 'helloWorld',\n 'this is a sentence' : 'thisIsASentence',\n 'Here is a long sentence with many words' : 'hereIsALongSentenceWithManyWords'\n }\n\n for input_val in input_and_expected_outputs.keys():\n self.assertEqual(camel.camel_case(input_val), input_and_expected_outputs[input_val])\n\n\n def test_input_and_output(self):\n\n # Patch the input. Using with context manager automatically takes care of unpatching.\n with patch('builtins.input', return_value='This IS another SENTenCE'):\n\n # And, patch the output\n with patch('builtins.print') as mock_print:\n\n camel.main()\n mock_print.assert_called_with('thisIsAnotherSentence')\n\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"camel_case/test_camelcase.py","file_name":"test_camelcase.py","file_ext":"py","file_size_in_byte":1667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"523680324","text":"n=input()\nn=n.split()\nd=int(n[1])\nm=input()\nm=m.split()\nm.sort()\n\ng=len(m)\n\n\nfor i in range(0,g):\n if(m[i]==str(d) ):\n print(\"Yes\")\n else:\n print(\"No\")\n \n","sub_path":"32pp.py","file_name":"32pp.py","file_ext":"py","file_size_in_byte":181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"223328353","text":"# -*- coding: utf-8 -*-\n\"\"\"Work submit forms.\"\"\"\nfrom flask_wtf import Form\nfrom wtforms.widgets import html_params\nfrom wtforms import StringField, RadioField, TextAreaField\nfrom flask_wtf.file import FileField, FileAllowed, FileRequired\nfrom innovator.user.models import User\nfrom innovator.admin.models import Event\nfrom wtforms.validators import DataRequired, Email, Length, NumberRange\nfrom innovator.team.models import Work\nimport json\n\nclass WorkSubmitForm(Form):\n \"\"\"\n Work submission form. It is displayed in \"Edit Work\" and \"Submit Work\" view.\n\n :cvar StringField work_title: Title for the work. Data is required and length limited within 3 and 200 when validation.\n :cvar FileField work_file: Handles work upload. Only PDF file is accepted.\n :cvar SelectField work_event: Which event is this work submitting to.\n ..
note::\n :class:`flask_wtf.file.FileField` behaves different from other field types. Check its documentation if in unsure.\n .. note::\n ``choices`` attribute of work_event field is not set at instantiation. Dynamically populate it by passing in a list of ``(value, label)`` pairs as you render your page. The ``value`` is then coerced into int as ``wtform`` handles the submission.\n \"\"\"\n\n work_title = StringField('Title',\n validators=[DataRequired(), Length(min=3, max=200)])\n work_event = RadioField('Event', coerce=int, validators=[DataRequired()])\n extra = TextAreaField('Extra Information', validators=[DataRequired()])\n work_file = FileField('Upload Your Work', validators=[FileRequired(), FileAllowed(['pdf'], 'PDF document only.')])\n\n def __init__(self, *args, **kwargs):\n super(WorkSubmitForm, self).__init__(*args, **kwargs)\n self.work = None\n\n def validate(self):\n initial_validation = super(WorkSubmitForm, self).validate()\n if not initial_validation:\n return False\n\n # check syntax\n try:\n extra_list = json.loads(self.extra.data)\n if type(extra_list) is dict:\n # check content\n event = Event.get_by_id(self.work_event.data)\n item_list = event.extra\n for item in item_list:\n if item not in extra_list:\n self.extra.errors.append('Missing one or more fields.')\n return False\n return True\n else:\n self.extra.errors.append('Bad syntax.')\n return False\n except:\n self.extra.errors.append('Bad syntax.')\n return False\n\n def select_table(self, table_class='table', **kwargs):\n field = self.work_event\n kwargs.setdefault('type', 'radio')\n field_id = kwargs.pop('id', field.id)\n html = ['' % html_params(id=field_id, class_=table_class)]\n html.append(\"\"\"\n \n \n \n \"\"\")\n for value, label, checked in field.iter_choices():\n event_info = Event.get_by_id(value)\n html.append(\"\\n\")\n choice_id = '%s-%s' % (field_id, value)\n options = dict(kwargs, name=field.name, value=value, id=choice_id)\n if checked:\n options['checked'] = 'checked'\n html.append('\\n' % (field_id, label))\n html.append('\\n')\n html.append('\\n' % html_params(**options))\n html.append('')\n html.append('
NameExtra InfoSelected
' + ', '.join(event_info.extra) + '
')\n return ''.join(html)\n\n\nclass WorkDeleteForm(Form):\n \"\"\"\n An empty form used in work deletion. Used for CSRF protection.\n \"\"\"\n\n\nclass TeamMemberForm(Form):\n name = StringField('Name', validators=[DataRequired()])\n school_id = StringField('Student ID', validators=[DataRequired(), NumberRange(message=\"Student ID must be an integer.\")])\n email = StringField('Email', validators=[DataRequired(), Email()])\n cell_phone = StringField('Cell Phone', validators=[DataRequired(), NumberRange(message=\"Cell phone number must be an integer.\")])\n department = StringField('Department', validators=[DataRequired()])\n\n def validate(self):\n initial_validation = super(TeamMemberForm, self).validate()\n if not initial_validation:\n return False\n return True\n\n\nclass TeamMemberDeleteForm(Form):\n \"\"\"\n Used for CSRF protection.\n \"\"\"\n","sub_path":"innovator/team/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":4593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"419627245","text":"from python.component.grid.GridPanelComponent import GridPanelComponent\n\nclass ReportApprovalGridComponent(GridPanelComponent):\n selectors = {\n \"report_name\":\"xpath=.//*[contains(@class, 'cell_report_name')]/a[text()='{report}']\",\n \"report_record\": \"xpath=.//a[text()='{reportName}']/parent::div/parent::div//*[contains(text(),'{value}')]\"\n }\n \n def _field_to_column(self, field):\n switcher = {\n \"Status\": \"c0\",\n \"Report Name\": \"c1\",\n \"Sender\": \"c2\",\n \"Amount\": \"c3\",\n \"Submitted Date\" : \"c4\",\n \"Paid\" : \"c5\",\n \"Sent To\" :\"c6\",\n }\n return switcher.get(field, \"nothing\")\n \n def select_report(self, report_name):\n locatorReportName = self.resolve_selector(\"report_name\", report=report_name)\n if self._is_visible(locatorReportName):\n self.click_element(locatorReportName)\n else:\n self.search_item(\"Report name\", report_name)\n if self._is_grid_empty() is False:\n self.click_element(locatorReportName)\n else:\n self.logger.info(\"Report item don't exist\")\n return self\n \n def report_record_should_contain(self, reportName, value):\n locatorReportRecord = self.resolve_selector(\"report_record\", reportName=reportName, value=value)\n self.page_should_contain_element(locatorReportRecord)\n return self\n \n def grid_should_not_contain_report(self, reportName):\n report_element = self.resolve_selector(\"report_name\", report = reportName)\n self.page_should_not_contain_element(report_element)\n return self\n \n def grid_should_contain_report(self, reportName):\n report_element = self.resolve_selector(\"report_name\", report = reportName)\n self.page_should_contain_element(report_element)\n return self\n \n def grid_should_be_filtered_by(self, type_filter, email):\n allType = {\n \"Pending Reports\" : 1,\n \"My Approval History\" : 2,\n \"Approved Reports\" : 3,\n \"Approved & Unpaid Reports\" : 4,\n \"Approved & Paid Reports\" : 5,\n \"Rejected Reports\" : 6,\n \"Recalled Reports\" : 7,\n \"Paid Reports\" : 8,\n \"Unpaid Reports\" : 9\n }\n type_number = 0\n for key,val in allType.items():\n if type_filter == key:\n type_number = val\n break\n allRows = self._get_report_lists()\n for i in range(len(allRows)):\n if type_number is 1:\n self.buildIn.should_be_equal_as_strings(allRows[i].status, 'Pending')\n elif type_number is 2:\n self.buildIn.should_be_equal_as_strings(allRows[i].sender, email)\n elif type_number is 3:\n 
self.buildIn.should_be_equal_as_strings(allRows[i].status, 'Approved')\n elif type_number is 4:\n self.buildIn.should_be_equal_as_strings(allRows[i].status, 'Approved')\n self.buildIn.should_be_equal_as_strings(allRows[i].paid_info, 'No')\n elif type_number is 5:\n self.buildIn.should_be_equal_as_strings(allRows[i].status, 'Approved')\n self.buildIn.should_be_equal_as_strings(allRows[i].paid_info, 'Yes')\n elif type_number is 6:\n self.buildIn.should_be_equal_as_strings(allRows[i].status, 'Rejected')\n elif type_number is 7:\n self.buildIn.should_be_equal_as_strings(allRows[i].status, 'Recalled')\n elif type_number is 8:\n self.buildIn.should_be_equal_as_strings(allRows[i].paid_info, 'Yes')\n elif type_number is 9:\n self.buildIn.should_be_equal_as_strings(allRows[i].paid_info, 'No')\n return self\n \n def _get_report_lists(self):\n changes = []\n allRows = self._search_for_all_rows()\n for i in range(0, len(allRows)):\n row = allRows[i]\n changes.append(self._convert_row_to_report(i, row))\n return changes\n \n def _convert_row_to_report(self, index, row):\n reportId = row._retrieve_cell_string_value(index, \"id\")\n status = row._retrieve_cell_string_value(index, \"status\")\n name = row._retrieve_cell_string_value(index, \"name\")\n amount = row._retrieve_cell_string_value(index, \"amount\")\n sender = row._retrieve_cell_string_value(index, \"sender\")\n submittedDate = row._retrieve_cell_string_value(index, \"submitted_date\")\n submittedtoEmail = row._retrieve_cell_string_value(index, \"submitted_to_email\")\n paid_info = row._retrieve_cell_string_value(index, \"paid_info\")\n sentTo = row._retrieve_cell_string_value(index, \"sent_to\")\n from python.entity.Report import ApproveReport\n artifactChange = ApproveReport(reportId, status, name, sender, amount, submittedDate, submittedtoEmail, paid_info, sentTo)\n return artifactChange","sub_path":"expense-ui-robot-tests/PythonExpenseAutomationTest/python/component/pagepart/grid/ReportApprovalGridComponent.py","file_name":"ReportApprovalGridComponent.py","file_ext":"py","file_size_in_byte":5044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"88484683","text":"import json\n\nimport hashlib\n\nfrom django.utils import encoding\nfrom django.conf import settings as djangosettings\nfrom django.core.cache import cache\n\nnotcachedRemoteAddress = ['188.184.185.129']\n\ndef deleteCacheTestData(request,data):\n### Filtering data\n if request.user.is_authenticated() and request.user.is_tester:\n return data\n else:\n if data is not None:\n for key in data.keys():\n if '_test' in key:\n del data[key]\n return data\n\n\nimport socket\nimport uuid\nimport logging\n\ndef cacheIsAvailable(request):\n hostname = \"bigpanda-redis.cern.ch\"\n port = \"6379\"\n try:\n host = socket.gethostbyname(hostname)\n s = socket.create_connection((host, port), 2)\n if(s):\n cache_key = uuid.uuid4()\n from core.views import DateEncoder\n data = json.dumps({\"message\":\"ping-pong\"}, cls=DateEncoder)\n timeout = 0.5\n cache.set(cache_key, data, timeout)\n data = cache.get(cache_key, None)\n return True\n except Exception as e:\n logger = logging.getLogger('bigpandamon-error')\n message = \"Internal Servicer Error: %s | Error in Reddis: %s\" %(str(request),e)\n #e = 'Internal Server Error: Reddis! 
'+ e\n logger.error(message)\n pass\n return False\n\ndef getCacheEntry(request, viewType, skipCentralRefresh = False, isData = False):\n isCache = cacheIsAvailable(request)\n if isCache:\n is_json = False\n\n # We do this check to always rebuild cache for the page when it called from the crawler\n if (('HTTP_X_FORWARDED_FOR' in request.META) and (request.META['HTTP_X_FORWARDED_FOR'] in notcachedRemoteAddress) and\n skipCentralRefresh == False):\n return None\n\n request._cache_update_cache = False\n if ((('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('application/json'))) or (\n 'json' in request.GET)):\n is_json = True\n key_prefix = \"%s_%s_%s_\" % (is_json, djangosettings.CACHE_MIDDLEWARE_KEY_PREFIX, viewType)\n if isData==False:\n try:\n if request.method == \"POST\":\n path = hashlib.md5(encoding.force_bytes(encoding.iri_to_uri(request.get_full_path() + '?' + request.body)))\n else:\n path = hashlib.md5(encoding.force_bytes(encoding.iri_to_uri(request.get_full_path())))\n except: path = hashlib.md5(encoding.force_bytes(encoding.iri_to_uri(request.get_full_path())))\n cache_key = '%s.%s' % (key_prefix, path.hexdigest())\n return cache.get(cache_key, None)\n else:\n if 'harvester' in request.META['PATH_INFO']:\n is_json = False\n key_prefix = \"%s_%s_%s_\" % (is_json, djangosettings.CACHE_MIDDLEWARE_KEY_PREFIX, viewType)\n cache_key = '%s' % (key_prefix)\n return cache.get(cache_key, None)\n else:\n return None\n\n\ndef setCacheEntry(request, viewType, data, timeout, isData = False):\n isCache = cacheIsAvailable(request)\n if isCache:\n is_json = False\n request._cache_update_cache = False\n if ((('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('application/json'))) or (\n 'json' in request.GET)):\n is_json = True\n key_prefix = \"%s_%s_%s_\" % (is_json, djangosettings.CACHE_MIDDLEWARE_KEY_PREFIX, viewType)\n if isData==False:\n try:\n if request.method == \"POST\":\n path = hashlib.md5(encoding.force_bytes(encoding.iri_to_uri(request.get_full_path() + '?' 
+ request.body)))\n else:\n path = hashlib.md5(encoding.force_bytes(encoding.iri_to_uri(request.get_full_path())))\n except: path = hashlib.md5(encoding.force_bytes(encoding.iri_to_uri(request.get_full_path())))\n cache_key = '%s.%s' % (key_prefix, path.hexdigest())\n else:\n cache_key = '%s' % (key_prefix)\n cache.set(cache_key, data, timeout)\n else:None\n\ndef preparePlotData(data):\n oldPlotData = data\n if isinstance(oldPlotData, dict):\n newPlotData = {}\n for key, value in oldPlotData.iteritems():\n newPlotData[str(key)] = float(value)\n else:\n newPlotData = oldPlotData\n return newPlotData","sub_path":"core/libs/cache.py","file_name":"cache.py","file_ext":"py","file_size_in_byte":4402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"646667395","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('articles', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Entry',\n fields=[\n ('id', models.AutoField(serialize=False, verbose_name='ID', primary_key=True, auto_created=True)),\n ('registered_time', models.DateTimeField(auto_now_add=True)),\n ],\n options={\n 'verbose_name': 'Anmälning',\n 'verbose_name_plural': 'Anmälningar',\n },\n ),\n migrations.CreateModel(\n name='EntryDeadline',\n fields=[\n ('id', models.AutoField(serialize=False, verbose_name='ID', primary_key=True, auto_created=True)),\n ('description_sv', models.TextField(verbose_name='beskrivning')),\n ('entry_from', models.DateTimeField(verbose_name='anmälningsstart')),\n ('entry_to', models.DateTimeField(verbose_name='anmälningsslut')),\n ('enable_unregistration', models.BooleanField(verbose_name='kan avanmäla sig')),\n ],\n options={\n 'verbose_name': 'Anmälningsperiod',\n 'verbose_name_plural': 'Anmälningsperioder',\n },\n ),\n migrations.CreateModel(\n name='Event',\n fields=[\n ('id', models.AutoField(serialize=False, verbose_name='ID', primary_key=True, auto_created=True)),\n ('headline', models.CharField(verbose_name='rubrik', max_length=255)),\n ('lead', models.TextField(verbose_name='ingress')),\n ('body', models.TextField(verbose_name='brödtext')),\n ('visible_from', models.DateTimeField()),\n ('visible_to', models.DateTimeField()),\n ('approved', models.BooleanField(verbose_name='godkänd', default=False)),\n ('created', models.DateTimeField(editable=False)),\n ('modified', models.DateTimeField(editable=False)),\n ('start', models.DateTimeField(verbose_name='start')),\n ('end', models.DateTimeField(verbose_name='slut')),\n ('enable_registration', models.BooleanField(verbose_name='kan anmäla sig')),\n ('registration_limit', models.IntegerField(verbose_name='max antal anmälningar')),\n ('tags', models.ManyToManyField(to='articles.Tag', verbose_name='tag', blank=True)),\n ],\n options={\n 'verbose_name_plural': 'Arrangemanger',\n 'verbose_name': 'Arrangemang',\n 'permissions': (('can_approve_article', 'Can approve article'),),\n },\n ),\n ]\n","sub_path":"wsgi/iportalen_django/events/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"335313361","text":"# niestety mam totalnie urwanie głowy w pracy więc mogę zrobić zadania tylko po łebkach i wersja minimum\n\nimport random\n#import Faker\n\n#faker.Faker()\nfrom faker import Faker\nfake = Faker()\n\nclass 
BaseContact:\n def __init__(self, name, surname, phone, email):\n self.name = name\n self.surname = surname\n self.email = email\n self.phone = phone\n def __str__(self):\n return f'{self.name} {self.surname} {self.email} {self.phone}'\n def contact(self):\n return f'Kontaktujesz się z {self.name} {self.surname} {self.phone}'\n \n @property\n def label_lenght(self):\n return len(self.name) + len(self.surname) + 1\n\n\nclass BusinessContact(BaseContact):\n def __init__(self, company, bphone, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.company = company\n self.bphone = bphone\n def __str__(self):\n return f'{self.name} {self.surname} {self.email} {self.bphone}'\n def contact(self):\n return f'Kontaktujesz się z {self.name} {self.surname} {self.bphone}'\n\n\ndef new_contact(x):\n for i in range(0,x):\n contact_type = random.randint(1,2) \n if contact_type == 1:\n new_contact_name_x = fake.name() \n position = new_contact_name_x.find(\" \")\n new_contact_name = new_contact_name_x[0:position]\n new_contact_surname = new_contact_name_x[position+1:]\n new_contact_phone = random.randint(600000000,699999999)\n new_contact_email = new_contact_name + \".\" + new_contact_surname + \"@gmail.com\" \n new_contact_data = BaseContact(name = new_contact_name, surname = new_contact_surname, phone = new_contact_phone, email= new_contact_email)\n else:\n new_contact_name_x = fake.name() \n position = new_contact_name_x.find(\" \")\n new_contact_name = new_contact_name_x[0:position]\n new_contact_surname = new_contact_name_x[position+1:]\n new_contact_phone = random.randint(600000000,699999999)\n new_contact_company = fake.text()[0:10]\n new_contact_email = new_contact_name + \".\" + new_contact_surname + \"@\" + new_contact_company.replace(\" \", \"\") + \".com\" \n new_contact_data = BusinessContact(name = new_contact_name, surname = new_contact_surname, bphone = new_contact_phone, phone = None, email= new_contact_email, company = new_contact_company)\n contact_list.append(new_contact_data)\n i = i + 1\n\n\n\n\n#typ1 = BaseContact(name = \"Jan\", surname = \"Kowalski\", firma = firma\"JTI\", stanowisko= \"CFO\", email= \"j@jti.com\")\n#typ2 = BaseContact(name = \"Anna\", surname = \"Woźniak\", firma = \"Zoetis\", stanowisko= \"Marketing Manager\", email= \"a@zoetis.com\")\n#typ3 = BaseContact(name = \"Waldemar\", surname = \"Szczecki\", firma = \"Bakalland\", stanowisko= \"MA\", email= \"j@bakalland.pl\")\n#typ4 = BaseContact(name = \"Iza\", surname = \"Nowak\", firma = \"Ipsen\", stanowisko= \"Programista\", email= \"j@ipsen.com\")\n#typ5 = BaseContact(name = \"Leszek\", surname = \"LaLa\", firma = \"Polski Fundusz Rozwoju\", stanowisko= \"Stażysta\", email= \"j@pfr.pl\")\n\ncontact_list = []\n\nnew_contact(5)\n\n#for typki in contact_list:\n# print(f\"{typki.name} {typki.surname} {typki.email}\")\n\nfor typki in contact_list:\n #print(typki)\n print(typki.contact())\n print(typki.label_lenght)\n\n#print(BaseContact.contact(typ2))\n#print(typ1.contact())\n#print(typ1.label_lentht)","sub_path":"wizytowki.py","file_name":"wizytowki.py","file_ext":"py","file_size_in_byte":3416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"342907304","text":"#!/usr/bin/python3\n# coding=utf-8\n\n\n\"\"\"\n完成发布 发送邮件给测试\n\"\"\"\n\n\n__author__ = 'LCD'\n\n\nimport smtplib\nfrom email.mime.text import MIMEText\nfrom email.header import Header\n\ndef email_send(fname, fpwd, toname, smtp, app_address=''):\n subject = \"\"\"\n
<h3>A new test package has been uploaded</h3>\n    <a href=\"%s\">Test package link: %s</a>
\n \"\"\" % (app_address.replace(\"https\", \"http\"), app_address.replace(\"https\", \"http\"))\n # Https 会有时发送失败\n msg = MIMEText(subject, 'html', 'utf-8')\n msg['from'] = fname\n msg['to'] = \",\".join(toname)\n msg['subject'] = Header('新的测试包已经上传', 'utf-8')\n print('---------->发送邮件')\n try:\n server = smtplib.SMTP_SSL()\n server.connect(smtp)\n server.login(fname, fpwd)\n server.sendmail(fname, toname, msg.as_string())\n server.quit()\n except Exception as e:\n print('---------->发送邮件失败')\n raise Exception('发送邮件失败')\n else:\n print('----------> 发送邮件成功')\n finally:\n pass","sub_path":"code/send_email.py","file_name":"send_email.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"217951706","text":"import csv\nimport os\n\n\ncsv_files_names=[]\nwith open(\"new_excel_new.csv\",\"w\") as f :\n for file in os.listdir('.'):\n\n if file.endswith(\".csv\") and not file.endswith(\"new.csv\"):\n csv_files_names.append(file)\n\n\n with open(csv_files_names[0]) as r :\n read = csv.reader(r)\n lines = list(read)\n writer_csv = csv.writer(f)\n writer_csv.writerows(lines)\n\n for file_name in csv_files_names[1::]:\n with open(file_name,\"r\") as r:\n read = csv.reader(r)\n lines = list(read)[1::]\n writer_csv = csv.writer(f)\n writer_csv.writerows(lines)\n\n","sub_path":"combine_csv_files.py","file_name":"combine_csv_files.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"104608846","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\n\nfrom sqlalchemy.dialects import postgresql\n\nfrom cubes_lite.errors import ArgumentError\nfrom cubes_lite.loggers import get_logger\n\nfrom .request import Request\nfrom .query import QueryBuilder\n\n__all__ = (\n 'Browser',\n)\n\n\nlogger = get_logger()\n\n\nclass Browser(object):\n \"\"\"Class for browsing data aggregations\n\n :Attributes:\n * `model` - a set of cubes for browsing data\n \"\"\"\n\n default_request_cls = Request\n default_query_builder_cls = QueryBuilder\n\n query_types_registry = []\n\n log_queries = False\n\n def __init__(self, model, **options):\n super(Browser, self).__init__()\n\n if not model:\n raise ArgumentError('No model was given for aggregation browser')\n\n self.model = model\n self.log_queries = options.get('log_queries') or self.log_queries\n\n self._init_query_types_registry()\n\n def _init_query_types_registry(self):\n registry = {}\n\n for request_type in self.query_types_registry:\n if request_type.type_ in registry:\n raise ArgumentError(\n 'Duplicate key in query registration: \"{}\"'\n .format(request_type.type_)\n )\n\n request_cls = request_type.request_cls or self.default_request_cls\n response_cls = request_type.response_cls or request_cls.response_cls\n query_builders = request_type.query_builder_cls_desc or self.default_query_builder_cls\n\n registry[request_type.type_] = (\n request_cls,\n response_cls,\n query_builders,\n )\n\n self.query_types_registry = registry\n\n def browse(\n self, request_type, conditions=None, aggregates=None,\n drilldown_levels=None,\n **options\n ):\n \"\"\"\n Arguments:\n\n * `aggregates` - list of aggregate measures. 
By default all\n cube's aggregates are included in the result.\n * `drilldown_levels` - dimensions' levels through which to drill-down\n\n Returns a :class:`Response` object.\n \"\"\"\n\n request_cls = self.get_request_cls(request_type)\n\n request = request_cls(\n self.model,\n type_=request_type,\n conditions=conditions,\n aggregates=aggregates,\n drilldown=drilldown_levels,\n **options\n )\n\n return self._browse(request)\n\n def _browse(self, request):\n query_builder = self.get_query_builder(request)\n query = query_builder.build()\n meta_data = query_builder.get_meta_data(query)\n\n data = self.execute_query(query, label=str(request.type_))\n\n response_cls = self.get_response_cls(request.type_)\n return response_cls(request, data, **meta_data)\n\n def execute_query(self, query, label=None):\n if self.log_queries:\n label = 'SQL({}):'.format(label if label else 'info')\n query = query.compile(\n dialect=postgresql.dialect(),\n compile_kwargs={'literal_binds': True},\n )\n logger.debug('%s\\n%s\\n', label, query)\n\n return self._execute_query(query)\n\n def _execute_query(self, statement):\n raise NotImplementedError()\n\n def get_query_builder(self, request):\n info = self.query_types_registry.get(request.type_)\n if not info:\n return self.default_query_builder_cls\n\n desc = info[2]\n if not isinstance(desc, dict):\n query_builder_cls = desc\n else:\n cubes = request.get_related_cubes()\n cube_names = [cube.name for cube in cubes]\n key = tuple(sorted(cube_names))\n\n query_builder_cls = desc.get(key) or self.default_query_builder_cls\n\n return query_builder_cls(request, self)\n\n def get_request_cls(self, request_type):\n info = self.query_types_registry.get(request_type)\n if not info:\n return self.default_request_cls\n return info[0]\n\n def get_response_cls(self, request_type):\n info = self.query_types_registry.get(request_type)\n if not info:\n return self.default_request_cls.response_cls\n return info[1]\n","sub_path":"cubes_lite/query/browser.py","file_name":"browser.py","file_ext":"py","file_size_in_byte":4265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"134131421","text":"import requests, json\n\n\ndegree_sign= u'\\N{DEGREE SIGN}'\n\n\n\ndef connect():\n\tfull_url = (\"http://api.openweathermap.org/data/2.5/weather?zip=32566&units=imperial&us&appid=e4f9c3989f6e3d0f337d8f18c8995bef\")\n\treq = requests.get(full_url)\n\twx = req.json()\n\n\ttry:\n\t\trequests.get(full_url)\n\t\twx = req.json()\n\t\tprint(wx)\n\t\t\n\n\texcept:\n\t\t\tprint(\"An unexpected error has occured, please ensure you have entered the correct zip code and try again\")\n\nget_weather()","sub_path":"Final_Project/api2.py","file_name":"api2.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"558523067","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport datetime\nimport pathlib\n\nimport pytz\n\nAPP_URL_PREFIX = \"/rasp-water\"\n\nTIMEZONE_OFFSET = \"+9\"\nTIMEZONE = datetime.timezone(datetime.timedelta(hours=int(TIMEZONE_OFFSET)), \"JST\")\nTIMEZONE_PYTZ = pytz.timezone(\"Asia/Tokyo\") # schedule 用\n\nSTATIC_FILE_PATH = pathlib.Path(__file__).parent.parent.parent / \"dist\" / \"rasp-water\"\n\nSCHEDULE_DATA_PATH = pathlib.Path(__file__).parent.parent / \"data\" / \"schedule.dat\"\nLOG_DB_PATH = pathlib.Path(__file__).parent.parent / \"data\" / \"log.db\"\n\nSTAT_DIR_PATH = pathlib.Path(\"/dev/shm\") / 
\"rasp-water\"\n","sub_path":"flask/lib/webapp_config.py","file_name":"webapp_config.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"472737021","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Sep 27 01:21:53 2020\n\n@author: Franco Chiesa Docampo \n\nFunción de procesamiento para calcular el z-score dF/F de la senal adquirida mediante el sistema de Fiber Photometry.\n\nPara el desarrollo de esta función se adaptó en parte código de la siguiente referencia:\n \n(1) Martianova, E., Aronson, S., Proulx, C.D. Multi-Fiber Photometry \n to Record Neural Activity in Freely Moving Animal. J. Vis. Exp. \n (152), e60278, doi:10.3791/60278 (2019)\n https://www.jove.com/video/60278/multi-fiber-photometry-to-record-neural-activity-freely-moving\n\n\"\"\"\n\n# Procesamiento\nimport scipy\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom get_zdFF import get_zdFF\n\n# Cargar datos | Ajustar directorio de forma acorde al usuario\nfolder = 'inserte-directorio' \n# Nombre del archivo \nfile_name = 'inserte-nombre-de-archivo' \n \n# Lectura\ndf = pd.read_csv(folder+file_name,index_col=False) \n \ntime = df['time'][0:] # Se extrae el vector tiempo del archivo \"df\".\nintensity = df['intensity'][0:] # Se extrae el vector intensidad del archivo \"df\".\n \ntime[:] = (time[:] - time[0])/1000; # Convierte el tiempo a tiempo relativo (s).\n\ndataOdd = intensity[1::2] # Separar por índice impar | L470 \ndataEven = intensity[2::2] # Separar por índice par | L410\n \nraw_signal = dataEven\nraw_reference = dataOdd\n \n\"\"\"\n \n1) Calcular la SNR de la señal de fluorescencia correspondiente a la de 410 nm y 470 nm.\n\n\"\"\" \n\navg_signal= np.mean(raw_signal)\nstdev_signal = np.std(raw_signal)\n\nSNR_signal = avg_signal/stdev_signal\n\nprint(f'La SNR de la señal correspondiente al LED de 470 nm es {SNR_signal}.')\n\navg_ref = np.mean(raw_reference)\nstdev_ref = np.std(raw_reference)\n\nSNR_ref = avg_ref/stdev_ref\n\nprint(f'La SNR de la señal correspondiente al LED de 410 nm es {SNR_ref}.')\n \n# Grafica la data sin procesar.\nfig = plt.figure(figsize=(16, 10))\n \nax1 = fig.add_subplot(211)\nax1.plot(raw_signal,'blue',linewidth=1.5)\n \nax2 = fig.add_subplot(212)\nax2.plot(raw_reference,'purple',linewidth=1.5)\n \n# ----------------------------------------------------------------------------------------------------\n \n\"\"\" \n \n2) Revisa que los intervalos entre muestras del vector, que contiene la fluorescencia correspondiente \n a ambos haces de luz, sea consistente con los cuadros por segundo especificados. Si se observan \n picos en la resta entre un elemento del vector tiempo y su consecutivo, significa que los FPS \n de la cámara sufrieron una caída.\n \n Si se adquiere a 40 FPS, se buscan los elementos que hayan sido adquiridos por fuera 50 milisegundos\n respecto del dato anterior y se los elimina. \n \n\"\"\"\n \n# Encuentra el tiempo que efectivamente transcurre entre cada muestra.\nIFI = np.diff(time[:])\n\n# Revisar visualmente los picos que figuren al graficar IFI. 
Los picos abruptos significan que los FPS de la cámara sufrieron una caida.\nfig = plt.figure(figsize=(16, 10))\nax1 = fig.add_subplot(211)\nax1.plot(IFI,'red',linewidth=1.5)\n \nfig = plt.figure(figsize=(30, 20))\nplt.hist(IFI*1000, bins = 10000)\nplt.show()\n \n# Se determina el umbral adecuado para IFI, el cual es 1/[FPS*0.5] (s)\n \nfrom scipy import signal\nfrom scipy.signal import find_peaks\n \nlocs, peaks = find_peaks(IFI, height=0.05) # 1/[40*0.5] = 0.05\n \ndf.iloc[locs,:] = 0.0000000001 # Los cuadros malos pasan a valer 0.0000000001 para luego poder identificarlos facilmente y eliminarlos.\n \ndata_corr = df[df != 0.0000000001]\ndata_corr = data_corr.dropna() # drop NaN.\n \nintensity_corr = data_corr['intensity'][0:]\n \n\"\"\"\n \n3) Se extraen los valores de fluorescencia registrados correspondientes al haz de 470 nm y 410 nm.\n \n\"\"\"\n \n# Una vez corregidos los datos se pasa a la fase de procesamiento.\nraw_signal = intensity_corr[2::2] # Separar por datos con índice par.\nraw_reference = intensity_corr[1::2] # Separar por datos con índice impar.\n \nraw_reference = raw_reference.iloc[:-1] # Se lo usa para emparejar el largo de \"raw_reference\" respecto de \"raw_signal\".\n\n# ----------------------------------------------------------------------------------------------------\n \n\"\"\"\n \n4) Cálculo directo de la zdFF (z-score dF/F signal based on fiber photometry calcium-idependent and calcium-dependent signals).\n \n\"\"\"\n \nzdFF = get_zdFF(raw_reference,raw_signal) \n\n# Grafica el resultado.\nfig = plt.figure(figsize=(16, 8))\nax1 = fig.add_subplot(111)\nax1.plot(zdFF,'black',linewidth=1.5)\n# ----------------------------------------------------------------------------------------------------\n \n\"\"\"\n \nCálculo de la zdFF de forma gradual, pudiendo observar los gráficos paso a paso. \n \n5) Suaviza cada señal usando un algoritmo de media móvil.\n\n\"\"\"\n \nfrom smooth_signal import smooth_signal\n \nsmooth_win = 10\nsmooth_reference = smooth_signal(raw_reference, smooth_win)\nsmooth_signal = smooth_signal(raw_signal, smooth_win)\n \nfig = plt.figure(figsize=(16, 10))\nax1 = fig.add_subplot(211)\nax1.plot(smooth_signal,'blue',linewidth=1.5)\nax2 = fig.add_subplot(212)\nax2.plot(smooth_reference,'purple',linewidth=1.5)\n \n\"\"\"\n \n6) Realiza una corrección de línea de base para cada señal usando un algoritmo denominado Adaptative \n Reweigthed Penalized Least Squares (AirPLS).\n \n \"\"\"\n \nfrom airPLS import airPLS\n \nlambd = 5e4 # Lambda es ajustable para mejorar la estimación de la línea de base.\nporder = 1\nitermax = 50\nr_base=airPLS(smooth_reference.T,lambda_=lambd,porder=porder,itermax=itermax)\ns_base=airPLS(smooth_signal,lambda_=lambd,porder=porder,itermax=itermax)\n \nfig = plt.figure(figsize=(16, 10))\nax1 = fig.add_subplot(211)\nax1.plot(smooth_signal,'blue',linewidth=1.5)\nax1.plot(s_base,'black',linewidth=1.5)\nax2 = fig.add_subplot(212)\nax2.plot(smooth_reference,'purple',linewidth=1.5)\nax2.plot(r_base,'black',linewidth=1.5)\n \n# Se remueve la linea de base y el comienzo de las mediciones.\nremove=200\nreference_corrected = (smooth_reference[remove:] - r_base[remove:])\nsignal_corrected = (smooth_signal[remove:] - s_base[remove:]) \n \nfig = plt.figure(figsize=(16, 10))\nax1 = fig.add_subplot(211)\nax1.plot(signal_corrected,'blue',linewidth=1.5)\nax2 = fig.add_subplot(212)\nax2.plot(reference_corrected,'purple',linewidth=1.5)\n\n\"\"\"\n \n7) Estandarizar las señales utilizando sus respectivos valores medios y desviaciones estándar. 
\n Dando lugar a 2 señales denominadas zInt410 (z_reference) y zInt470 (z_signal).\n\n\"\"\"\n \nz_reference = (reference_corrected - np.median(reference_corrected)) / np.std(reference_corrected)\nz_signal = (signal_corrected - np.median(signal_corrected)) / np.std(signal_corrected)\n\nfig = plt.figure(figsize=(16, 10))\nax1 = fig.add_subplot(211)\nax1.plot(z_signal,'blue',linewidth=1.5)\nax2 = fig.add_subplot(212)\nax2.plot(z_reference,'purple',linewidth=1.5)\n \n\"\"\"\n \n8) Ajusta las señales estandarizadas de 410 y 470 nm a la función de regresión: y = a*x + b\n \n\"\"\"\n# Ajuste de la señal de calcio usando regresión lineal.\nfrom sklearn.linear_model import Lasso\n \nlin = Lasso(alpha=0.0001,precompute=True,max_iter=1000,positive=True, random_state=9999, selection='random')\nn = len(z_reference)\nlin.fit(z_reference.reshape(n,1), z_signal.reshape(n,1))\n \nfig = plt.figure(figsize=(16, 8))\nax1 = fig.add_subplot(111)\nax1.plot(z_reference,z_signal,'b.')\n \n# Alineamiento de la referencia con la señal de la señal de calcio usando regresión lineal.\nz_reference_fitted = lin.predict(z_reference.reshape(n,1)).reshape(n,)\nax1.plot(z_reference,z_reference_fitted, 'r--',linewidth=1.5)\n \nfig = plt.figure(figsize=(16, 8))\nax1 = fig.add_subplot(111)\nax1.plot(z_signal,'blue')\nax1.plot(z_reference_fitted,'purple')\n \n\"\"\"\n \n9) Calcular la expresión dF/F normalizada.\n\n\"\"\" \n \n# Cálculo de la zdFF.\nzdFF = (z_signal - z_reference_fitted)\n \n# Gráfico de zdFF. \nfig = plt.figure(figsize=(16, 8))\nax1 = fig.add_subplot(111)\nax1.plot(zdFF,'black')\n \n","sub_path":"procesamiento.py","file_name":"procesamiento.py","file_ext":"py","file_size_in_byte":8036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"164863299","text":"import numpy as np\nimport scipy as sc\nfrom scipy import special\n\nimport copy\nfrom sklearn.preprocessing import PolynomialFeatures\n\n\nclass Policy:\n\n def __init__(self, d_state, dm_act, pdict):\n self.d_state = d_state\n self.dm_act = dm_act\n\n self.type = 'poly'\n self.degree = pdict['degree']\n self.nb_feat = int(sc.special.comb(self.degree + self.d_state, self.degree)) - 1\n self.basis = PolynomialFeatures(self.degree, include_bias=False)\n\n self.n_param = self.dm_act * self.nb_feat\n self.K = 1e-8 * np.random.randn(self.n_param)\n self.cov = pdict['cov0'] * np.eye(self.n_param)\n\n def features(self, x):\n return self.basis.fit_transform(x.reshape(-1, self.d_state)).squeeze()\n\n def mean(self, x):\n feat = self.features(x)\n return np.einsum('...k,mk->...m', feat, self.K.reshape(self.dm_act, self.d_state))\n\n def actions(self, x):\n return self.mean(x)\n\n def perturb(self):\n pert = copy.deepcopy(self)\n pert.K = np.random.multivariate_normal(self.K, self.cov)\n return pert\n\n\nclass FDPG:\n\n def __init__(self, env, n_episodes, discount,\n alpha, pdict):\n self.env = env\n\n self.d_state = self.env.observation_space.shape[0]\n self.dm_act = self.env.action_space.shape[0]\n\n self.alim = self.env.action_space.high\n\n self.n_episodes = n_episodes\n self.discount = discount\n\n self.alpha = alpha\n\n self.ctl = Policy(self.d_state, self.dm_act, pdict)\n\n self.rollouts = None\n\n def sample(self, n_episodes, ctl=None):\n rollouts = []\n\n for _ in range(n_episodes):\n roll = {'x': np.empty((0, self.d_state)),\n 'u': np.empty((0, self.dm_act)),\n 'xn': np.empty((0, self.d_state)),\n 'done': np.empty((0,), np.int64),\n 'r': np.empty((0,))}\n\n x = self.env.reset()\n\n done = False\n while not 
done:\n if ctl is None:\n u = self.ctl.actions(x)\n else:\n u = ctl.actions(x)\n\n roll['x'] = np.vstack((roll['x'], x))\n roll['u'] = np.vstack((roll['u'], u))\n\n x, r, done, _ = self.env.step(np.clip(u, - self.alim, self.alim))\n roll['xn'] = np.vstack((roll['xn'], x))\n roll['done'] = np.hstack((roll['done'], done))\n roll['r'] = np.hstack((roll['r'], r))\n\n rollouts.append(roll)\n\n return rollouts\n\n def run(self, nb_iter=100, verbose=False):\n _trace = {'ret': []}\n\n for it in range(nb_iter):\n self.rollouts = self.sample(n_episodes=self.n_episodes)\n\n _return = []\n for roll in self.rollouts:\n _gamma = self.discount * np.ones((len(roll['r']), ))\n _disc = np.hstack((1.0, np.cumprod(_gamma[:-1])))\n _return.append(np.sum(_disc * roll['r']))\n\n _meanr = np.mean(_return)\n\n _par, _return = [], []\n for n in range(self.n_episodes):\n # perturbed policy\n _pert = self.ctl.perturb()\n\n # return of perturbed policy\n _roll = self.sample(n_episodes=1, ctl=_pert)\n\n _gamma = self.discount * np.ones((len(_roll[-1]['r']), ))\n _disc = np.hstack((1.0, np.cumprod(_gamma[:-1])))\n _return.append(np.sum(_disc * _roll[-1]['r']))\n _par.append(_pert.K)\n\n # param diff\n _dpar = np.squeeze(np.asarray(_par) - self.ctl.K)\n\n # rwrd diff\n _dr = np.asarray(_return) - _meanr\n\n # gradient\n _grad = np.linalg.inv(_dpar.T @ _dpar + 1e-8 * np.eye(self.ctl.nb_feat)) @ _dpar.T @ _dr\n\n # update\n self.ctl.K += self.alpha * _grad / self.n_episodes\n\n _trace['ret'].append(_meanr)\n\n if verbose:\n print('it=', it, f'ret={_meanr:{5}.{4}}')\n\n return _trace\n","sub_path":"rl/fdpg/fdpg.py","file_name":"fdpg.py","file_ext":"py","file_size_in_byte":4081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"239030259","text":"import os\nimport astropy.units as u\nimport warnings\nfrom sunpy.net import Fido,attrs\nfrom datetime import date, time, datetime, timedelta\nfrom astropy.io import fits\n\nworkdir = 'C:/Users/alexf/Desktop/HMI_Data/'\nsharp_dir = workdir + 'sharp/'\n\nif not os.path.exists(workdir):\n os.mkdir(workdir)\n print(\"Directory \" + workdir + \"does not exist. Creating...\")\n\nif not os.path.exists(sharp_dir):\n os.mkdir(sharp_dir)\n print(\"Directory \" + sharp_dir + \"does not exist. 
Creating...\")\n#Define start and times, as well as the time interval between disks and the download chunk size\nstart = datetime(2010, 5, 1,0,0,0)#date time object format is year, month, day, hour, minute, second\nend = datetime(2018, 5, 1,0,0,0)#currently generating 8 years of data\ntime_interval = timedelta(minutes = 60)\ndownload_chunk = timedelta(days = 10)#avoid download chunks greater than 1 month in order to not download too much at once\n\n#breaks the download into pieces and downloads\ncurrent_time = start\nwhile(current_time < end):\n if(end-current_time > download_chunk):\n next_time = current_time + download_chunk\n else:\n next_time = end\n response = Fido.search(\n attrs.jsoc.Time(current_time, next_time),\n attrs.jsoc.Notify('hsmgroupnasa@gmail.com'),\n attrs.jsoc.Series('hmi.Sharp_720s'),\n attrs.jsoc.Segment('bitmap'),\n attrs.Sample(time_interval.total_seconds() * u.s)\n )\n response\n res = Fido.fetch(response, path= sharp_dir + '/{file}.fits')\n current_time = next_time\n\nwarnings.simplefilter(\"ignore\")#.verify('fix') produces many warnings which will lag the jupyter notebook\n\n#extracts relevant keywords in the given order\nkeywords = ['HARPNUM','T_REC','NAXIS1','NAXIS2','CDELT1','CDELT2','IMCRPIX1','IMCRPIX2','LAT_FWT','LON_FWT','NPIX']#Keywords in order to be saved\nfilenames = os.listdir(sharp_dir)\nfilename = 'data.txt'\n\ndata = open(workdir + filename,\"w+\")\nline = ''\nfor keyword in keywords:\n line += keyword + ' '\ndata.write(line + \"\\n\")\nfor filename in filenames:\n line = ''\n hdul = fits.open(sharp_dir + filename)\n hdul.verify('fix')\n for keyword in keywords:\n line += str(hdul[1].header[keyword]) + ' '\n data.write(line + '\\n')\n hdul.close()\ndata.close()","sub_path":"aracle/data/Datagenerator.py","file_name":"Datagenerator.py","file_ext":"py","file_size_in_byte":2233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"324336377","text":"from django.conf.urls import patterns, include, url\n#from django.conf import settings\nfrom booklib import settings\nfrom booklib.settings import MEDIA_ROOT\n\n# Uncomment the next two lines to enable the admin:\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns(\n '',\n url(r'^$', 'library.views.index'),\n url(r'^library/$', 'library.views.index'),\n url(r'^library/books/$', 'library.views.index'),\n url(r'^library/books/(\\d+)/$', 'library.views.bookCard'),\n url(r'^library/authors/$', 'library.views.authors'),\n url(r'^library/authors/(\\d+)/$', 'library.views.authorsCard'),\n url(r'^admin/', include(admin.site.urls)),\n url(r'^media/(?P.*)$', 'django.views.static.serve', {\n 'document_root': settings.MEDIA_ROOT, }),\n)\n","sub_path":"practice_5/lab5/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"652583040","text":"\"\"\"\nSolve -u'' = f using finite difference\n4th order in interior, 3rd order at second and penultimate point\n\"\"\"\nfrom numpy import pi, sin, linspace, zeros, ones, abs\nimport matplotlib.pyplot as plt\nfrom scipy.sparse import diags, csc_matrix\nfrom scipy.sparse.linalg import spsolve\n\n# RHS function\ndef f(x):\n return sin(x)\n\n# exact solution\ndef uexact(x):\n return sin(x)\n\n# Domain\nxmin, xmax = 0.0, 2.0*pi\n\n# Grid of n points\nn = 32\nh = (xmax - xmin)/(n - 1)\nx = linspace(xmin,xmax,n)\n\n# array for solution\nu = zeros(n)\n\n# BC for first and last points\nu[0] 
= uexact(x[0])\nu[-1] = uexact(x[-1])\n\nb = 12.0 * h**2 * f(x[1:-1])\nb[0] += 11.0 * u[0]\nb[-1] += 11.0 * u[-1]\n\n#A = spdiags([ones(n-2),-16*ones(n-2),30*ones(n-2),-16*ones(n-2),ones(n-2)],\n# [-2,-1,0,1,2], n-2, n-2)\nA = diags([ones(n-4),-16*ones(n-3),30*ones(n-2),-16*ones(n-3),ones(n-4)],\n [-2,-1,0,1,2])\nA = A.tolil() # lil_matrix allows to change elements\nA[0,0] = 20.0; A[0,1] = -6.0; A[0,2] = -4.0; A[0,3] = 1.0\nA[-1,-4] = 1.0; A[-1,-3] = -4.0; A[-1,-2] = -6.0; A[-1,-1] = 20.0;\nA = csc_matrix(A) # spsolve requires this format\nu[1:-1] = spsolve(A,b)\nprint(\"Max error = \", abs(uexact(x)-u).max())\n\n# Exact solution on fine mesh for plotting\nxe = linspace(xmin, xmax, 100); ue = uexact(xe)\n\n# Plot exact and numerical solution\nplt.plot(xe,ue,x,u,'o')\nplt.legend(('Exact solution','Numerical solution'))\nplt.xlabel('x')\nplt.ylabel('u')\nplt.show()\n","sub_path":"bvp1d/bvp1db.py","file_name":"bvp1db.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"371377798","text":"'''Crie um programa que leia a idade e o sexo de várias pessoas. A cada pessoa cadastrada, o programa\ndeverá perguntar se o usuário quer ou não continuar. No final, mostre:\nA: Quantas pessoas tem mais de 18 anos.\nB: Quantos homens foram cadastrados.\nC: Quantas mulheres tem menos de 20 anos.'''\n\ntot18 = totman = totwoman = 0\nwhile True:\n idade = int(input('Qual sua idade? '))\n sexo = ' '\n while sexo not in 'MF':\n sexo = str(input('Qual seu sexo? [M/F]')).strip().upper()[0]\n if idade >= 18:\n tot18 += 1\n if sexo == 'M':\n totman += 1\n if sexo == 'F' and idade < 20:\n totwoman += 1\n resp = ' '\n while resp not in 'SN':\n resp = str(input('Quer continuar? ')).strip().upper()[0]\n if resp == 'N':\n break\nprint(f'Total de pessoas com mais de 18 anos: {tot18}')\nprint(f'Ao todo temos {totman} homens cadastrados')\nprint(f'E temos {totwoman} mulheres com menos de 20 anos')\n","sub_path":"World - 2/ex069.py","file_name":"ex069.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"466756409","text":"import http.client as client\nconnection = client.HTTPSConnection(\"sms.movesms.co.ke\")\ndef sendSMS(mobile_number,message):\n payload = \"username=victor&api_key=Nh7ctEVbkSW3knMznKYeyXo6JHppeshVRwv0S5usdvaZH6qNUQ&sender=SMARTLINK&to=%s&message=%s&msgtype=5&dlr=0\" % (mobile_number,message)\n headers = {\n 'content-type':\"application/x-www-form-urlencoded\",\n 'cache-control':\"no-cache\",\n }\n\n connection.request(\"POST\",'/api/compose',payload,headers)\n ","sub_path":"user/sms.py","file_name":"sms.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"312517469","text":"# Create your views here.\nfrom rest_framework import viewsets, status\nfrom rest_framework.exceptions import PermissionDenied\nfrom rest_framework.generics import get_object_or_404\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\n\nfrom milestone.models import Milestone\nfrom milestone.serializer import MilestoneSerializer\nfrom project.models import Project\nfrom django.db.transaction import atomic\n\nclass MilestoneViewSet(viewsets.ModelViewSet):\n queryset = Milestone.objects.all()\n serializer_class = MilestoneSerializer\n permission_classes = [IsAuthenticated]\n\n def create(self, request, *args, 
**kwargs):\n project = get_object_or_404(Project, pk=kwargs.get('project_pk'))\n if project.owner != request.user and not project.collaborators.all().filter(\n username=request.user.username).exists():\n raise PermissionDenied()\n\n context = {\n \"project\": project\n }\n\n serializer_data = {**request.data, **{\"project_id\": project.id}}\n serializer = self.serializer_class(\n data=serializer_data, context=context,\n )\n serializer.is_valid(raise_exception=True)\n\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n @atomic\n def update(self, request, *args, **kwargs):\n project = get_object_or_404(Project, pk=kwargs.get('project_pk'))\n milestone = Milestone.objects.select_for_update().filter(pk=kwargs.get('pk'))\n if project.owner != request.user and not project.collaborators.all().filter(\n username=request.user.username).exists():\n raise PermissionDenied()\n\n context = {\n \"project\": project\n }\n\n serializer_data = request.data\n serializer = self.serializer_class(\n data=serializer_data, context=context\n )\n serializer.validators = []\n serializer.is_valid(raise_exception=True)\n serializer.update(milestone, serializer.validated_data)\n return Response(serializer.data, status.HTTP_200_OK)\n\n def list(self, request, *args, **kwargs):\n project = get_object_or_404(Project, pk=kwargs.get('project_pk'))\n\n if project.owner != request.user and not project.collaborators.all().filter(\n username=request.user.username).exists():\n raise PermissionDenied()\n\n milestones = Milestone.objects.filter(project=project)\n serializer = MilestoneSerializer(milestones, many=True)\n return Response(serializer.data)\n\n def destroy(self, request, *args, **kwargs):\n project = get_object_or_404(Project, pk=kwargs.get('project_pk'))\n\n if project.owner != request.user and not project.collaborators.all().filter(\n username=request.user.username).exists():\n raise PermissionDenied()\n\n milestone = get_object_or_404(Milestone, pk=kwargs.get('pk'))\n milestone.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n","sub_path":"backend/milestone/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"606635143","text":"#!/usr/bin/env python3\n\nfrom sortedcontainers import SortedDict\nimport queries\nfrom indeed import IndeedClient\nfrom world import countries\nfrom sys import argv\n\nplace = argv[1]\n\ndef search(params):\n client = IndeedClient(publisher=8201417039877332)\n res = client.search(**params)\n return res\n\ndef query(query_string, loc):\n params = {'q': query_string, 'l': loc, 'userip': '85.236.38.212', 'co': countries[loc], 'radius': 0,\n 'useragent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2)', 'format': 'json',\n 'publisher': 8201417039877332, 'v': 2, 'filter': 1}\n res = search(params)\n del res['results']\n return res\n\ndef calc(stats, c, file):\n with open(file, 'w') as fh:\n fh.write('Indeed.com ' + place + '\\n\\n')\n for comparison, pair in c.items():\n if (stats[pair[0]] > 0) and (stats[pair[1]] > 0):\n fh.write('%-20s %.1f \\n' % (comparison, stats[pair[0]] / stats[pair[1]]))\n\ndef write_stats(q, file, c):\n with open(file, 'w') as fh:\n stats = SortedDict()\n for lang, qr in q.items():\n total = query(qr, place)['totalResults']\n fh.write(lang + ':' + str(total) + '\\n')\n stats[lang] = total\n fh.close()\n calc(stats, c, 'job_ratios1.txt')\n\nwrite_stats(queries.q, 'stats1.txt', queries.c)\n\n# 
write_stats -> query -> search\n# write_stats -> calc\n\n","sub_path":"func.py","file_name":"func.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"530739020","text":"import pandas as pd\nimport numpy as np\n\ndef train_validate_test_split(df, train_percent=.64, validate_percent=.16, seed=1234):\n np.random.seed(seed)\n perm = np.random.permutation(df.index)\n m = len(df.index)\n train_end = int(train_percent * m)\n validate_end = int(validate_percent * m) + train_end\n train = df.iloc[perm[:train_end]]\n validate = df.iloc[perm[train_end:validate_end]]\n test = df.iloc[perm[validate_end:]]\n return train, validate, test\n\ndef main():\n df = pd.read_csv(\"./unprocessed_data/casehold.csv\")\n train, validate, test = train_validate_test_split(df)\n \n train.to_csv(\"./data_processed/train.csv\", sep=',', header=True, index=False, encoding='utf-8')\n validate.to_csv(\"./data_processed/dev.csv\", sep=',', header=True, index=False, encoding='utf-8')\n test.to_csv(\"./data_processed/test.csv\", sep=',', header=True, index=False, encoding='utf-8')\n\nif __name__ == \"__main__\":\n main()","sub_path":"dataset_setup.py","file_name":"dataset_setup.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"538043977","text":"\"\"\"\nLICENSE: public domain.\n\nConverts an amazon payments .csv file to a quickbooks-compatible .iif file.\n\nAn .iif file has a format like:\n\n!TRNS\tDATE\tACCNT\tNAME\tCLASS\tAMOUNT\tMEMO\n!SPL\tDATE\tACCNT\tNAME\tAMOUNT\tMEMO\n!ENDTRNS\nTRNS\t\"9/24/2010\"\t\"Paypal\"\t\"grommit\"\t\"Web Accept Payment Received\"\t48.25\t\"Wiki Spot c/o Wiki Spot\"\t\nSPL\t\"9/24/2010\"\t\"Other Income\"\t\"grommit\"\t-50.00\nSPL\t\"9/24/2010\"\t\"Other Expenses\"\tFee\t1.75\nENDTRNS\nTRNS\t\"9/1/2010\"\t\"Paypal\"\t\"PayPal - Money Market\"\t\"Dividend From PayPal Money Market\"\t0.01\t\nSPL\t\"9/1/2010\"\t\"Other Income\"\t\"PayPal - Money Market\"\t-0.01\nENDTRNS\nTRNS\t\"8/8/2010\"\t\"Paypal\"\t\"Cernio Technology Cooperative\"\t\"Shopping Cart Payment Sent\"\t-100.00\t\"Shopping Cart\"\t\nSPL\t\"8/8/2010\"\t\"Other Expenses\"\t\"Cernio Technology Cooperative\"\t100.00\nENDTRNS\nTRNS\t\"8/5/2010\"\t\"Paypal\"\t\"PayPal - Money Market\"\t\"Dividend From PayPal Money Market\"\t0.02\t\nSPL\t\"8/5/2010\"\t\"Other Income\"\t\"PayPal - Money Market\"\t-0.02\nENDTRNS\n\n\namazon CSV looks like:\n\n\"Date\",\"Type\",\"To/From\",\"Name\",\"Status\",\"Amount\",\"Fees\",\"Transaction ID\"\n\"Sep 25, 2010\",\"Withdrawal\",\"To\",\"Wiki Spot\",\"Initiated\",\"$23,724.88\",\"$0.00\",\"15D5J2SL11UN7VLTAJA7C2F2GHVJDGTENS8\"\n\"Sep 20, 2010\",\"Payment\",\"From\",\"Scott Meehleib\",\"Failed\",\"$100.00\",\"$3.20\",\"15CODDSKP5U9KNF8LGBEAJBRU3CR81CMI86\"\n\"Sep 20, 2010\",\"Payment\",\"To\",\"Kickstarter\",\"Completed\",\"$1.00\",\"$0.00\",\"15CODE7ZP18GDUJMAQA5UO9E3R5FAT62QFM\"\n\"\"\"\n\nACCOUNT = \"Amazon Payments\"\n# Our expense account, in our case, payments to Kickstarter.com - Fundraising Fees\nEXPENSES = \"Contract Services:Fundraising Fees\"\n# We're making amazon payment fees as Banking Fees\nPAYMENT_FEES = \"Business Expenses:Banking Fees\"\n# Our income account, in our case, is our individual contribution/donation account\nINCOME = \"Direct Public Support:Individ, Business Contributions\"\n\nimport csv\nimport sys\nimport datetime\nimport locale\n\nlocale.setlocale( locale.LC_ALL, '' )\n\nfilename = 
sys.argv[1]\niif_out = open('amazon.iif', 'w')\n\ndef get_customers(filename):\n customers = []\n f = open(filename, 'r')\n reader = csv.reader(f)\n reader.next()\n for row in reader:\n date_str, type, to_or_from, name, status, amount, fees, transaction_id = row\n customers.append(name)\n f.close()\n return list(set(customers))\n\ncustomers = get_customers(filename)\n\ndef write_iif_header():\n iif_out.write(\"\"\"!TRNS\tDATE\tACCNT\tNAME\tCLASS\tAMOUNT\tMEMO\\n\"\"\")\n iif_out.write(\"\"\"!SPL\tDATE\tACCNT\tNAME\tAMOUNT\tMEMO\n!ENDTRNS\\n\"\"\")\n iif_out.write(\"\"\"!CUST\tNAME\\n\"\"\")\n for customer in customers:\n iif_out.write(\"\"\"CUST\t%s\\n\"\"\" % customer)\n\ndef parse_amount(amount):\n num = float(amount[1:].replace(',',''))\n return (num, locale.currency(num)[1:])\n\ndef process_payment(transaction_date, to_or_from, name, amount, fees, transaction_id):\n date = \"%s/%s/%s\" % (\n transaction_date.month, transaction_date.day, transaction_date.year\n )\n fee_num, fee_amount = parse_amount(fees)\n direct_amount_num, direct_amount = parse_amount(amount)\n rough_total += direct_amount_num\n total_amount = locale.currency(direct_amount_num - fee_num)[1:]\n payment_details = {\n 'date': date,\n 'account': ACCOUNT,\n 'name': name,\n 'comment': \"Amazon payment\",\n 'total_amount': total_amount,\n 'income_account': INCOME,\n 'direct_amount': direct_amount,\n 'expense_account': EXPENSES,\n 'fee_amount': fee_amount,\n 'fee_account': PAYMENT_FEES,\n }\n if to_or_from == 'From':\n payment_str = \"\"\"TRNS\t\"%(date)s\"\t\"%(account)s\"\t\"%(name)s\"\t\"%(comment)s\"\t%(total_amount)s\t\"Amazon payment\"\t\nSPL\t\"%(date)s\"\t\"%(income_account)s\"\t\"%(name)s\"\t-%(direct_amount)s\nSPL\t\"%(date)s\"\t\"%(fee_account)s\"\tFee\t%(fee_amount)s\nENDTRNS\"\"\" % payment_details\n exact_total += (direct_amount_num - fee_num)\n elif to_or_from == 'To':\n payment_str = \"\"\"TRNS\t\"%(date)s\"\t\"%(account)s\"\t\"%(name)s\"\t\"%(comment)s\"\t-%(total_amount)s\t\"Amazon payment\"\t\nSPL\t\"%(date)s\"\t\"%(expense_account)s\"\t\"%(name)s\"\t%(direct_amount)s\nSPL\t\"%(date)s\"\t\"%(fee_account)s\"\tFee\t%(fee_amount)s\nENDTRNS\"\"\" % payment_details\n exact_total -= (direct_amount_num - fee_num)\n\n iif_out.write(payment_str + '\\n')\n\ndef process_withdrawal(transaction_date, to_or_from, name, amount, fees, transaction_id):\n \"\"\"\n We skip these because we assume they can be marked from the relevant bank account import.\n\n PayPal doesn't export bank account transfers in their .iif files and it all works out.\n \"\"\"\n pass\n\nreader = csv.reader(open(filename, 'r'))\nreader.next()\nwrite_iif_header()\nfor row in reader:\n date_str, type, to_or_from, name, status, amount, fees, transaction_id = row\n transaction_date = datetime.datetime.strptime(date_str, '%b %d, %Y')\n # skip transactions that didn't go through\n # it seems that amazon lists transactions as 'initiated' that are\n # actually successful. 
we don't import failed ones, though..\n if status != 'Completed':\n continue\n if type == 'Payment':\n process_payment(transaction_date, to_or_from, name, amount, fees, transaction_id)\n elif type == 'Withdrawal':\n process_withdrawal(transaction_date, to_or_from, name, amount, fees, transaction_id)\n","sub_path":"amazon_csv_to_iif.py","file_name":"amazon_csv_to_iif.py","file_ext":"py","file_size_in_byte":5177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"491672874","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n###############################################################################\n# Copyright Kitware Inc. and Contributors\n# Distributed under the Apache License, 2.0 (apache.org/licenses/LICENSE-2.0)\n# See accompanying Copyright.txt and LICENSE files for details\n###############################################################################\n\nimport json\nimport re\n\nfrom girder_jobs import Job\n\nfrom girder_client import GirderClient\n\nfrom ..constants import DanesfieldJobKey\nfrom ..utilities import removeDuplicateCount\n\n\ndef createGirderClient(requestInfo):\n \"\"\"Return new configured GirderClient instance.\"\"\"\n gc = GirderClient(apiUrl=requestInfo.apiUrl)\n gc.token = requestInfo.token[\"_id\"]\n return gc\n\n\ndef createUploadMetadata(jobId, stepName):\n \"\"\"\n Return metadata to supply with uploaded files, including:\n - Job identifier\n - Step name\n\n :param jobId: Job ID.\n :type jobId: str\n :param stepName: The name of the step.\n :type stepName: str (DanesfieldStep)\n \"\"\"\n upload_kwargs = {}\n if jobId is not None:\n upload_kwargs.update(\n {\n \"reference\": json.dumps(\n {DanesfieldJobKey.ID: jobId, DanesfieldJobKey.STEP_NAME: stepName}\n )\n }\n )\n return upload_kwargs\n\n\ndef createDockerRunArguments(\n image, containerArgs, jobTitle, jobType, user, resultHooks=None\n):\n \"\"\"\n Return arguments to pass to docker_run Celery task.\n\n :param image: Docker image name.\n :type image: str\n :param containerArgs: Docker container arguments.\n :type containerArgs: list[str]\n :param jobTitle: Girder job title.\n :type jobTitle: str\n :param jobType: Girder job type.\n :type jobType: str\n :param user: User document.\n :type user: dict\n :param resultHooks: List of Girder Worker transforms.\n :type resultHooks: list\n :returns: dict\n \"\"\"\n args = {\n \"image\": image,\n \"pull_image\": False,\n \"container_args\": containerArgs,\n \"girder_job_title\": jobTitle,\n \"girder_job_type\": jobType,\n \"girder_user\": user,\n # Force Python's stdout, stderr to be unbuffered. 
This ensures that the\n # job log is updated without waiting for a buffer to fill.\n \"environment\": [\"PYTHONUNBUFFERED=1\"],\n }\n if resultHooks is not None:\n args[\"girder_result_hooks\"] = resultHooks\n\n return args\n\n\ndef addJobInfo(job, jobId, stepName, workingSetId):\n \"\"\"\n Add common information to a job for use by job event listeners.\n This information allows the job event handler/workflow manager to\n process the job and continue running the workflow.\n\n :param job: Job document.\n :type job: dict\n :param jobId: Job ID.\n :type jobId: str\n :param stepName: The name of the step.\n :type stepName: str (DanesfieldStep)\n :returns: Updated job document.\n \"\"\"\n if jobId is not None:\n job.update(\n {\n DanesfieldJobKey.ID: jobId,\n DanesfieldJobKey.STEP_NAME: stepName,\n DanesfieldJobKey.WORKINGSETID: workingSetId,\n }\n )\n job = Job().save(job)\n\n return job\n\n\ndef rpcFileMatchesImageFile(rpcFile, imageFile):\n \"\"\"\n Return true if the RPC file corresponds to the image file.\n Matches are determined by file names.\n\n :param rpcFile: RPC file document.\n :type rpcFile: dict\n :param imageFile: Image file document.\n :type imageFile: dict\n \"\"\"\n rpcBaseName = removeDuplicateCount(rpcFile[\"name\"]).split(\".\")[0]\n # Remove suffix added to RPC files generated for MSI images\n result = re.match(r\"^(?P.+)_\\d+$\", rpcBaseName)\n if result:\n rpcBaseName = result.group(\"basename\")\n imageBaseName = imageFile[\"name\"].split(\".\")[0]\n return rpcBaseName.endswith(imageBaseName)\n\n\ndef imagePrefix(imageFile):\n \"\"\"\n Returns the image filename prefix for the given image file.\n\n :param imageFile: Image file document.\n :type imageFile: dict\n :returns: prefix string\n \"\"\"\n match = re.match(\n r\".*?(?P[0-9]{2}[A-Z]{3}[0-9]{8})\\-\"\n \"(?PP1BS|M1BS)\\-\"\n \"(?P[0-9]{12}_[0-9]{2}_P[0-9]{3})\",\n imageFile[\"name\"],\n flags=re.IGNORECASE,\n )\n\n if match:\n return \"%s-%s\" % (match.group(\"prefix\"), match.group(\"trail\"))\n else:\n return None\n\n\ndef rpcPrefix(rpcFile):\n \"\"\"\n Returns the RPC filename prefix for the given RPC file.\n\n :param rpcFile: RPC file document.\n :type rpcFile: dict\n :returns: prefix string\n \"\"\"\n match = re.match(\n r\".*?(?P[0-9]{2}[A-Z]{3}[0-9]{8})\\-\"\n \"(?PP1BS|M1BS)\\-\"\n \"(?P[0-9]{12}_[0-9]{2}_P[0-9]{3})\",\n rpcFile[\"name\"],\n flags=re.IGNORECASE,\n )\n\n if match:\n return \"%s-%s\" % (match.group(\"prefix\"), match.group(\"trail\"))\n else:\n return None\n","sub_path":"server/danesfield_server/algorithms/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":4971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"236506905","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat May 5 11:44:37 2018\r\n\r\n@author: Administrator\r\n\"\"\"\r\n\r\ndef get_keydata(dt,num):\r\n import pandas as pd\r\n import numpy as np\r\n s=dt[1:len(dt)-1]\r\n s1=dt[0:len(dt)-2]\r\n s2=dt[2:len(dt)]\r\n sd=abs(s.values-(s1.values+s2.values)/2)\r\n SSd=pd.Series(sd,index=np.arange(1,len(dt)-1))\r\n SSd=SSd.sort_values(ascending=False)\r\n SSd=SSd[0:num-2]\r\n st=pd.Series([dt[0],dt[len(dt)-1]],index=[0,len(dt)-1])\r\n SSd=SSd.append(st)\r\n keydata=dt[SSd.index].sort_index()\r\n return keydata\r\n\r\ndef get_tz(keydata):\r\n import numpy as np\r\n y1=keydata.values[1:]\r\n y2=keydata.values[0:-1]\r\n x1=keydata.index[1:]\r\n x2=keydata.index[0:-1]\r\n tan=list((y2-y1)/(x2-x1))\r\n T=np.array(tan)\r\n I7=T>0.5\r\n i1=T>0.2\r\n 
i2=T<=0.5\r\n I6=i1&i2\r\n i1=T>0.1\r\n i2=T<=0.2\r\n I5=i1&i2\r\n i1=T>-0.1\r\n i2=T<=0.1\r\n I4=i1&i2\r\n i1=T>-0.2\r\n i2=T<=-0.1\r\n I3=i1&i2\r\n i1=T>=-0.5\r\n i2=T<=-0.2\r\n I2=i1&i2\r\n I1=T<-0.5\r\n T[I1]=1\r\n T[I2]=2\r\n T[I3]=3\r\n T[I4]=4\r\n T[I5]=5\r\n T[I6]=6\r\n T[I7]=7\r\n return T","sub_path":"程序与数据/第10章 综合案例3:股票价格形态聚类与收益分析/df.py","file_name":"df.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"234349370","text":"import json\nfrom upylib.idx.uidx import UIdx\n\n\ndef test_uidx_create():\n uidx = UIdx(conf_fn=\"test.json\")\n uidx.reset_db()\n # uidx.dump()\n\n uidx.scan()\n file_list = uidx.get_file_list(path=\"/\")\n assert file_list\n\n file_list = uidx.get_file_list(path=\"/d1\", recursive=True)\n assert file_list\n\n for f in file_list:\n print(f[\"path\"], f[\"fn\"])\n f2 = uidx.get_file(path=f[\"path\"], fn=f[\"fn\"])\n print(f2)\n assert uidx.set_tag_int(id=f2[\"id\"], tag=\"test_int\", val=8)\n assert uidx.set_tag_str(id=f2[\"id\"], tag=\"test_str\", val=\"ok\")\n\n\ndef test_uidx():\n uidx = UIdx(conf_fn=\"test.json\")\n\n\n","sub_path":"test/idx/test_uidx.py","file_name":"test_uidx.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"419508361","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n\n# ------------------------------------------------------------------------------\n# Copyright 2020. NAVER Corp.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ------------------------------------------------------------------------------\n\n# Created by eeliu at 10/16/19\nimport json\nimport os\nfrom multiprocessing import Process, Queue as MPQueue\nfrom queue import Full, Queue\n\nfrom CollectorAgent.GrpcAgent import GrpcAgent\nfrom CollectorAgent.GrpcMeta import GrpcMeta\nfrom CollectorAgent.GrpcSpan import GrpcSpan\nfrom CollectorAgent.GrpcSpanFactory import GrpcSpanFactory\nfrom CollectorAgent.GrpcStat import GrpcStat\nfrom Common.AgentHost import AgentHost\nfrom Common.Logger import TCLogger\nfrom PinpointAgent.PinpointAgent import PinpointAgent\nfrom PinpointAgent.Type import SUPPORT_GRPC, API_DEFAULT\nfrom Proto.grpc.Span_pb2 import PSpanMessage\n\n\nclass GrpcAgentImplement(PinpointAgent):\n class SpanHelper(object):\n def __init__(self, span_addr, appid, appname, starttime,max_pending_sz):\n self.agent_meta = [('starttime', str(starttime)), ('agentid', appid), ('applicationname', appname)]\n self.agent_id = appid\n self.agent_name = appname\n self.span_addr = span_addr\n self.max_pending_sz =max_pending_sz\n self.span_queue = Queue(self.max_pending_sz)\n self.span_client = GrpcSpan(self.span_addr, self.agent_meta, self.span_queue)\n self.dropped_span_count=0\n\n def start(self):\n self.span_client.start()\n\n\n def sendSpan(self, spanMesg):\n\n try:\n self.span_queue.put(spanMesg, False)\n except Full as e:\n self.dropped_span_count+=1\n 
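# Queue.put(spanMesg, False) is non-blocking; once max_pending_sz spans are queued it raises queue.Full, so the span is simply dropped (and counted above) instead of blocking the caller.\n                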
TCLogger.warning(\"span send queue is full\")\n return False\n except Exception as e:\n TCLogger.error(\"send span failed: %s\", e)\n return False\n return True\n\n def stop(self):\n self.span_client.stop()\n TCLogger.info(\"grpc agent dropped %d\",self.dropped_span_count)\n\n def __init__(self, ac, app_id, app_name, serviceType):\n assert ac.collector_type == SUPPORT_GRPC\n super().__init__(app_id, app_name)\n self.agent_meta = [('starttime', str(ac.startTimestamp)),\n ('agentid', app_id),\n ('applicationname', app_name)]\n self.startTimeStamp = ac.startTimestamp\n self.service_type = serviceType\n self.max_pending_sz = ac.max_pending_size\n self.agent_addr = ac.CollectorAgentIp + ':' + str(ac.CollectorAgentPort)\n self.stat_addr = ac.CollectorStatIp + ':' + str(ac.CollectorStatPort)\n self.span_addr = ac.CollectorSpanIp + ':' + str(ac.CollectorSpanPort)\n self.web_port = ac.getWebPort()\n self.agentHost = AgentHost()\n self.max_span_sender_size = 2\n self.sender_index = 0\n\n def start(self):\n self.mpQueue = MPQueue()\n self.process = Process(target=self.processMain)\n self.process.start()\n\n def processMain(self):\n self.span_helper = GrpcAgentImplement.SpanHelper(self.span_addr, self.app_id, self.app_name,\n self.startTimeStamp,\n self.max_pending_sz)\n self.agent_client = GrpcAgent(self.agentHost.hostname, self.agentHost.ip, self.web_port, os.getpid(),\n self.agent_addr, self.service_type,self.agent_meta,self.getReqStat)\n self.meta_client = GrpcMeta(self.agent_addr, self.agent_meta)\n self.stat_client = GrpcStat(self.stat_addr,self.agent_meta,self.getIntervalStat)\n self.span_factory = GrpcSpanFactory(self)\n self.agent_client.start()\n self.meta_client.start()\n self.stat_client.start()\n self.span_helper.start()\n\n self.loopTheQueue()\n\n self.stopProcessMain()\n\n def loopTheQueue(self):\n while True:\n body = self.mpQueue.get()\n if body == None:\n TCLogger.info(\"agent: %s stopping\", self.agent_meta)\n break\n else:\n content = body.decode('utf-8')\n try:\n TCLogger.debug(content)\n stack = json.loads(content)\n except Exception as e:\n TCLogger.error(\"json is crash\")\n return\n\n super().sendSpan(stack, body)\n try:\n pSpan = self.span_factory.makeSpan(stack)\n spanMesg = PSpanMessage(span=pSpan)\n except Exception as e:\n TCLogger.warn(\"interrupted by %s\", e)\n continue\n self.span_helper.sendSpan(spanMesg)\n\n def asynSendSpan(self, stack, body):\n self.mpQueue.put(body, 5)\n\n def stop(self):\n self.mpQueue.put(None, 5)\n self.process.join()\n\n def stopProcessMain(self):\n self.agent_client.stop()\n self.meta_client.stop()\n self.stat_client.stop()\n self.span_helper.stop()\n\n def updateApiMeta(self, name, type=API_DEFAULT):\n return self.meta_client.updateApiMeta(name, -1, type)\n\n def updateStringMeta(self, name):\n return self.meta_client.updateStringMeta(name)\n","sub_path":"collector-agent/CollectorAgent/GrpcAgentImplement.py","file_name":"GrpcAgentImplement.py","file_ext":"py","file_size_in_byte":5919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"545730224","text":"#Project Euler Problem 10\n#Solution : Brandon Greer\nimport math \nimport numpy\n\ndef primesfrom3to(n): #From https://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n/3035188#3035188\n \"\"\" Returns a array of primes, 3 <= p < n \"\"\"\n sieve = numpy.ones(n//2, dtype=numpy.bool)\n for i in range(3,int(n**0.5)+1,2):\n if sieve[i//2]:\n sieve[i*i//2::i] = False\n return 2*numpy.nonzero(sieve)[0][1::]+1\n\nprimes 
= primesfrom3to(2000000)\nprint(sum(primes)+2)\n\n\n","sub_path":"Project Euler/Problem 10/euler_10_BG.py","file_name":"euler_10_BG.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"628369361","text":"import numpy as np\nfrom ProjectData import *\nfrom sklearn import model_selection, tree\nfrom platform import system\nfrom os import getcwd\nimport matplotlib.pyplot as plt\nfrom toolbox_02450 import windows_graphviz_call\nfrom matplotlib.pyplot import figure, plot, xlabel, ylabel, legend, show, boxplot\nfrom matplotlib.image import imread\nfrom matplotlib.pylab import figure, plot, xlabel, ylabel, legend, show\nimport graphviz as gv\n\ntarget='isLegendary'\n\n# Class indices\ny = np.array(dOriginal[target])\n\ndOriginal=dOriginal.drop(target,axis=1)\ndLogReg=dLogReg.drop(target,axis=1)\n#dLogReg=dLogReg.drop(\"Catch_Rate\",axis=1)\n# Names of data objects\ndataobjectNames = list(dOriginal['Name'])\n\n\n# Attribute names\nattributeNames = list(dLogReg)\n\n\n# Attribute values\nX = np.asarray(np.array(dLogReg))\n\n\n\n\n\n# Class names\nclassNames = ['Is not legendary', 'Is legendary']\n \n# Number data objects, attributes, and classes\nN, M = X.shape\nC = len(classNames)\n\ncriterion='gini'\ndtc = tree.DecisionTreeClassifier(criterion=criterion, min_samples_split=2)\ndtc = dtc.fit(X,y)\n\nfname='tree_Legendary'\n# Export tree graph as a .dot file to pass to graphviz\nout = tree.export_graphviz(dtc, out_file=fname + '.dot', feature_names=attributeNames)\n\n#if system() == 'Windows':\n#    windows_graphviz_call(fname=fname,\n#                          cur_dir=getcwd(),\n#                          path_to_graphviz=r'C:\\Users\\Erik Gylling\\Desktop\\DTU\\4.Semester\\IntroductionToMachineLearningAndDataMining\\PythonPakker\\graphviz-2.38\\release')\n\n# Run the command \"dot -Tpng tree_Legendary.dot -o tree_Legendary.png\"\n# from the working directory to view the plot\n\n\n# Tree complexity parameter - constraint on maximum depth\ntc = np.arange(2, 21, 1)\n\n# K-fold crossvalidation\nK = 10\nCV = model_selection.KFold(n_splits=K,shuffle=True)\n\n# Initialize variable\nError_train = np.empty((len(tc),K))\nError_test = np.empty((len(tc),K))\n\nk=0\nfor train_index, test_index in CV.split(X):\n    print('Computing CV fold: {0}/{1}..'.format(k+1,K))\n\n    # extract training and test set for current CV fold\n    X_train, y_train = X[train_index,:], y[train_index]\n    X_test, y_test = X[test_index,:], y[test_index]\n\n    for i, t in enumerate(tc):\n        # Fit decision tree classifier, Gini split criterion, different pruning levels\n        dtc = tree.DecisionTreeClassifier(criterion=criterion, max_depth=t)\n        dtc = dtc.fit(X_train,y_train.ravel())\n        y_est_test = dtc.predict(X_test)\n        y_est_train = dtc.predict(X_train)\n        # Evaluate misclassification rate over train/test data (in this CV fold)\n        misclass_rate_test = np.sum(y_est_test != y_test) / float(len(y_est_test))\n        misclass_rate_train = np.sum(y_est_train != y_train) / float(len(y_est_train))\n        Error_test[i,k], Error_train[i,k] = misclass_rate_test, misclass_rate_train\n    k+=1\n\nbestDepth=np.argmin(Error_test.mean(1))+2\ndtc = tree.DecisionTreeClassifier(criterion=criterion, max_depth=bestDepth)\ndtc = dtc.fit(X,y)\nout = tree.export_graphviz(dtc, out_file=fname + '.dot', feature_names=attributeNames)\n\nf = figure()\nboxplot(Error_test.T)\nxlabel('Model complexity (max tree depth)')\nylabel('Test error across CV folds, K={0})'.format(K))\n\nf = figure()\nplot(tc, Error_train.mean(1))\nplot(tc, Error_test.mean(1))\nxlabel('Model 
complexity (max tree depth)')\nylabel('Error (misclassification rate, CV K={0})'.format(K))\nlegend(['Error_train','Error_test'])\n\nplt.title('Min. Test Error of {0}% is found on depth {1}'.format(np.round(np.min(Error_test.mean(1))*100,2),bestDepth))\nshow()\n#plt.savefig(\"../Figures/bestDepth.png\")\nprint(\"Error test with depth {0}:\\n{1}\".format(bestDepth,Error_test[bestDepth-2,:].T))\n\n","sub_path":"Scripts/oldScripts/ClassTree.py","file_name":"ClassTree.py","file_ext":"py","file_size_in_byte":3661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"492923788","text":"# -*- coding: utf-8 -*-\n#\n# This file is part of the Mage Knight implementation at\n# https://github.com/MartinAltmayer/mageknight.\n#\n# Copyright 2016 Martin Altmayer, Stefan Altmayer\n# The Mage Knight board game was created by Vlaada Chvátil.\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see .\n#\n\nimport sys\nimport gettext\n\nfrom mageknight.data import RoundType, Hero, Tile\n\ntr = gettext.gettext\n\n\ndef run(**kwargs):\n \"\"\"Run the application.\"\"\"\n arguments = {\n 'playerName': tr(\"Nameless Hero\"),\n 'playerHero': Hero.Norowas,\n 'numberOfRounds': 6,\n }\n arguments.update(kwargs)\n cmdArgs = parseCommandLineArguments()\n arguments.update({k: v for k, v in cmdArgs.items() if v is not None})\n\n from mageknight.base import savefile\n if arguments['list']:\n for saveFile in savefile.getList():\n print(saveFile.nameWithTimestamp()) # pylint: disable=bad-builtin\n\n elif len(arguments['delete']) > 0:\n savefile.delete(savefile.SaveFile(arguments['delete']))\n\n else:\n from PyQt5 import QtWidgets\n app = QtWidgets.QApplication([])\n from mageknight.gui import mainwindow\n try:\n window = mainwindow.MainWindow(arguments)\n except savefile.LoadException as e:\n print(e) # pylint: disable=bad-builtin\n sys.exit(1)\n\n window.show()\n app.exec_()\n\n\ndef parseCommandLineArguments():\n import argparse\n parser = argparse.ArgumentParser(description=tr(\"MageKnight board game\"))\n parser.add_argument('--name', type=str, dest='playerName', help=tr(\"Choose the player's name.\"))\n parser.add_argument('--hero', type=str, dest='playerHero', help=tr(\"Choose the player's hero.\"))\n parser.add_argument('-n', '--new', action='store_true',\n help=tr(\"Skip menu and start a new game. This game will not be saved!\"))\n parser.add_argument('-l', '--load', nargs='?', default=None, const='',\n help=\"Load match. If no filename is specified, the last file will be used.\")\n parser.add_argument('--list', action='store_true', help=tr(\"Show list of savegames.\"))\n parser.add_argument('--delete', default='', help=tr(\"Delete the specified savefile.\"))\n parser.add_argument('-t', '--tactic', action='store_true', dest='skipTacticSelection',\n help=tr(\"Skip tactic selection. 
Use together with -n.\"))\n parser.add_argument('--night', action='store_true', help=tr(\"Start with a night round\"))\n parser.add_argument('--rounds', dest='numberOfRounds', type=int,\n help=tr(\"The number of rounds until the game ends\"))\n parser.add_argument('--cards', nargs='*', help=tr(\"Define start of 'cards' sequence.\"))\n parser.add_argument('--units', nargs='*', help=tr(\"Cheat yourself some units.\"))\n parser.add_argument('--skills', nargs='*', help=tr(\"Cheat yourself some skills.\"))\n parser.add_argument('--combat', nargs='*', help=tr(\"Start a fight against some enemies of your choice.\"))\n parser.add_argument('--tiles', nargs='*', help=tr(\"Define start of 'tiles' sequence.\"))\n parser.add_argument('-c', '--cheat', action='store_true', help=tr(\"Activate cheat/debug buttons.\"),\n dest='enableCheatButtons')\n parser.add_argument('-a', '--dontSave', action='store_true',\n help=tr(\"Don't save this match. Use together with -n.\"))\n\n args = parser.parse_args()\n\n # Process arguments\n if args.night:\n args.roundType = RoundType.night\n del args.night\n\n if args.playerHero:\n name = args.playerHero.capitalize()\n if name in [hero.name for hero in Hero]:\n args.playerHero = Hero[name]\n else:\n args.playerHero = None\n\n if args.tiles:\n args.tiles = [Tile(id) for id in args.tiles]\n\n return vars(args)\n\n\nif __name__ == \"__main__\":\n run()\n","sub_path":"mageknight/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":4476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"292683952","text":"import random\nfrom sklearn.model_selection import train_test_split\nimport pandas as pd\nimport seaborn as sns\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.sparse import csr_matrix\n\n\ndef split_to_train_and_test(data_df, random_state=None):\n \"\"\"\n prepare the data to classifying format\n :param data_df: the data to classify\n :param random_state: a seed to split the data by (keep empty if you want a random seed)\n :return: x_train, x_test, y_train, y_test\n \"\"\"\n if random_state is None:\n random_state = random.randint(0, 1000)\n\n # Split data into training and testing sets\n x_train, x_test, y_train, y_test = train_test_split(data_df['text'], data_df['label'], random_state=random_state)\n train_df = pd.DataFrame({\"text\": x_train, \"label\": y_train})\n test_df = pd.DataFrame({\"text\": x_test, \"label\": y_test})\n return train_df, test_df\n\n\ndef data_exploration(train_df):\n \"\"\"\n The function print the label distribution of the training data - the number pf samples with label\n 1 and with label 0\n :param train_df: the train data\n \"\"\"\n tot = len(train_df)\n print(train_df.label.value_counts())\n print(train_df.label.value_counts() / tot)\n sns.distplot(train_df.label, kde=False)\n plt.show()\n\n\ndef bag_of_words(train_df, test_df):\n \"\"\"\n convert data to counted vector, that count how many times word with min_df=3 and max_df=97% appears.\n binary features indicating the presence of word unigrams, bigrams and trigrams.\n\n :param x_train: training data\n :param x_test: test data\n :return: the converted counted vectors and the mapping - training, test, cv\n \"\"\"\n cv = CountVectorizer(min_df=3, max_df=0.97, ngram_range=(1,3))\n x_train_counts = cv.fit_transform(train_df.text.astype(np.str))\n x_test_counts = 
cv.transform(test_df.text.astype(np.str))\n\n return x_train_counts, x_test_counts\n\n\ndef bow_character_level_n_grams(train_df, test_df):\n \"\"\"\n binary features indicating the presence of character n-gram\n (without crossing word boundaries). Character n-grams provide some\n abstraction from the word level and provide robustness to the spelling variation that\n characterises social media data.\n :param x_train: training data\n :param x_test: test data\n :return: the converted counted vectors and the mapping - training, test, cv\n \"\"\"\n cv = CountVectorizer(analyzer='char_wb', ngram_range=(1,6), min_df=3, max_df=0.97)\n x_train_counts = cv.fit_transform(train_df.text.astype(np.str))\n x_test_counts = cv.transform(test_df.text.astype(np.str))\n\n return x_train_counts, x_test_counts\n\n\ndef tf_idf(x_train_counts, x_test_counts):\n \"\"\"\n The TF-IDF (term frequency-inverse document frequency) is a measure of the importance of a word in a document\n within a collection of documents, thereby taking into account the frequency of occurrence of a word in the entire\n corpus as a whole and within each document. G This function add the tf-idt weight to each word\n in the bag of words vector in the inputs.\n :param x_train_counts: bag of words for the train data\n :param x_test_counts: bag of words for the test data\n :return: bag of words with tf-idf for each data sets (train and test)\n \"\"\"\n tf_transformer = TfidfTransformer(use_idf=False).fit(x_train_counts)\n x_train_tf = tf_transformer.transform(x_train_counts)\n x_test_tfidf = tf_transformer.transform(x_test_counts)\n return x_train_tf, x_test_tfidf\n\n\ndef get_bow_tfidf(data, flag):\n \"\"\"\n create bow+tf-idf features\n :param data: The data for creating the features\n :param flag: if True the features are bow_tf-idf and if False the features are bow_character_n_grams + tf-idf\n :return:\n \"\"\"\n train_df, test_df = split_to_train_and_test(data)\n data_exploration(train_df)\n if flag: # bag of words with sentiment lexicon\n x_train_counts, x_test_counts = bag_of_words(train_df, test_df)\n else: # bow character level n grams\n x_train_counts, x_test_counts = bow_character_level_n_grams(train_df, test_df)\n x_train_tf, x_test_tfidf = tf_idf(x_train_counts, x_test_counts)\n if flag: # bag of words with sentiment lexicon\n x_train_tf = add_feature_from_sentiment_lexicon(train_df, x_train_tf)\n x_test_tfidf = add_feature_from_sentiment_lexicon(test_df, x_test_tfidf)\n return x_train_tf, x_test_tfidf, train_df, test_df\n\n\ndef _convert_txt_file_to_set(path):\n \"\"\"\n convert txt file to set for more efficient search\n :param path: path for the file's directory\n :return: txt file as set\n \"\"\"\n file = open(path, 'r', encoding='utf-8')\n s = set()\n for line in file:\n s.add(line.replace(\"\\n\", \"\"))\n return s\n\ndef add_feature_from_sentiment_lexicon(data, features):\n \"\"\"\n create sentiment features(for positive words and negative words) from sentiment lexicon.\n :param data: The data for creating the features\n :param features: set of feature to add them sentiments features\n :return: train matrix oa all the features\n \"\"\"\n neg_words = _convert_txt_file_to_set('sentiment_lexicon/negative_words_he.txt')\n pos_words = _convert_txt_file_to_set('sentiment_lexicon/positive_words_he.txt')\n\n neg_feature = []\n pos_feature = []\n\n # the feature is calculate by number to sentimants words ic the sentence divide by number of words in the sentence\n for idx, message in data.iterrows():\n count_neg = 0\n count_pos = 0\n 
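# Illustrative example: for a message \"bad bad good\" where \"bad\" is in the negative lexicon and \"good\" in the positive one, the loop below yields count_neg=2 and count_pos=1, giving a negative score of 2/3 and a positive score of 1/3.\n        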
for word in str(message[0]).split():\n if word in neg_words:\n count_neg += 1\n if word in pos_words:\n count_pos += 1\n # if there is no sentiment feature in the sentence the score is 0\n if len(str(message[0]).split()) == 0:\n mass_len = 1\n else:\n mass_len = len(str(message[0]).split())\n pos_feature.append(count_pos/mass_len)\n neg_feature.append(count_neg/mass_len)\n\n # add the semantic features to the general matrix features\n dense_matrix = features.todense()\n dense_matrix = np.insert(dense_matrix, dense_matrix.shape[1], pos_feature, axis=1)\n dense_matrix = np.insert(dense_matrix, dense_matrix.shape[1], neg_feature, axis=1)\n train_matrix = csr_matrix(dense_matrix)\n\n return train_matrix\n\n\ndef word2vec_model():\n \"\"\"\n loading pre-trained word2vec model as a dictionary\n :return: word2vec dictionary while the keys is the words tnw the values is the embedding vector\n \"\"\"\n vectors = np.load('words_vectors.npy')\n with open('words_list.txt', encoding=\"utf-8\") as f:\n words = f.read().splitlines()\n if len(words) != len(vectors):\n print(\"error\")\n raise AssertionError\n word2vec_dict = {}\n for i in range(len(words)):\n word2vec_dict[words[i]] = vectors[i]\n return word2vec_dict\n\n\ndef message_vector(word2vec_model, message):\n \"\"\"\n create word2vec for message - for each word in the message\n take the average of all the word vectors in a sentence\n and it will represent the message vector.\n :param word2vec_model: dictionary of pre-trained word2vec model\n :param message: the message for creating the features\n :return: message embedding vector\n \"\"\"\n # remove out-of-vocabulary words\n message = message.split()\n # for word in message:\n # if word not in word2vec_model:\n # print(word)\n message = [word2vec_model[word] for word in message if word in word2vec_model]\n if len(message) == 0:\n return None\n return list(np.mean(message, axis=0))\n\n\ndef get_word2vec(data):\n \"\"\"\n Build word2vec + TF-IDF embedding for all the data\n :param data: The data for creating the features\n :return: x_train_tf, x_test_tfidf, train_df, test_df\n \"\"\"\n train_df, test_df = split_to_train_and_test(data)\n data_exploration(train_df)\n model = word2vec_model()\n train_embedding = np.zeros(shape=[len(train_df), 100])\n test_embedding = np.zeros(shape=[len(test_df), 100])\n count = 0\n\n for message in train_df.text.astype(np.str): # look up each message in model\n embedded_message = message_vector(model, message)\n if embedded_message is None:\n count += 1\n # print(\"---------train-----------\")\n # # print(message)\n # print(\"--------------------\")\n continue\n train_embedding[count] = embedded_message\n count += 1\n\n count = 0\n for message in test_df.text.astype(np.str): # look up each message in model\n embedded_message = message_vector(model, message)\n if embedded_message is None:\n count += 1\n # print(\"---------test-----------\")\n # # print(message)\n # print(\"--------------------\")\n continue\n test_embedding[count] = embedded_message\n count += 1\n\n x_train_tf, x_test_tfidf = tf_idf(train_embedding, test_embedding)\n return x_train_tf, x_test_tfidf, train_df, test_df\n","sub_path":"feature_extraction.py","file_name":"feature_extraction.py","file_ext":"py","file_size_in_byte":9122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"186291638","text":"#the hotel function will calucate how much the user will have to pay based on the number of nights they are staying\r\ndef hotel_cost(num_nights):\r\n 
return num_nights*500.00\r\n\r\n#the plane_cost function will charge the user an amount based on the choice of the airport they will be landing at \r\ndef plane_cost(airport_choice):\r\n if airport_choice.upper()==\"A\":\r\n airport_choice=500\r\n elif airport_choice.upper()==\"B\":\r\n airport_choice=1000\r\n elif airport_choice.upper()==\"C\":\r\n airport_choice=1500\r\n return airport_choice\r\n\r\n# the type car function will charge the user an amount based on the choice of the type of car they want \r\ndef type_car(car):\r\n if car.upper()==\"A\":\r\n car=200\r\n elif car.upper()==\"B\":\r\n car=450\r\n elif car.upper()==\"C\":\r\n car=700\r\n elif car.upper()==\"D\":\r\n car=1200\r\n return car\r\n\r\n#the car_rental function will calucate how much the user will have to pay to use the car based on the number of days they will be using the car \r\ndef car_rental(num_days):\r\n return num_days\r\n\r\n#the holiday_cost fuction will calculate the total cost of the trip\r\n#if the user stays for more than 10 days they will recive a 500 discount \r\ndef holiday_cost(num_nights,airport_choice,num_days,car):\r\n if num_nights>=5000:\r\n return num_nights+airport_choice+(num_days*car)-500\r\n elif num_nights<5000:\r\n return num_nights+airport_choice+(num_days*car)\r\n\r\n\r\nprint(\"Welcome to travel-With-Us\")\r\n\r\nprint(\"\\n\")\r\n\r\nprint(\"NB:please note we have a discount for those that choose to stay longer than 10 days at our accomdation\")\r\n\r\nnum_nights=int(input(\"Please enter the number of days you be staying with us: \"))\r\n\r\nairport_choice=input(\"Please choose which airport you be flying to\\nA.London Heathrow Airport\\nB.London Gatwick Airport\\nC.Manchester airport\\n:\")\r\n\r\nnum_days=int(input(\"Plese enter the number of days you will be renting the car for: \"))\r\n\r\ncar=input(\"Please choose which type of car you would like to rent\\nA.Small car\\nB.4x4\\nC.Kombi\\nD.Sports car\")\r\n\r\n#the functions are now stored into varibles\r\nnum_nights=hotel_cost(num_nights)\r\nairport_choice=plane_cost(airport_choice)\r\nnum_days=car_rental(num_days)\r\ncar=type_car(car)\r\ntotal=holiday_cost(num_nights, airport_choice,num_days,car)\r\n\r\n#the function is called and the total of the trip is printed\r\nprint(f\"the total cost of your trip will be R{holiday_cost(num_nights, airport_choice,num_days,car)}\")\r\n \r\n\r\n","sub_path":"Defining My Own Functions/holiday.py","file_name":"holiday.py","file_ext":"py","file_size_in_byte":2471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"178275247","text":"from kermas.bjson import connection\nfrom kermas.views.base import BaseViewer\n\n\n \n \nclass MainViewer(BaseViewer):\n def __init__(self, request):\n super(MainViewer, self).__init__(request)\n content = \"Main Page\"\n self.layout.content = content\n \n \n\n def foobar(self):\n if self.route == 'home':\n self.connection.call.set_context('status')\n self.layout.subheader = 'Main Page'\n user = self.connection.call.whoami()\n content = \"Eddie being run by %s\" % user\n self.layout.content = content\n elif self.route == 'status':\n self.connection.call.set_context('status')\n context = self.request.matchdict['context']\n if context in ['fortune', 'whoami']:\n content = getattr(self.connection.call, context)()\n self.layout.content = '
<pre>%s</pre>
' % content\n else:\n self.layout.content = 'Status request with context %s' % context\n \n","sub_path":"kermas/views/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"563728880","text":"# Copyright 2018 Francesco Ceccon\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Visitor applying rules for convexity propagation.\"\"\"\nfrom suspect.pyomo.expressions import (\n nonpyomo_leaf_types,\n NumericConstant,\n Var,\n Constraint,\n Objective,\n MonomialTermExpression,\n AbsExpression,\n ProductExpression,\n DivisionExpression,\n LinearExpression,\n SumExpression,\n PowExpression,\n NegationExpression,\n UnaryFunctionExpression,\n SimpleExpression,\n ScalarExpression,\n _GeneralExpressionData,\n)\nfrom suspect.pyomo.quadratic import QuadraticExpression\nfrom suspect.interfaces import CombineUnaryFunctionRules\nfrom suspect.visitor import Visitor\nfrom suspect.convexity.rules import * # pylint: disable=wildcard-import\n\n\n_expr_to_rule_map = dict()\n_expr_to_rule_map[NumericConstant] = ConstantRule()\n_expr_to_rule_map[Var] = VariableRule()\n_expr_to_rule_map[Constraint] = ConstraintRule()\n_expr_to_rule_map[Objective] = ObjectiveRule()\n_expr_to_rule_map[MonomialTermExpression] = ProductRule()\n_expr_to_rule_map[ProductExpression] = ProductRule()\n_expr_to_rule_map[DivisionExpression] = DivisionRule()\n_expr_to_rule_map[LinearExpression] = LinearRule()\n_expr_to_rule_map[SumExpression] = SumRule()\n_expr_to_rule_map[PowExpression] = PowerRule()\n_expr_to_rule_map[NegationExpression] = NegationRule()\n_expr_to_rule_map[AbsExpression] = AbsRule()\n_expr_to_rule_map[QuadraticExpression] = QuadraticRule()\n_expr_to_rule_map[SimpleExpression] = ExpressionRule()\n_expr_to_rule_map[ScalarExpression] = ExpressionRule()\n_expr_to_rule_map[_GeneralExpressionData] = ExpressionRule()\n_expr_to_rule_map[UnaryFunctionExpression] = CombineUnaryFunctionRules({\n 'abs': AbsRule(),\n 'sqrt': SqrtRule(),\n 'exp': ExpRule(),\n 'log': LogRule(),\n 'log10': Log10Rule(),\n 'tan': TanRule(),\n 'atan': AtanRule(),\n 'sin': SinRule(),\n 'asin': AsinRule(),\n 'cos': CosRule(),\n 'acos': AcosRule(),\n})\n\n\ndef propagate_expression_convexity(expr, convexity, mono, bounds):\n if type(expr) in nonpyomo_leaf_types:\n rule = _expr_to_rule_map[NumericConstant]\n elif expr.is_constant():\n rule = _expr_to_rule_map[NumericConstant]\n elif expr.is_variable_type():\n rule = _expr_to_rule_map[Var]\n else:\n assert expr.is_expression_type()\n rule = _expr_to_rule_map[type(expr)]\n return rule.apply(expr, convexity, mono, bounds)\n\n\nclass ConvexityPropagationVisitor(Visitor):\n \"\"\"Visitor applying convexity rules.\"\"\"\n def handle_result(self, expr, result, convexity):\n convexity[expr] = result\n return not result.is_unknown()\n\n def visit_expression(self, expr, convexity, mono, bounds):\n return True, propagate_expression_convexity(expr, convexity, mono, 
bounds)\n","sub_path":"suspect/convexity/visitor.py","file_name":"visitor.py","file_ext":"py","file_size_in_byte":3273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"469993760","text":"n, m= map(int, input().split())\nrow, col, dir = map(int, input().split())\n\ncheck = [[0]*m for _ in range(n)]\ncheck[row][col] = 1\n\narray=[]\nfor i in range(n) :\n array.append(list(map(int, input().split())))\n\n\n\ndx=(-1, 0, 1, 0)\ndy=(0, 1, 0, -1)\n\ndef turn_left() :\n global dir\n dir-=1\n if dir==-1 : dir=3\n\n\ncount = 1\nturn = 0\n\nwhile True :\n turn_left()\n nx = row+dx[dir]\n ny = col+dy[dir]\n\n if (check[nx][ny]==0 and array[nx][ny]==0) :\n check[nx][ny]=1\n row=nx\n col=ny\n count+=1\n turn=0\n continue\n\n else :\n turn+=1\n\n if( turn ==4 ) :\n nx = row-dx[dir]\n ny = col-dy[dir]\n\n if(array[nx][ny]==0) :\n row = nx\n col = ny\n else :\n break\n turn=0\n\n\nprint(count)\n\n'''\n#맵 크기\nn, m = map(int, input().split())\n\nrow, col, dir = map(int, input().split())\n\ncheck=[[0]*m for _ in range(n)]\ncheck[row][col]=1 #현재 좌표 방문처리\n\narray=[]\nfor i in range(n) :\n array.append(list(map(int, input().split())))\n\n#0 : 북쪽보고있을 때 서쪽으로 이동은 -1, 0\ndx=[-1, 0, 1, 0]\ndy=[0, 1, 0, -1]\n\ndef left_turn() :\n #왼족으로 회전 : 0->3->2->1 순으로 회전\n #0->3으로갈때 -1이 나오면 서쪽으로 회전\n global dir\n dir-=1\n if(dir==-1) : dir=3\n print(dir)\n\ncount =1\nturn_time = 0\n\nwhile True :\n left_turn()\n nx = row+dx[dir]\n ny = col+dy[dir]\n\n #회전한 후, 가보지 않은 칸이 존재하는 경우\n if(check[nx][ny]==0 and array[nx][ny]==0) :\n check[nx][ny]=1\n row=nx\n col=ny\n count+=1\n turn_time=0\n continue\n\n #회전한 후, 가보지 않은 칸이 없거나 바다인경우\n else :\n turn_time+=1\n\n #4방향 모두 이동불가\n if turn_time==4 :\n nx=row-dx[dir]\n ny=col-dy[dir]\n #뒤로갈 수 있다면 이동하기\n if(array[nx][ny]==0) :\n row=nx\n col=ny\n #바다로 막혀있는 경우\n else :\n break\n turn_time=0\n\n\nprint(f'결과{count}')\n\n'''\n\n","sub_path":"Part2.(4)Game.py","file_name":"Part2.(4)Game.py","file_ext":"py","file_size_in_byte":2203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"262610383","text":"from query_tcga import api\nfrom query_tcga import query_tcga as qt\nfrom query_tcga import config\nimport pandas as pd\nimport logging\n\nlogging.basicConfig(level=logging.DEBUG)\n\nTEST_SAMPLE_FILE_ID='0001801b-54b0-4551-8d7a-d66fb59429bf'\nTEST_PROJECT='TCGA-BLCA'\nTEST_CLIN_FILE_ID='6082fbb4-1f13-4e58-a0fb-33574354b74b'\n\nTEST_DATA_DIR='test/test_data'\nconfig.set_value(\n GDC_DATA_DIR=TEST_DATA_DIR,\n GDC_TOKEN_PATH='/Users/jacquelineburos/Downloads/gdc-user-token.2016-09-26T12-23-27-04-00.txt'\n )\n\n\ndef test_get_data_sample_file():\n res = api.get_data(endpoint_name='files', query_args={'files.file_id': TEST_SAMPLE_FILE_ID})\n assert len(res.json()['data']['hits']) == 1\n\n\ndef test_get_data_sample_file_with_fields():\n res = api.get_data(endpoint_name='files',\n query_args={'files.file_id': TEST_SAMPLE_FILE_ID},\n fields=config.get_setting_value('DEFAULT_FILE_FIELDS'),\n )\n assert len(res.json()['data']['hits']) == 1\n\n\ndef test_get_data_clin_file_with_fields():\n res = api.get_data(endpoint_name='files',\n query_args={'files.file_id': TEST_CLIN_FILE_ID},\n fields=config.get_setting_value('DEFAULT_FILE_FIELDS'),\n )\n assert len(res.json()['data']['hits']) == 1\n\n\ndef test_get_fileinfo_data_sample_file():\n res = api.get_fileinfo_data(file_id = TEST_SAMPLE_FILE_ID)\n assert isinstance(res, pd.DataFrame)\n assert len(res.index)==1\n\n\ndef test_get_fileinfo_data_clin_file():\n res = 
api.get_fileinfo_data(file_id = TEST_CLIN_FILE_ID)\n assert isinstance(res, pd.DataFrame)\n assert len(res.index)==1\n\n\ndef test_get_fileinfo_data_multiple_files():\n res = api.get_fileinfo_data(file_id = [TEST_SAMPLE_FILE_ID, TEST_CLIN_FILE_ID])\n assert isinstance(res, pd.DataFrame)\n assert len(res.index)==2\n\n\ndef test_get_fileinfo_data_file_details():\n clin = qt.get_clinical_data(project_name=TEST_PROJECT, data_dir=TEST_DATA_DIR, n=1)\n res = api.get_fileinfo_data(file_id = clin['_source_file_uuid'][0])\n assert isinstance(res, pd.DataFrame)\n assert len(res.index)==1\n\n \n","sub_path":"test/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":2158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"159354548","text":"#!./venv/bin/python3\n\nimport PIL.Image\nimport PIL.ExifTags\n\n\nclass NotPhotoType(OSError):\n pass\n\n\nclass HasntGPSData(KeyError):\n pass\n\n\nclass RetrieverPhotoInformation:\n def __init__(self, name_file):\n if not self.is_photo(name_file):\n raise NotPhotoType\n\n self.photo_name = name_file\n img = PIL.Image.open(name_file)\n self.__exif = self.extractEXIF(img)\n\n def extractEXIF(self, img):\n try:\n exif = {\n PIL.ExifTags.TAGS[k]: v\n for k, v in img._getexif().items() if k in PIL.ExifTags.TAGS\n }\n except AttributeError:\n exif = None\n\n return exif\n\n @staticmethod\n def dms_to_dd(d, m, s, direction):\n \"\"\"\n Convert dms (degree, minutes, seconds) type of coordinates to dd (Decimal Degrees) \n \"\"\"\n \n dd = float(d) + (float(m) / 60.0) + (float(s) / 3600.0)\n if direction.upper() in \"SW\":\n dd *= -1\n return dd\n\n def is_photo(self, photo_name):\n return photo_name.split('.')[-1].lower() in ['jpg']\n\n def transform_gps_data(self, source):\n return [val / div for val, div in source]\n\n def get_coordinates(self):\n \"\"\"\n Returns coordinates of photo in dictionary type\n Keys: lat, lon\n Values: float\n \"\"\"\n\n if self.__exif is None:\n return None\n\n #positions in the array\n latitude = 2\n longitude = 4\n pos_cord_dir_lat = 1\n pos_cord_dir_lon = 3\n\n try:\n gps_data = self.__exif['GPSInfo']\n except KeyError:\n raise HasntGPSData\n\n cord = {\n 'lat': {\n 'val': self.transform_gps_data(gps_data[latitude]),\n 'dir': gps_data[pos_cord_dir_lat]\n },\n 'lon': {\n 'val': self.transform_gps_data(gps_data[longitude]),\n 'dir': gps_data[pos_cord_dir_lon]\n }\n }\n\n lat = self.dms_to_dd(\n cord['lat']['val'][0], # degree\n cord['lat']['val'][1], # minutes\n cord['lat']['val'][2], # seconds\n cord['lat']['dir'] # direction\n )\n\n lon = self.dms_to_dd(\n cord['lon']['val'][0], # degree\n cord['lon']['val'][1], # minutes\n cord['lon']['val'][2], # seconds\n cord['lon']['dir'] # direction\n )\n\n return {'lat': lat, 'lon': lon}\n\n def get_date(self):\n \"\"\"\n Returns the date, when the photo was created\n \"\"\"\n date, time = self.__exif['DateTimeOriginal'].split(' ')\n date, time = date.split(':'), time.split(':')\n return {\n 'year': date[0],\n 'month': date[1],\n 'day': date[2],\n 'hour': time[0],\n 'minute': time[1],\n 'second': time[2]\n }\n","sub_path":"retriever_photo_info.py","file_name":"retriever_photo_info.py","file_ext":"py","file_size_in_byte":2909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"290854758","text":"#! /usr/bin/python\n# -*- coding: utf-8 -*-\n\n'''\nThe following are the terms from n=1 to n=10 of the count-and-say sequence:\n 1. 1\n 2. 11\n 3. 21\n 4. 1211\n 5. 111221 \n 6. 
312211\n 7. 13112221\n 8. 1113213211\n 9. 31131211131221\n10. 13211311123113112211\n'''\n\nimport sys\n\ndef countAndSayHelper(s, k, i, sLength):\n if(i == sLength - 1): #if end of string\n return(str(k) + s[i])\n elif(s[i] == s[i + 1]): #if next char in string is equal to current char\n return(countAndSayHelper(s, k+1, i + 1, sLength))\n else: #if next char in string is diff to current char\n return(str(k) + s[i] + countAndSayHelper(s, 1, i + 1, sLength))\n \n\nclass Solution(object):\n def countAndSay(self, n):\n \"\"\"\n :type n: int\n :rtype: str\n \"\"\"\n s = '1'\n while(n > 1):\n sLength = len(s)\n s = countAndSayHelper(s, 1, 0, sLength)\n n -= 1\n return s\n\n\nif __name__ == '__main__':\n n = int(sys.argv[1])\n print (Solution().countAndSay(n))","sub_path":"38.Count_and_Say_dashen.py","file_name":"38.Count_and_Say_dashen.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"157613358","text":"import os\nimport csv\nimport numpy as np\nimport pandas as pd\nimport logging\nfrom collections import deque\n\nimport torch\n\ndef set_logger(log_path):\n \"\"\" Set a logger to record what happened when running the program and record the information on teriminal to a permanent file.\n \n Args:\n log_path (string): where to store the log.\n\n Examples:\n logging.info(\"Start training...\")\n \"\"\"\n # Get logger object from the logging module.\n logger = logging.getLogger()\n # Set the level of logger as INFO:\n # report events that occur during normal operation of a program;\n # Confirmation that things are working as expected. \n logger.setLevel(logging.INFO)\n\n # StreamHandler: send the logging output to the console.\n # with the stream such as sys.stdout, sys.stderr\n if not logger.handlers: # There may already exists stream_handler.\n stream_handler = logging.StreamHandler() # default: sys.stderr\n stream_handler.setFormatter(logging.Formatter('%(message)s'))\n logger.addHandler(stream_handler)\n\n # FileHandler: send the logging output to a local disk file. (note: 'w' will overwrite the original file if it exists)\n file_handler = logging.FileHandler(log_path, 'w') \n # %(asctime)s: human-readable time('2003-07-08 16:49:45,896'); ascii time from unix literature.\n # %(levelname)s Text logging level for the message ('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL').\n file_handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s: %(message)s'))\n logger.addHandler(file_handler)\n\ndef build_exp_folders(output_dir, exp_name):\n \"\"\" Build experiment folders such as saving models and summarizing statistics. 
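\n\n    Illustrative usage (the directory names here are hypothetical):\n        build_exp_folders(\"exp\", \"run1\")  # -> (\"exp/run1/checkpoints\", \"exp/run1/summaries\")\n    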
\"\"\"\n # folder of saved models\n checkpoints_dir = \"{}/{}/{}\".format(output_dir, exp_name, \"checkpoints\")\n if not os.path.exists(checkpoints_dir): os.makedirs(checkpoints_dir)\n # folder for summaries\n summaries_dir = \"{}/{}/{}\".format(output_dir, exp_name, \"summaries\")\n if not os.path.exists(summaries_dir): os.makedirs(summaries_dir)\n\n return checkpoints_dir, summaries_dir \n\ndef save_stats(log_dir, statistics_file_name, list_of_statistics, create=False):\n \"\"\"\n Saves a statistics .csv file with the statistics.\n\n Args:\n log_dir (str): directory of log\n statistics_file_name (str): name of .csv file\n list_of_statistics (list) : a list of statistics to add in the file\n create (boolean): if True creates a new file, if False adds list to existing\n \"\"\"\n if create:\n with open(\"{}/{}.csv\".format(log_dir, statistics_file_name), 'w+') as f:\n writer = csv.writer(f)\n writer.writerow(list_of_statistics)\n else:\n with open(\"{}/{}.csv\".format(log_dir, statistics_file_name), 'a') as f:\n writer = csv.writer(f)\n writer.writerow(list_of_statistics)\n\ndef get_best_stat_with_epoch(log_dir, statistics_file_name, metric = \"valid_accuracy\", max_best = True):\n \"\"\"\n Get the best value of the given metric and its epoch number. \n\n Args:\n log_dir (str): directory of log\n statistics_file_name (str): name of .csv file ('file_name' for file_name.csv)\n metric (str): the name of the metric you want to get the best value\n max_best (boolean): True if larger metric is better; False if smaller metric is bettercd\n\n Returns:\n metric_best_val (float): the best value of the given metric\n epoch (int) : the epoch number getting the best value of the metric\n \n Examples:\n >>> # file: exp/mnist/exp_mnist_bs100_i784_h100_o10/summaries/summary_stat.csv\n >>> get_best_stat_with_epoch('exp/mnist/exp_mnist_bs100_i784_h100_o10/summaries', 'summary_stat', metric='val_accuracy') \n >>> get_best_stat_with_epoch('exp/mnist/exp_mnist_bs100_i784_h100_o10/summaries', 'summary_stat', metric='val_loss', max_best=False) \n \"\"\"\n df = pd.read_csv(\"{}/{}.csv\".format(log_dir, statistics_file_name), sep = \",\", header = 0)\n return (df[metric].max(), df['epoch'][df[metric].idxmax()]) if max_best else (df[metric].min(), df['epoch'][df[metric].idxmin()]) \n\nclass checkpoint_saver(object):\n \"\"\" Save and store the checkpoints of model in pytorch. \"\"\"\n\n def __init__(self, max_to_keep = 5):\n \"\"\" Save and store the checkpoints of model in pytorch.\n \n Args:\n max_to_keep(int): the maximum number of checkpoints we will keep for the saver object.\n \"\"\" \n\n self.max_to_keep = 5;\n self.keep_list = deque() # use a queue to keep the maximum number of checkpoints saved.\n\n def save_state(self, state, file_path):\n \"\"\" Save the state dictionary of a checkpoint to the path. \"\"\"\n self.keep_list.append(file_path)\n torch.save(state, file_path)\n\n while (len(self.keep_list) > self.max_to_keep): os.remove(self.keep_list.popleft())\n return file_path\n\n @staticmethod\n def load_state(file_path):\n \"\"\" Load the state dictionary of a checkpoint to the path. 
\"\"\"\n return torch.load(file_path)\n","sub_path":"pytorch/vision/egs/mnist/utils/storage.py","file_name":"storage.py","file_ext":"py","file_size_in_byte":5061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"82444130","text":"'''\nAuthor: PengKang6\nDescription: 模型训练前将音频和语料进行预处理\n'''\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\nimport json\n\nfrom util import get_config\n\nimport sys\nsys.path.append(\"..\")\nfrom utils.load_dataset import load_data\nfrom utils.audio_process import get_max_audio_length\nfrom utils.text_process import get_process_text_list, get_max_label_length, tokenize\n\n\nif __name__ == \"__main__\":\n configs = get_config()\n\n dataset_name = configs[\"preprocess\"][\"dataset_name\"]\n data_path = configs[\"train\"][\"data_path\"]\n text_row_style = configs[\"preprocess\"][\"text_row_style\"]\n num_examples = configs[\"train\"][\"num_examples\"]\n\n # 获取语料里所有语音路径list和文本list\n audio_data_path_list, text_list = load_data(dataset_name, data_path, num_examples)\n\n # 基于文本按照某种mode切分文本\n mode = configs[\"preprocess\"][\"text_process_mode\"]\n process_text_list = get_process_text_list(text_list, mode)\n\n # 将文本处理成对应的token数字序列\n text_int_sequences, tokenizer = tokenize(process_text_list)\n\n # 获取音频和文本的最大length,从而进行数据补齐\n audio_feature_type = configs[\"other\"][\"audio_feature_type\"]\n max_input_length = get_max_audio_length(audio_data_path_list, audio_feature_type)\n max_label_length = get_max_label_length(text_int_sequences)\n\n # 将数据集的相关信息写入dataset_information.json文件\n dataset_information_path = configs[\"preprocess\"][\"dataset_information_path\"]\n\n dataset_info = {}\n dataset_info[\"vocab_size\"] = len(tokenizer.index_word)\n dataset_info[\"max_input_length\"] = max_input_length\n dataset_info[\"max_label_length\"] = max_label_length\n dataset_info[\"index_word\"] = tokenizer.index_word\n dataset_info[\"word_index\"] = tokenizer.word_index\n\n with open(dataset_information_path, 'w', encoding=\"utf-8\") as f:\n json.dump(dataset_info, f, ensure_ascii=False, indent=4)\n\n print(\"语音文件数:\", len(audio_data_path_list))\n print(\"vocab_size:\", dataset_info[\"vocab_size\"])\n print(\"最长语音:\", dataset_info[\"max_input_length\"])\n print(\"最长转写文本:\", dataset_info[\"max_label_length\"])\n","sub_path":"hlp/stt/ds2/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":2206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"86378800","text":"from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QMessageBox, QLineEdit, QLCDNumber, QDial, QSlider, QLabel, QFormLayout, QLineEdit, QTextEdit, QFileDialog, QProgressBar, QRadioButton, QButtonGroup\nfrom PyQt5.QtGui import QIcon\nfrom PyQt5.QtCore import QCoreApplication, Qt, QBasicTimer, QThread, pyqtSignal\n\nfrom random import randint\nimport hashlib\nimport requests\nimport os, time, sys, json\n\n\n\nclass dfThread(QThread):\n\n trigger = pyqtSignal(str)\n \n\n def __init__(self,ffsize,totalchunk,username,userid,file_up,filename_up, step):\n super().__init__()\n self.ffsize = ffsize\n self.totalchunk = totalchunk\n self.username = username\n self.userid = userid\n self.file_up = file_up\n #文件分片大小\n self.kuaidx = 2097152\n self.filename_up = filename_up\n self.step = step\n print(self.file_up)\n\n\n def run(self):\n #重写线程执行的run函数\n #触发自定义信号\n #for i in range(20):\n #time.sleep(1)\n # 通过自定义信号把待显示的字符串传递给槽函数\n #self.trigger.emit(str(i))\n print(\"123\")\n 
self.trigger.emit(str(99))\n\n def runssss(self):\n print(1112222)\n with open(self.file_up,'rb') as f:\n chunknumber = 0\n chunknumber += 1\n print(\"当前块编号:\" + str(chunknumber))\n #for chunk in iter(lambda: f.read(self.kuaidx),b''):\n for chunk in f.read(self.kuaidx):\n if not f.read():\n break\n print(123)\n chunknumber += 1\n print(\"当前块编号:\" + str(chunknumber))\n if len(chunk) < self.kuaidx:\n kuaisj = len(chunk)\n else:\n kuaisj = self.kuaidx\n print(len(chunk))\n while True:\n if self.upfile3(chunknumber, kuaisj, chunk):\n self.step = self.step + (100/self.totalchunk)\n self.pbar.setValue(self.step)\n break\n else:\n time.sleep(3)\n\n\n def upfile3(self,chunknumber, kuaisj, chunk):\n url = self.upfileurl\n header = {}\n header['Token'] = self.login_session\n dd = {} \n dd['chunkNumber'] = (None,chunknumber)\n dd['chunkSize'] = (None,self.kuaidx)\n dd['currentChunkSize'] = (None,kuaisj)\n dd['totalSize'] = (None,self.totalsize)\n dd['identifier'] = (None,self.ident)\n dd['filename'] = (None,self.filename_up)\n dd['relativePath'] = (None,self.filename_up)\n dd['totalChunks'] = (None,self.totalchunk)\n dd['accept'] = (None,\"*\")\n dd['userId'] = (None,self.userid)\n dd['colonyId'] = (None,43)\n dd['toPath'] = (None,\"/\")\n dd['userHomeDir'] = (None, \"/public1/\" + \"/home/\" + self.username + \"/\" + self.username)\n dd['upfile'] = (self.filename_up,self.chunk)\n #print(dd)\n try:\n r = requests.post(url,headers=header,files=dd)\n print(r.content)\n if r.json()['code'] == 200:\n return 1\n return None\n except requests.exceptions.Timeout as e:\n print('请求超时:'+str(e.message))\n except requests.exceptions.HTTPError as e:\n print('http请求错误:'+str(e.message))\n except requests.exceptions.ConnectionError:\n print('网卡断了')\n\n \n\nclass Example(QWidget):\n def __init__(self):\n super().__init__()\n #文件分片大小\n self.kuaidx = 2097152\n #self.kuaidx = 20971520\n self.tokenurl = \"http://11.2.77.3:30089/portal-test/user/login/account\"\n self.upfileurl = \"http://11.2.77.3:30089/portal-test/store/file/upload\"\n self.useridurl = \"http://11.2.77.3:30089/portal-test/user/person/get\"\n self.changepemurl = \"http://11.2.77.3:30089/portal-test/store/file/merge\"\n self.rbinfo = \"内部\"\n self.initUi3()\n self.ffsize = 123\n self.totalchunk = 123\n self.username = 123\n self.userid = 123\n self.file_up = 123\n self.filename_up =123\n self.step =123\n self.work = dfThread(self.ffsize,self.totalchunk,self.username,self.userid,self.file_up,self.filename_up, self.step)\n \n\n \n\n def initUi(self):\n lcd = QLCDNumber(self)\n lab = QLabel(self)\n #dial = QDial(self)\n dial = QSlider(self)\n \n \n self.setGeometry(300,300,350,250)\n self.setWindowTitle('忽然之间')\n\n lab.setGeometry(90,80,70,60)\n lcd.setGeometry(100,50,150,60)\n dial.setGeometry(120,120,100,100)\n\n dial.valueChanged.connect(lcd.display)\n lab.setText('a')\n self.show()\n\n def initUi2(self):\n self.setGeometry(300, 300, 350, 250)\n self.setWindowTitle('CASJC')\n self.lab = QLabel('方向',self)\n self.lab.setGeometry(150,100,50,50)\n self.show()\n\n def initUi3(self):\n self.setGeometry(300,300,300,200)\n self.setWindowTitle('CASJC')\n \n self.formlayout = QFormLayout()\n\n self.rb1 = QRadioButton('线上',self)\n self.rb2 = QRadioButton('内部',self)\n self.rb2.setChecked(True)\n\n self.bg1 = QButtonGroup(self)\n self.bg1.addButton(self.rb1,11)\n self.bg1.addButton(self.rb2,22)\n \n self.info1 = \"\"\n self.info2 = \"\"\n\n self.bg1.buttonClicked.connect(self.rbclicked)\n \n self.nameLabel = QLabel(\"账号\")\n self.nameLineEdit = QLineEdit(\"\")\n \n 
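# 密码 means password; QLineEdit.setEchoMode(QLineEdit.Password) could be used on the field below to mask typed input (it is left visible here).\n        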
self.introductionLabel = QLabel(\"密码\")\n self.introductionLineEdit = QLineEdit(\"\")\n #self.introductionLineEdit = QTextEdit(\"\")\n\n self.bt1 = QPushButton('登录',self)\n self.bt1.setGeometry(115,150,70,30)\n self.bt1.setToolTip('登录先进云计算平台')\n\n self.formlayout.addRow(self.rb1,self.rb2)\n self.formlayout.addRow(self.nameLabel,self.nameLineEdit)\n self.formlayout.addRow(self.introductionLabel,self.introductionLineEdit)\n #formlayout.addRow(fileup,self.filebutton)\n self.formlayout.addRow(self.bt1)\n self.setLayout(self.formlayout)\n\n self.bt1.clicked.connect(self.Casjc_login)\n \n self.show()\n\n def rbclicked(self):\n sender = self.sender()\n if sender == self.bg1:\n if self.bg1.checkedId() == 11:\n self.tokenurl = \"https://www.casjc.com/portal/user/login/account\"\n self.upfileurl = \"https://console.casjc.com/portal/store/file/upload\"\n self.useridurl = \"https://www.casjc.com/portal/user/person/get\"\n self.changepemurl = \"https://console.casjc.com/portal/store/file/merge\"\n self.rbinfo = \"线上\"\n\n else:\n self.tokenurl = \"http://11.2.77.3:30089/portal-test/user/login/account\"\n self.upfileurl = \"http://11.2.77.3:30089/portal-test/store/file/upload\"\n self.useridurl = \"http://11.2.77.3:30089/portal-test/user/person/get\"\n self.changepemurl = \"http://11.2.77.3:30089/portal-test/store/file/merge\"\n \n\n def initUi4(self):\n self.setGeometry(300,300,300,200)\n self.setWindowTitle('CASJC')\n \n formlayout = QFormLayout()\n \n fileup = QLabel(\"文件上传\")\n self.filebutton = QPushButton(\"选择文件\",self)\n \n self.filebutton.clicked.connect(self.selefile) \n \n self.show()\n\n def selefile(self):\n self.file_up, self.bbb = QFileDialog.getOpenFileName(self,\"打开文件\",os.getcwd(),\"All File(*)\")\n p, self.filename_up = os.path.split(self.file_up)\n #print(self.filename_up)\n try:\n self.selefilenameup.setVisible(False)\n except:\n pass\n self.selefile = QLabel(\"选中文件\")\n self.selefilename = QLabel(self.filename_up)\n\n self.jindu = QLabel(\"进度条\")\n self.pbar = QProgressBar(self)\n self.pbar.setGeometry(30, 40, 200, 25)\n \n self.btn = QPushButton('开始上传', self)\n self.btn.move(40, 80)\n \n self.formlayout.addRow(self.selefile,self.selefilename)\n self.formlayout.addRow(self.jindu,self.pbar)\n self.formlayout.addRow(self.btn)\n self.btn.clicked.connect(self.cmd)\n\n self.timer = QBasicTimer()\n self.step = 0\n self.setGeometry(300, 300, 320, 200)\n \n\n def submitStore(self):\n print(self.nameLineEdit.text())\n print(self.introductionLineEdit.text())\n print(self.filename_up)\n\n\n def timerEvent(self, e):\n\n if self.step >= 100:\n self.step = 0\n self.pbar.setValue(self.step)\n self.timer.stop()\n self.btn.setText('完成')\n return\n #self.step = self.step+1\n #self.pbar.setValue(self.step)\n \n def cmd(self):\n print(\"do action\")\n self.btn.setEnabled(False)\n if self.timer.isActive():\n self.timer.stop()\n self.btn.setText('开始')\n else:\n self.timer.start(100, self)\n self.btn.setText('上传中')\n print(self.username)\n self.userid = self.getuserId()\n print(\"文件名: \" + self.filename_up)\n #文件大小\n self.ffsize = os.path.getsize(self.file_up)\n print(\"文件大小: \" + str(self.ffsize))\n #文件分片块数\n self.totalchunk = int(self.ffsize / self.kuaidx) + 1\n print(\"文件块数: \" + str(self.totalchunk))\n #self.work.start()\n\n #shangchuang = dfThread(self.ffsize,self.totalchunk,self.username,self.userid,self.file_up,self.filename_up, self.step)\n #shagnchuang.start()\n \n self.filerun(self.ffsize,self.totalchunk,self.username,self.userid)\n \n self.merge()\n print(\"ok\")\n 
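# Every chunk has been uploaded and merge() asked the server to reassemble the file; hide the progress widgets and show a completion label instead.\n        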
self.selefile.setVisible(False)\n self.selefilename.setVisible(False)\n self.pbar.setVisible(False)\n self.jindu.setVisible(False)\n self.btn.setVisible(False)\n self.selefilenameup = QLabel(\"文件:\" + self.filename_up + \" 上传完成\")\n self.formlayout.addRow(self.selefilenameup)\n #self.btn.setEnabled(True)\n \n \n def execute(self):\n self.work.start()\n self.work.trigger.connect(self.display)\n\n def display(self):\n self.listWidget.addItem(str)\n\n \n def Casjc_login(self):\n self.username = self.nameLineEdit.text()\n #print(self.introductionLineEdit.text())\n url = self.tokenurl\n header = {}\n header['Content-Type'] = \"application/json\"\n data = {}\n data[\"account\"] = self.nameLineEdit.text()\n data[\"password\"] = self.introductionLineEdit.text()\n data[\"rememberMe\"] = False\n data[\"origin\"] = 0\n try:\n r = requests.post(url, headers=header, data=json.dumps(data))\n if r.status_code == 200:\n if r.json()['code'] == 200:\n print(r.json()['data'])\n self.login_session = r.json()['data']\n self.login_mess = '登录成功'\n self.nameLabel.setVisible(False)\n self.nameLineEdit.setVisible(False)\n self.introductionLabel.setVisible(False)\n self.introductionLineEdit.setVisible(False)\n self.bt1.setVisible(False)\n self.rb1.setVisible(False)\n self.rb2.setVisible(False)\n #self.bt2 = QPushButton('退出',self)\n #self.bt2.setGeometry(200,200,30,20)\n self.lab = QLabel(self.rbinfo,self)\n self.fileup = QLabel(\"文件上传\")\n self.filebutton = QPushButton(\"选择文件\",self)\n self.colonyId = QLabel(\"colonyId\")\n self.mycolonyId = QLineEdit(\"43\")\n self.colonypath = QLabel(\"路径\")\n self.mycolonypath = QLineEdit(\"/public1\")\n self.formlayout.addRow(self.lab)\n self.formlayout.addRow(self.colonyId,self.mycolonyId)\n self.formlayout.addRow(self.colonypath,self.mycolonypath)\n self.formlayout.addRow(self.fileup,self.filebutton)\n self.filebutton.clicked.connect(self.selefile)\n #self.bt2 = QPushButton('退出',self)\n #self.bt2.setGeometry(155,150,60,40)\n else:\n print('登录认证信息错误')\n self.login_mess = '登录认证信息错误'\n else:\n print('登录异常')\n self.login_mess = '登录异常'\n except requests.exceptions.ConnectionError:\n print(\"网络异常无法连接服务器\")\n self.login_mess = '网络异常无法连接服务器'\n except requests.exceptions.MissingSchema:\n print('请求的Url地址有误')\n self.login_mess = '请求的Url地址有误'\n except requests.exceptions.Timeout as e:\n print('请求超时:' + str(e.message))\n self.login_mess = '请求超时'\n except requests.exceptions.HTTPError as e:\n print('http请求错误:' + str(e.message))\n self.login_mess = 'http请求错误'\n reply = QMessageBox.information(self, \"登录提示信息\", self.login_mess, QMessageBox.Yes)\n\n #self.setLayout(self.formlayout)\n\n\n def getuserId(self):\n url = self.useridurl\n header = {}\n header[\"Authorization\"] = self.login_session\n header['Token'] = self.login_session\n header['Cookie'] = \"JSESSIONID=\" + self.login_session\n r = requests.get(url, headers=header)\n #print(r.content)\n print(r.json()['data']['id'])\n return r.json()['data']['id']\n\n def filerun(self,ffsize,totalchunk,username,userid):\n #文件md5值\n self.ident = cmd5(self.file_up)\n with open(self.file_up,'rb') as f:\n chunknumber = 0\n for chunk in iter(lambda: f.read(self.kuaidx),b''):\n chunknumber += 1\n print(\"当前块编号:\" + str(chunknumber))\n #md5 = hashlib.md5()\n #print(f.tell())\n #md5.update(chunk)\n if len(chunk) < self.kuaidx:\n kuaisj = len(chunk)\n else:\n kuaisj = self.kuaidx\n print(len(chunk))\n #chunkmd5 = md5.hexdigest()\n #print(chunkmd5)\n while True:\n if self.upfile2(chunknumber, kuaisj,ffsize,totalchunk,chunk,username,userid):\n self.step = 
self.step + (100/self.totalchunk)\n self.pbar.setValue(self.step)\n break\n else:\n time.sleep(3)\n\n\n def upfile2(self,chunknumber,cchunksize,totalsize,totalchunk,chunk,username,userid):\n url = self.upfileurl\n header = {}\n header['Token'] = self.login_session\n dd = {} \n dd['chunkNumber'] = (None,chunknumber)\n dd['chunkSize'] = (None,self.kuaidx)\n dd['currentChunkSize'] = (None,cchunksize)\n dd['totalSize'] = (None,totalsize)\n dd['identifier'] = (None,self.ident)\n dd['filename'] = (None,self.filename_up)\n dd['relativePath'] = (None,self.filename_up)\n dd['totalChunks'] = (None,totalchunk)\n dd['accept'] = (None,\"*\")\n dd['userId'] = (None,userid)\n dd['colonyId'] = (None,self.mycolonyId.text())\n dd['toPath'] = (None,\"/\")\n dd['userHomeDir'] = (None, self.mycolonypath.text())\n dd['upfile'] = (self.filename_up,chunk)\n #print(dd)\n try:\n r = requests.post(url,headers=header,files=dd)\n print(r.content)\n if r.json()['code'] == 200:\n return 1\n return None\n except requests.exceptions.Timeout as e:\n print('请求超时:'+str(e.message))\n except requests.exceptions.HTTPError as e:\n print('http请求错误:'+str(e.message))\n except requests.exceptions.ConnectionError:\n print('网卡断了')\n\n def merge(self):\n url = self.changepemurl\n header = {}\n header['Token'] = self.login_session\n header['Content-Type'] = \"application/json\"\n dd = {}\n dd['colonyId'] = self.mycolonyId.text()\n dd['filename'] = self.filename_up\n dd['identifier'] = self.ident\n dd['isFolder'] = False\n dd['toPath'] = \"/\"\n dd['totalSize'] = self.ffsize\n dd['totalChunks'] = self.totalchunk\n dd['relativePath'] = self.filename_up\n dd['userHomeDir'] = self.mycolonypath.text()\n dd['userId'] = self.userid\n print(dd['userHomeDir'])\n print(header)\n r = requests.post(url,headers=header, data=json.dumps(dd))\n print(r.content)\n print(r.json())\n \n\ndef cmd5(filename):\n print('safsdf')\n md5 = hashlib.md5()\n with open(filename,'rb') as f:\n for chunk in iter(lambda: f.read(2097152),b''):\n #md5 = hashlib.md5()\n #print(f.tell())\n md5.update(chunk)\n #print(md5.hexdigest())\n print(md5.hexdigest())\n return md5.hexdigest()\n \n\nif __name__ == \"__main__\":\n\n app = QApplication(sys.argv)\n ex = Example()\n sys.exit(app.exec_())\n\n\n","sub_path":"other/casjc_file.py","file_name":"casjc_file.py","file_ext":"py","file_size_in_byte":19401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"380716451","text":"import random\nfrom collections import deque\nfrom utils import splitText\nfrom aiwolfpy import contentbuilder as cb\nfrom FollowRole import Villager\n# calmのdivineansがoutofrange\n\n\nclass Werewolf(Villager.Villager):\n\n def initialize(self, base_info, diff_data, game_setting, myrole):\n self.base_info = base_info\n self.game_setting = game_setting\n self.diff_data = diff_data\n self.playerNum = len(self.base_info[\"remainTalkMap\"].keys())\n self.myrole = myrole\n self.agentIdx = int(self.base_info[\"agentIdx\"]) - 1\n self.day = -1\n self.agree_co = None\n\n def dayStart(self):\n self.day += 1\n self.voteop = None\n self.isCo = True\n self.isVote = False\n self.request_vote = False\n self.divineop = None\n self.Agreeque = deque([])\n self.talk_turn = 0\n self.istalk_vote = [False for _ in range(self.playerNum)]\n\n def attack(self):\n for d in self.base_info[\"statusMap\"].items():\n if d[1] == \"ALIVE\":\n return int(d[0])\n\n def talk(self):\n self.talk_turn += 2\n if not self.isCo:\n self.isCo = True\n return cb.AND(cb.AGREE(self.agree_co[0], 
self.agree_co[1], self.agree_co[2]), cb.COMINGOUT(self.agentIdx, \"VILLAGER\"))\n elif not self.isVote and self.voteop != None:\n self.isVote = True\n return cb.AND(cb.AGREE(self.request_vote_agree[0], self.request_vote_agree[1], self.request_vote_agree[2]), cb.VOTE(self.voteop))\n elif len(self.Agreeque) >= 1:\n AGREEText = self.Agreeque.pop()\n return cb.AGREE(AGREEText[0], AGREEText[1], AGREEText[2])\n\n elif not self.request_vote and self.talk_turn >= 3:\n for d in self.base_info[\"statusMap\"].items():\n if d[1] == \"ALIVE\":\n return cb.REQUEST(int(d[0]) - 1, cb.REQUEST(self.agentIdx, cb.VOTE(\"ANY\")))\n index = 0\n while True:\n if self.talk_turn <= 3:\n return cb.skip()\n if index == self.playerNum:\n return cb.skip()\n if not self.istalk_vote[index] and self.base_info[\"statusMap\"][str(index + 1)] == \"ALIVE\":\n self.istalk_vote[index] = True\n return cb.INQUIRE(index, cb.VOTE(\"ANY\"))\n else:\n index += 1\n else:\n return cb.skip()\n","sub_path":"wolf-strategy/FollowRole/Werewolf.py","file_name":"Werewolf.py","file_ext":"py","file_size_in_byte":2405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"473589013","text":"# Python 2.7.11\n\"\"\"\n# Joe Pasquantonio\n# Python Graph Practice\n# 4/7/2016\n#\n# Had an idea (unoriginal) to make the united states into a network graph. Started with New England\n# will continue to add with time\n#\n\n\n### reference: python-course.eu\n\"\"\"\n# North East\nNE = {'Massachusetts' : ['New Hampshire','Vermont','Rhode Island','Connecticut','New York'],\n'New Hampshire' :['Massachusetts','Maine','Vermont'],\n'Vermont' :['Massachusetts','New Hampshire','New York'],\n'Rhode Island' :['Connecticut','Massachusetts'],\n'Connecticut' :['Massachusetts','New York','Rhode Island'],\n'New York' :['Connecticut','Massachusetts','Vermont'],\n'Alaska' :[]}\n\n# generate a list of tuples. 
tuples are edges which show connection from one state to the next\ndef get_edges(graph):\n\tedges = []\n\tfor node in graph:\n\t\tfor edge in graph[node]:\n\t\t\tedges.append((node, edge))\n\treturn edges\n\n# generate a list of nodes in the graph\ndef get_nodes(graph):\n\tnodes = []\n\tfor node in graph:\n\t\tnodes.append(node)\n\treturn nodes\n\n# generate a list of all nodes that are not connected to any other nodes\ndef get_unconnected_nodes(graph):\n\tunconnected = []\n\tfor node in graph:\n\t\tif not graph[node]:\n\t\t\tunconnected.append(node)\n\treturn unconnected\n\nprint(\"Edges: {e}\".format(e = get_edges(NE)))\n\nprint(\"Nodes: {n}\".format(n = get_nodes(NE)))\n\nprint(\"Unconnected Nodes: {u}\".format(u = get_unconnected_nodes(NE)))","sub_path":"new_england.py","file_name":"new_england.py","file_ext":"py","file_size_in_byte":1376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"595291373","text":"import arcade\n\nSCREEN_WIDTH = 200\nSCREEN_HEIGHT = 200\nWIDTH = 20\nHEIGHT = 20\nMARGIN = 5\nROW_COUNT = 10\nCOLUMN_COUNT = 10\nBLOCK_X = 10\nBLOCK_Y = 10\n\nclass MyGame(arcade.Window):\n \"\"\"\n Main application class\n \"\"\"\n\n def __init__(self, width, height):\n super().__init__(width, height)\n\n arcade.set_background_color(arcade.color.BLACK)\n\n def on_draw(self):\n \"\"\"\n render the screen\n \"\"\"\n\n\n arcade.start_render()\n # draw a ROW_COUNT x COLUMN_COUNT grid of cells, computing each\n # cell's position locally from its row and column\n for row in range(ROW_COUNT):\n for column in range(COLUMN_COUNT):\n arcade.draw_rectangle_filled(BLOCK_X + column * WIDTH,BLOCK_Y + row * HEIGHT,WIDTH-MARGIN*2,HEIGHT-MARGIN*2,arcade.color.WHITE)\n\n def on_mouse_press(self, x, y, button, key_modifiers):\n \"\"\"\n Called when the user presses a mouse button\n \"\"\"\n pass\n\ndef main():\n\n window = MyGame(SCREEN_WIDTH,SCREEN_HEIGHT)\n arcade.run()\nif __name__ == \"__main__\":\n main()","sub_path":"Scratch Work/array_backed_grid.py","file_name":"array_backed_grid.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"415470151","text":"# If redfoot is not installed set the path to use the one relative to\r\n# us. 
This is useful for developing or experimenting with the core\r\n# code.\r\ntry:\r\n import redfootlib\r\nexcept:\r\n # redfoot must not be installed, try adding \r\n import sys, os\r\n\r\n # use directory of script importing us as base for relative names\r\n RFHOME = os.path.dirname(sys.argv[0])\r\n\r\n #paths = [\"lib\", \"examples\", \"chump-1.1/src/\"]\r\n paths = [\"lib\"] \r\n \r\n for path in paths:\r\n sys.path.append(os.path.join(RFHOME, path))\r\n","sub_path":"redfoot-1.6/dev_hack.py","file_name":"dev_hack.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"193682122","text":"# _*_ coding:utf-8 _*_\n# Filename:ServerUI.py\n# Python online chat: server side\n \nimport Tkinter\nimport tkFont\nimport socket\nimport thread\nimport time\nimport sys \n \nclass ServerUI(): \n \n title = '正在与xxx聊天中'\n local = '127.0.0.1'\n port = 8808\n #global serverSock;\n serverSock = None\n flag = False \n # initialize instance attributes (similar to a Java constructor)\n def __init__(self):\n self.root = Tkinter.Tk()\n self.root.title(self.title) \n \n # window layout built from 5 Frame panels\n self.frame = [Tkinter.Frame(),Tkinter.Frame(),Tkinter.Frame(),Tkinter.Frame(),Tkinter.Frame()] \n # label showing a hint message \n label = Tkinter.Label(self.frame[0],height=2,text='聊天信息记录')\n label.pack(fill=Tkinter.BOTH,side=Tkinter.LEFT)\n self.frame[0].pack(expand=10,fill=Tkinter.BOTH) \n # scrollbar on the right of the chat history widget\n self.chatTextScrollBar = Tkinter.Scrollbar(self.frame[1])\n self.chatTextScrollBar.pack(side=Tkinter.RIGHT,fill=Tkinter.Y) \n \n # chat history widget, bound to the scrollbar above\n ft = tkFont.Font(family='Fixdsys',size=11)\n self.chatText = Tkinter.Listbox(self.frame[1],width=70,height=16,font=ft)\n self.chatText['yscrollcommand'] = self.chatTextScrollBar.set\n self.chatText.pack(expand=1,fill=Tkinter.BOTH)\n self.chatTextScrollBar['command'] = self.chatText.yview\n self.frame[1].pack(expand=1,fill=Tkinter.BOTH) \n \n # label showing a hint message \n label = Tkinter.Label(self.frame[2],height=2,text='请输入聊天信息')\n label.pack(fill=Tkinter.BOTH,side=Tkinter.LEFT)\n self.frame[2].pack(expand=1,fill=Tkinter.BOTH) \n \n # scrollbar for the message input widget\n self.inputTextScrollBar = Tkinter.Scrollbar(self.frame[3])\n self.inputTextScrollBar.pack(side=Tkinter.RIGHT,fill=Tkinter.Y) \n \n # message input widget, bound to its scrollbar\n ft = tkFont.Font(family='Fixdsys',size=11)\n self.inputText = Tkinter.Text(self.frame[3],width=70,height=8,font=ft)\n self.inputText['yscrollcommand'] = self.inputTextScrollBar.set\n self.inputText.pack(expand=1,fill=Tkinter.BOTH)\n self.inputTextScrollBar['command'] = self.inputText.yview\n self.frame[3].pack(expand=1,fill=Tkinter.BOTH) \n \n # send button\n self.sendButton=Tkinter.Button(self.frame[4],text=' 发 送 ',width=10,command=self.sendMessage)\n self.sendButton.pack(expand=1,side=Tkinter.BOTTOM and Tkinter.RIGHT,padx=25,pady=5) \n \n # close button\n self.closeButton=Tkinter.Button(self.frame[4],text=' 关 闭 ',width=10,command=self.close)\n self.closeButton.pack(expand=1,side=Tkinter.RIGHT,padx=25,pady=5)\n self.frame[4].pack(expand=1,fill=Tkinter.BOTH) \n \n # receive messages from the client\n def receiveMessage(self):\n # create the server socket and listen\n self.serverSock=socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n self.serverSock.bind((self.local,self.port))\n self.serverSock.listen(15)\n self.buffer = 1024\n self.chatText.insert(Tkinter.END,'服务器已经就绪......')\n # accept client connection requests in a loop\n while True:\n self.connection,self.address = self.serverSock.accept()\n self.flag = True\n while True:\n # receive a message sent by the client\n self.cientMsg = self.connection.recv(self.buffer)\n if not self.cientMsg:\n continue\n elif self.cientMsg == 'Y':\n 
self.chatText.insert(Tkinter.END,'服务器端已经与客户端建立连接......')\n self.connection.send('Y')\n elif self.cientMsg == 'N':\n self.chatText.insert(Tkinter.END,'服务器端与客户端建立连接失败......')\n self.connection.send('N')\n else:\n theTime = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n self.chatText.insert(Tkinter.END, '客户端 ' + theTime +' 说:')\n self.chatText.insert(Tkinter.END, ' ' + self.cientMsg) \n \n # send a message\n def sendMessage(self):\n # get the message the user typed into the input widget\n message = self.inputText.get('1.0',Tkinter.END)\n # format the current time\n theTime = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n self.chatText.insert(Tkinter.END, '服务器 ' + theTime +' 说:')\n self.chatText.insert(Tkinter.END,' ' + message )\n if self.flag == True:\n # send the message to the client\n self.connection.send(message)\n else:\n # no socket connection has been established yet, warn the user\n self.chatText.insert(Tkinter.END,'您还未与客户端建立连接,客户端无法收到您的消息\\n')\n # clear the message the user typed into the input widget\n self.inputText.delete(0.0,message.__len__()-1.0) \n \n # close the message window and exit\n def close(self):\n sys.exit() \n \n # start a thread that receives the client's messages\n def startNewThread(self):\n # start a new thread to receive messages from the client;\n # thread.start_new_thread(function,args[,kwargs]) takes the thread\n # function and its arguments, which must be a tuple (kwargs is optional);\n # receiveMessage takes no arguments, so pass an empty tuple\n thread.start_new_thread(self.receiveMessage,()) \n \ndef main():\n server = ServerUI()\n server.startNewThread()\n server.root.mainloop() \n \nif __name__=='__main__':\n main()\n","sub_path":"聊天工具/severUI.py","file_name":"severUI.py","file_ext":"py","file_size_in_byte":5691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"180885885","text":"#!/usr/bin/python\n'''\nCreated on Oct 21, 2016\n@author: Rohan Achar\n'''\n\nimport logging\nimport logging.handlers\nimport os\nimport sys\nimport argparse\nimport uuid\n\nsys.path.append(os.path.realpath(os.path.join(os.path.dirname(__file__), \"../..\")))\nfrom spacetime.connectors.spacetime import ObjectlessSpacetimeConnection\nfrom rtypes.dataframe.dataframe_client import dataframe_client\nfrom spacetime.client.frame import ClientFrame\nfrom applications.search.crawler_frame import CrawlerFrame\n\nlogger = None\n\nclass Simulation(object):\n '''\n classdocs\n '''\n def __init__(self, address, port):\n '''\n Constructor\n '''\n\n objectless_connector = ObjectlessSpacetimeConnection(\n \"CrawlerFrame_{0}\".format(CrawlerFrame.app_id),\n address = \"http://\" + address + \":\" + str(port) + \"/\")\n\n frame_c = ClientFrame(\n objectless_connector,\n dataframe_client(),\n time_step=2000)\n\n frame_c.attach_app(CrawlerFrame(frame_c))\n\n frame_c.run_main()\n\ndef SetupLoggers():\n global logger\n logger = logging.getLogger()\n logging.info(\"testing before\")\n logger.setLevel(logging.DEBUG)\n\n #logfile = os.path.join(os.path.dirname(__file__), \"../../logs/CADIS.log\")\n #flog = logging.handlers.RotatingFileHandler(logfile, maxBytes=10*1024*1024, backupCount=50, mode='w')\n #flog.setFormatter(logging.Formatter('%(levelname)s [%(name)s] %(message)s'))\n #logger.addHandler(flog)\n logging.getLogger(\"requests\").setLevel(logging.WARNING)\n logging.getLogger(\"urllib3\").setLevel(logging.WARNING)\n clog = logging.StreamHandler()\n clog.addFilter(logging.Filter(name='CRAWLER'))\n clog.setFormatter(logging.Formatter('[%(name)s] %(message)s'))\n clog.setLevel(logging.DEBUG)\n logger.addHandler(clog)\n\nif __name__== \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('-a', '--address', type=str, default=\"127.0.0.1\", help='Address of the distributing server')\n parser.add_argument('-p', '--port', type=int, 
default=12000, help='Port used by the distributing server')\n args = parser.parse_args()\n SetupLoggers()\n sim = Simulation(args.address, args.port)\n","sub_path":"applications/search/crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":2199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"186671956","text":"import datetime\n\nfrom pytz import timezone\n\nfrom wildberries.consts import (\n CONTENT_API_AUTHORIZATION_TOKEN, SUPPLIER_ID,\n STATISTICS_API_KEY, ORDERS_API_TOKEN\n)\nfrom wildberries.api import check_connection, product_list, fbs_order_list\n\n\nACCESS = {\n \"CONTENT_API_AUTHORIZATION_TOKEN\": CONTENT_API_AUTHORIZATION_TOKEN,\n \"SUPPLIER_ID\": SUPPLIER_ID,\n \"STATISTICS_API_KEY\": STATISTICS_API_KEY,\n \"ORDERS_API_TOKEN\": ORDERS_API_TOKEN\n}\n\ndef test_check_connection():\n assert check_connection(access=ACCESS), \"проверка подключения должна возвращать True\"\n\n\ndef test_get_products_list_returns_cards():\n products = product_list(access=ACCESS)\n assert list(products), \"функция должна вернуть не пустой список товаров\"\n\n\ndef test_order_list():\n tz = timezone('UTC')\n from_datetime = tz.localize(datetime.datetime(year=2021, month=4, day=1))\n to_datetime = tz.localize(datetime.datetime(year=2021, month=4, day=30))\n orders = fbs_order_list(ACCESS, from_datetime, to_datetime)\n assert list(orders), \"функция должна вернуть не пустой список заказов\"\n\n\ntz = timezone('UTC')\nfrom_datetime = tz.localize(datetime.datetime(year=2021, month=4, day=1))\nto_datetime = tz.localize(datetime.datetime(year=2021, month=5, day=1))\norders = list(fbs_order_list(ACCESS, from_datetime, to_datetime))\n# products = list(product_list(ACCESS))\n\nprint()","sub_path":"wildberries/tests/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":1481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"358526184","text":"from .imagenet1000_index_to_label import my_dict\nnew_dict = {}\nfor k, v in my_dict.items():\n lists = v.split(',')\n for i in lists:\n i = i.strip()\n new_dict[i] = k\nprint(new_dict)\nwith open('imagenet1000_label_to_index.py', 'w') as f:\n f.write('new_dict = {\\n')\n for k, v in new_dict.items():\n f.write(' \"' + k + '\": ' + str(v) + ',\\n')\n f.write('}')\n","sub_path":"image-classification-finetune/script/transfer.py","file_name":"transfer.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"533877195","text":"import streamlit as st\nimport numpy as np \n\nimport matplotlib.pyplot as plt\nfrom sklearn import datasets\nfrom sklearn.model_selection import train_test_split\n\nfrom sklearn.decomposition import PCA\nfrom sklearn.svm import SVC\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.ensemble import RandomForestClassifier\n\nst.title(\"Hello World! 
Let's tune some parameters for ML models ..\")\n\nst.write(\"\"\" # Test Different Models\n \"\"\")\n\ndataset_name = st.sidebar.selectbox(\"Select Dataset\",\n (\"Iris\",\"Wine dataset\",\"Breast Cancer\"))\n\nmodel_name = st.sidebar.selectbox(\"Select CLassifier\",\n (\"K-Nearest Neighbors\",\"Support Vectors\",\"Random Forests\"))\n\nst.write(\"Loading Data: \", dataset_name, \" -- \", \"Using Model: \", model_name)\n\ndef get_dataset(name):\n data = None\n if name == 'Iris':\n data = datasets.load_iris()\n elif name == 'Wine dataset':\n data = datasets.load_wine()\n else:\n data = datasets.load_breast_cancer()\n X = data.data\n y = data.target\n return X, y\n\nX, y = get_dataset(dataset_name)\nst.write('Shape of dataset:', X.shape)\nst.write('number of classes:', len(np.unique(y)))\n\n\ndef add_parameter_ui(clf_name):\n params = dict()\n if clf_name == 'Support Vectors':\n C = st.sidebar.selectbox('C', (0.01, 0.1, 1.0))\n params['C'] = C\n elif clf_name == 'K-Nearest Neighbors':\n K = st.sidebar.selectbox('K', (1, 5, 10, 15))\n params['K'] = K\n else:\n max_depth = st.sidebar.selectbox('max_depth', (2, 4, 8, 10))\n params['max_depth'] = max_depth\n n_estimators = st.sidebar.selectbox('n_estimators', (10,50, 100))\n params['n_estimators'] = n_estimators\n return params\n\nparams = add_parameter_ui(model_name)\n\ndef get_classifier(clf_name, params):\n clf = None\n if clf_name == 'Support Vectors':\n clf = SVC(C=params['C'])\n elif clf_name == 'K-Nearest Neighbors':\n clf = KNeighborsClassifier(n_neighbors=params['K'])\n else:\n clf = clf = RandomForestClassifier(n_estimators=params['n_estimators'], \n max_depth=params['max_depth'], random_state=1234)\n return clf\n\nclf = get_classifier(model_name, params)\n#### CLASSIFICATION ####\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1234)\n\nclf.fit(X_train, y_train)\ny_pred = clf.predict(X_test)\n\nacc = accuracy_score(y_test, y_pred)\n\nst.write(f'Classifier = {model_name}')\nst.write(f'Accuracy =', acc)\n\n#### PLOT DATASET ####\n# Project the data onto the 2 primary principal components\npca = PCA(2)\nX_projected = pca.fit_transform(X)\n\nx1 = X_projected[:, 0]\nx2 = X_projected[:, 1]\n\nfig = plt.figure()\nplt.scatter(x1, x2,\n c=y, alpha=0.8,\n cmap='viridis')\n\nplt.xlabel('Principal Component 1')\nplt.ylabel('Principal Component 2')\nplt.colorbar()\n\n#plt.show()\nst.pyplot(fig)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"160203575","text":"# -*- encoding: utf-8 -*-\n\nfrom odoo import api, fields, models, _\nimport datetime, pytz\n\n\nclass ChangeDateTime(models.Model):\n _name = 'change.datetime'\n _description = 'Change Datetime'\n\n def change_utc_to_local_datetime(self, souce_date, option):\n user_tz = self.env.user.tz or str(pytz.utc)\n tz_now = datetime.datetime.now(pytz.timezone(user_tz))\n difference = tz_now.utcoffset().total_seconds() / 60 / 60\n difference = int(difference)\n utc_date = datetime.datetime.strptime(souce_date, '%Y-%m-%d %H:%M:%S')\n local_date = utc_date + datetime.timedelta(hours=difference)\n return local_date.strftime(option)\n\n def change_local_datetime_to_utc(self, souce_date, option):\n user_tz = self.env.user.tz or str(pytz.utc)\n tz_now = datetime.datetime.now(pytz.timezone(user_tz))\n difference = tz_now.utcoffset().total_seconds() / 60 / 60\n difference = int(difference)\n local_date = 
datetime.datetime.strptime(souce_date,\n '%Y-%m-%d %H:%M:%S')\n utc_date = local_date + datetime.timedelta(hours=-difference)\n return utc_date.strftime(option)\n","sub_path":"ERP_IN/addons/toolz/models/change_datetime.py","file_name":"change_datetime.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"432028263","text":"from vosk import Model, KaldiRecognizer\nimport pyaudio\nimport re\n\nclass Reconhecimento:\n\n @staticmethod\n def meu_comando(): # returns what was spoken as a string\n p = pyaudio.PyAudio()\n stream = p.open(format=pyaudio.paInt16, channels=1, rate=16000, input=True, frames_per_buffer=8000)\n stream.start_stream()\n\n model = Model(\"vosk-model-small-pt-0.3\") # load the speech recognition model\n rec = KaldiRecognizer(model, 16000)\n print(\"Fale algo\")\n\n while True:\n\n data = stream.read(2000)\n if len(data) == 0:\n break\n if rec.AcceptWaveform(data):\n meuResultado = rec.Result()\n minhaLista = meuResultado.split(\"text\") # what was spoken sits after the \"text\" key, returned as a list\n comando = minhaLista[1]\n stream.stop_stream()\n stream.close()\n p.terminate()\n resultado = re.findall(r'\\w+', comando) # regular expression to grab all the letters\n resultadofinal = \" \".join(resultado) # turn the list into a clean string\n return resultadofinal","sub_path":"AssistentePython/reconhecimento.py","file_name":"reconhecimento.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"612472377","text":"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n# pylint: disable=invalid-name, line-too-long, unused-variable, too-many-locals, too-many-branches\n\"\"\"Fused Conv2D Reference Data\"\"\"\nimport numpy as np\nfrom .conv2d_nhwc_python import conv2d_nhwc_python\nfrom .depthwise_conv2d_python import depthwise_conv2d_python_nhwc\nfrom scipy.special import expit\nimport os\n\ndef get_fused_conv2d_ref_data(fc,\n workload_name,\n best_config=None,\n save_data=False):\n if best_config:\n fc.update_all_shapes_from_best_cfg(best_config)\n ref_data = []\n ref_data_no_transform = []\n workspace = fc.workspace\n\n # Pretending the input_data is some output_data from stage -1\n input_cfg = fc.get_input_cfg(0)\n output_data = np.random.uniform(0.0, 0.1, size=(input_cfg.N, input_cfg.H, input_cfg.W, input_cfg.C)).astype(fc.output_dtype)\n ref_data_no_transform.append(output_data)\n ref_data.append(fc.tensor_transformation(output_data, input_cfg, 'data'))\n # params names for saving data\n params_name = ['input']\n\n for idx in range(fc.layer_num):\n f = fc.get_filter_cfg(idx)\n f_size = (f.H, f.W, f.O, f.I) if f.depthwise else (f.H, f.W, f.I, f.O)\n filter_data = np.random.uniform(0.0, 0.1, size=f_size).astype(fc.output_dtype)\n ref_data_no_transform.append(filter_data)\n ref_data.append(fc.tensor_transformation(filter_data, f, 'kernel'))\n input_data = np.copy(output_data)\n\n if f.depthwise:\n output_data = depthwise_conv2d_python_nhwc(input_data, filter_data, stride=[f.stride_h, f.stride_w], padding='SAME').astype(fc.output_dtype)\n params_name.append('filter_{}_d'.format(idx+1)) # Mark depthwise filter\n else: # Normal convolution\n output_data = conv2d_nhwc_python(input_data, filter_data, f.stride_h, padding=f.padding).astype(fc.output_dtype)\n params_name.append('filter_{}'.format(idx+1))\n\n if f.post_op is not None:\n n, h, w, oc = output_data.shape\n bias_np = np.random.uniform(0.0, 0.1, size=(oc,)).astype(fc.output_dtype)\n ref_data_no_transform.append(bias_np)\n ref_data.append(bias_np)\n\n post_op_scipy = np.zeros(shape=(n, h, w, oc))\n for c in range(oc):\n post_op_scipy[:,:,:,c] = output_data[:,:,:,c] + bias_np[c]\n\n # For ResNet / DenseNet blocks, etc\n if fc.is_block:\n post_op_scipy[:,:,:,c] = post_op_scipy[:,:,:,c] + input_data[:,:,:,c]\n\n if f.post_op == 'relu':\n post_op_scipy[:,:,:,c] = np.maximum(post_op_scipy[:,:,:,c], 0)\n elif f.post_op == 'relu6':\n post_op_scipy[:,:,:,c] = np.maximum(post_op_scipy[:,:,:,c], 0)\n post_op_scipy[:,:,:,c] = np.minimum(post_op_scipy[:,:,:,c], 6)\n elif f.post_op == 'sigmoid':\n post_op_scipy[:,:,:,c] = expit(post_op_scipy[:,:,:,c])\n output_data = post_op_scipy.astype(fc.output_dtype)\n params_name.append('bias_{}'.format(idx+1))\n\n if idx == fc.layer_num - 1: # At the last stage, append output_data as the final output for reference\n output_cfg = fc.get_output_cfg(idx)\n ref_data_no_transform.append(output_data)\n ref_data.append(fc.tensor_transformation(output_data, output_cfg, 'data'))\n params_name.append('output')\n\n if save_data:\n # Save ref data\n folder_name = '{}/npy/fused/{}/'.format(workspace, workload_name)\n if not os.path.exists(folder_name):\n os.mkdir(folder_name)\n for i in range(0, len(ref_data)):\n filename = folder_name + params_name[i]\n # Transpose filter for cudnn: should be non-fortran order\n if fc.target.kind.name == 'cuda':\n np.save(filename, ref_data[i])\n if 'filter' in filename:\n np.save(filename+'_transposed', np.array(ref_data[i].transpose(3, 2, 0, 1), 
order='C'))\n else:\n if len(ref_data[i].shape) == 4: # Don't need to save NCHW format for bias data\n np.save(filename+'_NCHW', np.array(ref_data[i].transpose(0, 3, 1, 2), order='C'))\n else:\n np.save(filename, ref_data[i])\n else:\n if 'filter' in filename:\n np.save(filename+'_NCHWc', ref_data[i]) # NCHWc data\n np.save(filename+'_transposed', np.array(ref_data_no_transform[i].transpose(3, 2, 0, 1), order='C'))\n else:\n if len(ref_data[i].shape) == 5: # Don't need to save NCHW format for bias data\n np.save(filename+'_NCHWc', ref_data[i]) # NCHWc data\n np.save(filename+'_NCHW', np.array(ref_data_no_transform[i].transpose(0, 3, 1, 2), order='C')) # NHWC to NCHW\n else:\n np.save(filename, ref_data[i])\n\n return ref_data\n","sub_path":"python/tvm/topi/testing/fused_conv2d_python.py","file_name":"fused_conv2d_python.py","file_ext":"py","file_size_in_byte":6144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"439182572","text":"import requests\nimport csv\nimport random\nfrom bs4 import BeautifulSoup\nclass CheckInInventory:\n def __init__(self):\n self.soups=[]\n self.pages=[]\n self.columns=[]\n #self.size_dictionary=[]\n self.read_links();\n for link in self.links:\n self.pages.append(requests.get(link))\n for page in self.pages:\n self.soups.append(BeautifulSoup(page.content,'html.parser'))\n for soup in self.soups : \n self.columns.append(soup.find_all(class_=\"cBeditable\"))\n def read_links(self):\n self.links=[]\n with open('./prod.csv','rb') as input_file:\n reader = csv.reader(input_file)\n for row in reader:\n if(row[1] not in self.links):\n self.links.append(row[1])\n #self.links=random.sample(self.links)\n #self.links=list(set(self.links)) \n def checkSizes(self,webSize,size):\n if(size==webSize):\n return True\n elif(webSize==\"XXXL\" and size==\"3XL\"):\n return True\n elif(webSize==\"XXL\" and size==\"2XL\"):\n return True\n elif(webSize==\"XXS\" and size==\"2XS\"):\n return True \n else : \n return False \n def checkProduct(self):\n with open('./prod.csv','rb') as input_file:\n reader = csv.reader(input_file)\n for row in reader:\n for i in range(len(self.links)):\n if(row[1]==self.links[i]):\n self.main_column=self.columns[i]\n #self.main_dictionary = self.size_dictionary[i]\n\n id_prod=row[0].split('/')[1]\n for single_column in self.main_column:\n index_value =-1\n for sizes in single_column.find(class_=\"tableBackgroundBlack\").find_all(\"td\"):\n index_value+=1\n if id_prod==single_column.find(class_=\"color-name\").get_text().replace(\"\\t\",\" \").split(' ')[0]:\n if(self.checkSizes(sizes.get_text(),row[0].split('/')[2])):\n available=int((single_column.find(class_=\"cBsecondLine\").find_all(\"td\"))[index_value].get_text().replace(\"+\",\"\").strip())\n if(available>0):\n print(row[0]+\" product is available \"+str(available))\n elif(available==0):\n print(row[0]+\" product is not available\")\n\nclass ReadFromHtml:\n def __init__(self,sku,hmtllink):\n self.sku=sku\n self.pages=(requests.get(hmtllink))\n self.soups=(BeautifulSoup(self.pages.content,'html.parser')) \n self.columns=(self.soups.find_all(class_=\"cBeditable\"))\n self.checkProduct()\n def checkSizes(self,webSize,size):\n if(size==webSize):\n return True\n elif(webSize==\"XXXL\" and size==\"3XL\"):\n return True\n elif(webSize==\"XXL\" and size==\"2XL\"):\n return True\n elif(webSize==\"XXS\" and size==\"2XS\"):\n return True \n else : \n return False \n def checkProduct(self):\n for single_column in self.columns:\n index_value =-1\n for sizes 
in single_column.find(class_=\"tableBackgroundBlack\").find_all(\"td\"):\n index_value+=1\n if self.sku.split('/')[1]==single_column.find(class_=\"color-name\").get_text().replace(\"\\t\",\" \").split(' ')[0]:\n if(self.checkSizes(sizes.get_text(),self.sku.split('/')[2])):\n available=int((single_column.find(class_=\"cBsecondLine\").find_all(\"td\"))[index_value].get_text().replace(\"+\",\"\").strip())\n if(available>0):\n print(self.sku+\" product is available \"+str(available))\n elif(available==0):\n print(self.sku+\" product is not available\")\n#inv = CheckInInventory();\n#inv.checkProduct();\n\ninv = ReadFromHtml(\"302510/0099DKTEAM/2XS\",\"https://seizoroi.com/product_api/0999/0999_dk.htm\")\n\n\n# Size not found \n#Product Not found \n# Available","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"163027909","text":"# O3 RUN SUMMARY PLOT\n# 2019.05.29 MADE\t\tBY Gregory S.H. Paek\n#============================================================#\n#\tMODULE\n#------------------------------------------------------------#\nimport os, glob\nimport matplotlib.pyplot as plt\nfrom astropy.io import ascii\nimport numpy as np\n#------------------------------------------------------------#\npath_table\t\t= '/mnt/window/Users/User/Downloads/data/Project/gw/bayestar/sel/O3run_summary.dat'\npath_save\t= '/home/sonic/S190425z'\n\neventbl\t\t\t= ascii.read(path_table)\neventbl\t\t\t= eventbl[eventbl['noise']!='o']\n#------------------------------------------------------------\n#\tPLOT 1\t: TIME - MAG.\n#------------------------------------------------------------\n#\thttps://partrita.github.io/posts/matplotlib-examples/\n#\tGet the figure and the axes\nplt.close('all')\nfig, ax0\t= plt.subplots(nrows=1, ncols=1, sharey=False, figsize=(10, 10))\nplt.rcParams.update({'font.size': 20})\n#ax0.fill_between(jdrange, imag-imagerr, imag+imagerr, color='dodgerblue', alpha=0.5, label='i-band')\ni=0\nfor i in range(len(eventbl)):\n\tx\t= eventbl['delmjd'][i]\n\ty\t= eventbl['distmean'][i]\n\tyerr= eventbl['diststd'][i]\n\tcapsize = 10\n\n\tsize= np.sqrt(eventbl['region'][i]*2/np.pi)\n\n\n\tif\t\t'Tera'in eventbl['prog'][i]:\n\t\tcolor\t= 'grey'\n\telif\t'BHNS'in eventbl['prog'][i]:\n\t\tcolor\t= 'blue'\n\telif\t'BNS' in eventbl['prog'][i]:\n\t\tcolor\t= 'dodgerblue'\n\telif\t'BBH' in eventbl['prog'][i]:\n\t\tcolor\t= 'dimgrey'\n\telif\t'Tera/BNS' in eventbl['prog'][i]:\n\t\tcolor\t= 'violet'\n\telse:\n\t\tcolor\t= 'brown'\n\tmarker\t\t= 'o'\n#------------------------------------------------------------\n\tif\t\teventbl['noise'][i]=='o':\n\t\tmarker\t= 'x'\n\t\tcolor\t= 'black'\n\t\tsize\t= 15\n\t\tyerr\t= 0\n\t\tcapsize = 0\n#------------------------------------------------------------\n\tparams_plot\t= dict(\tx=x, y=y, yerr=yerr,\n\t\t\t\t\t\tmarker=marker, ms=size,\n\t\t\t\t\t\tc=color, alpha=0.5,\n\t\t\t\t\t\tcapthick=1, capsize=capsize,\n\t\t\t\t\t\tlabel='_nolegend_')\n\tax0.errorbar(**params_plot)\n#------------------------------------------------------------\n#\tGW170817\n#------------------------------------------------------------\nparams_plot\t= dict(\tx=-5, y=44.74, yerr=9,\n\t\t\t\t\tmarker='o', ms=np.sqrt(30/np.pi),\n\t\t\t\t\tc='red', alpha=0.5,\n\t\t\t\t\tcapthick=1, capsize=10,\n\t\t\t\t\tlabel='_nolegend_')\nax0.errorbar(**params_plot)\n#------------------------------------------------------------\n#\tLEGEND (DUMMY 
POINTS)\n#------------------------------------------------------------\nax0.errorbar(\tx=-99, y=-99, yerr=0,\n\t\t\t\tmarker='o',\n\t\t\t\tc='dodgerblue', alpha=0.4,\n\t\t\t\tlabel='BNS')\nax0.errorbar(\tx=-99, y=-99, yerr=0,\n\t\t\t\tmarker='o',\n\t\t\t\tc='blue', alpha=0.4,\n\t\t\t\tlabel='BNS/BHNS')\nax0.errorbar(\tx=-99, y=-99, yerr=0,\n\t\t\t\tmarker='o',\n\t\t\t\tc='violet', alpha=0.5,\n\t\t\t\tlabel='BNS/False Alarm')\nax0.errorbar(\tx=-99, y=-99, yerr=0,\n\t\t\t\tmarker='o',\n\t\t\t\tc='dimgrey', alpha=0.4,\n\t\t\t\tlabel='BBH')\n'''\nax0.scatter(\tx=-99, y=-99,\n\t\t\t\tmarker='x', s=300,\n\t\t\t\tcolor='black', alpha=0.4,\n\t\t\t\tlabel='False Alarm')\n'''\n#\tGW170817\nparams_plot\t= dict(\tx=-99, y=-99, yerr=0,\n\t\t\t\t\tmarker='o',\n\t\t\t\t\tcolor='red', alpha=0.4,\n\t\t\t\t\tlabel='GW170817')\nax0.errorbar(**params_plot)\n#------------------------------------------------------------\n#\tSETTING\n#------------------------------------------------------------\n#fig.suptitle('S190425z', fontsize=14, fontweight='bold')\n#ax0.set(title='LIGO/Virgo O3 run', xlabel=r'$t-t_{0}$ [days]', ylabel='GW Luminosity Distance [Mpc]')#, fontsize=14)\n#ax0.set(xlabel=r'$t-t_{0}$ [days]', ylabel='GW Luminosity Distance [Mpc]')#, fontsize=14)\nax0.set(xlabel='Since O3 run start [days]', ylabel='GW Luminosity Distance [Mpc]')#, fontsize=14)\n#ax0.set_ylim([24, 18])\nax0.set_xlim([0,np.max(eventbl['delmjd']+5)])\n#ax0.set_yscale('log')\nparam_legend\t= dict(\tloc='upper left', fontsize=20,\n\t\t\t\t\t\tfancybox=True, framealpha=0.5,\n\t\t\t\t\t\tscatterpoints=1, markerscale=2)\nplt.legend(**param_legend)\nif 'O3_summary.png' in glob.glob('*png'):\n\tos.system('mv O3_summary.png O3_summary.png.bkg')\nplt.minorticks_on()\nplt.tight_layout()\nplt.xticks(np.arange(0, 80, 10))\n#------------------------------------------------------------\nfrom mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes\naxins = zoomed_inset_axes(ax0, 20, loc='upper right') # zoom-factor: 2.5, location: upper-left\nparams_plot\t= dict(\tx=-5, y=44.74, yerr=9,\n\t\t\t\t\tmarker='o', ms=np.sqrt(30*2/np.pi),\n\t\t\t\t\tc='red', alpha=1.0,\n\t\t\t\t\tcapthick=1, capsize=10,\n\t\t\t\t\tlabel='_nolegend_')\naxins.errorbar(**params_plot)\nx1, x2, y1, y2 = -5.25, -4.75, 30, 60 # specify the limits\naxins.set_xlim(x1, x2) # apply the x-limits\naxins.set_ylim(y1, y2) # apply the y-limits\naxins.minorticks_on()\nplt.yticks(visible=True)\nplt.xticks(visible=False)\nplt.yticks(np.arange(30, 61, 10))\n\nplt.savefig(path_save+'/Figure_X+O3_summary.png')","sub_path":"gw/work/o3run_summary.py","file_name":"o3run_summary.py","file_ext":"py","file_size_in_byte":4714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"613245179","text":"#!/usr/bin/python3\n\nfrom datetime import datetime\n###################################################\n# DEFINE TIMESTAMP\n###################################################\nnow = datetime.now()\ndatestamp = now.strftime(\"%m%d%Y-%H%M\")\ndatestamp_Readable = now.strftime(\"%m/%d/%Y - %H:%M\")\n# print(datestamp) # debug printing timestamp (only date, really)\n\n\"\"\"\nDevelopment Notes:\n- Timestamp details: https://timestamp.online/article/how-to-convert-timestamp-to-datetime-in-python\n\"\"\"","sub_path":"Development Files/0-timestamp.py","file_name":"0-timestamp.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"548409974","text":"from 
django.views.generic.base import View\nfrom django.views.generic.detail import SingleObjectMixin\nfrom django.http import HttpResponseRedirect, Http404\nfrom django.shortcuts import render, get_object_or_404\n\n# Create your views here.\n\nfrom products.models import Variations\nfrom carts.models import Cart, CartItem\n\n\nclass CartView(SingleObjectMixin, View):\n model = Cart\n template_name = \"carts/view.html\"\n\n def get_object(self,*args, **kwargs):\n # Expire the session after 5 mins - put 300\n # To expire once the browser is closed use 0\n self.request.session.set_expiry(0)\n cart_id = self.request.session.get(\"cart_id\")\n if cart_id is None:\n cart = Cart()\n cart.save()\n cart_id = cart.id\n self.request.session[\"cart_id\"] = cart_id\n\n cart = Cart.objects.get(id=cart_id)\n if self.request.user.is_authenticated():\n cart.user = self.request.user\n cart.save()\n return cart\n\n def get(self, request, *args, **kwargs):\n cart = self.get_object()\n item_id = request.GET.get(\"item\")\n delete_item = request.GET.get(\"delete\")\n if item_id:\n item_instance = get_object_or_404(Variations, id=item_id)\n qty = request.GET.get(\"qty\",1)\n try:\n if int(qty) < 1:\n delete_item = True\n except:\n raise Http404\n\n cart_item = CartItem.objects.get_or_create(cart=cart,item = item_instance)[0]\n if delete_item:\n cart_item.delete()\n else:\n cart_item.quantity = qty\n cart_item.save()\n context = {\n \"object\":self.get_object()\n }\n template = self.template_name\n return render(request,template,context)\n","sub_path":"src/carts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"384985764","text":"# from mysql.connector import Error,MySQLConnection\nfrom mysql.connector import Error, MySQLConnection\n\nfrom conncection.python_mysql_dbconfig import read_db_config\n\n\ndef connect():\n conn = None\n try:\n conn = MySQLConnection(host = \"localhost\",\n database = \"mysql\",\n user= \"root\",\n password = 'rootroot')\n if conn.is_connected():\n print('Connected to MySQL database')\n print(type(conn), conn)\n except Error as e:\n print(e)\n finally:\n # only close the connection if it was actually created\n if conn is not None and conn.is_connected():\n conn.close()\n print('Connection closed.')\n\n\n\ndef connect_use_config():\n db_config = read_db_config()\n\n conn = None\n try:\n print(\"Connecting to MySQL database.\")\n conn = MySQLConnection(**db_config)\n\n if conn.is_connected():\n print(\"connection established\")\n print(type(conn), conn)\n else:\n print(\"connection failed\")\n except Error as error:\n print(error)\n finally:\n if conn is not None and conn.is_connected():\n conn.close()\n print(\"Connection closed\")","sub_path":"conncection/conncect_study01.py","file_name":"conncect_study01.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"266512906","text":"\"\"\"\nParser for [Duckling](https://github.com/facebook/duckling), the open source project.\n\nWe use [Duckling](https://github.com/facebook/duckling) for parsing and extracting date, time, numbers, currency etc.\nWe will expect Duckling to be running as an http service, and provide means to connect from the implementation here.\n\n## Tutorials\n\n- [DucklingParser](../../../../tests/parser/text/entity/test_duckling_parser.html)\n\nImport classes:\n\n- DucklingParser\n\"\"\"\nimport json\nfrom typing import Any, Callable, Dict, List, Optional, Union\n\nimport attr\nimport pytz\nimport requests\nfrom pytz.tzinfo import BaseTzInfo # type: ignore\n\nfrom dialogy.constants 
import EntityKeys\nfrom dialogy.plugin import Plugin, PluginFn\nfrom dialogy.types.entity import BaseEntity, dimension_entity_map\nfrom dialogy.workflow import Workflow\n\n\n# == DucklingParser ==\n@attr.s(kw_only=True)\nclass DucklingParser(Plugin):\n \"\"\"\n [Plugin](../../../plugin/plugin.html) for extracting entities using [Duckling](https://github.com/facebook/duckling).\n Once instantiated, a `duckling_parser` object will interface to an http server, running [Duckling](https://github.com/facebook/duckling).\n\n This object when used as a plugin, transforms the `List[Dict[str, Any]]` returned from the API to a [BaseEntity](../../../types/entity/base_entity.html).\n\n Plugin signature:\n\n - `access(Workflow) -> (str, int)`\n - int here should be `datetime.timestamp()`.\n - `mutate(Workflow, List[BaseEntity]) -> None`\n - insert `List[BaseEntity]` into `Workflow`.\n\n Attributes:\n\n - dimensions (Optional[List[str]])\n - locale (str):\n - timezone (Optional[str]):\n - timeout (Optional[float]): (default `0.05`)\n - url (str): (default: `\"http://0.0.0.0:8000/parse\"`)\n \"\"\"\n\n # **dimensions**\n #\n # [[Read](https://github.com/facebook/duckling#supported-dimensions)]\n # We support:\n # - `Numeral`\n # - `Time`\n # - `People` - This isn't part of the standard, we have a private fork to support this.\n # Do note, passing more dimensions is not free. Duckling would search for extra set of patterns just because\n # those dimensions were expected.\n dimensions: Optional[List[str]] = attr.ib(default=None)\n\n # **locale**\n #\n # The format for expressing locale requires language code and country name ids. [[Read](https://github.com/facebook/duckling#extending-duckling)]\n # about sections that define [ISO-639-codes](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) for languages and\n # [ISO3166 alpha2 country code](https://www.iso.org/obp/ui/#search/code/) for country codes.\n # Examples: `\"en_IN\"`, `\"en_US\"`, `\"en_GB\"`.\n locale: str = attr.ib(default=None)\n\n # **timezone**\n #\n # `pytz` Timezone. This is especially important when services are deployed across different geographies\n # and consistency is expected in the responses. 
Get a valid value from a [list of tz database timezones](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones).\n # Example: `\"Asia/Kolkata\"`\n timezone: Optional[str] = attr.ib(default=None)\n\n # **timeout**\n #\n # There are certain strings which tend to stall Duckling: [example](https://github.com/facebook/duckling/issues/338).\n # In such cases, to prevent the overall experience to slow down as well, provide a certain timeout value.\n timeout: Optional[float] = attr.ib(default=0.05)\n\n # **url**: The address where Duckling's entity parser can be reached.\n url: str = attr.ib(default=\"http://0.0.0.0:8000/parse\")\n\n headers: Dict[str, str] = {\n \"Content-Type\": \"application/x-www-form-urlencoded; charset=UTF-8\"\n }\n\n # == __set_timezone ==\n def __set_timezone(self) -> Optional[BaseTzInfo]:\n \"\"\"\n Set timezone as BaseTzInfo from compatible timezone string.\n\n Raises:\n pytz.UnknownTimeZoneError: If `self.timezone` is not in [list of tz database timezones](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones).\n\n Returns:\n [BaseTzInfo]\n \"\"\"\n\n # If timezone is an unsafe string, we will handle a `pytz.UnknownTimeZoneError` exception\n # and pass a friendly message.\n if isinstance(self.timezone, str):\n try:\n return pytz.timezone(self.timezone)\n except pytz.UnknownTimeZoneError as unknown_timezone_error:\n raise pytz.UnknownTimeZoneError(\n f\"The timezone {self.timezone} is invalid\"\n \" check valid types here: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones\"\n ) from unknown_timezone_error\n return None\n\n # == __create_req_body ==\n def __create_req_body(\n self, text: str, reference_time: Optional[int]\n ) -> Dict[str, Any]:\n \"\"\"\n create request body for entity parsing\n\n Args:\n\n - text (str): A sentence or document.\n - reference_time (Optional[int]):\n \"\"\"\n dimensions = self.dimensions\n\n # **Payload Description**\n #\n # text - example: \"3 people tomorrow\"\n #\n # reftime - Resolve relative time like \"yesterday\", \"next month\", etc.\n # Make your own reference time using the current timestamp using: `int(datetime.now().timestamp() * 1000)`\n # These are the seconds since the [Unix epoch](https://en.wikipedia.org/wiki/Unix_time)\n payload = {\n \"text\": text,\n \"locale\": self.locale,\n \"tz\": self.__set_timezone(),\n \"dims\": json.dumps(dimensions),\n \"reftime\": reference_time,\n }\n\n return payload\n\n # == mutate_entity ==\n @staticmethod\n def mutate_entity(entity: Dict[str, Any]) -> Dict[str, Any]:\n \"\"\"\n Mutate entity obtained from Duckling API.\n\n The purpose is to simplify BaseEntity initialization by calling `BaseEntity.from_dict(**entity)`.\n\n Args:\n entity (Dict[str, Any]): An entity returned from Duckling's API.\n\n Returns:\n Dict[str, Any]: Updated keys and structure.\n \"\"\"\n\n # **range** describes the span of string from which the entity was found.\n entity[EntityKeys.RANGE] = {\n EntityKeys.START: entity[EntityKeys.START],\n EntityKeys.END: entity[EntityKeys.END],\n }\n\n # **type** of an entity is same as its **dimension**.\n entity[EntityKeys.TYPE] = entity[EntityKeys.DIM]\n\n # This piece is a preparation for multiple entity values.\n # So, even though we are confident of the value found, we are still keeping the\n # structure.\n if EntityKeys.VALUES in entity[EntityKeys.VALUE]:\n del entity[EntityKeys.VALUE][EntityKeys.VALUES]\n entity[EntityKeys.VALUES] = [entity[EntityKeys.VALUE]]\n\n # Pulling out the value of entity's **grain**. 
The value of **grain** helps\n # us understand the precision of the entity. Usually present for `Time` dimension\n # expect \"year\", \"month\", \"day\", etc.\n if EntityKeys.GRAIN in entity[EntityKeys.VALUE]:\n entity[EntityKeys.GRAIN] = entity[EntityKeys.VALUE][EntityKeys.GRAIN]\n elif entity[EntityKeys.VALUE][EntityKeys.TYPE] == EntityKeys.INTERVAL:\n entity[EntityKeys.GRAIN] = entity[EntityKeys.VALUE][EntityKeys.TO][\n EntityKeys.GRAIN\n ]\n\n del entity[EntityKeys.START]\n del entity[EntityKeys.END]\n del entity[EntityKeys.VALUE]\n return entity\n\n # == reshape ==\n def reshape(\n self, entities_json: List[Dict[str, Any]]\n ) -> Optional[List[BaseEntity]]:\n \"\"\"\n Create `BaseEntity` from a list of entity dicts.\n\n Args:\n entities_json (List[Dict[str, Any]]): List of entities derived from Duckling's API.\n\n Raises:\n NotImplementedError: Raised when dimensions not supported by the project are used.\n KeyError: Expected keys in entity dict don't match the Entity class.\n\n Returns:\n Optional[List[BaseEntity]]: A list of Entity objects.\n \"\"\"\n entity_object_list: List[BaseEntity] = []\n\n try:\n # For each entity dict:\n #\n # 1. Get the Entity class\n # 2. create an Entity object from the entity dict.\n for entity in entities_json:\n if entity[EntityKeys.VALUE][EntityKeys.TYPE] == EntityKeys.INTERVAL:\n cls = dimension_entity_map[entity[EntityKeys.DIM]][EntityKeys.INTERVAL] # type: ignore\n duckling_entity = cls.from_dict(self.mutate_entity(entity))\n duckling_entity.set_value()\n entity_object_list.append(duckling_entity)\n elif entity[EntityKeys.VALUE][EntityKeys.TYPE] == EntityKeys.VALUE:\n cls = dimension_entity_map[entity[EntityKeys.DIM]][EntityKeys.VALUE] # type: ignore\n duckling_entity = cls.from_dict(self.mutate_entity(entity))\n duckling_entity.set_value()\n entity_object_list.append(duckling_entity)\n else:\n # Raised only if an unsupported `dimension` is used.\n raise NotImplementedError(\n f\"Entities with value.type {entity['value']['type']} are\"\n \" not implemented. Report this\"\n \" issue here: https://github.com/Vernacular-ai/dialogy/issues\"\n )\n except KeyError as key_error:\n # Being vary of structural changes in the API or entity dicts.\n # Under normal circumstances this error shouldn't be raised.\n raise KeyError(\n f\"Missing key {key_error} in entity {entity}.\"\n ) from key_error\n return entity_object_list\n\n # == get_entities ==\n def get_entities(\n self, text: str, reference_time: Optional[int] = None\n ) -> List[Dict[str, Any]]:\n \"\"\"\n Get entities from duckling-server.\n\n Assuming duckling-server is running at expected `url`. The entities are returned in\n `json` compatible format.\n\n Args:\n\n - text (str): The sentence or document in which entities must be looked up.\n - reference_time (int): Cases where relative units of time are mentioned,\n like \"today\", \"now\", etc. 
We need to know the current time\n to parse the values into usable dates/times.\n\n Raises:\n requests.exceptions.ConnectionError: Duckling cannot be reached at `self.url`\n requests.exceptions.Timeout: Duckling request times out.\n ValueError: The status code of the response is not 200.\n\n Returns:\n Optional[List[Dict[str, Any]]]\n \"\"\"\n body = self.__create_req_body(text, reference_time)\n response = requests.post(\n self.url, data=body, headers=self.headers, timeout=self.timeout\n )\n\n if response.status_code == 200:\n # The API call was successful, expect the following to contain entities.\n # A list of dicts or an empty list.\n return response.json()\n\n # Control flow reaching here would mean the API call wasn't successful.\n # To prevent rest of the things from crashing, we will raise an exception.\n raise ValueError(\n f\"Duckling API call failed | [{response.status_code}]: {response.text}\"\n )\n\n # == plugin ==\n def plugin(self, workflow: Workflow) -> None:\n \"\"\"\n Insert Entity objects into the workflow.\n\n Args:\n\n - workflow (Workflow)\n\n Raises:\n TypeError: If access and mutate functions are not callable.\n \"\"\"\n access = self.access\n mutate = self.mutate\n input_ = Union[str, List[str]]\n if not isinstance(access, Callable): # type: ignore\n raise TypeError(\n \"Expected `access` to be Callable,\"\n f\" got access={type(access)} mutate={type(mutate)}\"\n )\n if not isinstance(mutate, Callable): # type: ignore\n raise TypeError(\n \"Expected `mutate` to be Callable,\"\n f\" got access={type(access)} mutate={type(mutate)}\"\n )\n\n entities = []\n if access and mutate:\n input_, reference_time = access(workflow)\n try:\n if isinstance(input_, str):\n entities.append(\n self.get_entities(input_, reference_time=reference_time)\n )\n elif isinstance(input_, list) and all(\n isinstance(text, str) for text in input_\n ):\n for text in input_:\n entities.append(\n self.get_entities(text, reference_time=reference_time)\n )\n else:\n raise TypeError(f\"Expected {input_} to be a List[str] or str.\")\n\n entities_flattened = [\n entity for entity_list in entities for entity in entity_list\n ]\n\n if entities_flattened:\n shaped_entities = self.reshape(entities_flattened)\n mutate(workflow, shaped_entities)\n else:\n mutate(workflow, [])\n except ValueError as value_error:\n raise ValueError(str(value_error)) from value_error\n\n # == __call__ ==\n def __call__(self) -> PluginFn:\n \"\"\"\n [callable-plugin](../../../plugin/plugin.html#__call__)\n \"\"\"\n return self.plugin\n","sub_path":"dialogy/parser/text/entity/duckling_parser.py","file_name":"duckling_parser.py","file_ext":"py","file_size_in_byte":13736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"414124819","text":"# Escreva um programa que pergunte o salário de um funcionário e calcule o valor do seu aumento.\r\n# Para salários superiores a R$1250,00, calcule um aumento de 10%. Para os inferiores ou iguais, o aumento é de 15%.\r\n\r\nsalary = float(input('Digite o seu salário: '))\r\n\r\nif salary > 1250.00:\r\n calc = (salary * 0.10) + salary\r\n print('Você teve um aumento de 10%. Sendo assim o seu salário vai de R${} para R${}'.format(salary, calc))\r\n\r\nelif salary < 1250.00:\r\n calc = (salary * 0.15) + salary\r\n print('Você teve um aumento de 15%. 
Sendo assim o seu salário vai de R${} para {}'.format(salary, calc))\r\n","sub_path":"+100exercicios/ex034.py","file_name":"ex034.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"466495795","text":"# Computing the power set\n#\n# For a set A, the set whose elements are all the subsets of A is called the power set of A.\n# For example, if the elements of A are {1, 2, 3}, the power set of A is the following set of 8 elements:\n#\n# {}, {1}, {2}, {3}, {1, 2}, {1, 3}, {2, 3}, {1, 2, 3}\n#\n# The elements of A are the natural numbers from 1 to n. Given n, write a program that prints\n# every element of the power set of A in lexicographic order.\n# The empty set is excluded from the output.\n#\n# Input\n# The first line contains the number of elements n. (1 <= n <= 10)\n#\n# Output\n# Print all elements of the power set of A in lexicographic order, excluding the empty set.\n#\n# Sample input\n# 3\n#\n# Sample output\n# 1\n# 1 2\n# 1 2 3\n# 1 3\n# 2\n# 2 3\n# 3\n\nimport sys\n\ndef getPowerSet(n,k):\n '''\n Given n elements, return the power set restricted to the\n subsets whose first (smallest) element is k.\n\n getPowerSet(3,2) = [ [2], [2, 3] ]\n '''\n\n if n == k :\n return [ [n] ]\n else :\n '''\n result = [ [1] ]\n temp = [ [2], [2,3], [3] ]\n '''\n result = [ [k] ]\n temp = []\n\n for i in range(k+1, n+1) :\n temp = temp + getPowerSet(n, i)\n\n for i in range(len(temp)) :\n temp[i] = [k] + temp[i]\n\n return result + temp\n\ndef powerSet(n):\n '''\n Return the elements of the power set of a set A with n elements,\n in lexicographic order, as a list.\n\n For example, for n = 3 it returns the following list:\n\n [ [1], [1, 2], [1, 3], [1, 2, 3], [2], [2, 3], [3] ]\n '''\n\n result = []\n\n for i in range(1, n+1) :\n result = result + getPowerSet(n, i)\n\n return result\n\n\ndef main():\n '''\n Do not modify this part.\n Recursion is required!\n '''\n\n n = int(input())\n\n result = powerSet(n)\n\n for line in result:\n print(*line)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"_site/2주차/예제2.py","file_name":"예제2.py","file_ext":"py","file_size_in_byte":2082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"404130047","text":"import numpy as np\n\n# TASK 1\narray_1 = np.array([2, 3, 4, 5], dtype=float)\narray_2 = np.array([[1,2,3,4,5], [1,2,3,4,5], [1,2,3,4,5]])\narray_3 = np.array(np.arange(0, 16))\narray_4 = np.array(np.arange(0, 100, 7))\n\n# TASK 2\naccess_1 = array_1[2]\naccess_2 = array_3[-2]\naccess_3 = array_4[5:9]\naccess_4 = array_4[2::3]\naccess_5 = array_2[2:][0]\naccess_6 = array_2[:, 1]\naccess_7 = array_2[:, 1:4] # won't pass\n\n# TASK 3\nmask_1 = array_4 % 2 == 0\nmask_2 = array_4[array_4 > 50]\nmask_3 = array_2[array_2 % 2 == 0]\n\n# TASK 4\nnames = np.array([\"Susie\", \"Mikayla\", \"Carlos\", \"Thao\", \"Ahmed\"])\n\nwages = np.array([15.50, 35.00, 23.40, 18.75, 54.00])\nover_20 = names[wages > 20.00]\n","sub_path":"S6-2/Labs/numpy-basics.py","file_name":"numpy-basics.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"111266660","text":"from random import randrange\nfrom uuid import uuid4\nfrom argparse import ArgumentParser\nimport sys\n\nmax_size = 30\n\ndef main(fname):\n\n with open(fname, 'w') as fout:\n size = randrange(1, max_size)\n\n fout.write(\"parity \" + str(size) + \";\\n\")\n for i in range(size+1):\n fout.write(str(i) + ' ')\n priority = randrange(1, 100)\n fout.write(str(priority) + ' ')\n owner = randrange(1, 3)\n owner = owner % 2\n fout.write(str(owner) + ' ')\n\n number_of_edges = randrange(1, size+1)\n used = [0] * (size + 1)\n for j in range(number_of_edges):\n\n while True:\n t = randrange(0, size + 1)\n if t != i and used[t] == 0:\n # only add edges to other vertexes, don't allow self 
loops\n used[t] = 1\n fout.write(str(t))\n break\n\n if j == number_of_edges - 1:\n fout.write(\" \")\n else:\n fout.write(\",\")\n\n\n uuid = uuid4()\n fout.write(\"\\\"\" + str(uuid) + \"\\\";\\n\")\n\nif __name__ == '__main__':\n parser = ArgumentParser()\n parser.add_argument(\"--fname\", help=\"name of the file to be created\", nargs=1, required=True)\n\n try:\n args = parser.parse_args()\n except:\n parser.print_help(sys.stderr)\n exit(1)\n\n main(args.fname[0])\n\n","sub_path":"generate_graph.py","file_name":"generate_graph.py","file_ext":"py","file_size_in_byte":1480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"422888626","text":"# Import the Flask class from the flask module.\nfrom flask import (Flask, render_template, url_for,\n redirect, flash, request, abort, session)\nfrom flask_qrcode import QRcode\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_bcrypt import Bcrypt\nfrom flask_login import (LoginManager, login_user, current_user,\n\tlogout_user, login_required, login_manager, UserMixin)\nfrom PIL import Image\nfrom flask_wtf import FlaskForm\nfrom wtforms import (StringField, PasswordField, SubmitField, BooleanField,\n\tTextAreaField, TextField, IntegerField)\nfrom wtforms.validators import DataRequired, Length, Email, EqualTo, NumberRange\nfrom flask_wtf.file import FileField, FileAllowed\nimport pandas as pd\nimport pandas_datareader.data as web\nfrom datetime import datetime, timedelta\nfrom gsearch.googlesearch import search\nfrom PyLyrics import *\nfrom oxforddictionaries.words import OxfordDictionaries\n# SecretKey -> protect form from mofifying cookies & cross site forgery attack.\nimport secrets, os, requests, json, pyttsx3, copy, fortune_cookie_phrases\nimport wikiquote, wikipedia, random, string, socket, hashlib, emoji\nfrom forex_python.bitcoin import BtcConverter\nfrom bs4 import BeautifulSoup as soup\nfrom dateutil.parser import parse\nfrom translate import Translator\nfrom quiz import QuizFile\n\n# create the app object\napp = Flask(__name__)\napp.config['SECRET_KEY'] = secrets.token_hex(16)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///site.db'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True\nqrcode = QRcode(app)\ndb = SQLAlchemy(app)\nbcrypt = Bcrypt(app)\nlogin_manager = LoginManager(app)\nlogin_manager.login_view = 'login'\nlogin_manager.login_message_category = 'info'\noriginal_questions = QuizFile.original_questions\nquestions = copy.deepcopy(original_questions)\nphrases = fortune_cookie_phrases.phrases\n\n\n# forms.py\n\n@login_manager.user_loader\ndef load_user(user_id):\n\treturn User.query.get(int(user_id))\n\nclass RegistrationForm(FlaskForm):\n\tusername = StringField('Username', validators = [DataRequired(), Length(min=7, max=20)])\n\temail = StringField('Email', validators = [DataRequired(), Email()])\n\tpassword = PasswordField('Password', validators = [DataRequired(), Length(min=7, max=20)])\n\tconfirm_password = PasswordField('Confirm Password',validators = [DataRequired(), EqualTo('password')])\n\tsubmit = SubmitField('Sign Up')\n\nclass LoginForm(FlaskForm):\n\temail = StringField('Email', validators = [DataRequired(), Email()])\n\tpassword = PasswordField('Password', validators = [DataRequired(), Length(min=7, max=20)])\n\tremember = BooleanField('Remember Me')\n\tsubmit = SubmitField('Login')\n\nclass UpdateAccountForm(FlaskForm):\n\tusername = StringField('Username', validators = [DataRequired(), Length(min=7, max=20)])\n\temail = StringField('Email', validators = 
[DataRequired(), Email()])\n\tpicture = FileField('Update Profile Picture', validators = [FileAllowed(['jpg', 'png'])])\n\tsubmit = SubmitField('Update')\n\nclass PostForm(FlaskForm):\n\ttitle = StringField('Title', validators = [DataRequired()])\n\tcontent = TextAreaField('Content', validators = [DataRequired()])\n\tsubmit = SubmitField('Post')\n\nclass TextToSpeechConversionForm(FlaskForm):\n\ttext = TextField('Enter Sample Text', validators = [DataRequired()])\n\tsubmit = SubmitField('Convert!')\n\nclass GoogleSearchForm(FlaskForm):\n\tkeyword = TextField('Enter keyword', validators = [DataRequired()])\n\tsubmit = SubmitField('Search')\n\nclass YoutubeVideoForm(FlaskForm):\n\tlink = TextField('Type YouTube Link', validators = [DataRequired()])\n\tsubmit = SubmitField('Play Video')\n\nclass SongLyricsForm(FlaskForm):\n\tartist_name = TextField('Artist Name', validators = [DataRequired()])\n\tsong_name = TextField('Song Name', validators = [DataRequired()])\n\tsubmit = SubmitField('Get Song Lyrics')\n\nclass OxfordDictionarysForm(FlaskForm):\n\tword = TextField('Word', validators = [DataRequired()])\n\tsubmit = SubmitField('Get Meaning and Opposite!')\n\nclass AgeCalculatorsForm(FlaskForm):\n\tdob = TextField('Enter Date of Birth (dd/mm/yyyy)', validators = [DataRequired()])\n\tsubmit = SubmitField('Get Age!')\n\nclass GenerateQRCodeFrom(FlaskForm):\n\ttext = TextField('Enter text', validators = [DataRequired()])\n\tsubmit = SubmitField('Generate QR Code')\n\nclass WeatherReportForm(FlaskForm):\n\tcity = TextField('Enter City Name (Not Country)', validators = [DataRequired()])\n\tsubmit = SubmitField('Get Weather Data')\n\nclass FlamesGameForm(FlaskForm):\n\tyour_name = TextField('Enter Your Name', validators = [DataRequired()])\n\tpartner_name = TextField('Enter Your Partner Name', validators = [DataRequired()])\n\tsubmit = SubmitField('Play GAME!')\n\nclass LanguageTranslateForm(FlaskForm):\n\tword = TextField('Enter Word/Sentence', validators = [DataRequired()])\n\tsubmit = SubmitField('Translate!')\n\nclass RandomQuotesForm(FlaskForm):\n\tword = TextField('Enter KeyWord', validators = [DataRequired()])\n\tsubmit = SubmitField('Get Quotes!')\n\nclass WikipediaSearchForm(FlaskForm):\n\tword = TextField('Enter Keyword', validators = [DataRequired()])\n\tsubmit = SubmitField('Search')\n\nclass RechargeCodeGeneratorForm(FlaskForm):\n\tmobile_num = IntegerField('Enter Mobile Number (without +91)', validators = [DataRequired(), NumberRange(min=1000000000, max=9999999999)])\n\tsubmit = SubmitField('Generate Code')\n\nclass IPAddressSearchForm(FlaskForm):\n\tname = TextField('Enter IP Address / Domain', validators = [DataRequired()])\n\tsubmit = SubmitField('Search')\n\nclass CryptographyForm(FlaskForm):\n\tname = TextField('Enter Sample Text', validators = [DataRequired()])\n\tsubmit = SubmitField('Hash Message!')\n\nclass PlayMusicForm(FlaskForm):\n\taudio_path = TextField('Type Audio URL from Internet', validators = [DataRequired()])\n\tsubmit = SubmitField('Load Music')\n\nclass LoveCalculatorForm(FlaskForm):\n\tboy_name = TextField('Enter Boy Name', validators = [DataRequired()])\n\tgirl_name = TextField('Enter Girl Name', validators = [DataRequired()])\n\tsubmit = SubmitField('Calculate Love %')\n\nclass ZodiacSignForm(FlaskForm):\n\tdob = TextField('Enter Date of Birth (dd/mm/yyyy)', validators = [DataRequired()])\n\tsubmit = SubmitField('Get Astro 
Sign!!')\n\n\n# models.py\n\nclass User(db.Model, UserMixin):\n\tid = db.Column(db.Integer, primary_key=True)\n\tusername = db.Column(db.String(20), unique=True, nullable=False)\n\temail = db.Column(db.String(50), unique=True, nullable=False)\n\timage_file = db.Column(db.String(20), nullable=False, default='default.jpg')\n\tpassword = db.Column(db.String(60), nullable=False)\n\tposts = db.relationship('Post', backref='author', lazy=True)\n\n\tdef __repr__(self):\n\t\treturn f\"User('{self.username}', '{self.email}', '{self.image_file}')\"\n\nclass Post(db.Model):\n\tid = db.Column(db.Integer, primary_key=True)\n\ttitle = db.Column(db.String(70), nullable=False)\n\tdate_posted = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)\n\tcontent = db.Column(db.Text, nullable=False)\n\tuser_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)\n\n\tdef __repr__(self):\n\t\treturn f\"Post('{self.title}', '{self.date_posted}')\"\n\n\n# routes.py\n\n@app.route('/')\n@app.route('/home')\ndef home():\n\tpage = request.args.get('page', 1, type=int)\n\tposts = Post.query.order_by(Post.date_posted.desc()).paginate(page=page, per_page=5)\n\treturn render_template('home.html', posts = posts, fortune = random.choice(phrases),\n\t\tdate = datetime.now().strftime('%d-%m-%Y'))\n\n@app.route('/about')\ndef about():\n\treturn render_template('about.html', title = 'AboutMe')\n\n@app.route('/register', methods = ['GET', 'POST'])\ndef register():\n\tif current_user.is_authenticated:\n\t\treturn redirect(url_for('home'))\n\tform = RegistrationForm()\n\tif form.validate_on_submit():\n\t\thashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')\n\t\tuser = User(username=form.username.data, email=form.email.data, password=hashed_password)\n\t\tdb.session.add(user)\n\t\tdb.session.commit()\n\t\tflash(\"Your account has been created! You are now able to log in.\", 'success')\n\t\treturn redirect(url_for('login'))\n\treturn render_template('register.html', title = 'Register', form = form)\n\n@app.route('/login', methods = ['GET', 'POST'])\ndef login():\n\tif current_user.is_authenticated:\n\t\treturn redirect(url_for('home'))\n\tform = LoginForm()\n\tif form.validate_on_submit():\n\t\tuser = User.query.filter_by(email=form.email.data).first()\n\t\tif user and bcrypt.check_password_hash(user.password, form.password.data):\n\t\t\tlogin_user(user, remember=form.remember.data)\n\t\t\tnext_page = request.args.get('next')\n\t\t\treturn redirect(next_page) if next_page else redirect(url_for('home'))\n\t\telse:\n\t\t\tflash(\"Login Unsuccessful. 
Please check email and password.\", 'danger')\n\treturn render_template('login.html',title = 'Login', form = form)\n\n@app.route('/logout')\ndef logout():\n\tlogout_user()\n\treturn redirect(url_for('home'))\n\ndef save_picture(form_picture):\n\trandom_hex = secrets.token_hex(8)\n\t_, f_ext = os.path.splitext(form_picture.filename)\n\tpicture_fn = random_hex + f_ext\n\tpicture_path = os.path.join(app.root_path, 'static/profile_pics', picture_fn)\n\toutput_size = (125, 125)\n\ti = Image.open(form_picture)\n\ti.thumbnail(output_size)\n\ti.save(picture_path)\n\treturn picture_fn\n\n\n@app.route('/account', methods = ['GET', 'POST'])\n@login_required\ndef account():\n\tform = UpdateAccountForm()\n\tif form.validate_on_submit():\n\t\tif form.picture.data:\n\t\t\tpicture_file = save_picture(form.picture.data)\n\t\t\tcurrent_user.image_file = picture_file\n\t\tcurrent_user.username = form.username.data\n\t\tcurrent_user.email = form.email.data\n\t\tdb.session.commit()\n\t\tflash('Your account has been updated!', 'success')\n\t\treturn redirect(url_for('account'))\n\telif request.method == 'GET':\n\t\tform.username.data = current_user.username\n\t\tform.email.data = current_user.email\n\timage_file = url_for('static', filename = 'profile_pics/' + current_user.image_file)\n\treturn render_template('account.html', title = 'Account', image_file = image_file, form = form)\n\n@app.route('/post/new', methods = ['GET', 'POST'])\n@login_required\ndef new_post():\n\tform = PostForm()\n\tif form.validate_on_submit():\n\t\tpost = Post(title=form.title.data, content=form.content.data, author=current_user)\n\t\tdb.session.add(post)\n\t\tdb.session.commit()\n\t\tflash('Your post has been created!', 'success')\n\t\treturn redirect(url_for('home'))\n\treturn render_template('create_post.html', title = 'NewPost', form = form, legend = 'New Post')\n\n@app.route('/post/<int:post_id>')\n@login_required\ndef post(post_id):\n\tpost = Post.query.get_or_404(post_id)\n\treturn render_template('post.html', title = post.title, post = post)\n\n@app.route('/post/<int:post_id>/update', methods = ['GET', 'POST'])\n@login_required\ndef update_post(post_id):\n\tpost = Post.query.get_or_404(post_id)\n\tif post.author != current_user:\n\t\tabort(403)\n\tform = PostForm()\n\tif form.validate_on_submit():\n\t\tpost.title = form.title.data\n\t\tpost.content = form.content.data\n\t\tdb.session.commit()\n\t\tflash('Your post has been updated!', 'success')\n\t\treturn redirect(url_for('post', post_id=post.id))\n\telif request.method == 'GET':\n\t\tform.title.data = post.title\n\t\tform.content.data = post.content\n\treturn render_template('create_post.html', title='Update Post', form=form, legend='Update Post')\n\n@app.route(\"/post/<int:post_id>/delete\", methods=['POST'])\n@login_required\ndef delete_post(post_id):\n\tpost = Post.query.get_or_404(post_id)\n\tif post.author != current_user:\n\t\tabort(403)\n\tdb.session.delete(post)\n\tdb.session.commit()\n\tflash('Your post has been deleted!', 'success')\n\treturn redirect(url_for('home'))\n\n@app.route('/user/<string:username>')\ndef user_posts(username):\n\tpage = request.args.get('page', 1, type=int)\n\tuser = User.query.filter_by(username=username).first_or_404()\n\tposts = Post.query.filter_by(author=user)\\\n\t\t\t.order_by(Post.date_posted.desc())\\\n\t\t\t.paginate(page=page, per_page=5)\n\treturn render_template('user_posts.html', posts = posts, user = user)\n\n@app.route('/users')\ndef users():\n\tusers = User.query.all()\n\tusers_count = User.query.count()\n\treturn render_template('users.html', title = 'Users', users = users, 
users_count = users_count)\n\n@app.route('/applications')\ndef applications():\n\treturn render_template('applications.html', title = 'Apps')\n\n@app.route(\"/applications/currency_exchange\", methods=['GET', 'POST'])\ndef currency_exchange():\n\tresponse = requests.get(\"https://api.exchangeratesapi.io/latest?base=USD\").json()\n\tdata = response['rates']\n\treturn render_template('currency_exchange.html', data = data)\n\n@app.route(\"/applications/cryptocurrency_exchanges\", methods=['GET', 'POST'])\ndef cryptocurrency_exchanges():\n\tcountries = ['AUD', 'BGN', 'BRL', 'CAD', 'CHF', 'CNY', 'CZK', 'DKK', 'EUR', 'GBP', 'HKD', 'HRK', 'HUF', 'IDR', 'ILS', 'INR', 'ISK', 'JPY', 'KRW', 'MXN', 'MYR', 'NOK', 'NZD', 'PHP', 'PLN', 'RON', 'RUB', 'SEK', 'SGD', 'THB', 'TRY', 'USD', 'ZAR']\n\tbitc = BtcConverter()\n\traw_data = []\n\tfor data in countries:\n\t\traw_data.append([data, bitc.get_latest_price(data)])\n\treturn render_template('cryptocurrency_exchanges.html', raw_data = raw_data)\n\n@app.route(\"/applications/stock_ticker\", methods=['GET', 'POST'])\ndef stock_ticker():\n\tcompany_codes = ['AAPL', 'MSFT', 'FB', 'AMZN', 'SBUX', 'GOOG', 'BABA', 'JNJ', 'JPM', 'XOM', 'BAC', 'WMT', 'WFC', 'INTC', 'VZ', 'ORCL', 'HON']\n\tstart_date = (datetime.now() - timedelta(days=5)).strftime('%d-%m-%Y')\n\traw_data = []\n\tfor code in company_codes:\n\t\tdf = web.DataReader(code, \"yahoo\", start_date, datetime.now())\n\t\tcontent = [code, df['High'].tail(5)[-1], df['Low'].tail(5)[-1], df['Open'].tail(5)[-1], df['Close'].tail(5)[-1], df['Volume'].tail(5)[-1], df['Adj Close'].tail(5)[-1]]\n\t\traw_data.append(content)\n\treturn render_template('stock_ticker.html', raw_data = raw_data)\n\n@app.route('/applications/text_to_speech', methods = ['GET', 'POST'])\ndef text_to_speech():\n\tform = TextToSpeechConversionForm()\n\tif request.method == 'POST':\n\t\tengine = pyttsx3.init()\n\t\tengine.say(form.text.data)\n\t\tengine.runAndWait()\n\t\treturn render_template('text_to_speech.html', form = form)\n\treturn render_template('text_to_speech.html', form = form)\n\n@app.route('/applications/google_search', methods = ['GET', 'POST'])\ndef google_search():\n\tform = GoogleSearchForm()\n\traw_data = []\n\tif request.method == 'POST':\n\t\tkeyword = form.keyword.data\n\t\traw_data = [url for url in search(keyword.casefold(), num_results=10)]\n\t\treturn render_template('google_search.html', form = form, raw_data = raw_data)\n\treturn render_template('google_search.html', form = form)\n\n@app.route('/applications/youtube_video', methods = ['GET', 'POST'])\ndef youtube_video():\n\tform = YoutubeVideoForm()\n\tif request.method == 'POST':\n\t\ttemp_link = form.link.data\n\t\traw_link = \"https://www.youtube.com/embed/{}\".format(temp_link.split('/')[3])\n\t\treturn render_template('youtube_video.html', form = form, raw_link = raw_link)\n\treturn render_template('youtube_video.html', form = form)\n\n@app.route('/applications/song_lyrics', methods = ['GET', 'POST'])\ndef song_lyrics():\n\tform = SongLyricsForm()\n\tif request.method == 'POST':\n\t\tartist_name = form.artist_name.data\n\t\tsong_name = form.song_name.data\n\t\traw_data = PyLyrics.getLyrics(artist_name.casefold(), song_name.casefold())\n\t\traw_data = raw_data.split('\\n')\n\t\treturn render_template('song_lyrics.html', form = form, raw_data = raw_data)\n\treturn render_template('song_lyrics.html', form = form)\n\n@app.route('/applications/oxford_dictionary', methods = ['GET', 'POST'])\ndef oxford_dictionary():\n\tform = OxfordDictionarysForm()\n\tif request.method == 
'POST':\n\t\tword = form.word.data\n\t\tapp_id = '86bad9b7'\n\t\tapp_key = 'b36d710e0a1ea7cbf4080c46e66fa8db'\n\t\tod = OxfordDictionaries(app_id, app_key)\n\t\tsynonyms_data = od.get_synonyms(word.casefold()).json()\n\t\tsynonyms = synonyms_data['results'][0]['lexicalEntries'][0]['entries'][0]['senses'][0]['synonyms']\n\t\tsynonyms = [res['text'] for res in synonyms]\n\t\tantonyms_data = od.get_antonyms(word.casefold()).json()\n\t\tantonyms = antonyms_data['results'][0]['lexicalEntries'][0]['entries'][0]['senses'][0]['antonyms']\n\t\tantonyms = [res['text'] for res in antonyms]\n\t\treturn render_template('oxford_dictionary.html', form = form, synonyms = synonyms, antonyms = antonyms)\n\treturn render_template('oxford_dictionary.html', form = form)\n\n@app.route('/applications/age_calculator', methods = ['GET', 'POST'])\ndef age_calculator():\n\tform = AgeCalculatorsForm()\n\tif request.method == 'POST':\n\t\tdob = form.dob.data\n\t\tb_date = datetime.strptime(dob, '%d/%m/%Y')\n\t\tage_calc = ((datetime.today() - b_date).days/365)\n\t\treturn render_template('age_calculator.html', form = form, age_calc = age_calc)\n\treturn render_template('age_calculator.html', form = form)\n\n@app.route('/applications/get_news', methods = ['GET', 'POST'])\ndef get_news():\n\tresp = soup(requests.get(\"https://news.google.com/news/rss\").text, \"xml\")\n\traw_data = []\n\tfor news in resp.findAll(\"item\"):\n\t\tpubdate = parse(news.pubDate.text)\n\t\tcontent = [news.title.text, news.link.text, pubdate.strftime('%d-%m-%Y %I:%M %p')]\n\t\traw_data.append(content)\n\treturn render_template('get_news.html', raw_data = raw_data)\n\n@app.route('/applications/generate_qrcode', methods = ['GET', 'POST'])\ndef generate_qrcode():\n\tform = GenerateQRCodeFrom()\n\tif request.method == 'POST':\n\t\ttext = form.text.data\n\t\ttext = qrcode(text, box_size=12)\n\t\treturn render_template('generate_qrcode.html', form = form, text = text)\n\treturn render_template('generate_qrcode.html', form = form)\n\n@app.route('/applications/weather_report', methods = ['GET', 'POST'])\ndef weather_report():\n\tform = WeatherReportForm()\n\tif request.method == 'POST':\n\t\tapi_key = '28a31767a1909138a53410a56233a326'\n\t\tcity = form.city.data\n\t\traw_data = []\n\t\tresp = requests.get(f\"http://api.openweathermap.org/data/2.5/weather?q={city}&units=imperial&APPID={api_key}\")\n\t\tdata = json.loads(resp.text)\n\t\traw_data.append(f\" City - {(data['name']).upper()}\")\n\t\traw_data.append(f\" Coordinates - Latitude {data['coord']['lat']} & Longitude {data['coord']['lon']}\")\n\t\traw_data.append(f\" Description - {(data['weather'][0]['description']).upper()}\")\n\t\traw_data.append(f\" Temperature - {data['main']['temp']} F\")\n\t\traw_data.append(f\" Humidity - {data['main']['humidity']}\")\n\t\traw_data.append(f\" Wind Speed - {data['wind']['speed']}\")\n\t\ticon = data['weather'][0]['icon']\n\t\treturn render_template('weather_report.html', form = form, raw_data = raw_data, icon = icon)\n\treturn render_template('weather_report.html', form = form)\n\n@app.route('/applications/flames_game', methods = ['GET', 'POST'])\ndef flames_game():\n\tform = FlamesGameForm()\n\tif request.method == 'POST':\n\t\tyour_name = form.your_name.data\n\t\tpartner_name = form.partner_name.data\n\t\tdata = ''\n\n\t\tdef flames_count(male_name, female_name):\n\t\t\tmale_name_list = list(male_name)\n\t\t\tfemale_name_list = list(female_name)\n\t\t\tfor letter in male_name_list[:]:\n\t\t\t\tif female_name_list.count(letter) > 
0:\n\t\t\t\t\tfemale_name_list.remove(letter)\n\t\t\t\t\tmale_name_list.remove(letter)\n\t\t\treturn len(female_name_list) + len(male_name_list)\n\n\t\t# FLAMES-style elimination: repeatedly strike out every count-th entry\n\t\t# (wrapping around the list) until one relationship label remains.\n\t\tdef flames_result(count):\n\t\t\tflames_list = ['Friend', 'Love', 'Affection', 'Marriage', 'Enemy', 'Sister']\n\t\t\twhile len(flames_list) > 1:\n\t\t\t\tremove_count = count\n\t\t\t\tif count > len(flames_list):\n\t\t\t\t\tremove_count = count % len(flames_list)\n\t\t\t\t\tif remove_count == 0:\n\t\t\t\t\t\tremove_count = len(flames_list)\n\t\t\t\tflames_list.remove(flames_list[remove_count - 1])\n\t\t\t\tflames_list = flames_list[remove_count - 1:] + flames_list[:remove_count - 1]\n\t\t\treturn flames_list[0]\n\n\t\tdef calculate(your_name, partner_name):\n\t\t\tfirst_name = your_name.lower().replace(' ', '')\n\t\t\tsecond_name = partner_name.lower().replace(' ', '')\n\t\t\tcount = flames_count(first_name, second_name)\n\t\t\tresult = flames_result(count)\n\t\t\treturn result\n\n\t\tdata = calculate(your_name, partner_name)\n\t\treturn render_template('flames_game.html', form = form, data = data)\n\treturn render_template('flames_game.html', form = form)\n\n@app.route('/applications/language_translate', methods = ['GET', 'POST'])\ndef language_translate():\n\tform = LanguageTranslateForm()\n\tlanguages = ['Arabic', 'Bengali', 'Bulgarian', 'Chinese', 'French', 'Georgian', 'German', 'Greek', 'Gujarati', 'Hawaiian', 'Hindi', 'Indonesian', 'Italian', 'Japanese', 'Kannada', 'Korean', 'Malay', 'Malayalam', 'Persian', 'Portuguese', 'Serbian', 'Slovak', 'Somali', 'Spanish', 'Swedish', 'Tamil', 'Telugu', 'Thai', 'Turkish', 'Vietnamese']\n\tif request.method == 'POST':\n\t\tword = form.word.data\n\t\traw_data = []\n\t\tfor language in languages:\n\t\t\ttranslator = Translator(from_lang = \"english\", to_lang = language)\n\t\t\tcontent = [language, translator.translate(word)]\n\t\t\traw_data.append(content)\n\t\treturn render_template('language_translate.html', form = form, raw_data = raw_data)\n\treturn render_template('language_translate.html', form = form)\n\n@app.route('/applications/random_quotes', methods = ['GET', 'POST'])\ndef random_quotes():\n\tform = RandomQuotesForm()\n\tif request.method == 'POST':\n\t\tword = form.word.data\n\t\traw_data = []\n\t\tquotes = wikiquote.quotes(word.casefold())\n\t\tfor quote in quotes:\n\t\t\traw_data.append(quote)\n\t\treturn render_template('random_quotes.html', form = form, raw_data = raw_data)\n\treturn render_template('random_quotes.html', form = form)\n\n@app.route('/applications/wikipedia_search', methods = ['GET', 'POST'])\ndef wikipedia_search():\n\tform = WikipediaSearchForm()\n\tif request.method == 'POST':\n\t\tword = form.word.data\n\t\tcontents = wikipedia.summary(word.casefold()).splitlines()\n\t\treturn render_template('wikipedia_search.html', form = form, contents = contents)\n\treturn render_template('wikipedia_search.html', form = form)\n\n@app.route('/applications/recharge_code_generator', methods = ['GET', 'POST'])\ndef recharge_code_generator():\n\tform = RechargeCodeGeneratorForm()\n\tmobile_networks = ['Airtel', 'Jio', 'BSNL', 'Aircel', 'Reliance', 'Idea', 'Vodafone', 'Tata Docomo']\n\tstates = [\"Andhra Pradesh\", \"Arunachal Pradesh\", \"Assam\", \"Bihar\", \"Chhattisgarh\", \"Goa\", \"Gujarat\", \"Haryana\", \"Himachal Pradesh\", \"Jammu and Kashmir\", \"Jharkhand\", \"Karnataka\", \"Kerala\", \"Madhya Pradesh\", \"Maharashtra\", \"Manipur\", \"Meghalaya\", \"Mizoram\", \"Nagaland\", \"Odisha\", \"Punjab\", \"Rajasthan\", \"Sikkim\", \"Tamil Nadu\", 
\"Telangana\", \"Tripura\", \"Uttar Pradesh\", \"Uttarakhand\", \"West Bengal\"]\n\tif request.method == 'POST':\n\t\tmobile_num = form.mobile_num.data\n\t\tdata = \"\".join(random.choice(string.digits) for _ in range(16))\n\t\treturn render_template('recharge_code_generator.html', form = form, data = data, mobile_networks = mobile_networks, states = states)\n\treturn render_template('recharge_code_generator.html', form = form)\n\n@app.route('/applications/ip_address_search', methods = ['GET', 'POST'])\ndef ip_address_search():\n\tform = IPAddressSearchForm()\n\tif request.method == 'POST':\n\t\tname = form.name.data\n\t\traw_data = []\n\t\traw_data.append(f\" Input : {name}\")\n\t\traw_data.append(f\" IP Address: {socket.gethostbyname(name)}\")\n\t\traw_data.append(f\" Fully Qualified Domain Name: {socket.gethostbyaddr(name)}\")\n\t\treturn render_template('ip_address_search.html', form = form, raw_data = raw_data)\n\treturn render_template('ip_address_search.html', form = form)\n\ndef shuffle(q):\n\tselected_keys = []\n\ti = 0\n\twhile i < len(q):\n\t\tkeys = list(q.keys())\n\t\tcurrent_selection = random.choice(keys)\n\t\tif current_selection not in selected_keys:\n\t\t\tselected_keys.append(current_selection)\n\t\t\ti = i+1\n\treturn selected_keys\n\n@app.route('/applications/quiz_game')\ndef quiz_game():\n\tquestions_shuffled = shuffle(questions)\n\tfor i in questions.keys():\n\t\trandom.shuffle(questions[i])\n\treturn render_template('quiz_game.html', questions_shuffled = questions_shuffled, questions = questions)\n\n@app.route('/applications/quiz', methods=['POST'])\ndef quiz_answers():\n\tcorrect = 0\n\tfor i in questions.keys():\n\t\tanswered = request.form[i]\n\t\tif original_questions[i][0] == answered:\n\t\t\tcorrect = correct + 1\n\tcorrect = f\"You have got {correct} Correct Answers out of {len(original_questions)} Questions!\"\n\treturn render_template('quiz_results.html', correct = correct)\n\n@app.route('/applications/cryptography_security', methods = ['GET', 'POST'])\ndef cryptography_security():\n\tform = CryptographyForm()\n\tif request.method == 'POST':\n\t\tname = form.name.data\n\t\traw_data = []\n\t\traw_data.append(f\" Hashed using MD5 : {hashlib.md5(name.encode()).hexdigest()}\")\n\t\traw_data.append(f\" Hashed using SHA 1 : {hashlib.sha1(name.encode()).hexdigest()}\")\n\t\traw_data.append(f\" Hashed using SHA 224 : {hashlib.sha224(name.encode()).hexdigest()}\")\n\t\traw_data.append(f\" Hashed using SHA 256 : {hashlib.sha256(name.encode()).hexdigest()}\")\n\t\traw_data.append(f\" Hashed using SHA 384 : {hashlib.sha384(name.encode()).hexdigest()}\")\n\t\traw_data.append(f\" Hashed using SHA3 224 : {hashlib.sha3_224(name.encode()).hexdigest()}\")\n\t\traw_data.append(f\" Hashed using SHA3 256 : {hashlib.sha3_256(name.encode()).hexdigest()}\")\n\t\traw_data.append(f\" Hashed using SHA3 384 : {hashlib.sha3_384(name.encode()).hexdigest()}\")\n\t\traw_data.append(f\" Hashed using Blake 2B : {hashlib.blake2b(name.encode()).hexdigest()}\")\n\t\traw_data.append(f\" Hashed using Blake 2S : {hashlib.blake2s(name.encode()).hexdigest()}\")\n\t\treturn render_template('cryptography_security.html', form = form, raw_data = raw_data)\n\treturn render_template('cryptography_security.html', form = form)\n\n\n@app.route('/applications/ninga_gold_game')\ndef ninga_gold_game():\n\treturn render_template('ninga_gold_game.html')
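\n\n\n# The mini 'gold game' below keeps per-player state in Flask's signed session\n# cookie: 'gold' accumulates winnings and 'activities' logs each play.\n@app.route('/applications/process_money', methods=['POST'])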
\ndef process_money():\n\ttry:\n\t\tsession['gold']\n\texcept KeyError:\n\t\tsession['gold'] = 0\n\n\ttry:\n\t\tsession['activities']\n\texcept KeyError:\n\t\tsession['activities'] = []\n\n\tif request.form['building'] == 'farm':\n\t\tgold = random.randrange(10,21)\n\telif request.form['building'] == 'cave':\n\t\tgold = random.randrange(5,11)\n\telif request.form['building'] == 'house':\n\t\tgold = random.randrange(2,6)\n\telif request.form['building'] == 'casino':\n\t\tgold = random.randrange(-50,51)\n\telse:\n\t\t# unknown building: treat as no winnings\n\t\tgold = 0\n\n\tactivity = ''\n\ttime = datetime.now().strftime('%Y/%m/%d %I:%M %p')\n\tif gold >= 0:\n\t\tactivity += 'Earned ' + str(gold) + ' golds from the ' + str(request.form['building'])\n\telse:\n\t\tactivity += 'Entered a casino and lost ' + str(abs(gold)) + ' golds... Ouch...'\n\n\tactivity += '! (' + str(time) + ')'\n\tsession['gold'] += gold\n\tsession['activities'].insert(0, activity)\n\treturn redirect(url_for('ninga_gold_game'))\n\n@app.route('/applications/reset')\ndef reset():\n\tsession.pop('gold', None)\n\tsession.pop('activities', None)\n\treturn redirect(url_for('ninga_gold_game'))\n\n@app.route('/applications/play_music', methods = ['GET', 'POST'])\ndef play_music():\n\tform = PlayMusicForm()\n\tif request.method == 'POST':\n\t\taudio_url = form.audio_path.data\n\t\treturn render_template('play_music.html', title = 'Music', form = form, audio_url = audio_url)\n\treturn render_template('play_music.html', form = form)\n\n@app.route(\"/applications/common_passwords\", methods=['GET', 'POST'])\ndef common_passwords():\n\tdf = pd.read_html('https://en.wikipedia.org/wiki/List_of_the_most_common_passwords')\n\tdata = df[0][1:]\n\traw_data = data.values.tolist()\n\treturn render_template('common_passwords.html', raw_data = raw_data)\n\n@app.route(\"/applications/todo_list\")\ndef todo_list():\n\treturn render_template('todo_list.html', title = 'To-Do List')\n\n@app.route(\"/applications/tictactoe_game\")\ndef tictactoe_game():\n\treturn render_template('tictactoe_game.html', title = 'Tic-Tac-Toe Game')\n\n@app.route(\"/applications/towerofhanoi_game\")\ndef towerofhanoi_game():\n\treturn render_template('towerofhanoi_game.html', title = 'Tower-of-Hanoi Game')\n\n@app.route(\"/applications/game_2048\")\ndef game_2048():\n\treturn render_template('game_2048.html', title = '2048 Game')\n\n@app.route('/applications/love_calculator', methods = ['GET', 'POST'])\ndef love_calculator():\n\tform = LoveCalculatorForm()\n\tif request.method == 'POST':\n\t\tboy_name = form.boy_name.data\n\t\tgirl_name = form.girl_name.data\n\t\tscore = f\" Their chance of finding true love together is {100-(len(boy_name)*len(girl_name))-(random.randint(1,20))} %\"\n\t\treturn render_template('love_calculator.html', form = form, score = score)\n\treturn render_template('love_calculator.html', form = form)\n\n@app.route(\"/applications/snake_game\")\ndef snake_game():\n\treturn render_template('snake_game.html', title = 'Snake Game')\n\n@app.route('/applications/zodiac_sign', methods = ['GET', 'POST'])\ndef zodiac_sign():\n\tform = ZodiacSignForm()\n\tif request.method == 'POST':\n\t\tdob = form.dob.data\n\t\tb_date = datetime.strptime(dob, '%d/%m/%Y')\n\t\tday, month, result = b_date.day, b_date.month, \"\"\n\t\tif int(month) == 1:\n\t\t\tresult = 'Capricorn (மகரம் )' if (day < 20) else 'Aquarius (கும்பம் )'\n\t\telif int(month) == 2:\n\t\t\tresult = 'Aquarius (கும்பம் )' if (day < 19) else 'Pisces (மீனம் )'\n\t\telif int(month) == 3:\n\t\t\tresult = 'Pisces (மீனம் )' if (day < 21) else 'Aries (மேஷம்)'\n\t\telif int(month) == 4:\n\t\t\tresult 
= 'Aries (மேஷம்)' if (day < 20) else 'Taurus (ரிஷபம்)'\n\t\telif int(month) == 5:\n\t\t\tresult = 'Taurus (ரிஷபம்)' if (day < 21) else 'Gemini (மிதுனம் )'\n\t\telif int(month) == 6:\n\t\t\tresult = 'Gemini (மிதுனம் )' if (day < 21) else 'Cancer (கடகம் )'\n\t\telif int(month) == 7:\n\t\t\tresult = 'Cancer (கடகம் )' if (day < 23) else 'Leo (சிம்மம் )'\n\t\telif int(month) == 8:\n\t\t\tresult = 'Leo (சிம்மம் )' if (day < 23) else 'Virgo (கன்னி )'\n\t\telif int(month) == 9:\n\t\t\tresult = 'Virgo (கன்னி )' if (day < 23) else 'Libra (துலாம் )'\n\t\telif int(month) == 10:\n\t\t\tresult = 'Libra (துலாம் )' if (day < 23) else 'Scorpio (விருச்சிகம் )'\n\t\telif int(month) == 11:\n\t\t\tresult = 'Scorpio (விருச்சிகம் )' if (day < 22) else 'Sagittarius (தனுசு )'\n\t\telif int(month) == 12:\n\t\t\tresult = 'Sagittarius (தனுசு )' if (day < 22) else 'Capricorn (மகரம் )'\n\t\telif int(month)>12 or day>31:\n\t\t\tresult = 'Invalid Date-of-Birth, Please try again!'\n\t\treturn render_template('zodiac_sign.html', form = form, result = result)\n\treturn render_template('zodiac_sign.html', form = form)\n\n@app.route(\"/applications/emoji_symbols\")\ndef emoji_symbols():\n\traw_data = []\n\traw_data.append(emoji.emojize(\" Thumbs Up -> :thumbs_up:\"))\n\traw_data.append(emoji.emojize(\" Two hearts -> :two_hearts: \"))\n\traw_data.append(emoji.emojize(\" Sparkles -> :sparkles: \"))\n\traw_data.append(emoji.emojize(\" Musical Notes -> :musical_note: \"))\n\traw_data.append(emoji.emojize(\" Family -> :family: \"))\n\traw_data.append(emoji.emojize(\" Baby -> :baby: \"))\n\treturn render_template('emoji_symbols.html', title = 'Emojis', raw_data = raw_data)\n\n@app.route(\"/applications/web_camera\")\ndef web_camera():\n\treturn render_template('web_camera.html', title = 'WebCam')\n\n@app.route(\"/applications/rock_paper_scissor_game\")\ndef rock_paper_scissor_game():\n\treturn render_template('rock_paper_scissor_game.html')\n\n@app.route(\"/applications/python_online_compiler\")\ndef python_online_compiler():\n\tonline_compiler_link = 'https://console.python.org/python-dot-org-console/'\n\treturn render_template('python_online_compiler.html', raw_link = online_compiler_link)\n\n\n# Start the server with the 'run()' method.\nif __name__ == '__main__':\n\tapp.run(debug = True)\n","sub_path":"Cloud/PythonAnywhere/Flask/AkashJeezApp/flaskapp.py","file_name":"flaskapp.py","file_ext":"py","file_size_in_byte":31277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"394849835","text":"import sys\nimport os\nfrom common import connect\n\nsys.path.append('../psycho-mongo')\n\nimport psycho_mongo as pm\n\ndb = \"longitudinal\"\ntable = \"addblock\"\nstats_folder = 'christian_addblock_comp'\n\ncons = ['con_0015.img']\n\nruns = pm.Connect(db).setTable(\"%s_runs\" % table)\nsummary = pm.Connect(db).setTable(\"summary\")\n\nsids = runs.find().distinct('sid')\n\nf = open('sub_age_%s.csv' % table, 'w')\n\nsub_list = []\n\nf.write('sub, age, fsiq, num_ops, math_reas, visit, quad_age, contrast,scan_id\\n')\n\nfor sid in sids:\n\tvisits = runs.find({'sid':sid, 'usable':1}).distinct('visit')\n\tif len(visits) > 1:\n\t\tsub_list.append(sid)\n\t\tvisits.sort()\n\t\tfor visit in visits:\n\t\t\trow = runs.find_one({'sid':sid, 'visit':visit})\n\t\t\tcp = row['base_path'].replace('musk1', 'musk2').replace('/addition_block', '')\n\t\t\tcp = os.path.join(cp, 'stats_spm8', stats_folder)\n\n\t\t\tsumm = summary.find_one({'sid':sid, 'visit_id':visits[0]})
\n\n\t\t\tif 'fsiq_fixed' in summ:\n\t\t\t\tfsiq = summ['fsiq_fixed']\n\t\t\telse:\n\t\t\t\tfsiq = \"NA\"\n\n\t\t\tif 'num_ops_fixed' in summ:\n\t\t\t\tnum_ops = summ['num_ops_fixed']\n\t\t\telse:\n\t\t\t\tnum_ops = \"NA\"\n\n\t\t\tif 'math_reas_fixed' in summ:\n\t\t\t\tmath_reas = summ['math_reas_fixed']\n\t\t\telse:\n\t\t\t\tmath_reas = \"NA\"\n\n\t\t\tfailed = False\n\n\t\t\t# get the list of contrasts on the server\n\t\t\ttry:\n\t\t\t\tmyList = os.listdir(cp)\n\t\t\texcept OSError:\n\t\t\t\tfailed = True\n\n\t\t\t# if the directory exists...\n\t\t\tif not failed:\n\t\t\t\t# check for the paths\n\t\t\t\tfor con in cons:\n\t\t\t\t\tif con not in myList:\n\t\t\t\t\t\tfailed = True\n\n\t\t\tif not failed:\n\t\t\t\tcon_path = os.path.join(cp, cons[0])\n\t\t\telse:\n\t\t\t\tcon_path = \"\"\n\n\t\t\tf.write('%s,%s,%s,%s,%s,%s,%0.2f,%s,%s\\n' % (sid, row['age'], fsiq, num_ops, math_reas, visit, row['age'] ** 2, con_path, row['scan_id']))\n\nf.close()\n\n\n","sub_path":"check_contrasts.py","file_name":"check_contrasts.py","file_ext":"py","file_size_in_byte":1724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"603662966","text":"import sys\n\nimport jenkins_jobs.cli.subcommand.base as base\n\n\nclass TestSubCommand(base.BaseSubCommand):\n def parse_args(self, subparser):\n test = subparser.add_parser('test')\n\n self.parse_option_recursive_exclude(test)\n\n test.add_argument(\n 'path',\n help='''colon-separated list of paths to YAML files or\n directories''',\n nargs='?',\n default=sys.stdin)\n test.add_argument(\n '-p',\n dest='plugins_info_path',\n default=None,\n help='path to plugin info YAML file')\n test.add_argument(\n '-o',\n dest='output_dir',\n default=sys.stdout,\n help='path to output XML')\n test.add_argument(\n 'name',\n help='name(s) of job(s)', nargs='*')\n\n def execute(self, config):\n raise NotImplementedError\n","sub_path":"jenkins_jobs/cli/subcommand/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"575116416","text":"from pyspark.sql.types import *\n\nfrom tests.conftest import SparkConfiguration\nimport main.examples as T\nfrom quinn.extensions import create_df\nimport chispa\n\n\nclass TestTransformations(object):\n\n def test_with_greeting(self, spark):\n source_data = [\n (\"jose\", 1),\n (\"li\", 2)\n ]\n source_df = spark.spark_session.createDataFrame(\n source_data,\n [\"name\", \"age\"]\n )\n actual_df = T.with_greeting(source_df)\n expected_data = [\n (\"jose\", 1, \"hello!\"),\n (\"li\", 2, \"hello!\")\n ]\n expected_df = spark.spark_session.createDataFrame(\n expected_data,\n [\"name\", \"age\", \"greeting\"]\n )\n chispa.assert_df_equality(actual_df, expected_df, ignore_nullable=True)\n\n def test_with_greeting2(self, spark):\n source_data = [\n (\"jose\", 1),\n (\"li\", 2)\n ]\n source_df = spark.spark_session.createDataFrame(\n source_data,\n [\"name\", \"age\"]\n )\n actual_df = source_df.transform(T.with_greeting2(\"hi\"))\n expected_data = [\n (\"jose\", 1, \"hi\"),\n (\"li\", 2, \"hi\")\n ]\n expected_df = spark.spark_session.createDataFrame(\n expected_data,\n [\"name\", \"age\", \"greeting\"]\n )\n chispa.assert_df_equality(actual_df, expected_df, ignore_nullable=True)\n\n def test_with_clean_first_name(self, spark):\n source_df = spark.spark_session.create_df(\n [(\"jo&&se\", \"a\"), (\"##li\", \"b\"), (\"!!sam**\", \"c\")],\n [(\"first_name\", StringType(), True), 
(\"letter\", StringType(), True)]\n )\n actual_df = T.with_clean_first_name(source_df)\n expected_df = spark.spark_session.create_df(\n [(\"jo&&se\", \"a\", \"jose\"), (\"##li\", \"b\", \"li\"), (\"!!sam**\", \"c\", \"sam\")],\n [(\"first_name\", StringType(), True), (\"letter\", StringType(), True),\n (\"clean_first_name\", StringType(), True)]\n )\n chispa.assert_df_equality(actual_df, expected_df, ignore_nullable=True)\n","sub_path":"tests/test_transformations.py","file_name":"test_transformations.py","file_ext":"py","file_size_in_byte":2098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"469452509","text":"#!/bin/python2\n\nimport os,commands,sys,socket\n\ns=socket.socket(socket.AF_INET,socket.SOCK_DGRAM)\n\nos.system('ssh root@192.168.10.121 -X libreoffice4.3')\n\nexecfile('saas.py')\n\n","sub_path":"office.py","file_name":"office.py","file_ext":"py","file_size_in_byte":175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"10981805","text":"# send\n\nimport pika\n\n\n# Establish connection\nconnection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))\nchannel = connection.channel()\n\n# Recipient-queue, in this case hello-queue\nchannel.queue_declare(queue='hello')\n\n# Specify which queue the message should go to, in this case \"hello\"\nchannel.basic_publish(exchange='', routing_key='hello', body='Hello World!')\nprint(\" [x] Sent 'Hello World!'\")\n\n# closing connection\nconnection.close()","sub_path":"expass7/Hello_World/send.py","file_name":"send.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"503514942","text":"class Solution:\n def maxSlidingWindow(self, nums, k):\n \"\"\"\n :type nums: List[int]\n :type k: int\n :rtype: List[int]\n \"\"\"\n # q holds indices whose values are in decreasing order;\n # the front is always the max of the current window\n q, result = [], []\n\n for idx, n in enumerate(nums):\n # evict indices of smaller values from the back\n while q and nums[q[-1]] < n:\n q.pop()\n q.append(idx)\n # drop the front index once it slides out of the window\n if q[0] == idx - k:\n q.pop(0)\n if idx >= k - 1:\n result.append(nums[q[0]])\n return result\n\ns = Solution()\nnums = [1,3,-1,-3,5,3,6,7]\nk = 4\nprint(s.maxSlidingWindow(nums, k))","sub_path":"python/239_slidingWindowMax.py","file_name":"239_slidingWindowMax.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"167452678","text":"import os\nfrom amqp_connect import mq_config, connect_mq, create_channel, consume_messages\nfrom my_log import MyLogging as log\n\nl = log.get_logger('amqp-receiver')\nl.setLevel('INFO')\n\n\ndef message_handler(method_frame, properties, body):\n l.info(f'method_frame: {method_frame}')\n l.info(f'properties: {properties}')\n l.info(f'body: {body}')\n\ndef main():\n path = os.path.join(os.path.dirname(__file__),\n '..', 'data', 'mq_config.ini')\n params = mq_config(path, 'hello')\n with connect_mq(params) as connection:\n with create_channel(connection, 'hello') as channel:\n l.info('starting to consume messages on {}'.format(params.getQueueName()))\n consume_messages(channel, params.getQueueName(), message_handler)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"src/amqp-receiver.py","file_name":"amqp-receiver.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"596395436","text":"
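# A compact policy-gradient pair: PGModel is a shared two-layer MLP with\n# separate policy-logit and state-value heads, and PGLoss couples a\n# REINFORCE-style policy term with a smooth-L1 value-regression term.\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n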
from torch.distributions import Categorical\n\n\nclass PGModel(nn.Module):\n def __init__(self, observation_space, action_space, config):\n super().__init__()\n num_inputs = observation_space.shape[0]\n num_outputs = action_space.n\n\n self.fc1 = nn.Linear(num_inputs, 256)\n self.fc2 = nn.Linear(256, 256)\n\n self.logits = nn.Linear(256, num_outputs)\n self.value = nn.Linear(256, 1)\n\n def forward(self, obs):\n encoding = self.fc1(obs)\n encoding = self.fc2(encoding)\n\n logits = self.logits(encoding)\n value = self.value(encoding)\n\n return [logits, value]\n\n\nclass PGLoss(nn.Module):\n def __init__(self, model):\n super().__init__()\n self.model = model\n\n def forward(self, *args):\n obs, actions, advs = args\n\n logits, values = self.model(obs)\n action_dist = Categorical(logits=logits)\n\n actions = torch.tensor(actions, dtype=torch.float32)\n log_probs = action_dist.log_prob(actions)\n\n policy_loss = torch.mean(-log_probs * advs)\n value_loss = F.smooth_l1_loss(torch.squeeze(values), advs)\n\n return policy_loss + value_loss\n","sub_path":"src/lib/models/pg_model.py","file_name":"pg_model.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"261664901","text":"# -*- coding: utf-8 -*-\nimport nfc\nimport threading\nimport time\nimport sys\n\nfrag = 'False'\n\ndef read(tag):\n\ttag = str(tag) # convert the tag object to a string\n\tid_check = ('ID=' in tag) # check whether the card is supported\n\tif id_check == True: # runs for a supported card\n\t\tidm = tag.find('ID=') # find the index of 'ID=' in the string\n\t\t#idm += 3 # start index of the ID itself\n\t\tidm_end = idm + 16 + 3 # end index of the ID\n\t\tprint(tag[idm:idm_end]) # print the ID\n\telse: # runs for an unsupported card\n\t\tprint('Unsupported_card') # print an error message\n\tglobal frag\n\tfrag = 'True'\n\n\treturn\n\ndef read_start():\n\tclf = nfc.ContactlessFrontend('usb') # see the nfcpy documentation\n\ttag = clf.connect(rdwr={'on-connect': read }) # see the nfcpy documentation\n\ndef time_out():\n\ttime.sleep(5) # wait time (timeout)\n\tprint(\"Time_out\")\n\tglobal frag\n\tfrag = 'True'\n\treturn\n\n\nread_thread = threading.Thread(target=read_start)\nread_thread.setDaemon(True)\nread_thread.start()\n\ntime_out_thread = threading.Thread(target=time_out)\ntime_out_thread.setDaemon(True)\ntime_out_thread.start()\n\nwhile frag == 'False':\n\ttime.sleep(0.5)\nelse:\n\tsys.exit()\n","sub_path":"read-id.py","file_name":"read-id.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"99829911","text":"from operator import itemgetter\nimport math\n\n\nclass Recommender:\n def __init__(self, ratings, movie_filter=9, user_filter=3):\n self.ratings = ratings\n self.movie_filter = movie_filter\n self.user_filter = user_filter\n\n @property\n def movie_avgs(self):\n averages = []\n for movie in self.ratings.movie_table:\n averages.append([movie.ID, self.ratings.ratings_avg(movie.ID)])\n return averages\n\n def double_sort(self, a_list):\n double_sorted = sorted(a_list, key=itemgetter(0))\n double_sorted = sorted(double_sorted, key=itemgetter(1), reverse=True)\n return double_sorted\n\n @property\n def top(self):\n filtered_averages = []\n for movie in self.movie_avgs:\n if len(self.ratings.get_ratings(movie[0])) >= self.movie_filter:\n movie[0] = self.ratings.movie_title(movie[0])\n movie[1] = round(movie[1], 2)\n filtered_averages.append(movie)\n top_movies = self.double_sort(filtered_averages)\n\n return top_movies\n\n def topx(self, cut_off=10):\n return self.top[:cut_off]\n\n def bottomx(self, cut_off=10):\n return self.top[-cut_off:]
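\n\n # Per-user top-N: drop movies the user has already rated, then take the\n # best remaining titles from the global ranking.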
\n def topx_for_user(self, user, cut_off=10):\n user_movies = [rating[0]\n for rating in self.ratings.get_user_ratings(user)]\n\n top_unseen_movies = [movie for movie in self.top\n if movie[0] not in user_movies]\n\n return top_unseen_movies[:cut_off]\n\n def user_match(self, user1, user2):\n # pair the ratings movie-by-movie so both sides refer to the same title;\n # user1 and user2 are lists of (movie_id, rating) pairs\n user2_lookup = {rating[0]: rating[1] for rating in user2}\n shared_ratings = [(rating[1], user2_lookup[rating[0]])\n for rating in user1 if rating[0] in user2_lookup]\n\n return shared_ratings\n\n def pearson_score(self, user1, user2):\n if len(self.user_match(user1, user2)) < self.user_filter:\n return 0\n\n user1_ratings, user2_ratings = zip(*self.user_match(user1, user2))\n\n user1_ratings = list(user1_ratings)\n user2_ratings = list(user2_ratings)\n\n # Pearson r = covariance of the shared ratings divided by the product\n # of their standard deviations (0 when either side has no spread)\n covariance = sum([(x - (sum(user1_ratings)/len(user1_ratings))) *\n (y - (sum(user2_ratings)/len(user2_ratings)))\n for x, y in zip(user1_ratings, user2_ratings)])\n\n user1_deviation = math.sqrt(\n sum([(x - (sum(user1_ratings)/len(user1_ratings)))**2\n for x in user1_ratings]))\n\n user2_deviation = math.sqrt(\n sum([(y - (sum(user2_ratings)/len(user2_ratings)))**2\n for y in user2_ratings]))\n\n deviation_product = user1_deviation * user2_deviation\n\n if deviation_product == 0:\n return 0\n\n pearson_coefficient = covariance/deviation_product\n return pearson_coefficient\n\n def euclidean_distance(self, user1, user2):\n if len(self.user_match(user1, user2)) < self.user_filter:\n return 0\n\n user1_ratings, user2_ratings = zip(*self.user_match(user1, user2))\n\n user1_ratings = list(user1_ratings)\n user2_ratings = list(user2_ratings)\n\n differences = [user1_ratings[idx] - user2_ratings[idx]\n for idx in range(len(user1_ratings))]\n squares = [diff ** 2 for diff in differences]\n sum_of_squares = sum(squares)\n\n return 1 / (1 + math.sqrt(sum_of_squares))\n\n def similarity_score(self, user, user_list, function):\n users = user_list\n similar_users = []\n recommendations = []\n user_movies = [rating[0] for rating in self.ratings.get_user_ratings(user[0])]\n for userID in users:\n if userID[0] != user[0]:\n similarity = function(user[1], userID[1])\n similar_users.append([userID[0], similarity])\n similar_users = sorted(similar_users, key=itemgetter(1), reverse=True)\n\n for userID in similar_users[:10]:\n movies = self.ratings.get_user_ratings(userID[0])\n for movie in movies:\n if movie[0] not in [rating for rating in user_movies]:\n recommendations.append([movie[0], (userID[1] * movie[1])])\n if len(recommendations) == 0:\n return []\n return self.double_sort(recommendations)\n\n def user_recommendation(self, user, cut_off=10, pearson=True):\n users = [[key, self.ratings.user_table[key]] for key in self.ratings.user_table]\n input_user = [user, self.ratings.user_table[user]]\n if pearson:\n function = self.pearson_score\n else:\n function = self.euclidean_distance\n ordered_recommendations = \\\n self.similarity_score(input_user, users, function)\n movie_recommendations = []\n for movie in ordered_recommendations:\n if movie[0] not in movie_recommendations:\n movie_recommendations.append(movie[0])\n\n return movie_recommendations[:cut_off]\n","sub_path":"recommendations.py","file_name":"recommendations.py","file_ext":"py","file_size_in_byte":5103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"572734743","text":"# 課題2_配布データ.csv (Assignment 2 distributed data)\n\nimport os
\nimport matplotlib.pyplot as plt\n\n'''\npath = os.getcwd()\npath = os.pardir\n'''\nprint(os.getcwd())\nchoice_address = \"/Users/etojin/anaconda3/bin/conda/nagai labo_task/卒論生_共通課題/2020\"\nprocessing_file = choice_address\n\nwant_files_name=[]\ninput_name=input(\"Enter a name: \")\nprint(input_name)\nfor search_folder, search_subfolders, search_files in os.walk(processing_file):\n for search_file in search_files:\n if search_file.count(input_name)>0:\n want_files_name.append(os.path.join(search_folder,search_file))\nprint(want_files_name)\nprint(len(want_files_name))\n\nI17_df = []\nI18_df = []\nI19_df = []\n\nfor want_file_name in want_files_name:\n with open(want_file_name.replace('\\\\','\\\\\\\\'), encoding = \"shift-jis\") as f:\n line = f.read().split('\\n')\n for reader in line:\n if reader == line[0]:\n header = reader.split(',')\n else:\n row = reader.split(',')\n if not row[0]==\"\":\n I17_df.append(int(row[0]))\n if not row[1]==\"\":\n I18_df.append(int(row[1]))\n if not row[2]==\"\":\n I19_df.append(int(row[2]))\nprint(I18_df)\n\nI17_df.sort()\nI18_df.sort()\nI19_df.sort()\nprint(I17_df)\nprint(I18_df)\nprint(I19_df)\n#print(I17_df[int(int(len(I17_df)) / 4)+10])\n'''\nif int(len(I17_df)) % 2 == 0:\n print(I17_df[int(int(len(I17_df)) / 2)])\n\nelse:\n print(int((I17_df[int(int(len(I17_df)) / 2)+1]+I17_df[int(int(len(I17_df)) / 2)-1])/2))\n'''\n\n\nplot = (I17_df,I18_df,I19_df)\nfig, ax = plt.subplots()\n\nbp = ax.boxplot(plot)\nax.set_xticklabels(['I17_df', 'I18_df','I19_df'])\n'''\nplt.title('Title')\nplt.xlabel('Xlabel')\nplt.ylabel('Ylabel')\n# range of the Y-axis ticks\nplt.ylim([1900,5700])\nplt.grid()\n'''\n# draw the plot\nplt.show()\n#print(int(int(len(I17_df)) / 4))\n","sub_path":"nagai labo_task/卒論生_共通課題/2020/栄藤仁/試作.py","file_name":"試作.py","file_ext":"py","file_size_in_byte":1893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"553440974","text":"# -*- coding: utf-8 -*-\nimport logging\nimport os\nimport signal\nimport sys\n\nimport tornado\nimport tornado.httpserver\nfrom tornado.ioloop import IOLoop\nfrom tornado.options import define, options\nfrom tornado.web import url\n\nimport core\nimport ir_fit\nfrom core import my_redis,oss,config\nfrom core.config import Config\nfrom handler import app,order_batch,image_retrieval,relevance\n\n\ndef make_app():\n settings = {\n \"debug\": options.debug,\n \"app_name\": Config().name,\n \"compress_response\": True,\n \"template_path\" : 'templates', # template directory; set on the Application so Tornado can locate the html files\n \"static_path\" : 'static' # static file directory for js, css and images; with this set, tornado can find static files itself\n }\n\n handlers = [\n url(r\"/test\", app.TestHandler, name='app.test'),\n url(r\"/algorithm/warehouse/orderbatchan\", order_batch.OrderBatchAnHandler, name='OrderBatchAn'),\n url(r\"/algorithm/warehouse/orderbatchsn\", order_batch.OrderBatchSnHandler, name='OrderBatchSn'),\n url(r\"/algorithm/order/relevance\", relevance.RelevanceHandler, name='Relevance'),\n url(r\"/algorithm/imageretrieval/test\",image_retrieval.IndexHandler, name='IrTest'),\n url(r\"/algorithm/imageretrieval/imageretrieval\",image_retrieval.ImageRetrivalHandler, name='ImageRetrieval'),\n url(r\"/algorithm/imageretrieval/fit\",image_retrieval.IrFitHandler, name='IrFit'),\n url(r\"/algorithm/imageretrieval/label\",image_retrieval.IrLabelHandler, name='IrLabel'),\n url(r\"/algorithm/imageretrieval/fitstatus\",image_retrieval.IrFitStatusHandler, name='IrFitStatus'),\n url(r\"/.*\", app.NotFoundHandler, name='error404')\n ]
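\n\n # Each url() entry maps a route regex to its handler class; the optional\n # name enables reverse routing via Application.reverse_url().\n return tornado.web.Application(handlers,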
**settings)\n\n\ndef signal_shutdown_handler(signal, frame):\n logging.critical('application exited')\n sys.exit(0)\n\n\ndef main():\n init_options()\n tornado.options.parse_command_line()\n\n if options.debug:\n logging.getLogger().setLevel(logging.DEBUG)\n else:\n logging.getLogger().setLevel(logging.INFO)\n\n logging.info('application started')\n\n # set signal handlers\n signal.signal(signal.SIGINT, signal_shutdown_handler)\n signal.signal(signal.SIGTERM, signal_shutdown_handler)\n\n app = make_app()\n server = tornado.httpserver.HTTPServer(app, xheaders=True, max_buffer_size=1048576000) # max file upload size: 1000 MB\n server.bind(options.port)\n server.start(options.thread)\n # server.listen(options.port)\n logging.info(\"{} Server started on port: {}\".format(Config().name, options.port))\n IOLoop.current().start()\n\n\ndef init_options():\n options.define(\"debug\", default=Config().debug)\n options.define(\"thread\", default=Config().thread)\n options.define('port', default=Config().port,\n help='run on the given port', type=int)\n\n\nif __name__ == '__main__':\n my_redis.init()\n oss.init()\n ir_fit.main()\n main()\n ","sub_path":"start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":2994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"381755502","text":"import os\nimport shutil\nimport sys\n\nsys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '.')))\n\n# import imp\n# imp.load_source( \"lumapi\", \"/Applications/Lumerical 2020a.app/Contents/API/Python/lumapi.py\" )\nimport lumapi\n\nimport functools\nimport h5py\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport time\n\nis_lumerical_version_2020a = False\n\ndef permittivity_to_index( permittivity ):\n\t# complex index from complex permittivity:\n\t# n = sqrt((|eps| + Re(eps)) / 2), kappa = sqrt((|eps| - Re(eps)) / 2)\n\teps_real = np.real( permittivity )\n\teps_imag = np.imag( permittivity )\n\n\teps_mag = np.sqrt( eps_real**2 + eps_imag**2 )\n\n\tn = np.sqrt( ( eps_mag + eps_real ) / 2. )\n\tkappa = np.sqrt( ( eps_mag - eps_real ) / 2. )\n\n\treturn ( n + 1j * kappa )\n\n\ndef get_non_struct_data(monitor_name, monitor_field):\n\tlumerical_data_name = \"monitor_data_\" + monitor_name + \"_\" + monitor_field\n\tdata_transfer_filename = projects_directory_location + \"/data_transfer_\" + monitor_name + \"_\" + monitor_field\n\n\tcommand_read_monitor = lumerical_data_name + \" = getresult(\\'\" + monitor_name + \"\\', \\'\" + monitor_field + \"\\');\"\n\tcommand_save_data_to_file = \"matlabsave(\\'\" + data_transfer_filename + \"\\', \" + lumerical_data_name + \");\"\n\n\tlumapi.evalScript(fdtd_hook.handle, command_read_monitor)\n\n\tlumapi.evalScript(fdtd_hook.handle, command_save_data_to_file)\n\tmonitor_data = {}\n\tload_file = h5py.File(data_transfer_filename + \".mat\", 'r')\n\n\tmonitor_data = np.array(load_file[lumerical_data_name])\n\n\treturn monitor_data['real']\n\n#\n# Consolidate the data transfer functionality for getting data from the Lumerical\n# FDTD process to the Python process.
\n# This is much faster than going through Lumerical's interop library.\n#\ndef get_monitor_data(monitor_name, monitor_field):\n\tlumerical_data_name = \"monitor_data_\" + monitor_name + \"_\" + monitor_field\n\textracted_data_name = lumerical_data_name + \"_data\"\n\tdata_transfer_filename = projects_directory_location + \"/data_transfer_\" + monitor_name + \"_\" + monitor_field\n\n\tcommand_read_monitor = lumerical_data_name + \" = getresult(\\'\" + monitor_name + \"\\', \\'\" + monitor_field + \"\\');\"\n\tcommand_extract_data = extracted_data_name + \" = \" + lumerical_data_name + \".\" + monitor_field + \";\"\n\tcommand_save_data_to_file = \"matlabsave(\\'\" + data_transfer_filename + \"\\', \" + extracted_data_name + \");\"\n\n\tlumapi.evalScript(fdtd_hook.handle, command_read_monitor)\n\tlumapi.evalScript(fdtd_hook.handle, command_extract_data)\n\tlumapi.evalScript(fdtd_hook.handle, command_save_data_to_file)\n\n\tmonitor_data = {}\n\tload_file = h5py.File(data_transfer_filename + \".mat\", 'r')\n\n\tmonitor_data = np.array(load_file[extracted_data_name])\n\n\treturn monitor_data\n\ndef get_complex_monitor_data(monitor_name, monitor_field):\n\tdata = get_monitor_data(monitor_name, monitor_field)\n\treturn (data['real'] + 1j * data['imag'])\n\n\n\n#\n# Create FDTD hook\n#\nfdtd_hook = lumapi.FDTD()\n\npython_src_directory = os.path.abspath(os.path.join(os.path.dirname(__file__), '.'))\nprojects_directory_location = os.path.abspath(os.path.join(os.path.dirname(__file__), '../projects/'))\n\nif not os.path.isdir(projects_directory_location):\n\tos.mkdir(projects_directory_location)\n\nprojects_directory_location += \"/optimize_absorptive_switch_states_single_freq_v2\"\n\nif not os.path.isdir(projects_directory_location):\n\tos.mkdir(projects_directory_location)\n\nlog_file = open(projects_directory_location + \"/log.txt\", 'w')\nlog_file.write(\"Log\\n\")\nlog_file.close()\n\nfdtd_hook.newproject()\nfdtd_hook.save(projects_directory_location + \"/optimization\")\n\nfdtd_region_size_lateral_um = 5\nfdtd_region_minimum_vertical_um = -2.5\nfdtd_region_maximum_vertical_um = 2.5\n\nmesh_size_um = 0.005\n\nfdtd_region_minimum_lateral_voxels = 1 + int ( fdtd_region_size_lateral_um / mesh_size_um )\nfdtd_region_minimum_vertical_voxels = 1 + int( ( fdtd_region_maximum_vertical_um - fdtd_region_minimum_vertical_um ) / mesh_size_um )\n\n\n#\n# Set up the FDTD region and mesh\n#\nfdtd = fdtd_hook.addfdtd()\nfdtd['dimension'] = '2D'\nfdtd['x span'] = fdtd_region_size_lateral_um * 1e-6\nfdtd['y max'] = fdtd_region_maximum_vertical_um * 1e-6\nfdtd['y min'] = fdtd_region_minimum_vertical_um * 1e-6\nfdtd['mesh type'] = 'uniform'\nfdtd['define x mesh by'] = 'number of mesh cells'\nfdtd['define y mesh by'] = 'number of mesh cells'\n# Setting the x min bc to Bloch will automatically set the x max bc to Bloch and lock it\nfdtd['x min bc'] = 'PML'\nfdtd['x max bc'] = 'PML'\nfdtd['y min bc'] = 'PML'\nfdtd['y max bc'] = 'PML'\n# fdtd['dt stability factor'] = 0.25\nfdtd['mesh cells x'] = fdtd_region_minimum_lateral_voxels\nfdtd['mesh cells y'] = fdtd_region_minimum_vertical_voxels\nfdtd['simulation time'] = 100000 * 1e-15\nfdtd['background index'] = 1.0\n\nlambda_min_um = 0.4\nlambda_max_um = 0.7\nnum_design_frequency_points = 20\nhalf_frequency_point = int( 0.5 * num_design_frequency_points )\n\nlambda_values_um = np.linspace( lambda_min_um, lambda_max_um, num_design_frequency_points )\n\ndevice_width_um = 2\ndevice_height_um = 1\ndevice_min_um = 0\ndevice_max_um = device_min_um + 
device_height_um\n\n\n#\n# General polarized source information\n#\nxy_phi_rotations = [0, 90]\nxy_names = ['x', 'y']\n\n\nforward_src = fdtd_hook.addplane()\nforward_src['name'] = 'forward_src'\nforward_src['plane wave type'] = 'Diffracting'\nforward_src['polarization angle'] = 90\nforward_src['direction'] = 'Backward'\nforward_src['x span'] = 1.1 * device_width_um * 1e-6\nforward_src['y'] = ( device_max_um + 0.5 * device_height_um ) * 1e-6\nforward_src['wavelength start'] = lambda_min_um * 1e-6\nforward_src['wavelength stop'] = lambda_max_um * 1e-6\n\n\ntbox_y_max_um = 2.0\ntbox_y_min_um = -1.0\ntbox_x_max_um = 1.5\ntbox_x_min_um = -1.5\n\nadjoint_monitor_top = fdtd_hook.addpower()\nadjoint_monitor_top['name'] = 'adjoint_monitor_top'\nadjoint_monitor_top['monitor type'] = 'Linear X'\nadjoint_monitor_top['x span'] = 3 * 1e-6\nadjoint_monitor_top['y'] = tbox_y_max_um * 1e-6\nadjoint_monitor_top['override global monitor settings'] = 1\nif is_lumerical_version_2020a:\n\tadjoint_monitor_top['use wavelength spacing'] = 1\nelse:\n\tadjoint_monitor_top['use linear wavelength spacing'] = 1\n\nadjoint_monitor_top['use source limits'] = 0\nadjoint_monitor_top['minimum wavelength'] = lambda_min_um * 1e-6\nadjoint_monitor_top['maximum wavelength'] = lambda_max_um * 1e-6\nadjoint_monitor_top['frequency points'] = num_design_frequency_points\nadjoint_monitor_top['output Hx'] = 1\nadjoint_monitor_top['output Hy'] = 1\nadjoint_monitor_top['output Hz'] = 1\n\n\ntop_adjoint_source = fdtd_hook.addimportedsource()\ntop_adjoint_source['name'] = 'top_adjoint_source'\ntop_adjoint_source['injection axis'] = 'y-axis'\ntop_adjoint_source['direction'] = 'Backward'\ntop_adjoint_source['wavelength start'] = lambda_min_um * 1e-6\ntop_adjoint_source['wavelength stop'] = lambda_max_um * 1e-6\ntop_adjoint_source['x span'] = adjoint_monitor_top['x span']\ntop_adjoint_source['y'] = adjoint_monitor_top['y']\n\n\ntransmission_box_top = fdtd_hook.addpower()\ntransmission_box_top['name'] = 'transmission_box_top'\ntransmission_box_top['monitor type'] = 'Linear X'\ntransmission_box_top['x span'] = 3 * 1e-6\ntransmission_box_top['y'] = tbox_y_max_um * 1e-6\ntransmission_box_top['override global monitor settings'] = 1\nif is_lumerical_version_2020a:\n\ttransmission_box_top['use wavelength spacing'] = 1\nelse:\n\ttransmission_box_top['use linear wavelength spacing'] = 1\n\ntransmission_box_top['use source limits'] = 0\ntransmission_box_top['minimum wavelength'] = lambda_min_um * 1e-6\ntransmission_box_top['maximum wavelength'] = lambda_max_um * 1e-6\ntransmission_box_top['frequency points'] = num_design_frequency_points\ntransmission_box_top['output Hx'] = 1\ntransmission_box_top['output Hy'] = 1\ntransmission_box_top['output Hz'] = 1\n\n\n\ndef compute_transmission_top( wavelength_indexes ):\n\tget_T = get_monitor_data( transmission_box_top[ 'name' ], 'T' )\n\tget_T *= 1.0\n\tget_T = get_T[ 0 ]\n\n\tselect_data = get_T[ wavelength_indexes[ 0 ] : wavelength_indexes[ 1 ] ]\n\ttotal = np.mean( select_data )\n\n\treturn total\n\n\n#\n# Disable all sources in the simulation, so that we can selectively turn single sources on at a time\n#\ndef disable_all_sources():\n\tfdtd_hook.switchtolayout()\n\n\ttop_adjoint_source.enabled = 0\n\tforward_src.enabled = 0\n\n\n\n#\n# Set up the volumetric electric field monitor inside the design region. 
We will need this compute\n# the adjoint gradient\n#\n\ndesign_efield_monitor = fdtd_hook.addprofile()\ndesign_efield_monitor['name'] = 'design_efield_monitor'\ndesign_efield_monitor['x span'] = device_width_um * 1e-6\ndesign_efield_monitor['y min'] = device_min_um * 1e-6\ndesign_efield_monitor['y max'] = device_max_um * 1e-6\ndesign_efield_monitor['override global monitor settings'] = 1\nif is_lumerical_version_2020a:\n\tdesign_efield_monitor['use wavelength spacing'] = 1\nelse:\n\tdesign_efield_monitor['use linear wavelength spacing'] = 1\ndesign_efield_monitor['use source limits'] = 0\ndesign_efield_monitor['minimum wavelength'] = lambda_min_um * 1e-6\ndesign_efield_monitor['maximum wavelength'] = lambda_max_um * 1e-6\ndesign_efield_monitor['frequency points'] = num_design_frequency_points\ndesign_efield_monitor['output Hx'] = 0\ndesign_efield_monitor['output Hy'] = 0\ndesign_efield_monitor['output Hz'] = 0\n\ndesign_index_monitor = fdtd_hook.addindex()\ndesign_index_monitor['name'] = 'design_index_monitor'\ndesign_index_monitor['x span'] = device_width_um * 1e-6\ndesign_index_monitor['y min'] = device_min_um * 1e-6\ndesign_index_monitor['y max'] = device_max_um * 1e-6\n\n\n\n#\n# Add device region and create device permittivity\n#\n\ndevice_import = fdtd_hook.addimport()\ndevice_import['name'] = 'device_import'\ndevice_import['x span'] = device_width_um * 1e-6\ndevice_import['y min'] = device_min_um * 1e-6\ndevice_import['y max'] = device_max_um * 1e-6\ndevice_import['z min'] = -0.51 * 1e-6\ndevice_import['z max'] = 0.51 * 1e-6\ndevice_import[\"override mesh order from material database\"] = 1\ndevice_import['mesh order'] = 1\n\n\ndevice_width_voxels = 2 + int( device_width_um / mesh_size_um )\ndevice_height_voxels = 2 + int( device_height_um / mesh_size_um )\n\npermittivity_max = 2.5\npermittivity_min = 1.5\npermittivity_mid = 0.5 * ( permittivity_min + permittivity_max )\n\ndevice_permittivity = permittivity_mid * np.ones( ( device_width_voxels, device_height_voxels, 2 ) )\n\ndevice_x_range = 1e-6 * np.linspace( -0.5 * device_width_um, 0.5 * device_width_um, device_width_voxels )\ndevice_y_range = 1e-6 * np.linspace( device_min_um, device_max_um, device_height_voxels )\ndevice_z_range = 1e-6 * np.linspace( -0.51, 0.51, 2 )\n\ncavity_height_um = 0.2\ncavity_index = 1.5\ncavity_max_um = device_min_um\ncavity_min_um = cavity_max_um - cavity_height_um\n\ncavity = fdtd_hook.addrect()\ncavity['name'] = 'cavity'\ncavity['x span'] = device_width_um * 1e-6\ncavity['y max'] = cavity_max_um * 1e-6\ncavity['y min'] = cavity_min_um * 1e-6\ncavity['z min'] = -0.51 * 1e-6\ncavity['z max'] = 0.51 * 1e-6\ncavity['index'] = cavity_index\n\n\n# gsst_n_states = [ 3.0, 4.5 ]\n# gsst_k_states = [ 0.1, 0.25 ]\n\ngsst_n_states = [ 4.2, 5.75 ]\ngsst_k_states = [ 2.5, 3.75 ]\n\n\n# note: may want an override mesh here around this interface because it is small and high index\ngsst_height_um = 3 * mesh_size_um\n\nnum_gsst_states = len( gsst_n_states )\n\ngsst_max_um = cavity_min_um\ngsst_min_um = cavity_min_um - gsst_height_um\n\ngsst_indexes = [ ( gsst_n_states[ idx ] + 1j * gsst_k_states[ idx ] ) * np.ones( ( 2, 2, 2 ), dtype=np.complex ) for idx in range( 0, len( gsst_n_states ) ) ]\n\ngsst_import = fdtd_hook.addimport()\ngsst_import['name'] = 'gsst_import'\ngsst_import['x span'] = device_width_um * 1e-6\ngsst_import['y min'] = gsst_min_um * 1e-6\ngsst_import['y max'] = gsst_max_um * 1e-6\ngsst_import['z min'] = -0.51 * 1e-6\ngsst_import['z max'] = 0.51 * 1e-6\ngsst_import[\"override mesh order from material 
database\"] = 1\ngsst_import['mesh order'] = 1\n\n# gsst_override_mesh = fdtd_hook.addmesh()\n# gsst_override_mesh['name'] = 'gsst_override_mesh'\n# gsst_override_mesh['x span'] = fdtd_region_size_lateral_um * 1e-6\n# gsst_override_mesh['y min'] = ( gsst_min_um - 0.05 ) * 1e-6\n# gsst_override_mesh['y max'] = ( gsst_max_um + 0.05 ) * 1e-6\n# gsst_override_mesh['z min'] = -0.51 * 1e-6\n# gsst_override_mesh['z max'] = 0.51 * 1e-6\n# # gsst_override_mesh['dx'] = 0.001 * 1e-6\n# gsst_override_mesh['dy'] = 0.003 * 1e-6\n\ngsst_x_range = 1e-6 * np.linspace( -0.5 * device_width_um, 0.5 * device_width_um, 2 )\ngsst_y_range = 1e-6 * np.linspace( gsst_min_um, gsst_max_um, 2 )\ngsst_z_range = 1e-6 * np.linspace( -0.51, 0.51, 2 )\n\n\nmirror_max_um = gsst_min_um\nmirror_height_um = 0.5\nmirror_min_um = mirror_max_um - mirror_height_um\n\nmirror = fdtd_hook.addrect()\nmirror['name'] = 'mirror'\nmirror['x span'] = device_width_um * 1e-6\nmirror['y max'] = mirror_max_um * 1e-6\nmirror['y min'] = mirror_min_um * 1e-6\nmirror['z min'] = -0.51 * 1e-6\nmirror['z max'] = 0.51 * 1e-6\nmirror['material'] = 'Au (Gold) - Palik'\n\ndef lumapi_set_wavelength( wl_idx ):\n\t# lumerical script is one indexed so need to adjust from python indexing\n\tcmd = \"wl_idx = \" + str( wl_idx + 1 ) + \";\"\n\tlumapi.evalScript( fdtd_hook.handle, cmd )\n\nlumapi_pull_results = \"\"\"\n\tE_field = getresult( \"adjoint_monitor_top\", \"E\" );\n\tH_field = getresult( \"adjoint_monitor_top\", \"H\" );\n\"\"\"\n\nlumapi_import_source = \"\"\"\n\tEx = E_field.Ex( :, :, :, wl_idx );\n\tEy = E_field.Ey( :, :, :, wl_idx );\n\tEz = E_field.Ez( :, :, :, wl_idx );\n\tHx = H_field.Hx( :, :, :, wl_idx );\n\tHy = H_field.Hy( :, :, :, wl_idx );\n\tHz = H_field.Hz( :, :, :, wl_idx );\n\tget_f = E_field.f( wl_idx );\n\tget_lambda = c / get_f;\n\tEM = rectilineardataset(\"EM fields\",E_field.x,E_field.y,E_field.z);\n\tEM.addparameter(\"lambda\",get_lambda,\"f\",get_f);\n\tEM.addattribute(\"E\",conj(Ex),conj(Ey),conj(Ez));\n\tEM.addattribute(\"H\",conj(Hx),conj(Hy),conj(Hz));\n\tswitchtolayout;\n\tselect(\"top_adjoint_source\");\n\timportdataset(EM);\n\"\"\"\n\ndirectional_weightings_by_state = [ np.ones( num_design_frequency_points ) for idx in range( 0, num_gsst_states ) ]\n# directional_weightings_by_state[ 1 ][ 0 : half_frequency_point ] = -1\ndirectional_weightings_by_state[ 0 ][ : ] = -1\ndirectional_weightings_by_state[ 1 ][ : ] = 0\ndirectional_weightings_by_state[ 1 ][ int( 3 * num_design_frequency_points / 4. 
) ] = num_design_frequency_points\n\nnum_iterations = 50\nfigure_of_merit_by_iteration_by_state_by_wavelength = np.zeros( ( num_iterations, num_gsst_states, num_design_frequency_points ) )\nfigure_of_merit_by_iteration = np.zeros( num_iterations )\n\nfor iteration in range( 0, num_iterations ):\n\n\tgradient_by_gsst_state = []\n\tfom_by_gsst_state = []\n\n\tfor gsst_state in range( 0, num_gsst_states ):\n\n\t\tdevice_index = permittivity_to_index( device_permittivity )\n\t\tnp.save( projects_directory_location + '/cur_device.npy', device_permittivity )\n\t\tfdtd_hook.switchtolayout()\n\t\tfdtd_hook.select( device_import['name'] )\n\t\tfdtd_hook.importnk2( device_index, device_x_range, device_y_range, device_z_range )\n\t\tfdtd_hook.select( gsst_import['name'] )\n\t\tfdtd_hook.importnk2( gsst_indexes[ gsst_state ], gsst_x_range, gsst_y_range, gsst_z_range )\n\n\t\tdisable_all_sources()\n\t\tforward_src.enabled = 1\n\t\tfdtd_hook.run()\n\n\t\ttransmission_fom = np.zeros( num_design_frequency_points )\n\t\tfor wl_idx in range( 0, num_design_frequency_points ):\n\t\t\tget_T_top = compute_transmission_top( [ wl_idx, wl_idx + 1 ] )\n\t\t\tfom_T = get_T_top\n\t\t\tif directional_weightings_by_state[ gsst_state ][ wl_idx ] < 0:\n\t\t\t\tfom_T = 1 + directional_weightings_by_state[ gsst_state ][ wl_idx ] * fom_T\n\n\t\t\tfom_T = np.maximum( np.minimum( fom_T, 1.0 ), 0.0 )\n\n\t\t\ttransmission_fom[ wl_idx ] = fom_T\n\n\t\tfigure_of_merit_by_iteration_by_state_by_wavelength[ iteration, gsst_state, : ] = transmission_fom\n\t\tfom_by_gsst_state.append( np.mean( transmission_fom ) )\n\n\t\tforward_e_fields = get_complex_monitor_data( design_efield_monitor[ 'name' ], 'E' )\n\t\tadjoint_e_fields = np.zeros( forward_e_fields.shape, dtype=np.complex )\n\n\t\tlumapi.evalScript( fdtd_hook.handle,\n\t\t\t''.join( lumapi_pull_results.split() )\n\t\t)\n\n\n\n\t\tfor wl_idx in range( 0, num_design_frequency_points ):\n\t\t\tfdtd_hook.switchtolayout()\n\t\t\tlumapi_set_wavelength( wl_idx )\n\n\t\t\tshutil.copy(\n\t\t\t\tprojects_directory_location + \"/optimization.fsp\",\n\t\t\t\tprojects_directory_location + \"/optimization_gsst_state_\" + str( gsst_state ) + \".fsp\" )\n\t\t\t\n\t\t\tdisable_all_sources()\n\t\t\ttop_adjoint_source.enabled = 1\n\n\t\t\tlumapi.evalScript( fdtd_hook.handle,\n\t\t\t\t''.join( lumapi_import_source.split() )\n\t\t\t)\n\n\t\t\tfdtd_hook.run()\n\n\t\t\tsingle_wl_adjoint_e_fields = get_complex_monitor_data( design_efield_monitor[ 'name' ], 'E' )\n\t\t\tadjoint_e_fields[ :, wl_idx, :, :, : ] = single_wl_adjoint_e_fields[ :, wl_idx, :, :, : ]\n\n\t\tgradient = -2 * np.real( np.sum( forward_e_fields * adjoint_e_fields, axis=0 ) / 1j )\n\n\t\tfor wl_idx in range( 0, num_design_frequency_points ):\n\t\t\tgradient[ wl_idx, :, :, : ] *= directional_weightings_by_state[ gsst_state ][ wl_idx ]\n\n\t\tfom_weightings = ( 2. / num_design_frequency_points ) - transmission_fom**2 / np.sum( transmission_fom**2 )\n\t\tfom_weightings = np.maximum( fom_weightings, 0 )\n\t\tfom_weightings /= np.sum( fom_weightings )\n\n\t\tweighted_gradient = np.zeros( gradient[ 0 ].shape )\n\t\tfor wl_idx in range( 0, num_design_frequency_points ):\n\t\t\tweighted_gradient += fom_weightings[ wl_idx ] * gradient[ wl_idx ] \n\n\t\tweighted_gradient = np.swapaxes( weighted_gradient, 0, 2 )\n\t\tgradient_by_gsst_state.append( weighted_gradient )\n\n\tfom_by_gsst_state = np.array( fom_by_gsst_state )\n\tfigure_of_merit_by_iteration[ iteration ] = np.mean( fom_by_gsst_state )\n\tweightings_by_state = 1. 
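
Two details of the loop above are worth unpacking. First, the gradient = -2 * np.real( np.sum( forward_e_fields * adjoint_e_fields, axis=0 ) / 1j ) line is the usual continuous-adjoint contraction: up to constants and a phase convention set by the adjoint-source normalization, the figure-of-merit gradient with respect to permittivity at each design voxel is Re(E_forward * E_adjoint) summed over field components. Second, the fom_weightings heuristic (and the per-state weighting this line continues, which coincides with it when there are exactly two objectives) gives under-performing objectives more of the gradient budget. A standalone sketch of that weighting rule with a numeric check:

import numpy as np

def performance_weights(foms):
    # Start every objective at an equal share and subtract its squared
    # contribution; clipping at zero can drop the best performers entirely.
    w = (2.0 / len(foms)) - foms**2 / np.sum(foms**2)
    w = np.maximum(w, 0)
    return w / np.sum(w)

# performance_weights(np.array([0.9, 0.1])) -> approximately [0.01, 0.99],
# i.e. nearly all of the update goes to the objective that is still failing.
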
- fom_by_gsst_state**2 / np.sum( fom_by_gsst_state**2 )\n\tweightings_by_state = np.maximum( weightings_by_state, 0 )\n\tweightings_by_state /= np.sum( weightings_by_state )\n\tprint( \"On iteration \" + str( iteration ) + \" fom by gsst state = \" + str( fom_by_gsst_state ) )\n\n\ttotal_gradient = np.zeros( gradient_by_gsst_state[ 0 ].shape )\n\tfor gsst_state in range( 0, num_gsst_states ):\n\t\ttotal_gradient += weightings_by_state[ gsst_state ] * gradient_by_gsst_state[ gsst_state ]\n\n\tstep_size_rel = 0.05 - ( iteration / ( num_iterations - 1 ) ) * ( 0.05 - 0.025 )\n\tstep = step_size_rel * total_gradient / np.max( np.abs( total_gradient ) )\n\n\tstepped_permittivity = device_permittivity[ :, :, 0 ] + step[ :, :, 0 ]\n\tstepped_permittivity = np.maximum( np.minimum( stepped_permittivity, permittivity_max ), permittivity_min )\n\tdevice_permittivity[ :, :, 0 ] = stepped_permittivity\n\tdevice_permittivity[ :, :, 1 ] = stepped_permittivity\n\n","sub_path":"tests/OptimizeAbsorptive.py","file_name":"OptimizeAbsorptive.py","file_ext":"py","file_size_in_byte":18224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"46289331","text":"#!/usr/bin/python3\n\nimport re\n\naddBill = 0\t\t#additional bills\nareaCode = 0\nphoneNumber = 0\nmessageQty = 0\nmonthBill = 5\t\t#default bill/basic bill\ntotalBill = 0\nsentinel1 = \"123\"\nsentinel2 = \"1234567\"\n\nsentinelPresence = False\n\nwhile bool(sentinelPresence) == False:\n\tareaCode = str(input(\"Area Code: \"))\n\twhile not len(areaCode) == 3 or not re.match(\"^[0-9]*$\", areaCode):\n\t\tprint(\"Please input only 3 digits.\")\n\t\tareaCode = str(input(\"Area Code: \"))\n\tif areaCode == sentinel1:\n\t\tsentinelPresence = True\n\tphoneNumber = str(input(\"Phone Number: \"))\n\twhile not len(phoneNumber) == 7 or not re.match(\"^[0-9]*$\", phoneNumber):\n\t\tprint(\"Please input only 7 digits.\")\n\t\tphoneNumber = str(input(\"Phone Number: \"))\n\tif phoneNumber == sentinel2:\n\t\tsentinelPresence = True\n\tmessageQty += 1\n\nif bool(sentinelPresence) == True:\n\tif messageQty > 60 and messageQty < 180:\n\t\texcess1 = messageQty - 60\n\t\taddBill += 0.05 * excess1\n\t\tmonthBill += addBill\n\tif messageQty > 180:\n\t\texcess2 = messageQty - 180\n\t\taddBill += 0.1 * excess2\n\t\tmonthBill += addBill\n\ttax = monthBill * 0.12\n\ttotalBill = monthBill + tax\n\tprint(\" \")\n\tprint(\"===================================\")\n\tprint(\"Area Code: \" + areaCode)\n\tprint(\"Phone Number: \" + phoneNumber)\n\tprint(\"No. 
of Messages: \" + str(messageQty))\n\tprint(\"Bill ($): \" + str(monthBill))\n","sub_path":"CPE102L/Chapter 04 - Making Decisions (Exercises)/Code/6b.py","file_name":"6b.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"124702400","text":"#!/usr/bin/env/python\r\n# -*- coding: utf-8 -*-\r\n\r\n__author__ = \"Relrin\"\r\n__credits__= \"Elliptic curves, exchange keys (Diffi-Hellman) and ECDSA algorithms\"\r\n\r\nif __name__ == '__main__':\r\n import sys\r\n \r\n if (len(sys.argv)<2):\r\n print(\"\"\"Examples of commands:\r\n get_ab [value M]\r\n gen_pairs [value M] [value A] [value B]\r\n key_exchange [value M] [value A] [value B]\r\n ecdsa [msg.txt] [value M] [value A] [value B]\r\n \"\"\")\r\n else:\r\n \r\n # генерирование всех пар (a,b), зная модуль M\r\n if sys.argv[1]==\"get_ab\":\r\n \r\n import GetValuesForEC\r\n ec=GetValuesForEC.EC(int(sys.argv[2]))\r\n print(\"All pairs (a,b): %s\" % ec.getAB())\r\n \r\n # генерирование всех пар Em(a,b)\r\n if sys.argv[1]==\"gen_pairs\":\r\n \r\n import GetValuesForEC\r\n ec=GetValuesForEC.EC(int(sys.argv[2]))\r\n ec.setAB((int(sys.argv[3]),int(sys.argv[4])))\r\n if ec.getAllPairsEC()!=None:\r\n print(\"Pairs from Em(%d,%d): %s\" % (int(sys.argv[3]),int(sys.argv[4]),ec.getAllPairsEC()))\r\n \r\n # эмуляция обмена ключами между двумя пользователями\r\n if sys.argv[1]==\"key_exchange\":\r\n \r\n # инициализация\r\n import random, GetValuesForEC, EllipticCurve\r\n ec_=GetValuesForEC.EC(int(sys.argv[2]))\r\n ec_.setAB((int(sys.argv[3]),int(sys.argv[4])))\r\n ec = EllipticCurve.EC(int(sys.argv[3]), int(sys.argv[4]), int(sys.argv[2]))\r\n g,_= ec.at(0) # 23 1 18 # 23 9 18 # 23 11 13\r\n # 23 13 18 # 23 15 3\r\n # 53 2 13 # 53 2 40 # 53 4 11\r\n # 53 4 42 # 53 5 17\r\n assert ec.order(g) <= ec.q\r\n \r\n # используем ECDH\r\n dh = EllipticCurve.DiffieHellman(ec, g)\r\n # ключ пользователя А\r\n while(True):\r\n A_private = random.randrange(1,int(sys.argv[2]))\r\n if ec_.isPrime(A_private):\r\n break\r\n A_public = dh.gen(A_private)\r\n # ключ пользователя Б\r\n while(True):\r\n B_private = random.randrange(1,int(sys.argv[2]))\r\n if ec_.isPrime(B_private):\r\n break\r\n B_public = dh.gen(B_private)\r\n # вывод сгенерированных ключей\r\n print(\"Public A key: (%d,%d)\" % A_public)\r\n print(\"Public B key: (%d,%d)\" % B_public)\r\n print(\"Private A key: %d\" % A_private)\r\n print(\"Private B key: %d\" % B_private)\r\n # ключ для пары \r\n Ka=dh.secret(A_private, B_public)\r\n Kb=dh.secret(B_private, A_public)\r\n print(\"Secret key for user A: (%d,%d)\" % Ka)\r\n print(\"Secret key for user B: (%d,%d)\" % Kb)\r\n \r\n # получение цифровой подписи\r\n if sys.argv[1]==\"ecdsa\":\r\n \r\n #инициализация \r\n import GetValuesForEC, EllipticCurve, SHA1, random\r\n ec_= GetValuesForEC.EC(int(sys.argv[3]))\r\n ec_.setAB((int(sys.argv[4]),int(sys.argv[5])))\r\n ec = EllipticCurve.EC(int(sys.argv[4]), int(sys.argv[5]), int(sys.argv[3]))\r\n g,_= ec.at(0)\r\n eg = EllipticCurve.ElGamal(ec, g)\r\n dsa = EllipticCurve.DSA(ec, g)\r\n \r\n # читаем файл\r\n file=open(sys.argv[2],\"r\")\r\n text=file.read()\r\n file.close()\r\n # сгенерируем хеш\r\n hash_text=int(''.join([str(hex(h)[2:]).replace(\"L\",\"\") for h in SHA1.sha1(text)]),16)\r\n \r\n # генерируем ЭЦП\r\n while(True):\r\n private_key = random.randrange(1,int(sys.argv[3])-1)\r\n if ec_.isPrime(private_key):\r\n public_key = eg.gen(private_key)\r\n print(\"Private key: %d\" % private_key)\r\n print(\"Public 
key: (%d,%d)\" % public_key)\r\n r = public_key[0] % int(sys.argv[3])\r\n if r==0:\r\n continue\r\n else:\r\n break\r\n #r = 20 # 53 5 17\r\n sig=dsa.sign(hash_text, private_key, r)\r\n print(sig)\r\n \r\n # валидация ЭЦП\r\n if dsa.validate(hash_text, sig, public_key):\r\n print(\"Successfully accepted!\")\r\n else:\r\n print(\"Not equal digital signatures!\")","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"635907218","text":"\"\"\"ProductOperations request.\n\nSave description for products.\n\"\"\"\n\nfrom ..apirequest import APIRequest\n\n\nclass SaveDescription(APIRequest):\n \"\"\"saveDescription request.\"\"\"\n\n uri = \"Handlers/Products/saveDescription.ashx\"\n\n def __new__(self, *, description, product_ids=[], channel_id=0):\n \"\"\"Create saveDescription request.\n\n Args:\n request_mode: requestmode header\n \"\"\"\n self.description = str(description)\n if isinstance(product_ids, str) or isinstance(product_ids, int):\n self.product_ids = [str(product_ids)]\n else:\n self.product_ids = [str(x) for x in product_ids]\n self.channel_id = channel_id\n return super().__new__(self)\n\n def get_data(self):\n \"\"\"Get data for get request.\"\"\"\n data = {\n \"channelID\": self.channel_id,\n \"desc\": self.description,\n \"prodids\": \",\".join(self.product_ids),\n }\n return data\n\n def get_params(self):\n \"\"\"Get parameters for get request.\"\"\"\n return {\"d\": \"769\"}\n\n def process_response(self, response):\n \"\"\"Handle request response.\"\"\"\n self.raise_for_non_200(\n self,\n response,\n 'Error saving description for product IDs \"{}\".'.format(\n \", \".join(self.product_ids)\n ),\n )\n return response.text\n","sub_path":"ccapi/requests/products/savedescription.py","file_name":"savedescription.py","file_ext":"py","file_size_in_byte":1393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"188180037","text":"\"\"\"\nWe truthfully declare:\n- to have contributed approximately equally to this assignment [if this is not true, modify this sentence to disclose individual contributions so we can grade accordingly]\n- that we have neither helped other students nor received help from other students\n- that we provided references for all code that is not our own\n\nYannick Hogebrug y.r.hogebrug@student.vu.nl\nJesse Schouten j7.schouten@student.vu.nl\n\"\"\"\n \n#%%\nimport os\nos.chdir(\"C:/Users/Elitebook/Desktop/ProjectBigData\")\n\nfilenames = [\"hue_upload.csv\",\"hue_upload2.csv\"]\ndef read_csv_data(filenames):\n import pandas as pd\n import re as re\n import numpy as np\n import datetime\n \n def isNoInformativeEvent(eventid):\n matches = re.search(r'(lamp_change|nudge_time|bedtime_tonight|risetime|rise_reason|adherence_importance|fitness)',eventid) \n if matches:\n result = False\n else:\n result = True\n \n return result\n\n def getEvent(eventid):\n \n matches = re.search(r'(lamp_change|nudge_time|bedtime_tonight|risetime|rise_reason|adherence_importance|fitness)',eventid)\n \n if matches:\n result = matches.group(0) \n else: result = 'No event'\n \n return result \n \n def getDateTimeFromEventID(eventid): \n import datetime\n \n monthDict = {'januari' : 1,'februari' : 2,'maart' : 3,'april' : 4,'mei' : 5,'juni' : 6,'juli' : 7,'augustus' : 8,'september' : 9, 'oktober' : 10,'november' : 11,'december' : 12}\n \n matches = 
re.search(r'((\\d{1,2})_(\\D{1,12})_(\\d{4}))+(_(\\d{1,2})_(\\d{1,2})_(\\d{1,2}))*',eventid)\n \n if matches: \n year = int(matches.group(0).split('_')[2])\n month = monthDict[matches.group(0).split('_')[1]]\n day = int(matches.group(0).split('_')[0])\n \n #isDatetime = re.search(r'(_(\\d{1,2})_(\\d{1,2})_(\\d{1,2}))',line)\n #if isDatetime:\n # hour = int(matches.group(0).split('_')[3])\n # minute =int(matches.group(0).split('_')[4])\n # sec = int(matches.group(0).split('_')[5])\n hour = 0\n minute = 0\n sec = 0\n \n result = datetime.datetime(year,month,day,hour,minute,sec)\n else: \n result = 'No datetime' \n \n return result\n\n def insert_if_new(df,idx):\n if idx not in df.index:\n df = df.append(pd.Series({'bedtime' : float('nan'),\\\n 'intended_bedtime' : float('nan'),\\\n 'risetime' : float('nan'),\\\n 'rise_reason' : float('nan'),\\\n 'fitness' : float('nan'),\\\n 'adherence_importance' : float('nan'),\\\n 'in_experimental_group' : False},\\\n name=idx))\n return df\n \n def convertValueToDateTime(time,dateLine,event):\n import datetime\n \n matchestime = re.search(r'((\\d{1,2})(\\d{2}))+',time)\n matchesdate = re.search(r'((\\d{1,2})_(\\D{1,12})_(\\d{4}))+',dateLine)\n \n monthDict = {'januari' : 1,'februari' : 2,'maart' : 3,'april' : 4,'mei' : 5,'juni' : 6,'juli' : 7,'augustus' : 8,'september' : 9, 'oktober' : 10,'november' : 11,'december' : 12}\n \n result = 'no datetime found'\n \n if matchesdate and matchestime: \n year = int(matchesdate.group(0).split('_')[2])\n month = monthDict[matchesdate.group(0).split('_')[1]]\n day = int(matchesdate.group(0).split('_')[0])\n #Check if user starting sleeping in morning (wrong input)\n hour = int(matchestime.group(2))\n \n if event == 'bedtime_tonight': \n #if someone expects to sleep between 6:00 and 13:00, he/she problably mean the evening\n if hour >=6 and hour <= 12:\n hour += 12\n elif hour > 12 and hour < 13:\n hour -= 12\n \n #Check if the time is set at 24:00, and change to 0:00\n if hour == 24:\n hour = 0\n else:\n hour = int(matchestime.group(2)) \n \n minute = int(matchestime.group(3))\n \n result = datetime.datetime(year,month,day,hour,minute)\n \n return result\n\n def getTimeFromLampChange(event_id):\n import datetime \n \n matches = re.search(r'(\\d{1,2})_(\\D{1,12})_(\\d{4})+_(\\d{1,2})_(\\d{1,2})_(\\d{1,2})_(\\d{3})+', event_id)\n \n monthDict = {'januari' : 1,'februari' : 2,'maart' : 3,'april' : 4,'mei' : 5,'juni' : 6,'juli' : 7,'augustus' : 8,'september' : 9, 'oktober' : 10,'november' : 11,'december' : 12}\n \n year = int(matches.group(3))\n month = int(monthDict[matches.group(2)])\n day = int(matches.group(1))\n hour = int(matches.group(4))\n minute = int(matches.group(5))\n second = int(matches.group(6))\n millisecond = int(matches.group(7))\n \n result = datetime.datetime(year, month, day, hour, minute, second, millisecond)\n \n return result\n \n \n columns = ['bedtime','intended_bedtime','risetime','rise_reason','fitness','adherence_importance','in_experimental_group']\n dataresult = pd.DataFrame(columns=columns)\n \n for file in filenames: \n with open(file) as f:\n lines = [line.rstrip('\\n') for line in f]\n for line in lines: \n line_values = line.split(';')\n user_id = int(re.search(r'\\d+',line_values[1]).group())\n event_id =line_values[2]\n value = line_values[3]\n \n if isNoInformativeEvent(event_id):\n continue\n \n event = getEvent(event_id)\n dtime = getDateTimeFromEventID(event_id)\n index = (dtime,user_id)\n \n if(event == 'lamp_change' and value == '\"OFF\"'):\n time = 
getTimeFromLampChange(event_id)\n                    #Check whether the bedtime doesn't belong to the day before\n                    if int(time.strftime('%H')) < 6:\n                        dtime = dtime - datetime.timedelta(hours = 24) \n                        index = (dtime,user_id)\n                    #Check whether the (possibly) changed index was already created for the user_id\n                    if index not in dataresult.index:\n                        dataresult = insert_if_new(dataresult,index)\n                    \n                    dateAtIndex = dataresult.ix[index,'bedtime']\n                    #Just replace 'bedtime' when the time is later than a currently registered time! \n                    #OR no time at all yet in cell!\n                    if type(dateAtIndex) == float:\n                        dataresult = dataresult.set_value(index, 'bedtime', time) \n                    elif(time > dateAtIndex):\n                        dataresult = dataresult.set_value(index, 'bedtime', time)\n                \n                if event == 'bedtime_tonight':\n                    #Skip if the time in the string has 1,2 or more than 5 digits (taking the commas into account!)\n                    if (len(value) > 4) and (len(value) < 7):\n                        intendedBedtime = convertValueToDateTime(value,event_id,event)\n                        dataresult = dataresult.set_value(index,'intended_bedtime',intendedBedtime)\n                \n                if event == 'risetime':\n                    #Skip if the time in the string has 1,2 or more than 5 digits (taking the commas into account!)\n                    if (len(value) > 4) and (len(value) < 7):\n                        risetime = convertValueToDateTime(value,event_id,event)\n                        dataresult = dataresult.set_value(index,'risetime',risetime)\n                \n                if event == 'rise_reason':\n                    dataresult = dataresult.set_value(index, 'rise_reason', value) \n                \n                if event == 'nudge_time':\n                    dataresult = dataresult.set_value(index, 'in_experimental_group', True)\n                \n                if(event == 'fitness'):\n                    dataresult = dataresult.set_value(index, 'fitness', value) \n                \n                if(event == 'adherence_importance'):\n                    dataresult = dataresult.set_value(index, 'adherence_importance', value) \n                \n    return dataresult\n    \n#%% \n#Returns a filled mongodb (if we wouldn't do this it would only exist in the function)\ndef to_mongodb(df):\n    import pymongo, datetime\n    \n    def bedtime_previousDay(df, i):\n        previousDay = df.index[i][0] - datetime.timedelta(days = 1)\n        userID = df.index[i][1]\n        try:\n            bedtime = df.get_value((previousDay, userID), 'bedtime')\n            return bedtime\n        except KeyError:\n            return float('nan')\n    \n    client = pymongo.MongoClient(\"localhost\", 27017)\n    db = client['BigData'] \n    sleepdata = db['sleepdata']\n    sleepdata.delete_many({})\n    \n    for i in range(0,len(df)):\n        if isinstance(df['bedtime'][i],datetime.datetime):\n            bedtime = bedtime_previousDay(df, i)\n            if isinstance(bedtime ,datetime.datetime) and \\\n            isinstance(df['risetime'][i], datetime.datetime): \n                risetime = df['risetime'][i] \n                sleepDuration = round((risetime - bedtime).total_seconds(), 0)\n            else:\n                sleepDuration = float('nan')\n            \n            #Search for the bedtime of the day before\n            df.index[i][0] - datetime.timedelta(days = 1)\n            \n        else:\n            sleepDuration = float('nan')\n        \n        x = {}\n        x['_id'] = {'date': df.index[i][0], 'user': df.index[i][1]}\n        \n        sleepdata.insert_one({'_id':x,\\\n                    'date':df.index[i][0],\\\n                    'user':df.index[i][1],\\\n                    'bedtime': df['bedtime'][i],\\\n                    'intended_bedtime' : df['intended_bedtime'][i],\\\n                    'risetime' : df['risetime'][i],\\\n                    'rise_reason' : df['rise_reason'][i],\\\n                    'fitness' : df['fitness'][i],\\\n                    'adherence_importance' : df['adherence_importance'][i],\\\n                    'in_experimental_group' : df['in_experimental_group'][i],\\\n                    'sleep_duration' : sleepDuration})\n        \n    return sleepdata  \n\n#%%\n#assumes the database is named sleepdata\ndef read_mongodb(filter,sort):\n    import pymongo\n    from pymongo import MongoClient\n    from pprint import pprint\n    import re as re\n    import time\n    \n    def isNan(var):\n        return isinstance(var,float)\n    \n    #for doc in sleepdata.find():\n    #    print(doc)\n    \n    query = sleepdata.find(filter)\n\n    matches = 
re.search(r'(_id|date|user|bedtime|intended_bedtime|risetime|rise_reason|fitness|adherence_importance|in_experimental_group|sleep_duration)',sort) \n\n if matches:\n printedQuery = query.sort(sort,pymongo.ASCENDING)\n else: \n printedQuery = query \n \n print(\"%10s\\t%2s\\t%8s\\t%8s\\t%8s\\t%5s\\t%5s\\t%5s\\t%5s\\t%7s\\t\" %\n ('date'\n ,'user'\n ,'bedtime'\n ,'intended'\n ,'risetime'\n ,'reason'\n ,'fitness'\n ,'adh'\n ,'in_exp'\n ,'sleep_duration'))\n \n for document in printedQuery: \n date = str(document['date'].date())\n user = str(document['user'])\n \n if isNan(document['bedtime']): \n bedtime = str(document['bedtime'])\n else: \n bedtime = str(document['bedtime'].time())\n \n if isNan(document['intended_bedtime']):\n int_bedtime = str(document['intended_bedtime'])\n else: \n int_bedtime = str(document['intended_bedtime'].time()) \n \n if isNan(document['risetime']):\n risetime = str(document['risetime'])\n else: \n risetime = str(document['risetime'].time()) \n \n rise_reason = str(document['rise_reason'])\n fitness = str(document['fitness'])\n adh_importance = str(document['adherence_importance'])\n in_exp_group = str(document['in_experimental_group'] )\n sleep_duration = str(document['sleep_duration'])\n\n print(\"%10s\\t%2s\\t%8s\\t%8s\\t%8s\\t%5s\\t%5s\\t%5s\\t%5s\\t%7s\\t\" %\n (date\n ,user\n ,bedtime\n ,int_bedtime\n ,risetime\n ,rise_reason\n ,fitness\n ,adh_importance\n ,in_exp_group\n ,sleep_duration))\n\n#%%\nif __name__ == '__main__':\n # this code block is run if you run solution.py (instead of run_solution.py)\n # it is convenient for debugging\n\n df = read_csv_data([\"hue_upload.csv\",\"hue_upload2.csv\"])\n sleepdata = to_mongodb(df)\n read_mongodb({},'_id')\n","sub_path":"Assignment2/solution w2 Jesse.py","file_name":"solution w2 Jesse.py","file_ext":"py","file_size_in_byte":13530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"384481696","text":"import requests\nimport logging\nimport os\nimport json\nfrom .auth import get_auth_token\n\n\ndef make_request(\n *, method, path, query=None, payload=None, parse_payload=True, parse_response=True\n):\n api_endpoint = __get_api_endpoint()\n verify_ssl = False if api_endpoint.find(\"https://localhost\", 0) == 0 else True\n method = method.lower()\n url = f\"{api_endpoint}{path}\"\n\n headers = {\"Authorization\": f\"Bearer {get_auth_token()}\"}\n\n logging.debug(f\"Making API '{method}' request to '{url}'\")\n\n if parse_payload and payload:\n payload = json.dumps(payload)\n\n r = getattr(requests, method)(\n url, headers=headers, params=query, verify=verify_ssl, data=payload\n )\n\n response_json = {}\n try:\n if r.status_code >= 400 or (parse_response and r.text != \"OK\"):\n response_json = r.json()\n except Exception:\n raise Exception(r.text)\n\n if r.status_code >= 400:\n raise Exception(response_json[\"error\"])\n elif parse_response:\n return response_json\n else:\n return r.text\n\n\ndef make_paginated_request(\n *, path, query={}, page_size=100, max_results=None, parse_response=True\n):\n logging.debug(f\"Making paginated API request to '{path}'\")\n\n page = 0\n results = []\n next_page_token = None\n\n while True:\n if max_results is not None and len(results) >= max_results:\n break\n\n response = make_request(\n method=\"get\",\n path=path,\n parse_response=True,\n query={\n **query,\n **{\n \"pageToken\": next_page_token,\n \"maxResults\": page_size\n if max_results is None or (page + 1) * page_size < max_results\n else max_results - page * 
page_size,\n },\n },\n )\n page += 1\n results += response[\"results\"]\n next_page_token = response[\"nextPageToken\"]\n if not next_page_token:\n break\n\n return results\n\n\ndef make_rows_request(*, uri, max_results, query={}):\n page = 0\n page_size = 100000\n\n rows = \"\"\n while page * page_size < max_results:\n results = make_request(\n method=\"get\",\n path=f\"{uri}/rows\",\n parse_response=False,\n query={\n **query,\n **{\n \"startIndex\": page * page_size,\n \"maxResults\": page_size\n if (page + 1) * page_size < max_results\n else max_results - page * page_size,\n },\n },\n )\n if page != 0:\n rows += \"\\n\"\n rows += results\n page += 1\n\n if not rows:\n return []\n\n return [json.loads(row) for row in rows.split(\"\\n\")]\n\n\ndef __get_api_endpoint():\n return (\n \"https://redivis.com/api/v1\"\n if os.getenv(\"REDIVIS_API_ENDPOINT\") is None\n else os.getenv(\"REDIVIS_API_ENDPOINT\")\n )\n","sub_path":"redivis/common/api_request.py","file_name":"api_request.py","file_ext":"py","file_size_in_byte":2992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"229880842","text":"# Copyright 2016 The Chromium Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\nfrom datetime import date\nfrom datetime import timedelta\nimport os\nimport sys\n\n_ROOT_DIR = os.path.realpath(os.path.join(os.path.dirname(__file__),\n os.path.pardir))\n_FIRST_PARTY_DIR = os.path.join(_ROOT_DIR, 'first_party')\nsys.path.insert(1, _FIRST_PARTY_DIR)\n\nfrom local_libs import script_util\nscript_util.SetUpSystemPaths(_ROOT_DIR)\n\nfrom analysis.type_enums import CrashClient\nfrom app.common.model.clusterfuzz_analysis import ClusterfuzzAnalysis\nfrom app.common.model.cracas_crash_analysis import CracasCrashAnalysis\nfrom app.common.model.fracas_crash_analysis import FracasCrashAnalysis\nfrom app.common.model.uma_sampling_profiler_analysis import (\n UMASamplingProfilerAnalysis)\nfrom libs.cache_decorator import GeneratorCached\nfrom local_libs import local_iterator\nfrom local_libs.local_cache import LocalCache\n\n\n_DEFAULT_BATCH_SIZE = 1000\n_TODAY = date.today().strftime('%Y-%m-%d')\n_A_YEAR_AGO = (date.today() - timedelta(days=365)).strftime('%Y-%m-%d')\n_CLIENT_ID_TO_CLASS = {CrashClient.FRACAS: FracasCrashAnalysis,\n CrashClient.CRACAS: CracasCrashAnalysis,\n CrashClient.CLUSTERFUZZ: ClusterfuzzAnalysis,\n CrashClient.UMA_SAMPLING_PROFILER:\n UMASamplingProfilerAnalysis}\n\n\n# TODO(crbug.com/662540): Add unittests.\ndef IterateCrashes(client_id,\n app_id,\n projection=None,\n property_values=None,\n start_date=_A_YEAR_AGO,\n end_date=_TODAY,\n batch_size=_DEFAULT_BATCH_SIZE,\n batch_run=False): # pragma: no cover.\n \"\"\"Genrates query to query crashes and iterates crashes.\n\n Args:\n client_id (CrashClient): One of the 3 supported clients -\n CrashClient.FRACAS, CrashClient.CRACAS and CrashClient.CLUSTERFUZZ.\n app_id (str): App engine app id.\n projection (tuple or list): Operations return entities with only the\n specified properties set. For example:\n projection=[Article.title, Article.date] or\n projection=['title', 'date'] fetches entities with just those two\n fields set. 
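
make_paginated_request() above is the standard cursor/pageToken pagination loop: fetch a page, accumulate its results, and stop when the server stops returning a next token or max_results is reached. The same control flow reduced to its skeleton, against a hypothetical fetch_page(token, size) callable:

def paginate(fetch_page, page_size=100, max_results=None):
    results, token = [], None
    while True:
        # fetch_page is assumed to return {"results": [...], "nextPageToken": ...}
        page = fetch_page(token, page_size)
        results.extend(page["results"])
        token = page.get("nextPageToken")
        if not token or (max_results is not None and len(results) >= max_results):
            return results[:max_results]

# results[:None] is a full copy, so the max_results=None case needs no special handling.
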
Note, query can only project indexed properties.\n property_values (dict): Property values to filter.\n start_date (str): Only iterate testcases after this date including this\n date, format '%Y-%m-%d'.\n end_date (str): Only iterate testcases before this date excluding this date,\n format '%Y-%m-%d'.\n batch_size (int): The number of crashes to query at one time.\n batch_run (bool): If True, iterate batches of crashes, if\n False, iterate each crash.\n\n An example is available in crash_printer/print_crash.py.\n \"\"\"\n cls = _CLIENT_ID_TO_CLASS.get(client_id)\n if property_values:\n property_values = {getattr(cls, property_name): value for\n property_name, value in property_values.iteritems()}\n\n query = script_util.GetFilterQuery(\n cls.query(), cls.requested_time, start_date, end_date,\n property_values=property_values)\n\n # According to https://goo.gl/5BgxQt, the query must be sorted by key\n # to make a query with both ``IN`` operation and cursor.\n query = query.order(-cls.requested_time, cls.key)\n for crash in local_iterator.ScriptIterate(\n query, app_id, projection=projection,\n batch_size=batch_size, batch_run=batch_run):\n yield crash\n\n\n@GeneratorCached(LocalCache(), namespace='Crash-iterator') # pragma: no cover.\ndef CachedCrashIterator(client_id, app_id,\n projection=None,\n property_values=None,\n start_date=_A_YEAR_AGO, end_date=_TODAY,\n batch_size=_DEFAULT_BATCH_SIZE, batch_run=False):\n \"\"\"Genrates query to query crashes and iterates crashes.\n\n This iterator will check local cache first, if there is cache, iterate cached\n values, else it will visit datastore of appengine app to yield data.\n\n Args:\n client_id (CrashClient): One of the 3 supported clients -\n CrashClient.FRACAS, CrashClient.CRACAS and CrashClient.CLUSTERFUZZ.\n app_id (str): App engine app id.\n projection (tuple or list): Operations return entities with only the\n specified properties set. For example:\n projection=[Article.title, Article.date] or\n projection=['title', 'date'] fetches entities with just those two\n fields set. 
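
The GeneratorCached decorator imported above (from libs.cache_decorator, which is not shown in this file) evidently replays a generator's output from a local cache keyed on the call arguments. One plausible minimal implementation of that idea, with a plain dict standing in for the LocalCache used above:

import functools

def generator_cached(cache):
    def decorator(gen_func):
        @functools.wraps(gen_func)
        def wrapper(*args, **kwargs):
            key = repr((gen_func.__name__, args, sorted(kwargs.items())))
            if key not in cache:
                # Exhaust the generator once and store the yielded items.
                cache[key] = list(gen_func(*args, **kwargs))
            yield from cache[key]
        return wrapper
    return decorator

# cache = {}
# @generator_cached(cache)
# def numbers(n):
#     yield from range(n)
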
Note, query can only project indexed properties.\n    property_values (dict): Property values to filter.\n    start_date (str): Only iterate testcases after this date including this\n      date, format '%Y-%m-%d'.\n    end_date (str): Only iterate testcases before this date excluding this date,\n      format '%Y-%m-%d'.\n    batch_size (int): The number of crashes to query at one time.\n    batch_run (bool): If True, iterate batches of crashes, if\n      False, iterate each crash.\n\n  An example is available in crash_printer/print_crash.py.\n  \"\"\"\n  for crash in IterateCrashes(client_id, app_id, projection=projection,\n                              property_values=property_values,\n                              start_date=start_date, end_date=end_date,\n                              batch_size=batch_size, batch_run=batch_run):\n    yield crash\n","sub_path":"appengine/predator/scripts/crash_iterator.py","file_name":"crash_iterator.py","file_ext":"py","file_size_in_byte":5415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"641555932","text":"import os\nimport sys\nimport atexit\nimport time\nfrom signal import SIGTERM\n\nimport psutil\n\nfrom .init import settings\n\n\nclass Daemon(object):\n    COLOUR_DIC = {\n        \"red\": \"\\033[31;1m %s \\033[0m\",\n        \"green\": \"\\033[32;1m %s \\033[0m\",\n    }\n\n    def __init__(self, pidfile, stdin=\"/dev/null\", stdout=\"/dev/null\", stderr=\"/dev/null\",\n                 debug=settings.MODE):\n        if debug == \"test\":\n            # Debug mode: switch to stdin=\"/dev/stdin\", stdout=\"/dev/stdout\", stderr=\"/dev/stderr\" and run as root.\n            self.pidfile = pidfile\n            self.stdin = os.path.join(settings.TMP_DIR, \"stdin.log\")\n            self.stdout = os.path.join(settings.TMP_DIR, \"stdout.log\")\n            self.stderr = os.path.join(settings.TMP_DIR, \"stderr.log\")\n        else:\n            self.pidfile = pidfile\n            self.stdin = stdin\n            self.stdout = stdout\n            self.stderr = stderr\n\n    def _daemonize(self):\n        try:\n            pid = os.fork()  # first fork: spawn a child and detach it from the parent\n            if pid > 0:\n                sys.exit(0)  # exit the parent process\n        except OSError as e:\n            sys.stderr.write(\"fork #1 failed: %d (%s)\\n\" % (e.errno, e.strerror))\n            sys.exit(1)\n\n        os.chdir(\"/\")  # change the working directory\n        os.setsid()  # start a new session\n        os.umask(0)  # reset the file creation mask\n\n        try:\n            pid = os.fork()  # second fork: stop the process from acquiring a controlling terminal\n            if pid > 0:\n                sys.exit(0)\n        except OSError as e:\n            sys.stderr.write(\"fork #2 failed: %d (%s)\\n\" % (e.errno, e.strerror))\n            sys.exit(1)\n
        # redirect the standard file descriptors\n        # sys.stdout.flush()\n        # sys.stderr.flush()\n        si = open(self.stdin, \"a+\")\n        so = open(self.stdout, \"a+\")\n        se = open(self.stderr, \"a+\")\n        os.dup2(si.fileno(), sys.stdin.fileno())\n        os.dup2(so.fileno(), sys.stdout.fileno())\n        os.dup2(se.fileno(), sys.stderr.fileno())\n\n        # Register an exit hook; the pid file is what tells us whether the process exists\n        atexit.register(self.del_pid)\n        pid = str(os.getpid())\n        time_date = str(int(time.time()))\n        open(self.pidfile, \"w+\").write(\"%s_%s\\n\" % (pid, time_date))\n\n    def del_pid(self):\n        os.remove(self.pidfile)\n\n    def start(self):\n        pid = self._get_pid()\n        if pid:\n            message = \"pidfile %s already exist. Daemon already running!\\n\"\n            sys.stderr.write(message % self.pidfile)\n            sys.exit(1)\n        # start the daemon\n        self._daemonize()\n        self.run()\n\n    def status(self):\n        # report status\n        pid = self._get_pid()\n        if pid in psutil.pids():\n            msg = self.cmd_line(pid)\n            print(self.colour(msg, \"green\"))\n        else:\n            print(self.colour(f\"Not found process [{pid}]\"))\n            message = \"pidfile %s does not exist. Daemon not running!\\n\"\n            sys.stderr.write(message % self.pidfile)\n\n    @staticmethod\n    def cmd_line(pid):\n        p = psutil.Process(pid)\n        cmd_line = p.cmdline()\n        return \" \".join(cmd_line) + f\" Status:[{p.is_running()}] PID:[{pid}]\"\n\n    def monitor(self):\n        pid = self._get_pid()\n        if pid in psutil.pids():\n            msg = self.cmd_line(pid)\n            print(self.colour(msg, \"green\"))\n        else:\n            self.restart()\n\n    def pid(self):\n        pid = self._get_pid()\n        if not pid:  # don't raise during restart\n            print(self.colour(\"Not found pid\"))\n            return\n        print(pid)\n\n    def _get_pid(self):\n        \"\"\"\n        Read the process id stored in the pid file\n        :return:\n        \"\"\"\n        try:\n            pf = open(self.pidfile, \"r\")\n            pid = int(pf.read().split(\"_\")[0].strip())\n            pf.close()\n        except IOError:\n            pid = None\n        return pid\n\n    def stop(self):\n        pid = self._get_pid()\n        if not pid:  # don't raise during restart\n            print(self.colour(f\"Not found process [{pid}]\"))\n            message = \"pidfile %s does not exist. Daemon not running!\\n\"\n            sys.stderr.write(message % self.pidfile)\n            return\n        # kill the process\n        try:\n            while 1:\n                os.kill(pid, SIGTERM)\n                time.sleep(0.1)\n        except OSError as err:\n            err = str(err)\n            if err.find(\"No such process\") > 0:\n                if os.path.exists(self.pidfile):\n                    os.remove(self.pidfile)\n                print(self.colour(f\"kill process [{pid}] success\", \"green\"))\n            else:\n                print(str(err))\n                sys.exit(1)\n\n    def restart(self):\n        \"\"\"\n        Restart the process\n        :return:\n        \"\"\"\n        self.stop()\n        self.start()\n\n    def colour(self, string, col=\"red\"):\n        \"\"\"\n        Wrap the string in a terminal colour code\n        :param string:\n        :param col:\n        :return:\n        \"\"\"\n        fmt_str = self.COLOUR_DIC.get(col, \"%s\")\n        return fmt_str % string\n\n    def run(self):\n        \"\"\" run your function\"\"\"\n        # while True:\n        #     fp=open(\"/tmp/result\",\"a+\")\n        #     fp.write(\"Hello World\\n\")\n        #     sys.stdout.write(\"%s:hello world\\n\" % (time.ctime(),))\n        #     sys.stdout.flush()\n        #     time.sleep(2)\n        pass\n\n\nif __name__ == \"__main__\":\n    # script_file.replace(\"py\", \"pid\")\n    # pid_dir = os.path.join(script_dir, script_file)\n    daemon = Daemon(\"/tmp/watch_process.pid\", stdout=\"/tmp/watch_stdout.log\")\n    if len(sys.argv) == 2:\n        if hasattr(daemon, sys.argv[1]):\n            func = getattr(daemon, sys.argv[1])\n            func()\n        else:\n            print(\"unknown command\")\n            sys.exit(2)\n        sys.exit(0)\n    else:\n        print(\"usage: %s start|stop|restart\" % sys.argv[0])\n        sys.exit(2)\n","sub_path":"candy/daemon_library.py","file_name":"daemon_library.py","file_ext":"py","file_size_in_byte":5827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
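
Daemon.run() above is a deliberate stub: the class is meant to be subclassed, with the work loop placed in run() after _daemonize() has double-forked and redirected the standard streams. A hypothetical usage sketch (the subclass name and paths are invented for illustration, mirroring the __main__ block at the end of the module):

import sys
import time

class Watcher(Daemon):  # Daemon as defined above
    def run(self):
        # Runs in the daemonized child; stdout already points at the log file.
        while True:
            sys.stdout.write("%s: still alive\n" % time.ctime())
            sys.stdout.flush()
            time.sleep(2)

# Watcher("/tmp/watcher.pid", stdout="/tmp/watcher.log").start()
# Later: .status(), .restart(), or .stop() on an identically-constructed instance.
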
{"seq_id":"177979454","text":"def palindrome(s):\n\tleft, right = 0, len(s) - 1\n\twhile left < right:\n\t\tif s[left] != s[right]:\n\t\t\treturn False\n\t\t\n\t\tleft += 1\n\t\tright -= 1\n\treturn True\n\n\nT = int(input())\nfor _ in range(T):\n\ts = input().replace(\" \",\"\").lower()\n\tprint(palindrome(s))\n","sub_path":"Hackerrank/SimpleAlg/palindrome.py","file_name":"palindrome.py","file_ext":"py","file_size_in_byte":253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"160073940","text":"import pickle\r\n\r\n# Read/write an object to a file as binary data,\r\n# not as text\r\n\r\nperson = {'name': '오쌤',\r\n          'age': 16,\r\n          'phone': ['010-1111-2222', '02-1234-5678'],\r\n          'email': {'company': 'jake@itwill.co.kr',\r\n                    'personal': 'jake@gmail.com'}}\r\n\r\nwith open('person.pickle', mode='wb') as f:\r\n    # mode='wb': write binary\r\n    pickle.dump(person, f)  # write the binary data to the file (serialize)\r\n\r\nwith open('person.pickle', mode='rb') as f:\r\n    # mode='rb': read binary\r\n    contact = pickle.load(f)  # read the binary data from the file (de-serialize)\r\n    print(contact)\r\n","sub_path":"lab-python/py06_file/file08.py","file_name":"file08.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"496959050","text":"import random\nimport timeit\nimport profile\nimport sys\n\ndef add(tree, element):\n    #print(element)\n    if not tree:\n        tree['root'] = element\n        tree[element] = {'left': None, 'right': None, 'parent': None, 'colour': 'black'}\n        tree[None] = {'left': None, 'right': None, 'parent': None, 'colour': 'red'}\n        return\n    node = tree['root']\n    parent = None\n    while node != None:\n        parent = node\n        if element == node:\n            break\n        if element < node:\n            node= tree[node]['left']\n        elif element > node:\n            node= tree[node]['right']\n    else:\n        if element < parent:\n            tree[parent]['left'] = element\n        if element > parent:\n            tree[parent]['right'] 
= element\n tree[element] = {'left': None, 'right': None, 'parent': parent, 'colour': 'black'}\n rb_fixup(tree, element)\n\ndef rb_fixup(tree, element):\n z = element\n while tree[tree[z]['parent']]['colour'] == 'red':\n print('stuck')\n print(z)\n if tree[z]['parent'] == tree[tree[tree[z]['parent']]['parent']]['left']:\n y = tree[tree[tree[z]['parent']]['parent']]['right']\n if tree[y]['colour'] == 'red':\n tree[tree[z]['parent']]['colour'] = 'black'\n tree[y]['colour'] = 'black'\n z = tree[tree[z]['parent']]['parent']\n elif z == tree[tree[z]['parent']]['right']:\n z = tree[z]['parent']\n left_rotate(tree, z)\n tree[tree[z]['parent']]['colour'] = 'black'\n tree[tree[tree[z]['parent']]['parent']]['colour'] = 'red'\n right_rotate(tree, tree[tree[z]['parent']]['parent'])\n else:\n y = tree[tree[tree[z]['parent']]['parent']]['left']\n if tree[y]['colour'] == 'red':\n tree[tree[z]['parent']]['colour'] = 'black'\n tree[y]['colour'] = 'black'\n z = tree[tree[z]['parent']]['parent']\n elif z == tree[tree[z]['parent']]['left']:\n z = tree[z]['parent']\n right_rotate(tree, z)\n tree[tree[z]['parent']]['colour'] = 'black'\n tree[tree[tree[z]['parent']]['parent']]['colour'] = 'red'\n left_rotate(tree, tree[tree[z]['parent']]['parent'])\n tree[tree['root']]['colour'] = 'black'\n\ndef left_rotate(tree, x):\n y = tree[x]['right']\n tree[x]['right'] = tree[y]['left']\n if tree[y]['left'] != None:\n tree[tree[y]['left']]['parent'] = x\n tree[y]['parent'] = tree[x]['parent']\n if tree[x]['parent'] == None:\n tree['root'] = y\n elif x == tree[tree[x]['parent']]['left']:\n tree[tree[x]['parent']]['left'] = y\n else:\n tree[tree[x]['parent']]['right'] = y\n tree[y]['left'] = x\n tree[x]['parent'] = y\n\ndef right_rotate(tree, x):\n y = tree[x]['parent']\n tree[y]['left'] = tree[x]['right']\n if tree[x]['right'] != None:\n tree[tree[x]['right']]['parent'] = y\n tree[x]['parent'] = tree[y]['parent']\n if tree[y]['parent'] == None:\n tree['root'] = x\n elif y == tree[tree[y]['parent']]['left']:\n tree[tree[y]['parent']]['left'] = x\n else:\n tree[tree[y]['parent']]['right'] = x\n tree[x]['right'] = y\n tree[y]['parent'] = x\n\ndef search(tree, element):\n if not tree:\n return None\n node = tree['root']\n while node != None:\n if node == element:\n return node\n elif element < node:\n node = tree[node]['left']\n else:\n node = tree[node]['right']\n return node\n\ndef find_next(tree, element):\n if not tree:\n return None\n node = search(tree, element)\n if node != None:\n node, parent = tree[node]['right'], node\n while node != None:\n parent, node = node, tree[node]['left']\n return parent\n node = tree['root']\n left = None\n while node != None:\n if element < node:\n node, left = tree[node]['left'], node\n else:\n node = tree[node]['right']\n return left\n\ndef bst(array, m):\n tree = {}\n prefix_sum = 0\n max_overall = array[0] % m\n for i in range(len(array)):\n prefix_sum = (prefix_sum + array[i]) % m\n max_ending_here = prefix_sum\n previous_sum = find_next(tree, max_ending_here)\n if previous_sum:\n max_ending_here = (prefix_sum - previous_sum) % m\n max_overall = max(max_overall, max_ending_here)\n add(tree, prefix_sum)\n #for x in tree:\n # print(x, tree[x])\n return max_overall\n\ndef main():\n trials = int(sys.stdin.readline())\n for trial in range(trials):\n n, m = map(int, sys.stdin.readline().split())\n array = [int(x) for x in sys.stdin.readline().split()]\n print(bst(array, m))\n\ndef test():\n n, m = 10 ** 3, 20 #, random.randint(1, 1001)\n array = [random.randint(1, m - 2) for _ in 
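
bst() above implements the standard trick for maximizing a subarray sum modulo m: keep the prefix sums seen so far in sorted order and, for each new prefix p, find the smallest stored prefix strictly greater than p, since (p - next_greater) mod m is the best subarray ending at the current position. The same algorithm with the standard-library bisect module instead of the hand-rolled tree (simpler, though insort is O(n) per insert, so this is O(n^2) worst case rather than O(n log n)):

import bisect

def max_sum_modulo(array, m):
    prefix, best, seen = 0, 0, []
    for value in array:
        prefix = (prefix + value) % m
        best = max(best, prefix)
        i = bisect.bisect_right(seen, prefix)  # smallest stored prefix > current
        if i < len(seen):
            best = max(best, (prefix - seen[i]) % m)
        bisect.insort(seen, prefix)
    return best

# max_sum_modulo([3, 3, 9, 9, 5], 7) == 6
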
range(n)]\n    print(bst(array, m))\n\nif __name__ == '__main__':\n    profile.run('main()')\n    #main()\n    #test()\n","sub_path":"algorithms/search/maximize_sum_modulo/max_sum_rb_tree.py","file_name":"max_sum_rb_tree.py","file_ext":"py","file_size_in_byte":5035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"176539491","text":"from kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.label import Label\nfrom kivy.graphics import Rectangle, Color\n\n# Simple area to place pieces once captured\nclass CapturedPieces(BoxLayout):\n    def __init__(self):\n        super(CapturedPieces, self).__init__(orientation='horizontal')\n\n        white_piece_box = BoxLayout(orientation='horizontal')\n        black_piece_box = BoxLayout(orientation='horizontal')\n\n        self.add_widget(white_piece_box)\n        self.add_widget(black_piece_box)\n\n        self.children_dict = {\"white_piece_box\": white_piece_box, \"black_piece_box\": black_piece_box}\n\n    def add_piece(self, piece):\n        if piece.islower():\n            piece = Label(text= piece)\n            self.children_dict['white_piece_box'].add_widget(piece)\n        else:\n            piece = Label(text= piece)\n            self.children_dict['black_piece_box'].add_widget(piece)\n","sub_path":"frame_elements/captured_pieces.py","file_name":"captured_pieces.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"434551123","text":"import os\r\n\r\nprint(\"Beginning!\")\r\npath = \"/pi/shit/nobodyknows\"\r\nfiles = os.listdir(path)\r\n\r\nfor _file in files:\r\n\tprint(_file)\r\n\r\n\r\n#Should output all the games available, and run them!\r\n#Should handle ctrl+c and get out of the game, not the script itself\r\n","sub_path":"old_stuff/begin.py","file_name":"begin.py","file_ext":"py","file_size_in_byte":255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"27753353","text":"class SoundtouchPackage (Package):\n\tdef __init__(self):\n\t\tPackage.__init__ (self, 'soundtouch', '1.9.0',\n\t\t\tsources = [\n\t\t\t\t'http://www.surina.net/%{name}/%{name}-%{version}.tar.gz'\n\t\t\t],\n\t\t\tconfigure_flags = ['--disable-silent-rules', '--disable-static', '--enable-shared'],\n\t\t\tsource_dir_name = \"soundtouch\"\n\t\t)\n\n\tdef arch_build(self, arch):\n\t\tself.sh(\"./bootstrap\")\n\n\t\tif arch == 'darwin-32':\n\t\t\tself.ld_flags = ['-arch i386']\n\t\t\tself.gcc_flags = ['-arch i386']\n\t\t\tself.configure = 'AM_CXXFLAGS=\"-arch i386\" ./configure --prefix=\"%{package_prefix}\"'\n\t\t\tself.local_configure_flags.extend (['--build=i386-apple-darwin11.2.0'])\n\t\telif arch == 'darwin-64':\n\t\t\tself.ld_flags = ['-arch x86_64']\n\t\t\tself.gcc_flags = ['-arch x86_64']\n\t\t\tself.configure = 'AM_CXXFLAGS=\"-arch x86_64\" ./configure --prefix=\"%{package_prefix}\"'\n\t\t\tself.local_configure_flags.extend (['--build=x86_64-apple-darwin11.2.0'])\n\n\t\tPackage.arch_build (self, arch, defaults = False)\n\nSoundtouchPackage()","sub_path":"packages/soundtouch.py","file_name":"soundtouch.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"337671739","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nimport datetime\nimport json\n\nfrom django.core.exceptions import ValidationError\nfrom django.test import TestCase\nfrom django.utils import timezone\nfrom requests import RequestException\n\nfrom models import Url, UrlVisits, UrlVisitors\nfrom 
shortenurls.exceptions import URLException\nfrom django.core.urlresolvers import reverse\n\n\nclass UrlModelTest(TestCase):\n def setUp(self):\n self.url1 = Url.create(original_url=\"testing\", shorten_url=\"1\", last_visit_from=\"me\")\n self.url2 = Url.create(original_url=\"http://testing2\", shorten_url=\"tstng2\", last_visit_from=\"me\")\n self.url3 = Url.create(original_url=\"https://www.football-italia.net/\", shorten_url=\"daaf1\", last_visit_from=\"127.0.0.1\")\n self.url = Url.create(save=False, original_url=\"testing\", shorten_url=\"tstng\", last_visit_from=\"me\")\n self.visit1 = UrlVisits.create(url_id=self.url3.id)\n self.visitor = UrlVisitors.create(url_visit=self.visit1)\n\n def test_url_creation_width_save(self):\n now = timezone.now()\n self.assertLess(self.url1.created, now)\n\n def test_url_creation_without_save(self):\n urls = Url.fetch(single=False)\n self.assertEqual(len(urls), 3)\n self.assertNotIn(self.url, urls)\n\n def test_invalid_url_missing_schema(self):\n exception = None\n try:\n Url.check_url_validation(self.url1.original_url)\n except (ValidationError, RequestException) as error:\n exception = error.message\n self.assertIsNotNone(exception)\n self.assertIn(\"No schema supplied.\", exception)\n\n def test_invalid_url_invalid_url(self):\n exception = None\n try:\n Url.check_url_validation(self.url2.original_url)\n except (ValidationError, RequestException) as error:\n exception = error.message\n self.assertIsNotNone(exception)\n self.assertIn(\"Failed to establish a new connection\", exception.message)\n\n def test_valid_url(self):\n exception = None\n try:\n Url.check_url_validation(self.url3.original_url)\n except URLException as error:\n exception = error.message\n self.assertEqual(None, exception)\n\n def test_duplicated_short_url(self):\n exist = Url.short_url_exist(self.url1.shorten_url)\n self.assertTrue(exist)\n\n def test_not_duplicated_short_url(self):\n exist = Url.short_url_exist(\"dummy\")\n self.assertFalse(exist)\n\n def test_invalid_short_url_characters(self):\n exception = None\n try:\n Url.check_short_url(self.url1.shorten_url)\n except URLException as error:\n exception = error.message\n self.assertIsNotNone(exception)\n self.assertIn(\"Invalid short url\", exception)\n\n def test_invalid_short_url_doesnt_exist(self):\n exception = None\n try:\n Url.check_short_url(self.url.shorten_url)\n except URLException as error:\n exception = error.message\n self.assertIsNotNone(exception)\n self.assertIn(\"not found\", exception)\n\n def test_correct_short_url(self):\n url = Url.check_short_url(self.url3.shorten_url)\n self.assertEqual(url, self.url3)\n\n def test_json_response(self):\n resp = self.url3.json(\"testhost:8080\", True)\n self.assertEqual(resp['id'], self.url3.id)\n self.assertEqual(resp['shortUrl'], \"https://testhost:8080/url/\" + self.url3.shorten_url)\n json_params = ['id', 'shortUrl', 'redirectUrl', 'created', 'lastIP']\n for param in json_params:\n self.assertIn(param, resp)\n\n def test_all_visits_of_url(self):\n visits = Url.get_all_visits(self.url3)\n self.assertIn(self.visit1.json(), visits)\n\n def test_get_all_visitors_of_url(self):\n visitors = Url.get_all_visitors(self.url3)\n self.assertIn(self.visitor.json(), visitors)\n\n def test_non_existing_visit_and_visitors(self):\n self.assertEqual(0, len(Url.get_all_visits(12)))\n self.assertEqual(0, len(Url.get_all_visitors(3)))\n\n\nclass UrlVisitsModelTest(TestCase):\n def setUp(self):\n self.url = Url.create(original_url=\"https://www.football-italia.net/\", 
shorten_url=\"daaf1\", last_visit_from=\"127.0.0.1\")\n self.visit1 = UrlVisits.create(url_id=self.url.id)\n self.visit2 = UrlVisits.create(save=False, url_id=self.url.id)\n self.visit3 = UrlVisits.create(url_id=self.url.id)\n # self.visit3 = UrlVisits.create(url_id=self.url.id)\n self.url_to_test = \"https://google.com\"\n\n def test_creating_with_save(self):\n self.assertIn(self.visit1, UrlVisits.fetch(single=False, url_id=self.url.id))\n\n def test_creating_without_save(self):\n self.assertNotIn(self.visit2, UrlVisits.fetch(single=False))\n\n def test_single_fetch(self):\n resp = UrlVisits.fetch(url_id=self.url.id, id=self.visit1.id)\n self.assertEqual(resp, self.visit1)\n\n def test_fetch_non_existing_visit(self):\n exception = None\n try:\n UrlVisits.fetch(url_id=2)\n except URLException as error:\n exception = error.message\n self.assertIsNotNone(exception)\n self.assertIn(\"not exist\", exception.message)\n\n def test_fetch_all(self):\n exception = None\n resp = []\n try:\n resp = UrlVisits.fetch(single=False)\n except URLException as error:\n exception = error.message\n self.assertIsNone(exception)\n self.assertIn(self.visit1, resp)\n self.assertNotIn(self.visit2, resp)\n\n def test_mark_visit(self):\n exception = None\n try:\n visit = UrlVisits.mark_visit(self.url.id, \"1.2.3.4\")\n except Exception as error:\n exception = error.message\n self.assertIsNone(exception)\n self.assertEqual(visit.visits, 1)\n self.assertEqual(visit.last_visit_from, \"1.2.3.4\")\n self.assertLess(visit.last_visit_at, datetime.datetime.now())\n\n def test_json_response(self):\n resp = self.visit1.json()\n json_params = ['id', 'visits', 'created', 'lastVisitAt', 'lastIP']\n for param in json_params:\n self.assertIn(param, resp)\n self.assertEqual(resp['id'], self.visit1.id)\n self.assertEqual(resp['lastIP'], \"\")\n self.assertEqual(resp['lastVisitAt'], None)\n\n def test_getting_url(self):\n resp = self.client.post(reverse(\"generate_url\"), data=json.dumps({\"url\": self.url_to_test}), content_type=\"application/json\")\n self.assertIn(resp.status_code, [200, 201])\n self.assertIn(\"shortenUrl\", resp.content.decode(\"utf-8\"))\n\n def test_wrong_url(self):\n resp = self.client.post(reverse(\"generate_url\"), data=json.dumps({\"url\":\"??\"}), content_type=\"application/json\")\n self.assertEqual(resp.status_code, 400)\n self.assertIn(\"Provided URL is not valid\", resp.content.decode(\"utf-8\"))\n\n def test_empty_url(self):\n resp = self.client.post(reverse(\"generate_url\"), data=json.dumps({\"url\":\"\"}), content_type=\"application/json\")\n self.assertEqual(resp.status_code, 400)\n self.assertIn(\"Provided URL is not valid\", resp.content.decode(\"utf-8\"))\n\n def test_redirection(self):\n resp = self.client.post(reverse(\"generate_url\"), data=json.dumps({\"url\": self.url_to_test}), content_type=\"application/json\")\n self.assertIn(resp.status_code, [200, 201])\n self.assertIn(\"shortenUrl\", resp.content.decode(\"utf-8\"))\n\n short_url = json.loads(resp.content.decode(\"utf-8\"))['shortenUrl'].split(\"/url/\")[1]\n request = self.client.get(reverse(\"retrieve_url\", kwargs={\"url\": short_url}))\n self.assertIn(request.status_code, [200, 301])\n\n\nclass UrlVisitorModelTest(TestCase):\n def setUp(self):\n self.url = Url.create(original_url=\"https://www.football-italia.net/\", shorten_url=\"daaf1\", last_visit_from=\"127.0.0.1\")\n self.visit = UrlVisits.create(url_id=self.url.id)\n self.visitor = UrlVisitors.create(url_visit=self.visit, remote_address=\"1.1.1.1\")\n self.visitor2 = 
UrlVisitors.create(save=False, url_visit=self.visit)\n self.visitor3 = UrlVisitors.create(url_visit=self.visit, remote_address=\"1.1.1.1\")\n\n def test_creating_with_save(self):\n self.assertIn(self.visitor, UrlVisitors.fetch(single=False, url_visit=self.visit))\n\n def test_creating_without_save(self):\n self.assertNotIn(self.visitor2, UrlVisitors.fetch(single=False))\n\n def test_single_fetch(self):\n resp = UrlVisitors.fetch(url_visit=self.visit, id=self.visitor.id)\n self.assertEqual(resp, self.visitor)\n\n def test_fetch_non_existing_visit(self):\n exception = None\n try:\n UrlVisitors.fetch(url_visit=2)\n except UrlVisitors.DoesNotExist as error:\n exception = error.message\n self.assertIsNotNone(exception)\n self.assertIn(\"not exist\", exception)\n\n def test_fetch_all(self):\n exception = None\n resp = []\n try:\n resp = UrlVisitors.fetch(single=False)\n except URLException as error:\n exception = error.message\n self.assertIsNone(exception)\n self.assertIn(self.visitor, resp)\n self.assertNotIn(self.visitor2, resp)\n\n def test_mark_visitor_with_same_remote_address(self):\n meta = {\"REMOTE_ADDR\": \"1.1.1.1\", \"HTTP_USER_AGENT\": \"Chrome\"}\n exception = None\n try:\n visitor = UrlVisitors.mark_visitor(meta, self.visit.id)\n except Exception as error:\n exception = error.message\n self.assertIsNotNone(exception)\n self.assertIn(\"More than 1\", exception)\n\n def test_mark_visitor(self):\n meta = {\"REMOTE_ADDR\": \"1.1.1.2\", \"HTTP_USER_AGENT\": \"Chrome\"}\n exception = None\n try:\n visitor = UrlVisitors.mark_visitor(meta, self.visit.id)\n except Exception as error:\n exception = error.message\n self.assertIsNone(exception)\n self.assertEqual(visitor.remote_address, meta['REMOTE_ADDR'])\n self.assertEqual(visitor.visits, 1)\n self.assertEqual(visitor.url_visit_id, self.visit.id)\n self.assertLess(visitor.last_visit, datetime.datetime.now())\n url = Url.objects.get(urlvisits__urlvisitors=visitor)\n self.assertEqual(url.last_visit_from, visitor.remote_address)\n\n def test_json(self):\n resp = self.visitor.json()\n json_params = ['id', 'visits', 'firstVisit', 'lastVisit', 'ip', 'userAgent']\n for param in json_params:\n self.assertIn(param, resp)\n self.assertEqual(resp['id'], self.visitor.id)\n self.assertEqual(resp['lastVisit'], None)\n self.assertEqual(resp['lastVisit'], self.visitor.last_visit)\n\n","sub_path":"shortenurls/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":10781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"295342970","text":"\"\"\"\nSmoke tests for the command-line client.\n\"\"\"\n\nimport sys\n\nfrom cmkclient.cli import main\n\n\ndef test_main():\n # there is no documented way of passing a command-line arguments to\n # `Fire()`, so we need to temporarily change `sys.argv`\n saved_argv = sys.argv[:]\n try:\n sys.argv = ['cmkclient', '--help']\n main()\n except SystemExit as ex:\n assert ex.code == 0\n finally:\n sys.argv = saved_argv\n","sub_path":"tests/test_cmkclient.py","file_name":"test_cmkclient.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"452471117","text":"# pytorch is the best thing that has happened to my life.\n\nfrom modular import resnet\nfrom torch import nn\nimport torch\nfrom torch.nn.modules.loss import _assert_no_grad\nfrom torch.nn.functional import softmax\nimport torchvision.transforms as transforms\nfrom modular.image_preparation import *\nimport 
torch.utils.data\nfrom torch.autograd import Variable\nimport time\nimport torch.cuda\n\nclass ConfuciusLoss(nn.Module):\n    '''\n    Allow the network to say it does not know.\n    '''\n    def __init__(self,margin=0):\n        super(ConfuciusLoss,self).__init__()\n        # No idea what margin is for.\n        self.margin=margin\n\n    def forward(self,input,target):\n        _assert_no_grad(target)\n        CEL=nn.CrossEntropyLoss()\n        # need batch slice.\n        entropy=CEL.forward(input[:,:-1],target)\n\n        idk=softmax(input, dim=1)[:,-1] # percentage of the bet on idk\n        # batch?\n        num_real_classes=Variable(torch.cuda.FloatTensor([input.size()[1]-1]))\n        loss=(1-idk)*entropy+0.5*idk*torch.log(num_real_classes)\n        return loss\n\nlr=0.1\nmomentum=0.9\nweight_decay=1e-4\nbatch_size=128\nworkers=4\n\n\n\nkeys=dogs.predecessors('wnid')\nkey=keys[1]\nimages=pickle.load(open('images/'+key+'_10.pickle','rb'))\n\nmodel=resnet.resnet18(num_classes=10).cuda()\n\noptimizer = torch.optim.Adam(model.parameters(), lr,\n                             weight_decay=weight_decay)\n\ncriterion=ConfuciusLoss().cuda()\n\ndef data_generator():\n    ii=iter(images)\n    yield(next(ii),1)\n\nloader=transforms.Compose([transforms.ToTensor()])\nimage=loader(images[1])\nimage=torch.unsqueeze(image,0)\nlabel=torch.from_numpy(np.array([1]))\nimage, label = Variable(image).cuda(), Variable(label).cuda()\n\nfor epoch in range(0, 1000):\n    # compute output\n    optimizer.zero_grad()\n    output = model(image)\n    loss = criterion(output, label)\n\n    loss.backward()\n    optimizer.step()\n\n    if epoch%10 == 0:\n        print(loss.data[0])","sub_path":"modular/playground.py","file_name":"playground.py","file_ext":"py","file_size_in_byte":1909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"476372716","text":"#!/usr/bin/env python3\n\nimport calendar, collections, datetime, http.client, os, re, smtplib, subprocess\nfrom bs4 import BeautifulSoup as bs \n\n# login and password to gmail account\nUSER = \"XXXXX\"\nPASSWORD = \"XXXXX\"\n\n# Gmail SMTP server address\nG_SMTP_ADD = \"smtp.gmail.com\"\n\n# Email parameters\nfrom_addr = \" \".join([\"From:\", USER])\nto_addr = \"To: XXXXXX\"\nsubject = \"CETIN - Shifts Changed!\"\n\nHOST = \"85.162.8.155\"\nHTTPHOST = \"\".join([\"http://\", HOST])\nPORT = 80\nURI = \"/index.php?\"\nURI_CH_SESS = \"/php/zmen_session.php\"\nMETHOD_POST = \"POST\"\nMETHOD_GET = \"GET\"\nBODY_LOG = \"XXXXXXn\"\nUSER_ID = \"XXXXXX\"\nMONTH_SHIFT = 1\n\ndef main():\n\tdays, month, year = getDaysMonthYear()\n\tstart = \"/\".join([month, \"1\", year])\n\tend = \"/\".join([str(int(month) + 1), \"1\", year])\n\n\tgoogle_shifts = getGoogleShifts(start, end, days)\n\t\n\troot = getData(month, year)\n\tuser = findUser(root)\n\n\tshifts = collections.defaultdict(lambda : \"\")\n\n\tfor day in range(1, days + 1):\n\t\tshifts[day] = getShift(day, user)\n\n\n\n\tmsg = getMessage(shifts, google_shifts, days)\n\tsendEmail(month, year, msg)\n\ndef findUser(root):\n\t\"\"\"\n\tFilter the user element by USER_ID. 
\n\t\"\"\"\n\tsoup = bs(root, \"lxml\") \n\treturn soup.find(id=USER_ID)\n\ndef getData(month, year):\n\t\"\"\"\n\tReturn data of the user in HTML.\n\t\"\"\"\n\tbody = \"\".join([\"cas=\", month, \"%2F\", year])\n\n\tsession_id = \"\"\n\tconnection = http.client.HTTPConnection(HOST, PORT)\n\n\theaders = getHeaderPost(ContentLength=len(BODY_LOG), Referer=\"/\")\n\tresponse, data = getResponseData(connection, METHOD_POST, URI, headers, BODY_LOG)\n\n\tfor key, value in response.getheaders():\n\t\tif key == \"Set-Cookie\":\n\t\t\tfor x in value.split():\n\t\t\t\tif \"PHPSESSID\" in x:\n\t\t\t\t\tsession_id = x[0:-1]\n\t\t\tbreak\n\n\theaders = getHeaderPost(Cookie=session_id, ContentLength=len(body), Referer=URI)\n\tresponse, data = getResponseData(connection, METHOD_POST, URI_CH_SESS, headers, body)\n\n\theaders = getHeaderGet(Cookie=session_id, Referer=URI)\n\tresponse, data = getResponseData(connection, METHOD_GET, URI, headers)\n\n\treturn data\n\ndef getDaysMonthYear():\n\t\"\"\"\n\tReturn days in next month, next month and year of next month.\n\t\"\"\"\n\tmonth = datetime.date.today().month + MONTH_SHIFT\n\tyear = datetime.date.today().year\n\tdays = calendar.monthrange(year, month)\n\tif month > 12:\n\t\tmonth = 1\n\t\tyear += 1\n\treturn days[1], str(month), str(year)\n\ndef getGoogleShifts(start, end, days):\n\t\"\"\"\n\tReturn list of shifts from google calendar.\n\t\"\"\"\n\tcommand = \"gcalcli agenda {0} {1} --military --calendar CETIN\".format(start, end).split()\n\tstring = subprocess.run(command, stdout=subprocess.PIPE).stdout.decode(\"utf-8\")\n\n\tpattern = re.compile(\"\\x1b\\[+[0-8]m|\\x1b\\[+[0-8];[3-4][0-7]m\")\n\tstring = re.sub(pattern, \"\", string)\n\n\tgoogle_shifts_list = []\n\tfor i in string.split(\"\\n\"):\n\t\tif \"CETIN\" not in i: continue\n\t\tgoogle_shifts_list.append(i)\n\n\tgoogle_shifts_dict = {i: None for i in range(1, (days +1))}\n\tfor i in google_shifts_list:\n\t\tline = i.split()\n\t\t# print(int(line[2].lstrip(\"0\")), type(int(line[2].lstrip(\"0\"))))\n\t\tgoogle_shifts_dict[int(line[2].lstrip(\"0\"))] = \"DI\" if line[3] == \"07:00\" else \"NI\"\n\n\treturn google_shifts_dict\n\ndef getHeaderGet(Accept = \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8\",\n\tAcceptEncoding = \"deflate, sdch\",\n\tAcceptLanguage = \"cs-CZ,cs;q=0.8,en;q=0.6,sk;q=0.4,und;q=0.2\",\n\tCacheControl = \"max-age=0\",\n\tConnection = \"keep-alive\",\n\tCookie = \"\",\n\tHost = HOST,\n\tReferer = \"\",\n\tUpgradeInsecureRequests = \"1\",\n\tUserAgent = \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.81 Safari/537.36\" ):\n\n\theaders_get = { \"Accept\": Accept,\n\t\t\t\t\t\"Accept-Encoding\": AcceptEncoding,\n\t\t\t\t\t\"Accept-Language\": AcceptLanguage,\n\t\t\t\t\t\"Cache-Control\": CacheControl,\n\t\t\t\t\t\"Connection\": Connection,\n\t\t\t\t\t\"Cookie\": Cookie,\n\t\t\t\t\t\"Host\": Host,\n\t\t\t\t\t\"Referer\": \"\".join([HTTPHOST, Referer]),\n\t\t\t\t\t\"Upgrade-Insecure-Requests\": UpgradeInsecureRequests,\n\t\t\t\t\t\"User-Agent\": UserAgent\n\t\t\t\t\t}\n\treturn headers_get\n\ndef getHeaderPost(Accept = \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8\",\n\tAcceptEncoding = \"deflate\",\n\tAcceptLanguage = \"cs-CZ,cs;q=0.8,en;q=0.6,sk;q=0.4,und;q=0.2\",\n\tCacheControl = \"max-age=0\",\n\tConnection = \"keep-alive\",\n\tContentLength = \"\",\n\tContentType = \"application/x-www-form-urlencoded\",\n\tCookie = \"\",\n\tHost = HOST,\n\tOrigin = HTTPHOST,\n\tReferer = 
\"\",\n\tUpgradeInsecureRequests = \"1\",\n\tUserAgent = \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.81 Safari/537.36\"):\n\n\theader_post = { \"Accept\": Accept, \n\t\t\t\t\t\"Accept-Encoding\": AcceptEncoding, \n\t\t\t\t\t\"Accept-Language\": AcceptLanguage,\n\t\t\t\t\t\"Cache-Control\": CacheControl,\n\t\t\t\t\t\"Connection\": Connection,\n\t\t\t\t\t\"Content-Length\": ContentLength,\n\t\t\t\t\t\"Content-Type\": ContentType,\n\t\t\t\t\t\"Cookie\": Cookie,\n\t\t\t\t\t\"Host\": Host,\n\t\t\t\t\t\"Origin\": Origin,\n\t\t\t\t\t\"Referer\": \"\".join([HTTPHOST, Referer]),\n\t\t\t\t\t\"Upgrade-Insecure-Requests\": UpgradeInsecureRequests,\n\t\t\t\t\t\"User-Agent\": UserAgent\n\t\t\t\t\t}\n\treturn header_post\n\ndef getMessage(shifts, google_shifts, days):\n\tmsg = \"\"\n\tfor day in range(1,days + 1):\n\t\tif shifts[day] == google_shifts[day]: continue\n\t\tmsg = \"\".join([msg, \"DAY: {0:.>5} CETIN: {1:.>6} GOOGLE: {2:.>6}\\n\".format(day, str(shifts[day]), str(google_shifts[day]))])\n\t\t# print(\"DEŇ: {0:.>5} CETIN: {1:.>6} GOOGLE: {2:.>6}\".format(day, str(shifts[day]), str(google_shifts[day])))\n\treturn msg\n\ndef getResponseData(connection, method, uri, headers, body = \"\"):\n\t\"\"\"\n\tSend request and receive data. Return response and data.\n\t\"\"\"\n\tconnection.request(method, uri, body, headers=headers)\n\tresponse = connection.getresponse()\n\tdata = response.read().decode(\"utf-8\")\n\n\treturn response, data\n\ndef getShift(day, user):\n\t\"\"\"\n\tFunction write down DI or NI according to downloaded data. If there is no shift\n\tfunction return None.\n\t\"\"\"\n\tuser_id = \"\".join([USER_ID, \"_\", str(day)])\n\tcode = user.find(id=user_id)\n\tfor line in str(code).split(\"\\n\"):\n\t\tline = line.strip() \n\t\tif not line.startswith(\"<dt>Zkratka:\"):\n\t\t\tcontinue\n\t\t\n\t\tif \"DI\" in line:\n\t\t\torder = line.find(\"DI\")\n\t\telif \"NI\" in line:\n\t\t\torder = line.find(\"NI\")\n\t\telse:\n\t\t\torder = -1\n\t\tret = None if order == -1 else line[order:order+2]\n\treturn ret\n\ndef sendEmail( month, year, msg=\"No text...\"):\n\t\"\"\"\n\tThis function build final message (subject + msg).\n\tThen create connection to SMTP server and send email.\n\t\"\"\"\n\tif msg == \"\" : return\n\ttext = \"In {0}/{1} was performed some changes.\".format(month, year)\n\tmessage = \"Subject: {0}\\n\\n{1}\\n\\n{2}\".format(subject, text,msg)\n\t# print(message)\n\tserver = smtplib.SMTP(host=G_SMTP_ADD)\n\tserver.starttls()\n\tserver.login(USER, PASSWORD)\n\tserver.sendmail(from_addr, to_addr, message)\n\tserver.quit()\n\nmain()\n\n\n","sub_path":"checkout-shifts.py","file_name":"checkout-shifts.py","file_ext":"py","file_size_in_byte":6675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"137074625","text":"\nimport os\nimport glob\nimport pandas as pd\nos.chdir(\"./\")\n\nsymbols = []\nout = []\nextension = 'csv'\nspooks = pd.read_csv(glob.glob('data/spooky-author-identification/train.csv')[0])\n\neap = pd.read_csv(glob.glob('data/out/eap.csv')[0])\nhpl = pd.read_csv(glob.glob('data/out/hpl.csv')[0])\nmws = pd.read_csv(glob.glob('data/out/mws.csv')[0])\n\nauthor_to_bin = \"\"\n\nmerged_csv = []\n\nmamatrix = [spooks. 
 eap.values, hpl.values, mws.values]\n\n# iterate up to the length of the full training set; the shorter per-author frames are bounds-checked below\nfor i in range(0, len(spooks)):\n    for matrix in mamatrix:\n        if i < len(matrix):\n            print(matrix[i])\n            merged_csv.append([matrix[i][1], matrix[i][2] == 'HPL', matrix[i][2] == 'EAP', matrix[i][2] == 'MWS'])\n\ndf = pd.DataFrame(merged_csv, columns=['text', 'is_HPL', 'is_EAP', 'is_MWS'])\nprint(df)\ndf.to_csv(\"data/out/spook_to_lstm.csv\", index=False)\n","sub_path":"adjust_train_to_binary_rep.py","file_name":"adjust_train_to_binary_rep.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"510084078","text":"import inspect\nimport logging\nimport re\n\nfrom rebasehelper.helpers.macro_helper import MacroHelper\nfrom rebasehelper.specfile import SpecFile, RebaseHelperError\n\ntry:\n    from rebasehelper.plugins.plugin_manager import plugin_manager\nexcept ImportError:\n    from rebasehelper.versioneer import versioneers_runner\n\nfrom packit.exceptions import PackitException\n\nlogger = logging.getLogger(__name__)\n\n\nclass Specfile(SpecFile):\n    def __init__(self, path=\"\", dir=None):\n        s = inspect.signature(SpecFile)\n        if \"changelog_entry\" in s.parameters:\n            super().__init__(path=path, sources_location=str(dir), changelog_entry=\"\")\n        else:\n            super().__init__(path=path, sources_location=str(dir))\n\n    def update_spec(self):\n        if hasattr(self, \"update\"):\n            # new rebase-helper\n            self.update()\n        else:\n            # old rebase-helper\n            self._update_data()\n\n    def update_changelog_in_spec(self, changelog_entry):\n        if hasattr(self, \"update_changelog\"):\n            # new rebase-helper\n            self.update_changelog(changelog_entry)\n        else:\n            # old rebase-helper\n            self.changelog_entry = changelog_entry\n            new_log = self.get_new_log()\n            new_log.extend(self.spec_content.sections[\"%changelog\"])\n            self.spec_content.sections[\"%changelog\"] = new_log\n            self.save()\n\n    def set_spec_version(\n        self, version: str = None, release: str = None, changelog_entry: str = None\n    ):\n        \"\"\"\n        Set version in spec, release and add a changelog_entry (if they are present).\n\n        :param version: new version\n        :param release: new release\n        :param changelog_entry: accompanying changelog entry\n        \"\"\"\n        try:\n            if version:\n                # also this code adds 3 rpmbuild dirs into the upstream repo,\n                # we should ask rebase-helper not to do that\n                self.set_version(version=version)\n\n            if release:\n                self.set_release_number(release=release)\n\n            if not changelog_entry:\n                return\n\n            if not self.spec_content.section(\"%changelog\"):\n                logger.debug(\n                    \"The specfile doesn't have any %changelog, will not set it.\"\n                )\n                return\n\n            self.update_changelog_in_spec(changelog_entry)\n\n        except RebaseHelperError as ex:\n            logger.error(f\"rebase-helper failed to change the spec file: {ex!r}\")\n            raise PackitException(\"rebase-helper didn't do the job\")\n\n    def write_spec_content(self):\n        if hasattr(self, \"_write_spec_content\"):\n            # new rebase-helper\n            self._write_spec_content()\n        else:\n            # old rebase-helper\n            self._write_spec_file_to_disc()\n\n    @staticmethod\n    def get_upstream_version(versioneer, package_name, category):\n        \"\"\"\n        Call the method of rebase-helper (due to the version of rebase-helper)\n        to get the latest upstream version of a package.\n        :param versioneer:\n        :param package_name: str\n        :param category:\n        :return: str version\n        \"\"\"\n        try:\n            get_version = plugin_manager.versioneers.run\n        except NameError:\n            get_version = versioneers_runner.run\n        return 
get_version(versioneer, package_name, category)\n\n    def get_release_number(self) -> str:\n        \"\"\"\n        Removed in rebasehelper=0.20.0\n        \"\"\"\n        release = self.header.release\n        dist = MacroHelper.expand(\"%{dist}\")\n        if dist:\n            release = release.replace(dist, \"\")\n        return re.sub(r\"([0-9.]*[0-9]+).*\", r\"\\1\", release)\n","sub_path":"packit/specfile.py","file_name":"specfile.py","file_ext":"py","file_size_in_byte":3726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"611399329","text":"def check_double_letter(string):\n    for i in range(len(string)-1):\n        tmp = string[i] + string[i+1]\n        if tmp in string[i+2:]:\n            return True\n    return False\n\ndef check_letter_spaced(string):\n    for i in range(len(string)-2):\n        if string[i] == string[i+2]:\n            return True\n    return False\n\ndef parse_and_process(file):\n    a_file = open(file, encoding='utf-8')\n    count = 0\n    for line in a_file:\n        if check_double_letter(line) and check_letter_spaced(line):\n            count += 1\n    return count\n\n\nresult = parse_and_process(\"input.txt\")\n\n\nprint(\"Result:\",result)\n","sub_path":"day05/day5_2.py","file_name":"day5_2.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"312822264","text":"import math\nimport string\n\ndef f_init(): # function to convert the file into a list\n\tinp = open(\"prueba.txt\",\"r\")\n\ttmp = inp.read()\n\tinp.close()\n\tl = [] # matrix containing one list per instruction\n\tfor elemento in tmp.split(\"\\n\"):\n\t\tl.append(elemento.split(\",\"))\n\treturn f_inst(l)\n\t\n\ndef f_inst(l):\n\tacu=4 # accumulator for the number of cycles\n\tcont=0 # counter to traverse the matrix\n\twhile cont < len(l):\n\t\tif (l[cont][0]==\"beq\" and (len(l[cont-1][0]) >=5)): # branch bubbles\n\t\t\tif (l[cont-1][0][0:5] == \"ciclo\"): ### RESTRICTION: loop labels must start with the word \"ciclo\"\n\t\t\t\tpila=[]\n\t\t\t\tpila.append(l[cont-1][0])\n\t\t\t\ttmp=cont\n\t\t\t\twhile tmp < len(l):\n\t\t\t\t\t#print (\"tmp0\",l[tmp][0],\"tmp1\",l[tmp][1],\"cont-1\",l[cont-1][0])\n\t\t\t\t\tif(l[tmp][0]==\"j\" and l[tmp][1]==l[cont-1][0]):\n\t\t\t\t\t\tbreak\n\t\t\t\t\ttmp+=1\n\t\t\t\tr1=l[cont][1]\n\t\t\t\tr2=l[cont][2]\n\t\t\t\tvr1=f_getValue(r1,cont-1,l)\n\t\t\t\tprint(\"VALUE S0\",vr1)\n\t\t\t\tvr2=f_getValue(r2,cont-1,l)\n\t\t\t\tprint(\"VALUE S1\",vr2)\n\t\t\t\tnC=f_nCiclos_beq(cont+1,tmp-1,r1,vr1,vr2,l)\n\t\t\t\tprint(\"\\n\")\n\t\t\t\tprint(\"\\n\")\n\t\t\t\tprint(\"count\",nC)\n\t\t\t\tacu+=f_loops(cont+1,tmp-1,nC,l)\n\t\t\t\tif l[cont][3][0:5]==\"ciclo\":\n\t\t\t\t\tpila.append(l[cont][3])\n\t\t\t\telse:\n\t\t\t\t\tpila.pop(len(pila)-1)\n\n\t\t\t\tcontPos=0\n\t\t\t\twhile (l[contPos][0] != l[cont][3]) and (contPos < len(l)):\n\t\t\t\t\tcontPos+=1\n\n\t\t\t\tcont=contPos-1 # The counter stays at the position before the loop so that cont+=1 below lands on the loop tag.\n\t\t\t\t\n\n\t\telse: # path for instructions outside loops\n\t\t\tif (l[cont][0]==\"lw\" and (l[cont][1]==l[cont+1][2] or l[cont][1]==l[cont+1][3])): # bubbles for lw\n\t\t\t\tacu+=2 \n\t\t\telse:\n\t\t\t\tacu+=1\n\t\tprint(\"\\n\")\n\t\tprint(\"current instruction --->\",l[cont])\n\t\tprint(\"\\nINSTR COUNTER, CYCLE ACCUMULATOR\",cont,acu)\n\t\tprint(\"\\n\")\n\t\tcont+=1\n\n\tif l[len(l)-1][0]==\"jr\":\n\t\tacu+=1 # for the last instruction\n\n\tcontTag=0 # counter to traverse the matrix\n\twhile contTag < len(l):\n\t\tif 
l[contTag][1]==\":\":\n\t\t\tacu-=1\n\t\tcontTag+=1\n\tprint(\"PILA\",pila)\n\treturn acu\n\n\ndef f_nCiclos_beq(n,m,reg,r1,r2,l): ###RESTRICCIoN: En el segundo registro se encuentra el limite y en el primero el contador\n\tcontCiclo=0\n\n\twhile r1 != r2:\n\t\tprint(\"wpwwwww\",r1,r2)\n\t\ttemp=m\n\t\twhile n <= temp:\n\t\t\tprint(\"ppppppp\")\n\t\t\tprint (\"l[temp] nCiclos\",l[temp])\n\t\t\tif l[temp][1]==reg and (l[temp][2]==reg or l[temp][3]==reg):\n\t\t\t\tprint(\"jump: \",l[temp][1])\n\t\t\t\tprint(\"INSTRUCCION,R1 \",l[temp],r1)\n\t\t\t\tr1+=f_getValue(reg,temp,l)\n\t\t\ttemp-=1\n\t\tcontCiclo+=1\n\treturn contCiclo\n\n\ndef f_loops(n,m,nCiclo,l):\n\ttmp=n\n\tacu=0\n\tprint(\"temp CIIIIIIICLOOOOO N\",l[n])\n\tprint(\"temp CIIIIIIICLOOOOO M\",l[m])\n\twhile tmp<=m:\n\t\tacu+=1\n\t\ttmp+=1\n\tprint(\"Otrooooooo acuu\",acu)\n\n\twhile n <= m:\n\t\tif (l[n][0]==\"lw\" and (l[n][1]==l[n+1][2] or l[n][1]==l[n+1][3])): #burbujas para lw\n\t\t\tacu+=1 \n\t\tn+=1\n\tacu = acu*nCiclo + 3*(nCiclo) + 2 #acumualdor final del numero de ciclos en el loop por la cantidad de veces \n\t #que se repite el loop mas las burbujas de los branch cumplidos mas el branch que no se cumple\n\tprint(\"----------acu final-----------\",acu)\n\treturn acu\n\t\t\t\t\t\n\n\n\ndef f_getValue(r,cont,l):\n\twhile cont >= 0:\n\t\tprint(\"cooont\",cont)\n\t\tprint(\"l[cont] getValue\",l[cont])\n\t\tif l[cont][1] == r:\t\n\n\t\t\tif l[cont][0] == \"addi\":\n\t\t\t\tif l[cont][2]==\"$0\":\n\t\t\t\t\tprint(\"value: \")\n\t\t\t\t\tresult=int(l[cont][3])\n\t\t\t\t\tprint (\"??\",result)\n\t\t\t\t\treturn result\n\t\t\t\telse:\n\t\t\t\t\tresult=f_addi(l[cont][2],int(l[cont][3]),cont,l)\n\t\t\t\t\treturn result\n\n\n\t\t\tif (l[cont][0] == \"add\"):\n\t\t\t\tif l[cont][2]==\"$0\":\n\t\t\t\t\tresult=f_getValue(l[cont][3],cont,l)\n\t\t\t\t\treturn result\n\t\t\t\telif l[cont][3]==\"$0\":\n\t\t\t\t\tresult=f_getValue(l[cont][2],cont,l)\n\t\t\t\t\treturn result\n\t\t\t\telse:\n\t\t\t\t\tprint(\"mm\")\n\t\t\t\t\tresult=f_add(l[cont][2],l[cont][3],cont,l)\n\t\t\t\t\treturn result\n\t\t\t\t\n\n\t\t\tif l[cont][0] == \"sll\":\n\t\t\t\tresult=f_sll(l[cont][2],int(l[cont][3]),cont,l)\n\t\t\t\treturn result\n\n\n\t\t\tif l[cont][0] == \"srl\":\n\t\t\t\tresult=f_srl(l[cont][2],int(l[cont][3]),cont,l)\n\t\t\t\treturn result\n\n\n\t\t\tif l[cont][0] == \"andi\":\n\t\t\t\tif l[cont][2]==\"$0\":\n\t\t\t\t\tresult=0\n\t\t\t\t\treturn result\n\t\t\t\telse:\n\t\t\t\t\tresult=f_andi(l[cont][2],int(l[cont][3]),cont,l)\n\t\t\t\t\treturn result\n\n\t\t\tif l[cont][0] == \"and\":\n\t\t\t\tif l[cont][2]==\"$0\" or l[cont][3]==\"$0\":\n\t\t\t\t\tresult=0\n\t\t\t\t\treturn result\n\t\t\t\telse:\n\t\t\t\t\tresult=f_and(l[cont][2],l[cont][3],cont,l)\n\t\t\t\t\treturn result\n\t\t\t\t\n\n\t\t\tif (l[cont][0] == \"or\"):\n\t\t\t\tif l[cont][2]==\"$0\":\n\t\t\t\t\tresult= f_getValue(l[cont][3],cont,l)\n\t\t\t\t\treturn result\n\t\t\t\telif l[cont][3]==\"$0\":\n\t\t\t\t\tresult=f_getValue(l[cont][2],cont,l)\n\t\t\t\t\treturn result\n\t\t\t\telse:\n\t\t\t\t\tresult=f_or(l[cont][2],l[cont][3],cont,l)\n\t\t\t\t\treturn result\n\t\t\t\n\t\t\tif l[cont][0] == \"ori\":\n\t\t\t\tif l[cont][2] == \"$0\":\n\t\t\t\t\tresult=int(l[cont][3])\n\t\t\t\t\treturn result\n\t\t\t\telse: \n\t\t\t\t\tresult=f_ori(l[cont][2],l[cont][3],cont,l)\n\t\t\t\t\treturn result\n\n\t\t\tif (l[cont][0] == \"slt\" or l[cont][0]==\"slti\"):\n\t\t\t\tresult=f_slt(l[cont][2],l[cont][3])\n\t\t\t\treturn result\n\n\t\t\tif l[cont][0] == 
\"sub\":\n\t\t\t\tprint(\"HELLOOOOOOOOOOOOOOO\")\n\t\t\t\tresult=f_sub(l[cont][2],l[cont][3],cont,l)\n\t\t\t\treturn result\n\n\t\t\tif l[cont][0] == \"lui\":\n\t\t\t\tresult=int(l[cont][2])\n\t\t\t\treturn result\n\n\t\t\t#return result\n\t\t\t\t\n\t\tcont-=1\n\t\tprint(\"!=!?!=!=!!=!=!=\",cont)\n\tprint(\"!=!RRR!=\",result)\n\n\t#return result\n\n\n\ndef f_add(a,b,cont,l):\n\tva=f_getValue(a,cont-1,l)\n\tvb=f_getValue(b,cont-1,l)\n\tresult= va + vb\n\tprint(\"\\n\")\n\tprint(\"\\n\")\n\tprint(\"va + vb = result:\",va,vb,result)\n\treturn result\n\n\ndef f_addi(a,b,cont,l):\n\tva=f_getValue(a,cont-1,l)\n\tresult= va + b\n\treturn result\n\ndef f_sub(a,b,cont,l):\n\tva=f_getValue(a,cont-1,l)\n\tvb=f_getValue(b,cont-1,l)\n\tresult= va - vb\n\treturn result\n\t\n\ndef f_sll(a,b,cont,l):\n\tva=f_getValue(a,cont-1,l)\n\t#vb=f_getValue(b,cont-1,l)\n\tresult=va*((2)**b)\n\treturn result\n\n\ndef f_srl(a,b,cont,l):\n\tva=f_getValue(a,cont-1,l)\n\t#vb=f_getValue(b,cont-1,l)\n\tresult=va//(2**b)\n\treturn result\n\n\ndef f_and(a,b,cont,l):\n\tva=f_getValue(a,cont-1,l)\n\tvb=f_getValue(b,cont-1,l)\n\tresult=int(bin(va and vb))\n\treturn result\n\t\ndef f_andi(a,b,cont,l):\n\tif b==0:\n\t\tresult=0\n\telse:\n\t\tva=f_getValue(a,cont-1,l)\n\t\tresult=bin( va and b)\n\treturn result\n\n\n\ndef f_or(a,b,cont,l):\n\tva=f_getValue(a,cont-1,l)\n\tvb=f_getValue(b,cont-1,l)\n\tresult=int(bin(va | vb))\n\treturn result\n\ndef f_ori(a,b,cont,l):\n\tva=f_getValue(a,cont-1,l)\n\tvb=int(b)\n\tresult=int(bin(va | vb))\n\treturn result\n\n\ndef f_slt(a,b,cont,l):\n\tva=f_getValue(a,cont-1,l)\n\tvb=f_getValue(b,cont-1,l)\n\tif va>=vb:\n\t\tresult=0\n\telse:\n\t\tresult=1\n\treturn result\n\n\n\n\n\n\nprint(\" ACU \",f_init())\n\n\n\t\n\t\n\t\n\t\n\n\n","sub_path":"version4/ciclos2.py","file_name":"ciclos2.py","file_ext":"py","file_size_in_byte":6396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"548188419","text":"#!/usr/bin/python3\n''' Module 2 for task 1'''\n\n\ndef matrix_divided(matrix, div):\n ''' Divide each element of a matrix by dev '''\n errors = {\n 'matrix': 'matrix must be a matrix (list of lists) of integers/floats',\n 'row': 'Each row of the matrix must have the same size',\n 'div': 'div must be a number',\n 'zero': 'division by zero'\n }\n\n if type(matrix) != list:\n raise TypeError(errors['matrix'])\n\n row_size = None\n for row in matrix:\n # Check if the element is a list\n if type(row) is not list:\n raise TypeError(errors['matrix'])\n\n # Check the size of all sublist\n if row_size is None:\n row_size = len(row)\n elif row_size != len(row):\n raise TypeError(errors['row'])\n\n # Check that all the elements are int or flot\n status = all(type(el) in set([int, float]) for el in row)\n if status is False:\n raise TypeError(errors['matrix'])\n\n if type(div) not in [int, float]:\n raise TypeError(errors['div'])\n\n if div == 0:\n raise ZeroDivisionError(errors['zero'])\n\n new = map(lambda x: list(map(lambda y: round(y / div, 2), x)), matrix)\n return list(new)\n","sub_path":"0x07-python-test_driven_development/2-matrix_divided.py","file_name":"2-matrix_divided.py","file_ext":"py","file_size_in_byte":1233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"348133955","text":"\nfrom django.conf import settings\nfrom django.shortcuts import render_to_response, get_object_or_404\nfrom django.template import RequestContext\nfrom django.contrib.auth.decorators import login_required\nimport 
datetime\n\nimport twitter\nimport pownce\nimport random\nimport zlib\nimport base64\nfrom zwitschern.utils import twitter_account_for_user, twitter_verify_credentials\nfrom zwitschern.pownce_utils import pownce_account_for_user, pownce_verify_credentials\n\n# from django.contrib.auth.models import User\n\nfrom zwitschern.models import Tweet, TweetInstance\n\n@login_required\ndef personal(request):\n \"\"\"\n personal lifestream\n \"\"\"\n \n stream = []\n \n twitter_account = twitter_account_for_user(request.user)\n if twitter_account:\n twitter_timeline = twitter_account.GetUserTimeline()\n for post in twitter_timeline:\n stream.append((post.created_at, post.GetText(), post.id))\n \n pownce_account = pownce_account_for_user(request.user)\n if pownce_account:\n pownce_timeline = pownce_account.get_notes(pownce_account.username)\n for post in pownce_timeline:\n stream.append(( post.timestamp_parsed,post.body, post.id))\n \n stream.sort()\n if request.method == \"POST\":\n pass\n else:\n pass\n \n return render_to_response(\"lifestream/personal.html\", {\n \"stream\": stream,\n }, context_instance=RequestContext(request))\n\ndef public(request):\n \"\"\"\n all the tweets\n \"\"\"\n tweets = Tweet.objects.all().order_by(\"-sent\")\n\n return render_to_response(\"lifestream/friends.html\", {\n \"tweets\": tweets,\n }, context_instance=RequestContext(request))\n\ndef single(request, id):\n \"\"\"\n A single tweet.\n \"\"\"\n tweet = get_object_or_404(TweetInstance, id=id)\n return render_to_response(\"lifestream/single.html\", {\n \"tweet\": tweet,\n }, context_instance=RequestContext(request))","sub_path":"apps/local_apps/lifestream/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"420835450","text":"from collections import defaultdict, deque\n\nclass Intcode:\n def __init__(self, prog, inputs=[]):\n self.prog = prog\n self.pc = 0\n self.mem = list(prog)\n self.halt = False\n self.inputs = deque(inputs)\n self.outputs = deque()\n self.relative_base = 0\n self.xmem = defaultdict(int)\n\n def reset(self):\n self.pc = 0\n self.halt = False\n self.mem = list(self.prog)\n\n def run(self, inputs=[]):\n self.inputs.extend(inputs)\n\n while not (self.halt or self.need_input):\n self.step()\n\n def read(self, addr):\n if addr < 0:\n raise Exception(\"invalid memory access to negative address\")\n\n if addr < len(self.mem):\n return self.mem[addr]\n else:\n return self.xmem[addr]\n\n def write(self, addr, value):\n if addr < len(self.mem):\n self.mem[addr] = value\n else:\n self.xmem[addr] = value\n\n def step(self):\n if self.halt:\n raise Exception('halted')\n\n if self.need_input:\n raise Exception('need input')\n\n def param(i):\n mode = (self.mem[self.pc] // (10 * 10**i)) % 10\n\n if mode == 0: # position mode\n return self.mem[self.pc + i]\n elif mode == 1: # immediate mode\n return self.pc + i\n elif mode == 2: # relative mode\n return self.mem[self.pc + i] + self.relative_base\n\n raise Exception(\"bad parameter mode\")\n\n def read_param(i):\n return self.read(param(i))\n\n opcode = self.mem[self.pc] % 100\n\n if opcode == 1:\n self.write(param(3), read_param(1) + read_param(2))\n self.pc += 4\n elif opcode == 2:\n self.write(param(3), read_param(1) * read_param(2))\n self.pc += 4\n elif opcode == 3:\n if len(self.inputs) > 0:\n self.write(param(1), self.inputs.popleft())\n self.pc += 2\n elif opcode == 4:\n self.outputs.append(read_param(1))\n self.pc += 2\n elif 
opcode == 5:\n            if read_param(1) == 0:\n                self.pc += 3\n            else:\n                self.pc = read_param(2)\n        elif opcode == 6:\n            if read_param(1) == 0:\n                self.pc = read_param(2)\n            else:\n                self.pc += 3\n        elif opcode == 7:\n            self.write(param(3), 1 if read_param(1) < read_param(2) else 0)\n            self.pc += 4\n        elif opcode == 8:\n            self.write(param(3), 1 if read_param(1) == read_param(2) else 0)\n            self.pc += 4\n        elif opcode == 9:\n            self.relative_base += read_param(1)\n            self.pc += 2\n        elif opcode == 99:\n            self.halt = True\n        else:\n            raise Exception(f'unknown opcode: {opcode} at pc = {self.pc}')\n\n    @property\n    def need_input(self):\n        return self.mem[self.pc] % 100 == 3 and len(self.inputs) == 0","sub_path":"2019/day/9/intcode.py","file_name":"intcode.py","file_ext":"py","file_size_in_byte":3026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"183915688","text":"from django.conf.urls import url\n\nfrom wasabi_event.views import EventListView, EventCreateView, EventUpdateView\n\nurlpatterns = [\n    url(r\"^$\",\n        view=EventListView.as_view(), name=\"list\"),\n    url(r\"^create/$\",\n        view=EventCreateView.as_view(), name=\"create\"),\n    url(r\"^update/(?P<pk>[\\w]+)/$\",\n        view=EventUpdateView.as_view(), name=\"update\")\n]\n","sub_path":"wasabi_event/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"609142797","text":"import numpy as np\nimport scipy as sp\nfrom scipy import linalg\n#import Translation\n#import ParTrace\nimport matplotlib.pyplot as plt\nfrom scipy.linalg import norm, eigh, eigvalsh, sqrtm, svd, svdvals, det\nfrom scipy import sparse\nfrom scipy.sparse import csr_matrix\nfrom qit import *\nfrom qit.state import state, fidelity\nfrom qitbias import *\n\n'''\nx = np.arange(100)\ny = x\nt = np.ones(len(x))\nfig, (ax1, ax2) = plt.subplots(1, 2)\nax1.scatter(x, y, c=t, cmap='viridis')\nax2.scatter(x, y, c=t, cmap='viridis_r')\nplt.show()\n'''\n\n'''\nx = np.arange(100)\ny = x\nt = np.ones(len(x))\nfig, ax1 = plt.subplots()\nax1.scatter(x, y, c=t, cmap='viridis')\nplt.show()\n'''\n\n\ndef process_wbc(W, b, c, equal_nodes, layer_MIcoord, layer_Dcoord):\n    N, num_layers, spins_per_layer, flat_W, num_w = CountSpins(W)\n    rho = Ham2Density(BuildHamiltonian4(W, b, c, N, spins_per_layer))\n    rho = state(rho.A, [2]*N)\n    Density_per_layer = ObtainPartialDens(rho, N, spins_per_layer)\n\n    RelEnt_coord = []\n    MI_coord = []\n    label = []\n    word = 'Layer '\n    for i,e in enumerate(spins_per_layer):\n        MI_coord.append(MI_firstANDlast(i, rho, Density_per_layer, spins_per_layer, N)) # Check first and last layer to see that all works\n        label.append(word + str(i))\n        if equal_nodes:\n            RelEnt_coord.append(RelEnt_firstANDlast(i, Density_per_layer))\n        else:\n            RelEnt_coord.append('vacio') # placeholder so the zip below keeps its length\n    for i,(E, I) in enumerate(zip(RelEnt_coord, MI_coord)):\n        if equal_nodes:\n            layer_Dcoord[i].append((E[0], E[1]))\n            layer_MIcoord[i].append((I[0], I[1]))\n\n    \n\n\n\n\ndef main():\n    layers = 4 # Number of layers, including the visible layer of RBM\n    equal_nodes = True ## True if all layers have same number of nodes. 
Important for relative entropy calculation\n    epochs_per_layer = 32000\n    snapshot_epoch = 100\n\n    epochs_total = (layers-1)*epochs_per_layer\n    #epochs_array = [snapshot_epoch*x*epochs_per_layer/ for x in range(layers)]\n    n_files = (int(epochs_per_layer/snapshot_epoch) + 1) * (layers-1)\n    #print(list(range(0,int((layer-1)*epochs_per_layer/snapshot_epoch))))\n    label = []\n    word = 'Layer '\n    for i in range(layers):\n        label.append(word + str(i)) \n\n\n\n    \n    layer_MIcoord = [[] for x in range(layers)]\n    layer_Dcoord = [[] for x in range(layers)]\n\n\n\n    '''\n    for k in range(layers-1):\n        for i in range(0, epochs_per_layer + 1, snapshot_epoch):\n            string1 = '../rbm_weights_' + str(k) + '-' + str(i) + '.npy'\n            string2 = '../rbm_bias_b_' + str(k) + '-' + str(i) + '.npy'\n            string3 = '../rbm_bias_c_' + str(k) +'-' + str(i) + '.npy'\n            #print(string1)\n            W = np.load(string1)\n            b = np.load(string2)\n            c = np.load(string3) \n            process_wbc(W, b, c, equal_nodes, layer_MIcoord, layer_Dcoord)\n            print('layer: ' + str(k) + ' epoch: ' + str(i))\n    print(layer_MIcoord[0])\n    np.save('layer_MIcoord', layer_MIcoord)\n    np.save('layer_Dcoord', layer_Dcoord)\n    '''    \n    layer_MIcoord = np.load('layer_MIcoord.npy')\n    layer_Dcoord = np.load('layer_Dcoord.npy')\n\n\n    #fig = plt.figure()\n    #ax1 = fig.add_subplot(111)    \n    #print(layer_Dcoord[-1])\n    fig1, ax1 = plt.subplots()\n    for k in range(layers):\n        x = []\n        y = []\n\n        \n        for e in layer_MIcoord[k]:\n            x.append(e[0])\n            y.append(e[1])\n        t = np.linspace(0, epochs_per_layer*(layers-1), n_files)\n        ax1.plot(x, y, linestyle='-', label=label[k], linewidth=1)\n        ax1.scatter(x, y, c=t, cmap='jet')\n\n    #for i, txt in enumerate(label):\n    #    ax1.annotate(txt, (M_1_i[i], M_N_i[i]))\n    #plt.colorbar()\n    plt.title('Mi(Layer i , layer N) (y axis) v/s Mi(layer 0 , layer i) (x axis)')\n    plt.ylabel('Mi(Layer i , layer N)')\n    plt.xlabel('Mi(layer 0 , layer i)')\n    plt.legend(loc='upper right')\n    ax=plt.gca()\n    PCM=ax.get_children()[2]\n    cbar = plt.colorbar(PCM, ax=ax)\n    cbar.set_label('Epochs', rotation=0)\n    plt.savefig('MI_history_qit.pdf')\n    plt.close()\n\n\n    fig2, ax2 = plt.subplots()\n    for k in range(layers):\n        x = []\n        y = []\n        for e in layer_Dcoord[k]:\n            x.append(e[0])\n            y.append(e[1])\n        t = np.linspace(0, epochs_per_layer*(layers-1), n_files)\n        ax2.plot(x, y, linestyle='-', label=label[k], linewidth=1)\n        ax2.scatter(x, y, c=t, cmap='jet')\n\n    #for i, txt in enumerate(label):\n    #    ax1.annotate(txt, (M_1_i[i], M_N_i[i]))\n    #plt.colorbar()\n    plt.title('D(Layer i || layer N) (y axis) v/s D(layer 1 || layer i) (x axis)')\n    plt.ylabel('D(Layer i || layer N)')\n    plt.xlabel('D(layer 1 || layer i)')\n    plt.legend(loc='upper right')\n    ax=plt.gca()\n    PCM=ax.get_children()[2]\n    cbar = plt.colorbar(PCM, ax=ax)\n    cbar.set_label('Epochs', rotation=0)\n    plt.savefig('D_history_qit.pdf')\n    plt.close()\n\n    \n    fig3, ax3 = plt.subplots()\n    for k in range(layers):\n        x = []\n        y = []\n        for e in layer_MIcoord[k]:\n            x.append(e[0])\n            y.append(e[1])    \n        t = np.linspace(0, epochs_per_layer*(layers-1), n_files)\n        ax3.plot([k]*len(x), x, linestyle='-', label=label[k], linewidth=1)\n        ax3.scatter([k]*len(x), x, c=t, cmap='jet')    \n    plt.title('Mi(Layer i , layer 1) (y axis) v/s Layers first to last (x axis)')\n    plt.ylabel('Mi(Layer i , layer 1)')\n    plt.xlabel('Layers first to last')\n    plt.legend(loc='upper right')\n    ax=plt.gca()\n    PCM=ax.get_children()[2]\n    cbar = plt.colorbar(PCM, ax=ax)\n    cbar.set_label('Epochs', rotation=0)    \n    plt.savefig('MI_1-i_qit.pdf')\n    plt.close()\n\n\n    fig4, ax4 = plt.subplots()\n    for k in range(layers):\n        x 
= []\n        y = []\n        for e in layer_MIcoord[k]:\n            x.append(e[0])\n            y.append(e[1])\n        t = np.linspace(0, epochs_per_layer*(layers-1), n_files)\n        ax4.plot([k]*len(y), y, linestyle='-', label=label[k], linewidth=1)\n        ax4.scatter([k]*len(y), y, c=t, cmap='jet')    \n    plt.title('Mi(Layer i , layer N) (y axis) v/s Layers first to last (x axis)')\n    plt.ylabel('Mi(Layer i , layer N)')\n    plt.xlabel('Layers first to last')\n    plt.legend(loc='upper right')\n    ax=plt.gca()\n    PCM=ax.get_children()[2]\n    cbar = plt.colorbar(PCM, ax=ax)\n    cbar.set_label('Epochs', rotation=0)    \n    plt.savefig('MI_N-i_qit.pdf')\n    plt.close()\n\n\n    fig5, ax5 = plt.subplots()\n    for k in range(layers):\n        x = []\n        y = []\n        for e in layer_Dcoord[k]:\n            x.append(e[0])\n            y.append(e[1])    \n        t = np.linspace(0, epochs_per_layer*(layers-1), n_files)\n        ax5.plot([k]*len(x), x, linestyle='-', label=label[k], linewidth=1)\n        ax5.scatter([k]*len(x), x, c=t, cmap='jet')    \n    plt.title('D(Layer 1 || layer i) (y axis) v/s Layers first to last (x axis)')\n    plt.ylabel('D(Layer 1 || layer i)')\n    plt.xlabel('Layers first to last')\n    plt.legend(loc='upper right')\n    ax=plt.gca()\n    PCM=ax.get_children()[2]\n    cbar = plt.colorbar(PCM, ax=ax)\n    cbar.set_label('Epochs', rotation=0)    \n    plt.savefig('D_1-i_qit.pdf')\n    plt.close()\n\n    fig6, ax6 = plt.subplots()\n    for k in range(layers):\n        x = []\n        y = []\n        for e in layer_Dcoord[k]:\n            x.append(e[0])\n            y.append(e[1])    \n        t = np.linspace(0, epochs_per_layer*(layers-1), n_files)\n        ax6.plot([k]*len(y), y, linestyle='-', label=label[k], linewidth=1)\n        ax6.scatter([k]*len(y), y, c=t, cmap='jet')    \n    plt.title('D(Layer i || layer N) (y axis) v/s Layers first to last (x axis)')\n    plt.ylabel('D(Layer i || layer N)')\n    plt.xlabel('Layers first to last')\n    plt.legend(loc='upper right')\n    ax=plt.gca()\n    PCM=ax.get_children()[2]\n    cbar = plt.colorbar(PCM, ax=ax)\n    cbar.set_label('Epochs', rotation=0)    \n    plt.savefig('D_N-i_qit.pdf')\n    plt.close()\n\n\n\n\n    '''\n    for k, i in enumerate(epochs_array):\n        if i != epochs_total:\n            string1 = '../rbm_weights_all-' + str(i) + '.npy'\n            string2 = '../rbm_bias_b_all-' + str(i) + '.npy'\n            string3 = '../rbm_bias_c_all-' + str(i) + '.npy'\n            W = np.load(string1)\n            b = np.load(string2)\n            c = np.load(string3)\n            print(string1)\n            #process_wbc(W, b, c)\n        for j in range(0,epochs_per_layer,snapshot_epoch):\n            string1 = '../rbm_weights_' + str(k) + '-' + str(j) + '.npy'\n            string2 = '../rbm_bias_b_' + str(k) + '-' + str(j) + '.npy'\n            string3 = '../rbm_bias_c_' + str(k) +'-' + str(j) + '.npy'\n            print(string1)\n        if i == epochs_total:\n            string1 = '../rbm_weights_all-' + str(i) + '.npy'\n            string2 = '../rbm_bias_b_all-' + str(i) + '.npy'\n            string3 = '../rbm_bias_c_all-' + str(i) + '.npy'\n            W = np.load(string1)\n            b = np.load(string2)\n            c = np.load(string3)\n    '''    \n    \n\nif __name__ == '__main__':\n    main()\n","sub_path":"plot-history.py","file_name":"plot-history.py","file_ext":"py","file_size_in_byte":9102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"24303731","text":"import os, email\nimport numpy as np \nimport pandas as pd\nimport datetime\nimport random\nimport threading\nfrom enum import IntEnum\nimport json\nfrom collections import Counter\n\nfrom EmailHelperFunctions import get_text_from_email, split_email_addresses, clean_email\nfrom MDS import cmdscale\n\nimport gensim\nfrom gensim import corpora\nfrom gensim.models import CoherenceModel\n\nfrom nltk.corpus import stopwords\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom 
sklearn.feature_extraction.text import TfidfTransformer\n\n# Set the random seed for reproducability\nrandom.seed(1)\n\n# TODO: Set the sample sizes\noptimum_sample_size = 1000\nsample_size = 15000\n\n# Min and max number of topics\nmin_topic_size = 3\nmax_topic_size = 35\n\n# Download File: http://mallet.cs.umass.edu/dist/mallet-2.0.8.zip\n# Mallet is Java based, so make sure Java is installed\n\n\nclass DocumentTypeEnum(IntEnum):\n unknownType = 0\n emailType = 1 # 'emails'\n documentType = 2 # 'documents'\n\nclass createModelThread(threading.Thread):\n def __init__(self, tmo):\n threading.Thread.__init__(self)\n self.tm = tmo\n self.mallet_path = ''\n\n def run(self):\n print('Starting createModelThread: {}'.format(datetime.datetime.now()))\n\n #\n # These functions should set the following lists of strings:\n # self.tm.text_clean\n # self.tm.optimum_text_clean\n #\n if self.tm.documentType == DocumentTypeEnum.emailType:\n self.process_emails()\n elif self.tm.documentType == DocumentTypeEnum.documentType:\n self.process_documents()\n else:\n print('Error in createModelThread, document type not specified')\n return False\n\n # Create the dictionary for the model\n self.tm.dictionary = corpora.Dictionary(self.tm.optimum_text_clean)\n\n # Create the text_term_matrix\n self.tm.text_term_matrix = [self.tm.dictionary.doc2bow(text) for text in self.tm.optimum_text_clean]\n\n #\n # Automatically determine the number of topics if required\n #\n if self.tm.numberOfTopics <= 0:\n\n # Compute optimial number of topics only\n optimalTopicsOnly = False\n\n if self.tm.numberOfTopics == -1:\n optimalTopicsOnly = True\n\n # Set paths needed by Mallet\n self.mallet_distribution = os.environ[\"MALLET_HOME\"]\n self.mallet_path = os.path.join(self.mallet_distribution, 'bin', 'mallet')\n\n # Compute the coherence values using Mallet\n model_list, coherence_values = self.compute_coherence_values(dictionary=self.tm.dictionary,\\\n corpus=self.tm.text_term_matrix,\\\n texts=self.tm.optimum_text_clean,\\\n limit=max_topic_size,\\\n start=5,step=5)\n\n # Find the optimal number of topics\n limit = max_topic_size\n start = 5\n step = 5\n x = list(range(start, limit, step))\n self.tm.numberOfTopics = x[np.argmax(coherence_values)]\n print('Optimum number of topics is: {}'.format(self.tm.numberOfTopics))\n\n if optimalTopicsOnly:\n return True\n\n # Create the dictionary and term matrix used by LDA\n self.tm.dictionary = corpora.Dictionary(self.tm.text_clean)\n self.tm.text_term_matrix = [self.tm.dictionary.doc2bow(text) for text in self.tm.text_clean]\n\n # Buid the Gensim LDA model\n Lda = gensim.models.ldamodel.LdaModel\n\n self.tm.ldamodel = Lda(self.tm.text_term_matrix, num_topics=self.tm.numberOfTopics, id2word = self.tm.dictionary, passes=30)\n\n #\n # Get token count proportion statistics for the plot. Also add topic\n # category to sub_df\n #\n topic_token_count = [0 for i in range(0,self.tm.numberOfTopics)]\n topicSeries = []\n probabilitySeries = []\n\n # Note: Must use len(text_clean) because len(text_clean) <= sample_size because some documents may have been removed (e.g. 
were HTML)\n for i in range(0,len(self.tm.text_clean)):\n assignedTopic, topicProbability = self.assigned_topic(self.get_candidate_topics(i))\n topic_token_count[assignedTopic] += len(self.tm.text_term_matrix[i])\n topicSeries.append(assignedTopic)\n probabilitySeries.append(topicProbability)\n\n self.tm.token_count_proportions = np.array(topic_token_count) / sum(topic_token_count)\n self.tm.sub_df['topic'] = topicSeries\n self.tm.sub_df['probability'] = probabilitySeries\n\n # Sort by probability\n self.tm.sub_df.sort_values(by=['probability'], ascending=False, inplace=True)\n self.tm.sub_df.drop(columns=['probability'], inplace=True)\n\n # Write the data frame (with topic assignment) to disk so it can be read when the user switches the number of topics\n self.tm.sub_df.to_csv('./state/TopicData/topic_{0}.csv'.format(self.tm.numberOfTopics), index=False)\n\n self.tm.modelBuilt = True\n\n print('Finished createModelThread: {}'.format(datetime.datetime.now()))\n return True\n\n def createStopWordList(self, df):\n\n stop_words_path = './state/TopicData/stopwords.json'\n\n # Read stop words from a file if it already exists\n if os.path.isfile(stop_words_path):\n with open(stop_words_path, 'r') as f:\n return json.load(f)\n\n # Get word count vector\n cv = CountVectorizer(min_df = 0.01, max_df = 1.0)\n word_count_vector = cv.fit_transform(df.content)\n feature_names = cv.get_feature_names()\n\n # Calculate TF-IDF weights for words in documents\n tfidf_transformer = TfidfTransformer(smooth_idf=True, use_idf=True)\n transformed_weights = tfidf_transformer.fit_transform(word_count_vector)\n\n # Create a count of least informative words from each document\n counter = Counter()\n\n def findLeastInformativeWordAndUpdateCounter(row):\n cols = row.nonzero()[1]\n vals = row.toarray().ravel()[cols].tolist()\n if len(vals) > 0:\n counter[cols[np.argmin(vals)]] += 1\n\n # Find least least informative word for each document\n for row in transformed_weights:\n findLeastInformativeWordAndUpdateCounter(row)\n\n # Now find the words (from the indicies)\n most_common_indicies = [x[0] for x in counter.most_common()]\n common_words = [feature_names[idx] for idx in most_common_indicies]\n\n # Remove English stop words\n final_list = []\n for word in common_words:\n if word not in set(stopwords.words('english')):\n final_list.append(word)\n\n final_list = final_list[:20]\n print('The stop words are: ', final_list)\n\n # Write to a file so it does not need to be computed each time\n with open(stop_words_path, 'w') as f:\n json.dump(final_list, f)\n\n return final_list\n\n def process_emails(self):\n global sample_size\n global optimum_sample_size\n\n # Load the entire email corpus\n emails_df = pd.read_csv('email_data.csv').dropna(subset=[\"body\"])\n emails_df.columns = emails_df.columns.str.lower()\n\n # Adjust the sample size if necessary\n if sample_size > len(emails_df):\n\n # In case the hard coded sample size is greater than the number of\n # emails, then just use the number of emails\n sample_size = len(emails_df)\n\n # Use 10% of the sample size when determining the optimal number of\n # topics\n optimum_sample_size = round(sample_size * 0.1)\n\n # Sample emails from entire csv\n emails_df = emails_df.sample(n=sample_size)\n\n # Convert columns to the correct type\n emails_df['id'] = pd.to_numeric(emails_df['id'])\n emails_df['date'] = emails_df['date'].apply(lambda x: pd.to_datetime(str(x)))\n\n # Parse the emails into a list email objects\n messages = list(map(email.message_from_string, 
emails_df['body']))\n\n        # Parse content from emails\n        emails_df['content'] = list(map(get_text_from_email, messages))\n        del messages\n        emails_df = emails_df.drop(['body'], axis=1)\n\n        # Remove emails that are HTML\n        emails_df = emails_df[(emails_df['content'].str.lower()).str.find(\"<html\") == -1]\n\n        self.tm.sub_df = emails_df\n\n        # Create stop words\n        stopWords = self.createStopWordList(emails_df)\n\n        # Set the text_clean to be used to create the LDA model\n        self.tm.text_clean = []\n        for text in self.tm.sub_df['content']:\n            self.tm.text_clean.append(clean_email(text, stopWords).split())\n\n        # Use a smaller sample to find the coherence values in\n        # compute_coherence_values()\n        self.tm.optimum_text_clean = [\n            self.tm.text_clean[i] for i in random.sample(range(len(self.tm.text_clean)), optimum_sample_size)\n        ]\n\n    def process_documents(self):\n        # TODO\n        self.tm.text_clean = []\n        self.tm.optimum_text_clean = []\n\n    def compute_coherence_values(self, dictionary, corpus, texts, limit, start=2, step=3):\n        \"\"\"\n        Compute c_v coherence for various number of topics\n\n        Args:\n            dictionary : Gensim dictionary\n            corpus : Gensim corpus\n            texts : List of input texts\n            limit : Max num of topics\n\n        Returns:\n            model_list : List of LDA topic models\n            coherence_values : Coherence values corresponding to the LDA model with respective number of topics\n\n        Raises:\n            None\n        \"\"\"\n        coherence_values = []\n        model_list = []\n        for num_topics in range(start, limit, step):\n            model = gensim.models.wrappers.LdaMallet(self.mallet_path, corpus=corpus, num_topics=num_topics, id2word=dictionary)\n            model_list.append(model)\n            coherencemodel = CoherenceModel(model=model, texts=texts, dictionary=dictionary, coherence='c_v')\n            coherence_values.append(coherencemodel.get_coherence())\n\n        return model_list, coherence_values\n\n    def assigned_topic(self, candidateTopics):\n        largest = (0,0.0)\n        for topic in candidateTopics:\n            if topic[1] > largest[1]:\n                largest = topic\n        return largest\n\n    def get_candidate_topics(self, index):\n        return self.tm.ldamodel.get_document_topics(self.tm.text_term_matrix[index])\n\n","sub_path":"BerkeleyMIDS/W210/TopicModelingWebAPI/CreateModelingThread.py","file_name":"CreateModelingThread.py","file_ext":"py","file_size_in_byte":10791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"388381183","text":"import argparse\nimport geopandas as gpd\nimport numpy as np\nfrom functools import partial\nimport pickle\n\n## Set up argument parser\n\nparser = argparse.ArgumentParser(description=\"Neutral blockgroup ensemble for GA\", \n                                 prog=\"ga_block_groups_neutral_chain.py\")\nparser.add_argument(\"map\", metavar=\"map\", type=str,\n                    choices=[\"congress\", \"congress_2000\",\n                             \"state_house\", \"state_senate\"],\n                    help=\"the map to redistrict\")\nparser.add_argument(\"n\", metavar=\"iterations\", type=int,\n                    help=\"the number of plans to sample\")\nparser.add_argument(\"popcol\", metavar=\"population column\", type=str,\n                    choices=[\"TOTPOP\", \"VAP\", \"CPOP\", \"CVAP\"],\n                    help=\"the population column by which to balance redistricting\")\nparser.add_argument(\"--i\", metavar=\"run number\", type=int, default=0,\n                    help=\"which chain run is this?\")\nargs = parser.parse_args()\n\nfrom gerrychain.random import random\nrandom.seed(args.i)\nfrom gerrychain import Graph, GeographicPartition, Partition, Election, accept\nfrom gerrychain.updaters import Tally, cut_edges\nfrom gerrychain import MarkovChain\nfrom gerrychain.proposals import recom\nfrom 
gerrychain.accept import always_accept\nfrom gerrychain import constraints\nfrom gerrychain.tree import recursive_tree_part\n\n\n\nnum_districts_in_map = {\"congress\" : 14,\n \"congress_2000\" : 13,\n \"state_senate\" : 56,\n \"state_house\" : 180}\n\nepsilons = {\"congress\" : 0.01,\n \"congress_2000\" : 0.01,\n \"state_senate\" : 0.02,\n \"state_house\" : 0.05} \n\nPOP_COL = \"{}18\".format(args.popcol)\nNUM_DISTRICTS = num_districts_in_map[args.map]\nITERS = args.n\nEPS = epsilons[args.map]\nDEMO_COLS = [\"TOTPOP\", \"VAP\", \"CPOP\", \"CVAP\", \n \"BPOP\", \"HPOP\", \"WPOP\", \"BPOP_perc\", \"HPOP_perc\", \"WPOP_perc\",\n \"BCPOP\", \"HCPOP\", \"WCPOP\", \"BCPOP_perc\", \"HCPOP_perc\", \"WCPOP_perc\",\n \"BCVAP\", \"HCVAP\", \"WCVAP\", \"BCVAP_perc\", \"HCVAP_perc\", \"WCVAP_perc\"]\n\n## Pull in graph and set up updaters\n\nprint(\"Reading in Data/Graph\")\n\nwith open(\"../graphs/GA_blockgroup_graph.p\", \"rb\") as f_in:\n graph = pickle.load(f_in)\n\n\nga_updaters = {\"population\" : Tally(POP_COL, alias=\"population\"),\n \"cut_edges\": cut_edges,\n \"TOTPOP\": Tally(\"TOTPOP18\", alias=\"TOTPOP\"),\n \"VAP\": Tally(\"VAP18\", alias=\"VAP\"),\n \"CPOP\": Tally(\"CPOP18\", alias=\"CPOP\"),\n \"CVAP\": Tally(\"CVAP18\", alias=\"CVAP\"),\n \"BPOP\": Tally(\"NH_BLACK18\", alias=\"BPOP\"),\n \"HPOP\": Tally(\"HISP18\", alias=\"HPOP\"),\n \"WPOP\": Tally(\"NH_WHITE18\", alias=\"WPOP\"),\n \"BCPOP\": Tally(\"BCPOP18\", alias=\"BCPOP\"),\n \"HCPOP\": Tally(\"HCPOP18\", alias=\"HCPOP\"),\n \"WCPOP\": Tally(\"WCPOP18\", alias=\"WCPOP\"),\n \"BCVAP\": Tally(\"BCVAP18\", alias=\"BCVAP\"),\n \"HCVAP\": Tally(\"HCVAP18\", alias=\"HCVAP\"),\n \"WCVAP\": Tally(\"WCVAP18\", alias=\"WCVAP\"),\n \"BPOP_perc\": lambda p: {k: (v / p[\"TOTPOP\"][k]) for k, v in p[\"BPOP\"].items()},\n \"HPOP_perc\": lambda p: {k: (v / p[\"TOTPOP\"][k]) for k, v in p[\"HPOP\"].items()},\n \"WPOP_perc\": lambda p: {k: (v / p[\"TOTPOP\"][k]) for k, v in p[\"WPOP\"].items()},\n \"BCPOP_perc\": lambda p: {k: (v / p[\"CPOP\"][k]) for k, v in p[\"BCPOP\"].items()},\n \"HCPOP_perc\": lambda p: {k: (v / p[\"CPOP\"][k]) for k, v in p[\"HCPOP\"].items()},\n \"WCPOP_perc\": lambda p: {k: (v / p[\"CPOP\"][k]) for k, v in p[\"WCPOP\"].items()},\n \"BCVAP_perc\": lambda p: {k: (v / p[\"CVAP\"][k]) for k, v in p[\"BCVAP\"].items()},\n \"HCVAP_perc\": lambda p: {k: (v / p[\"CVAP\"][k]) for k, v in p[\"HCVAP\"].items()},\n \"WCVAP_perc\": lambda p: {k: (v / p[\"CVAP\"][k]) for k, v in p[\"WCVAP\"].items()},\n }\n\n\n## Create seed plan\n\nprint(\"Creating seed plan\")\n\ntotal_pop = sum([graph.nodes[n][POP_COL] for n in graph.nodes])\nideal_pop = total_pop / NUM_DISTRICTS\n\nif args.map != \"state_house\":\n cddict = recursive_tree_part(graph=graph, parts=range(NUM_DISTRICTS), \n pop_target=ideal_pop, pop_col=POP_COL, epsilon=EPS)\nelse:\n with open(\"GA_house_seed_part_0.05.p\", \"rb\") as f:\n cddict = pickle.load(f)\n\ninit_partition = Partition(graph, assignment=cddict, updaters=ga_updaters)\n\n\nwhile(not constraints.within_percent_of_ideal_population(init_partition, EPS)(init_partition)):\n cddict = recursive_tree_part(graph=graph, parts=range(NUM_DISTRICTS), \n pop_target=ideal_pop, pop_col=POP_COL, epsilon=EPS)\n init_partition = Partition(graph, assignment=cddict, updaters=ga_updaters)\n\n\n## Setup chain\n\nproposal = partial(recom, pop_col=POP_COL, pop_target=ideal_pop, epsilon=EPS, \n node_repeats=1)\n\ncompactness_bound = constraints.UpperBound(lambda p: len(p[\"cut_edges\"]), \n 
2*len(init_partition[\"cut_edges\"]))\n\nchain = MarkovChain(\n proposal,\n constraints=[\n constraints.within_percent_of_ideal_population(init_partition, EPS),\n compactness_bound],\n accept=accept.always_accept,\n initial_state=init_partition,\n total_steps=ITERS)\n\n\n## Run chain\n\nprint(\"Starting Markov Chain\")\n\ndef init_chain_results():\n data = {\"cutedges\": np.zeros(ITERS)}\n parts = {\"samples\": []}\n\n\n for c in DEMO_COLS:\n data[c] = np.zeros((ITERS, NUM_DISTRICTS))\n\n return data, parts\n\ndef tract_chain_results(data, part, i):\n data[\"cutedges\"][i] = len(part[\"cut_edges\"])\n\n for c in DEMO_COLS:\n data[c] = sorted(part[c].values())\n\n\ndef update_saved_parts(parts, part):\n parts[\"samples\"].append(part.assignment)\n\nchain_results, parts = init_chain_results()\n\nfor i, part in enumerate(chain):\n chain_results[\"cutedges\"][i] = len(part[\"cut_edges\"])\n tract_chain_results(chain_results, part, i)\n\n if i % (ITERS / 10) == 99: update_saved_parts(parts, part)\n if i % 1000 == 0: print(\"*\", end=\"\", flush=True)\nprint()\n\n## Save results\n\nprint(\"Saving results\")\n\noutput = \"/cluster/tufts/mggg/jmatth03/Georgia/GA_blockgroups_{}_{}_{}_{}.p\".format(args.map, POP_COL, ITERS, args.i)\noutput_parts = \"/cluster/tufts/mggg/jmatth03/Georgia/GA_blockgroups_{}_{}_{}_{}_parts.p\".format(args.map, POP_COL, ITERS, args.i) \n\nwith open(output, \"wb\") as f_out:\n pickle.dump(chain_results, f_out)\n\nwith open(output_parts, \"wb\") as f_out:\n pickle.dump(parts, f_out)\n","sub_path":"experiments/ga_block_groups_neutral_chain.py","file_name":"ga_block_groups_neutral_chain.py","file_ext":"py","file_size_in_byte":6626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"415894125","text":"# coding=utf-8\n\n\"\"\"Reverse Vowels of a String.\"\"\"\n\nfrom __future__ import print_function\nimport re\n\n\ndef _solve(s):\n front = 0\n end = -1\n length = len(s)\n s = list(s)\n vowels = 'aeiouAEIOU'\n while front - end <= length:\n if s[front] in vowels and s[end] in vowels:\n s[front], s[end] = s[end], s[front]\n front += 1\n end -= 1\n if front == length or end == -length - 1:\n break\n if s[front] not in vowels:\n front += 1\n if s[end] not in vowels:\n end -= 1\n return ''.join(s)\n\n\nif __name__ == '__main__':\n print (_solve(''))\n print (_solve('a.'))\n print (_solve('.a'))\n print (_solve('aeui'))\n print (_solve('race a car'))\n","sub_path":"easy/345.py","file_name":"345.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"222083327","text":"from pymongo import MongoClient\nfrom jobqueue import JobQueue\nimport unittest\n\nclass K(object):\n host = 'localhost'\n port = 27017\n collection = 'test_jobqueue'\n\nclass TestJobQueue(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n client = MongoClient(K.host, K.port)\n client.pymongo_test[K.collection].drop()\n cls.db = client.pymongo_test\n\n def tearDown(self):\n self.db[K.collection].drop()\n\n def test_init(self):\n jq = JobQueue(self.db, collection_name=K.collection)\n self.assertTrue(jq.valid())\n self.assertRaises(Exception, jq._create)\n\n def test_valid(self):\n jq = JobQueue(self.db, collection_name=K.collection)\n jq.db[K.collection].drop()\n jq._create(capped=False)\n self.assertFalse(jq.valid())\n self.assertRaises(Exception, jq._create)\n\n def test_publish(self):\n jq = JobQueue(self.db, collection_name=K.collection)\n job = {'message': 
'hello world!'}\n        jq.pub(job)\n        self.assertEquals(jq.queue_count(), 1)\n        jq.clear_queue()\n        jq.q = None # erase the queue\n        self.assertRaises(Exception, jq.pub, job)\n\n    def test_next(self):\n        jq = JobQueue(self.db, collection_name=K.collection)\n        self.assertRaises(Exception, jq.next)\n        job = {'message': 'hello world!'}\n        jq.pub(job)\n        row = jq.next()\n        self.assertEquals(row['data']['message'], 'hello world!')\n        self.assertEquals(jq.queue_count(), 0)\n\n    def test_iter(self):\n        NUM_JOBS = 3\n        num_jobs_queued = [NUM_JOBS]\n        def iterator_wait():\n            num_jobs_queued[0] -= 1\n            return num_jobs_queued[0] < 0\n        jq = JobQueue(self.db, iterator_wait=iterator_wait, collection_name=K.collection)\n        for ii in range(1, NUM_JOBS + 1):\n            job = {'message': 'I am # ' + str(ii)}\n            jq.pub(job)\n        num_jobs_done = 0\n        for job in jq:\n            #print job['data']['message']\n            num_jobs_done += 1\n            record = jq.q.find_one({'_id': job['_id']})\n            self.assertEquals(record['status'], jq.WORKING)\n        self.assertEquals(num_jobs_done, NUM_JOBS)\n\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"pymjq/test_jobqueue.py","file_name":"test_jobqueue.py","file_ext":"py","file_size_in_byte":2240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"498751634","text":"# encoding: utf8\nfrom __future__ import unicode_literals\n\nimport fdb\n\nfrom .cartao import Cartao\nfrom gimli.commons.logger import log\nfrom gimli.commons.evento import EventoPedido\n\n\nclass Retaguarda(object):\n\n    QUERY_CARTOES_CONSUMINDO = (\n        \"SELECT m.IdMovimentacao, m.Numero, m.Abertura, m.TotalBruto, \"\n        \"a.IdComissionado, a.Nome, \"\n        \"(SELECT COUNT(*) FROM TFdsProdutosConsumidos \"\n        \" WHERE IdMovimentacao=m.IdMovimentacao) AS QtdItens \"\n        \"FROM TFdsMovimentacao m \"\n        \"LEFT JOIN TVdaComissionado a ON (m.ATENDENTE=a.IDCOMISSIONADO) \"\n        \"WHERE m.Situacao IN ('C', 'F')\"\n    )\n\n    def __init__(self, config):\n        self.connection = self.create_database_connection(config)\n\n    def create_database_connection(self, config):\n        \"\"\" Create and return a Firebird connection\n        \"\"\"\n        database = config.get(\"Dados\", \"DataBase\")\n        log.info(\"Iniciando conexão Firebird: {}\".format(database))\n        return fdb.connect(database, 'gimli', 'gimli')\n\n    def cartoes_consumindo(self):\n        \"\"\" Return an iterator over the cards currently consuming\n        \"\"\"\n        cursor = self.connection.cursor()\n        cursor.execute(self.QUERY_CARTOES_CONSUMINDO)\n        cartoes = cursor.fetchallmap()\n        for dados_cartao in cartoes:\n            cartao = Cartao(self.connection)\n            cartao.load_from(dados_cartao) \n            if cartao.qtde_itens == 0: \n                continue\n            yield cartao\n        cursor.close()\n\n    def obtem_cartao(self, pedido_id):\n        query = self.QUERY_CARTOES_CONSUMINDO + \" AND m.IdMovimentacao={}\".format(pedido_id)\n        cursor = self.connection.cursor()\n        cursor.execute(query)\n        cartao = Cartao(self.connection)\n        dataset = cursor.fetchonemap()\n        if dataset:\n            cartao.load_from(dataset)\n        return cartao\n\n    def fecha_cartao(self, numero):\n        cartao = Cartao(self.connection, numero)\n        cartao.fecha()\n        self.connection.commit()\n\n    def marca_cartao_como_sincronizado(self, cartao):\n        log.debug(\"Marcando {} como sincronizado na retaguarda\".format(cartao))\n        # Three write attempts\n        for i in range(3):\n            cursor = self.connection.cursor()\n            try:\n                cursor.execute(\n                    \"UPDATE TFdsMovimentacao SET Editado=-1 \"\n                    \"WHERE Editado=1 AND numero=?\", (cartao.numero,))\n                self.connection.commit()\n                break\n            except Exception as e:\n                self.connection.rollback()\n                log.info(\n                    \"Erro ao 
marcar {} como sincronizado. \"\n                    \"Tentativa {} => ({})\"\n                    .format(cartao, i, e[0]))\n            finally:\n                cursor.close()\n\n    def eventos_nao_processados(self):\n        \"\"\" Return a list of the events not yet processed\n        \"\"\"\n        cursor = self.connection.cursor()\n        cursor.execute(\n            \"SELECT EVENTO_ID, DATA, EVENTO \"\n            \"FROM TFDSEVENTO \"\n            \"WHERE processado='0' \"\n            \"ORDER BY EVENTO_ID\"\n        )\n        eventos = cursor.fetchall()\n        for evento in eventos:\n            yield EventoPedido(evento[0], evento[2])\n\n    def marca_evento_como_processado(self, evento):\n        log.debug(\"Marcando evento {} como processado\".format(evento.evento_id))\n        cursor = self.connection.cursor()\n        cursor.execute(\n            \"UPDATE TFdsEvento SET processado='1' \"\n            \"WHERE evento_id=?\", (evento.evento_id,)\n        )\n        self.connection.commit()\n","sub_path":"gimli/retaguarda/retaguarda.py","file_name":"retaguarda.py","file_ext":"py","file_size_in_byte":3610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"650107956","text":"'''Trains a simple deep NN on the MNIST dataset.\nGets to 98.40% test accuracy after 20 epochs\n(there is *a lot* of margin for parameter tuning).\n2 seconds per epoch on a K520 GPU.\n'''\n\nfrom __future__ import print_function\n\nimport keras, cv2\nfrom keras.datasets import mnist\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout\nfrom keras.optimizers import RMSprop\nfrom numpy.random import seed\nimport numpy as np\nimport Utilities as utils\n\nimport os\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\nbatch_size = 128\nnum_classes = 10\nepochs = 10\n\n# the data, shuffled and split between train and test sets\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\n\nx_train = x_train.reshape(60000, 784)\nx_test = x_test.reshape(10000, 784)\nx_train = x_train.astype('float32')\nx_test = x_test.astype('float32')\nx_train /= 255\nx_test /= 255\nprint(x_train.shape[0], 'train samples')\nprint(x_test.shape[0], 'test samples')\n\nx_train1 = x_train.copy()\nx_test1 = x_test.copy()\n\ntraining_steps = 2000\n\nprint(x_train.shape)\nprint(x_test.shape)\n\nseed(2)\nperm = np.random.permutation(28 * 28)\nx_train1 = x_train[:, perm]\nx_test1 = x_test[:, perm]\n\ncv2.imshow(\"TrainImages\", utils.MakeGridOfImages(x_train[0:100, :].reshape(100, 28, 28)))\ncv2.imshow(\"TrainImages_Transformed\", utils.MakeGridOfImages(x_train1[0:100, :].reshape(100, 28, 28)))\ncv2.waitKey(1)\n\n# convert class vectors to binary class matrices\ny_train = keras.utils.to_categorical(y_train, num_classes)\ny_test = keras.utils.to_categorical(y_test, num_classes)\n\nmodel = Sequential()\nmodel.add(Dense(512, activation='relu', input_shape=(784,)))\nmodel.add(Dropout(0.2))\nmodel.add(Dense(512, activation='relu'))\nmodel.add(Dropout(0.2))\nmodel.add(Dense(10, activation='softmax'))\n\nmodel.summary()\n\nmodel.compile(loss='categorical_crossentropy', optimizer=RMSprop(), metrics=['accuracy'])\n\nhistory = model.fit(x_train1, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(x_test1, y_test))\n\nscore = model.evaluate(x_test1, y_test, verbose=0)\nprint('Test loss:', score[0])\nprint('Test accuracy:', score[1])\n\n\n","sub_path":"Unsorted/mnist_keras.py","file_name":"mnist_keras.py","file_ext":"py","file_size_in_byte":2091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"631186810","text":"\"\"\"\n\nunion():O(log(n))\nfind(): O(log(n))\n\n\"\"\"\n\ngraph = [0,1,2,3,4,5,6,7,8]\nweights = 
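[0,0,0,0,0,0,0,0,0]\n\n# Editorial worked example (not in the original file): with weighting plus the\n# path compression in findParent below, trees stay shallow, which is where the\n# O(log(n)) bounds above come from, e.g.\n#   union(0, 1); union(1, 2)   # 0, 1 and 2 end up under one root\n#   find(0, 2)                 # -> True\n\nweights = 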
[0,0,0,0,0,0,0,0,0]\n\ndef union(one, two): \n    i = findParent(one)\n    j = findParent(two)\n\n    if weights[i] < weights[j]:\n        graph[i] =j\n        weights[j]+=1\n    else:\n        graph[j] =i\n        weights[i]+=1\n\ndef find(one, two):\n    return findParent(one) == findParent(two)\n    \ndef findParent(idx):\n    while(idx != graph[idx]):\n        # Path compression - point node to grand-parent instead of parent\n        graph[idx] = graph[graph[idx]]\n        idx = graph[idx];\n    \n    return idx;\n\ni =1;\nwhile i < len(graph):\n    union(i-1,i)\n    i+=1\n    \nprint(graph)\nprint(weights)\n\ni =1;\nwhile i < len(graph):\n    print(find(i-1,i))\n    i+=1\n    \n","sub_path":"src/main/java/com/salpe/graph/disjointsets/WeightedUnionAndFind.py","file_name":"WeightedUnionAndFind.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"577545530","text":"import argparse\r\nimport re\r\nimport random\r\nimport pickle\r\nimport collections\r\n\r\n\r\ndef rand(dic_t, value):\r\n    # Weighted random pick among the successors of 'value'.\r\n    su_m = 0\r\n    for k in dic_t[value]:\r\n        su_m += dic_t[value][k]\r\n    val = random.choice(range(1, su_m + 1))\r\n    su_m = 0\r\n    for k in dic_t[value]:\r\n        su_m += dic_t[value][k]\r\n        if su_m >= val:\r\n            return k\r\n\r\n\r\ndef generator(first_w, le_n, model, out):\r\n    # 'model' and 'out' are file paths; the model maps a word to its successor counts.\r\n    with open(model, 'rb') as f_in:\r\n        words = pickle.load(f_in)\r\n    out_f = open(out, 'w')\r\n    wrd = first_w\r\n    out_f.write(wrd)\r\n    for i in range(le_n - 1):\r\n        if not wrd in words:\r\n            wrd = random.choice(list(words))\r\n        else:\r\n            wrd = rand(words, wrd)\r\n        out_f.write(' ')\r\n        out_f.write(wrd)\r\n    out_f.close()\r\n\r\n\r\ndef main():\r\n    parser = argparse.ArgumentParser()\r\n    parser.add_argument('--model', required=True, help='File where your model is')\r\n    parser.add_argument('--seed', default='', help='First word')\r\n    parser.add_argument('--length', required=True, type=int, help='Length of the new text')\r\n    parser.add_argument('--output', default='stdout', help='Output file')\r\n    args = parser.parse_args()\r\n    with open(args.model, 'rb') as f_in:\r\n        words = pickle.load(f_in)\r\n    if args.seed == '':\r\n        first_word = random.choice(list(words))\r\n    else:\r\n        first_word = args.seed\r\n    generator(first_word, args.length, args.model, args.output)\r\nif __name__ == '__main__':\r\n    main()\r\n\r\n","sub_path":"generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":1478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"596310880","text":"from arl_para.data.data_models import *\nimport copy\nfrom arl_para.test.Constants import *\nfrom arl_para.imaging.params import *\nfrom arl_para.fourier_transforms.fft_support import *\nfrom astropy.wcs.utils import skycoord_to_pixel, pixel_to_skycoord\nfrom arl_para.visibility.operations import *\n\ndef predict_2d_base_para(vis: visibility_for_para, model: image_for_para, **kwargs):\n    '''\n    Predict visibility from the image, producing a new visibility\n    :param vis: the visibility to be predicted\n    :param model:\n    :param kwargs:\n    :return: the predicted visibility\n    '''\n    ny, nx = model.shape\n    FACETS = get_parameter(kwargs, \"FACETS\", 2)\n    uvw_mode, shape, padding, vuvwmap = get_uvw_map_para(vis, model, padding=FACETS)\n    kernel_name, gcf, vkernellist = get_kernel_list_para(vis, model, FACETS, **kwargs)\n    # gcf here has the same size as the padded model, so the fft and padding functions need no changes\n    uvgrid = fft((pad_mid(model.data, int(round(padding * nx))) * gcf).astype(dtype=complex))\n    vis.data['vis'] = convolutional_degrid_para(vkernellist, vis.data['vis'].shape, uvgrid,\n                                                vuvwmap, model.polarisation)\n\n    svis = 
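shift_vis_to_image_para(vis, model, tangent=True, inverse=True)\n    # Editorial worked note: the degridder samples each kernel at the fractional\n    # uv offset computed by frac_coord below; e.g. frac_coord(8, 4, 0.1) maps\n    # x = 8//2 + 0.1*8 = 4.8 to (flx, fracx) = (4, 3), i.e. grid cell 4 at\n    # oversampled sub-position 3 of 4.\n    svis = 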
shift_vis_to_image_para(vis, model, tangent=True, inverse=True)\n    return svis\n\ndef predict_facets_para(vis: visibility_for_para, model: image_for_para, predict_function=predict_2d_base_para, **kwargs):\n    '''\n    Main entry point for predict\n    :param vis:\n    :param model:\n    :param predict_function:\n    :param kwargs:\n    :return:\n    '''\n    if type(vis) == tuple:\n        vis = vis[1]\n    return predict_with_image_iterator_para(vis, model, predict_function=predict_function,\n                                            **kwargs)\n\ndef predict_with_image_iterator_para(vis: visibility_for_para, model: image_for_para,\n                                     predict_function=predict_2d_base_para, **kwargs):\n    '''\n    All 4 polarisations are handled in the same group\n    :param vis:\n    :param model:\n    :param predict_function:\n    :param kwargs:\n    :return:\n    '''\n    result = copy.deepcopy(vis)\n    result.data['vis'][...] = 0.0\n    result = predict_function(result, model, **kwargs)\n    vis.data['vis'][...] += result.data['vis'][...]\n    return vis\n\ndef convolutional_degrid_para(kernel_list, vshape, uvgrid, vuvwmap, pol):\n    kernel_indices, kernels = kernel_list\n    kernel_oversampling, _, gh, gw = kernels[0].shape\n    assert gh % 2 == 0, \"Convolution kernel must have even number of pixels\"\n    assert gw % 2 == 0, \"Convolution kernel must have even number of pixels\"\n    ny, nx = uvgrid.shape\n    vis = numpy.zeros(vshape, dtype='complex')\n\n    y, yf = frac_coord(ny, kernel_oversampling, vuvwmap[:, 1])\n    y -= gh // 2\n    x, xf = frac_coord(nx, kernel_oversampling, vuvwmap[:, 0])\n    x -= gw // 2 # (1,1)\n\n    if len(kernels) > 1:\n        coords = (kernel_indices, x, y, xf, yf)\n        ckernels = numpy.conjugate(kernels)\n        vis[..., pol] = [\n            numpy.sum(uvgrid[yy:yy + gh, xx:xx + gw] * ckernels[kind][yyf, xxf, :, :])\n            for kind, xx, yy, xxf, yyf in zip(*coords)\n        ]\n    else:\n        coords = (x, y, xf, yf)\n        ckernel0 = numpy.conjugate(kernels[0])\n        vis[..., pol] = [\n            numpy.sum(uvgrid[yy:yy + gh, xx:xx + gw] * ckernel0[yyf, xxf, :, :])\n            for xx, yy, xxf, yyf in zip(*coords)\n        ]\n\n    return vis\n\ndef frac_coord(npixel, kernel_oversampling, p):\n    \"\"\" Compute whole and fractional parts of coordinates, rounded to the kernel oversampling\n    \"\"\"\n    assert numpy.array(p >= -0.5).all() and numpy.array(\n        p < 0.5).all(), \"Cellsize is too large: uv overflows grid uv= %s\" % str(p)\n    x = npixel // 2 + p * npixel\n    flx = numpy.floor(x + 0.5 / kernel_oversampling)\n    fracx = numpy.around((x - flx) * kernel_oversampling)\n    return flx.astype(int), fracx.astype(int)\n\ndef shift_vis_to_image_para(vis: visibility_for_para, im: image_for_para, tangent: bool = True, inverse: bool = False):\n    '''\n    Rotate the visibility phasecentre to the image phasecentre, updating the uvw values of vis\n    :param vis:\n    :param im:\n    :param tangent:\n    :param inverse:\n    :return:\n    '''\n    ny, nx = im.shape\n    image_phasecentre = pixel_to_skycoord(nx // 2, ny // 2, im.wcs, origin=1)\n\n    if vis.phasecentre.separation(image_phasecentre).rad > 1e-15:\n        vis = phaserotate_visibility_para(vis, image_phasecentre, tangent=tangent, inverse=inverse)\n        vis.phasecentre = im.phasecentre\n\n\n    return vis\n\n\n\n\n","sub_path":"arl_para/imaging/convolution.py","file_name":"convolution.py","file_ext":"py","file_size_in_byte":4439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"104107633","text":"# -*- coding: utf-8 -*-\n\"\"\"\nAnalyze the distribution of similarity ratings in the data set\n\nCreated on Mon Jan 14 14:00:32 2019\n\n@author: lbechberger\n\"\"\"\nfrom matplotlib import pyplot as plt\n\nimport pickle, argparse\nimport numpy as np\n\nparser = argparse.ArgumentParser(description='Analyzing similarity data')\nparser.add_argument('input_file', help 
= 'pickle file containing the preprocessed data')\nparser.add_argument('-s', '--subset', help = 'the subset of data to use', default = \"all\")\nparser.add_argument('-o', '--output_path', help = 'path where to store the figures', default = './')\nparser.add_argument('-m', '--median', action = 'store_true', help = 'use median instead of mean for matrix aggregation')\nargs = parser.parse_args()\n\nnp.random.seed(42) # fixed random seed to ensure reproducibility\n\n# load the data set from the pickle file\nwith open(args.input_file, \"rb\") as f:\n data_set = pickle.load(f)\n\nitem_ids = list(data_set['items'].keys())\ncategory_names = list(data_set['categories'].keys())\n\nif args.subset == \"all\":\n # use all the similarity ratings that we have \n \n items_of_interest = list(item_ids)\n categories_of_interest = list(category_names)\n\nelif args.subset == \"between\":\n # only use the similarity ratings from the 'between' file\n\n items_of_interest = [] \n \n for idx1, item1 in enumerate(item_ids):\n for idx2, item2 in enumerate(item_ids):\n \n if idx2 <= idx1:\n continue\n \n tuple_id = str(sorted([item1, item2]))\n if tuple_id in data_set['similarities']:\n border = data_set['similarities'][tuple_id]['border']\n between_ratings = data_set['similarities'][tuple_id]['values'][border:]\n if len(between_ratings) > 0:\n items_of_interest.append(item1)\n items_of_interest.append(item2)\n \n items_of_interest = list(set(items_of_interest)) # remove duplicates\n categories_of_interest = list(set(map(lambda x: data_set['items'][x]['category'], items_of_interest)))\n\nelif args.subset == \"within\":\n # only use the similarity ratings from the 'within' file\n items_of_interest = [] \n \n for idx1, item1 in enumerate(item_ids):\n for idx2, item2 in enumerate(item_ids):\n \n if idx2 <= idx1:\n continue\n \n tuple_id = str(sorted([item1, item2]))\n if tuple_id in data_set['similarities']:\n border = data_set['similarities'][tuple_id]['border']\n between_ratings = data_set['similarities'][tuple_id]['values'][:border]\n if len(between_ratings) > 0:\n items_of_interest.append(item1)\n items_of_interest.append(item2)\n \n items_of_interest = list(set(items_of_interest)) # remove duplicates\n categories_of_interest = list(set(map(lambda x: data_set['items'][x]['category'], items_of_interest)))\n \nelif args.subset == \"cats\":\n # consider only the categories from the second study, but use all items within them\n categories_of_interest = [\"C03_Elektrogeräte\", \"C04_Gebäude\", \"C05_Gemüse\", \"C06_Geschirr\", \"C07_Insekten\", \n \"C10_Landtiere\", \"C12_Oberkörperbekleidung\", \"C13_Obst\", \"C14_Pflanzen\", \n \"C19_Straßenfahrzeuge\", \"C21_Vögel\", \"C25_Werkzeug\"]\n items_of_interest = []\n for item in item_ids:\n if data_set['items'][item]['category'] in categories_of_interest:\n items_of_interest.append(item)\n\ncategories_of_interest = sorted(categories_of_interest)\n\n# collect overall statistics\nsimilarity_ranges = []\nsimilarity_stds = []\n\nfor idx1, item1 in enumerate(items_of_interest):\n for idx2, item2 in enumerate(items_of_interest):\n \n if idx2 <= idx1:\n continue\n \n tuple_id = str(sorted([item1, item2])) \n if tuple_id in data_set['similarities']:\n similarity_ratings = data_set['similarities'][tuple_id]['values']\n \n if args.subset == \"between\":\n # remove everything from first study\n border = data_set['similarities'][tuple_id]['border']\n similarity_ratings = similarity_ratings[border:]\n elif args.subset == \"within\":\n # remove everything from second study\n border = 
data_set['similarities'][tuple_id]['border']\n similarity_ratings = similarity_ratings[:border]\n\n if len(similarity_ratings) == 0:\n continue\n\n # analyze range and standard deviation of the ratings \n similarity_range = max(similarity_ratings) - min(similarity_ratings)\n similarity_std = np.std(similarity_ratings)\n #print('{0} range: {1} std: {2}'.format(tuple_id, similarity_range, similarity_std))\n similarity_ranges.append(similarity_range)\n similarity_stds.append(similarity_std)\n \n\n# plot histograms of ranges and standard deviations\nplt.hist(similarity_ranges, bins=21)\nplt.title('Distribution of Similarity Ranges')\nplt.savefig(args.output_path + 'range.png', bbox_inches='tight', dpi=200)\nplt.close()\n\nplt.hist(similarity_stds, bins=21)\nplt.title('Distribution of Similarity Standard Deviations')\nplt.savefig(args.output_path + 'std.png', bbox_inches='tight', dpi=200)\nplt.close()\n\nsimilarity_matrix = []\nfor i in range(len(categories_of_interest)):\n similarity_matrix.append([])\n for j in range(len(categories_of_interest)):\n similarity_matrix[i].append([])\n\n# collect category-level statistics\nfor cat_idx1, cat1 in enumerate(categories_of_interest):\n for cat_idx2, cat2 in enumerate(categories_of_interest):\n \n if cat_idx2 < cat_idx1:\n continue\n \n within_sim_1 = []\n within_sim_2 = []\n between_sim = []\n \n for itm_idx1, item1 in enumerate(data_set['categories'][cat1]['items']):\n for itm_idx2, item2 in enumerate(data_set['categories'][cat1]['items']):\n\n if itm_idx2 <= itm_idx1:\n continue\n \n tuple_id = str(sorted([item1, item2])) \n if tuple_id in data_set['similarities']:\n similarity_ratings = data_set['similarities'][tuple_id]['values']\n \n if args.subset == \"between\":\n # remove everything from first study\n border = data_set['similarities'][tuple_id]['border']\n similarity_ratings = similarity_ratings[border:]\n elif args.subset == \"within\":\n # remove everything from second study\n border = data_set['similarities'][tuple_id]['border']\n similarity_ratings = similarity_ratings[:border]\n \n within_sim_1 += similarity_ratings\n \n for itm_idx1, item1 in enumerate(data_set['categories'][cat2]['items']):\n for itm_idx2, item2 in enumerate(data_set['categories'][cat2]['items']):\n\n if itm_idx2 <= itm_idx1:\n continue\n \n tuple_id = str(sorted([item1, item2])) \n if tuple_id in data_set['similarities']:\n similarity_ratings = data_set['similarities'][tuple_id]['values']\n \n if args.subset == \"between\":\n # remove everything from first study\n border = data_set['similarities'][tuple_id]['border']\n similarity_ratings = similarity_ratings[border:]\n elif args.subset == \"within\":\n # remove everything from second study\n border = data_set['similarities'][tuple_id]['border']\n similarity_ratings = similarity_ratings[:border]\n \n within_sim_2 += similarity_ratings\n \n for itm_idx1, item1 in enumerate(data_set['categories'][cat1]['items']):\n for itm_idx2, item2 in enumerate(data_set['categories'][cat2]['items']):\n\n if itm_idx2 <= itm_idx1:\n continue\n \n tuple_id = str(sorted([item1, item2])) \n if tuple_id in data_set['similarities']:\n similarity_ratings = data_set['similarities'][tuple_id]['values']\n \n if args.subset == \"between\":\n # remove everything from first study\n border = data_set['similarities'][tuple_id]['border']\n similarity_ratings = similarity_ratings[border:]\n elif args.subset == \"within\":\n # remove everything from second study\n border = data_set['similarities'][tuple_id]['border']\n similarity_ratings = 
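similarity_ratings[:border]\n                        # Editorial note: 'border' splits each rating list by study --\n                        # values[:border] are the first ('within') study's ratings and\n                        # values[border:] the second ('between') study's, matching the\n                        # slicing used for the subset options above.\n                        similarity_ratings = 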
similarity_ratings[:border]\n \n between_sim += similarity_ratings\n \n similarity_matrix[cat_idx1][cat_idx1] = within_sim_1\n similarity_matrix[cat_idx2][cat_idx2] = within_sim_2\n similarity_matrix[cat_idx1][cat_idx2] = between_sim\n similarity_matrix[cat_idx2][cat_idx1] = between_sim\n\n# print out average similarity ratings on category-level\nprint(',' + ','.join(map(lambda x: '{0}({1})'.format(x, data_set['categories'][x]['visSim']), categories_of_interest)))\nfor i in range(len(categories_of_interest)):\n mean_list = []\n for j in range(len(categories_of_interest)):\n if args.median:\n mean_list.append(np.median(similarity_matrix[i][j]))\n else:\n mean_list.append(np.mean(similarity_matrix[i][j]))\n print(\"{0}({1})\".format(categories_of_interest[i], data_set['categories'][categories_of_interest[i]]['visSim']) + ',' + ','.join(map(lambda x: str(x), mean_list)))\n\nall_within = []\nall_between = []\n\nfor i in range(len(categories_of_interest)):\n for j in range(len(categories_of_interest)):\n if i == j:\n all_within += similarity_matrix[i][j]\n elif i < j:\n all_between += similarity_matrix[i][j]\n\n# plot histograms of within vs between categories\nplt.hist(all_within, bins=21)\nplt.title('Distribution of Similarity Ranges within Categories')\nplt.savefig(args.output_path + 'within.png', bbox_inches='tight', dpi=200)\nplt.close()\n\nplt.hist(all_between, bins=21)\nplt.title('Distribution of Similarity Ranges between Categories')\nplt.savefig(args.output_path + 'between.png', bbox_inches='tight', dpi=200)\nplt.close()\n","sub_path":"code/mds/preprocessing/analyze_similarity_distribution.py","file_name":"analyze_similarity_distribution.py","file_ext":"py","file_size_in_byte":10727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"508735518","text":"import numpy as np\nimport numpy.linalg as LA\nfrom math import e, pow\n\nclass TargetEKF:\n \"\"\"Implement continuous-continuous EKF for target in stateful fashion\n \"\"\"\n\n def __init__(self, manager, target=None):\n self.manager = manager\n self.target = target\n\n self.old_x = None\n self.old_y = None\n self.x = None\n self.y = None\n\n self.H = np.array([[1.0, 0.0, 0.0]])\n\n self.P_x = np.diag([0.0, 0.0, 0.0])\n self.P_y = np.diag([0.0, 0.0, 0.0])\n\n self.cov_x = np.array([[self.P_x[0,0]], [self.P_x[1,1]], [self.P_x[2,2]]])\n self.cov_y = np.array([[self.P_y[0,0]], [self.P_y[1,1]], [self.P_y[2,2]]])\n \n self.alpha_acc = 0.1 # reciprocal of maneuver(acceleration) time constant. 1/60-lazy turn, 1/20-evasive, 1-atmospheric turbulence\n self.sigma_square_x = 0.1 \n self.sigma_square_y = 0.05\n\n self.filter_initialized_flag = False\n self.ready = False\n\n def is_initialized(self):\n \"\"\"Indicates if EKF is initialized\n\n Returns:\n bool: EKF initalized or not\n \"\"\"\n return self.filter_initialized_flag\n\n def initialize_filter(self, x, y, vx, vy):\n \"\"\"Initializes EKF. 
Meant to run only once at first.\n\n        Args:\n            x (float32): target position x component in inertial frame (m)\n            y (float32): target position y component in inertial frame (m)\n            vx (float32): target velocity vx component in inertial frame (m/s)\n            vy (float32): target velocity vy component in inertial frame (m/s)\n        \"\"\"\n        self.prev_x = x\n        self.prev_y = y\n        self.prev_vx = vx\n        self.prev_vy = vy\n        self.prev_ax = 0.0\n        self.prev_ay = 0.0\n        \n        self.filter_initialized_flag = True\n\n    def add(self, x, y):\n        \"\"\"Add measurements and auxiliary data for filtering\n\n        Args:\n            x (float32): target position x component in inertial frame (m)\n            y (float32): target position y component in inertial frame (m)\n        \"\"\"\n        # make sure filter is initialized\n        if not self.is_initialized():\n            vx = self.target.sprite_obj.velocity[0]\n            vy = self.target.sprite_obj.velocity[1]\n            self.initialize_filter(x, y, vx, vy)\n            return\n\n        # filter is initialized; set ready to true\n        self.ready = True\n\n        # store measurement\n        self.x = x\n        self.y = y\n\n        # perform predictor and filter step\n        self.preprocess()\n        self.estimate_x()\n        self.estimate_y()\n\n        # remember state estimations\n        self.old_x = self.prev_x\n        self.old_y = self.prev_y\n        self.prev_x = self.x\n        self.prev_y = self.y\n        self.prev_vx = self.vx\n        self.prev_vy = self.vy\n\n\n    def preprocess(self):\n        \"\"\"pre compute transition matrix and process noise\"\"\"\n        # set variables for better numerical efficiency\n        dt = self.manager.get_sim_dt()\n        adt = self.alpha_acc * dt # αΔt\n        adt2 = pow(adt, 2)\n        adt3 = pow(adt, 3)\n        a2 = pow(self.alpha_acc, 2)\n        a3 = pow(self.alpha_acc, 3)\n        a4 = pow(self.alpha_acc, 4)\n        eadt = pow(e, (-adt))\n        e2adt = pow(e, (-2*adt))\n\n        # transition matrix\n        self.A = np.array([[1.0, dt, (eadt + adt -1) / a2],\n                           [0.0, 1.0, (1 - eadt)/(self.alpha_acc)],\n                           [0.0, 0.0, eadt]])\n\n        self.q11 = (1 - e2adt + 2*adt + (2/3)*adt3 - 2*adt2 - 4*adt*eadt) / (a4)\n        self.q12 = (e2adt + 1 - 2*eadt + 2*adt*eadt - 2*adt + adt**2) / (a3)\n        self.q13 = (1 - e2adt - 2*adt*eadt) / (a2)\n        self.q22 = (4*eadt - 3 - e2adt + 2*adt) / (a2)\n        self.q23 = (e2adt + 1 -2*eadt) / (self.alpha_acc)\n        self.q33 = (1 - e2adt)\n\n        # process noise\n        self.Q = np.array([[self.q11, self.q12, self.q13],\n                           [self.q12, self.q22, self.q23],\n                           [self.q13, self.q23, self.q33]])\n\n\n    def estimate_x(self):\n        # set R and x appropriate to occlusion state\n        if self.x is None:\n            self.R_x = 10 #100\n            self.x_measured = self.prev_x\n        else:\n            self.R_x = 1 #1\n            self.x_measured = self.x\n\n        self.Q_x = self.sigma_square_x * self.Q\n        \n        # form state vector\n        state_est = np.array([[self.prev_x], [self.prev_vx], [self.prev_ax]])\n\n        # predict\n        state_est_pre = self.A @ state_est\n        P_pre = self.A @ self.P_x @ self.A.T + self.Q_x\n        S = self.H @ P_pre @ self.H.T + self.R_x\n        K = P_pre @ self.H.T @ LA.pinv(S)\n\n        # correct\n        state_est = state_est_pre + K @ (self.x_measured - self.H @ state_est_pre)\n        self.P_x = (np.eye(3) - K @ self.H) @ P_pre\n        self.cov_x = np.array([[self.P_x[0,0]], [self.P_x[1,1]], [self.P_x[2,2]]])\n\n        # extract estimations from state vector\n        self.x = state_est.flatten()[0]\n        self.vx = state_est.flatten()[1]\n        self.ax = state_est.flatten()[2]\n\n\n    def estimate_y(self):\n        # set R and y appropriate to occlusion state\n        if self.y is None:\n            self.R_y = 10 #1000\n            self.y_measured = self.prev_y\n        else:\n            self.R_y = 1 #10\n            self.y_measured = self.y\n\n        self.Q_y = self.sigma_square_y * self.Q\n\n        # form state vector\n        state_est = np.array([[self.prev_y], [self.prev_vy], [self.prev_ay]])\n\n        # predict\n        
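state_est_pre = self.A @ state_est\n        # Editorial gloss (standard Kalman recursion, matching estimate_x above):\n        #   x-  = A x            (state prediction)\n        #   P-  = A P A^T + Q    (covariance prediction)\n        #   K   = P- H^T (H P- H^T + R)^-1\n        #   x   = x- + K (z - H x-)\n        #   P   = (I - K H) P-\n        # predict\n        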
state_est_pre = self.A @ state_est\n        P_pre = self.A @ self.P_y @ self.A.T + self.Q_y\n        S = self.H @ P_pre @ self.H.T + self.R_y\n        K = P_pre @ self.H.T @ LA.pinv(S)\n\n        # correct\n        state_est = state_est_pre + K @ (self.y_measured - self.H @ state_est_pre)\n        self.P_y = (np.eye(3) - K @ self.H) @ P_pre\n        self.cov_y = np.array([[self.P_y[0,0]], [self.P_y[1,1]], [self.P_y[2,2]]])\n\n        # extract estimations from state vector\n        self.y = state_est.flatten()[0]\n        self.vy = state_est.flatten()[1]\n        self.ay = state_est.flatten()[2]\n    \n\n    def get_estimated_state(self):\n        \"\"\"return estimated state information.\n\n        Returns:\n            tuple(float32, float32, float32, float32, float32, float32): (x, y, vx, vy, ax, ay)\n        \"\"\"\n        if self.ready:\n            return (self.x, self.vx, self.ax, self.y, self.vy, self.ay)\n        else:\n            return (self.prev_x, self.prev_vx, self.prev_ax, self.prev_y, self.prev_vy, self.prev_ay)\n","sub_path":"vbot/experiments/exp_ell/target_ekf.py","file_name":"target_ekf.py","file_ext":"py","file_size_in_byte":6535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"630701738","text":"import inspect\nimport imp\nimport sys\nimport types\nimport pico\n\ndef module_dict(module):\n    module_dict = {}\n    pico_exports = getattr(module, 'pico_exports', None)\n    members = inspect.getmembers(module)\n    def function_filter(x):\n        (name, f) = x\n        return (inspect.isfunction(f) or inspect.ismethod(f)) \\\n            and (pico_exports == None or name in pico_exports) \\\n            and f.__module__ == module.__name__ \\\n            and not name.startswith('_') \\\n            and not hasattr(f, 'private')\n\n    def class_filter(x):\n        (name, f) = x\n        return inspect.isclass(f) \\\n            and (issubclass(f, pico.Pico) or issubclass(f, pico.object)) \\\n            and (not pico_exports or name in pico_exports) \\\n            and f.__module__ == module.__name__ \\\n            and not name.startswith('_') \\\n            and not hasattr(f, 'private')\n    class_defs = [class_dict(cls) for (name, cls) in filter(class_filter, members)]\n    function_defs = [func_dict(f, name) for (name, f) in filter(function_filter, members)]\n    module_dict['classes'] = class_defs\n    module_dict['functions'] = function_defs\n    module_dict['__doc__'] = module.__doc__\n    return module_dict\n\ndef class_dict(cls):\n    def method_filter(x):\n        (name, f) = x\n        return (inspect.isfunction(f) or inspect.ismethod(f)) \\\n            and (not name.startswith('_') or name == '__init__') \\\n            and not hasattr(f, 'private')\n    class_dict = {'__class__': cls.__name__}\n    class_dict['name'] = cls.__name__\n    methods = filter(method_filter, inspect.getmembers(cls))\n    name, f = methods.pop(0)\n    class_dict['__init__'] = func_dict(f, name)\n    class_dict['functions'] = [func_dict(f, name) for (name, f) in methods]\n    class_dict['__doc__'] = cls.__doc__\n    return class_dict\n\ndef func_dict(f, name):\n    func_dict = {}\n    func_dict['name'] = name\n    func_dict['cache'] = ((hasattr(f, 'cacheable') and f.cacheable))\n    func_dict['stream'] = ((hasattr(f, 'stream') and f.stream))\n    func_dict['protected'] = ((hasattr(f, 'protected') and f.protected))\n    a = inspect.getargspec(f)\n    args = list(reversed(map(None, reversed(a.args), reversed(a.defaults or [None]))))\n    func_dict['args'] = filter(lambda x: x[0] and not (x[0].startswith('pico_') or x[0] == 'self'), args)\n    func_dict['doc'] = f.__doc__\n    return func_dict\n\ndef load(module_name, RELOAD=False):\n    if module_name == 'pico':\n        return sys.modules['pico']\n    if module_name == 'pico.modules':\n        if module_name in sys.modules:\n            return sys.modules[module_name]\n        else:\n            return 
sys.modules[__name__]\n modules_path = './'\n if module_name in sys.modules and RELOAD: \n del sys.modules[module_name]\n if not sys.path.__contains__(modules_path):\n sys.path.insert(0, modules_path)\n m = __import__(module_name)\n m = sys.modules[module_name]\n if RELOAD:\n m = reload(m)\n if not (hasattr(m, 'pico') and m.pico.magic == pico.magic):\n raise ImportError('This module has not imported pico and therefore is not picoable!')\n return m\n\ndef module_proxy(cls):\n module_name = cls.__module__\n module = imp.new_module(module_name)\n module.pico = pico\n def method_filter(x):\n (name, f) = x\n return (inspect.isfunction(f) or inspect.ismethod(f)) \\\n and (not name.startswith('_') or name == '__init__') \\\n and not hasattr(f, 'private')\n methods = filter(method_filter, inspect.getmembers(cls))\n for (name, f) in methods:\n setattr(module, name, f)\n return module\n\njson_dumpers = {\n types.ModuleType: module_dict\n}\n","sub_path":"pico/modules.py","file_name":"modules.py","file_ext":"py","file_size_in_byte":3597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"8378627","text":"\"\"\"\nThe function that returns the epitope distance between two strands\n\"\"\"\n\ndef get_epitope_distance(seq, ref_seq, matrix_dict, weights):\n num = 0\n denom = 0\n for i in range(0, len(seq) - 1):\n seq_letter = seq[i]\n ref_letter = ref_seq[i]\n if (seq_letter == '-'):\n seq_letter = '*'\n if (ref_letter == '-'):\n ref_letter = '*'\n\n num += weights[i] * (matrix_dict[seq_letter, ref_letter])\n denom += weights[i]\n # print('seq[i]', seq[i], 'ref_seq[i]', ref_seq[i], 'weights[i]', weights[i])\n\n return num/denom\n","sub_path":"Week 4/src/epitope_dist.py","file_name":"epitope_dist.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"174444031","text":"import random\n\ndef airport_generator():\n airports = []\n\n #open() will open a file and return a corresponding file object . 
Refer here for information on arguments and parameters: https://www.programiz.com/python-programming/methods/built-in/open\r\n\r\n    airportsfile = open(r'airports2.txt')\r\n    for line in airportsfile:\r\n        airports.append(line.split(','))\r\n    #print(airports)\r\n\r\n    randomAirport = random.choice(airports)\r\n\r\n    return randomAirport\r\n","sub_path":"UnitedAirportGenerator.py","file_name":"UnitedAirportGenerator.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"195534408","text":"import sys\nN = int(input())\narr = []\n\ndef get_unique_list(seq):\n    seen = []\n    return [x for x in seq if x not in seen and not seen.append(x)]\n\nfor i in range(N):\n    arr.append(list(map(int, input().split())))\n\nif N == 1:\n    print(1)\n    sys.exit()\n\nC = []\nfor i in range(N):\n    for j in range(N):\n        if i < j:\n            x = arr[i][0]-arr[j][0]\n            y = arr[i][1]-arr[j][1]\n            C.append([x, y])\n\n\nC = get_unique_list(C)\nS = [0 for i in range(len(C))]\n\nfor c in range(len(C)):\n    tmp = [[[arr[0][0], arr[0][1]]]]\n    for i in range(1, N):\n        l = len(tmp)\n        for j in range(l):\n            l_2 = len(tmp[j])\n            for k in range(l_2):\n                x_t = tmp[j][k][0] - arr[i][0]\n                y_t = tmp[j][k][1] - arr[i][1]\n                arr_i = [arr[i][0], arr[i][1]]\n                if C[c][0] == 0 and y_t%C[c][1] == 0:\n                    if x_t == 0:\n                        tmp[j].append(arr_i)\n                        break\n                    elif j == l - 1:\n                        tmp.append([arr_i])\n                    \n                elif C[c][1] == 0 and x_t%C[c][0] == 0:\n                    if y_t == 0:\n                        tmp[j].append(arr_i)\n                        break\n                    elif j == l - 1:\n                        tmp.append([arr_i])\n                else:\n                    if x_t*C[c][1] == y_t*C[c][0]:\n                        tmp[j].append(arr_i)\n                        break\n                    elif j == l - 1:\n                        tmp.append([arr_i])\n                    \n    S[c] = len(tmp)\n    \nprint(min(S))\n","sub_path":"ARC/diverta2/B.py","file_name":"B.py","file_ext":"py","file_size_in_byte":1599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"41344372","text":"from typing import List\nfrom math import *\n\n\nclass Solution:\n    def minimizedMaximum(self, n: int, Q: List[int]) -> int:\n        beg, end = 0, max(Q)\n\n        while beg + 1 < end:\n            mid = (beg + end)//2\n            if sum(ceil(i/mid) for i in Q) <= n:\n                end = mid\n            else:\n                beg = mid\n\n        return end\n","sub_path":"search/2064_minmized_maximum_of_distributed_to_any_store.py","file_name":"2064_minmized_maximum_of_distributed_to_any_store.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"94582963","text":"import pytest\n\nfrom matryoshka_tester.helpers import ContainerBuild\n\n\n@pytest.mark.parametrize(\n    \"dockerfile_build\",\n    (\n        build.to_pytest_param()\n        for build in (\n            ContainerBuild(\n                name=\"amidst\",\n                pre_build_steps=(\n                    \"git clone -b v4.6 \"\n                    \"https://github.com/toolbox4minecraft/amidst\"\n                ),\n            ),\n            ContainerBuild(\n                name=\"maven\",\n                pre_build_steps=(\n                    \"git clone -b maven-3.8.1 https://github.com/apache/maven\"\n                ),\n                marks=pytest.mark.xfail(\n                    reason=\"environment variables are not set correctly\"\n                ),\n            ),\n            ContainerBuild(\n                name=\"pdftk\",\n                pre_build_steps=(\n                    \"git clone -b v3.2.2 \"\n                    \"https://gitlab.com/pdftk-java/pdftk.git\"\n                ),\n            ),\n            ContainerBuild(\n                name=\"k3sup\",\n                pre_build_steps=(\n                    \"git clone -b 0.10.2 https://github.com/alexellis/k3sup\"\n                ),\n            ),\n        )\n    ),\n    indirect=[\"dockerfile_build\"],\n)\ndef test_dockerfile_build(host, container_runtime, dockerfile_build):\n    cmd = host.run_expect([0], container_runtime.build_command)\n    img_id = container_runtime.get_image_id_from_stdout(cmd.stdout)\n\n    host.run_expect(\n        [0], 
f\"{container_runtime.runner_binary} run --rm {img_id}\"\n )\n","sub_path":"test_multistage.py","file_name":"test_multistage.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"290675183","text":"\"\"\"\nTest the time of pipeline or modules(yolov5s and alphapose) for a single person of multi ways!\nIn main process of bs = multi ways forward.\n\n\"\"\"\n\nimport os\nimport re\nimport sys\nimport cv2\nimport time\nimport argparse\nimport torchvision\nfrom torchvision import transforms\nimport numpy as np\n\nimport torch \nimport torch.nn as nn\nimport torch.utils.data as Data\n\nsys.path.append(os.path.join(os.path.dirname(__file__), '..'))\nsys.path.append(os.path.join(os.path.dirname(__file__), '..', '..'))\n\nfrom detector.yolo_cfg import cfg\nfrom detector.yolov5.utils.general import non_max_suppression\n\nfrom alphapose.models import builder\nfrom alphapose.utils.config import update_config\nfrom alphapose.utils.transforms import heatmap_to_coord_simple\nfrom alphapose.utils.pPose_nms import pose_nms\n\nfrom process_utils import scale_coords, test_transform, plot_pose_res\n\n\n\"\"\"----------------------------- Test Time options -----------------------------\"\"\"\nparser = argparse.ArgumentParser(description='Test the time of pipeline or modules(yolov5s and alphapose) for a single person of multi ways')\nparser.add_argument('--cfg', \n type=str, \n default=\"./configs/coco/resnet/256x192_res50_lr1e-3_1x.yaml\", \n help='experiment configure file name')\nparser.add_argument('--checkpoint', \n type=str, \n default=\"./pretrained_models/fast_res50_256x192.pth\", \n help='checkpoint file name')\nparser.add_argument('--imgsdir', \n type=str, \n default=\"./examples/batch_imgs_multi_persons/singleway\", \n help='the directory of input image')\nparser.add_argument('--flag', \n type=str, \n default='pipeline', \n choices=['pipeline', 'modules'], \n help='test the all pipeline or all sub module')\nparser.add_argument('--inp_size', \n type=tuple, \n default=(640, 384), \n help='the input size of model')\nparser.add_argument('--pose_res', \n action='store_true', \n help='test the pose results')\nparser.add_argument('--FP16', \n action='store_true', \n help='whether use FP16')\nargs = parser.parse_args()\ncfg = update_config(args.cfg)\nargs.device = torch.device(\"cuda:0\")\ntorch.backends.cudnn.benchmark = True\nprint(\"Input Size: \", args.inp_size)\nif args.FP16:\n print(\"Forward(yolov5s_alphapose) by FP16\")\n\n\ndef cal_avg_time(times_ix):\n assert isinstance(times_ix, list), \"The inpust must be a list\"\n times = times_ix[:]\n max_index = times.index(max(times))\n del times[max_index]\n\n min_index = times.index(min(times))\n del times[min_index]\n \n time_avg = sum(times) / len(times)\n return time_avg\n\n\ndef letterbox_image(img, inp_dim=(640, 384)):\n # BGR--->RGB, (384, 640, 3)--->(3, 384, 640)\n img = torch.transpose(img, 1, 2)\n img = torch.transpose(img, 0, 1)\n dst = torch.empty_like(img[0])\n dst.copy_(img[0], non_blocking=True)\n img[0].copy_(img[2], non_blocking=True)\n img[2].copy_(dst, non_blocking=True)\n '''resize image with unchanged aspect ratio using padding'''\n transform = transforms.Compose([transforms.Resize(size=(360, 640)), \n transforms.Pad(padding=(0, 12), fill=128)])\n return transform(img)\n\n\ndef load_det_model(opt, cfg):\n weights = \"../yolov5/yolov5s.pt\"\n from detector.yolov5.models.experimental import attempt_download, attempt_load # scoped to avoid 
circular import\n det_model = attempt_load(weights, map_location=None)\n if args.FP16:\n det_model.half()\n det_model.to(args.device)\n det_model.eval()\n return det_model\n\n\ndef batched_nms(prediction, conf_thres=0.25, iou_thres=0.45, max_det=100):\n output = []\n bs, boxes_num, _ = prediction.shape\n idxs = torch.from_numpy(np.arange(bs)).unsqueeze(-1).expand(bs, boxes_num)\n\n xc = prediction[..., 4] > conf_thres # candidates\n \n x = prediction[xc] # confidence\n idxs = idxs[xc]\n\n boxes, scores = x[:, :4], x[:, 4]\n i = torchvision.ops.batched_nms(boxes, scores, idxs, iou_thres) # NMS\n \n res = x[i]\n idxs_batch = idxs[i]\n for j in range(bs):\n output.append(res[idxs_batch == j])\n return output\n\n\ndef batched_det_post_to_pose_data(prediction, orig_img, im_dim, im_name):\n pose_inps = []\n pose_infor = []\n pose_index = []\n \n for i, (pred, y, z) in enumerate(zip(prediction, orig_img, im_name)):\n pred[:, :4] = scale_coords(args.inp_size, pred[:, :4], (1280, 720)).round()\n det = pred.cpu()\n \n boxes = det[:, :4]\n scores = det[:, 4:5]\n labels = det[:, 5:6]\n flag = labels[:, 0] == 1. # select the person\n\n ids = torch.zeros(scores.shape)\n\n inps_idx = torch.zeros(boxes.size(0), 3, 256, 192)\n cropped_boxes = torch.zeros(boxes.size(0), 4)\n # det_res = (y, z, boxes[flag], scores[flag], ids[flag], inps_idx[flag], cropped_boxes[flag])\n inps = inps_idx[flag]\n for j, box in enumerate(boxes[flag]):\n inps[j], cropped_box = test_transform(y, box)\n cropped_boxes[j] = torch.FloatTensor(cropped_box)\n pose_infor.append((y, z, boxes[flag], scores[flag], ids[flag], cropped_boxes[flag]))\n pose_inps.append(inps)\n pose_index.extend([i for j in range(len(boxes[flag]))])\n\n pose_inps = torch.cat(pose_inps, dim=0)\n pose_index = torch.tensor(pose_index)\n return pose_inps, pose_infor, pose_index\n\n\ndef detection(data, det_model):\n img, orig_img, im_name, im_dim = data\n with torch.no_grad():\n prediction = det_model(img)\n \n prediction = batched_nms(prediction)\n pose_inps, pose_infor, pose_index = batched_det_post_to_pose_data(prediction, orig_img, im_dim, im_name)\n\n return pose_inps, pose_infor, pose_index\n\n\n# Stage2: pose estimation(alphapose)\ndef load_pose_model():\n pose_model = builder.build_sppe(cfg.MODEL, preset_cfg=cfg.DATA_PRESET)\n print('Loading pose model from %s...' 
% (args.checkpoint,))\n    pose_model.load_state_dict(torch.load(args.checkpoint, map_location=args.device))\n    if args.FP16:\n        pose_model.half()\n    pose_model.to(args.device)\n    pose_model.eval() \n    return pose_model \n\n\ndef pose_post(hm_data, pose_infor):\n    orig_img, im_name, boxes, scores, ids, cropped_boxes = pose_infor\n    hm_data = hm_data.cpu()\n    \n    orig_img = np.array(orig_img, dtype=np.uint8)[:, :, ::-1]\n\n    eval_joints = [*range(0,17)]\n    hm_size = (64, 48)\n    min_box_area = 0\n\n    pose_coords = []\n    pose_scores = []\n    \n    for i in range(hm_data.shape[0]):\n        bbox = cropped_boxes[i].tolist()\n        pose_coord, pose_score = heatmap_to_coord_simple(hm_data[i][eval_joints], bbox, hm_shape=hm_size, norm_type=None)\n        pose_coords.append(torch.from_numpy(pose_coord).unsqueeze(0))\n        pose_scores.append(torch.from_numpy(pose_score).unsqueeze(0))\n    \n    preds_img = torch.cat(pose_coords)\n    preds_scores = torch.cat(pose_scores)\n    \n    boxes, scores, ids, preds_img, preds_scores, pick_ids = \\\n        pose_nms(boxes, scores, ids, preds_img, preds_scores, min_box_area)\n\n    _result = []\n    for k in range(len(scores)):\n        _result.append(\n            {\n                'keypoints':preds_img[k],\n                'kp_score':preds_scores[k],\n                'proposal_score': torch.mean(preds_scores[k]) + scores[k] + 1.25 * max(preds_scores[k]),\n                'idx':ids[k],\n                'box':[boxes[k][0], boxes[k][1], boxes[k][2]-boxes[k][0],boxes[k][3]-boxes[k][1]] \n            }\n        )\n\n    result = {\n        'imgname': im_name,\n        'result': _result\n    }\n    return result \n\n\ndef pose(pose_inps, pose_infor, pose_model, pose_index):\n    pose_out = []\n    with torch.no_grad():\n        pose_inps = pose_inps.to(args.device)\n        if args.FP16:\n            pose_inps = pose_inps.half()\n        hm = pose_model(pose_inps)\n    \n    if args.FP16:\n        hm = hm.float()\n    \n    for i, infor in enumerate(pose_infor):\n        hm_data = hm[pose_index == i]\n        if hm_data.dim() == 3:\n            hm_data = hm_data.unsqueeze(0)\n        res = pose_post(hm_data, infor)\n        pose_out.append(res)\n    return pose_out\n    \n\ndef preprocess(orig_im):\n    # PadResize\n    img = (letterbox_image(orig_im, args.inp_size))\n    #(0, 255)--->(0, 1), (3, 384, 640)--->(1, 3, 384, 640)\n    img = img.div(255.).unsqueeze(0)\n    return img\n\n\nstream_length = 4\nstreams = [torch.cuda.Stream() for i in range(stream_length)]\n\n\ndef all_pipeline_time(det_model, pose_model):\n    times = []\n    for i in range(256):\n        s = time.time()\n        \n        rootdir = args.imgsdir\n        imgnames = [x for x in os.listdir(rootdir) if x.endswith('png') or x.endswith('jpg')]\n        imgpaths = [os.path.join(rootdir, imgname) for imgname in imgnames] \n        orig_im_batch = [cv2.imread(imgpath) for imgpath in imgpaths]\n        \n        s = time.time()\n        dims_batch = [(orig_im.shape[1], orig_im.shape[0]) for orig_im in orig_im_batch]\n        img_batch = []\n        for idx, orig_im in enumerate(orig_im_batch):\n            with torch.cuda.stream(streams[idx % stream_length]):\n                if args.FP16:\n                    img_batch.append(preprocess(torch.from_numpy(orig_im).pin_memory().to(args.device, non_blocking=True).to(torch.half)))\n                else:\n                    img_batch.append(preprocess(torch.from_numpy(orig_im).pin_memory().to(args.device, non_blocking=True).to(torch.float)))\n\n        torch.cuda.synchronize()\n        \n        img = torch.cat(img_batch, dim=0)\n        data = (img, orig_im_batch, imgnames, dims_batch)\n        \n        pose_inps, pose_infor, pose_index = detection(data, det_model)\n        pose_out = pose(pose_inps, pose_infor, pose_model, pose_index)\n        e = time.time()\n        time_once = e - s\n        times.append(time_once)\n        print('iter: %d, time: %.6f' % (i, time_once))\n    \n    avg_times = cal_avg_time(times)\n    print('avg_times: %.6f' % (avg_times))\n    \n\ndef all_module_time(det_model, pose_model):\n    
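times_ix = []\n    # Editorial note on the trimmed averaging used for all timings here:\n    # cal_avg_time drops the single slowest and single fastest run, then means\n    # the rest, e.g. cal_avg_time([0.9, 1.0, 1.1, 5.0]) -> mean([1.0, 1.1]) = 1.05.\n    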
times_ix = []\n    prof_load_det_data = []\n    prof_det_forward = []\n    prof_det_post = []\n    prof_load_pose_data = []\n    prof_pose_forward = []\n    prof_pose_post = []\n    \n    for i in range(256):\n        rootdir = args.imgsdir\n        imgnames = [x for x in os.listdir(rootdir) if x.endswith('png') or x.endswith('jpg')]\n        imgpaths = [os.path.join(rootdir, imgname) for imgname in imgnames] \n\n        orig_im_batch = [cv2.imread(imgpath) for imgpath in imgpaths]\n        \n        t0 = time.time()\n        dims_batch = [(orig_im.shape[1], orig_im.shape[0]) for orig_im in orig_im_batch]\n        img_batch = []\n        for idx,orig_im in enumerate(orig_im_batch):\n            with torch.cuda.stream(streams[idx%stream_length]):\n                if args.FP16:\n                    img_batch.append(preprocess(torch.from_numpy(orig_im).pin_memory().to(args.device, non_blocking=True).to(torch.half)))\n                else:\n                    img_batch.append(preprocess(torch.from_numpy(orig_im).pin_memory().to(args.device, non_blocking=True).to(torch.float)))\n\n        torch.cuda.synchronize()\n\n        img = torch.cat(img_batch, dim=0)\n        data = (img, orig_im_batch, imgnames, dims_batch)\n        torch.cuda.synchronize()\n        t1 = time.time()\n        prof_load_det_data.append(t1 - t0)\n        \n        img, orig_img, im_name, im_dim = data\n        with torch.no_grad():\n            prediction = det_model(img)\n        torch.cuda.synchronize()\n        t2 = time.time()\n        prof_det_forward.append(t2 - t1)\n        \n        det_out = []\n        prediction = batched_nms(prediction)\n        pose_inps, pose_infor, pose_index = batched_det_post_to_pose_data(prediction, orig_img, im_dim, im_name)\n        t3 = time.time()\n        prof_det_post.append(t3 - t2)\n        \n        torch.cuda.synchronize()\n        t4 = time.time()\n        prof_load_pose_data.append(t4 - t3)\n        \n        pose_out = []\n        with torch.no_grad():\n            pose_inps = pose_inps.to(args.device)\n            if args.FP16:\n                pose_inps = pose_inps.half()\n            hm = pose_model(pose_inps)\n        torch.cuda.synchronize()\n        t5 = time.time()\n        prof_pose_forward.append(t5 - t4)\n        \n        if args.FP16:\n            hm = hm.float()\n        \n        for j, infor in enumerate(pose_infor):\n            hm_data = hm[pose_index == j]\n            if hm_data.dim() == 3:\n                hm_data = hm_data.unsqueeze(0)\n            res = pose_post(hm_data, infor)\n            pose_out.append(res)\n        \n        torch.cuda.synchronize()\n        t6 = time.time()\n        prof_pose_post.append(t6 - t5)\n        times_ix.append((t6 - t0))\n    \n    time_avg = cal_avg_time(times_ix)\n    t1_avg = cal_avg_time(prof_load_det_data)\n    t2_avg = cal_avg_time(prof_det_forward)\n    t3_avg = cal_avg_time(prof_det_post)\n    t4_avg = cal_avg_time(prof_load_pose_data)\n    t5_avg = cal_avg_time(prof_pose_forward)\n    t6_avg = cal_avg_time(prof_pose_post)\n    \n    print(\"\"\"\n          *************************************************\n          pipeline_time_avg: {:.6f} s,\n          prof_load_det_data_avg: {:.6f} s,\n          prof_det_forward_avg: {:.6f} s,\n          prof_det_post_avg: {:.6f} s,\n          prof_load_pose_data_avg: {:.6f} s, \n          prof_pose_forward_avg: {:.6f} s, \n          prof_pose_post_avg: {:.6f} s, \n          \"\"\".format(time_avg, t1_avg, t2_avg, t3_avg, t4_avg, t5_avg, t6_avg))\n\n\ndef test_pose(det_model, pose_model):\n    rootdir = './examples/batch_imgs_multi_persons/singleway'\n    imgnames = [x for x in os.listdir(rootdir) if x.endswith('png') or x.endswith('jpg')]\n    imgpaths = [os.path.join(rootdir, imgname) for imgname in imgnames] \n\n    orig_im_batch = [cv2.imread(imgpath) for imgpath in imgpaths]\n    \n    dims_batch = [(orig_im.shape[1], orig_im.shape[0]) for orig_im in orig_im_batch]\n    img_batch = [preprocess(torch.from_numpy(orig_im).pin_memory().to(args.device, non_blocking=True)) for orig_im in orig_im_batch]\n    \n    img = torch.cat(img_batch, dim=0)\n    if args.FP16:\n        img = img.half()\n    data = (img, orig_im_batch, imgnames, dims_batch)\n    \n    pose_inps, 
pose_infor, pose_index = detection(data, det_model)\n pose_out = pose(pose_inps, pose_infor, pose_model, pose_index)\n \n plot_pose_res(imgpaths[0], pose_out[0], './res.jpg')\n\n \nif __name__ == \"__main__\":\n det_model = load_det_model(args, cfg)\n pose_model = load_pose_model()\n \n if args.pose_res:\n test_pose(det_model, pose_model)\n else:\n if args.flag == 'pipeline':\n call_func = all_pipeline_time # test the time of all pipeline \n else: \n call_func = all_module_time # test the time of six module time\n \n results = call_func(det_model, pose_model) \n","sub_path":"cv/pose/alphapose/pytorch/scripts/multi_persons_multi_ways_batch.py","file_name":"multi_persons_multi_ways_batch.py","file_ext":"py","file_size_in_byte":15048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"63452810","text":"# -*- coding: utf-8 -*-\nfrom odoo import models, fields, api\n\nclass radio(models.Model):\n _name = 'dribot.radio'\n _description = 'Radio Info' \n _rec_name = \"radio_id\"\n _inherit = ['mail.thread', 'mail.activity.mixin']\n\n radio_serial_number = fields.Char('Radio Serial Number', required=True,\n track_visibility='onchange')\n\n radio_id = fields.Char('Radio Id', required=True,\n track_visibility='onchange')\n\n radio_phone_number = fields.Char('Device Phone Number',\n track_visibility='onchange')\n\n radio_profile_id = fields.Many2one(\n comodel_name='dribot.radio.profile',\n string='Radio Profile', \n ondelete='set null',\n track_visibility='onchange')\n\n module_serial_number_id = fields.Many2one(\n comodel_name='stock.production.lot', \n string='Radio Module Serial Number', \n ondelete='restrict',\n track_visibility='onchange')\n\n last_error = fields.Text('Last Error received by Provider')\n\n last_activity_datetime = fields.Datetime('Last Activity Datetime')\n\n last_activity_performed = fields.Char('Last Activity Performed')\n\n action = fields.Selection(\n selection=[('activate_radio', 'Activate Radio'),\n ('deactivate_radio', 'Deactivate Radio'),\n ('check_status', 'Check Status'),\n ('sync', 'Sync with provider'),\n ('none', 'None'),\n ],\n track_visibility='onchange',\n string=\"Action\")\n \n radio_status = fields.Selection(\n selection=[('active', 'Active'),\n ('deactive', 'Deactive'),\n ('pending_activation', 'Pending Activation'),\n ('pending_deactivation', 'Pending Deactivation'),\n ('pending_account_update', 'Pending Account Update'),\n ('pending_esn_change', 'Pending ESN Change'),\n ('pending_mdn_change', 'Pending MDN Change'),\n ('pending_mdn_reconnect', 'Pending MDN Reconnect'),\n ('pending_rate_plan_change', 'Pending Rate Plan Change'),\n ('pending_restore', 'Pending Restore'),\n ('pending_suspend', 'Pending Suspend'),\n ('preactive', 'Preactive'),\n ('suspend', 'Suspend'),\n ('unknown_device_state', 'Unknown Device State'),\n ('error_state', 'Error State or Pending too long'),\n ],\n track_visibility='onchange',\n string=\"Status\")","sub_path":"odoo-addons/ManageDribotUnit/models/radio.py","file_name":"radio.py","file_ext":"py","file_size_in_byte":2766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"393012545","text":"#coding=utf-8\n\nimport threading, random\n\nPCB = threading.local()\ndef rand_pcb(i):\n\tPCB = {}\n\tPCB['duration'] = random.randint(1, 20)\n\tPCB['priority'] = random.randint(1, 10)\n\tPCB['wait_time'] = 0\n\tPCB['pid'] = i\n\tPCB['not_been_run'] = True\n\treturn PCB\ndef son_thread():\n\tthread_pcb = 
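LPCB[turn]\n\t# Editorial worked example of the scheduler below: PCBs run in ascending\n\t# 'priority' order, and while one runs every not-yet-run PCB accrues that\n\t# duration as wait_time; e.g. durations (3, 5) give waits (0, 3), avg 1.5.\n\tthread_pcb = 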
LPCB[turn]\n\tprint(threading.current_thread().name, \"sum: \", thread_pcb['duration'])\n\tfor i in range(now_time, now_time + thread_pcb['duration']):\n\t\tprint(' ', threading.current_thread().name, ': ', i + 1)\n\nnow_time = 0\nrunning_time = 0\nturn = -1\nLPCB = []\nfor i in range(20):\n\tLPCB.append(rand_pcb(i))\n#print(list(map(lambda x:x['priority'], LPCB)))\nLt = []\nfor i in range(20):\n\tt = threading.Thread(target = son_thread)\n\tLt.append(t)\n\nfor x in sorted(LPCB, key = lambda x:x['priority']):\n\tturn = x['pid']\n\tLt[turn].start()\n\tLt[turn].join()\n\tLPCB[turn]['not_been_run'] = False\n\tnow_time += x['duration']\n\tfor y in list(filter(lambda d:d['not_been_run'], LPCB)):\n\t\ty['wait_time'] += x['duration']\nsum_wait_time = 0\nfor x in LPCB:\n\tsum_wait_time += x['wait_time']\nprint('Priority avr_wait_time: ', sum_wait_time/20)\n","sub_path":"Priority.py","file_name":"Priority.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"245804823","text":"# Copyright 2014 OpenStack Foundation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n#\n\n\"\"\"nuage_extraroute\n\nRevision ID: 10cd28e692e9\nRevises: 1b837a7125a9\nCreate Date: 2014-05-14 14:47:53.148132\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '10cd28e692e9'\ndown_revision = '1b837a7125a9'\n\n\nfrom alembic import op\nimport sqlalchemy as sa\n\nfrom neutron.db import migration\n\n\ndef upgrade(active_plugins=None, options=None):\n op.create_table(\n 'routerroutes_mapping',\n sa.Column('router_id', sa.String(length=36), nullable=False),\n sa.Column('nuage_route_id', sa.String(length=36), nullable=True),\n sa.ForeignKeyConstraint(['router_id'], ['routers.id'],\n ondelete='CASCADE'),\n )\n # This table might already exist as it might have been created\n # if another plugin was configured before the nuage one\n if op.get_bind().engine.dialect.name == 'postgresql':\n migration.create_table_if_not_exist_psql(\n 'routerroutes',\n (\"(destination VARCHAR(64) NOT NULL,\"\n \"nexthop VARCHAR(64) NOT NULL,\"\n \"router_id VARCHAR(36) NOT NULL,\"\n \"PRIMARY KEY (destination, nexthop, router_id),\"\n \"FOREIGN KEY (router_id) REFERENCES routers (id) \"\n \"ON DELETE CASCADE ON UPDATE CASCADE)\"))\n else:\n op.execute(\"CREATE TABLE IF NOT EXISTS routerroutes( \"\n \"destination VARCHAR(64) NOT NULL,\"\n \"nexthop VARCHAR(64) NOT NULL,\"\n \"router_id VARCHAR(36) NOT NULL,\"\n \"PRIMARY KEY (destination, nexthop, router_id),\"\n \"FOREIGN KEY (router_id) REFERENCES routers (id) \"\n \"ON DELETE CASCADE ON UPDATE CASCADE)\")\n\n\ndef downgrade(active_plugins=None, options=None):\n # The routerroutes table should not be dropped\n op.execute('DROP TABLE IF EXISTS 
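routerroutes_mapping')\n    # Editorial note: only the mapping table is dropped on downgrade; the shared\n    # routerroutes table stays in place, mirroring the IF-NOT-EXISTS guard in\n    # upgrade(), since another plugin may have created and may still own it.\n    op.execute('DROP TABLE IF EXISTS 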
routerroutes_mapping')\n","sub_path":"neutron/db/migration/alembic_migrations/versions/10cd28e692e9_nuage_extraroute.py","file_name":"10cd28e692e9_nuage_extraroute.py","file_ext":"py","file_size_in_byte":2457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"621527696","text":"# CTI-110\r\n# P4T2_Bug Collector\r\n# Lafayette King\r\n# 3/5/2018\r\n\r\n\r\n# start \r\nGrandTotal = 0\r\nfor day in range(1,8):\r\n    print(\"Enter the bugs collected on day\",day)\r\n    bugs = int(input())\r\n    GrandTotal += bugs\r\n\r\nprint(\"you collected a GrandTotal of\", GrandTotal, \"bugs.\" )\r\n","sub_path":"P4T2_King.py","file_name":"P4T2_King.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"62976261","text":"import urllib.request\nimport urllib.parse\nimport re\nimport numpy as np\nimport pymysql\n# import time\nfrom html.parser import HTMLParser\n\ndef request_url(url):\n    if url is None:\n        return \" \"\n    try:\n        # print(url)\n        resp = urllib.request.urlopen(url)\n        return resp.read().decode(resp.headers.get_content_charset())\n        # print(respData) #debug\n    except:\n        print(\"*****urllib.request.urlopen error!*****\", url)\n        return \" \"\n\nclass MyHTMLParser(HTMLParser):\n    container = \"\"\n    def handle_data(self, data):\n        self.container += data.strip().replace(\"\\\"\",\"#\")\n        return str(self.container)\n\ndef liangdian_parser(url,i):\n    url_r = url + str(i)\n    respData = request_url(url_r)\n    if respData:\n        ld_1 = reg_1.findall(str(respData))\n        for ld in ld_1:\n            if ld:\n                parser = MyHTMLParser()\n                parser.feed(ld)\n                # print (parser.container) # debug\n                if parser.container:\n                    i = i + 1\n                    # time.sleep(5)\n                    return str(parser.container) + str(liangdian_parser(url,i))\n                return \"\"\n            else:\n                return \"\"\n    else:\n        return \"\"\n\ndef parser(respData):\n    paragraphs = regular.findall(str(respData))\n    for eachP in paragraphs:\n        # print(eachP) # debug\n        nameList = name_reg.findall(str(eachP))\n        descriptionList = description_reg.findall(eachP)\n        fatherList = father_reg.findall(str(eachP))\n        fatherurlList = fatherurl_reg.findall(str(eachP))\n        bianhaoList = bianhao_reg.findall(str(eachP))\n\n        for i in range(len(nameList)):\n            bianhao = bianhaoList[i]\n            name = nameList[i]\n            description = descriptionList[i]\n            father = fatherList[i]\n            father_url = \"http://www.xialv.com\" + fatherurlList[i]\n            url = \"http://www.xialv.com/scenery/item/\" + bianhaoList[i] + \"?&page=\"\n            liangdian = liangdian_parser(url,1)\n            # print(liangdian) #debug\n            sql = 'INSERT INTO tr_trip_temp.t_bj_zhoubian(bianhao, name, father_url, father, description, liangdian ) VALUES (\"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\");' % \\\n                  (bianhao, name, father_url, father, description, liangdian)\n            sql = sql.replace(\"None\",\" \")\n            print(sql)\n\n            # try:\n            #     # execute the SQL statement\n            #     cursor.execute(sql)\n            #     # commit to the database\n            #     db.commit()\n            # except:\n            #     # roll back on error\n            #     # print(sql)\n            #     print(\"*******insert SQL error!*******\")\n            #     db.rollback()\n\ndef main():\n    url = 'http://www.xialv.com/beijing/zhoubianyou'\n    respData = request_url(url)\n\n    pagset = pagset_reg.findall(str(respData)) # get the total page count\n    if not pagset:\n        pagset = ['0']\n    # print(\"total page count\", pagset)\n\n    for i in range(int(pagset[0])):\n        url = 'http://www.xialv.com/beijing/zhoubianyou'\n        url = url + \"?&page=\" + str(i+1)\n        # print(url) # debug\n        respData = request_url(url)\n        parser(respData)\n        # time.sleep(5)\n\nif __name__ 
== '__main__':\n # Regular Expressions:\n pagset_reg = re.compile(r'下一页末页')\n regular = re.compile(r'