diff --git "a/324.jsonl" "b/324.jsonl" new file mode 100644--- /dev/null +++ "b/324.jsonl" @@ -0,0 +1,724 @@ +{"seq_id":"369892970","text":"#verbose = True\n\nclass Configs_Controller():\n def __init__(self, verbose):\n self.freq = 50 #Hz \n self.hardwares = {\n 'steering': {\n 'servoMin': 150,\n 'servoMax': 350,\n 'servoMid': 285, \n 'channel': 1\n },\n 'throttle': {\n 'servoMin': 205,\n 'servoMax': 410,\n 'servoMid': 330, \n 'channel': 0\n }\n }\n self.verbose = verbose\n self.controller_sampling_rate = 0.001 #\n\n\nclass Configs_Server():\n def __init__(self, verbose):\n self.socket_port = 50007\n self.socket_host = ''\n self.verbose = verbose\n\n\nclass Configs_Camera():\n def __init__(self, verbose):\n self.img_width = 320 #px\n self.img_height = 240 #px\n self.fps = 30 #if -1, not used \n self.delay = 0.1 #delay in second between single snapshot\n self.verbose = verbose\n\n\nclass Configs_Arduino():\n def __init__(self, verbose):\n self.baud = 115200\n self.usb_port = 'ttyACM0'\n self.verbose = verbose\n\n","sub_path":"server/configs.py","file_name":"configs.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"33195853","text":"import numpy as np\nimport cv2\nimport config\nfrom car import Car\nimport hog_subsample\nimport heatmap_threshold_detection\nfrom scipy.ndimage.measurements import label\nfrom collections import deque\n\nclass VehicleDetection():\n \"\"\" Vehicle detection state\n \"\"\"\n def __init__(self, nframes):\n # number of frames in history\n self.nframes = nframes\n \n # bounding boxes for positive detections for each frame in history\n self.bbox_list_history = deque([], self.nframes)\n \n # cars in each frame in history\n self.cars_history = []\n \n def process_image( self, image ):\n \"\"\"Find cars in image\n \n Args:\n image (numpy.ndarray): Source image. Color channels in RGB order.\n\n Returns:\n (numpy.ndarray): image decorated with bounding boxes around cars\n \"\"\"\n \n # 1. detect cars in image at different scales\n \n # Modify x/y start stop according to scale, cars appear smaller near horizon\n scales = config.scales\n \n box_list = []\n for scale_item in scales:\n scale = scale_item[\"scale\"]\n detects_image, boxes = hog_subsample.find_cars(image, \n scale_item[\"y_start_stop\"][0], scale_item[\"y_start_stop\"][1], \n scale, \n config.settings[\"svc\"], \n config.settings[\"scaler\"], \n config.settings[\"orient\"], \n config.settings[\"pix_per_cell\"], config.settings[\"cell_per_block\"], \n config.settings[\"spatial_size\"], config.settings[\"hist_bins\"],\n scale_item[\"x_start_stop\"][0], scale_item[\"x_start_stop\"][1])\n box_list.extend(boxes)\n \n # Update history\n self.bbox_list_history.append( box_list )\n bbox_list_history_list = sum(self.bbox_list_history.copy(), []) # single list of bbox lists in history\n \n # 2. 
heat map and threshold\n \n # Make zeros shaped like image\n heat = np.zeros_like(image[:,:,0]).astype(np.float)\n\n # Add heat for each box in box list history\n heat = heatmap_threshold_detection.add_heat(heat, bbox_list_history_list)\n\n # Apply threshold to help remove false positives\n heat_threshold = config.heatmap_threshold\n heat = heatmap_threshold_detection.apply_threshold(heat, heat_threshold)\n\n # Find final boxes from heatmap using label function\n heatmap = np.clip(heat, 0, 255) # only need to clip if there is more than 255 boxes around a point?\n labels = label(heatmap)\n boxed_image = heatmap_threshold_detection.draw_labeled_bboxes(np.copy(image), labels)\n \n # frame image annotation\n font = cv2.FONT_HERSHEY_SIMPLEX\n cv2.putText(boxed_image,\"Frame:{}\".format(config.count), (10,100), font, 1, (255,255,255), 2 ,cv2.LINE_AA )\n \n return boxed_image\n \n ","sub_path":"src/vehicle_detection.py","file_name":"vehicle_detection.py","file_ext":"py","file_size_in_byte":2962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"109179865","text":"# a collection of personal finance tools\nimport pandas as pd\nimport os\nimport numpy as np\n\ntxn_columns = [\"txn_type\",\n \"date\",\n \"desc\",\n \"amnt\",\n \"acc_name\",\n \"acc_type\"]\n\n\nclass Account(object):\n \"\"\"\n A financial account.\n\n \"\"\"\n\n def __init__(self, number, name, acc_type, txns=None):\n self.number = number\n self.name = name\n self.acc_type = acc_type\n self.txns = txns\n\n def set_txns(self, txns):\n \"\"\"\n Set the transactions of this account, and add columns for account name and type.\n\n \"\"\"\n self.txns = txns\n self.txns[\"acc_name\"] = self.name\n self.txns[\"acc_type\"] = self.acc_type\n\n def fix_column_names(self):\n \"\"\"\n Fix the names of txn columns to\n - txn_type\n - date\n - desc\n - amnt\n\n \"\"\"\n col_name_dict = {\"Details\": \"txn_details\",\n \"Type\": \"txn_type\",\n \"Post Date\": \"date\",\n \"Posting Date\": \"date\", \"Description\": \"desc\",\n \"Amount\": \"amnt\"}\n\n self.txns.columns = [col_name_dict.get(col_name, col_name) for col_name in self.txns.columns]\n\n\ndef make_account(row):\n \"\"\"\n Make an account object from a row in account_info_df.\n\n \"\"\"\n return Account(number=row.acc_number[1:], name=row.acc_name, acc_type=row.acc_type)\n\n\ndef get_filename(account, file_path):\n def get_filenames():\n for file in os.listdir(file_path):\n if file.find(str(account.number)) >= 0:\n yield (file)\n\n filenames = list(get_filenames())\n if len(filenames) > 1:\n raise Exception(\"Multiple possible filenames containg {}\".format(account.number))\n else:\n return filenames[0]\n\n\ndef get_txns(account, file_path, sep=\",(?=\\S)\"):\n \"\"\"\n Get txns for an account from a csv.\n\n :param Account account:\n the account to get txns for\n\n :param str file_path:\n the location of the csv\n\n :param str sep:\n the separator regex, fixes issue\n of commas inside txn description\n\n :return:\n the txns of the account\n\n :rtype:\n pd.DataFrame\n\n \"\"\"\n\n # get the date columns to parse\n date_cols = []\n if account.acc_type == \"cc\":\n date_cols += [\"Trans Date\", \"Post Date\"]\n else:\n date_cols += [\"Posting Date\"]\n\n txn_df = pd.read_csv(\n file_path + get_filename(account, file_path),\n sep=sep,\n parse_dates=date_cols,\n index_col=False,\n engine='python')\n\n return txn_df\n\n\ndef combine_txns(acc_list):\n \"\"\"\n Combine all the txns from a list of accounts.\n\n \"\"\"\n return 
pd.concat([acc.txns[txn_columns] for acc in acc_list])\n\n\nclass Finances(object):\n \"\"\"\n Tools for analyzing finances\n\n :param str accounts_info_file_path:\n the path of the accounts info csv\n\n :param str accounts_info_filename:\n the name of the accounts info csv\n\n :param str txn_file_path:\n the path of the txn csv's\n\n \"\"\"\n def __init__(self, accounts_info_file_path, accounts_info_filename, txn_file_path, from_date, to_date):\n\n self.accounts_info_file_path = accounts_info_file_path\n self.accounts_info_filename = accounts_info_filename\n self.txn_file_path = txn_file_path\n self.from_date = from_date\n self.to_date = to_date\n\n self.accounts_dict, self.all_txns_df = self.setup()\n\n self.cred_deb_pairs, self.txns_no_pairs = self.find_cred_deb_pairs()\n\n def setup(self):\n \"\"\"\n Read in account info, txns for each account, and combine all txns.\n\n :return:\n dictionary of accounts and all txns\n :rtype:\n dict, pandas.DataFrame\n\n \"\"\"\n # read in account info\n account_info_df = pd.read_csv(self.accounts_info_file_path+self.accounts_info_filename, quotechar='\"')\n\n # make account objects\n accounts = account_info_df.apply(make_account, axis=1)\n\n # make accounts dictionary\n accounts_dict = {}\n\n for acc in accounts:\n # read in txns\n acc.set_txns(get_txns(acc, file_path=self.txn_file_path))\n\n # fix the column names\n acc.fix_column_names()\n\n # add the account to accounts_dict\n accounts_dict[acc.name] = acc\n\n # combine all txns\n all_txns_df = combine_txns(accounts_dict.values())\n\n # add a txn_id column\n all_txns_df['txn_id'] = range(len(all_txns_df))\n\n # add an amount absolute value column\n all_txns_df[\"amnt_value\"] = np.abs(all_txns_df.amnt)\n\n # add amount to description for checks (so I can regex for rent)\n all_txns_df[\"desc\"] = all_txns_df.apply(\n lambda row: row.desc + \" \" + str(row.amnt) if \"CHECK\" in row.desc else row.desc, axis=1)\n\n return accounts_dict, all_txns_df[(all_txns_df.date >= self.from_date) & (all_txns_df.date <= self.to_date)]\n\n def find_cred_deb_pairs(self):\n \"\"\"\n Find credit-debit pairs: transfers among accounts\n\n :return:\n the credit-debit pairs and all txns not in a credit-debit pair\n :rtype:\n pd.DataFrame, pd.DataFrame\n\n \"\"\"\n # first join on amnt_value\n txns_join_amnt_value = pd.merge(left=self.all_txns_df, right=self.all_txns_df, how=\"inner\", on=\"amnt_value\")\n\n # filter by sum of amounts = 0 and date_x < date_y\n # zero sum condition\n zero_sum = txns_join_amnt_value.amnt_x + txns_join_amnt_value.amnt_y == 0\n\n # date order condition\n date_order = txns_join_amnt_value.date_x <= txns_join_amnt_value.date_y\n\n # filtered join\n txns_join_zero_sum = txns_join_amnt_value[(zero_sum) & (date_order)]\n\n # filter by date_diff\n # calculate datediff\n txns_join_zero_sum['date_diff'] = txns_join_zero_sum.date_y - txns_join_zero_sum.date_x\n\n # uniqueness condition\n unique = txns_join_zero_sum.txn_id_x != txns_join_zero_sum.txn_id_y\n\n # filtered join\n txns_join_close_zero_sum = txns_join_zero_sum[(txns_join_zero_sum.date_diff < pd.Timedelta(days=4)) & (unique)]\n\n # only pick one pair for each first txn\n cred_deb_pairs = txns_join_close_zero_sum.groupby(\"txn_id_x\", as_index=False).first()\n\n # get all txns not in a credit-debit pair\n # left pair condition\n left_pair_cond = self.all_txns_df.txn_id.isin(cred_deb_pairs.txn_id_x)\n\n # right pair cond\n right_pair_cond = self.all_txns_df.txn_id.isin(cred_deb_pairs.txn_id_y)\n\n # filtered txns\n all_txns_df_no_pairs = 
self.all_txns_df[(~left_pair_cond) & (~right_pair_cond)]\n\n return cred_deb_pairs, all_txns_df_no_pairs\n","sub_path":"finances.py","file_name":"finances.py","file_ext":"py","file_size_in_byte":6820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"618893541","text":"# coding: utf8\n\nimport geocoder\n\nlocation = 'Ottawa, Ontario'\ncity = 'Ottawa'\nottawa = (45.4215296, -75.6971930)\nlocations = ['Denver,CO', 'Boulder,CO']\n\ndef test_bing():\n g = geocoder.bing(location)\n assert g.ok\n assert g.city == city\n osm_count, fields_count = g.debug()[0]\n assert osm_count >= 3\n assert fields_count >= 12\n\n\ndef test_bing_details():\n details = {\n 'adminDistrict': 'Ontario',\n 'locality': 'Ottawa'\n }\n\n g = geocoder.bing(None, method='details', **details)\n assert g.ok\n assert g.city == city\n osm_count, fields_count = g.debug()[0]\n assert osm_count >= 3\n assert fields_count >= 12\n\n details = {\n 'addressLine': '6912 Route 8',\n 'adminDistrict': 'Northumberland',\n 'countryRegion': 'CA',\n 'locality': 'Ludlow'\n }\n\n g = geocoder.bing(None, method='details', **details)\n assert g.ok\n osm_count, fields_count = g.debug()[0]\n assert osm_count >= 3\n assert fields_count >= 12\n\n\ndef test_bing_reverse():\n g = geocoder.bing(ottawa, method='reverse')\n assert g.ok\n assert g.city == city\n\n\ndef test_bing_batch():\n g = geocoder.bing(locations, method='batch')\n assert g.ok\n assert len(g) == 2\n\n\ndef test_multi_results():\n g = geocoder.bing(location, maxRows=3)\n assert len(g) == 3\n assert g.city == city\n\n expected_results = [\n [45.4217796325684, -75.6911926269531],\n [45.2931327819824, -75.7756805419922],\n [36.9871711730957, -94.7606735229492],\n ]\n assert [result.latlng for result in g] == expected_results\n","sub_path":"tests/test_bing.py","file_name":"test_bing.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"478911127","text":"import random\nip_r=[1,2,3,4]\nmask=''\nmask_r=[1,2,3,4]\nfor i in range(0,4):\n ip_r[i]=random.randint(1,255)\nprint(ip_r)\nzeros=random.randint(1,32)\nmask+=str(('1'*zeros)+('0'*(32-zeros)))\nfor i in range(0,4):\n mask_r[i]=int((mask[(8*i):((8*i)+8)]),2)\nprint(mask_r)","sub_path":"Scripts/ip_random.py","file_name":"ip_random.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"7199671","text":"#! 
/usr/bin/python\n\nimport sqlite3\nimport pandas as pd\nimport datetime\n\nusername = 'alexanderhiller'\n\nconn = sqlite3.connect('/Users/' + username + '/Library/Messages/chat.db')\nc = conn.cursor()\n\nfor i in range(1000):\n cmd1 = 'SELECT ROWID, text, handle_id, \\\n datetime(date + strftime(\\'%s\\',\\'2001-01-01\\'), \\'unixepoch\\') as date_utc \\\n FROM message T1 \\\n INNER JOIN chat_message_join T2 \\\n ON T2.chat_id='+ str(i)+ '\\\n AND T1.ROWID=T2.message_id \\\n ORDER BY T1.date'\n c.execute(cmd1)\n df_msg = pd.DataFrame(c.fetchall(), columns=['id', 'text', 'sender', 'time'])\n\n df_msg.to_csv(\"sqliteOutput_\" + str(i) + \".csv\")\n","sub_path":"scripts/grab_imessages.py","file_name":"grab_imessages.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"311617265","text":"from cleo import Command\n\n# import sys\n# sys.path.append(\"..\")\n#\n# from libs.data_container import data_container as dc\n# dc.config_overwrite = {'doorlockd': {'enable_hardware': False, 'enable_webserver': False,}}\n#\n# from app import db\n# from models import *\n\n\n\n\nclass GenSecretCommand(Command):\n\t\"\"\"\n\tGenerate secret for jwt_token config.\n\n\tgen:secret : Generate secret for jwt_token config.\n\t\"\"\"\n\n\tdef handle(self):\n\t\t\n\t\timport secrets \n\t\t#only python > 3.6 , sorry config secret in your config.ini\n\t\tsecret = secrets.token_urlsafe(64)\n\t\t\n\t\tself.line('# ')\n\t\tself.line('# Add the information below the [jwt_token] section in your config.ini:')\n\t\tself.line('# ')\n\t\tself.line('[jwt_token]')\n\t\tself.line('secret = \"{}\"'.format(secret))\n\t\tself.line(' ')\n\t\t\n\n","sub_path":"src/libs/cli/HelperCommand.py","file_name":"HelperCommand.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"563463354","text":"from sys import stdin, stdout\n\nk = [int(n) for n in stdin.readline()]\ng = len(k)\nwhile g is not 0:\n for j in k:\n s = bin(j)\n na = []\n for i in range(2,len(s)):\n na.append(int(s[i]))\n\n sum = 0\n for i in na:\n sum = sum + i\n\n stdout.write(str(sum))\n g = g - 1","sub_path":"MYSTERY.py","file_name":"MYSTERY.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"77301458","text":"aList = [5, 2, 1, 5, 2, 3]\n\ndef selectionsort(aList):\n for i in range(len(aList)):\n least = i\n for k in range(i + 1, len(aList)):\n if aList[k] < aList[least]:\n least = k\n print(aList)\n swap(aList, least, i)\n\ndef swap(aList, i, j):\n aList[i], aList[j] = aList[j], aList[i]\n\nselectionsort(aList)","sub_path":"Algorithms/Sorting Algorithms/Selection Sort.py","file_name":"Selection Sort.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"550016546","text":"from django.http import HttpResponse, HttpResponseRedirect\nfrom django.core.urlresolvers import reverse\nfrom django.shortcuts import render_to_response, redirect\nfrom django.template import Context, loader\nfrom django.template import RequestContext\nfrom django.contrib.auth.models import User,Group\nfrom courses.common_page_data import get_common_page_data\nfrom django.views.decorators.http import require_POST\n\nfrom c2g.models import *\nfrom random import randrange\nfrom datetime import datetime\n\nfrom django.utils.functional 
import wraps\n\ndef auth_view_wrapper(view):\n @wraps (view)\n def inner(request, *args, **kw):\n user = request.user\n course = request.common_page_data['course']\n\n if user.is_authenticated() and not is_member_of_course(course, user):\n return HttpResponseRedirect(reverse('courses.views.main', args=(request.common_page_data['course_prefix'], request.common_page_data['course_suffix'],)))\n\n if not user.is_authenticated():\n return HttpResponseRedirect(reverse('courses.views.main', args=(request.common_page_data['course_prefix'], request.common_page_data['course_suffix'],)))\n\n return view(request, *args, **kw)\n return inner\n\n@require_POST\n@auth_view_wrapper\ndef switch_mode(request):\n common_page_data = get_common_page_data(request, request.POST.get(\"course_prefix\"), request.POST.get(\"course_suffix\"))\n if common_page_data['can_switch_mode']:\n request.session['course_mode'] = request.POST.get('to_mode')\n return redirect(request.META['HTTP_REFERER'])\n\n@require_POST\n@auth_view_wrapper\ndef add_section(request):\n course_prefix = request.POST.get(\"course_prefix\")\n course_suffix = request.POST.get(\"course_suffix\")\n common_page_data = get_common_page_data(request, course_prefix, course_suffix)\n\n index = len(ContentSection.objects.filter(course=common_page_data['course']))\n\n if not common_page_data['is_course_admin']:\n return redirect('courses.views.view', course_prefix, course_suffix)\n\n staging_section = ContentSection(course=common_page_data['staging_course'], title=request.POST.get(\"title\"), index=index, mode='staging')\n staging_section.save()\n\n staging_section.create_production_instance()\n\n return redirect(request.META['HTTP_REFERER'])\n\n@require_POST\n@auth_view_wrapper\ndef commit(request):\n ids = request.POST.get(\"commit_ids\").split(\",\")\n for id in ids:\n parts = id.split('_')\n if parts[0] == 'video':\n Video.objects.get(id=parts[1]).commit()\n elif parts[0] == 'problemset':\n ProblemSet.objects.get(id=parts[1]).commit()\n elif parts[0] == 'additionalpage':\n AdditionalPage.objects.get(id=parts[1]).commit()\n return redirect(request.META['HTTP_REFERER'])\n\n@require_POST\n@auth_view_wrapper\ndef revert(request):\n ids = request.POST.get(\"revert_ids\").split(\",\")\n for id in ids:\n parts = id.split('_')\n if parts[0] == 'video':\n Video.objects.get(id=parts[1]).revert()\n elif parts[0] == 'problemset':\n ProblemSet.objects.get(id=parts[1]).revert()\n elif parts[0] == 'additionalpage':\n AdditionalPage.objects.get(id=parts[1]).revert()\n return redirect(request.META['HTTP_REFERER'])\n\n@require_POST\n@auth_view_wrapper\ndef change_live_datetime(request):\n ids = request.POST.get(\"change_live_datetime_ids\").split(\",\")\n\n if request.POST.get(\"live_datetime_option\") == 'now':\n new_live_datetime = datetime.now()\n else:\n live_date_parts = request.POST.get(\"live_date\").split(\"-\")\n year = int(live_date_parts[2])\n month = int(live_date_parts[0])\n day = int(live_date_parts[1])\n if request.POST.get(\"live_hours\") == '':\n hour = 0\n else:\n hour = int(request.POST.get(\"live_hours\"))\n\n if request.POST.get(\"live_minutes\") == '':\n minute = 0\n else:\n minute = int(request.POST.get(\"live_minutes\"))\n\n new_live_datetime = datetime(year,month,day,hour,minute)\n\n for id in ids:\n parts = id.split('_')\n if parts[0] == 'video':\n video = Video.objects.get(id=parts[1])\n video.live_datetime = new_live_datetime\n video.image.live_datetime = new_live_datetime\n video.save()\n video.image.save()\n elif parts[0] == 'problemset':\n pset 
= ProblemSet.objects.get(id=parts[1])\n pset.live_datetime = new_live_datetime\n pset.image.live_datetime = new_live_datetime\n pset.save()\n pset.image.save()\n elif parts[0] == 'additionalpage':\n page = AdditionalPage.objects.get(id=parts[1])\n page.live_datetime = new_live_datetime\n page.image.live_datetime = new_live_datetime\n page.save()\n page.image.save()\n elif parts[0] == 'file':\n file = File.objects.get(id=parts[1])\n file.live_datetime = new_live_datetime\n file.image.live_datetime = new_live_datetime\n file.save()\n file.image.save()\n\n return redirect(request.META['HTTP_REFERER'])\n\ndef is_member_of_course(course, user):\n student_group_id = course.student_group.id\n instructor_group_id = course.instructor_group.id\n tas_group_id = course.tas_group.id\n readonly_tas_group_id = course.readonly_tas_group.id\n\n group_list = user.groups.values_list('id',flat=True)\n\n for item in group_list:\n if item == student_group_id or item == instructor_group_id or item == tas_group_id or item == readonly_tas_group_id:\n return True\n\n return False\n\n\n@require_POST\ndef signup(request):\n handle = request.POST.get('handle')\n\n user = request.user\n course = Course.objects.get(handle=handle, mode = \"production\")\n if not is_member_of_course(course, user):\n student_group = Group.objects.get(id=course.student_group_id)\n student_group.user_set.add(user)\n\n return redirect(request.META['HTTP_REFERER'])\n\n\n\n","sub_path":"main/courses/actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":6039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"11915352","text":"'''\nA permutation is an ordered arrangement of objects. For example, 3124 is one possible permutation of the digits 1, 2, 3 and 4. 
If all of the permutations are listed numerically or alphabetically, we call it lexicographic order.\nThe lexicographic permutations of 0, 1 and 2 are:\n\n012 021 102 120 201 210\n\nWhat is the millionth lexicographic permutation of the digits 0, 1, 2, 3, 4, 5, 6, 7, 8 and 9?\n'''\n\nfrom math import factorial as fct\n\nnumbers = ['0','1','2','3','4','5','6','7','8','9']\nsolution = ''\nn = 0\nfor d in range(9, 0, -1):\n\tf = fct(d)\n\ti = 1\n\twhile f*(i+1) + n < 1000000:\n\t\ti += 1\n\tsolution += numbers[i]\n\tnumbers.pop(i)\n\tn = f*i + n\nsolution += numbers[0]\n\nprint('Answer:', solution)\n","sub_path":"024.py","file_name":"024.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"83100658","text":"import matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nimport numpy as np\nimport cv2\n\n\ndef update(i):\n # new_data = (plt_img + 2 * i) % 256\n new_data = np.random.rand(500,500,3)\n new_data[new_data<0 ]=0\n new_data[new_data>1]=0.5\n # new_data.astype('uint8')\n data.set_data(new_data)\n return data\n\n\nflower = 'img.jpg'\nframes = 100\nimg = cv2.imread(flower)\n# cv2.namedWindow('flower',cv2.WINDOW_FREERATIO)\n# cv2.imshow('flower',img);\n# cv2.waitKey()\n# cv2.destroyAllWindows()\n# plt_img = img[..., ::-1]\n# print(plt_img.dtype)\nfig = plt.figure(figsize=(15, 15))\n_data = np.random.randn(500,500,3)\n_data[_data<0]=0\n_data[_data>1]=0.5\n# sameas np.where((1-_data)*_data>0,_data,0)\ndata = plt.imshow(_data)\nanim = animation.FuncAnimation(fig, update, frames,interval = 10)\nplt.show()\n","sub_path":"python/plot/show_dynamic_picture.py","file_name":"show_dynamic_picture.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"609925748","text":"# binary search algorithm\r\n\r\nfrom typing import Any, Sequence\r\n\r\ndef bin_search(a: Sequence, key: Any) -> int:\r\n pl = 0\r\n pr = len(a) - 1\r\n\r\n while True:\r\n pc = (pl + pr)//2\r\n if a[pc] == key:\r\n return pc\r\n elif a[pc] < key:\r\n pl = pc + 1\r\n else:\r\n pr = pc - 1\r\n if pl > pr:\r\n break\r\n return -1\r\n\r\nif __name__ == '__main__':\r\n num = int(input('enter the number of element: '))\r\n x = [None]*num\r\n \r\n print('enter array data in ascending')\r\n \r\n x[0] = int(input('x[0]: '))\r\n for i in range(1,num):\r\n while True:\r\n x[i] = int(input(f'x[{i}]: '))\r\n if x[i] >= x[i-1]:\r\n break\r\n \r\n ky = int(input('enter the objective value: '))\r\n\r\n idx = bin_search(x,ky)\r\n\r\n if idx == -1:\r\n print(\"the objective doesn't exist\")\r\n else:\r\n print(f\"the objective is in x[{idx}]\")","sub_path":"3.04 bsearch.py","file_name":"3.04 bsearch.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"317254805","text":"import sys\nimport numpy as np\n\nfrom Reader import times, num_machines\n\ndef calc_cost(solution):\n return sum([ schedule[-1] for schedule in calc_schedule(solution)] )\n\ndef calc_schedule(solution):\n sol_schedule = [ [0]*num_machines ]\n for idx, time in enumerate(times[solution]):\n job_schedule = calc_job_schedule(sol_schedule[idx], time) \n sol_schedule.append(job_schedule)\n return sol_schedule[1:]\n\ndef calc_job_schedule(prev_schedule, job_schedule):\n schedule = [0] * num_machines\n for idx, duration in enumerate(job_schedule):\n schedule[idx] = max(schedule[idx-1], 
prev_schedule[idx]) + duration\n return schedule\n","sub_path":"Heuristics/Neighborhood/Calc_cost.py","file_name":"Calc_cost.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"241660180","text":"#-------------------------------------------------------------------------------\n# Name: module1\n# Purpose:\n#\n# Author: Brian\n#\n# Created: 30/03/2015\n# Copyright: (c) Brian 2015\n# Licence: \n#-------------------------------------------------------------------------------\n\ndef bmi(amount):\n answer = []\n for i in range(amount):\n a, b = raw_input().split(' ')\n weight = float(a)\n height = float(b)\n bmival = weight / height ** 2\n # Best practice in terms of performance to have an equal number of comparisons made in your if statements\n if bmival < 25.0:\n if bmival < 18.5:\n answer.append(\"under\")\n else:\n answer.append(\"normal\")\n else:\n if bmival < 30.0:\n answer.append(\"over\")\n else:\n answer.append(\"obese\")\n print(\"[{}]\".format(\" \".join(map(str, answer))))\n\nbmi(input())\n","sub_path":"Code Abbey Problem 28.py","file_name":"Code Abbey Problem 28.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"227646015","text":"import sys, subprocess, os\r\nos.chdir(sys.argv[6]) #sys.argv[6] = suite_name (change current working directory to test suite folder directory)\r\nfrom ibi.testrunner.change_test_suite_config import TestSuiteConfig\r\nfrom ibi.testrunner.test_result_reader import TestResultReader\r\nfrom ibi.testrunner.testrail.testrail import TestRail\r\nfrom ibi.testrunner.utils import Utils\r\n\r\nclass TestSuiteRunner(object):\r\n \r\n def __init__(self):\r\n \r\n self.TestRail=TestRail()\r\n self.get_command_line_arguments() \r\n TestSuiteConfig(self.confid, self.release, self.product, self.device).change() # Change test suite config file\r\n self.run_id=Utils.get_test_suite_run_id(self.release, self.suite_name, self.device)\r\n \r\n def get_command_line_arguments(self): \r\n \"\"\"\r\n This function get the inputs from command argument which are passed by command line from jenkins\r\n \"\"\"\r\n self.device=sys.argv[1] # device ='IOS_IPAD'\r\n self.product=sys.argv[2] # project ='wf'\r\n self.release=sys.argv[3] # release = '8202'\r\n self.confid=sys.argv[4] # confid = 'wf2'\r\n self.pkgname=sys.argv[5] # pkgname = 'wf101218a'\r\n self.suite_name=sys.argv[6] # S10141_Action_Android\r\n self.MAC_PYTHON_PATH = sys.argv[7] # Python path\r\n \r\n def define_test_result_key_and_values(self):\r\n \"\"\"\r\n Define test result keys and values to post result to testrail database\r\n \"\"\"\r\n self.test_results={}\r\n self.test_results['status_id'] = self.TestRail.get_index_for_test_status('untested')\r\n self.test_results['custom_configurations'] = self.TestRail.get_index_for_result_field('configurations', self.confid)\r\n self.test_results['custom_run_mode'] = self.TestRail.get_index_for_result_field('run_mode', 'ap')\r\n self.test_results['custom_release'] = self.TestRail.get_index_for_result_field('release', self.release)\r\n self.test_results['custom_prodid'] = self.TestRail.get_index_for_result_field('prodid', self.product)\r\n self.test_results['custom_atm_issues'] = self.TestRail.get_index_for_result_field('atm_issues', 'na')\r\n self.test_results['custom_pkgname'] = self.pkgname\r\n self.test_results['custom_browsers'] = '0'\r\n self.test_results['defects'] = ''\r\n 
self.test_results['comment'] = ''\r\n \r\n def update_test_result(self, test_result_xml):\r\n \"\"\"\r\n Update the test results after run each test case\r\n \"\"\"\r\n self.define_test_result_key_and_values()\r\n test_status=TestResultReader(test_result_xml).get_test_result_status()\r\n test_status_index=self.TestRail.get_index_for_test_status(test_status)\r\n self.test_results['status_id']=test_status_index\r\n \r\n def post_results_to_testrail(self, test_id):\r\n \"\"\"\r\n Post the test case results to testrail database\r\n \"\"\"\r\n self.TestRail.post_test_result(self.run_id, test_id, self.test_results)\r\n \r\n def post_case_update_info(self, case_id):\r\n \"\"\"\r\n Post the case update info to testrail database\r\n \"\"\"\r\n update_info={}\r\n self.TestRail.post_case_update(case_id, update_info)\r\n\r\n def __run_test(self):\r\n \"\"\"\r\n Run all test case \r\n \"\"\"\r\n run_cases=Utils.get_automation_validation_test_cases_to_run(self.run_id)\r\n for case in run_cases :\r\n case_name='C'+str(case)\r\n process_status=subprocess.call(self.MAC_PYTHON_PATH + ' singlerunner.py '+case_name,shell=True)\r\n test_result_xml=Utils.get_test_result_xml_path(case_name) # Read the test result XML file to get test results\r\n if len(test_result_xml)!=0 and process_status==0 : #Checking whether test case ran or not\r\n self.update_test_result(test_result_xml[0]) \r\n self.post_case_update_info(case)\r\n self.post_results_to_testrail(case)\r\n del self.test_results # Delete the test result after post to test rail\r\n else :\r\n continue\r\n \r\n def Start(self):\r\n \"\"\"\r\n Start to run all test case\r\n \"\"\"\r\n Utils.check_appium_server_is_up()\r\n self.__run_test()\r\n \r\nTestSuiteRunner().Start()\r\n","sub_path":"Appium/ibi/testrunner/runners/AutomationValidationRunner.py","file_name":"AutomationValidationRunner.py","file_ext":"py","file_size_in_byte":4397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"400209168","text":"def ChiediPesi(Pesi):\n for k in range(8):\n Pesi.append(int(input()))\n\ndef CalcolaVotiFinali(Matricole, VotiQuesiti, Pesi, VotiFinali, ValoreSoglia):\n cont = 0\n\n for i in range(70):\n VotoFinale = 0\n for j in range(8):\n VotoFinale += VotiQuesiti[cont]*Pesi[cont%8]\n cont+=1\n VotiFinali.append(VotoFinale)\n\ndef EliminaBocciati(VotiFinali, Matricole, ValoreSoglia):\n for k in range(70):\n if VotiFinali[k] < ValoreSoglia:\n Matricole[k] = 'elimina'\n\n while 'elimina' in Matricole:\n indice=Matricole.index('elimina')\n del Matricole[indice]\n del VotiFinali[indice]\n \n\ndef StampaStudentiPromossi(Matricole, VotiFinali):\n for k in range(len(Matricole)):\n VotoFinale = VotiFinali[k]\n Matricola = Matricole[k]\n print(Matricola, VotoFinale)\n\ndef CalcolaMinimo(Matricole, VotiFinali):\n PosMinimo = 0\n VotoMinimo = VotiFinali[0]\n for k in range(1, len(Matricole)):\n if VotiFinali[k] <= VotoMinimo:\n VotoMinimo = VotiFinali[k]\n PosMinimo = k\n return PosMinimo\n \ndef main():\n Pesi = []\n ChiediPesi(Pesi)\n VotiFinali = []\n Matricole = []\n VotiQuesiti = []\n \n for i in range(70):\n InputMatricola = input()\n if len(InputMatricola)<=10:\n Matricole.append(InputMatricola)\n else:\n Matricole.append(InputMatricola[0:10])\n for j in range(8):\n VotiQuesiti.append(int(input()))\n ValoreSoglia = int(input())\n CalcolaVotiFinali(Matricole, VotiQuesiti, Pesi, VotiFinali, ValoreSoglia)\n EliminaBocciati(VotiFinali, Matricole, ValoreSoglia)\n StampaStudentiPromossi(Matricole, VotiFinali)\n if len(Matricole) != 
0:\n IndiceMax = max(VotiFinali) \n MatricolaMax = Matricole[VotiFinali.index(IndiceMax)]\n MatricolaMin = Matricole[CalcolaMinimo(Matricole, VotiFinali)]\n print(len(Matricole), MatricolaMax, MatricolaMin, end='')\n \n \nmain()\n \n","sub_path":"DomJudge/Scripts/n65.py","file_name":"n65.py","file_ext":"py","file_size_in_byte":2030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"436592092","text":"a=[int(i) for i in input().split()]\nmax_otr=[0 for i in range(2)]\nmax_pol=[0 for i in range(3)]\nmx1,mx2=0,0\nl1,l2=0,0\nfor i in a:\n if i>mx1:\n mx1=i\n max_pol[l1%3]=i\n l1+=1\n elif ir_pol:\n print(r_otr*max_pol[0])\nelse:\n print(r_pol*max_pol[0])\n","sub_path":"musor/3max.py","file_name":"3max.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"422284425","text":"from typing import List, Tuple, Union\n\nfrom pydantic import Field, root_validator\n\nfrom weaverbird.pipeline.steps.utils.base import BaseStep\nfrom weaverbird.pipeline.steps.utils.render_variables import StepWithVariablesMixin\nfrom weaverbird.pipeline.types import TemplatedVariable\n\n\nclass RenameStep(BaseStep):\n name = Field('rename', const=True)\n to_rename: List[Tuple[str, str]] = Field(..., alias='toRename')\n\n @root_validator(pre=True)\n def handle_legacy_syntax(cls, values):\n if 'oldname' in values and 'newname' in values:\n values['to_rename'] = [(values.pop('oldname'), values.pop('newname'))]\n return values\n\n\nclass RenameStepWithVariable(RenameStep, StepWithVariablesMixin):\n to_rename: Union[TemplatedVariable, List[Tuple[TemplatedVariable, TemplatedVariable]]] = Field(\n ..., alias='toRename'\n )\n","sub_path":"server/weaverbird/pipeline/steps/rename.py","file_name":"rename.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"236705083","text":"\"\"\"\n This example shows how to cut actors in the scene using a plane\n oriented along the sagittal axis\n\"\"\"\n\nimport brainrender\nbrainrender.SHADER_STYLE = 'cartoon'\n\nfrom brainrender.scene import Scene\n\n\nscene = Scene()\n\n# Add some actors\nroot = scene.actors['root']\nth = scene.add_brain_regions(['STR', 'TH'], alpha=.5)\n\n# Cut with plane\nscene.cut_actors_with_plane('sagittal', showplane=False) # Set showplane to True if you want to see the plane location\n\n# Add a silhouette around each actor to emphasize the cut location\nsil = root.silhouette().lw(1).c('k')\nsil2 = [act.silhouette().lw(3).c('k') for act in th]\nscene.add_vtkactor(sil, *sil2)\n\nscene.render(camera='top')\n","sub_path":"Examples/basic/cut_with_plane1.py","file_name":"cut_with_plane1.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"85115310","text":"from sikuli import *\n\nDawn_of_the_Dragons = \"1371058319396.png\"\nDotD_Raid_Btn = \"DotD_Raid_Btn.png\"\nDOTD_Active_Raids_Btn = \"DotD_Active_Raids_Btn.png\"\nDOTD_Engage = \"DotD_Engage_Raid_Btn.png\"\nDOTD_20 = \"DotD_Attack_20_Btn.png\"\nDOTD_close = \"DotD_Close_Popup_Btn.png\"\nDOTD_Quest = \"DotD_Quest_Btn.png\"\nDOTD_QesstAttack = \"DotD_Quest_Attack_Btn.png\"\nDOTD_AmbushFlee = \"1371392032885.png\"\nDOTD_AmbushAttack = \"DotD_Ambush_Attack_Btn.png\"\nDOTD_AmbushAttackHover = \"DotD_Ambush_Attack_Hover_Btn.png\"\n\ndef run(timeout):\n if exists(Dawn_of_the_Dragons, timeout):\n 
click(Dawn_of_the_Dragons)\n print(\"Dawn of the Dragons Clicked\")\n if exists(DotD_Raid_Btn, timeout):\n click(DotD_Raid_Btn)\n print(\"DotD Raid Clicked\")\n if exists(DOTD_Active_Raids_Btn, timeout):\n click(DOTD_Active_Raids_Btn)\n print(\"DotD Active Clicked\")\n if exists(DOTD_Engage, timeout):\n click(DOTD_Engage)\n print(\"DotD Engage Clicked\")\n if exists(DOTD_20, timeout):\n click(DOTD_20)\n print(\"DotD 20 Clicked\")\n if exists(DOTD_close, timeout):\n click(DOTD_close)\n print(\"DotD Close Clicked\")\n if exists(DOTD_Quest, timeout):\n click(DOTD_Quest)\n print(\"DotD Quest Clicked\")\n if exists(DOTD_QesstAttack, timeout):\n click(DOTD_QesstAttack)\n print(\"DotD Quest Attack Clicked\")\n if exists(DOTD_close, timeout):\n click(DOTD_close)\n else:\n if exists(DOTD_AmbushAttack, timeout):\n click(DOTD_AmbushAttack)\n print(\"DotD Ambush Attack Clicked\")\n while exists(DOTD_AmbushAttackHover, timeout):\n click(DOTD_AmbushAttackHover)\n print(\"DotD Ambush Attack Hover Clicked\")\n","sub_path":"FBGames.sikuli/DotD.py","file_name":"DotD.py","file_ext":"py","file_size_in_byte":2058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"596751918","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jun 18 16:23:15 2021\n\n@author: mathewjowens\nEdiited by Matthew Lang\n\"\"\"\n\nimport os\nimport sys\nimport numpy as np\nimport astropy.time as ast\nimport makeMASens.downMASfc as dmf\nimport makeMASens.bravdaEns as bEns\nimport pandas as pd\nimport h5py\n\ndef observerReadH5(ephFile, body, times):\n # Function to extract the locations of a celestrial body/spacecraft specified in the\n # ephemeris HDF5 file at specific times.\n # Input:\n # ephFile: File containing ephemeris data to be extracted\n # body: Celestial object/spacecraft data to be extracted\n # times: Times (astropy.Time object) at which the data is to be extracted\n #\n # Output:\n # df: Directory containing the data at the required times. 
If there is no data at the times specified,\n # the data is replaced with NaNs\n\n fileName = ephFile\n startDate = times.min()\n endDate = times.max()\n\n # Check body specified is a viable option\n possibleBodies = ['MERCURY', 'VENUS', 'EARTH', 'MARS',\n 'PSP', 'STERA', 'STERB', 'SO']\n\n if body.upper() in possibleBodies:\n body = body.upper()\n else:\n print('Warning, body requested was not recognised.')\n print(f'Only {possibleBodies} are valid.')\n print('Defaulting to Earth')\n body = ['EARTH']\n\n\n with h5py.File(fileName, \"r\") as f:\n # Extract variables\n\n # Generate time object to contain all dates in fileName\n allTime = ast.Time(f[body]['date'], format='mjd')\n allTime.format = 'datetime'\n\n # Define mask to restrict to only the relevant dates\n dtMask = (\n (allTime >= startDate) & (allTime <= endDate)\n )\n\n # Extract the variables at the required times\n bodyDates = ast.Time(allTime[dtMask])\n bodyRad = f[body]['radAU'][dtMask]\n bodyLat = f[body]['hgiLatDeg'][dtMask]\n bodyLon = f[body]['hgiLonDeg'][dtMask]\n\n # Interpolate the data to the required timeseries\n # Check that body dates are within times requested\n if len(bodyDates) > 0:\n bodyRadInterp = np.interp(times.jd, bodyDates.jd, bodyRad)\n bodyLatInterp = np.interp(times.jd, bodyDates.jd, bodyLat)\n bodyLonInterp = np.interp(times.jd, bodyDates.jd, bodyLon)\n\n # Fill position arrays with NaNs for [times < (bodyDates.min() - 2 days)] and\n # for [times > (bodyDates.max() + 2 days)]\n twoDaySec = ast.TimeDelta(2 * 24 * 60 * 60, format='sec')\n lowMask = times.jd < (bodyDates.min() - twoDaySec).jd\n hiMask = times.jd > (bodyDates.max() + twoDaySec).jd\n interpMask = (lowMask | hiMask)\n\n if len(interpMask) > 0:\n bodyRadInterp[interpMask] = np.nan\n bodyLatInterp[interpMask] = np.nan\n bodyLonInterp[interpMask] = np.nan\n\n # Create dictionary to store required data\n df = pd.DataFrame(\n {'radAU': bodyRadInterp, 'hgiLatDeg': bodyLatInterp, 'hgiLonDeg': bodyLonInterp},\n index=times\n )\n else:\n df = pd.DataFrame(\n {'radAU': np.nan, 'hgiLatDeg': np.nan, 'hgiLonDeg': np.nan},\n index=times\n )\n\n return df\n\n\ndef makeMASens(\n crMJDFile, cr_start, cr_end, nMASens, nLon, ephemFile, masMapsDir, ensSaveDir,\n lat_rot_sigma=5 * np.pi / 180, lat_dev_sigma=2 * np.pi / 180,\n long_dev_sigma=2 * np.pi / 180, r_in=30\n):\n # Function to generate MAS ensemble members and download MAS model runs if they don't exist\n # crMJDFile: File containing the start times of all Carrington Rotations\n # cr_start: Initial carrington rotation required\n # cr_end: Final Carrington Rotation required\n # nMASens: Number of MAS ensemble members required\n # nLon: Number of longitude points in each ens. 
member\n # ephemFile: Location of ephemeris file containing Earth's positional data\n # masMapsDir: Directory containing MAS maps (if they exist, if it doesn't, new directory will be made)\n # ensSaveDir: Directory to save ensembles to (if doesn't exist, new directory will be created)\n # lat_rot_sigma: The standard deviation of the Gaussian from which the rotational perturbation is drawn\n # lat_dev_sigma: The standard deviation of the Gaussian from which the linear\n # latitudinal perturbation is drawn\n # lon_dev_sigma: The standard deviation of the Gaussian from which the linear\n # longitudinal perturbation is drawn\n # r_in: Radial distance of speed map required (in rS, default is 30rS)\n\n # CR to be extracted\n # crMJDFile = 'C:/Users/mslan/Desktop/HUXtDA/CR_MJD/2039_2300CRMJDstart.csv'\n # cr_start = 2050\n # cr_end = 2230\n\n # If masMapsDir does not exist, create it\n if not os.path.isdir(masMapsDir):\n os.makedirs(masMapsDir)\n\n # If ensSaveDir does not exist, create it\n if not os.path.isdir(ensSaveDir):\n os.makedirs(ensSaveDir)\n\n # Read CR_MJD file to extract MJD start times for each Carrington Rotation\n crLines = []\n mjdLines = []\n with open(crMJDFile) as f:\n crMJDLines = f.readlines()\n\n for ln in crMJDLines:\n splitLn = (ln.strip()).split(',')\n\n crLines.append(float(splitLn[0]))\n mjdLines.append(float(splitLn[1]))\n\n # Generate MAS ensemble for required Carrington Rotation\n for nCR in range(cr_start, cr_end):\n cr = nCR\n print(f'CR = {cr}')\n\n reqIndex = next(\n x for x, val in enumerate(crLines) if val > cr\n )\n\n winCR = int(crLines[reqIndex - 1])\n\n if winCR != cr:\n print(f'winCR ({winCR}) != cr ({cr})')\n print('Exiting now')\n sys.exit()\n\n winCRStart = ast.Time(mjdLines[reqIndex - 1], format='mjd')\n winCREnd = ast.Time(mjdLines[reqIndex], format='mjd')\n\n # Make time-series of all times that need to be extracted from each MAS map\n deltaT = ast.TimeDelta(winCREnd - winCRStart, format='sec') / nLon\n\n timeGrid = ast.Time(\n [winCRStart + (deltaT * i)\n for i in range(nLon)]\n )\n crLonGrid = np.arange(360, 0, -360.0 / nLon)\n\n # Get the MAS maps\n vr_map, vr_lats, vr_longs = dmf.get_MAS_maps(cr, masMapsDir)\n\n # vr_map will be a single int if there is no data\n if not isinstance(vr_map, int):\n\n # Use the HUXt ephemeris data to get Earth lat over the CR\n earth = observerReadH5(ephemFile, 'Earth', timeGrid)\n\n # get Earth lat as a function of longitude (not time)\n E_lat = np.interp(vr_longs * 180 / np.pi, np.flipud(crLonGrid),\n np.flipud(earth['hgiLatDeg']))\n\n # Convert E_lat to radians\n E_lat = E_lat * np.pi / 180.0\n\n # ==============================================================================\n # generate the input ensemble\n # ==============================================================================\n # generate the meshed grid\n phi, theta = np.meshgrid(vr_longs, vr_lats)\n\n vr_ensemble = bEns.generate_input_ensemble(\n phi, theta, vr_map, reflats=E_lat, Nens=nMASens,\n lat_rot_sigma=lat_rot_sigma, lat_dev_sigma=lat_dev_sigma, long_dev_sigma=long_dev_sigma\n )\n\n # resample the ensemble to 128 longitude bins\n vr128_ensemble = np.ones((nMASens, nLon))\n dphi = 2 * np.pi / nLon\n phi128 = np.linspace(dphi / 2, 2 * np.pi - dphi / 2, nLon)\n\n for i in range(0, nMASens):\n vr128_ensemble[i, :] = np.interp(\n phi128, vr_longs, vr_ensemble[i, :]\n )\n\n # ==============================================================================\n # save the ensemble for use in BRaVDA\n # 
==============================================================================\n h5FileName = os.path.join(ensSaveDir, f'HelioMAS_CR{cr}_vin_ensemble.h5')\n h5f = h5py.File(h5FileName, 'w')\n h5f.create_dataset('Vin_ensemble', data=vr128_ensemble)\n\n outEnsTxtFile = open(f'{ensSaveDir}/vin_ensemble_CR{cr}.dat', 'w')\n np.savetxt(outEnsTxtFile, vr128_ensemble)\n outEnsTxtFile.close()\n\n h5f.attrs['lat_rot_sigma'] = lat_rot_sigma\n h5f.attrs['lat_dev_sigma'] = lat_dev_sigma\n h5f.attrs['long_dev_sigma'] = long_dev_sigma\n\n # this is used only to identify the source files.\n filepath = f'dmf.get_MAS_maps({cr}, {masMapsDir})'\n h5f.attrs['source_file'] = filepath\n h5f.attrs['r_in_rS'] = r_in\n h5f.attrs['Carrington_rotation'] = cr\n h5f.close()\n","sub_path":"makeMASens/helioMASens.py","file_name":"helioMASens.py","file_ext":"py","file_size_in_byte":8801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"235671706","text":"def simple_hash(s: str, start: int, end: int) -> int:\n \"\"\"\n 求字符串子串的hash值,ASCII码求和\n :param s:\n :param start:\n :param end:\n :return:\n \"\"\"\n result = 0\n for i in range(start, end + 1):\n result += ord(s[i])\n return result\n\n\ndef rk(main, pattern) -> int:\n \"\"\"\n 在字符串 main 中查找 pattern\n :param main:\n :param pattern:\n :return:\n \"\"\"\n n = len(main)\n m = len(pattern)\n\n if n <= m:\n return 0 if pattern == main else -1\n\n # 生成子串hash表\n hash_table = [None] * (n - m + 1)\n hash_table[0] = simple_hash(main, 0, m - 1)\n for i in range(1, n - m + 1):\n hash_table[i] = hash_table[i - 1] - simple_hash(main, i - 1, i - 1) + simple_hash(main, i + m - 1, i + m - 1)\n hash_pattern = simple_hash(pattern, 0, m - 1)\n for index, h in enumerate(hash_table):\n if hash_table[index] == hash_pattern:\n if pattern == main[index:index + m]:\n return index\n else:\n continue\n\n return -1\n\n\ndef test():\n assert rk(\"abcdefghij\", \"abc\") == 0\n assert rk(\"abc\", \"abcd\") == -1\n assert rk(\"abcdefghij\", \"ebcd\") == -1\n assert rk(\"abcdefghij\", \"cde\") == 2\n\n\nif __name__ == '__main__':\n test()\n\n","sub_path":"algorithm/string_match/rk.py","file_name":"rk.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"22775494","text":"\"\"\"Mixture model using EM\"\"\"\nfrom typing import Tuple\nimport numpy as np\nfrom common import GaussianMixture\n\n\n\ndef estep(X: np.ndarray, mixture: GaussianMixture) -> Tuple[np.ndarray, float]:\n \"\"\"E-step: Softly assigns each datapoint to a gaussian component\n\n Args:\n X: (n, d) array holding the data\n mixture: the current gaussian mixture\n\n Returns:\n np.ndarray: (n, K) array holding the soft counts\n for all components for all examples\n float: log-likelihood of the assignment\n \"\"\"\n mu = mixture.mu\n var = mixture.var\n p = mixture.p\n n,d = X.shape\n L = np.zeros((n,len(p)))\n summ = 0\n for i in range(n):\n for j in range(len(p)):\n sq_sum = np.sum((X[i]-mu[j])**2)\n ex_part = np.exp((-1/(2*var[j]))*sq_sum)\n L[i][j] = p[j]*(1/(2*np.pi*var[j])**(d/2))*ex_part\n sum_ = np.sum(L[i])\n L[i] = L[i]/sum_\n summ = summ + np.sum(L[i]*np.log(sum_)) \n return L,summ \n raise NotImplementedError\n\n\ndef mstep(X: np.ndarray, post: np.ndarray) -> GaussianMixture:\n \"\"\"M-step: Updates the gaussian mixture by maximizing the log-likelihood\n of the weighted dataset\n\n Args:\n X: (n, d) array holding the data\n post: (n, K) array holding the soft counts\n for all 
components for all examples\n\n Returns:\n GaussianMixture: the new gaussian mixture\n \"\"\"\n n,K = post.shape\n n,d = X.shape \n p =np.ones(K)/K\n mu = X[np.random.choice(n, K, replace=False)]\n var = np.zeros(K)\n mixture = GaussianMixture(mu,var,p)\n N = np.zeros(K)\n for i in range(K):\n N[i] = np.sum(post[:,i])\n mixture.p[i] = N[i]/n\n mixture.mu[i] = 1/(N[i])*(np.matmul(post[:,i],X))\n mixture.var[i] = 1/(N[i]*d)*(np.sum(post[:,i].reshape(n,1)*(X-mixture.mu[i])**2))\n return mixture\n raise NotImplementedError\n\n\ndef run(X: np.ndarray, mixture: GaussianMixture,\n post: np.ndarray) -> Tuple[GaussianMixture, np.ndarray, float]:\n \"\"\"Runs the mixture model\n\n Args:\n X: (n, d) array holding the data\n post: (n, K) array holding the soft counts\n for all components for all examples\n\n Returns:\n GaussianMixture: the new gaussian mixture\n np.ndarray: (n, K) array holding the soft counts\n for all components for all examples\n float: log-likelihood of the current assignment\n \"\"\"\n L,l_older = estep(X,mixture)\n mixt = mstep(X,L)\n L,l_new = estep(X,mixt)\n while abs((l_older-l_new)/l_new) >= 1E-6:\n l_older = l_new\n mixt = mstep(X,L)\n L,l_new = estep(X,mixt)\n return mixt,L,l_new\n raise NotImplementedError\n","sub_path":"naive_em.py","file_name":"naive_em.py","file_ext":"py","file_size_in_byte":2695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"603754860","text":"#!/usr/bin/env python3\n#\n# enjoy_random.py\n#\n# Enjoy random agent in ViZDoom\n# \n\nfrom argparse import ArgumentParser\nfrom time import sleep\nimport vizdoom as vzd\nimport random\nimport os\n\nDEFAULT_CONFIG = \"my_way_home.cfg\"\n\ndef main(args):\n game = vzd.DoomGame()\n\n game.load_config(args.config)\n\n # Hide window we are evaluating and\n # show it if we are enjoying\n game.set_window_visible(not args.evaluate)\n \n # Set bot to play instead of human.\n # ASYNC mode runs game at constant, default rate,\n # which is more comfortable for us to enjoy.\n # PLAYER mode is bot controlled, and runs\n # at maximum speed\n if args.evaluate:\n game.set_mode(vzd.Mode.PLAYER)\n else:\n game.set_mode(vzd.Mode.ASYNC_PLAYER)\n\n try:\n game.init()\n except Exception as e:\n # Check if buffer mismatch error\n if \"size mismatch\" in str(e):\n print(\"Could not run ViZDoom at desired resolution. \" +\n \"Try changing the resolution in the config file \" +\n \"(e.g. 
RES_1920X1080 works on 1080p monitors)\")\n exit(1)\n else:\n raise e\n\n # For evaluation, track\n # how long each episode took\n # and if they were succesful or not (reached goal)\n episode_lengths = []\n episode_success = []\n\n # How many buttons are available in this config\n num_buttons = game.get_available_buttons_size()\n\n for i in range(args.num_games):\n print(\"Episode #\" + str(i + 1))\n\n game.new_episode()\n\n state = None\n while not game.is_episode_finished():\n state = game.get_state()\n # TODO \n # Implement creating random actions.\n # One action is a list of zeros and ones, each\n action = []\n action.append(random.randint(0, 1))\n action.append(random.randint(0, 1))\n action.append(random.randint(0, 1))\n # specifying if button should be pressed down or not.\n # You can see the available buttons in .cfg file, but this\n # knowledge is not needed here.\n #raise NotImplementedError(\"Implement random actions here, and then remove this line.\")\n\n game.make_action(action, args.rate)\n\n # Store episode length and if it was\n # success or not\n episode_lengths.append(state.tic)\n # If episode lasted more than 1000 tics, it timed out\n # (i.e. didn't reach goal). We know this is the timeout\n # limit from the config file.\n episode_success.append(int(state.tic < 1000))\n\n game.close()\n\n # If evaluation on, print results\n if args.evaluate:\n average_length = sum(episode_lengths)/len(episode_lengths)\n success_rate = sum(episode_success)/len(episode_success)\n print(\"Success rate: %.1f%%\" % (success_rate * 100))\n print(\"Average episode length: %.1f\" % average_length)\n\nif __name__ == \"__main__\":\n parser = ArgumentParser(\"Enjoy random agent in ViZDoom.\")\n parser.add_argument(\"--config\",\n default=DEFAULT_CONFIG,\n nargs=\"?\",\n help=\"Path to the configuration file of the scenario.\")\n parser.add_argument(\"--num-games\",\n default=10,\n type=int,\n help=\"How many games will be played.\")\n parser.add_argument(\"--rate\",\n default=2,\n type=int,\n help=\"How many frames between taking an action.\")\n parser.add_argument(\"--evaluate\",\n action=\"store_true\",\n help=\"Instead of displaying game, evaluate agent faster and output success rate\")\n args = parser.parse_args()\n\n main(args)\n","sub_path":"monday/monday/monday/enjoy_random.py","file_name":"enjoy_random.py","file_ext":"py","file_size_in_byte":3806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"630534768","text":"#!/usr/bin/python\n# coding:utf-8\n\nimport turtle as t\n \n \ndef sanjiaoxing(san):\n \"\"\"\n 传入三个点坐标,绘制三角形\n \"\"\"\n t.penup()\n t.goto(san[0])\n t.pendown()\n t.goto(san[1])\n t.goto(san[2])\n t.goto(san[0])\n \n \ndef get_mid(a, b):\n \"\"\"\n 计算返回2个点的中间点坐标\n \"\"\"\n x = (a[0] + b[0]) / 2\n y = (a[1] + b[1]) / 2\n return [x, y]\n \n \ndef draw_san(size, i):\n \"\"\"\n 绘制谢尔宾斯基三角形函数\n :param size: 三个点坐标列表\n :param i: 递归次数\n \"\"\"\n # 绘制三角形\n sanjiaoxing(size)\n if i > 0:\n # 绘制左边小三角形\n size2 = [size[0], get_mid(size[0], size[1]), get_mid(size[0], size[2])]\n draw_san(size2, i - 1)\n \n # 绘制上边的小三角形\n size3 = [get_mid(size[0], size[2]), get_mid(size[1], size[2]), size[2]]\n draw_san(size3, i - 1)\n \n # 绘制右边的小三角形\n size4 = [get_mid(size[0], size[1]), size[1], get_mid(size[1], size[2])]\n draw_san(size4, i - 1)\n \n \ndef main():\n \"\"\"\n 主函数\n \"\"\"\n # 打印图形标题\n t.penup()\n t.left(90)\n t.forward(350)\n t.pendown()\n t.write(\"谢尔宾斯基三角形\", False, align=\"center\", font=(\"宋体\", 20, \"normal\"))\n t.speed(5)\n \n # 
初始三角形坐标\n points = [[-200, 0], [200, 0], [0, 300]]\n # 递归5次\n count = 5\n # 调用绘制谢尔宾斯基三角形函数\n draw_san(points, count)\n \n t.exitonclick()\n \n \nif __name__ == '__main__':\n main()\n","sub_path":"_turtle/_turtletriangle.py","file_name":"_turtletriangle.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"613952061","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.utils.timezone\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('cards', '0002_auto_20151010_1759'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Deck',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=200)),\n ('creation_time', models.DateTimeField(default=django.utils.timezone.now)),\n ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.AlterField(\n model_name='phrasecard',\n name='creation_time',\n field=models.DateTimeField(default=django.utils.timezone.now),\n ),\n migrations.AlterField(\n model_name='phrasecard',\n name='last_seen_date',\n field=models.DateTimeField(default=django.utils.timezone.now),\n ),\n migrations.AlterField(\n model_name='phrasecard',\n name='translate',\n field=models.TextField(blank=True),\n ),\n migrations.AlterField(\n model_name='phrasecard',\n name='update_time',\n field=models.DateTimeField(default=django.utils.timezone.now),\n ),\n migrations.AlterField(\n model_name='word',\n name='creation_date',\n field=models.DateTimeField(default=django.utils.timezone.now),\n ),\n migrations.AlterField(\n model_name='word',\n name='translate',\n field=models.TextField(blank=True),\n ),\n migrations.AddField(\n model_name='phrasecard',\n name='deck',\n field=models.ForeignKey(default='1', to='cards.Deck'),\n preserve_default=False,\n ),\n ]\n","sub_path":"cards/migrations/0003_auto_20151022_2342.py","file_name":"0003_auto_20151022_2342.py","file_ext":"py","file_size_in_byte":2079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"467916733","text":"import sys\nsys.setrecursionlimit(2001)\ndef debug(*args, **kwargs):\n print(*args, file=sys.stderr, **kwargs)\n\nclass Node:\n def __init__(self, left, right):\n self.left = left\n self.right = right\n\ndef solve():\n nodes = {}\n def preorder(val, arr):\n if val == 0:\n return\n arr.append(val)\n preorder(nodes[val].left, arr)\n preorder(nodes[val].right, arr)\n\n def postorder(val, arr):\n if val == 0:\n return\n postorder(nodes[val].left, arr)\n postorder(nodes[val].right, arr)\n arr.append(val)\n\n n, k = map(int, input().split())\n for i in range(1, n + 1):\n a, b = map(int, input().split())\n nodes[i] = Node(a, b)\n pre = []\n post = []\n preorder(1, pre)\n postorder(1, post)\n\n M = {}\n for pr, ps in zip(pre, post):\n M[pr] = ps\n s = set(range(1, n + 1))\n A = []\n while s:\n e = s.pop()\n x = e\n tempset = {e}\n while M[x] != e:\n x = M[x]\n assert x not in tempset\n tempset.add(x)\n s.remove(x)\n A.append(tempset)\n debug(A)\n if k > len(A):\n print(\"Impossible\")\n debug(f\"k > len(A) ({k} > {len(A)})\")\n return\n L = len(A)\n ans = [-1] * n\n for i in range(L):\n ii = i + 1\n if ii > k:\n ii = 1\n for e in A[i]:\n ans[e - 1] = ii\n print(*ans)\n\nfor T in 
range(int(input())):\n print(f\"Case #{T+1}:\", end=\" \")\n solve()\n debug(f\"Case #{T+1} solved!\\n\")\n # break\n","sub_path":"HackerCup/18/HackerCupR1/ethan_traverses_a_tree.py","file_name":"ethan_traverses_a_tree.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"418966793","text":"from qualang_tools.bakery.bakery import Baking\n\nfrom qualang_tools.bakery.xeb import XEB, XEBOpsSingleQubit\nfrom xeb_config import config, pulse_len\nfrom qm import SimulationConfig\nfrom qm.QmJob import QmJob\nfrom qm.qua import *\nfrom qm.QuantumMachinesManager import QuantumMachinesManager\n\nimport matplotlib.pyplot as plt\n\n\ndef id1(baking: Baking):\n baking.wait(pulse_len, \"q1\")\n\n\ndef id2(baking: Baking):\n baking.wait(pulse_len, \"q2\")\n\n\ndef baked_cphase(baking: Baking):\n baking.play(\"coupler_op\", \"coupler\")\n\n\ndef sx1(baking: Baking):\n baking.play(\"sx\", \"q1\")\n\n\ndef sx2(baking: Baking):\n baking.play(\"sx\", \"q2\")\n\n\ndef sy1(baking: Baking):\n baking.play(\"sy\", \"q1\")\n\n\ndef sy2(baking: Baking):\n baking.play(\"sy\", \"q2\")\n\n\ndef sw1(baking: Baking):\n baking.frame_rotation_2pi(0.125, \"q1\")\n baking.play(\"sx\", \"q1\")\n baking.frame_rotation_2pi(-0.125, \"q1\")\n\n\ndef sw2(baking: Baking):\n baking.frame_rotation_2pi(0.125, \"q2\")\n baking.play(\"sx\", \"q2\")\n baking.frame_rotation_2pi(-0.125, \"q2\")\n\n\ndef align_op(baking: Baking):\n baking.align(\"q1\", \"q2\", \"coupler\")\n\n\nxeb = XEB(\n config,\n m_max=10,\n q1_ops=XEBOpsSingleQubit(id=id1, sx=sx1, sy=sy1, sw=sw1),\n q2_ops=XEBOpsSingleQubit(id=id2, sx=sx2, sy=sy2, sw=sw2),\n two_qubit_op=baked_cphase,\n align_op=align_op,\n)\n\nwith program() as prog:\n truncate = declare(int)\n truncate_array = declare(int, value=[x // 4 for x in xeb.duration_tracker])\n\n I1 = declare(fixed)\n I2 = declare(fixed)\n with for_each_(truncate, truncate_array):\n for element in xeb.baked_sequence.elements:\n play(xeb.baked_sequence.operations[element], element, truncate=truncate)\n align()\n measure(\"readout\", \"rr\", None, demod.full(\"integW1\", I1, \"out1\"))\n save(I1, \"I1\")\n measure(\"readout\", \"rr\", None, demod.full(\"integW1\", I2, \"out1\"))\n save(I2, \"I2\")\n\nqmm = QuantumMachinesManager()\njob: QmJob = qmm.simulate(config, prog, SimulationConfig(1500))\njob.get_simulated_samples().con1.plot()\n\nplt.show()\n","sub_path":"examples-old/bakery/XEB/xeb_example.py","file_name":"xeb_example.py","file_ext":"py","file_size_in_byte":2038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"49234450","text":"import sys\nfrom math import ceil\n\ndef calc(a, b, ope):\n if ope == '+':\n return a + b\n elif ope == '-':\n return a - b\n else:\n return a * b\n\n\ndef find_max(left=0, total=0):\n global res\n if left >= end:\n if res < total:\n res = total\n return\n \n ope = expr[left*2-1]\n find_max(left+1, calc(total, int(expr[left*2]), ope))\n if left < end - 1:\n find_max(left+2, calc(total, subtotal[left], ope))\n\n\ninput = sys.stdin.readline\nN = int(input())\nexpr = input()\nend = ceil(N / 2)\nsubtotal = [0] * (N // 2)\nfor i in range(0, N - 2, 2):\n a = int(expr[i])\n ope = expr[i+1]\n b = int(expr[i+2])\n subtotal[i//2] = calc(a, b, ope)\n\nres = -2 ** 31\nfind_max(1, int(expr[0]))\nif N // 2 > 1:\n find_max(2, subtotal[0])\nprint(res)\n","sub_path":"Python/BOJ/16637_괄호 추가하기.py","file_name":"16637_괄호 
추가하기.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"530935569","text":"'''\r\nCopyright (C) 2015 Pistiwique, Pitiwazou\r\n \r\nCreated by Pistiwique, Pitiwazou\r\n \r\n This program is free software: you can redistribute it and/or modify\r\n it under the terms of the GNU General Public License as published by\r\n the Free Software Foundation, either version 3 of the License, or\r\n (at your option) any later version.\r\n \r\n This program is distributed in the hope that it will be useful,\r\n but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\n GNU General Public License for more details.\r\n \r\n You should have received a copy of the GNU General Public License\r\n along with this program. If not, see .\r\n'''\r\n \r\nimport bpy, shutil, pickle\r\nfrom bpy.types import Operator\r\nfrom bpy_extras.io_utils import ImportHelper\r\nfrom bpy.props import StringProperty\r\nfrom os import remove, listdir\r\nfrom os.path import join, isfile, dirname\r\nfrom ..function_utils.get_path import (get_directory,\r\n get_library_path,\r\n )\r\nfrom ..preview.preview_utils import update_pcoll_preview\r\n \r\nclass ImportFilesCollection(bpy.types.PropertyGroup):\r\n name = StringProperty(\r\n name = \"File Path\",\r\n description = \"Filepath used for importing the file\",\r\n maxlen = 1024,\r\n subtype = 'FILE_PATH',\r\n )\r\n \r\nbpy.utils.register_class(ImportFilesCollection)\r\n \r\n# ------------------------------------------------------------------\r\n#\r\n# ------------------------------------------------------------------\r\n \r\nclass IblImporter(Operator, ImportHelper):\r\n \"\"\" Add the selected hdri with the .hdr ext in your library \"\"\"\r\n bl_idname = \"wm.ibl_importer\"\r\n bl_label = \"IBL Importer\"\r\n \r\n filter_glob = StringProperty(\r\n default = \"*.hdr;*.exr;*.jpg;*.jpeg;*.png;*.tif\",\r\n options = {'HIDDEN'},\r\n )\r\n \r\n filepath = StringProperty(\r\n name = \"File Path\",\r\n maxlen = 1024,\r\n subtype = 'FILE_PATH',\r\n )\r\n \r\n files = bpy.props.CollectionProperty(type = ImportFilesCollection)\r\n \r\n def execute(self, context):\r\n AM = context.window_manager.asset_m\r\n IBL = get_directory(\"IBL\")\r\n icons = get_directory(\"icons\")\r\n path = dirname(self.filepath)\r\n ibl_ext = (\".hdr\", \".exr\", \".jpg\", \".jpeg\", \".png\", \".tif\")\r\n extentions = (\".jpg\", \".jpeg\", \".png\")\r\n \r\n # thumbnail list present in the source ibl folder to import\r\n existing_thumb = [f for f in listdir(path) if f.endswith(extentions)]\r\n # list of ibl names\r\n IBL_names = [f.name for f in self.files]\r\n # thumbnail list of ibl library\r\n Thumb_list = [f.rsplit(\".\", 1)[0] for f in listdir(icons) if f.endswith(extentions)]\r\n # ensures the ibl we want to add was not already in the library\r\n valid_IBL = [f for f in IBL_names if f.rsplit(\".\", 1)[0] not in Thumb_list]\r\n \r\n for ibl in valid_IBL:\r\n # ensures the ibl we want to add was not already added since \"valid_IBL\" with a different file extention\r\n if ibl.rsplit(\".\", 1)[0] not in [f.rsplit(\".\", 1)[0] for f in listdir(IBL) if f.endswith(ibl_ext)]:\r\n shutil.copy(join(path, ibl), join(IBL, ibl))\r\n \r\n if not AM.existing_thumb:\r\n AM.ibl_to_thumb.append(ibl)\r\n else:\r\n thumb = [f for f in existing_thumb if f.rsplit(\".\", 1)[0] == ibl.rsplit(\".\", 1)[0]]\r\n \r\n if thumb:\r\n shutil.copy(join(path, thumb[0]), 
join(icons, thumb[0]))\r\n \r\n else:\r\n AM.ibl_to_thumb.append(ibl)\r\n \r\n if AM.ibl_to_thumb:\r\n bpy.ops.object.run_generate_thumbnails('INVOKE_DEFAULT')\r\n \r\n else:\r\n if isfile(join(icons, \"EMPTY.png\")):\r\n remove(join(icons, \"EMPTY.png\"))\r\n \r\n update_pcoll_preview()\r\n \r\n AM.adding_options = False\r\n return {\"FINISHED\"}\r\n\r\n# ------------------------------------------------------------------\r\n#\r\n# ------------------------------------------------------------------\r\n\r\nclass SaveIblSettings(Operator):\r\n \"\"\" Save the settings of the current IBL \"\"\"\r\n bl_idname = \"wm.save_ibl_settings\"\r\n bl_label = \"Save Ibl Settings\"\r\n bl_options = {\"REGISTER\"}\r\n\r\n def execute(self, context):\r\n WM = context.window_manager\r\n AM = WM.asset_m\r\n library_path = get_library_path()\r\n \r\n ibl_name = WM.AssetM_previews.rsplit(\".\", 1)[0]\r\n filename = ibl_name + \"_settings\"\r\n \r\n world = bpy.data.worlds['AM_IBL_WORLD']\r\n node = world.node_tree.nodes\r\n\r\n setting = {\"rotation\": tuple(node['Mapping'].rotation),\\\r\n \"projection\": AM.projection,\\\r\n \"blur\": node['ImageBlur'].inputs[1].default_value,\\\r\n \"visible\": world.cycles_visibility.camera,\\\r\n \"transparent\": context.scene.cycles.film_transparent,\\\r\n \"gamma\": node['AM_IBL_Tool'].inputs[0].default_value,\\\r\n \"L_strengh\": node['AM_IBL_Tool'].inputs[2].default_value,\\\r\n \"L_saturation\": node['AM_IBL_Tool'].inputs[3].default_value,\\\r\n \"L_hue\": tuple(node['AM_IBL_Tool'].inputs[4].default_value),\\\r\n \"L_mix\": node['AM_IBL_Tool'].inputs[5].default_value,\\\r\n \"G_strengh\": node['AM_IBL_Tool'].inputs[7].default_value,\\\r\n \"G_saturation\": node['AM_IBL_Tool'].inputs[8].default_value,\\\r\n \"G_hue\": tuple(node['AM_IBL_Tool'].inputs[9].default_value),\\\r\n \"G_mix\": node['AM_IBL_Tool'].inputs[10].default_value\r\n }\r\n \r\n with open(join(library_path, AM.library_type, AM.libraries, AM.categories, \"IBL\", filename), \"wb\") as file:\r\n pkl_save = pickle.Pickler(file)\r\n pkl_save.dump(setting)\r\n \r\n return {\"FINISHED\"}\r\n\r\n# ------------------------------------------------------------------\r\n#\r\n# ------------------------------------------------------------------\r\n\r\nclass LoadIblSettings(Operator):\r\n \"\"\" Load the settings of the current IBL \"\"\"\r\n bl_idname = \"wm.load_ibl_settings\"\r\n bl_label = \"Load Ibl Settings\"\r\n bl_options = {\"REGISTER\"}\r\n\r\n def execute(self, context):\r\n WM = context.window_manager\r\n AM = WM.asset_m\r\n library_path = get_library_path()\r\n \r\n ibl_name = WM.AssetM_previews.rsplit(\".\", 1)[0]\r\n filename = ibl_name + \"_settings\"\r\n world = bpy.data.worlds['AM_IBL_WORLD']\r\n node = world.node_tree.nodes\r\n \r\n with open(join(library_path, AM.library_type, AM.libraries, AM.categories, \"IBL\", filename), \"rb\") as file:\r\n pkl_load = pickle.Unpickler(file)\r\n setting = pkl_load.load()\r\n \r\n node['Mapping'].rotation = setting[\"rotation\"]\r\n AM.projection = setting[\"projection\"]\r\n node['ImageBlur'].inputs[1].default_value = setting[\"blur\"]\r\n world.cycles_visibility.camera = setting[\"visible\"]\r\n bpy.context.scene.cycles.film_transparent = setting[\"transparent\"]\r\n node['AM_IBL_Tool'].inputs[0].default_value = setting[\"gamma\"]\r\n node['AM_IBL_Tool'].inputs[2].default_value = setting[\"L_strengh\"]\r\n node['AM_IBL_Tool'].inputs[3].default_value = setting[\"L_saturation\"]\r\n node['AM_IBL_Tool'].inputs[4].default_value = setting[\"L_hue\"]\r\n 
node['AM_IBL_Tool'].inputs[5].default_value = setting[\"L_mix\"]\r\n        node['AM_IBL_Tool'].inputs[7].default_value = setting[\"G_strengh\"]\r\n        node['AM_IBL_Tool'].inputs[8].default_value = setting[\"G_saturation\"]\r\n        node['AM_IBL_Tool'].inputs[9].default_value = setting[\"G_hue\"]\r\n        node['AM_IBL_Tool'].inputs[10].default_value = setting[\"G_mix\"]\r\n        \r\n        return {\"FINISHED\"}","sub_path":"All_In_One/addons/asset_management/ibl/ibl_op.py","file_name":"ibl_op.py","file_ext":"py","file_size_in_byte":8025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}{"seq_id":"362081607","text":"n = 3\nk = 1\nq = 4\nswaps = [(1,2),(2,3),(3,1),(1,2)]\ndef shuffle(n,k,swaps):\n    pos = 1\n    count = 0\n    cOne = shuffle_findPattern(n,swaps)\n    if cOne>k:\n        k%=cOne\n    while count 0)].reset_index(drop=True)\n\n    today_transactions['time'] = today_transactions['time'].astype(str)\n\n    today_leonidas = today_transactions[today_transactions.warehouse_id == '5'].reset_index(drop=True)\n    today_logona = today_transactions[today_transactions.warehouse_id == '3'].reset_index(drop=True)\n\n    daily_sum = today_transactions.transaction_price.sum()\n    daily_unique_transactions = len(today_transactions.BR_DOK.unique())\n    st.header(f'Вкупно {daily_unique_transactions} продажби и {int(daily_sum)} промет во денари')\n    st.header(\n        f'Леонидас: {int(today_leonidas.transaction_price.sum())} Логона: {int(today_logona.transaction_price.sum())} денари')\n\n    for i, transaction_id in enumerate(today_transactions.BR_DOK.unique()):\n        single_transaction = today_transactions[today_transactions.BR_DOK == transaction_id].reset_index(drop=True)\n        transaction_sum = single_transaction.transaction_price.sum()\n        st.subheader(f'Продажба {i + 1}')\n        cols = st.beta_columns(3)\n        cols[0].subheader(f'{single_transaction.time.values[0].strip()}')\n        cols[1].subheader(f'{single_transaction.employee_name.values[0]}')\n        cols[2].subheader(f'{int(transaction_sum)} денари')\n\n        display_df = single_transaction[['product_name', 'quantity', 'transaction_price']]\n        display_df.columns = ['Име на продукт', 'Количина', 'Цена']\n\n        st.table(display_df)\n","sub_path":"daily_transactions.py","file_name":"daily_transactions.py","file_ext":"py","file_size_in_byte":1886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}{"seq_id":"153582906","text":"import re\nimport urllib.request\nimport math\nimport sys\nimport time\n\n# Tests ping of random number generators\ndef get_random_numbers(url, line):\n    ping_amount = 10\n    avg = 0\n    response = \"\"\n    for i in range(ping_amount):\n        start = time.time()\n        try:\n            webpage = urllib.request.urlopen(url)\n        except urllib.error.URLError as e:\n            response = str(e.reason)\n            print(response)\n            return response\n        loading_page = webpage.read()\n        end = time.time()\n        webpage.close()\n        avg += end - start\n\n    # Average out the pings and convert to milliseconds\n    avg = (avg / ping_amount)*1000\n    # return the time to ping\n    response = str(math.ceil(avg))+ \"ms\"\n    print(response)\n    return response\n\n\nif __name__ == \"__main__\":\n    if len(sys.argv) < 2:\n        print(\"Please provide a text file with URLs\")\n    else:\n        # Get the file from the second arg\n        responses = []\n        with open(sys.argv[1], 'r') as file, open(\"latency_log.csv\",\"w+\") as out:\n            out.write(\"info,url,latency(ms)\\n\")\n            for line in file:\n                try:\n                    info, url = line.split('@')\n                    if (\"http://\"not in url) and (\"https://\" not in url):\n                        url = \"http://\" + url\n                    # Get the random numbers\n                    result = 
get_random_numbers(url, line)\n responses.append(result)\n csvrow = info + \",\" + url.rstrip('\\n') + \",\" + result + '\\n'\n out.write(csvrow)\n except ValueError as e:\n print(e.reason)\n file.close()\n # Log the results\n logfile = open(\"timing_experiment_test_log.txt\",\"w+\")\n for i in range(len(responses)):\n line = responses[i] + '\\n'\n logfile.write(line)\n logfile.close()\n\n","sub_path":"Timing/timing_experiment_csv.py","file_name":"timing_experiment_csv.py","file_ext":"py","file_size_in_byte":1896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"142541037","text":"from typing import Any\n\nimport proto\n\nclass AssetLinkErrorEnum(proto.Message):\n class AssetLinkError(proto.Enum):\n UNSPECIFIED = 0\n UNKNOWN = 1\n PINNING_UNSUPPORTED = 2\n UNSUPPORTED_FIELD_TYPE = 3\n FIELD_TYPE_INCOMPATIBLE_WITH_ASSET_TYPE = 4\n FIELD_TYPE_INCOMPATIBLE_WITH_CAMPAIGN_TYPE = 5\n INCOMPATIBLE_ADVERTISING_CHANNEL_TYPE = 6\n IMAGE_NOT_WITHIN_SPECIFIED_DIMENSION_RANGE = 7\n INVALID_PINNED_FIELD = 8\n MEDIA_BUNDLE_ASSET_FILE_SIZE_TOO_LARGE = 9\n NOT_ENOUGH_AVAILABLE_ASSET_LINKS_FOR_VALID_COMBINATION = 10\n NOT_ENOUGH_AVAILABLE_ASSET_LINKS_WITH_FALLBACK = 11\n NOT_ENOUGH_AVAILABLE_ASSET_LINKS_WITH_FALLBACK_FOR_VALID_COMBINATION = 12\n YOUTUBE_VIDEO_REMOVED = 13\n YOUTUBE_VIDEO_TOO_LONG = 14\n YOUTUBE_VIDEO_TOO_SHORT = 15\n EXCLUDED_PARENT_FIELD_TYPE = 16\n INVALID_STATUS = 17\n YOUTUBE_VIDEO_DURATION_NOT_DEFINED = 18\n CANNOT_CREATE_AUTOMATICALLY_CREATED_LINKS = 19\n CANNOT_LINK_TO_AUTOMATICALLY_CREATED_ASSET = 20\n CANNOT_MODIFY_ASSET_LINK_SOURCE = 21\n CANNOT_LINK_LOCATION_LEAD_FORM_WITHOUT_LOCATION_ASSET = 22\n def __init__(\n self,\n mapping: Any | None = ...,\n *,\n ignore_unknown_fields: bool = ...,\n ) -> None: ...\n","sub_path":"google-stubs/ads/googleads/v14/errors/types/asset_link_error.pyi","file_name":"asset_link_error.pyi","file_ext":"pyi","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"492655474","text":"# also get custom exceptions in here. clean out the imports when everything is figured out\nimport sys\nimport gin\nimport numpy as np\nimport pickle\nimport os\nimport logging\nimport tensorflow as tf\nimport json\n\nfrom google.protobuf.json_format import MessageToJson\nfrom absl import flags\nfrom tqdm import tqdm\nfrom pysc2.lib import actions, features, protocol, point\nfrom pysc2.env import sc2_env\nfrom . import Env, Spec, Space\nfrom reaver.utils.logger import base_logger\nfrom absl import app, flags\nfrom pysc2.env.environment import TimeStep, StepType\nfrom pysc2 import run_configs\nfrom s2clientprotocol import sc2api_pb2 as sc_pb\nfrom s2clientprotocol import common_pb2 as sc_common\n\n\nclass Error(Exception):\n \"\"\" Base class for exceptions in this module \"\"\"\n pass\n\nclass ReplayError(Error):\n def __init__(self, message):\n super().__init__(message) # previously had the two params. is it the same?\n\ndef _assert_compat_version(replay_path):\n raise ReplayError(f\"Version is incompatible: {replay_path+'.SC2Replay'}\")\n\ndef _assert_not_corrupt(replay_path):\n raise ReplayError(f\"Replay may be corrupt: {replay_path+'.SC2Replay'}\")\n\ndef _assert_useful(replay_path):\n raise ReplayError(f\"Replay not useful for learning purposes. 
Could be too short or low MMR: {replay_path+'.SC2Replay'}\")\n\ndef _assert_misc_error(replay_path):\n raise ReplayError(f\"Replay could not be loaded: {replay_path+'.SC2Replay'}\")\n\nACTIONS_MINIGAMES, ACTIONS_MINIGAMES_ALL, ACTIONS_ALL = ['minigames', 'minigames_all', 'all']\nlogger = base_logger(\"./sc2env.log\")\n\n\n@gin.configurable\nclass SC2Env(Env):\n \"\"\"\n 'minigames' action set is enough to solve all minigames listed in SC2LE\n 'minigames_all' expands that set with actions that may improve end results, but will drop performance\n 'all' is the full action set, only necessary for generic agent playing full game with all three races\n\n You can also specify your own action set in the gin config file under SC2Env.action_ids\n Full list of available actions https://github.com/deepmind/pysc2/blob/master/pysc2/lib/actions.py#L447-L1008\n \"\"\"\n def __init__(\n self,\n map_name='MoveToBeacon',\n render=False,\n reset_done=True,\n max_ep_len=None,\n spatial_dim=16,\n step_mul=8,\n obs_features=None,\n action_ids=ACTIONS_MINIGAMES\n ):\n super().__init__(map_name, render, reset_done, max_ep_len) # why super here, not in agents?\n\n self.step_mul = step_mul\n self.spatial_dim = spatial_dim\n self._env = None\n\n # sensible action set for all minigames\n if not action_ids or action_ids in [ACTIONS_MINIGAMES, ACTIONS_MINIGAMES_ALL]:\n action_ids = [0, 1, 2, 3, 4, 6, 7, 12, 13, 42, 44, 50, 91, 183, 234, 309, 331, 332, 333, 334, 451, 452, 490]\n\n # some additional actions for minigames (not necessary to solve)\n if action_ids == ACTIONS_MINIGAMES_ALL:\n action_ids += [11, 71, 72, 73, 74, 79, 140, 168, 239, 261, 264, 269, 274, 318, 335, 336, 453, 477]\n\n # full action space, including outdated / unusable to current race / usable only in certain cases\n if action_ids == ACTIONS_ALL:\n action_ids = [f.id for f in actions.FUNCTIONS]\n\n # by default use majority of obs features, except for some that are unnecessary for minigames\n # e.g. 
race-specific like creep and shields or redundant like player_id\n if not obs_features:\n obs_features = {\n 'screen': ['player_relative', 'selected', 'visibility_map', 'unit_hit_points_ratio', 'unit_density'],\n 'minimap': ['player_relative', 'selected', 'visibility_map', 'camera'],\n # available actions should always be present and in first position\n 'non-spatial': ['available_actions', 'player']}\n\n self.act_wrapper = ActionWrapper(spatial_dim, action_ids)\n self.obs_wrapper = ObservationWrapper(obs_features, action_ids)\n\n def start(self):\n # importing here to lazy-load\n from pysc2.env import sc2_env\n\n # fail-safe if executed not as absl app\n if not flags.FLAGS.is_parsed():\n flags.FLAGS(sys.argv)\n\n self._env = sc2_env.SC2Env(\n players=[sc2_env.Agent(sc2_env.Race.terran)], # **is there a way to pull this dynamically\n map_name=self.id,\n visualize=self.render,\n agent_interface_format=[features.parse_agent_interface_format(\n feature_screen=self.spatial_dim,\n feature_minimap=self.spatial_dim,\n rgb_screen=None,\n rgb_minimap=None\n )],\n step_mul=self.step_mul,)\n\n def step(self, action):\n try:\n obs, reward, done = self.obs_wrapper(self._env.step(self.act_wrapper(action))) # this is where it would bed __call__'ed\n except protocol.ConnectionError:\n # hacky fix from websocket timeout issue...\n # this results in faulty reward signals, but I guess it beats completely crashing...\n self.restart()\n return self.reset(), 0, 1\n\n if done and self.reset_done:\n obs = self.reset()\n\n return obs, reward, done\n\n def reset(self):\n # logger.info(f\"env.reset(): {self.obs_wrapper(self._env.reset())}\")\n try:\n obs, reward, done = self.obs_wrapper(self._env.reset())\n except protocol.ConnectionError:\n # hacky fix from websocket timeout issue...\n # this results in faulty reward signals, but I guess it beats completely crashing...\n self.restart()\n return self.reset()\n\n return obs\n\n def stop(self):\n self._env.close()\n\n def restart(self):\n self.stop()\n self.start()\n\n def obs_spec(self):\n if not self.obs_wrapper.spec:\n self.make_specs()\n return self.obs_wrapper.spec\n\n def act_spec(self):\n if not self.act_wrapper.spec:\n self.make_specs()\n return self.act_wrapper.spec\n\n def make_specs(self):\n # importing here to lazy-load\n from pysc2.env import mock_sc2_env\n try:\n mock_env = mock_sc2_env.SC2TestEnv(map_name=self.id, agent_interface_format=[ # will this throw an error?\n features.parse_agent_interface_format(feature_screen=self.spatial_dim, feature_minimap=self.spatial_dim)])\n self.act_wrapper.make_spec(mock_env.action_spec())\n self.obs_wrapper.make_spec(mock_env.observation_spec())\n mock_env.close()\n except Exception:\n logger.info(\"Is this a controller-based environment?\") # says map_name is ignored? If it is, I should get rid of and just use above\n mock_env = mock_sc2_env.SC2TestEnv(agent_interface_format=[ # this will throw error. 
can i just set to a \"default\"\n features.parse_agent_interface_format(feature_screen=self.spatial_dim, feature_minimap=self.spatial_dim)])\n self.act_wrapper.make_spec(mock_env.action_spec())\n self.obs_wrapper.make_spec(mock_env.observation_spec())\n mock_env.close()\n\n\nclass SC2ControllerEnv(SC2Env):\n def __init__(\n self,\n replay_file_path,\n map_name=\"MoveToBeacon\",\n base_path=\"C:/Users/lbianculli/reaver_replays/\",\n player_id=1,\n render=False,\n reset_done=True,\n max_ep_len=None,\n spatial_dim=16,\n step_mul=8, # should be 8 (?)\n obs_features=None,\n frames_per_game=1,\n discount=.99\n ):\n super().__init__(map_name, render, reset_done, max_ep_len)\n if not flags.FLAGS.is_parsed():\n flags.FLAGS(sys.argv)\n\n self.n_agents = [None] #***\n self.run_config = run_configs.get()\n self.sc2_proc = self.run_config.start()\n self.controller = self.sc2_proc.controller\n self.player_id = player_id\n self.discount = discount\n self.replay_file_name = replay_file_path.split(\"/\")[-1].split(\".\")[0]\n\n self.frames_per_game = frames_per_game\n self.step_mul = step_mul\n self.spatial_dim = spatial_dim\n self.map_size = (spatial_dim, spatial_dim)\n self._env = None\n if not os.path.exists(base_path):\n os.makedirs(base_path)\n self.save_path = base_path+self.replay_file_name+\".p\" # i feel like the root is available implicitly\n\n self._episode_steps= 0\n\n action_ids = [f.id for f in actions.FUNCTIONS]\n\n if not obs_features:\n obs_features = {\n 'screen': ['player_relative', 'selected', 'visibility_map', 'unit_hit_points_ratio', 'unit_density'],\n 'minimap': ['player_relative', 'selected', 'visibility_map', 'camera'],\n # available actions should always be present and in first position\n 'non-spatial': ['available_actions', 'player']}\n\n self.act_wrapper = ActionWrapper(spatial_dim, action_ids)\n self.obs_wrapper = ObservationWrapper(obs_features, action_ids)\n\n self.aif = features.AgentInterfaceFormat(\n feature_dimensions=features.Dimensions(screen=spatial_dim, minimap=spatial_dim),\n use_feature_units=True)\n\n self.replay_data = self.run_config.replay_data(self.replay_file_name + '.SC2Replay')\n ping = self.controller.ping()\n self.info = self.controller.replay_info(self.replay_data)\n\n if self._valid_replay(self.info, ping) == \"version\":\n self.sc2_proc.close()\n _assert_compat_version(self.replay_file_name)\n if self._valid_replay(self.info, ping) == \"corrupt\":\n self.sc2_proc.close()\n _assert_not_corrupt(self.replay_file_name)\n if self._valid_replay(self.info, ping) == \"not_useful\":\n self.sc2_proc.close()\n _assert_useful(self.replay_file_name)\n\n # do i need this block?\n screen_size_px = point.Point(*self.map_size)\n minimap_size_px = point.Point(*self.map_size)\n self.interface = sc_pb.InterfaceOptions(\n raw=False, score=True,\n feature_layer=sc_pb.SpatialCameraSetup(width=self.spatial_dim))\n screen_size_px.assign_to(self.interface.feature_layer.resolution)\n minimap_size_px.assign_to(self.interface.feature_layer.minimap_resolution) # this is working\n\n self.map_data = None\n if self.info.local_map_path:\n self.map_data = self.run_config.map_data(self.info.local_map_path)\n\n self._episode_length = self.info.game_duration_loops\n\n def start(self):\n \"\"\" get features and begin \"\"\"\n from pysc2.env import sc2_env\n\n if not flags.FLAGS.is_parsed():\n flags.FLAGS(sys.argv)\n\n self.controller.start_replay(sc_pb.RequestStartReplay(\n replay_data=self.replay_data,\n map_data=self.map_data,\n options=self.interface,\n 
observed_player_id=self.player_id))\n\n game_info = MessageToJson(self.controller.game_info())\n game_info = json.loads(game_info)\n # at some point, confirm that the first dict is always playerID 1\n # also: how and why does this differ from batch generator file?\n if game_info[\"playerInfo\"][0][\"playerId\"] == self.player_id: # *** remember, i have this.\n self.race = game_info[\"playerInfo\"][0][\"raceActual\"]\n else:\n self.race = game_info[\"playerInfo\"][1][\"raceActual\"]\n\n map_point = point.Point(*self.map_size)\n self._features = features.Features(self.aif, map_point)\n\n def step(self):\n try:\n skips = self.step_mul\n if (self._episode_steps + self.step_mul) > self._episode_length:\n skips = self._episode_length - self._episode_steps\n\n self._episode_steps += skips\n self.controller.step(skips)\n\n obs = [self.controller.observe()]\n agent_obs = [self._features.transform_obs(o) for o in obs]\n\n if obs[0].player_result: # Episode over.\n self._state = StepType.LAST\n logger.info(\"\\nLAST STEP\")\n discount = 0.\n else:\n self._state = StepType.MID\n discount = self.discount\n\n step = tuple(\n TimeStep(step_type=self._state, reward=0, # changed rew from float to int.\n discount=discount, observation=obs) for obs in agent_obs)\n\n obs, reward, done = self.obs_wrapper(step)\n\n return obs, reward, done\n\n except protocol.ProtocolError:\n self._state = StepType.LAST\n self.stop()\n logger.info(\"Replay complete. Protocol Error Thrown\")\n\n def reset(self):\n self._state = StepType.FIRST\n try:\n obs = [self.controller.observe()]\n agent_obs = [self._features.transform_obs(o) for o in obs]\n step = tuple(TimeStep(step_type=self._state, reward=0.,\n discount=0., observation=obs) for obs in agent_obs)\n\n obs, reward, done = self.obs_wrapper(step)\n # logger.info(f\"reset reward: {reward}\")\n\n except protocol.ConnectionError:\n self.restart()\n return self.reset()\n\n return obs\n\n def restart(self):\n self.stop()\n self.start()\n\n def stop(self):\n self.controller.close()\n\n @staticmethod\n def _valid_replay(info, ping):\n \"\"\"\n Make sure the replay isn't corrupt, and is worth looking at.\n Could I use the below logic to raise varying exceptions?\n \"\"\"\n if info.HasField(\"error\"):\n return \"corrupt\"\n if info.base_build != ping.base_build:\n return \"version\"\n if info.game_duration_loops < 1000 or len(info.player_info) != 2:\n return \"not_useful\"\n for p in info.player_info:\n if p.player_apm < 50 or (p.player_mmr != 0 and p.player_mmr < 2000):\n return \"not_useful\"\n\nclass ObservationWrapper:\n def __init__(self, _features=None, action_ids=None):\n self.spec = None\n self.features = _features\n self.action_ids = action_ids\n\n screen_feature_to_idx = {feat: idx for idx, feat in enumerate(features.SCREEN_FEATURES._fields)}\n minimap_feature_to_idx = {feat: idx for idx, feat in enumerate(features.MINIMAP_FEATURES._fields)}\n\n self.feature_masks = {\n 'screen': [screen_feature_to_idx[f] for f in _features['screen']],\n 'minimap': [minimap_feature_to_idx[f] for f in _features['minimap']]\n }\n\n def __call__(self, timestep):\n ts = timestep[0]\n obs, reward, done = ts.observation, ts.reward, ts.step_type == StepType.LAST\n # logger.info(f\"ts.reward: {reward}\")\n\n obs_wrapped = [\n obs['feature_screen'][self.feature_masks['screen']],\n obs['feature_minimap'][self.feature_masks['minimap']]\n ]\n for feat_name in self.features['non-spatial']:\n if feat_name == 'available_actions':\n fn_ids_idxs = [i for i, fn_id in enumerate(self.action_ids) if fn_id in 
obs[feat_name]]\n mask = np.zeros((len(self.action_ids),), dtype=np.int32)\n mask[fn_ids_idxs] = 1\n obs[feat_name] = mask\n obs_wrapped.append(obs[feat_name])\n\n # logger.info(f\"ts: {ts}\")\n # logger.info(f\"len ts: {len(ts)}\")\n # logger.info(f\"ts.observation: {ts.observation}\")\n # logger.info(f\"obs wrapped: {obs_wrapped}\")\n # logger.info(f\"len(obs wrapped): {len(obs_wrapped)}\")\n\n return obs_wrapped, reward, done\n\n def make_spec(self, spec):\n spec = spec[0]\n\n default_dims = {\n 'available_actions': (len(self.action_ids), ),\n }\n\n screen_shape = (len(self.features['screen']), *spec['feature_screen'][1:])\n minimap_shape = (len(self.features['minimap']), *spec['feature_minimap'][1:])\n screen_dims = get_spatial_dims(self.features['screen'], features.SCREEN_FEATURES)\n minimap_dims = get_spatial_dims(self.features['minimap'], features.MINIMAP_FEATURES)\n\n spaces = [\n SC2Space(screen_shape, 'screen', self.features['screen'], screen_dims),\n SC2Space(minimap_shape, 'minimap', self.features['minimap'], minimap_dims),\n ]\n\n for feat in self.features['non-spatial']:\n if 0 in spec[feat]:\n spec[feat] = default_dims[feat]\n spaces.append(Space(spec[feat], name=feat))\n\n self.spec = Spec(spaces, 'Observation')\n\n\nclass ActionWrapper:\n def __init__(self, spatial_dim, action_ids, args=None):\n self.spec = None\n if not args:\n args = [\n 'screen',\n 'minimap',\n 'screen2',\n 'queued',\n 'control_group_act',\n 'control_group_id',\n 'select_add', # 6\n 'select_point_act', # 7\n 'select_unit_act',\n # 'select_unit_id'\n 'select_worker',\n 'build_queue_id',\n # 'unload_id'\n ]\n self.func_ids = action_ids\n self.args, self.spatial_dim = args, spatial_dim\n\n def __call__(self, action):\n \"\"\" returns list of functioncall(id, args) for input action\"\"\"\n defaults = {\n 'control_group_act': 0,\n 'control_group_id': 0,\n 'select_point_act': 0,\n 'select_unit_act': 0,\n 'select_unit_id': 0,\n 'build_queue_id': 0,\n 'unload_id': 0,\n }\n fn_id_idx, args = action.pop(0), []\n fn_id = self.func_ids[fn_id_idx]\n for arg_type in actions.FUNCTIONS[fn_id].args:\n arg_name = arg_type.name\n if arg_name in self.args:\n arg = action[self.args.index(arg_name)]\n # pysc2 expects all args in their separate lists\n if type(arg) not in [list, tuple]:\n arg = [arg]\n # pysc2 expects spatial coords, but we have flattened => attempt to fix\n if len(arg_type.sizes) > 1 and len(arg) == 1:\n arg = [arg[0] % self.spatial_dim, arg[0] // self.spatial_dim]\n args.append(arg)\n else:\n args.append([defaults[arg_name]])\n\n return [actions.FunctionCall(fn_id, args)]\n\n def make_spec(self, spec):\n spec = spec[0]\n\n spaces = [SC2FuncIdSpace(self.func_ids, self.args)]\n for arg_name in self.args:\n arg = getattr(spec.types, arg_name)\n if len(arg.sizes) > 1: # spatial\n spaces.append(Space(domain=(0, arg.sizes), categorical=True, name=arg_name))\n else: # else just take the value (arg)\n spaces.append(Space(domain=(0, arg.sizes[0]), categorical=True, name=arg_name))\n\n self.spec = Spec(spaces, \"Action\")\n\n\nclass SC2Space(Space):\n def __init__(self, shape, name, spatial_feats=None, spatial_dims=None):\n if spatial_feats:\n name += \"{%s}\" % \", \".join(spatial_feats)\n self.spatial_feats, self.spatial_dims = spatial_feats, spatial_dims\n\n super().__init__(shape, name=name)\n\n\nclass SC2FuncIdSpace(Space):\n def __init__(self, func_ids, args):\n super().__init__(domain=(0, len(func_ids)), categorical=True, name=\"function_id\")\n self.args_mask = []\n for fn_id in func_ids:\n fn_id_args = 
[arg_type.name for arg_type in actions.FUNCTIONS[fn_id].args]\n self.args_mask.append([arg in fn_id_args for arg in args])\n\n\ndef get_spatial_dims(feat_names, feats):\n feats_dims = []\n for feat_name in feat_names:\n feat = getattr(feats, feat_name)\n feats_dims.append(1)\n if feat.type == features.FeatureType.CATEGORICAL:\n feats_dims[-1] = feat.scale\n return feats_dims\n\n\n","sub_path":"envs/sc2.py","file_name":"sc2.py","file_ext":"py","file_size_in_byte":19970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"525630363","text":"import FWCore.ParameterSet.Config as cms\n\nmaxEvents = cms.untracked.PSet( input = cms.untracked.int32(10000) )\nreadFiles = cms.untracked.vstring()\nsecFiles = cms.untracked.vstring() \nsource = cms.Source (\"PoolSource\",fileNames = readFiles, secondaryFileNames = secFiles)\nreadFiles.extend( [ 'root://eoscms.cern.ch//store/group/phys_egamma/ElectronValidationArchives/MiniAOD_IDValidation/miniAOD-prod_PAT_zee14_50ns.root'\n ] );\n\n\nsecFiles.extend( [\n ] )\n\n","sub_path":"RecoEgamma/ElectronIdentification/python/Validation/DYJetsToLL_Sc1_MINIAOD.py","file_name":"DYJetsToLL_Sc1_MINIAOD.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"183737444","text":"\n\nclass Song:\n\n\tdef __init__(self, name):\n\t\tself.name = name\n\t\tself.album = None\n\t\tself.author = None\n\n\t@classmethod\n\tdef make_album_song(cls, name, album):\n\t\ts = Song(name)\n\t\ts.album = album\n\t\treturn s\n\n\nif __name__ == \"__main__\":\n\n\tfortune = Song('Fortune')\n\tprint(fortune.name)\n\n\thush = Song.make_album_song('Hush', 'All Boom')\n\tprint(hush.name)\n\tprint(hush.album)\n","sub_path":"effective_python/alternate_constructor.py","file_name":"alternate_constructor.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"479943505","text":"##coding:utf-8 指定编码格式\n\n#导入浏览器驱动\nfrom selenium import webdriver\nimport time\n\n#进入网站\nbrower=webdriver.Chrome()\n# brower = webdriver.Chrome() 设置了chromedriver的环境变量\nurl=\"http://localhost:8004/\"\nbrower.get(url)\n#print(\"open the http://localhost:8004\" )\n\n#浏览器最大化\nbrower.maximize_window()\n\n# 登录-1、找到用户名+赋值\nsearch_name=brower.find_element_by_css_selector(\"#txtLoginName\")\nsearch_name.clear()\nsearch_name.send_keys(\"admin\")\n\n# 登录-2、找到密码+赋值\nsearch_login=brower.find_element_by_css_selector(\"#txtLoginPass\")\nsearch_login.clear()\nsearch_login.send_keys(\"admin+123\")\n\n# 登录-3、点击登录按钮\nsearch_btn=brower.find_element_by_css_selector(\"#btnLogin\")\nsearch_btn.click()\ntime.sleep(1)\n\n#点击综合管理下拉菜单在押人员管理\nsearch_h1=brower.find_element_by_css_selector(\".accordion h1\").click()\nsearch_h2=brower.find_element_by_css_selector(\".accordion h2\").click()\n\n#进入iframe,使用标签进行定位\niframe = brower.find_elements_by_tag_name(\"iframe\")[0]\nbrower.switch_to.frame(iframe)\n\n#点击按新增钮事件\nsearch_h3=brower.find_element_by_css_selector(\".location_R li:first-child\").click()\n\n#点击新增按钮进行信息填写\n#进入iframe页面\niframe2 = brower.find_elements_by_tag_name(\"iframe\")[0]\nbrower.switch_to.frame(iframe2)\n\n#点击人员编号按钮,并且给它赋值\nsearch_h4=brower.find_element_by_css_selector(\"#txtNo\")\nsearch_h4.send_keys(\"02\")\n\n#点击姓名按钮,并且给它赋值\nsearch_h5=brower.find_element_by_css_selector(\"#txtName\")\nsearch_h5.send_keys(\"嘻嘻\")\n\n#点击下拉框所在区域进行选择\nsearch_h6=brower.find_element_by_css_selector(\".editPageTable 
tr:nth-child(7)\").click()\ntime.sleep(1)\nsearch_h7=brower.find_element_by_css_selector(\".editPageTable tr:nth-child(7) td.fieldInput select option:nth-child(3)\").click()\n\n#点击下拉框所在监视进行选择\nsearch_h8=brower.find_element_by_css_selector(\".editPageTable tr:nth-child(8)\").click()\ntime.sleep(1)\nsearch_h9=brower.find_element_by_css_selector(\".editPageTable tr:nth-child(8) td.fieldInput select option:nth-child(3)\").click()\n# 切换出一层iframe:一共2层,只需要切换出一层即可\nbrower.switch_to.default_content()\niframe3 = brower.find_elements_by_tag_name(\"iframe\")[0]\nbrower.switch_to.frame(iframe3)\n\n# 点击保存按钮\nsearch_h10=brower.find_element_by_css_selector(\"div.ui_buttons input:first-child\").click()\n\n\n\n\n\n############################# 新增民警信息\n\n","sub_path":"jiaoben/test01.py","file_name":"test01.py","file_ext":"py","file_size_in_byte":2522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"484949390","text":"# Import Libraries\n\n\n# *** Functions go here ***\n\n# Checks that input is either a float or an \n# interger that is nore than zero. Takes in custom error message\ndef num_check(question, error, num_type):\n valid = False\n\n while not valid:\n\n try:\n response = num_type(input(question))\n\n if response <= 0:\n print(error)\n else:\n return response\n \n except ValueError:\n print(error)\n\n\n# Main routine Goes here \nget_int = num_check(\"How many do you need? \", \"Please enter an amount more than 0\\n\", int)\nget_cost = num_check(\"how much does it cost? $\", \"Please enter a number more than 0\\n\", float)\n\nprint(\"You need: {}\".format(get_int))\nprint(\"It costs: ${}\".format(get_cost))\n","sub_path":"01_NumCheck_v1.py","file_name":"01_NumCheck_v1.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"109724819","text":"import json\nimport re\nimport typing\n\nimport fastapi\nimport main\nimport pydantic\nimport pytest\nimport requests\nimport starlette.testclient\n\nclient: starlette.testclient.TestClient = starlette.testclient.TestClient(\n main.app\n)\n\nner_sections: typing.List[str] = [\n 'Net income was $9.4 million compared to the prior year of $2.7 million. 
'\n + 'Google is a big company.',\n 'Revenue exceeded twelve billion dollars, with a loss of $1b.'\n]\n\n\ndef test_ner_sense2vec_enabled() -> None:\n response: requests.Response = client.post(\n '/ner',\n json=dict(main.NERRequest(sections=ner_sections, sense2vec=True))\n )\n assert response.status_code == 200\n with open('src/outputs/ner/sense2vec_enabled.json') as f:\n assert response.json() == json.load(f)\n\n\ndef test_ner_sense2vec_disabled() -> None:\n response: requests.Response = client.post(\n '/ner',\n json=dict(main.NERRequest(sections=ner_sections))\n )\n with open('src/outputs/ner/sense2vec_disabled.json') as f:\n assert response.json() == json.load(f)\n\n\ndef test_ner_spacy_fail() -> None:\n fail('/ner', main.NERRequest(sections=ner_sections), pipe='ner')\n\n\ndef test_ner_sense2vec_fail() -> None:\n fail(\n '/ner',\n main.NERRequest(sections=ner_sections, sense2vec=True),\n pipe='sense2vec'\n )\n\n\ndef test_sense2vec_success() -> None:\n body: main.PhraseInSentence = main.PhraseInSentence(\n sentence='Bill Gates founded Microsoft in April 4, 1975.',\n phrase='Bill Gates'\n )\n response: requests.Response = client.post('/sense2vec', json=dict(body))\n assert response.status_code == 200\n with open('src/outputs/sense2vec.json') as f:\n assert response.json() == json.load(f)\n\n\npos_body: main.TextModel = main.TextModel(\n text='Apple is looking at buying U.K. startup for $1 billion'\n)\n\n\ndef test_pos() -> None:\n response: requests.Response = client.post('/pos', json=dict(pos_body))\n assert response.status_code == 200\n with open('src/outputs/pos.json') as f:\n assert response.json() == json.load(f)\n\n\ndef test_pos_fail() -> None:\n fail('/pos', pos_body, pipe='parser')\n\n\ndef test_tokenizer() -> None:\n text: main.TextModel = main.TextModel(\n text='Apple is looking at buying U.K. startup for $1 billion'\n )\n response: requests.Response = client.post('/tokenizer', json=dict(text))\n assert response.status_code == 200\n with open('src/outputs/tokenizer.json') as f:\n assert response.json() == json.load(f)\n\n\nsentencizer_body: main.TextModel = main.TextModel(\n text='Apple is looking at buying U.K. startup for $1 billion. 
Another '\n + 'sentence.'\n)\n\n\ndef test_sentencizer() -> None:\n response: requests.Response = client.post(\n '/sentencizer',\n json=dict(sentencizer_body)\n )\n assert response.status_code == 200\n with open('src/outputs/sentencizer.json') as f:\n assert response.json() == json.load(f)\n\n\ndef test_sentencizer_fail() -> None:\n fail('/sentencizer', sentencizer_body, pipe='parser')\n\n\ndef test_health_check() -> None:\n assert client.get('/health_check').status_code == 204\n\n\ndef fail(endpoint: str, body: pydantic.BaseModel, pipe: str) -> None:\n with main.nlp.disable_pipes(pipe):\n response: requests.Response = client.post(endpoint, json=dict(body))\n assert re.match(r'4\\d\\d', str(response.status_code))\n assert 'detail' in response.json()\n\n\ndef test_enforce_components() -> None:\n with pytest.raises(fastapi.HTTPException):\n component: str = 'nonexistent_component'\n main.enforce_components([component], component)\n\n\ndef test_compute_phrases() -> None:\n sentence: str = 'Bill Gates founded Microsoft in April 4, 1975.'\n doc: main.nlp = main.nlp(sentence, disable=['tagger'])\n for ent in list(doc.sents)[0].ents:\n if ent.text == 'Bill Gates':\n with open('src/outputs/compute_phrases.json') as f:\n assert main.compute_phrases(ent) == json.load(f)\n","sub_path":"src/test_main.py","file_name":"test_main.py","file_ext":"py","file_size_in_byte":4010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"214669268","text":"from gui import entity\n\nclass Character(entity.Entity):\n \"\"\"Players display in the game. Has two positions to simulate a walking move.\n\n Members :\n * ip : id used to identify the player\n * sprite : picture of the character\n * screen : game display\n * x : lateral position, from left\n * y : vertical position, from top\n * next_image : picture to display at the next update\n * surface : map where the characters are positioned\n * scope : size of the character (heigth and width)\n * up/down/left/righ_img : PyGame wants them to be updated at each update (WORK IN PROGRESS)\n \"\"\"\n\n _ip = None\n _sprite = None\n _screen = None\n _x = None\n _y = None\n _next_image = None\n _surface = None\n\n _scope = 23\n\n _up_img = []\n _down_img = []\n _left_img = []\n _right_img = []\n\n def __init__(self, screen, image, x, y, ip):\n self._ip = ip\n self.init(screen, image, x, y)\n\n def create_image(self, surface):\n \"\"\"Set the character's image\"\"\"\n # Only one of these paragraph should be used if PyGame worked properly\n self._down_img.append(surface.subsurface((0,0, 23, 23)))\n self._down_img.append(surface.subsurface((23,0, 23, 23)))\n\n self._up_img.append(surface.subsurface((46,0, 23, 23)))\n self._up_img.append(surface.subsurface((69,0, 23, 23)))\n\n self._left_img.append(surface.subsurface((92,0, 23, 23)))\n self._left_img.append(surface.subsurface((115,0, 23, 23)))\n\n self._right_img.append(surface.subsurface((138,0, 23, 23)))\n self._right_img.append(surface.subsurface((161,0, 23, 23)))\n\n def up(self):\n \"\"\"Changes the orientation of the character\"\"\"\n self._sprite.image = self._surface.subsurface((46,0, 23, 23))\n self._next_image = self._surface.subsurface((69,0, 23, 23))\n self.set_position(self._x, self._y - self._scope)\n\n def down(self):\n \"\"\"Changes the orientation of the character\"\"\"\n self._sprite.image = self._surface.subsurface((0,0, 23, 23))\n self._next_image = self._surface.subsurface((23,0, 23, 23))\n self.set_position(self._x, self._y + self._scope)\n\n def 
left(self):\n \"\"\"Changes the orientation of the character\"\"\"\n self._sprite.image = self._surface.subsurface((92,0, 23, 23))\n self._next_image = self._surface.subsurface((115,0, 23, 23))\n self.set_position(self._x - self._scope, self._y)\n\n def right(self):\n \"\"\"Changes the orientation of the character\"\"\"\n self._sprite.image = self._surface.subsurface((138,0, 23, 23))\n self._next_image = self._surface.subsurface((161,0, 23, 23))\n self.set_position(self._x + self._scope, self._y)\n\n def set_position(self, x=-1, y=-1):\n \"\"\"Set the character position in the map\"\"\"\n if x != -1:\n self._x = x\n if y != -1:\n self._y = y\n self._sprite.rect.topleft = [self._x, self._y]\n","sub_path":"src/gui/character.py","file_name":"character.py","file_ext":"py","file_size_in_byte":2989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"496583651","text":"from openerp import fields, models, api\nfrom openerp.tools.translate import _\nfrom openerp.osv import expression\n\n\nclass res_users(models.Model):\n _inherit = \"res.users\"\n\n @api.multi\n def write(self, values):\n groups_class = self.env['res.groups']\n start_group = set(self.groups_id.ids)\n res = super(res_users, self).write(values)\n end_group = set(self.groups_id.ids)\n group_added = end_group - start_group\n group_removed = start_group - end_group\n message = _(\"\"\"\n
<p><b>Access Right/Groups changed :</b></p>\n<ul>    \n """)\n        for difference in group_added:\n            message += "<li>"\n            message += groups_class.browse(difference).display_name\n            message += _(" : Added")\n            message += "</li>"\n        for difference in group_removed:\n            message += "<li>"\n            message += groups_class.browse(difference).display_name\n            message += _(" : Removed")\n            message += "</li>"\n        message += "</ul>
\"\n message_id = self.message_post(\n subject=_(\"Access Right Changed\"),\n body=message,\n )\n\n if not isinstance(message_id, int):\n message_id = message_id.id\n self.env['mail.message'].browse(message_id).update(\n {'model': 'res.partner',\n 'res_id': self.id})\n values['message_ids'] = [(4, message_id)]\n\n res = super(res_users, self).write(values)\n return res\n\n# end of res_users()\n","sub_path":"fal_add_message_in_user/models/res_users.py","file_name":"res_users.py","file_ext":"py","file_size_in_byte":1616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"311815563","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport cProfile\nimport wx\n\ncProfile.run(\"main()\", filename=\"my.profile\")\n\n#常见得使用字典方法\ndef get_fruits(basket,fruit):\n try:\n return basket[fruit]\n except KeyError:\n return set()\n\n# 最好的使用方式\ndef better_get_fruits(basket,fruit):\n return basket.get(fruit,set())\n\n# 常见的集合使用\ndef has_invalid_fileds(fields):\n for field in fields:\n if field not in ['foo','bar']:\n return True\n return False\n\n# 更优的实现\ndef better_has_invalid_fileds(fields):\n # set - 代表取差集 相对补集\n return bool(set(fields) - set(['foo','bar']))\n\ns = better_has_invalid_fileds(['hello'])\nprint(s)\n\n\n# uneffcient\ndef add_animal_in_family(species,animal,family):\n if family not in species:\n species[family] = set()\n species[family].add(animal)\n\nspecies = {}\nadd_animal_in_family(species,'cat','felidea')\n\n# 高效办法 更加优雅的解决这个问题\nimport collections\ndef better_add_animal_in_family(species,animal,family):\n species[family].add(animal)\n#每次试图从字典中访问一个不存在的元素,defaultdict都会使用这个作为参数传入的这个函数去构造一个新值而不是抛出keyError\nspeciess = collections.defaultdict(set)\nbetter_add_animal_in_family(species,'cat','felidea')\n\n\n\n\n\n\n","sub_path":"Python高手之路/第十章性能与优化/数据结构.py","file_name":"数据结构.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"429098245","text":"import torch\nfrom torch.utils.data import DataLoader, Dataset\nfrom torch.distributions import MultivariateNormal\nimport numpy as np\nfrom sklearn import manifold, datasets\n\n\nclass SwissRoll(Dataset):\n def __init__(\n self,\n num_batches,\n BATCH_SIZE,\n model_kwargs,\n shuffle=True,\n corr=False,\n train=True,\n mask=False\n ):\n \"\"\"\n Args: \n num_batches: Number of batches of synthetic data\n BATCH_SIZE: batchsize of synthetic data\n model_kwargs: dictionary containing \"x_dim\" which indicates input data size\n shuffle: True sets condition vector in input data to 0 for all possible permutations\n corr: True sets dependent input dimensions via a correlation matrix \n \"\"\"\n self.num_batches = num_batches\n self.BATCH_SIZE = BATCH_SIZE\n self.corr = corr\n self.shuffle = shuffle\n self.model_kwargs = model_kwargs\n self.train = train\n\n Batches_X, Batches_C, Batches_conds = torch.empty([0]), torch.empty([0]), torch.empty([0])\n\n for j, i in enumerate(range(self.num_batches)):\n\n # set parameters\n length_phi = 15 # length of swiss roll in angular direction\n length_Z = 15 # length of swiss roll in z direction\n sigma = 0.1 # noise strength\n m = self.BATCH_SIZE # n umber of samples\n\n # create dataset\n phi = length_phi*np.random.rand(m)\n xi = np.random.rand(m)\n Z = length_Z*np.random.rand(m)\n X = 1./6*(phi + sigma*xi)*np.sin(phi)\n Y = 1./6*(phi + sigma*xi)*np.cos(phi)\n\n swiss_roll = torch.from_numpy(np.array([X, Y, Z]).transpose()).float()\n self._color = np.sqrt(X**2 + 
Y**2)\n\n # swiss_roll, color = datasets.samples_generator.make_swiss_roll(n_samples=m, noise=0.05)\n # swiss_roll = swiss_roll[:, [2, 0, 1]]\n # self._color = color\n # swiss_roll = torch.from_numpy(swiss_roll).float()\n\n if mask is True:\n mask_indices = torch.cuda.FloatTensor(swiss_roll.size()[0]).uniform_() > 1 - model_kwargs['mask_percentage']\n swiss_roll[mask_indices, 0] = 0\n swiss_roll[mask_indices, 1] = 0\n swiss_roll[mask_indices, 2] = 0\n\n C = swiss_roll.clone()\n count = 0\n if self.shuffle is True:\n while count == 0:\n C_mask = torch.zeros(C.shape).bernoulli_(0.5)\n # 3 here refers to 3 dimensions in swiss roll\n if len(set([i.item() for i in torch.sum(C_mask, dim=1)])) == 3 + 1:\n count = 1 \n else:\n C_mask = torch.zeros(C.shape).bernoulli_(0)\n\n C[C_mask.byte()] = 0\n C_indicator = C_mask == 0\n\n C = torch.cat([C.float(), C_indicator.float()], 1)\n\n # 3 here is number of dimensions in swiss roll\n swiss_roll = swiss_roll.view([1, -1, 3])\n C = C.view([1, -1, 3*2])\n\n # Sum up\n conds = C[:, :, 3:].sum(2)\n Batches_X = torch.cat([Batches_X, swiss_roll], 0)\n Batches_C = torch.cat([Batches_C, C], 0)\n Batches_conds = torch.cat([Batches_conds, conds], 0)\n\n self._batches_x = Batches_X\n self._batches_c = Batches_C\n self._batches_conds = Batches_conds\n\n def __len__(self):\n return len(self._batches_x)\n\n def __getitem__(self, idx):\n \"\"\"\n Returns a tuple. (X, C, sum(C[mid:end])). \n X is the input, \n C is the condition, \n sum(C[mid:end]) is the sum of the indicators in C. It tells us how many of the condition\n columns have been masked\n \"\"\"\n return self._batches_x[idx], self._batches_c[idx], self._batches_conds[idx]\n\n def get_all_items(self):\n if self.train is True:\n return self._batches_x, self._batches_c, self._batches_conds, None\n else:\n return self._batches_x, self._batches_c, self._batches_conds\n\n def get_color(self):\n return torch.from_numpy(self._color).cuda()\n\n","sub_path":"CVAE_testbed/datasets/swiss_roll.py","file_name":"swiss_roll.py","file_ext":"py","file_size_in_byte":4236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"416532424","text":"#! 
/usr/bin/env python3\n\nfrom nutils import *\nimport fractions, unittest\n\n\nclass MakePlots( object ):\n\n def __init__( self, geom, exact, optimalrate ):\n self.geom = geom\n self.exact = exact\n self.index = 0\n self.ndofs = []\n self.error_exact = []\n self.error_estimate = []\n self.optimalrate = optimalrate\n\n def __call__( self, domain, sol, ndofs, error_estimate ):\n self.index += 1\n\n error_exact = domain['aoi'].integrate( self.exact - sol, geometry=self.geom, ischeme='gauss9' )\n log.user( 'error estimate: %.2e (%.1f%% accurate)' % ( error_estimate, 100.*error_estimate/error_exact ) )\n\n points, colors = domain.elem_eval( [ self.geom, sol ], ischeme='bezier9', separate=True )\n aoi = domain['aoi'].boundary.elem_eval( self.geom, ischeme='bezier2', separate=True )\n with plot.PyPlot( 'sol', index=self.index ) as plt:\n plt.mesh( points, colors )\n plt.colorbar()\n plt.segments( aoi )\n\n self.ndofs.append( ndofs )\n self.error_exact.append( error_exact )\n self.error_estimate.append( error_estimate )\n with plot.PyPlot( 'conv', index=self.index ) as plt:\n plt.loglog( self.ndofs, self.error_exact, 'k-^', label='exact' )\n plt.loglog( self.ndofs, self.error_estimate, 'k--', label='estimate' )\n plt.slope_marker( ndofs, min( error_exact, error_estimate ), slope=-self.optimalrate )\n plt.legend( loc=3, frameon=False )\n plt.grid()\n plt.xlabel( 'degrees of freedom' )\n plt.ylabel( 'error' )\n\n\ndef main(\n degree: 'number of elements' = 1,\n circle: 'use circular area of interest (default square)' = False,\n uniform: 'use uniform refinement (default adaptive)' = False,\n basistype: 'basis function' = 'std',\n nrefine: 'maximum allowed number of refinements' = 7,\n figures: 'create figures' = True,\n ):\n\n # construct domain\n verts = numpy.linspace( -1, 1, 7 )\n basetopo, geom = mesh.rectilinear( [verts,verts] )\n aoi = basetopo.trim( .04 - ((geom+.5)**2).sum(-1), maxrefine=5 ) if circle else basetopo[1:2,1:2]\n domain = ( basetopo.withboundary( outside=... )\n - basetopo[3:,:3].withboundary( inside=... 
) ).withsubdomain( aoi=aoi )\n\n # construct exact sulution (used for boundary conditions and error evaluation)\n exact = ( geom**2 ).sum(-1)**(1./3) * function.sin( (2./3) * function.arctan2(-geom[1],-geom[0]) )\n flux = exact.ngrad( geom )\n\n # sanity check\n harmonicity = numpy.sqrt( domain.integrate( exact.laplace(geom)**2, geometry=geom, ischeme='gauss9' ) )\n log.info( 'exact solution lsqr harmonicity:', harmonicity )\n\n # prepare plotting\n makeplots = MakePlots( geom, exact, fractions.Fraction(2 if uniform else degree*3,3) ) if figures else lambda *args, **kwargs: None\n\n # start adaptive refinement\n for irefine in log.count( 'level', start=1 ):\n\n # construct, solve course domain primal/dual problem\n basis = domain.basis( basistype, degree=degree )\n laplace = function.outer( basis.grad(geom) ).sum(-1)\n matrix = domain.integrate( laplace, geometry=geom, ischeme='gauss5' )\n rhsprimal = domain.boundary['inside'].integrate( basis * flux, geometry=geom, ischeme='gauss99' )\n rhsdual = domain['aoi'].integrate( basis, geometry=geom, ischeme='gauss5' )\n cons = domain.boundary['outside'].project( exact, ischeme='gauss9', geometry=geom, onto=basis )\n lhsprimal = matrix.solve( rhsprimal, constrain=cons )\n lhsdual = matrix.solve( rhsdual, constrain=cons&0 )\n primal = basis.dot( lhsprimal )\n dual = basis.dot( lhsdual )\n\n # construct, solve refined domain primal/dual problem\n finedomain = domain.refined\n finebasis = finedomain.basis( basistype, degree=degree )\n finelaplace = function.outer( finebasis.grad(geom) ).sum(-1)\n finematrix = finedomain.integrate( finelaplace, geometry=geom, ischeme='gauss5' )\n finerhsdual = finedomain['aoi'].integrate( finebasis, geometry=geom, ischeme='gauss5' )\n finecons = finedomain.boundary['outside'].project( 0, ischeme='gauss5', geometry=geom, onto=finebasis )\n finelhsdual = finematrix.solve( finerhsdual, constrain=finecons )\n\n # evaluate error estimate\n dlhsdual = finelhsdual - finedomain.project( dual, onto=finebasis, geometry=geom, ischeme='gauss5' )\n ddualw = finebasis * dlhsdual\n error_est_w = finedomain.boundary['inside'].integrate( ddualw * flux, geometry=geom, ischeme='gauss99' )\n error_est_w -= finedomain.integrate( ( ddualw.grad(geom) * primal.grad(geom) ).sum(-1), geometry=geom, ischeme='gauss5' )\n\n # plot solution and error convergence\n makeplots( domain, primal, len(lhsprimal), error_estimate=abs(error_est_w).sum() )\n\n if irefine >= nrefine:\n break\n\n # refine mesh\n if uniform:\n domain = domain.refined\n else:\n mask = error_est_w**2 > numpy.mean(error_est_w**2)\n domain = domain.refined_by( elem.transform[:-1] for elem in domain.refined.supp( finebasis, mask ) )\n\n return lhsprimal, error_est_w\n\n\nclass test(unittest.TestCase):\n\n def test_p1_std(self):\n lhsprimal, error_est_w = main(degree=1, circle=False, uniform=False, basistype='std', nrefine=2, figures=False)\n numeric.assert_allclose64(lhsprimal, 'eNoBhAB7/2s2sDVfNKMjoMtQypXJlzbjNZk0l9Vkyx3Ka'\n 'snA0gXL18k0yWjKfsn3yGrKrskdybXIfskeycPIdcj3yLXIdcg6yF4ocyhDLHA1JjXGNEs0izNAMs02h'\n 'DYoNq019DSrM+o2pzZTNuM1PDX6M0bUrtEKN802gTYgNpY1rjR90DfOZs630OhqQfw=')\n numeric.assert_allclose64(error_est_w, 'eNp1kE0oBGEYx/83lNq1a2c/5uu1yUFc7IWkZFkHRVx'\n 'WboqiXERJSpxw29PiwEFauVBq243k6qKQcvKxY2bM7K58RknDM+O8U//mfX/P/3metz9Q+mtiaQnY8AO'\n 'VIvDC/9Obu2uzN+jKRc1y6U1PC3X5mSBQIQMyAwYl2zOkXTJLj8ht3AB1rROvcfitcCXvGhli7QrQrdv'\n 'sK3AQ+uVdgsXNhYAF8pXRtjGntmTcBKa5CcPiXn3EqH5COuNKv/mTah2hBNdIvjGaw+SU/9nZnVaB1D0'\n 
'QIX2QmlWbbunx/HlOMxpY0oiwVnM/9129o9mVlQegSL7VOyBG/3rFpm79uCr24PGCLVZvsqQZU5K+YWf'\n 'SEaXQQpu+KIlZUpToYwAYcHKbKBx64kLsqVMuGpVMeeyTR7R3pYsvSDw/J9qOH/L12yeaIVD/KaU2RPc'\n 'pp//Hn31L8LXuZdFbnJTmfS+iKmyHvcpFOKNkxZow0JMDLDWvjktr/J4AjJL+AIyZgFs=')\n\n def test_p2_spline(self):\n lhsprimal, error_est_w = main(degree=2, circle=False, uniform=False, basistype='spline', nrefine=1, figures=False)\n numeric.assert_allclose64(lhsprimal, 'eNoBbgCR/2s2GTYnNQoz9szZyufJlcl/Ni82OTUuM9jMv'\n '8rSyYHJrzZgNoQ1LDN1zH3Km8lRyek2rDbKNSo0AMwZylrJF8kpN9w2kjbNM4TKrMkKydfIssm2ySHJw'\n 'ciVyIfJBsnByHfIV8gXydfIlchXyDrIutg53Q==')\n numeric.assert_allclose64(error_est_w, 'eNpjYMAH5kiUCAXItEvd5rspEywx7dXat0fefeXX+wC'\n 'S2yfi/urv83cSi5+1C4i/bn1pKOjFz8SrIQiSq5VpfPL/UepTkYc/5Lc8O/bCSDT75USh02B9qs9mSpx'\n '9qinG8aRDjOu50qvQ147isgKBn0Bya6XPP3V+NOPx3Xu7FB9L1j5eJmb16rxIxBuQHN+TQMmMRwqPWBX'\n 'cFOwe+8pESjx9v0WQXwgkxym7SkpYsfnpO9X5D1QVWBUPiMXImLw6J87A4Ke2Q1Hs0UXJTzIr3rCIMjA'\n 'ceJL16JbU6ZeKLza9YRJmYPBWeCaTLlcvMu8lv6i4GAMDq+R1SZ7Xpq92iaW/O/mZgaFYVl+yWTLn7RG'\n 'xYq4SQcwQAgDpQ2yr')\n\n def test_p1_std_circle(self):\n lhsprimal, error_est_w = main(degree=1, circle=True, uniform=False, basistype='std', nrefine=1, figures=False)\n numeric.assert_allclose64(lhsprimal, 'eNoBUACv/2s2sDVgNHPpoMtQypXJljbgNZM0RtRkyx3Ka'\n 'snMNic25DSm0gnL1sk0yQk3gDaZNcHPYsp+yffIbMq1yR7JtciAyR/JxMh1yPfItch1yDrIoQ4s1w==')\n numeric.assert_allclose64(error_est_w, 'eNpjYMAHJigyMFQrMDBslGBgCJVlYNCVgYhL3mt6FK9'\n 'S+njh/VMitbKtzwpl+F9OkYTI/bvFwNB+m4FhoTQDgwVQ7yI5iHj8vXq5dDXW+xPvKj6VVpR4ul5+t/h'\n 'kaYjcVmUGhmWqDAwTgepzgXY+kYWIz1SYISmsvOf+OVW9eyWqhx5ckw99ORHohi4VBobG+wwMdx+DVBU'\n '+evxQRuG+XO/zPKB5W4D2KcszMCx+ApZ7rvPE6uXPpw6SLEA/LAPKrwRiBwlMvwIAHKQ+mQ==')\n\n\nif __name__ == '__main__':\n cli.run(main)\n","sub_path":"examples/adaptivity.py","file_name":"adaptivity.py","file_ext":"py","file_size_in_byte":7532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"253494631","text":"import chainer\nfrom chainer.links import Convolution2D, BatchNormalization, Linear\nfrom chainer.initializers import HeNormal, GlorotUniform, Zero, One\nfrom chainer.functions import relu, average_pooling_2d, max_pooling_2d, concat\nimport numpy as np\n\n\nwhich_initializer = 1\ninitial = None\n\nif which_initializer == 1:\n initial = HeNormal()\nelif which_initializer == 2:\n initial = GlorotUniform()\n\n\n### BLOCK ###\nclass ConvolutionBlock(chainer.Chain):\n def __init__(self, in_channels, out_channels):\n super(ConvolutionBlock, self).__init__()\n with self.init_scope():\n self.conv = Convolution2D(in_channels, out_channels,\n ksize=7, stride=2, pad=3,\n initialW=initial)\n # initialW=HeNormal())\n self.bn_conv = BatchNormalization(out_channels)\n\n def __call__(self, x):\n h = self.conv(x)\n h = self.bn_conv(h)\n y = relu(h)\n return y\n\n\nclass ResidualBlock(chainer.Chain):\n def __init__(self, in_channels, out_channels):\n super(ResidualBlock, self).__init__()\n with self.init_scope():\n self.res_branch2a = Convolution2D(in_channels, out_channels,\n ksize=3, pad=1,\n initialW=initial)\n # initialW=HeNormal())\n self.bn_branch2a = BatchNormalization(out_channels)\n self.res_branch2b = Convolution2D(out_channels, out_channels,\n ksize=3, pad=1,\n initialW=initial)\n # initialW=HeNormal())\n self.bn_branch2b = BatchNormalization(out_channels)\n\n def __call__(self, x):\n h = self.res_branch2a(x)\n h = self.bn_branch2a(h)\n h = relu(h)\n h = self.res_branch2b(h)\n h = self.bn_branch2b(h)\n h += x\n y = relu(h)\n return 
y\n\n\nclass ResidualBlockB(chainer.Chain):\n def __init__(self, in_channels, out_channels):\n super(ResidualBlockB, self).__init__()\n with self.init_scope():\n self.res_branch1 = Convolution2D(in_channels, out_channels,\n ksize=1, stride=2,\n initialW=initial)\n # initialW=HeNormal())\n self.bn_branch1 = BatchNormalization(out_channels)\n self.res_branch2a = Convolution2D(in_channels, out_channels,\n ksize=3, stride=2, pad=1,\n initialW=initial)\n # initialW=HeNormal())\n self.bn_branch2a = BatchNormalization(out_channels)\n self.res_branch2b = Convolution2D(out_channels, out_channels,\n ksize=3, pad=1,\n initialW=initial)\n # initialW=HeNormal())\n self.bn_branch2b = BatchNormalization(out_channels)\n\n def __call__(self, x):\n temp = self.res_branch1(x)\n temp = self.bn_branch1(temp)\n h = self.res_branch2a(x)\n h = self.bn_branch2a(h)\n h = chainer.functions.relu(h)\n h = self.res_branch2b(h)\n h = self.bn_branch2b(h)\n h = temp + h\n y = chainer.functions.relu(h)\n return y\n### BLOCK ###\n\n\n### BRANCH ###\nclass ResNet18(chainer.Chain):\n def __init__(self):\n super(ResNet18, self).__init__()\n with self.init_scope():\n self.conv1_relu = ConvolutionBlock(3, 32)\n self.res2a_relu = ResidualBlock(32, 32)\n self.res2b_relu = ResidualBlock(32, 32)\n self.res3a_relu = ResidualBlockB(32, 64)\n self.res3b_relu = ResidualBlock(64, 64)\n self.res4a_relu = ResidualBlockB(64, 128)\n self.res4b_relu = ResidualBlock(128, 128)\n self.res5a_relu = ResidualBlockB(128, 256)\n self.res5b_relu = ResidualBlock(256, 256)\n\n def __call__(self, x):\n h = self.conv1_relu(x)\n h = max_pooling_2d(h, ksize=3, stride=2, pad=1)\n h = self.res2a_relu(h)\n h = self.res2b_relu(h)\n h = self.res3a_relu(h)\n h = self.res3b_relu(h)\n h = self.res4a_relu(h)\n h = self.res4b_relu(h)\n h = self.res5a_relu(h)\n h = self.res5b_relu(h)\n y = average_pooling_2d(h, ksize=h.data.shape[2:])\n # y.shape\n # (32, 256, 1, 1)\n return y\n### BRANCH ###\n\n\n### MODEL ###\n\nclass Siamese(chainer.Chain):\n def __init__(self):\n super(Siamese, self).__init__()\n with self.init_scope():\n self.b1 = ResNet18()\n # self.fc = Linear(in_size=512, out_size=10) # wrong way of making predictions\n # self.fc = Linear(in_size=512, out_size=5) # right way with 5 traits\n self.fc = Linear(in_size=512, out_size=1) # with collapsed traits\n\n def __call__(self, x1, x2):\n _1 = self.b1(x1) # (32, 256, 1, 1)\n _2 = self.b1(x2)\n\n h = concat((_1, _2))\n h = self.fc(h)\n return h\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":5327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"263274390","text":"import xlsxwriter\nfrom app.allImports import *\nimport sys\n\ndef makeExcelFile(SEID):\n filename = \"syllus-{}-missing-syllabi.xlsx\".format(SEID)\n path = getAbsolutePath(cfg['fileOperations']['dataPaths']['tmp'],filename,False)\n workbook = xlsxwriter.Workbook(path)\n workbook.set_properties({\n 'title': 'Missing Syllabi for {}'.format(SEID),\n 'author': 'Cody Myers',\n 'comments': 'Created with Python and XlsxWriter'\n })\n master_worksheet = workbook.add_worksheet('All Courses')\n master_worksheet.write('A1','Username')\n master_worksheet.write('B1', 'First Name')\n master_worksheet.write('C1', 'Last Name')\n master_worksheet.write('D1', 'Email')\n master_worksheet.write('E1', 'Course(s)')\n master_row = 2\n users = UsersCourses.select(UsersCourses.username).distinct().join(Courses).where(\n Courses.filePath >> None,\n Courses.SEID== SEID).order_by(\n 
UsersCourses.username)\n for user in users:\n master_worksheet.write('A{}'.format(master_row),user.username.username)\n master_worksheet.write('B{}'.format(master_row),user.username.firstName)\n master_worksheet.write('C{}'.format(master_row),user.username.lastName)\n master_worksheet.write('D{}'.format(master_row),user.username.email)\n courses = UsersCourses.select().join(Courses).where(\n UsersCourses.username == user.username.username,\n Courses.filePath >> None,\n Courses.SEID == SEID)\n colLetter = 'D'\n for c in courses:\n colLetter = chr(ord(colLetter) + 1) \n #Turns value in to next letter up\n #E.G. D -> E\n colName = colLetter + '{}'\n course_info = c.CID.prefix+'-'+c.CID.number+'-'+c.CID.section\n master_worksheet.write(colName.format(master_row),course_info)\n master_row += 1\n workbook.close()\n return path\n \n \n \n ","sub_path":"app/logic/excelMaker.py","file_name":"excelMaker.py","file_ext":"py","file_size_in_byte":2062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"283979977","text":"import types\n\nfrom django.http import HttpResponse\nfrom django.utils import timezone\nimport pytz as pytz\nfrom django.utils.functional import SimpleLazyObject\n\nfrom jangl_utils import settings\nfrom jangl_utils.auth import get_token_from_request\nfrom jangl_utils.backend_api import get_backend_api_session\nfrom jangl_utils.etc.mixins import MiddlewareMixin\nfrom jangl_utils.unique_id import get_unique_id\n\n\nclass HealthCheckMiddleware(MiddlewareMixin):\n def process_request(self, request):\n if request.path_info == '/_hc':\n return HttpResponse(content_type='text/plain')\n\n\nclass SetRemoteAddrFromForwardedFor(MiddlewareMixin):\n def process_request(self, request):\n try:\n real_ip = request.META['HTTP_X_FORWARDED_FOR']\n except KeyError:\n pass\n else:\n # HTTP_X_FORWARDED_FOR can be a comma-separated list of IPs.\n # Take just the first one.\n real_ip = real_ip.split(\",\")[0]\n request.META['REMOTE_ADDR'] = real_ip\n\n\ndef get_correlation_id(request):\n return request.META.get('HTTP_' + settings.CID_HEADER_NAME.replace('-', '_'))\n\n\nclass CorrelationIDMiddleware(MiddlewareMixin):\n def process_request(self, request):\n # If this is a downstream request, use existing CID and return in response header\n cid = get_correlation_id(request)\n if cid:\n request.cid = cid\n request.propagate_response = True\n\n # Otherwise create a new CID and don't return in header\n else:\n request.cid = get_unique_id()\n request.propagate_response = False\n\n def process_response(self, request, response):\n if hasattr(request, 'propagate_response') and request.propagate_response:\n response[settings.CID_HEADER_NAME] = request.cid\n return response\n\n\nclass BackendAPIMiddleware(MiddlewareMixin):\n def process_request(self, request):\n assert hasattr(request, 'cid'), (\n 'Make sure to insert \"jangl_utils.middleware.CorrelationIDMiddleware\"'\n 'before \"jangl_utils.middleware.BackendAPIMiddleware\" in your'\n 'middleware settings.'\n )\n\n api_session = get_backend_api_session(cid=request.cid,\n site_id=request.META.get('HTTP_X_SITE_ID'),\n account=request.META.get('HTTP_X_AUTH_ACCOUNT'),\n host=request.get_host(),\n api_token=get_token_from_request(request),\n twilio_signature=request.META.get('HTTP_X_TWILIO_SIGNATURE'),\n cookies=request.COOKIES)\n\n request.backend_api = api_session\n\n def process_response(self, request, response):\n if hasattr(request, 'backend_api'):\n try:\n request.backend_api.close()\n finally:\n pass\n return 
response\n\n\nclass TimezoneMiddleware(MiddlewareMixin):\n    def process_view(self, request, view_func, view_args, view_kwargs):\n        try:\n            tz = request.account.get('timezone', request.site.get('timezone'))\n        except AttributeError:\n            tz = 'US/Eastern'\n        if tz:\n            timezone.activate(pytz.timezone(tz))\n        else:\n            timezone.deactivate()\n\n\nclass AccountNamesMiddleware(MiddlewareMixin):\n\n    def process_view(self, request, view_func, view_args, view_kwargs):\n        request.buyer_names = SimpleLazyObject(lambda: self.get_buyer_names(request))\n        request.vendor_names = SimpleLazyObject(lambda: self.get_vendor_names(request))\n        request.affiliate_names = SimpleLazyObject(lambda: self.get_affiliate_names(request))\n\n        def get_buyer_name(request, buyer_id):\n            if buyer_id in request.buyer_names:\n                return request.buyer_names[buyer_id]['name']\n\n        def get_vendor_name(request, vendor_id):\n            if vendor_id in request.vendor_names:\n                return request.vendor_names[vendor_id]['name']\n\n        def get_affiliate_name(request, affiliate_id):\n            if affiliate_id in request.affiliate_names:\n                return request.affiliate_names[affiliate_id]['name']\n\n        # bind the lookup helpers to the request so that views can call\n        # e.g. request.get_buyer_name(buyer_id)\n        request.get_buyer_name = types.MethodType(get_buyer_name, request)\n        request.get_vendor_name = types.MethodType(get_vendor_name, request)\n        request.get_affiliate_name = types.MethodType(get_affiliate_name, request)\n\n    def get_buyer_names(self, request):\n        return self.get_names(request, 'buyers')\n\n    def get_vendor_names(self, request):\n        return self.get_names(request, 'vendors')\n\n    def get_affiliate_names(self, request):\n        return self.get_names(request, 'affiliates')\n\n    def get_names(self, request, account_type):\n        if hasattr(request, 'account') and request.account.is_staff:\n            response = request.backend_api.get(('accounts', account_type, 'names'),\n                                               cache_seconds=3600, cache_refresh=30)\n            if response.ok:\n                return {a['id']: a for a in response.json()}\n        return {}\n","sub_path":"jangl_utils/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":5180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"291310356","text":"import torch\nimport torch.nn as nn\nfrom src.models.modules.StackedDilation import StackedDilation\nfrom warnings import filterwarnings\n\nfilterwarnings(\"ignore\", category=UserWarning)\n\n\nclass HCBlock(nn.Module):\n    def __init__(self, in_channels: int) -> None:\n        super(HCBlock, self).__init__()\n\n        self.conv1x1 = nn.Conv3d(in_channels * 2, in_channels, kernel_size=1)\n        self.convNxN_1 = nn.Conv3d(in_channels, in_channels, kernel_size=5, padding=2, stride=1, dilation=1)\n        # self.conv3x3_2 = nn.Conv3d(in_channels, in_channels, kernel_size=3, padding=1, stride=1, dilation=1)\n\n        self.activation = nn.LeakyReLU()\n\n        self.batch_norm_conv_1x1 = nn.BatchNorm3d(in_channels)\n        self.batch_norm_conv_NxN_1 = nn.BatchNorm3d(in_channels)\n        # self.batch_norm_conv_3x3_2 = nn.BatchNorm3d(in_channels)\n\n    def forward(self, x: torch.Tensor) -> torch.Tensor:\n\n        x = self.activation(self.batch_norm_conv_1x1(self.conv1x1(x)))\n        x = self.activation(self.batch_norm_conv_NxN_1(self.convNxN_1(x)))\n\n        return x\n","sub_path":"src/models/modules/HCBlock.py","file_name":"HCBlock.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"529322305","text":"#!/usr/bin/env python\nu\"\"\"\nsync_library.py (12/2020)\nExports complete library into a new directory (such as a mounted-drive)\nWill only copy new or overwritten files by checking the last modified 
dates\n\nCALLING SEQUENCE:\n python sync_library.py -V /path_to_external_library\n\nINPUTS:\n Directory for outputting reference library\n\nCOMMAND LINE OPTIONS:\n -P, --pull: Transfer files from external directory to library\n -L, --list: List files without transferring\n -C, --clobber: Overwrite existing data in transfer\n -V, --verbose: Print all transferred files\n -M X, --mode X: Permission mode of files transferred\n\nPROGRAM DEPENDENCIES:\n read_referencerc.py: Sets default file path and file format for output files\n\nUPDATE HISTORY:\n Updated 12/2020: using argparse to set command line options\n Updated 02/2019: added option list to only print the files to be transferred\n Written 02/2018\n\"\"\"\nfrom __future__ import print_function, division\n\nimport sys\nimport re\nimport os\nimport shutil\nimport inspect\nimport argparse\nfrom read_referencerc import read_referencerc\n\n#-- current file path for the program\nfilename = inspect.getframeinfo(inspect.currentframe()).filename\nfilepath = os.path.dirname(os.path.abspath(filename))\n\n#-- Reads BibTeX files for each article stored in the working directory\n#-- exports as a single file sorted by BibTeX key\ndef sync_library(DIRECTORY, PULL=False, LIST=False, VERBOSE=False,\n CLOBBER=False, MODE=0o775):\n #-- get reference filepath and reference format from referencerc file\n datapath,dataformat=read_referencerc(os.path.join(filepath,'.referencerc'))\n\n #-- if transferring from DIRECTORY to library\n d_in,d_out = (DIRECTORY,datapath) if PULL else (datapath,DIRECTORY)\n #-- subdirectories with supplementary information\n S = 'Supplemental'\n\n #-- iterate over yearly directories\n years = [sd for sd in os.listdir(d_in) if re.match('\\d+',sd) and\n os.path.isdir(os.path.join(d_in,sd))]\n for Y in sorted(years):\n #-- find author directories in year\n authors = [sd for sd in os.listdir(os.path.join(d_in,Y)) if\n os.path.isdir(os.path.join(d_in,Y,sd))]\n for A in sorted(authors):\n #-- find BibTeX and article files within author directory\n regex = '((.*?)-(.*?)\\.bib$)|({0}_(.*?)_{1}(.*?)$)'.format(A,Y)\n FILES = [f for f in os.listdir(os.path.join(d_in,Y,A))\n if re.match(regex,f)]\n #-- transfer each article file (check if existing)\n for fi in FILES:\n input_dir = os.path.join(d_in,Y,A)\n output_dir = os.path.join(d_out,Y,A)\n transfer_push_file(fi, input_dir, output_dir, LIST=LIST,\n CLOBBER=CLOBBER, VERBOSE=VERBOSE, MODE=MODE)\n #-- if there is supplementary information\n if os.path.isdir(os.path.join(d_in,Y,A,S)):\n #-- find supplementary files within Supplemental directory\n FILES = [f for f in os.listdir(os.path.join(d_in,Y,A,S))\n if re.match(regex,f)]\n #-- transfer each supplementary file (check if existing)\n for fi in FILES:\n input_dir = os.path.join(d_in,Y,A,S)\n output_dir = os.path.join(d_out,Y,A,S)\n transfer_push_file(fi, input_dir, output_dir, LIST=LIST,\n CLOBBER=CLOBBER, VERBOSE=VERBOSE, MODE=MODE)\n\n#-- PURPOSE: push an input file to an output directory checking if file exists\n#-- and if the input file is newer than any existing output file\n#-- set the permissions mode of the transferred file to MODE\ndef transfer_push_file(transfer_file, input_dir, output_dir, LIST=False,\n CLOBBER=False, VERBOSE=False, MODE=0o775):\n #-- input and output versions of file\n input_file = os.path.join(input_dir,transfer_file)\n output_file = os.path.join(output_dir,transfer_file)\n #-- recursively create output directory if not currently existing\n os.makedirs(output_dir,MODE) if not os.access(output_dir, os.F_OK) else None\n 
#-- check if input file is newer than the output file\n    TEST = False\n    OVERWRITE = ' (clobber)'\n    #-- last modification time of the input file\n    input_mtime = os.stat(input_file).st_mtime\n    if os.access(output_file, os.F_OK):\n        output_mtime = os.stat(output_file).st_mtime\n        #-- if input file is newer: overwrite the output file\n        #-- verifying based on even mtimes for different file systems\n        if (even(input_mtime) > even(output_mtime)):\n            TEST = True\n            OVERWRITE = ' (overwrite)'\n    else:\n        TEST = True\n        OVERWRITE = ' (new)'\n    #-- if output file does not exist, is to be overwritten, or CLOBBER is set\n    if TEST or CLOBBER:\n        if VERBOSE or LIST:\n            print('{0} -->\\n\\t{1}{2}\\n'.format(input_file,output_file,OVERWRITE))\n        #-- if transferring files and not only printing the filenames\n        if not LIST:\n            #-- check if input_file is a directory (e.g. unzipped Supplementary)\n            if os.path.isdir(input_file):\n                #-- copy input directory to storage output directory\n                shutil.copytree(input_file, output_file)\n            else:\n                #-- copy input files to storage output\n                shutil.copyfile(input_file, output_file)\n            #-- change the permissions level of the transported file to MODE\n            os.chmod(output_file, MODE)\n            #-- set modification times of the output file\n            os.utime(output_file, (os.stat(output_file).st_atime,input_mtime))\n\n#-- PURPOSE: rounds a number to an even number less than or equal to original\ndef even(i):\n    return 2*int(i//2)\n\n#-- main program that calls sync_library()\ndef main():\n    #-- Read the system arguments listed after the program\n    parser = argparse.ArgumentParser(\n        description=\"\"\"Exports complete library into a new directory\n        \"\"\"\n    )\n    #-- command line parameters\n    parser.add_argument('directory', default=os.getcwd(),\n        type=lambda p: os.path.abspath(os.path.expanduser(p)),\n        help='External reference directory')\n    parser.add_argument('--pull','-P',\n        default=False, action='store_true',\n        help='Transfer files from external directory to library')\n    parser.add_argument('--list','-L',\n        default=False, action='store_true',\n        help='List files without transferring')\n    parser.add_argument('--clobber','-C',\n        default=False, action='store_true',\n        help='Overwrite existing files in transfer')\n    parser.add_argument('--verbose','-V',\n        default=False, action='store_true',\n        help='Print all transferred files')\n    #-- permissions mode of the local directories and files (number in octal)\n    parser.add_argument('--mode','-M',\n        type=lambda x: int(x,base=8), default=0o775,\n        help='Permission mode of directories and files transferred')\n    args = parser.parse_args()\n\n    #-- export references to a new directory\n    sync_library(args.directory, PULL=args.pull, LIST=args.list,\n        VERBOSE=args.verbose, CLOBBER=args.clobber, MODE=args.mode)\n\n#-- run main program\nif __name__ == '__main__':\n    main()\n","sub_path":"sync_library.py","file_name":"sync_library.py","file_ext":"py","file_size_in_byte":7238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"15506605","text":"import faker\nimport random\nfrom time import process_time\nfrom pprint import pprint\n\n\nclass Searcher:\n    def __init__(self, cnt):\n        self.array = []\n        self.fill(cnt)\n        self.chance = []\n        self.sorted_arr = []\n        self.seg_cnt = 6\n\n    def fill(self, cnt):\n        fake = faker.Faker()\n        for i in range(cnt):\n            d = {'key': 100000 + i,\n                 'name': fake.name()}\n            self.array.append(d)\n\n    def print(self):\n        pprint(self.array)\n\n    def brute(self, key):\n        for item in self.array:\n            if item['key'] == key:\n                return item\n        return 
None\n\n def binary(self, key):\n s = 0\n e = len(self.array) - 1\n mid = (s + e) // 2\n if self.array[s]['key'] > key:\n return None\n elif self.array[e]['key'] < key:\n return None\n\n if self.array[s]['key'] == key:\n return self.array[s]\n elif self.array[e]['key'] == key:\n return self.array[e]\n\n tmp = self.array[mid]['key']\n while key != tmp:\n if key < tmp:\n e = mid\n else:\n s = mid\n mid = (s + e) // 2\n tmp = self.array[mid]['key']\n return self.array[mid]\n\n def prepare_seg(self):\n p = 100 / ((1 + self.seg_cnt) * self.seg_cnt // 2) / 100\n\n for i in range(self.seg_cnt):\n self.chance.append((self.seg_cnt - i) * p)\n for i in range(1, self.seg_cnt):\n self.chance[i] += self.chance[i - 1]\n\n def segment(self, key):\n if len(self.chance) == 0:\n self.prepare_seg()\n for item in self.array:\n if item['key'] == key:\n return item\n return None\n\n\nif __name__ == '__main__':\n repeat = 100000\n cnt = 100000\n\n s = Searcher(cnt)\n s.prepare_seg()\n\n t1 = process_time()\n print('init was done')\n for i in range(repeat):\n a = s.brute(i + 100000)\n t2 = process_time()\n\n print((t2 - t1) / repeat)\n for i in range(repeat):\n a = s.binary(i + 100000)\n t3 = process_time()\n\n print((t3 - t2) / repeat)\n for i in range(repeat):\n dice = random.random()\n if dice < s.chance[0]:\n key = random.randint(100000, 100000 + cnt // 6)\n elif dice < s.chance[1]:\n key = random.randint(100000 + cnt // 6, 100000 + 2 * cnt // 6)\n elif dice < s.chance[2]:\n key = random.randint(100000 + 2 * cnt // 6, 100000 + 3 * cnt // 6)\n elif dice < s.chance[3]:\n key = random.randint(100000 + 3 * cnt // 6, 100000 + 4 * cnt // 6)\n elif dice < s.chance[4]:\n key = random.randint(100000 + 4 * cnt // 6, 100000 + 5 * cnt // 6)\n else:\n key = random.randint(100000 + 5 * cnt // 6, 100000 + 6 * cnt // 6)\n a = s.segment(key)\n t4 = process_time()\n print((t4 - t3) / repeat)\n","sub_path":"aa/lab07/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"635662013","text":"import sys\nsys.setrecursionlimit(10000)\n\n\ndef spread(_x, _y):\n if matrix[_x][_y] == 1:\n matrix[_x][_y] = 0\n spread(_x + 1, _y)\n spread(_x - 1, _y)\n spread(_x, _y + 1)\n spread(_x, _y - 1)\n return 1\n\n else:\n return 0\n\n\nT = int(input())\n\nfor t in range(T):\n M, N, K = map(int, input().split())\n\n matrix = []\n for _ in range(M + 2):\n matrix.append([0] * (N + 2))\n\n cabbage = []\n for _ in range(K):\n x, y = map(int, input().split())\n matrix[x][y] = 1\n cabbage.append([x, y])\n\n result = 0\n for x, y in cabbage:\n result += spread(x, y)\n\n print(result)\n","sub_path":"BAEKJOON/1012-cabbage.py","file_name":"1012-cabbage.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"511403150","text":"import os\n\nimport numpy as np\n\n\ndef run(inputs):\n sides = np.array([i.split() for i in inputs.split(os.linesep)]).astype(int)\n\n sides = sides.T.reshape(-1, 3)\n\n all_indices = set(range(sides.shape[1]))\n valid = []\n for input_1 in range(sides.shape[1]):\n for input_2 in range(input_1 + 1, sides.shape[1]):\n remaining_side = next(iter(all_indices - {input_1, input_2}))\n this_valid = np.greater(\n sides[:, (input_1, input_2)].sum(axis=1),\n sides[:, remaining_side],\n )\n valid.append(this_valid)\n\n return np.all(valid, 
axis=0).sum()\n","sub_path":"2016/03/b.py","file_name":"b.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"171000802","text":"from pathlib import Path\n\nimport pytest\nfrom pydantic import ValidationError\n\nfrom spekulatio.models import Transformation\n\n\ndef test_instantiation():\n transformation = Transformation(\n **{\n \"pattern\": {\n \"type\": \"glob\",\n \"scope\": \"filename\",\n \"value\": \"*.md\",\n },\n \"action\": {\n \"name\": \"debug\",\n \"params\": {},\n },\n \"output_name_template\": \"{{ input_path.stem }}.html\",\n }\n )\n assert transformation.output_name_template == \"{{ input_path.stem }}.html\"\n\n\ndef test_minimal_instantiation():\n transformation = Transformation(\n **{\n \"pattern\": {\n \"value\": \"*.md\",\n },\n \"action\": {\n \"name\": \"debug\",\n },\n }\n )\n assert transformation.output_name_template == \"{{ input_path.name }}\"\n\n\ndef test_wrong_pattern():\n with pytest.raises(ValidationError):\n _ = Transformation(\n **{\n \"pattern\": {\n \"scope\": \"filename\",\n },\n \"action\": {\n \"name\": \"debug\",\n },\n }\n )\n\n\ndef test_wrong_action():\n with pytest.raises(ValidationError):\n _ = Transformation(\n **{\n \"pattern\": {\n \"value\": \"*.md\",\n },\n \"action\": {\n \"name\": \"debug\",\n \"foo\": \"bar\",\n },\n }\n )\n\n\ndef test_wrong_output_name():\n with pytest.raises(ValidationError):\n transformation = Transformation(\n **{\n \"pattern\": {\n \"value\": \"*.md\",\n },\n \"action\": {\n \"name\": \"debug\",\n },\n \"output_name_template\": \"some/folder/{{ input_path.stem }}.html\",\n }\n )\n\n\ndef test_wrong_output_name_win_anchor():\n with pytest.raises(ValidationError):\n transformation = Transformation(\n **{\n \"pattern\": {\n \"value\": \"*.md\",\n },\n \"action\": {\n \"name\": \"debug\",\n },\n \"output_name_template\": \"some\\\\folder\\\\{{ input_path.stem }}.html\",\n }\n )\n","sub_path":"tests/test_transformations.py","file_name":"test_transformations.py","file_ext":"py","file_size_in_byte":2397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"392208808","text":"import sys\nimport json\nimport argparse\nfrom collections import defaultdict\n\n\ndef update_stats_db(stats, src_ip, str_repr, dst_info, count):\n # set up stats entries\n if src_ip not in stats:\n stats[src_ip] = {}\n stats[src_ip]['count'] = 0\n stats[src_ip]['fingerprints'] = {}\n if str_repr not in stats[src_ip]['fingerprints']:\n stats[src_ip]['fingerprints'][str_repr] = defaultdict(int)\n\n # update database counts\n stats[src_ip]['count'] += count\n stats[src_ip]['fingerprints'][str_repr]['count'] += count\n stats[src_ip]['fingerprints'][str_repr][dst_info] += count\n\n\ndef read_merc_data(in_file):\n stats_db = {}\n total_count = 0\n for line in open(in_file):\n r = json.loads(line)\n if 'fingerprints' not in r or 'tls' not in r['fingerprints']:\n continue\n\n # extract data elements\n str_repr = r['fingerprints']['tls']\n src_ip = r['src_ip']\n dst_ip = r['dst_ip']\n dst_port = r['dst_port']\n server_name = ''\n if 'client' in r['tls'] and 'server_name' in r['tls']['client']:\n server_name = r['tls']['client']['server_name']\n\n # update stats database\n update_stats_db(stats_db, src_ip, str_repr, f'({server_name})({dst_ip})({dst_port})', 1)\n total_count += 1\n\n return stats_db, total_count\n\n\ndef read_merc_stats(in_file):\n stats_db = {}\n total_count = 0\n for line in open(in_file):\n r = 
json.loads(line)\n src_ip = r['src_ip']\n for x in r['fingerprints']:\n str_repr = x['str_repr']\n for y in x['dest_info']:\n dst_info = y['dst']\n count = y['count']\n\n # update stats database\n update_stats_db(stats_db, src_ip, str_repr, dst_info, count)\n total_count += count\n\n return stats_db, total_count\n\n\ndef is_match(x, y):\n for str_repr, v in x['fingerprints'].items():\n for dst,_ in v.items():\n try:\n if x['fingerprints'][str_repr][dst] != y['fingerprints'][str_repr][dst]:\n return False\n except KeyError:\n return False\n return True\n\n\ndef compare_stats_dbs(merc_db, merc_stats):\n if len(merc_db.keys()) != len(merc_stats.keys()):\n print(f'error: merc_db\\'s src_ip\\'s ({len(merc_db.keys())}) != merc_stat\\'s src_ip\\'s ({len(merc_stats.keys())})')\n return False\n\n # find all potential matches for each src_ip\n potential_matches = defaultdict(list)\n for k,v in merc_db.items():\n for k1,v1 in merc_stats.items():\n if v['count'] == v1['count']:\n potential_matches[k].append(k1)\n\n # find exact matches for each src_ip\n matched_merc = set()\n matched_stat = set()\n for k,v in potential_matches.items():\n for k1 in v:\n if k1 in matched_stat:\n continue\n if is_match(merc_db[k], merc_stats[k1]):\n matched_merc.add(k)\n matched_stat.add(k1)\n break\n\n unmatched = False\n if len(matched_merc) != len(merc_db.keys()):\n unmatched = set(merc_db.keys()).difference(matched_merc)\n print(f'error: not all stats src_ip\\'s were matched:')\n for src_ip in unmatched:\n print(f'\\t{src_ip}')\n unmatched = True\n\n if len(matched_stat) != len(merc_stats.keys()):\n unmatched = set(merc_stats.keys()).difference(matched_stat)\n print(f'error: not all stats src_ip\\'s were matched:')\n for src_ip in unmatched:\n print(f'\\t{src_ip}')\n unmatched = True\n\n if unmatched:\n return False\n\n return True\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('-m','--mercury-output',action='store',dest='merc_out',\n help='mercury output file',default=None)\n parser.add_argument('-s','--mercury-stats',action='store',dest='merc_stats',\n help='mercury statistics file',default=None)\n\n args = parser.parse_args()\n if args.merc_out == None:\n print('error: specify mercury output file')\n sys.exit(1)\n if args.merc_stats == None:\n print('error: specify mercury statistics file')\n sys.exit(1)\n\n merc_db, merc_count = read_merc_data(args.merc_out)\n merc_db_stats, merc_count_stats = read_merc_stats(args.merc_stats)\n\n if merc_count != merc_count_stats:\n print(f'error: merc_out count ({merc_count}) != merc_stats count ({merc_count_stats})')\n sys.exit(1)\n\n # print(merc_count)\n # print(merc_count_stats)\n\n if compare_stats_dbs(merc_db, merc_db_stats) == False:\n print('error: stats database comparison failed')\n sys.exit(1)\n\n print('success: stats databases match')\n sys.exit(0)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"test/compare-stats.py","file_name":"compare-stats.py","file_ext":"py","file_size_in_byte":4879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"285205109","text":"\"\"\"\nkissim.encoding.fingerprint_normalized\n\nDefines the normalized kissim fingerprint.\n\"\"\"\n\nimport logging\n\nimport numpy as np\n\nfrom kissim.definitions import DISTANCE_CUTOFFS, MOMENT_CUTOFFS\nfrom kissim.encoding import FingerprintBase\n\nlogger = logging.getLogger(__name__)\n\n\nclass FingerprintNormalized(FingerprintBase):\n @classmethod\n def from_fingerprint(cls, fingerprint):\n \"\"\"\n Normalize 
fingerprint.\n\n Parameters\n ----------\n fingerprint : kissim.encoding.Fingerprint\n (Unnormalized) fingerprint.\n\n Returns\n -------\n kissim.encoding.FingerprintNormalized\n Normalized fingerprint.\n \"\"\"\n\n fingerprint_normalized = cls()\n fingerprint_normalized.structure_klifs_id = fingerprint.structure_klifs_id\n fingerprint_normalized.residue_ids = fingerprint.residue_ids\n fingerprint_normalized.residue_ixs = fingerprint.residue_ixs\n\n fingerprint_normalized._normalize(fingerprint)\n\n return fingerprint_normalized\n\n def _normalize(self, fingerprint):\n \"\"\"\n Normalize the fingerprint (set as values_dict attribute in FingerprintNormalized class).\n\n Parameters\n ----------\n fingerprint : kissim.encoding.Fingerprint\n (Unnormalized) fingerprint.\n \"\"\"\n\n values_dict_normalized = {}\n\n values_dict_normalized[\"physicochemical\"] = self._normalize_physicochemical_bits(\n fingerprint.values_dict[\"physicochemical\"]\n )\n values_dict_normalized[\"spatial\"] = {}\n values_dict_normalized[\"spatial\"][\"distances\"] = self._normalize_distances_bits(\n fingerprint.values_dict[\"spatial\"][\"distances\"]\n )\n values_dict_normalized[\"spatial\"][\"moments\"] = self._normalize_moments_bits(\n fingerprint.values_dict[\"spatial\"][\"moments\"]\n )\n\n self.values_dict = values_dict_normalized\n\n def _normalize_physicochemical_bits(self, values):\n \"\"\"\n Normalize physicochemical bits.\n\n Parameters\n ----------\n values : dict of list of float\n Physicochemical bits.\n\n Returns\n -------\n dict of list of float\n Normalized physicochemical bits.\n \"\"\"\n\n values_normalized = {}\n\n if values is not None:\n values_normalized[\"size\"] = [\n self._min_max_normalization(value, 1.0, 3.0) for value in values[\"size\"]\n ]\n values_normalized[\"hbd\"] = [\n self._min_max_normalization(value, 0.0, 3.0) for value in values[\"hbd\"]\n ]\n values_normalized[\"hba\"] = [\n self._min_max_normalization(value, 0.0, 2.0) for value in values[\"hba\"]\n ]\n values_normalized[\"charge\"] = [\n self._min_max_normalization(value, -1.0, 1.0) for value in values[\"charge\"]\n ]\n values_normalized[\"aromatic\"] = [\n self._min_max_normalization(value, 0.0, 1.0) for value in values[\"aromatic\"]\n ]\n values_normalized[\"aliphatic\"] = [\n self._min_max_normalization(value, 0.0, 1.0) for value in values[\"aliphatic\"]\n ]\n values_normalized[\"sco\"] = [\n self._min_max_normalization(value, 1.0, 3.0) for value in values[\"sco\"]\n ]\n values_normalized[\"exposure\"] = [\n self._min_max_normalization(value, 1.0, 3.0) for value in values[\"exposure\"]\n ]\n return values_normalized\n\n else:\n return None\n\n def _normalize_distances_bits(self, values):\n \"\"\"\n Normalize distances bits (using cutoffs defined for each subpocket).\n\n Parameters\n ----------\n values : dict of list of float\n Distances bits.\n\n Returns\n -------\n dict of list of float\n Normalized distances bits.\n \"\"\"\n\n values_normalized = {}\n\n if values is not None:\n for subpocket_name, distances in values.items():\n values_normalized[subpocket_name] = [\n self._min_max_normalization(\n distance,\n DISTANCE_CUTOFFS[subpocket_name][0],\n DISTANCE_CUTOFFS[subpocket_name][1],\n )\n for distance in distances\n ]\n return values_normalized\n\n else:\n return None\n\n def _normalize_moments_bits(self, values):\n \"\"\"\n Normalize moments bits (using cutoffs defined for each moment).\n\n Parameters\n ----------\n values : dict of list of float\n Moments bits.\n\n Returns\n -------\n dict of list of float\n Normalized 
moments bits.\n \"\"\"\n\n values_normalized = {}\n\n if values is not None:\n for subpocket_name, moments in values.items():\n values_normalized[subpocket_name] = [\n self._min_max_normalization(\n moment, MOMENT_CUTOFFS[i + 1][0], MOMENT_CUTOFFS[i + 1][1]\n )\n for i, moment in enumerate(values[subpocket_name])\n ]\n return values_normalized\n\n else:\n return None\n\n @staticmethod\n def _min_max_normalization(value, minimum, maximum):\n \"\"\"\n Normalize a value using minimum-maximum normalization.\n Values equal or lower / greater than the minimum / maximum value are set to 0.0 / 1.0.\n\n Parameters\n ----------\n value : float or int\n Value to be normalized.\n minimum : float or int\n Minimum value for normalization, values equal/greater than this minimum are set to 0.0.\n maximum : float or int\n Maximum value for normalization, values equal/greater than this maximum are set to 1.0.\n\n Returns\n -------\n float\n Normalized value.\n \"\"\"\n\n if np.isnan(value):\n return np.nan\n elif minimum < value < maximum:\n return (value - minimum) / float(maximum - minimum)\n elif value <= minimum:\n return 0.0\n else:\n return 1.0\n","sub_path":"kissim/encoding/fingerprint_normalized.py","file_name":"fingerprint_normalized.py","file_ext":"py","file_size_in_byte":6296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"498797344","text":"from pprint import pprint\r\n\r\ndb = {\r\n '1': {'2': {'3': '4'}},\r\n '2': {'22': {'33': '44'}},\r\n '3': [333, 444, 555],\r\n 4: 4,\r\n 5: {55: {555: 55555}}\r\n}\r\n\r\n\r\n\r\ndef flatter(dict_, basekey='', newdict={}):\r\n for key, value in dict_.items():\r\n if not basekey:\r\n flat_key = key\r\n else:\r\n flat_key = str(basekey) + '_' + str(key)\r\n\r\n flat_value = value\r\n\r\n if isinstance(value, dict):\r\n flatter(value, flat_key)\r\n else:\r\n newdict[flat_key] = flat_value\r\n\r\n return newdict\r\n\r\n\r\n\r\nprint(db)\r\nresult = flatter(db)\r\nprint(list(result.keys()))\r\nprint(list(result.values()))\r\nprint()\r\npprint(result)\r\n","sub_path":"normalize_dict_recursive/normalize_dict_v01.py","file_name":"normalize_dict_v01.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"64261752","text":"import uuid\n\nimport tenacity\n\n# from etcd3 import exceptions\n\nlock_prefix = '/locks/'\n\n\nclass Lock(object):\n \"\"\"\n A distributed lock.\n\n This can be used as a context manager, with the lock being acquired and\n released as you would expect:\n\n .. code-block:: python\n\n etcd = etcd3.client()\n\n # create a lock that expires after 20 seconds\n with etcd.lock('toot', ttl=20) as lock:\n # do something that requires the lock\n print(lock.is_acquired())\n\n # refresh the timeout on the lease\n lock.refresh()\n\n :param name: name of the lock\n :type name: string or bytes\n :param ttl: length of time for the lock to live for in seconds. The lock\n will be released after this time elapses, unless refreshed\n :type ttl: int\n \"\"\"\n\n def __init__(self, name, ttl=60,\n etcd_client=None):\n self.name = name\n self.ttl = ttl\n if etcd_client is not None:\n self.etcd_client = etcd_client\n\n self.key = lock_prefix + self.name\n self.lease = None\n # store uuid as bytes, since it avoids having to decode each time we\n # need to compare\n self.uuid = uuid.uuid1().bytes\n\n async def acquire(self, timeout=10):\n \"\"\"Acquire the lock.\n\n :params timeout: Maximum time to wait before returning. 
`None` means\n forever, any other value equal or greater than 0 is\n the number of seconds.\n :returns: True if the lock has been acquired, False otherwise.\n\n \"\"\"\n stop = (\n tenacity.stop_never\n if timeout is None else tenacity.stop_after_delay(timeout)\n )\n\n def wait(retry_state):\n # if timeout is None:\n # remaining_timeout = None\n # else:\n # remaining_timeout = max(timeout - retry_state.start_time, 0)\n # TODO(jd): Wait for a DELETE event to happen: that'd mean the lock\n # has been released, rather than retrying on PUT events too\n # try:\n # await self.etcd_client.watch_once(self.key,\n # remaining_timeout)\n # except exceptions.WatchTimedOut:\n # pass\n return 0\n\n @tenacity.retry(retry=tenacity.retry_never,\n stop=stop,\n wait=wait)\n async def _acquire():\n # TODO: save the created revision so we can check it later to make\n # sure we still have the lock\n\n self.lease = await self.etcd_client.lease(self.ttl)\n\n success, _ = await self.etcd_client.transaction(\n compare=[\n self.etcd_client.transactions.create(self.key) == 0\n ],\n success=[\n self.etcd_client.transactions.put(self.key, self.uuid,\n lease=self.lease)\n ],\n failure=[\n self.etcd_client.transactions.get(self.key)\n ]\n )\n if success is True:\n return True\n self.lease = None\n raise tenacity.TryAgain\n\n try:\n return await _acquire()\n except tenacity.RetryError:\n return False\n\n async def release(self):\n \"\"\"Release the lock.\"\"\"\n success, _ = await self.etcd_client.transaction(\n compare=[\n self.etcd_client.transactions.value(self.key) == self.uuid\n ],\n success=[self.etcd_client.transactions.delete(self.key)],\n failure=[]\n )\n return success\n\n async def refresh(self):\n \"\"\"Refresh the time to live on this lock.\"\"\"\n if self.lease is not None:\n return await self.lease.refresh()\n else:\n raise ValueError('No lease associated with this lock - have you '\n 'acquired the lock yet?')\n\n async def is_acquired(self):\n \"\"\"Check if this lock is currently acquired.\"\"\"\n uuid, _ = await self.etcd_client.get(self.key)\n\n if uuid is None:\n return False\n\n return uuid == self.uuid\n\n async def __aenter__(self):\n await self.acquire()\n return self\n\n async def __aexit__(self, exception_type, exception_value, traceback):\n await self.release()\n","sub_path":"etcd3aio/locks.py","file_name":"locks.py","file_ext":"py","file_size_in_byte":4480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"178005177","text":"import numpy as np, cv2\r\n\r\ndef draw_points(image, group, color):\r\n for p in group:\r\n pt = tuple(p.astype(int))\r\n cv2.circle(image, pt, 3, color, cv2.FILLED)\r\n\r\nnsample = 50\r\ntraindata = np.zeros((nsample*2, 2), np.float32)\r\nlabel = np.zeros((nsample*2, 1), np.float32)\r\n\r\ncv2.randn(traindata[:nsample], 150, 30)\r\ncv2.randn(traindata[nsample:], 250, 60)\r\nlabel[:nsample], label[nsample:] = 0, 1\r\n\r\nK = 7\r\nknn = cv2.ml.KNearest_create()\r\nknn.train(traindata, cv2.ml.ROW_SAMPLE, label)\r\n\r\npoints = [(x, y) for y in range(400) for x in range(400)]\r\nret, resp, neig, dist = knn.findNearest(np.array(points, np.float32), K)\r\n\r\ncolors = [(0, 180, 0) if p else (0, 0, 180) for p in resp]\r\nimage = np.reshape(colors, (400, 400, 3)).astype('uint8')\r\n\r\ndraw_points(image, traindata[:nsample], color=(0, 0, 255))\r\ndraw_points(image, traindata[nsample:], color=(0, 255, 0))\r\ncv2.imshow(\"sample K=\"+ str(K), 
image)\r\ncv2.waitKey(0)","sub_path":"CHAPTER10/04.kNN_exam.py","file_name":"04.kNN_exam.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"594228323","text":"'''\nHelper methods to work with a GPX file in companion to the upload photo set to facilitate adding lat/long coordinates.\n\nModern security concerns mean that many choose not to embed GPS data in photos taken with a smartphone; too, many DSLRs\nsimply lack this facility. With that in mind, with either a smartphone or a dedicated GPS device, a GPX file\ncan be recorded during a field session and from the set of points and timestamps it contains, we can estimate\ncoordinates a photo was taken based on timestamp.\n'''\n\nimport gpxpy\nimport pathlib\nfrom pytz import timezone\nfrom pendulum import datetime, Period, Duration, instance\nimport pendulum\nimport os\nimport sys\nfrom typing import Union, List, Tuple, Dict\nfrom logging import getLogger\n\nlogger = getLogger()\n\ndef parse_gpx(gpx_file: Union[pathlib.Path,str]) -> Dict[datetime, Tuple[str,str]]:\n '''\n Get points and their corresponding timestamp from the GPX file. The timestamp is optional according to the spec,\n but points without timestamps are useless for our purposes, so skip those points that lack it.\n\n GPX files use ISO8601 for timestamp\n\n :param gpx_file: gpx file to parse\n :return: points that meet our criteria in the form of {timestamp : (lat, long)}\n\n '''\n\n points = {}\n logger.debug('parse_gpx()')\n with open(gpx_file, 'r') as gfile:\n logger.info('Parsing {0}'.format(gpx_file))\n gpx_tree = gpxpy.parse(gfile)\n\n logger.debug('Accumulating gps points')\n for track in gpx_tree.tracks:\n for seg in track.segments:\n for point in seg.points:\n if point.time == None:\n continue\n # Timestamp in the form of 2018-10-13T15:01:52Z; GPXPy already gives it to us in vanilla datetime\n # GPX seems to always record timestamps in UTC, so do a conversion\n timestamp = pendulum.instance(point.time).in_tz('local')\n \n points[timestamp] = (point.latitude,point.longitude)\n \n logger.info('Found {0} gps points with timestamp'.format(str(len(points))))\n \n return points\n\n\ndef accumulate_gps_points(gpx_dir: pathlib.Path) -> Dict[datetime, Tuple[str,str]]:\n '''\n Iterate over the provided GPX files and assemble points with their timestamps\n\n :param gpx_dir: directory where gpx files are contained; no subdirectories are inspected\n :return:\n '''\n gps_points = {}\n \n logger.debug('accumulate_gps_points()')\n \n for gpx in pathlib.Path(gpx_dir).glob('*.gpx'):\n gps_points.update(parse_gpx(gpx))\n\n logger.info('Found {0} timestamped gps points total'.format(str(len(gps_points))))\n \n return gps_points\n","sub_path":"gpx.py","file_name":"gpx.py","file_ext":"py","file_size_in_byte":2681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"349126660","text":"#!/usr/bin/env python3\n\nimport unittest\nimport test_parser\n\nparserTestSuite = unittest.TestSuite()\nparserTestSuite.addTest(unittest.makeSuite(test_parser.ParserTest))\nprint(\"Count of tests: \" + str(parserTestSuite.countTestCases()) + \"\\n\")\n\nrunner = unittest.TextTestRunner(verbosity=2)\nrunner.run(parserTestSuite)\n","sub_path":"test_runner.py","file_name":"test_runner.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"167726534","text":"import 
json\n\n\ndef merge_coco(coco_a, coco_b, save_path, save_name='pseudo_labeling'):\n    with open(coco_a, 'r', encoding='utf-8') as fid:\n        cocoa = json.load(fid)\n    with open(coco_b, 'r', encoding='utf-8') as fid:\n        cocob = json.load(fid)\n\n    assert cocoa['info'] == cocob['info']\n    assert cocoa['license'] == cocob['license']\n    assert cocoa['categories'] == cocob['categories']\n\n    images_a = cocoa['images']\n    images_b = cocob['images']\n    anns_a = cocoa['annotations']\n    anns_b = cocob['annotations']\n\n    id_a = [item['id'] for item in images_a]\n    id_b = [item['id'] for item in images_b]\n\n    image_id2imagea = {item['id']: item for item in images_a}\n    image_id2imageb = {item['id']: item for item in images_b}\n\n    if len(set(id_a) & set(id_b)) != 0:\n        pass\n\n    image_id2ann_a = {item: [] for item in id_a}\n    image_id2ann_b = {item: [] for item in id_b}\n\n    for item in anns_a:\n        image_id2ann_a[item['image_id']].append(item)\n\n    for item in anns_b:\n        image_id2ann_b[item['image_id']].append(item)\n\n    merge_coco_json = dict()\n    merge_coco_json['info'] = cocoa['info']\n    merge_coco_json['license'] = cocoa['license']\n    merge_coco_json['categories'] = cocoa['categories']\n\n    merge_images = []\n    merge_annotations = []\n\n    merge_image_index = 0\n    merge_ann_index = 0\n\n    for image_id, image in image_id2imagea.items():\n        ori_image_id = image['id']\n        anns = image_id2ann_a[ori_image_id]\n        for ann in anns:\n            ann['image_id'] = merge_image_index\n            ann['id'] = merge_ann_index\n\n            merge_annotations.append(ann)\n\n            # each annotation needs its own unique id\n            merge_ann_index += 1\n\n        image['id'] = merge_image_index\n        new_merge_image_item = image\n        merge_images.append(new_merge_image_item)\n\n        merge_image_index += 1\n\n    for image_id, image in image_id2imageb.items():\n        ori_image_id = image['id']\n        anns = image_id2ann_b[ori_image_id]\n        for ann in anns:\n            ann['image_id'] = merge_image_index\n            ann['id'] = merge_ann_index\n\n            merge_annotations.append(ann)\n\n            # each annotation needs its own unique id\n            merge_ann_index += 1\n\n        image['id'] = merge_image_index\n        new_merge_image_item = image\n        merge_images.append(new_merge_image_item)\n\n        merge_image_index += 1\n\n    assert len(merge_images) == len(images_a) + len(images_b)\n    assert len(merge_annotations) == len(anns_a) + len(anns_b)\n\n    merge_coco_json['images'] = merge_images\n    merge_coco_json['annotations'] = merge_annotations\n\n    json.dump(merge_coco_json, open(f\"{save_path}/{save_name}.json\", 'w'), ensure_ascii=False, indent=4)\n\n\ndef test_merge():\n    from pycocotools.coco import COCO\n    train = COCO('/fengyouliang/datasets/x-ray/coco/annotations/fold4/train.json')\n    val = COCO('/fengyouliang/datasets/x-ray/coco/annotations/fold4/val.json')\n    pseudo = COCO('/fengyouliang/datasets/x-ray/coco/annotations/fold4/pseudo_demo.json')\n    print()\n\n\ndef main():\n    # a = '/fengyouliang/datasets/x-ray/coco/annotations/fold4/train.json'\n    # b = '/fengyouliang/datasets/x-ray/coco/annotations/fold4/val.json'\n    # merge_coco(a, b, '/fengyouliang/datasets/x-ray/coco/annotations/fold4', 'pseudo_demo')\n    test_merge()\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"projects/demo/pseudo_labeling_demo.py","file_name":"pseudo_labeling_demo.py","file_ext":"py","file_size_in_byte":3259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"633375367","text":"#Create a program that asks the user to enter their name and their age. Print out a message addressed to them that\n#tells them the year that they will turn 100 years old. 
DONE\n\n#Extras:\n\n#Add on to the previous program by asking the user for another number and printing out that many copies of the\n#previous message. (Hint: order of operations exists in Python) DONE\n\n#Print out that many copies of the previous message on separate lines. (Hint: the string \"\\n is the same as\n#pressing the ENTER button) DONE\n\nname = input('Enter your name: ')\nage = int(input('Enter your age: '))\nwhile True:\n    num = int(input('How many times to print? '))\n    if num>0:\n        break\n    elif num==0: # check if user entered 0 amount of times\n        print('We cant print 0 times!')\n    else: # check if user entered negative amount of times\n        print('We cant print negative amount of times!')\nyear100 = str(2016+(100-age))\nwhile num > 0:\n    print('Hello '+name+', you will be 100 years old in '+year100+'.')\n    num-=1 # repeat num times, num decreases by 1 each time\n           # it is printed.\n","sub_path":"1.character-input.py","file_name":"1.character-input.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"445117898","text":"import sys\nimport socket\nimport asyncio\nimport aio_msgpack_rpc\n\n\ntry:\n    path = sys.argv[1]\nexcept Exception:\n    print(\"No socket path provided.\")\n    exit(1)\n\n\n# handlers can be defined on a class\n# they can either be async or plain functions\nclass MyServicer:\n    async def sum(self, x, y):\n        # print(f\"sum: {x}, {y}\")\n        return x + y\n\n    async def prod(self, x, y):\n        return x * y\n\n    async def answer(self):\n        return 42\n\n    async def i_dont_know(self, a, b, c):\n        return None\n\n    async def call_me_back(self, name):\n        return None\n\n    async def dont_call_me(self):\n        raise Exception('Don\\'t call me !')\n\n\nasync def main():\n    sock = socket.socket(family=socket.AF_UNIX, type=socket.SOCK_STREAM)\n    sock.bind(path)\n\n    try:\n        server = await asyncio.start_server(\n            aio_msgpack_rpc.Server(MyServicer()),\n            sock=sock,\n        )\n\n        while True:\n            await asyncio.sleep(0.1)\n    finally:\n        server.close()\n\ntry:\n    asyncio.new_event_loop().run_until_complete(main())\nexcept KeyboardInterrupt:\n    pass\n","sub_path":"test/unix-mpack-server.py","file_name":"unix-mpack-server.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"102450009","text":"# coding=utf-8\n\nimport datetime\n\nimport re\nimport json\n\nimport requests\n\nfrom reviews.config import VISTA_REGEX, VISTA_PAGE_SIZE, VISTA_REVIEW_API, VISTA_HEADER\nfrom reviews.error import RequestParamError\nfrom reviews.tools import review_resolver, Review, get_logging, CrawlerType, request_resolver\n\n# get the logger object\nlogging = get_logging()\n\n\nclass VistaProduct:\n    \"\"\"Obtain the review api request parameters from a Vista product url\"\"\"\n\n    def __init__(self, product_url):\n        \"\"\"\n        init\n        Args:\n            product_url: Vista product url\n        \"\"\"\n\n        self.product_url = product_url\n        self.__params = {\n            'GP': '09/29/2019 05:19:49',\n            'GPS': '5489783259',\n            'GNF': '0',\n        }\n\n    def get_review_api_params(self):\n        \"\"\"\n        Obtain product_type and root_product_id from product_url.\n        Both are required parameters for the Vista review api.\n        Returns:\n            product_type: zazzle product type\n            root_product_id: zazzle root product id\n        \"\"\"\n\n        # first request: grab the cookies returned with the response\n        response = request_resolver(url=self.product_url, params=None, header=VISTA_HEADER)\n        cookies = requests.utils.dict_from_cookiejar(response.cookies)\n\n        # second request carries the cookies from the first one; without them, vista demands that js be loaded\n        response = request_resolver(url=self.product_url, params=None, header=VISTA_HEADER, cookies=cookies)\n\n        
pattern = re.compile(VISTA_REGEX)\n        match = re.search(pattern, response.text)\n\n        if match:\n            vista_product_id = match.group(1)\n            api_key = match.group(2)\n            locale = match.group(3)\n            merchant_id = match.group(4)\n\n            logging.info(\n                '{VISTA API PARAMS} -> [vista_product_id]: ' + vista_product_id + ', [api_key]: ' + api_key + ', [locale]: ' + locale + ', [merchant_id]: ' + merchant_id)\n\n            return VistaReviewApiParams(vista_product_id, api_key, locale, merchant_id)\n        else:\n            msg = '[Getting API PARAM] error product url: ' + self.product_url\n            logging.error(msg)\n            raise RequestParamError(msg)\n\n\nclass VistaReviewApiParams:\n    def __init__(self, vista_product_id, api_key, locale, merchant_id):\n        \"\"\"\n        init\n        Args:\n            vista_product_id: vista product id\n            merchant_id: merchant_id\n            api_key: apikey\n            locale: locale/region\n        \"\"\"\n\n        self.__vista_product_id = vista_product_id\n        self.__merchant_id = merchant_id\n        self.__api_key = api_key\n        self.__locale = locale\n\n    def get_params(self):\n        return self.__vista_product_id, self.__merchant_id, self.__api_key, self.__locale\n\n\nclass VistaReview:\n    \"\"\"Fetch vista reviews\"\"\"\n\n    def __init__(self, zazzle_review_api_params):\n        \"\"\"\n        init\n        Args:\n            zazzle_review_api_params: vista review api parameters\n        \"\"\"\n\n        vista_product_id, merchant_id, api_key, locale = zazzle_review_api_params.get_params()\n\n        self.__vista_product_id = vista_product_id\n        self.__merchant_id = merchant_id\n        self.__api_key = api_key\n        self.__locale = locale\n\n    def get_reviews(self, rating, review_counts):\n        \"\"\"\n        Request reviews page by page, based on review_counts and PAGE_SIZE, filtered by rating\n        Args:\n            rating: star rating of the requested reviews\n            review_counts: total number of reviews wanted\n        Returns:\n            the fetched reviews\n        \"\"\"\n\n        results = list()\n\n        if review_counts != -1:\n            loop_times = review_counts // VISTA_PAGE_SIZE\n            remainder = review_counts % VISTA_PAGE_SIZE\n            time = 0\n            is_done = False\n            for _ in range(loop_times):\n                reviews = self.__get_reviews(time, VISTA_PAGE_SIZE, rating)\n                if len(reviews) < 1:\n                    is_done = True\n                    break\n                results.extend(reviews)\n                time += VISTA_PAGE_SIZE\n            if not is_done and remainder > 0:\n                results.extend(self.__get_reviews(time, remainder, rating))\n        else:\n            results.extend(self.__get_reviews(0, VISTA_PAGE_SIZE, rating))\n\n        return results\n\n    def __get_reviews(self, page_num, page_size, rating):\n        \"\"\"\n        The method that actually requests the reviews\n        Args:\n            page_num: page offset to request from\n            page_size: number of reviews on this page\n            rating: required rating\n        Returns:\n            the reviews fetched for this page\n        \"\"\"\n\n        params = {\n            'paging.from': page_num,\n            'paging.size': page_size,\n            'image_only': False,\n            'apikey': self.__api_key,\n            'sort': 'HighestRating',\n        }\n\n        if rating != -1:\n            params['filters'] = 'rating:' + str(rating)\n\n        review_api = VISTA_REVIEW_API + self.__merchant_id + '/l/' + self.__locale + '/product/' + self.__vista_product_id + '/reviews'\n\n        response = request_resolver(review_api, params=params, header=VISTA_HEADER)\n\n        json_content = json.loads(response.text)\n\n        if response.ok:\n            review_list = list()\n            if json_content['results'] and json_content['results'][0]:\n                reviews = json_content['results'][0]['reviews']\n\n                for review in reviews:\n                    text = review_resolver(review['details']['comments'], CrawlerType.VISTA)\n                    date_add = datetime.datetime.fromtimestamp(review['details']['created_date'] / 1000).strftime(\n                        \"%Y-%m-%d %H:%M:%S\")\n                    author = review['details']['nickname'] if review['details']['nickname'] else 'anonymous'\n                    review_list.append(Review(text=text, rating=rating, date_add=date_add, author=author))\n\n            if len(review_list) > 0:\n                logging.info(\" rating: \" + str(rating) +\n                             \" page_num: \" + str(page_num) + \", review_list: \" + 
str(len(review_list)))\n                return review_list\n\n        else:\n            logging.error(\n                '[Getting REVIEWS] error vista_product_id: ' + self.__vista_product_id + ' merchant_id: ' + self.__merchant_id\n                + ' api_key: ' + self.__api_key + ' locale: ' + self.__locale + ' page_num: ' + str(\n                    page_num) + ' page_size:' + str(page_size),\n                exc_info=True\n            )\n            raise requests.exceptions.RequestException(response)\n","sub_path":"reviews/vista_reviews.py","file_name":"vista_reviews.py","file_ext":"py","file_size_in_byte":6529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"373205681","text":"# -*- coding: utf-8 -*-\nimport xlwt\n\n\"\"\"\nThe write_merge method needs a short explanation:\n\nwrite_merge(x, x + m, y, w + n, string, style)\nx is the row and y the column; m is the number of rows to span and n the number of columns to span; string is the\ncontent written into the merged cell and style is the cell style. x, y, w and h all start counting at 0.\nThis is not quite the same as reading merged cells with xlrd.\nFor example: sheet1.write_merge(21,21,0,1,u'合计',set_style('Times New Roman',220,True))\nmerges columns 1 and 2 of row 22; the merged cell contains u'合计' (\"total\") and gets the given style.\nTo create several sheets, just call f.add_sheet once per sheet.\n\"\"\"\n\ndef set_style(name, height, bold=False):\n    \"\"\"\n    Set a cell style\n    :param name: font name\n    :param height: font height\n    :param bold: whether the font is bold\n    :return: the cell style\n    \"\"\"\n    style = xlwt.XFStyle() # initialize the style\n\n    font = xlwt.Font() # create a font for the style\n    font.name = name # 'Times New Roman'\n    font.bold = bold\n    font.colour_index = 4\n    font.height = height\n\n    # borders = xlwt.Borders()\n    # borders.left = 6\n    # borders.right = 6\n    # borders.top = 6\n    # borders.bottom = 6\n\n    style.font = font\n    # style.borders = borders\n\n    return style\n\n\n# write the excel file\ndef write_excel():\n    f = xlwt.Workbook() # create the workbook\n\n    \"\"\"create one sheet: sheet1\"\"\"\n    sheet1 = f.add_sheet(u'sheet1', cell_overwrite_ok=True) # create the sheet\n    row0 = [u'业务', u'状态', u'北京', u'上海', u'广州', u'深圳', u'状态小计', u'合计']\n    column0 = [u'机票', u'船票', u'火车票', u'汽车票', u'其它']\n    status = [u'预定', u'出票', u'退票', u'业务小计']\n\n    # generate the first row\n    for i in range(len(row0)):\n        sheet1.write(0,i,row0[i],set_style('Times New Roman',220,True))\n\n    # generate the first and the last column (merging 4 rows each)\n    i, j = 1, 0\n    while i < 4*len(column0) and j < len(column0):\n        sheet1.write_merge(i,i+3,0,0,column0[j],set_style('Arial',220,True)) # first column\n        sheet1.write_merge(i,i+3,7,7) # last column, u'合计' (\"total\")\n        i += 4\n        j += 1\n\n    sheet1.write_merge(21,21,0,1,u'合计',set_style('Times New Roman',220,True))\n\n    # generate the second column\n    i = 0\n    while i < 4*len(column0):\n        for j in range(0,len(status)):\n            sheet1.write(j+i+1,1,status[j])\n        i += 4\n\n    f.save('ticket_info.xlsx') # save the file\n\nif __name__ == '__main__':\n    # generate_workbook()\n    # read_excel()\n    write_excel()\n\n","sub_path":"Python文件操作/excel操作/write_ticket_info_excel.py","file_name":"write_ticket_info_excel.py","file_ext":"py","file_size_in_byte":2496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"55949509","text":"from ftw.builder import Builder\nfrom ftw.builder import create\nfrom ftw.testbrowser import browsing\nfrom opengever.base.behaviors.base import IOpenGeverBase\nfrom opengever.base.model import CONTENT_TITLE_LENGTH\nfrom opengever.base.response import COMMENT_RESPONSE_TYPE\nfrom opengever.base.response import IResponseContainer\nfrom opengever.base.response import Response\nfrom opengever.dossier.behaviors.customproperties import IDossierCustomProperties\nfrom opengever.dossier.behaviors.dossier import IDossier\nfrom opengever.dossier.behaviors.filing import IFilingNumber\nfrom opengever.dossier.behaviors.participation import IParticipationAware\nfrom opengever.dossier.dossiertemplate.behaviors import IDossierTemplate\nfrom opengever.dossier.indexers import 
ParticipationIndexHelper\nfrom opengever.dossier.interfaces import IDossierArchiver\nfrom opengever.kub.testing import KuBIntegrationTestCase\nfrom opengever.sharing.events import LocalRolesAcquisitionActivated\nfrom opengever.sharing.events import LocalRolesAcquisitionBlocked\nfrom opengever.testing import index_data_for\nfrom opengever.testing import IntegrationTestCase\nfrom opengever.testing import solr_data_for\nfrom opengever.testing import SolrIntegrationTestCase\nfrom plone import api\nfrom zope.event import notify\nfrom zope.interface import Interface\nfrom zope.lifecycleevent import Attributes\nfrom zope.lifecycleevent import ObjectModifiedEvent\nimport json\nimport requests_mock\n\n\nclass TestDossierIndexers(SolrIntegrationTestCase):\n\n def test_sortable_title_indexer_accomodates_padding_for_five_numbers(self):\n self.login(self.regular_user)\n numeric_part = \"1 2 3 4 5\"\n alphabetic_part = u\"\".join([\"a\" for i in range(CONTENT_TITLE_LENGTH\n - len(numeric_part))])\n title = numeric_part + alphabetic_part\n\n self.dossier.setTitle(title)\n self.dossier.reindexObject([\"sortable_title\"])\n\n self.assertEquals(\n '0001 0002 0003 0004 0005' + alphabetic_part,\n index_data_for(self.dossier).get('sortable_title'))\n\n def test_containing_dossier(self):\n self.login(self.regular_user)\n\n self.subdossier.reindexObject()\n self.subdocument.reindexObject()\n self.commit_solr()\n\n self.assertEquals(\n u'Vertr\\xe4ge mit der kantonalen Finanzverwaltung',\n solr_data_for(self.subdossier, 'containing_dossier'),\n )\n\n self.assertEquals(\n u'Vertr\\xe4ge mit der kantonalen Finanzverwaltung',\n solr_data_for(self.document, 'containing_dossier'),\n )\n\n # Check if the subscribers catch editing the title of a dossier\n IOpenGeverBase(self.dossier).title = u\"Testd\\xf6ssier CHANGED\"\n\n self.dossier.reindexObject()\n self.subdossier.reindexObject()\n self.subdocument.reindexObject()\n\n notify(ObjectModifiedEvent(\n self.dossier,\n Attributes(Interface, 'IOpenGeverBase.title'),\n ))\n self.commit_solr()\n\n self.assertEquals(\n u'Testd\\xf6ssier CHANGED',\n solr_data_for(self.subdossier, 'containing_dossier'),\n )\n\n self.assertEquals(\n u'Testd\\xf6ssier CHANGED',\n solr_data_for(self.document, 'containing_dossier'),\n )\n\n def test_containing_subdossier(self):\n self.login(self.regular_user)\n\n self.subdossier.reindexObject()\n self.subdocument.reindexObject()\n self.commit_solr()\n\n self.assertEquals(\n u'',\n solr_data_for(self.subdossier, 'containing_subdossier'),\n )\n\n self.assertEquals(\n u'2016',\n solr_data_for(self.subdocument, 'containing_subdossier'),\n )\n\n # Check if the subscribers catch editing the title of a subdossier\n IOpenGeverBase(self.subdossier).title = u'Subd\\xf6ssier CHANGED'\n\n self.subdossier.reindexObject()\n self.subdocument.reindexObject()\n\n notify(ObjectModifiedEvent(\n self.subdossier,\n Attributes(Interface, 'IOpenGeverBase.title'),\n ))\n self.commit_solr()\n\n self.assertEquals(\n u'',\n solr_data_for(self.subdossier, 'containing_subdossier'),\n )\n\n self.assertEquals(\n u'Subd\\xf6ssier CHANGED',\n solr_data_for(self.subdocument, 'containing_subdossier'),\n )\n\n def test_filing_no_is_not_indexed_for_default_dossiers(self):\n self.login(self.regular_user)\n\n self.dossier.reindexObject()\n\n self.assertEquals(None, index_data_for(self.dossier).get('filing_no'))\n self.assertEquals(None, index_data_for(self.dossier).get('searchable_filing_no'))\n\n def test_keywords_field_is_indexed_in_Subject_index(self):\n catalog = 
api.portal.get_tool(name=\"portal_catalog\")\n\n self.login(self.regular_user)\n\n self.dossier.reindexObject()\n\n self.assertEquals(\n 2,\n len(catalog(Subject=u'Finanzverwaltung')),\n 'Expected two items with Keyword \"Finanzverwaltung\"',\n )\n\n self.assertEquals(\n 4,\n len(catalog(Subject=u'Vertr\\xe4ge')),\n u'Expected three items with Keyword \"Vertr\\xe4ge\"',\n )\n\n self.subdossier.reindexObject()\n\n self.assertEquals(\n 3,\n len(catalog(Subject=u'Subkeyword')),\n 'Expected three item with Keyword \"Subkeyword\"',\n )\n\n self.assertEquals(\n 1,\n len(catalog(Subject=u'Subkeyw\\xf6rd')),\n u'Expected one item with Keyword \"Subkeyw\\xf6rd\"',\n )\n\n self.assertEquals(\n (\n u'Finanzverwaltung',\n u'Subkeyword',\n u'Subkeyw\\xf6rd',\n u'Subsubkeyword',\n u'Subsubkeyw\\xf6rd',\n u'Vertr\\xe4ge',\n u'Wichtig',\n u'secret',\n u'special',\n ),\n catalog.uniqueValuesFor('Subject'),\n )\n\n def test_dossier_searchable_text_contains_keywords(self):\n self.login(self.regular_user)\n\n indexed_value = solr_data_for(self.dossier, 'SearchableText')\n\n self.assertIn(u'Finanzverwaltung', indexed_value)\n self.assertIn(u'Vertr\\xe4ge', indexed_value)\n\n def test_dossier_searchable_text_contains_external_reference(self):\n self.login(self.regular_user)\n\n indexed_value = solr_data_for(self.dossier, 'SearchableText')\n\n self.assertIn(u'qpr-900-9001', indexed_value)\n\n def test_dossier_searchable_text_contains_custom_properties_from_default_and_active_slot(self):\n self.login(self.manager)\n\n create(\n Builder(\"property_sheet_schema\")\n .named(\"schema1\")\n .assigned_to_slots(u\"IDossier.dossier_type.businesscase\")\n .with_field(\"text\", u\"f1\", u\"Field 1\", u\"\", False)\n )\n IDossier(self.dossier).dossier_type = u\"businesscase\"\n IDossierCustomProperties(self.dossier).custom_properties = {\n \"IDossier.dossier_type.businesscase\": {\"f1\": \"indexme-businescase\"},\n \"IDossier.dossier_type.meeting\": {\"f1\": \"noindex-meeting\"},\n \"IDossier.default\": {\"additional_title\": \"indexme-default\"},\n }\n self.dossier.reindexObject()\n self.commit_solr()\n indexed_value = solr_data_for(self.dossier, 'SearchableText')\n\n self.assertIn(u'indexme-businescase', indexed_value)\n self.assertIn(u'indexme-default', indexed_value)\n self.assertNotIn(u'noindex-meeting', indexed_value)\n\n def test_dossier_searchable_text_with_custom_properties_for_all_field_types(self):\n self.login(self.manager)\n\n choices = [\"rot\", u\"gr\\xfcn\", \"blau\"]\n create(\n Builder(\"property_sheet_schema\")\n .named(\"schema1\")\n .assigned_to_slots(u\"IDossier.dossier_type.businesscase\")\n .with_field(\"bool\", u\"yesorno\", u\"Yes or no\", u\"\", True)\n .with_field(\"choice\", u\"choose\", u\"Choose\", u\"\", True, values=choices)\n .with_field(\"multiple_choice\", u\"choosemulti\",\n u\"Choose Multi\", u\"\", True, values=choices)\n .with_field(\"int\", u\"num\", u\"Number\", u\"\", True)\n .with_field(\"text\", u\"text\", u\"Some lines of text\", u\"\", True)\n .with_field(\"textline\", u\"textline\", u\"A line of text\", u\"\", True)\n )\n IDossier(self.dossier).dossier_type = u\"businesscase\"\n IDossierCustomProperties(self.dossier).custom_properties = {\n \"IDossier.dossier_type.businesscase\": {\n \"yesorno\": False,\n \"choose\": u\"gr\\xfcn\",\n \"choosemulti\": [\"rot\", \"blau\"],\n \"num\": 122333,\n \"text\": u\"K\\xe4fer\\nJ\\xe4ger\",\n \"textline\": u\"Kr\\xe4he\",\n },\n }\n self.dossier.reindexObject()\n self.commit_solr()\n indexed_value = solr_data_for(self.dossier, 
'SearchableText')\n\n self.assertIn(u\"gr\\xfcn\", indexed_value)\n self.assertIn(u\"122333\", indexed_value)\n self.assertIn(u\"K\\xe4fer\", indexed_value)\n self.assertIn(u\"J\\xe4ger\", indexed_value)\n self.assertIn(u\"Kr\\xe4he\", indexed_value)\n self.assertIn(u\"rot\", indexed_value)\n self.assertIn(u\"blau\", indexed_value)\n\n def test_dossier_searchable_text_contains_comments(self):\n self.login(self.regular_user)\n\n response1 = Response(COMMENT_RESPONSE_TYPE)\n response1.text = u'Telefongespr\\xe4ch mit Herr Meier'\n IResponseContainer(self.dossier).add(response1)\n\n response2 = Response(COMMENT_RESPONSE_TYPE)\n response2.text = u'Abschlussnummer DDD2837'\n IResponseContainer(self.dossier).add(response2)\n\n self.dossier.reindexObject()\n self.commit_solr()\n\n indexed_value = solr_data_for(self.dossier, 'SearchableText')\n self.assertIn(u'Meier', indexed_value)\n self.assertIn(u'DDD2837', indexed_value)\n\n @browsing\n def test_searchable_text_gets_updated_when_comment_added(self, browser):\n self.login(self.regular_user, browser=browser)\n\n indexed_value = solr_data_for(self.dossier, 'SearchableText')\n self.assertNotIn(u'DDD2837', indexed_value)\n\n url = '{}/@responses'.format(self.dossier.absolute_url())\n browser.open(url, method=\"POST\", headers=self.api_headers,\n data=json.dumps({'text': u'DDD2837'}))\n\n self.commit_solr()\n indexed_value = solr_data_for(self.dossier, 'SearchableText')\n self.assertIn(u'DDD2837', indexed_value)\n\n def test_dossiertemplate_searchable_text_contains_keywords(self):\n self.login(self.regular_user)\n\n indexed_value = solr_data_for(self.dossiertemplate, 'SearchableText')\n\n self.assertIn(u'secret', indexed_value)\n self.assertIn(u'special', indexed_value)\n\n def test_external_reference(self):\n self.login(self.regular_user)\n\n self.dossier.reindexObject()\n\n self.assert_index_value(u'qpr-900-9001-\\xf7', 'external_reference', self.dossier)\n\n def test_blocked_local_roles(self):\n self.login(self.regular_user)\n self.dossier.reindexObject()\n\n self.assert_index_value(False, 'blocked_local_roles', self.dossier)\n\n self.dossier.__ac_local_roles_block__ = True\n self.dossier.reindexObject()\n\n self.assert_index_value(True, 'blocked_local_roles', self.dossier)\n\n self.dossier.__ac_local_roles_block__ = False\n notify(LocalRolesAcquisitionActivated(self.dossier, ))\n\n self.assert_index_value(False, 'blocked_local_roles', self.dossier)\n\n self.dossier.__ac_local_roles_block__ = True\n notify(LocalRolesAcquisitionBlocked(self.dossier, ))\n\n self.assert_index_value(True, 'blocked_local_roles', self.dossier)\n\n def test_dossier_type_indexer(self):\n self.login(self.regular_user)\n\n IDossier(self.dossier).dossier_type = None\n self.dossier.reindexObject()\n self.commit_solr()\n\n self.assertEqual(solr_data_for(self.dossier, 'dossier_type'), None)\n\n IDossier(self.dossier).dossier_type = \"businesscase\"\n self.dossier.reindexObject()\n self.commit_solr()\n self.assertEqual(solr_data_for(self.dossier, 'dossier_type'), \"businesscase\")\n\n def test_dossiertemplate_dossier_type_indexer(self):\n self.login(self.regular_user)\n\n IDossierTemplate(self.dossiertemplate).dossier_type = None\n self.dossiertemplate.reindexObject()\n self.commit_solr()\n\n self.assertEqual(solr_data_for(self.dossiertemplate, 'dossier_type'), None)\n\n IDossierTemplate(self.dossiertemplate).dossier_type = \"businesscase\"\n self.dossiertemplate.reindexObject()\n self.commit_solr()\n self.assertEqual(solr_data_for(self.dossiertemplate, 'dossier_type'), 
\"businesscase\")\n\n\nclass TestDossierFilingNumberIndexer(SolrIntegrationTestCase):\n\n features = ('filing_number', )\n\n filing_prefix = 'directorate'\n filing_no = 'SKA ARCH-Administration-2016-11'\n searchable_filing_no = [\n 'ska',\n 'arch',\n 'administration',\n '2016',\n '11',\n ]\n\n def test_returns_empty_string_for_dossiers_without_filing_information(self):\n self.login(self.regular_user)\n\n self.dossier.reindexObject()\n\n self.assertEquals(\n None,\n index_data_for(self.dossier).get('filing_no'),\n )\n\n self.assert_index_value(u'', 'searchable_filing_no', self.dossier)\n\n def test_returns_first_part_of_the_filing_number_for_dossiers_with_only_filing_prefix_information(self):\n self.login(self.regular_user)\n\n IDossier(self.dossier).filing_prefix = self.filing_prefix\n self.dossier.reindexObject()\n\n self.assert_index_value(u'Hauptmandant-Directorate-?', 'filing_no', self.dossier)\n\n self.assertItemsEqual(\n (\n 'hauptmandant',\n 'directorate',\n ),\n index_data_for(self.dossier).get('searchable_filing_no'),\n )\n\n def test_returns_filing_number_for_dossiers_with_only_filing_prefix_information(self):\n self.login(self.regular_user)\n\n IDossier(self.dossier).filing_prefix = self.filing_prefix\n IFilingNumber(self.dossier).filing_no = self.filing_no\n self.dossier.reindexObject()\n\n self.assert_index_value(self.filing_no, 'filing_no', self.dossier)\n self.assert_index_value(self.searchable_filing_no, 'searchable_filing_no', self.dossier)\n\n def test_filing_no_is_in_searchable_text(self):\n self.login(self.regular_user)\n\n IDossier(self.dossier).filing_prefix = self.filing_prefix\n IFilingNumber(self.dossier).filing_no = self.filing_no\n self.dossier.reindexObject()\n self.commit_solr()\n\n indexed_value = solr_data_for(self.dossier, 'SearchableText')\n\n self.assertIn('SKA ARCH-Administration-2016-11', indexed_value)\n\n def test_filing_no_is_in_searchable_text_when_dossier_is_archived(self):\n self.login(self.regular_user)\n\n IDossierArchiver(self.dossier).archive('administration', '2013')\n self.commit_solr()\n\n expected_filing_no = 'Hauptmandant-Administration-2013-1'\n self.assertEqual(IFilingNumber(self.dossier).filing_no,\n expected_filing_no)\n\n indexed_value = solr_data_for(self.dossier, 'SearchableText')\n self.assertIn(expected_filing_no, indexed_value)\n\n\nclass TestDossierParticipationsIndexer(SolrIntegrationTestCase):\n\n def test_plone_participations_are_indexed_in_solr(self):\n self.login(self.regular_user)\n\n handler = IParticipationAware(self.dossier)\n handler.add_participation(\n self.regular_user.id, ['participation', 'final-drawing'])\n\n self.commit_solr()\n\n indexed_value = solr_data_for(self.dossier, 'participations')\n expected = [\n u'kathi.barfuss|participation',\n u'kathi.barfuss|final-drawing',\n u'any-participant|participation',\n u'any-participant|final-drawing',\n u'kathi.barfuss|any-role']\n self.assertItemsEqual(expected, indexed_value)\n\n\n@requests_mock.Mocker()\nclass TestDossierParticipationsIndexerWithKuB(SolrIntegrationTestCase, KuBIntegrationTestCase):\n\n def test_kub_participations_are_indexed_in_solr(self, mocker):\n self.login(self.regular_user)\n\n self.mock_labels(mocker)\n self.mock_get_by_id(mocker, self.person_jean)\n handler = IParticipationAware(self.dossier)\n handler.add_participation(\n self.person_jean, ['participation', 'final-drawing'])\n\n self.commit_solr()\n\n indexed_value = solr_data_for(self.dossier, 'participations')\n expected = [\n u'{}|participation'.format(self.person_jean),\n 
u'{}|final-drawing'.format(self.person_jean),\n u'any-participant|participation',\n u'any-participant|final-drawing',\n u'{}|any-role'.format(self.person_jean)]\n self.assertItemsEqual(expected, indexed_value)\n\n\nclass TestParticipationIndexHelper(IntegrationTestCase):\n\n def test_participant_id_to_label_handles_invalid_ids(self):\n helper = ParticipationIndexHelper()\n self.assertEqual(\"Unknown ID\", helper.participant_id_to_label(\"invalid-id\"))\n","sub_path":"opengever/dossier/tests/test_indexer.py","file_name":"test_indexer.py","file_ext":"py","file_size_in_byte":17526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"83876551","text":"from plone import api\nfrom plone.app.controlpanel.security import ISecuritySchema\n\n\ndef setup_workspaces(portal):\n mp = api.portal.get_tool(name='portal_membership')\n # set type to custom member type\n mp.setMemberAreaType('wigo.workspaces.workspace')\n # set member folder name\n mp.setMembersFolderById('sqa')\n\n\ndef setup_security(portal):\n \"\"\" Add security controlpanel settings.\n \"\"\"\n site = api.portal.get()\n #site security setup!\n security = ISecuritySchema(site)\n security.set_enable_user_folders(True)\n security.use_uuid_as_userid(True)\n\n\ndef setupVarious(context):\n if context.readDataFile('wigo.statusapp-various.txt') is None:\n return\n portal = api.portal.get()\n setup_workspaces(portal)\n # call update security\n setup_security(portal)\n","sub_path":"src/wigo.statusapp/wigo/statusapp/setuphandlers.py","file_name":"setuphandlers.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"152335143","text":"#!/usr/bin/env python3\n\nimport sys\n\nsys.setrecursionlimit(10**7)\n\n\ndef read_h(typ=int):\n return list(map(typ, input().split()))\n\n\ndef read_v(n, m=1, typ=int):\n return [read_h() if m > 1 else typ(input()) for _ in range(n)]\n\n\ndef main():\n N, = read_h()\n arr = read_h()\n\n cnts = [0] * 100000\n for a in arr:\n cnts[a] += 1\n\n ans = 0\n for i in range(len(cnts) - 2):\n c = cnts[i] + cnts[i + 1] + cnts[i + 2]\n if c > ans:\n ans = c\n\n print(ans)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"abs/arc082a.py","file_name":"arc082a.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"29755671","text":"def scrape():\n#declare dependencies\n from splinter import Browser\n from bs4 import BeautifulSoup as bs\n import time\n import pandas as pd\n import requests\n import pymongo\n\n #declare chrome driver excecutable path\n \n executable_path = {'executable_path': \"C:/chrometest/chromedriver.exe\"}\n browser = Browser('chrome', **executable_path, headless=False)\n mars_dict = {}\n #pass the url \n url = \"https://mars.nasa.gov/news/\"\n browser.visit(url)\n html = browser.html\n soup = bs(html, 'html.parser')\n try:\n step1 = soup.select_one('ul.item_list li.slide')\n #find news title\n news_title = step1.find(\"div\", class_=\"content_title\").text\n #find news paragraph\n news_paragraph= step1.find(\"div\", class_=\"article_teaser_body\").text\n except:\n return None, None\n\n\n # JPL Mars Space Images - Featured Image\n\n #declare chrome driver excecutable path\n # executable_path = {'executable_path': '/Users/hello/Downloads/chromedriver_win32/chromedriver'}\n # # executable_path = {'executable_path': \"C:/chrometest/chromedriver.exe\"}\n # browser = 
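One step of reasoning behind arc082a.py above is worth spelling out: since every element may be changed by at most 1, fixing a target value x collects exactly the elements whose original value lies in {x-1, x, x+1}, i.e. a window of three adjacent histogram bins -- which is what the cnts[i] + cnts[i+1] + cnts[i+2] scan computes. A small standalone check (the input list is invented):

from collections import Counter

arr = [3, 1, 4, 1, 5, 9, 2, 6]  # invented sample input
cnt = Counter(arr)
best = max(cnt[x - 1] + cnt[x] + cnt[x + 1] for x in range(min(arr), max(arr) + 1))
print(best)  # 4: the values 1, 1, 2, 3 all fit in the window {1, 2, 3}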
Browser('chrome', **executable_path, headless=False)\n\n# Featured Image URL\n\n# #Scrape first jpl Nasa page\n url = \"https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars\"\n browser.visit(url)\n\n #this will make the browser scrape page\n html = browser.html\n soup = bs(html, 'html.parser')\n\n# #This makes the browser click button or link\n browser.click_link_by_partial_text('FULL IMAGE')\n \n time.sleep(3)\n #Scrape second NASA page\n browser.click_link_by_partial_text('more info')\n time.sleep(3)\n\n \n html = browser.html\n soup2 = bs(html, 'html.parser')\n\n # findimg= soup2.find('img', class_='fancybox-image')['src']\n image_url = soup2.find('figure', class_='lede')\n findimg = image_url.a[\"href\"]\n \n featured_image_url= \"https://www.jpl.nasa.gov\"+ findimg\n \n# # # Mars Facts\n# import pandas as pd\n url = \"https://space-facts.com/mars/\"\n\n tables = pd.read_html(url)\n MarsPlanetProfile = tables[0]\n \n # Rename columns\n renamed_marsfacts_df = MarsPlanetProfile.rename(columns={0:\"Description\", 1:\"Value\"})\n # MarsPlanetProfile.columns = ['Description', 'Value']\n # Convert DF to html\n MarsPlanet = renamed_marsfacts_df.to_html()\n # Convert DF to HTML string.\n # MarsPlanet = MarsPlanetProfile.to_html(header=True, index=True)\n \n\n\n # # # Mars Hemispheres\n\n # #declare chrome driver excecutable path\n\n # # executable_path = {\"executable_path\": \"C:\\chrometest\\chromedriver.exe\"}\n # # browser = Browser(\"chrome\", **executable_path, headless=False)\n # url = \"https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars\"\n # browser.visit(url)\n # html = browser.html\n # soup = bs(html,'html.parser')\n # # results = soup.find_all(\"a\", class_= \"itemLink product-item\")\n # partialurl= \"https://astrogeology.usgs.gov\"\n\n # # Locate all hemisphers and add to a list.\n # hemisphere_url = soup.find_all(\"div\", class_=\"item\")\n\n # # Create empty list for each Hemisphere URL.\n # hems_url = []\n\n # for hems in hemisphere_url:\n # #browser.visit(partialurl + x['href'])\n # #time.sleep(10)\n # #browser.links.find_by_partial_text('Sample')\n # hemi_url = hems.find('a')['href']\n # hems_url.append(hemi_url)\n\n # # browser.quit()\n\n # hemisphere_image_urls = []\n\n # for hem in hems_url:\n # hem_isphere_url = partialurl + hem\n # print(hem_isphere_url)\n \n # #Initialize browser\n # browser.visit(hem_isphere_url)\n \n # html = browser.html\n # soup3 = bs(html, \"html.parser\")\n\n # # Locate each title and save for modification\n # each_title = soup3.find(\"h2\", class_=\"title\").text\n \n # # Remove Enhanced.\n # title = each_title.split('Enhanced')[0]\n \n # # Locate all full images in 4 Hemisphere URLs.\n # img_url = soup3.find(\"li\").a['href']\n \n # # Append both title and img_url to 'hemisphere_image_url'.\n # hemisphere_image_urls.append({'title': title, 'img_url': img_url})\n \n # # browser.quit()\n\n \n # # Mars Data Dictionary MongoDB\n # Create empty dictionary for all Mars Data.\n \n# Mars Hemispheres\n url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'\n response = requests.get(url)\n soup = bs(response.text, 'html.parser').find_all(\"a\",class_ = \"itemLink product-item\")\n hemi_titles = []\n for i in soup:\n title = i.find(\"h3\").text\n # link= i[\"href\"]\n hemi_titles.append(title)\n \n\n url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'\n browser.visit(url)\n\n hemisphere_image_urls = []\n for x in 
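A note on the pd.read_html call above: it parses every HTML table element on the page and returns a list of DataFrames, which is why the scraper immediately takes tables[0]. A minimal sketch using the same URL:

import pandas as pd

tables = pd.read_html("https://space-facts.com/mars/")  # one DataFrame per HTML table
facts = tables[0].rename(columns={0: "Description", 1: "Value"})
facts_html = facts.to_html()  # ready to embed in a page, as the scraper does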
range(len(hemi_titles)):\n try:\n browser.click_link_by_partial_text(hemi_titles[x])\n except:\n browser.find_link_by_text('2').first.click()\n browser.click_link_by_partial_text(hemi_titles[x])\n html = browser.html\n soup2 = bs(html, 'html.parser')\n hemi_soup = soup2.find('div', 'downloads')\n hemi_url = hemi_soup.a['href']\n hemisphere_image_urls.append(hemi_url)\n browser.back()\n\n # Append news_title and news_paragraph to mars_data.\n mars_dict['news_title'] = news_title\n mars_dict['news_paragraph'] = news_paragraph\n # Append featured_image_url to mars_data.\n mars_dict['featured_image_url'] = featured_image_url\n # # Append mars_facts to mars_data.\n mars_dict['MarsPlanet'] = MarsPlanet\n # Append hemisphere_image_urls to mars_data.\n mars_dict['hemisphere_image_urls'] = hemisphere_image_urls\n\n return mars_dict\n\n\n\n","sub_path":"scrapemars.py","file_name":"scrapemars.py","file_ext":"py","file_size_in_byte":5835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"242061685","text":"#Print the largest (maximum) hourglass sum found in am array.\r\n\r\n#!/bin/python3\r\nimport sys\r\n\r\n\r\na = []\r\nfor arr_i in range(6):\r\n arr_t = [int(arr_temp) for arr_temp in input().strip().split(' ')]\r\n a.append(arr_t)\r\nmaxsum=-63\r\nfor x in range(4):\r\n for y in range(4):\r\n tmpsum=a[y][x]+a[y][x+1]+a[y][x+2]+a[y+1][x+1]+a[y+2][x]+a[y+2][x+1]+a[y+2][x+2]\r\n maxsum=max(tmpsum,maxsum)\r\nprint(maxsum)\r\n","sub_path":"Datastructures/Arrays/2d-array.py","file_name":"2d-array.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"466509707","text":"import pytest\nimport tensorflow as tf\n\nfrom autokeras.blocks import basic\nfrom tests.autokeras.blocks import utils\n\n\ndef test_type_error_for_call():\n block = basic.ConvBlock()\n with pytest.raises(TypeError) as info:\n block(block)\n assert 'Expect the inputs to layer' in str(info.value)\n\n\ndef test_resnet_block():\n utils.block_basic_exam(\n basic.ResNetBlock(),\n tf.keras.Input(shape=(32, 32, 3), dtype=tf.float32),\n ['version', 'pooling'],\n )\n\n\ndef test_resnet_invalid_kwargs():\n with pytest.raises(ValueError) as info:\n basic.ResNetBlock(include_top=True)\n assert 'Argument \"include_top\" is not' in str(info.value)\n\n\ndef test_resnet_invalid_kwargs2():\n with pytest.raises(ValueError) as info:\n basic.ResNetBlock(input_shape=(10,))\n assert 'Argument \"input_shape\" is not' in str(info.value)\n\n\ndef test_xception_block():\n utils.block_basic_exam(\n basic.XceptionBlock(),\n tf.keras.Input(shape=(32, 32, 3), dtype=tf.float32),\n [\n 'activation',\n 'initial_strides',\n 'num_residual_blocks',\n 'pooling',\n ])\n\n\ndef test_xception_invalid_kwargs():\n with pytest.raises(ValueError) as info:\n basic.XceptionBlock(include_top=True)\n assert 'Argument \"include_top\" is not' in str(info.value)\n\n\ndef test_xception_invalid_kwargs2():\n with pytest.raises(ValueError) as info:\n basic.XceptionBlock(input_shape=(10,))\n assert 'Argument \"input_shape\" is not' in str(info.value)\n\n\ndef test_conv_block():\n utils.block_basic_exam(\n basic.ConvBlock(),\n tf.keras.Input(shape=(32, 32, 3), dtype=tf.float32),\n [\n 'kernel_size',\n 'num_blocks',\n 'separable',\n ])\n\n\ndef test_rnn_block():\n utils.block_basic_exam(\n basic.RNNBlock(),\n tf.keras.Input(shape=(32, 10), dtype=tf.float32),\n [\n 'bidirectional',\n 'layer_type',\n 'num_layers',\n ])\n\n\ndef test_dense_block():\n 
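The hourglass scan in 2d-array.py above generalizes naturally to any grid size; a sketch with invented names (the original reads a fixed 6x6 grid from stdin and initializes the maximum to -63, the smallest possible hourglass sum):

def max_hourglass_sum(grid):
    # an hourglass is a 3-cell bar, 1 center cell, and a 3-cell bar, stacked
    rows, cols = len(grid), len(grid[0])
    best = None
    for y in range(rows - 2):
        for x in range(cols - 2):
            total = (sum(grid[y][x:x + 3])          # top bar
                     + grid[y + 1][x + 1]           # middle cell
                     + sum(grid[y + 2][x:x + 3]))   # bottom bar
            best = total if best is None else max(best, total)
    return best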
utils.block_basic_exam(\n basic.DenseBlock(),\n tf.keras.Input(shape=(32,), dtype=tf.float32),\n [\n 'num_layers',\n 'use_batchnorm',\n ])\n\n\ndef test_embedding_block():\n utils.block_basic_exam(\n basic.Embedding(),\n tf.keras.Input(shape=(32,), dtype=tf.float32),\n [\n 'pretraining',\n 'embedding_dim',\n ])\n","sub_path":"tests/autokeras/blocks/basic_test.py","file_name":"basic_test.py","file_ext":"py","file_size_in_byte":2434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"154594750","text":"#!/usr/bin/python3\n\"\"\" Test file storage \"\"\"\nimport pep8\nimport os\nimport models\nimport unittest\nfrom datetime import datetime\nfrom models.base_model import BaseModel\nfrom models.engine.file_storage import FileStorage\nfrom models.user import User\nfrom models.state import State\nfrom models.place import Place\nfrom models.city import City\nfrom models.amenity import Amenity\nfrom models.review import Review\n\n\nclass TestFileStorage(unittest.TestCase):\n \"\"\" Test cases for file storage \"\"\"\n\n @classmethod\n def Class1(cls):\n \"\"\" Instanciando la clase \"\"\"\n cls.user = User()\n cls.user.first_name = \"Santiago\"\n cls.user.last_name = \"Mic\"\n cls.user.email = \"3890@gmail.com\"\n cls.storage = FileStorage()\n\n @classmethod\n def delete(cls):\n \"\"\" Delete the instance \"\"\"\n del cls.user\n\n def Deletejson(self):\n \"\"\" Delete the json file created \"\"\"\n try:\n os.remove(\"file.json\")\n except Exception:\n pass\n\n def test_pep8_FS(self):\n \"\"\" Pep8 style \"\"\"\n style = pep8.StyleGuide(quiet=True)\n p = style.check_files(['models/engine/file_storage.py'])\n self.assertEqual(p.total_errors, 0, \"fix pep8\")\n\n def test_allFS(self):\n \"\"\" Test the method all \"\"\"\n storage = FileStorage()\n objects = storage.all()\n self.assertIsNotNone(objects)\n self.assertEqual(type(objects), dict)\n self.assertIs(objects, storage._FileStorage__objects)\n\n def test_newFS(self):\n \"\"\" Test the creation \"\"\"\n storage = FileStorage()\n objects = storage.all()\n user = User()\n user.id = 5678902\n user.name = \"Santiago\"\n storage.new(user)\n key = user.__class__.__name__ + \".\" + str(user.id)\n self.assertIsNotNone(objects[key])\n\n def test_FS_reload(self):\n \"\"\" Tests the reload \"\"\"\n with open(\"file.json\", 'r') as f:\n lines = f.readlines()\n try:\n os.remove(path)\n except Exception:\n pass\n with open(\"file.json\", 'r') as f:\n lines2 = f.readlines()\n self.assertEqual(lines, lines2)\n try:\n os.remove(\"file.json\")\n except Exception:\n pass\n with open(\"file.json\", \"w\") as f:\n f.write(\"{}\")\n with open(\"file.json\", \"r\") as r:\n for line in r:\n self.assertEqual(line, \"{}\")\n","sub_path":"tests/test_models/test_engine/test_file_storage.py","file_name":"test_file_storage.py","file_ext":"py","file_size_in_byte":2446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"83898837","text":"# -*- coding: utf-8 -*-\nfrom django.contrib.auth.decorators import user_passes_test, login_required\nfrom django.http import JsonResponse\nfrom django.utils.decorators import method_decorator\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.views.generic import FormView\nfrom django_filters.views import FilterView\n\nfrom smsreport.report.forms import SendSmsForm, LostFilter\nfrom smsreport.report.mixins import CurrentDayFilterMixin, RememberFilterSetMixin\nfrom smsreport.sms.forms import SmsFilter\nfrom 
smsreport.sms.models import Sms\nfrom smsreport.sms.utils import put_sms_to_resend_queue\nfrom smsreport.utils import get_last_filter_data\nfrom constance import config\n\n\n@method_decorator(login_required(login_url='login_view'), name='dispatch')\nclass SendSmsView(FormView):\n form_class = SendSmsForm\n template_name = 'sms/lost.html'\n\n def form_valid(self, form):\n sms_sends = []\n sms_id_list = self.request.POST.getlist('sms_id')\n for sms_id in sms_id_list:\n try:\n sms = Sms.objects.get(pk=sms_id)\n except Sms.DoesNotExist:\n pass\n else:\n is_put = put_sms_to_resend_queue(self.request, sms)\n if is_put:\n sms_sends.append(sms.pk)\n sms.delete()\n\n return JsonResponse({\n 'message': u'Выбранные сообщения отправлены',\n 'sms_sends': sms_sends\n })\n\n def form_invalid(self, form):\n return JsonResponse({\n 'message': u'Укажите хотя бы один объект для отправки'\n })\n\n\n@method_decorator(decorator=csrf_exempt, name='dispatch')\nclass MassResendAction(CurrentDayFilterMixin, FilterView):\n filterset_class = LostFilter\n\n def post(self, request, *args, **kwargs):\n filterset_class = self.get_filterset_class()\n self.filterset = self.get_filterset(filterset_class)\n self.object_list = self.filterset.qs\n\n resend_count = 0\n\n for sms in self.object_list:\n is_put = put_sms_to_resend_queue(self.request, sms)\n if is_put:\n sms.delete()\n resend_count += 1\n\n return JsonResponse({\n 'message': u'Отправлено в очередь: %s' % (resend_count, ),\n 'filter': self.filterset.data\n })\n\n def get_filterset(self, filterset_class):\n kwargs = self.get_filterset_kwargs(filterset_class)\n kwargs['data'] = kwargs['data'].copy()\n\n last_filter_data = get_last_filter_data(self.request)\n kwargs['data'].update(last_filter_data)\n\n return filterset_class(**kwargs)\n\n\n@method_decorator(user_passes_test(lambda u: u.is_staff, login_url='login_view'), name='dispatch')\nclass SmsView(CurrentDayFilterMixin, RememberFilterSetMixin, FilterView):\n filterset_class = SmsFilter\n template_name = 'sms/sms.html'\n\n def get_paginate_by(self, queryset):\n return config.LOST_PAGINATION\n","sub_path":"src/smsreport/sms/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"610332784","text":"import i_dunno\nimport ipaddress\nimport pytest\nfrom random import randint\n\ndef test_decode():\n \"\"\"Decode a form with known antecedent\n\n Note the last two bytes are UTF for U+00f0 whose binary\n representation has exactly 8 significant bits.\n >>> bin(ord(b'\\xc3\\xb0'.decode(\"utf-8\")))\n '0b11110000'\n \"\"\"\n form = b'g&\\x10\\xc3\\xb0'\n v4addr = '206.152.128.240'\n assert i_dunno.decode(form) == ipaddress.ip_address(v4addr)\n\ndef randaddr(af):\n \"\"\"Return a random IP address\"\"\"\n if af == \"ip\":\n val = randint(1, 2**32 - 1)\n else:\n val = randint(2**32, 2**128 - 1)\n return ipaddress.ip_address(val)\n\n@pytest.mark.parametrize(\"af, level, nbaddrs\", [\n (\"ip\", \"minimum\", 30),\n (\"ipv6\", \"minimum\", 30),\n (\"ip\", \"satisfactory\", 30),\n (\"ipv6\", \"satisfactory\", 30),\n (\"ip\", \"delightful\", 30),\n (\"ipv6\", \"delightful\", 30),\n])\ndef test_random(af, level, nbaddrs):\n \"\"\"Check decode(encode(addr)) is idempotent for each confusion level\"\"\"\n for i in range(nbaddrs):\n run_random_one(af, level)\n\ndef run_random_one(af, level):\n \"\"\"Generate a random address; check decode(encode(addr)) is idempotent\"\"\"\n formfound = False\n while not formfound:\n 
addr = randaddr(af)\n try:\n form = i_dunno.encode(addr, level=level)\n formfound = True\n except ValueError:\n continue\n assert i_dunno.decode(form) == addr\n","sub_path":"tests/test_decode.py","file_name":"test_decode.py","file_ext":"py","file_size_in_byte":1443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"229350195","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport config\n\ndef save_fig_with_predictions_for_direction(predicted_img_ids, predicted_dict, filename):\n\n imgs_per_direction = 10\n n_directions = 3\n fig, ax = plt.subplots(nrows=n_directions,ncols=imgs_per_direction)\n padding = 0.1\n fig_w = imgs_per_direction * 2.0 + (imgs_per_direction+1)*padding\n fig_h = n_directions * 1.0 + (n_directions+1)*padding\n fig.set_size_inches(fig_w,fig_h)\n\n for di,direct in enumerate(['left','straight','right']):\n if len(predicted_img_ids[direct]) == 0:\n continue\n perm_indices = np.random.permutation(predicted_img_ids[direct])\n for index in range(min(imgs_per_direction,len(predicted_img_ids[direct]))):\n\n norm_img = predicted_dict[perm_indices[index]][0] - np.min(predicted_dict[perm_indices[index]][0])\n norm_img = (norm_img/np.max(norm_img))\n ax[di,index].imshow(norm_img)\n ax[di,index].axis('off')\n ax[di,index].set_title(direct)\n\n fig.savefig(filename)\n plt.close(fig)","sub_path":"src/scripts/models/cerberus_cnn/visualizer.py","file_name":"visualizer.py","file_ext":"py","file_size_in_byte":1078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"428763521","text":"from os import path\nimport sys\n\n\n# [START manipulate file path]\ndef add_path(path_to_file: str, path_to_concat: str) -> None:\n \"Append a path relative to the current file\"\n __PATH__ = path.abspath(\n concat_path(\n path_to_file,\n path_to_concat\n )\n )\n if __PATH__ not in sys.path:\n sys.path.insert(0, __PATH__)\n\n\ndef remove_path(basename_to_remove: str) -> None:\n \"\"\"\n Remove any paths with this basename\n Useful for handling pytest within a service containing many functions\n Otherwise, the helper modules pollute the namespace during batch testing\n Alternative is to require strict absolute imports throughout\n \"\"\"\n sys.path = [p for p in sys.path if path.basename(p) != basename_to_remove]\n# [END manipulate file path]\n\n\ndef concat_path(path_to_file: str, path_to_concat: str) -> str:\n \"Use with __file__ to create a safe file path regardless of call source\"\n return path.join(\n path.dirname(path_to_file),\n path_to_concat\n )\n","sub_path":"ops_helpers/change_path.py","file_name":"change_path.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"336115448","text":"#!/usr/bin/env python\n# encoding: utf-8\n# original version from https://gist.github.com/brantfaircloth/893580\n\"\"\"\ngb_to_bed.py file.gb file.bed genome_name\n\ngrab the gene records from a genbank file (edit for other record types).\n\n- requires: biopython\n\n\"\"\"\n \nfrom Bio import SeqIO\nfrom pprint import pprint\n\nimport sys\nimport pdb\n \ndef main():\n outf = open(sys.argv[2], 'w')\n genome = sys.argv[3]\n for record in SeqIO.parse(open(sys.argv[1], \"rU\"), \"genbank\") :\n for feature in record.features:\n if feature.type == 'gene':\n for part in feature.location.parts:\n start = part.start.position\n stop = part.end.position\n try:\n name = feature.qualifiers['gene'][0]\n except:\n # 
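The sys.path helpers in change_path.py above are designed to be driven by __file__; a short usage sketch ("shared_libs" is an invented directory name, not part of the original module):

from change_path import add_path, remove_path

add_path(__file__, "../shared_libs")   # make ../shared_libs importable
# ... import helper modules that live in shared_libs ...
remove_path("shared_libs")             # drop it again, e.g. between pytest runs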
some features only have a locus tag\n                    name = feature.qualifiers['locus_tag'][0]\n                if feature.strand < 0:\n                    # note: swapping start/stop encodes the minus strand by\n                    # coordinate order, which is not standard BED (BED keeps\n                    # start < stop and uses a separate strand column)\n                    tmp = start\n                    start = stop\n                    stop = tmp\n                bed_line = \"{0}_gi\\t{1}\\t{2}\\t{3}\\n\".format(genome, start, stop, name)\n                sys.stdout.write(\"{{\\\"karyo\\\":\\\"{0}_gi\\\",\\\"start\\\":{1},\\\"end\\\":{2},\\\"name\\\":\\\"{3}\\\"}},\".format(genome, start, stop, name))\n                outf.write(bed_line)\n    outf.close()\n \nif __name__ == '__main__':\n    main()\n","sub_path":"t/data/dataset001/gb_to_bed_json.py","file_name":"gb_to_bed_json.py","file_ext":"py","file_size_in_byte":1423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"403210826","text":"#! /usr/bin/python\n# -*- coding: utf-8 -*-\n# Author: Fabien Marteau \n\"\"\" rfipcfile\n\"\"\"\n\nimport subprocess\nimport sys\nimport os\n\n\ndef which(program):\n    def is_exe(fpath):\n        return os.path.isfile(fpath) and os.access(fpath, os.X_OK)\n\n    fpath, fname = os.path.split(program)\n    if fpath:\n        if is_exe(program):\n            return program\n    else:\n        for path in os.environ[\"PATH\"].split(os.pathsep):\n            path = path.strip('\"')\n            exe_file = os.path.join(path, program)\n            if is_exe(exe_file):\n                return exe_file\n\n    return None\n\ndef usages():\n    \"\"\" print usage \"\"\"\n    print(\"usages:\")\n    print(\"rfipcfile.py filename.mp3\")\n\n\nif __name__ == \"__main__\":\n    try:\n        filename = sys.argv[1]\n    except IndexError:\n        usages()\n        exit()\n    if which('mp3info') is None:\n        print(\"mp3info is needed\")\n        exit()\n\n    # os.system() returns the exit status, not the command output, so\n    # capture stdout with subprocess to actually get the artist tag\n    author = subprocess.check_output([\"mp3info\", \"-p\", \"%a\", filename])\n    print(author.decode(\"utf-8\").strip())\n\n    print(filename)\n","sub_path":"rfipcfile.py","file_name":"rfipcfile.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"467126723","text":"import argparse\r\nimport logging\r\nimport os\r\nimport time\r\nfrom data import DataExtractor,download\r\nfrom model import DNNNet\r\n\r\n\r\nlogging.basicConfig(level=logging.DEBUG,format='%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s')\r\n\r\ndef train(model,loadedData,args):\r\n    logger = logging.getLogger()\r\n    logger.info(\"Training for %s wine data.\"%args.type)\r\n    for k in range(args.epoches):\r\n        loss_sum = 0\r\n        length = loadedData.getlen(0)\r\n        for j in range(length):\r\n            input,target = loadedData.getitem(j, 0)\r\n            loss = model.train(input,target,args.learning_rate,args.lambd)\r\n            loss_sum = loss_sum + loss\r\n        loss = loss_sum/length\r\n        logger.info(\"Training loss value:%f\"%(loss))\r\n        # validate the result\r\n        input,target = loadedData.getvalidate()\r\n        output = model.forward(input)\r\n        loss = model.loss(output,target)\r\n        logger.info(\"Validating loss value:%f\" % (loss))\r\n    logger.info(\"Training for %s wine data completed.\" % args.type)\r\n    modeldir = os.path.join(args.logdir,\"%s.pt\"%model.name)\r\n    model.save_dict_parameters(modeldir)\r\n    logger.info(\"Model saved in path:%s\"%modeldir)\r\ndef test(model,DataSet):\r\n    logger = logging.getLogger()\r\n    input,target = DataSet.gettest()\r\n    output = model.forward(input)\r\n    loss = model.loss(output,target)\r\n    logger.info(\"Testing loss value:%f\" % (loss))\r\ndef main():\r\n    # parse cmdline arguments\r\n    parser = argparse.ArgumentParser()\r\n    parser.add_argument(\"--datadir\", default=\"data\", help=\"Definition of the data file path.\", type=str)\r\n    parser.add_argument(\"--learning_rate\", default=0.3, help=\"Definition of the network training learning rate.\",\r\n                        type=float)\r\n 
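The train loop above threads args.lambd into model.train; DNNNet's internals are not shown in this file, so as an assumption (not the repository's actual code), a coefficient of this kind usually enters the objective as an L2 penalty. A minimal numpy sketch of that assumed loss shape:

import numpy as np

def regularized_mse(output, target, weights, lambd):
    # mean squared error plus an L2 penalty on all weight matrices;
    # lambd = 0.0 (the script's default) disables the penalty entirely
    mse = np.mean((output - target) ** 2)
    l2 = sum(np.sum(w ** 2) for w in weights)
    return mse + lambd * l2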
parser.add_argument(\"--logdir\",default=\"log\",help=\"Definition of the logs file path.\",type=str)\r\n    parser.add_argument(\"--actfunc\",default=\"sigmoid\",help=\"Definition of the activation function.\",type=str)\r\n    parser.add_argument(\"--layers\",default=\"50,30,10\",help=\"Definition of the hidden layer sizes.\",type=str)\r\n    parser.add_argument(\"--percentage\",default=\"0.7,0.15,0.15\",help=\"Definition of the train/validation/test split.\",type=str)\r\n    parser.add_argument(\"--epoches\",default=50,help=\"Definition of the number of training epochs.\",type=int)\r\n    parser.add_argument(\"--lambd\",default=0.0,help=\"Regularization coefficient for the loss function.\",type=float)\r\n    parser.add_argument(\"--batchnormalize\",default=False,help=\"Whether the network uses batch normalization.\",type=bool)\r\n    parser.add_argument(\"--batch\",default=1,help=\"Batch size for the data.\",type=int)\r\n    parser.add_argument(\"--type\",default=\"Red\",help=\"The training data type.\",type=str)\r\n    args = parser.parse_args()\r\n\r\n    # logging configuration\r\n    logger = logging.getLogger()\r\n    logger.setLevel(logging.INFO)\r\n    rq = time.strftime(\"%Y%m%d%H%M\",time.localtime(time.time()))\r\n    log_dir = os.path.join(os.getcwd(),args.logdir,)\r\n    if not os.path.exists(log_dir):\r\n        os.mkdir(log_dir)\r\n    log_file = os.path.join(log_dir,rq+\".log\")\r\n    fh = logging.FileHandler(log_file,mode=\"w\")\r\n    fh.setLevel(logging.DEBUG)\r\n    formatter = logging.Formatter(\"%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s\")\r\n    fh.setFormatter(formatter)\r\n    logger.addHandler(fh)\r\n    # load and prepare the data\r\n    download(os.path.join(os.getcwd(),args.datadir))\r\n    percentage = list(map(float,args.percentage.split(\",\")))\r\n    # compare with a tolerance: summing floats such as 0.7+0.15+0.15 rarely gives exactly 1.0\r\n    if abs(sum(percentage) - 1.0) > 1e-9:\r\n        raise ValueError(\"Error for value percentage: %f,%f,%f\"%(percentage[0],percentage[1],percentage[2]))\r\n    if args.type == \"Red\":\r\n        DataSet = DataExtractor(os.path.join(args.datadir,\"winequality-red.csv\"),percentage,args.batch)\r\n    elif args.type == \"White\":\r\n        DataSet = DataExtractor(os.path.join(args.datadir,\"winequality-white.csv\"),percentage,args.batch)\r\n    else:\r\n        raise ValueError(\"Unknown wine type: %s\"%str(args.type))\r\n    # model preparation\r\n    dim_list = list(map(int,args.layers.split(\",\")))\r\n    dim_list = [11] + dim_list\r\n    dim_list.append(1)\r\n    # args.batchnormalize, args.learning_rate, args.lambd, name = rq\r\n    model = DNNNet(dim_list,args.actfunc,name = rq)\r\n    # train the model\r\n    train(model,DataSet,args)\r\n    # test the model\r\n    test(model,DataSet)\r\nif __name__ == \"__main__\":\r\n    main()","sub_path":"DNN_project/theano/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"435662265","text":"import numpy as np\nimport cv2 as cv\nfrom PIL import Image\nimport params as params\nimport os\nimport glob\n\nSHOW_IMAGES = False\n\ndef rotate(img, angle): \n\tnum_rows, num_cols = img.shape[:2]\n\trotation_matrix = cv.getRotationMatrix2D((num_cols/2, num_rows/2), angle, 1)\n\timg_rotation = cv.warpAffine(img, rotation_matrix, (num_cols, num_rows)) \n \n\treturn img_rotation\n\ndef get_output_directory_name(): \n    return os.path.join('output-images', params.folder_name, str(params.scale)) + 'x'\n \ndef create_folders(): \n    directory_name = get_output_directory_name()\n    if not os.path.exists(directory_name):\n        os.makedirs(directory_name)\n        print('directory created: {} '.format(directory_name)) \n    else:\n        print('directory {} 
exists '.format(directory_name))\n \ndef read_all_images_from_directory():\n '''\n This function reads the images from the directory specified in params.py.\n The output is a numpy ndarray of size (num_images, height, width, channels).\n '''\n if not os.path.exists(params.folder_base_name):\n print('Error!! Folder base name does not exit')\n if not os.path.exists(os.path.join(params.folder_base_name, params.folder_name)):\n print('Error!! Folder name does not exit') \n \n images_path = os.path.join(params.folder_base_name, params.folder_name,'*' + params.image_ext) \n files = glob.glob(images_path)\n num_images = len(files)\n print('There are {} images in {}'.format(num_images, images_path))\n # read the first image to get the size of the images\n image = cv.imread(files[0], cv.IMREAD_GRAYSCALE)\n print('The size of the first image is {}'.format(image.shape))\n images = np.zeros((num_images, image.shape[0], image.shape[1], 1))\n images[0, :, :, 0] = image\n for index in range(1, num_images): \n image = cv.imread(files[index], cv.IMREAD_GRAYSCALE)\n images[index, :, :, 0] = image \n if(SHOW_IMAGES): \n cv.imshow('image', image)\n cv.waitKey(0)\n \n return images\n \ndef resize_3d_image_standard(images, new_depth, new_heigth, new_width, interpolation_method = cv.INTER_LINEAR): \n\n resized_3d_images = np.zeros((new_depth, new_heigth, new_width, images.shape[3]))\n num_images = images.shape[0]\n resized_images = np.zeros((num_images, new_heigth, new_width))\n \n \n for index in range(num_images):\n image = images[index, :, :, :]\n resized_images[index, :, :] = cv.resize(image, (new_width, new_heigth), interpolation = interpolation_method)\n if(SHOW_IMAGES):\n cv.imshow('image', resized_images[index, :, :] / 255)\n cv.waitKey(0)\n \n for y in range(new_heigth):\n for x in range(new_width):\n depth_row = resized_images[:, y, x]\n resized_depth_row = cv.resize(depth_row, (1, new_depth), interpolation = interpolation_method)\n resized_3d_images[:, y, x, 0] = resized_depth_row.ravel()\n \n if(SHOW_IMAGES): \n for index in range(new_depth):\n image = resized_3d_images[index, :, :, :] \n cv.imshow('image', image / 255)\n cv.waitKey(0)\n \n return resized_3d_images\n \ndef write_3d_images(images, prefix):\n '''\n This function writes the images in the directory specified in params.py with the prefix specified as a param.\n The input is a numpy ndarray of size (num_images, height, width, channels) and a string.\n '''\n num_images = images.shape[0]\n directory_name = os.path.join(get_output_directory_name(), prefix)\n if not os.path.exists(directory_name):\n os.makedirs(directory_name)\n print('directory created: {} '.format(directory_name)) \n for index in range(num_images):\n image = images[index, :, :, :] \n cv.imwrite(os.path.join(directory_name, str(index) + '.' 
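resize_3d_image_standard above resizes in-plane with cv2 and then resizes the depth axis one (y, x) column at a time; assuming scipy were acceptable as a dependency (the original deliberately sticks to OpenCV), the same trilinear resize is a single call:

import numpy as np
from scipy.ndimage import zoom

def resize_3d_with_zoom(volume, new_depth, new_height, new_width):
    # volume has shape (depth, height, width, channels); order=1 is
    # (tri)linear interpolation, matching cv.INTER_LINEAR above
    d, h, w = volume.shape[:3]
    factors = (new_depth / d, new_height / h, new_width / w, 1)
    return zoom(volume, factors, order=1)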
+ params.image_ext), image)\n    \n\ndef flip_images(images):\n    num_images = images.shape[0]  \n    flipped_images = np.zeros(images.shape)\n    for index in range(num_images):\n        image = images[index, :, :, 0]  \n        flipped_images[index, :, :, 0] = cv.flip(image, 1)\n    \n    return flipped_images\n    \ndef rotate_images(images, angle):\n    num_images = images.shape[0]  \n    rotated_images = np.zeros(images.shape)\n    for index in range(num_images):\n        image = images[index, :, :, 0]  \n        rotated_images[index, :, :, 0] = rotate(image, angle)\n    \n    return rotated_images\n    ","sub_path":"SISR/tensorflow/cnn-3d/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"132103252","text":"import sys\n\nn = sys.argv[1]\n\n\ndef is_prime(n):\n    if not n.isdigit():\n        return \"INVALID INPUT\"\n    value = int(n)\n    if value < 2:\n        return \"NOT PRIME\"\n    for m in range(2, value // 2 + 1):\n        # a modulo test is clearer (and cheaper) than comparing true and floor division\n        if value % m == 0:\n            return \"NOT PRIME\"\n    return \"PRIME\"\n\n\nprint(is_prime(n))\n","sub_path":"src/16_is_prime.py","file_name":"16_is_prime.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"316783202","text":"import requests\nimport json\n\n\ndef Retorna(nome):\n    \"\"\"\n    Requests the movie/series and returns the JSON response.\n    We also download the movie poster, because Kivy cannot show an image\n    straight from the internet (if you know how, please show me!).\n    The image is saved in the Imagens folder, named after the movie title.\n    \"\"\"\n    try:\n        # Go to http://www.omdbapi.com and get your own API key\n        \n        key = 'c88e13f0'\n        request = requests.get(f\"http://www.omdbapi.com/?apikey={key}&t={nome}\")\n        formt = json.loads(request.text)\n\n        img = requests.get(formt['Poster'])\n        t = formt['Title']\n\n        with open(f'Imagens/{t}.jpg', 'wb') as arquivo:\n            arquivo.write(img.content)\n\n        return formt\n    \n    except Exception:\n        return False\n","sub_path":"APIFilmes/metodos/retornafilme.py","file_name":"retornafilme.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"197345394","text":"import csv\n\n\ndef write_entity(fh, entity, extra_fields=None):\n    if extra_fields is None:\n        extra_fields = dict()\n    fieldnames = [prop.label for prop in entity.schema.sorted_properties]\n    fieldnames = ['id'] + list(extra_fields.keys()) + fieldnames\n    writer = csv.DictWriter(fh, fieldnames=fieldnames)\n    prop_dict = {\n        'id': entity.id,\n        **extra_fields\n    }\n    for prop in entity.schema.sorted_properties:\n        prop_dict[prop.label] = prop.type.join(entity.get(prop))\n    writer.writerow(prop_dict)\n\n\ndef write_headers(fh, schema, extra_headers=None):\n    if extra_headers is None:\n        extra_headers = []\n    fieldnames = [prop.label for prop in schema.sorted_properties]\n    fieldnames = ['id'] + extra_headers + fieldnames\n    writer = csv.DictWriter(fh, fieldnames=fieldnames)\n    writer.writeheader()\n","sub_path":"followthemoney/export/csv.py","file_name":"csv.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"273991786","text":"import os\nimport sys\nsys.path.insert(0,'./model/')\n\nimport streamlit as st\nfrom streamlit import caching\nfrom models import BackgroundColor, Document, Strategies, SimulatorOutput, ResourceAvailability\nfrom typing import List\nimport utils\nimport plotly.express as 
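The trial division in 16_is_prime.py above stops at n // 2, but divisors come in pairs a * b = n with a <= sqrt(n), so the loop only needs to reach the integer square root; a sketch (math.isqrt needs Python 3.8+):

import math

def is_prime_fast(value):
    if value < 2:
        return False
    for m in range(2, math.isqrt(value) + 1):
        if value % m == 0:
            return False
    return True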
px\nfrom datetime import datetime\nimport math\nimport yaml\nimport numpy as np\nimport loader\nfrom model import simulator\nfrom pandas import Timestamp\n\nFIXED = datetime.now().minute\n\ndef add_all(x, all_string='Todos'):\n return [all_string] + list(x)\n \ndef filter_options(_df, var, col, all_string='Todos'):\n if var == 'Todos':\n return _df\n else:\n return _df.query(f'{col} == \"{var}\"')\n \ndef choose_place(city, region, state):\n if city == 'Todos' and region == 'Todos' and state == 'Todos':\n return 'Brasil'\n if city == 'Todos' and region == 'Todos':\n return state + ' (Estado)' if state != 'Todos' else 'Brasil'\n if city == 'Todos':\n return region + ' (Região SUS)' if region != 'Todos' else 'Todas as regiões SUS'\n return city\n\ndef refresh_rate(config):\n dt = (math.floor(datetime.now().minute/config['refresh_rate'])*config['refresh_rate'])\n return datetime.now().replace(minute=dt,second=0, microsecond=0)\n\ndef calculate_recovered(user_input, selected_region, notification_rate):\n\n confirmed_adjusted = int(selected_region[['confirmed_cases']].sum()/notification_rate)\n\n if confirmed_adjusted == 0: # dont have any cases yet\n user_input['population_params']['R'] = 0\n return user_input\n\n user_input['population_params']['R'] = confirmed_adjusted - user_input['population_params']['I'] - user_input['population_params']['D']\n\n if user_input['population_params']['R'] < 0:\n user_input['population_params']['R'] = confirmed_adjusted - user_input['population_params']['D']\n \n return user_input\n\ndef main():\n utils.localCSS(\"style.css\")\n utils.localCSS(\"icons.css\")\n\n\n # HEADER\n utils.genHeroSection()\n utils.genVideoTutorial()\n\n\n # GET DATA\n config = yaml.load(open('configs/config.yaml', 'r'), Loader = yaml.FullLoader)\n # if abs(datetime.now().minute - FIXED) > config['refresh_rate']:\n # caching.clear_cache()\n cities = loader.read_data('br', config, refresh_rate=refresh_rate(config))\n\n\n # REGION/CITY USER INPUT\n user_input = dict()\n \n utils.genStateInputSectionHeader()\n\n user_input['state'] = st.selectbox('Estado', add_all(cities['state_name'].unique()))\n cities_filtered = filter_options(cities, user_input['state'], 'state_name')\n \n utils.genMunicipalityInputSection()\n\n user_input['region'] = st.selectbox('Região SUS', add_all(cities_filtered['health_system_region'].unique()))\n cities_filtered = filter_options(cities_filtered, user_input['region'], 'health_system_region')\n\n user_input['city'] = st.selectbox('Município', add_all(cities_filtered['city_name'].unique()))\n cities_filtered = filter_options(cities_filtered, user_input['city'], 'city_name')\n\n sources = cities_filtered[[c for c in cities_filtered.columns if (('author' in c) or ('last_updated_' in c))]]\n \n selected_region = cities_filtered.sum(numeric_only=True)\n\n\n # GET LAST UPDATE DATE\n if not np.all(cities_filtered['last_updated'].isna()):\n last_update_cases = cities_filtered['last_updated'].max().strftime('%d/%m')\n\n\n # GET NOTIFICATION RATE\n if len(cities_filtered) > 1: # pega taxa do estado quando +1 municipio selecionado\n notification_rate = round(cities_filtered['state_notification_rate'].mean(), 4)\n\n else:\n notification_rate = round(cities_filtered['notification_rate'].values[0], 4)\n\n # pick locality according to hierarchy\n locality = choose_place(user_input['city'], user_input['region'], user_input['state'])\n\n st.write('
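A worked example of the arithmetic in calculate_recovered above (all numbers invented): with 200 officially confirmed cases and a 25% notification rate, the adjusted total is 200 / 0.25 = 800 true cases; subtracting the active and dead cases gives the recovered count.

confirmed, notification_rate = 200, 0.25  # invented numbers
I, D = 150, 10                            # active cases, deaths
confirmed_adjusted = int(confirmed / notification_rate)  # 800
R = confirmed_adjusted - I - D                           # 640 recovered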
', unsafe_allow_html=True)\n\n utils.genInputCustomizationSectionHeader(locality)\n\n\n # SOURCES USER INPUT\n source_beds = sources[['author_number_beds', 'last_updated_number_beds']].drop_duplicates()\n authors_beds = source_beds.author_number_beds.str.cat(sep=', ')\n\n source_ventilators = sources[['author_number_ventilators', 'last_updated_number_ventilators']].drop_duplicates()\n authors_ventilators = source_ventilators.author_number_ventilators.str.cat(sep=', ')\n\n if locality == 'Brasil':\n authors_beds = 'SUS e Embaixadores'\n authors_ventilators = 'SUS e Embaixadores'\n\n user_input['n_beds'] = st.number_input(\n f\"Número de leitos destinados aos pacientes com Covid-19 (fonte: {authors_beds}, atualizado: {source_beds.last_updated_number_beds.max().strftime('%d/%m')})\"\n , 0, None, int(selected_region['number_beds']))\n\n user_input['n_ventilators'] = st.number_input(\n f\"Número de ventiladores destinados aos pacientes com Covid-19 (fonte: {authors_ventilators}, atualizado: {source_ventilators.last_updated_number_ventilators.max().strftime('%d/%m')}):\"\n , 0, None, int(selected_region['number_ventilators']))\n\n\n # POP USER INPUTS\n user_input['population_params'] = {'N': int(selected_region['population'])}\n user_input['population_params']['D'] = st.number_input('Mortes confirmadas:', 0, None, int(selected_region['deaths']))\n \n # get infected cases\n infectious_period = config['br']['seir_parameters']['severe_duration'] + config['br']['seir_parameters']['critical_duration']\n \n if selected_region['confirmed_cases'] == 0:\n st.write(f'''
\n Seu município ou regional de saúde ainda não possui casos reportados oficialmente. Portanto, simulamos como se o primeiro caso ocorresse hoje.\n

Caso queria, você pode mudar esse número abaixo:\n
''', unsafe_allow_html=True)\n\n user_input['population_params']['I'] = st.number_input('Casos ativos estimados:', 0, None, 1)\n\n else:\n user_input['population_params']['I'] = int(selected_region['infectious_period_cases'] / notification_rate)\n\n st.write(f'''
\n O número de casos confirmados oficialmente no seu município ou regional de saúde é de {int(selected_region['confirmed_cases'].sum())} em {last_update_cases}. \n Dada a progressão clínica da doença (em média, {infectious_period} dias) e a taxa de notificação ajustada para a região ({int(100*notification_rate)}%), \n estimamos que o número de casos ativos é de {user_input['population_params']['I']}.\n

Caso queria, você pode mudar esse número para a simulação abaixo:\n
''', unsafe_allow_html=True)\n\n user_input['population_params']['I'] = st.number_input('Casos ativos estimados:', 0, None, user_input['population_params']['I'])\n \n # calculate recovered cases\n user_input = calculate_recovered(user_input, selected_region, notification_rate)\n \n # AMBASSADOR SECTION\n utils.genAmbassadorSection()\n st.write('
', unsafe_allow_html=True)\n\n # DEFAULT WORST SCENARIO \n user_input['strategy'] = {'isolation': 90, 'lockdown': 90}\n user_input['population_params']['I'] = [user_input['population_params']['I'] if user_input['population_params']['I'] != 0 else 1][0]\n _, dday_beds, dday_ventilators = simulator.run_evolution(user_input, config)\n \n worst_case = SimulatorOutput(color=BackgroundColor.GREY_GRADIENT,\n min_range_beds=dday_beds['worst'], \n max_range_beds=dday_beds['best'], \n min_range_ventilators=dday_ventilators['worst'],\n max_range_ventilators=dday_ventilators['best'])\n \n # DEFAULT BEST SCENARIO\n user_input['strategy'] = {'isolation': 0, 'lockdown': 90}\n _, dday_beds, dday_ventilators = simulator.run_evolution(user_input, config)\n \n best_case = SimulatorOutput(color=BackgroundColor.LIGHT_BLUE_GRADIENT,\n min_range_beds=dday_beds['worst'], \n max_range_beds=dday_beds['best'], \n min_range_ventilators=dday_ventilators['worst'],\n max_range_ventilators=dday_ventilators['best'])\n\n resources = ResourceAvailability(locality=locality, \n cases=selected_region['active_cases'],\n deaths=selected_region['deaths'], \n beds=user_input['n_beds'], \n ventilators=user_input['n_ventilators'])\n \n utils.genSimulationSection(int(user_input['population_params']['I']), locality, resources, worst_case, best_case)\n \n utils.genActNowSection(locality, worst_case)\n utils.genStrategiesSection(Strategies)\n\n\n st.write('''\n
\n Etapa 4: Simule o resultado de possíveis intervenções\n
\n Agora é a hora de planejar como você pode melhor se preparar para evitar a sobrecarga hospitalar. Veja como mudanças na estratégia adotada afetam a necessidade de internação em leitos.\n
''', unsafe_allow_html=True)\n\n user_input['strategy']['isolation'] = st.slider('Em quantos dias você quer acionar a Estratégia 2, medidas restritivas? (deixe como 0 se a medida já estiver em vigor)', 0, 90, 0, key='strategy2')\n\n user_input['strategy']['lockdown'] = st.slider('Em quantos dias você quer acionar a Estratégia 3, quarentena?', 0, 90, 90, key='strategy3')\n \n st.write('

', unsafe_allow_html=True)\n \n # SIMULATOR SCENARIOS: BEDS & RESPIRATORS\n fig, dday_beds, dday_ventilators = simulator.run_evolution(user_input, config) \n\n utils.genChartSimulationSection(user_input['strategy']['isolation'], \n user_input['strategy']['lockdown'], \n SimulatorOutput(color=BackgroundColor.SIMULATOR_CARD_BG,\n min_range_beds=dday_beds['worst'], \n max_range_beds=dday_beds['best'], \n min_range_ventilators=dday_ventilators['worst'],\n max_range_ventilators=dday_ventilators['best']), \n fig)\n\n utils.genWhatsappButton()\n utils.genFooter()\n \nif __name__ == \"__main__\":\n main()\n","sub_path":"src/simulation.py","file_name":"simulation.py","file_ext":"py","file_size_in_byte":11101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"4629843","text":"\nfrom __future__ import unicode_literals\nimport frappe\nimport frappe.utils\nfrom frappe.utils.oauth import get_oauth2_authorize_url, get_oauth_keys, login_via_oauth2, login_oauth_user as _login_oauth_user, redirect_post_login\nimport json\nfrom frappe import _\nfrom frappe.auth import LoginManager\nfrom frappe.integrations.doctype.ldap_settings.ldap_settings import get_ldap_settings\n\nno_cache = True\n\n\ndef get_context(context):\n if frappe.session.user != \"Guest\" and frappe.session.data.user_type==\"System User\":\n frappe.local.flags.redirect_location = \"/desk\"\n raise frappe.Redirect\n\n # get settings from site config\n\n context.no_header = True\n context.for_test = 'login.html'\n context[\"title\"] = \"Login\"\n context[\"disable_signup\"] = frappe.utils.cint(frappe.db.get_value(\"Website Settings\", \"Website Settings\", \"disable_signup\"))\n\n for provider in (\"google\", \"github\", \"facebook\", \"frappe\"):\n if get_oauth_keys(provider):\n context[\"{provider}_login\".format(provider=provider)] = get_oauth2_authorize_url(provider)\n context[\"social_login\"] = True\n\n ldap_settings = get_ldap_settings()\n context[\"ldap_settings\"] = ldap_settings\n\n login_name_placeholder = [_(\"Email address\")]\n\n if frappe.utils.cint(frappe.get_system_settings(\"allow_login_using_mobile_number\")):\n login_name_placeholder.append(_(\"Mobile number\"))\n\n if frappe.utils.cint(frappe.get_system_settings(\"allow_login_using_user_name\")):\n login_name_placeholder.append(_(\"Username\"))\n\n context['login_name_placeholder'] = ' {0} '.format(_('or')).join(login_name_placeholder)\n\n return context\n\n\n@frappe.whitelist(allow_guest=True)\ndef login_via_token(login_token):\n sid = frappe.cache().get_value(\"login_token:{0}\".format(login_token), expires=True)\n if not sid:\n frappe.respond_as_web_page(_(\"Invalid Request\"), _(\"Invalid Login Token\"), http_status_code=417)\n return\n\n frappe.local.form_dict.sid = sid\n frappe.local.login_manager = LoginManager()\n\n redirect_post_login(desk_user = frappe.db.get_value(\"User\", frappe.session.user, \"user_type\")==\"System User\")\n\n","sub_path":"officeplus/www/login.py","file_name":"login.py","file_ext":"py","file_size_in_byte":2146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"75715144","text":"\n#%matplotlib inline\nimport numpy as np\nimport random\nimport math\nimport matplotlib.pyplot as plt\nimport timeit\nfrom functions import *\n\n#The class for Randomized Response:\nclass Randomized_Response:\n def __init__(self, absz, pri_para): # absz: alphabet size, pri_para: privacy parameter\n self.absz = absz #alphabet size k\n self.exp = math.exp(pri_para) 
#privacy parameter\n        self.flip_prob = (self.absz - 1)/(math.exp(pri_para) + self.absz - 1) #flipping probability to maintain local privacy\n    \n    def encode_string(self, samples):\n        n = len(samples)\n        # Start by setting private_samples = samples.\n        private_samples_rr = np.copy(samples)\n        # Determine which samples need to be noised (\"flipped\").\n        flip = np.random.random_sample(n) < self.flip_prob\n        flip_samples = samples[flip]\n        # Select new samples uniformly at random to replace the original ones.\n        rand_samples = np.random.randint(0, self.absz - 1, len(flip_samples))\n        # Shift the samples if needed to avoid sampling the original samples.\n        rand_samples[rand_samples >= flip_samples] += 1\n        # Replace the original samples by the randomly selected ones.\n        private_samples_rr[flip] = rand_samples\n        return private_samples_rr\n    \n    def decode_string(self, out_samples, normalization = 0):\n        #normalization options: 0: clip and normalize(default)\n        #                       1: simplex projection\n        #                       else: no normalization\n        n = len(out_samples)\n        (counts_rr,temp) = np.histogram(out_samples, range(self.absz+1))\n        # Estimate the PMF using the count vector.\n        p_rr = (counts_rr / float(n)) * ((self.exp + self.absz - 1) /(self.exp - 1)) - 1.0 / (self.exp - 1)\n        #p_rr = decode_counts(counts_rr, epsilon, n, self.absz)\n        # Check if truncation and renormalization is required.\n\n        if normalization == 0: \n            p_rr = probability_normalize(p_rr) #clip and normalize\n        if normalization == 1:\n            p_rr = project_probability_simplex(p_rr) #simplex projection\n        return p_rr\n    \nclass RAPPOR:\n    def __init__(self, absz, pri_para): # absz: alphabet size, pri_para: privacy parameter\n        self.absz = absz #alphabet size k\n        self.exp = math.exp(pri_para / 2.0) #privacy parameter\n        self.flip_prob = 1/(math.exp(pri_para/2.0) + 1) #flipping probability to maintain local privacy\n\n    def encode_string(self, samples):\n        n = len(samples)\n        users = range(n)\n        # One-hot encode the input integers.\n        private_samples_rappor = np.zeros((n, self.absz))\n        private_samples_rappor[users, samples] = 1\n        # Flip the RAPPOR encoded bits with probability self.flip_prob\n        flip = np.random.random_sample((n, self.absz))\n        return np.logical_xor(private_samples_rappor, flip < self.flip_prob)\n\n\n    def encode_string_light(self, samples):\n        #return the count vector of the RAPPOR response, which is less memory intensive\n        #also return the cumulative time for adding rappor vectors, which should also be considered as decoding time.\n        n = len(samples)\n        users = range(n)\n        time = 0\n        counts = np.zeros(self.absz)\n        # One-hot encode the input integers.\n        for i in range(n):\n            private_samples_rappor = np.zeros(self.absz)\n            private_samples_rappor[samples[i]] = 1\n            # Flip the RAPPOR encoded bits with probability self.flip_prob\n            flip = np.random.random_sample(self.absz)\n            private_samples_rappor = np.logical_xor(private_samples_rappor, flip < self.flip_prob) \n            start_time = timeit.default_timer() #record adding time\n            counts = counts + private_samples_rappor # add RAPPOR response vector\n            time = time + timeit.default_timer() - start_time \n        return counts,time\n\n    def encode_string_compress(self, samples):\n        #encode RAPPOR responses into the locations of ones, which saves communication budget when eps is large\n        n = len(samples)\n        out = [0]*n\n        # One-hot encode the input integers.\n        for i in range(n):\n            private_samples_rappor = np.zeros(self.absz)\n            private_samples_rappor[samples[i]] = 1\n            # Flip the RAPPOR encoded bits with probability self.flip_prob\n            flip = np.random.random_sample(self.absz)\n            
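# Editor's note (hedged aside, not in the original source): each one-hot bit is XORed below with an independent Bernoulli(flip_prob) draw, so every bit flips with probability 1/(e^(eps/2) + 1); since two one-hot inputs differ in exactly two bits, this per-bit randomizer is what gives the basic RAPPOR scheme its eps-local-differential-privacy guarantee.\n            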
private_samples_rappor = np.logical_xor(private_samples_rappor, flip < self.flip_prob) \n out[i] = np.where(private_samples_rappor)[0] # get the locations of ones\n out_list = np.concatenate(out)\n return out_list\n \n def decode_counts(self, counts, n, normalization = 0):\n\n #normalization options: 0: clip and normalize(default)\n # 1: simplex projection\n # else: no nomalization\n # Estimate the PMF using the count vector\n \n p_rappor = (counts / float(n)) * ((self.exp + 1) /(self.exp - 1)) - 1.0 / (self.exp - 1)\n \n if normalization == 0: \n p_rappor = probability_normalize(p_rappor) #clip and normalize\n if normalization == 1:\n p_rappor = project_probability_simplex(p_rappor) #simplex projection\n\n return p_rappor\n","sub_path":"RR_RAPPOR.py","file_name":"RR_RAPPOR.py","file_ext":"py","file_size_in_byte":5307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"202087612","text":"from subprocess import call\nimport os\n\nfolder_name = './new_result'\n\nif __name__ == '__main__':\n if not os.path.exists(folder_name[2:]):\n os.mkdir(folder_name[2:])\n # Checking whether it is already collected\n old_lst = []\n for item in os.listdir(folder_name):\n if item.endswith('.json'):\n old_lst.append(item.replace('.json', ''))\n print(old_lst)\n\n #################### Main code ####################\n hashtag_lst_file = 'hashtag.txt'\n h_f = open(hashtag_lst_file, 'r', encoding='utf-8')\n hashtag_lst = []\n for tag in h_f:\n tag = tag.rstrip()\n hashtag_lst.append(tag)\n h_f.close()\n # hashtag_lst = ['iphone','apple', 'ios', 'samsunggalaxy', 'android', 'samsung']\n\n # Duplicate Deletion\n hashtag_lst = list(set(hashtag_lst))\n print(hashtag_lst)\n\n # Number of Queries\n q_num = 10000\n\n # Iteration\n for hashtag in hashtag_lst:\n if hashtag in old_lst:\n print('[' + hashtag + ']', 'was already crawled!!!')\n continue\n print('[' + hashtag + ']', 'crawling....')\n call(['python3', 'crawler.py', 'hashtag', '-t', hashtag,\n '-n', str(q_num), '-o', folder_name+'/'+hashtag+'.json'])\n","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"360112377","text":"import pygame as pg\nfrom settings import WIDTH, HEIGHT\nimport random\n\nclock = pg.time.Clock()\n\n#Создаём класс игрока и наследуемся явно и неявно от класса спрайт в пигейме\nclass Player(pg.sprite.Sprite):\n max_speed = 10\n shouting_cooldown = 150\n\n def __init__(self, clock, plasmoids):\n super(Player, self).__init__()\n\n self.clock = clock\n self.plasmoids = plasmoids\n\n self.image = pg.image.load('game_data/player.png')\n self.rect = self.image.get_rect() #устанавливаем размеры объекта равные размеру картинки\n self.rect.centerx = WIDTH / 2\n self.rect.bottom = HEIGHT - 10\n self.current_speed = 0\n self.current_shouting_cooldown = 0\n self.plasmoid_sound = pg.mixer.Sound('game_data/sounds/plasma_bolt.wav')\n\n def update(self):\n #команда захвата нажатых клавиш\n key = pg.key.get_pressed()\n\n if key[pg.K_LEFT]:\n self.current_speed = -self.max_speed\n elif key[pg.K_RIGHT]:\n self.current_speed = self.max_speed\n else:\n self.current_speed = 0\n #Задаём смещение относительно текущей точки\n self.rect.move_ip((self.current_speed, 0))\n\n self.shouting()\n\n def shouting(self):\n #Захват нажатия клавиш\n key = pg.key.get_pressed()\n\n if key[pg.K_SPACE] and self.current_shouting_cooldown <= 0:\n self.plasmoid_sound.play()\n 
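# Editor's note (added comment, not in the original source): spawn a new plasmoid at the ship's nose (rect.midtop); the cooldown reset on the next line means holding SPACE fires at most once per shouting_cooldown milliseconds.\n            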
self.plasmoids.add(Plasmoid(self.rect.midtop))\n self.current_shouting_cooldown = self.shouting_cooldown\n else:\n self.current_shouting_cooldown -= self.clock.get_time()\n for plasmoid in list(self.plasmoids):\n if plasmoid.rect.bottom < 0:\n self.plasmoids.remove(plasmoid)\n\nclass Background(pg.sprite.Sprite):\n speed = 4\n\n def __init__(self, clock):\n super(Background, self).__init__()\n\n self.clock = clock\n\n self.image = pg.image.load('game_data/background.png')\n self.rect = self.image.get_rect()\n self.rect.bottom = HEIGHT\n\n def update(self):\n # self.rect.bottom += 2\n #\n # if self.rect.bottom >= self.rect.height:\n # self.rect.bottom = HEIGHT\n self.rect.move_ip((0, self.speed))\n\n if self.rect.bottom >= self.rect.height:\n self.rect.bottom = HEIGHT\n\n\nclass Plasmoid(pg.sprite.Sprite):\n speed = -7\n\n def __init__(self, position):\n super(Plasmoid, self).__init__()\n\n self.image = pg.image.load('game_data/plasmoid.png')\n self.rect = self.image.get_rect()\n self.rect.midbottom = position\n\n def update(self):\n self.rect.move_ip((0, self.speed))\n\nclass Meteor(pg.sprite.Sprite):\n cooldown = 1200\n curent_cooldown = 0\n speed = 7\n\n def __init__(self):\n super(Meteor, self).__init__()\n\n image_name = 'game_data/meteor{}.png'.format(random.randint(1, 6))\n self.image = pg.image.load(image_name)\n self.rect = self.image.get_rect()\n self.rect.midbottom = (random.randint(0, WIDTH), 0)\n\n def update(self):\n self.rect.move_ip((0, self.speed))\n\n @staticmethod\n def meteor_process(clock, meteors):\n if Meteor.curent_cooldown <= 0:\n meteors.add(Meteor())\n Meteor.curent_cooldown = Meteor.cooldown\n else:\n Meteor.curent_cooldown -= clock.get_time()\n\n for meteor in list(meteors):\n if (meteor.rect.right < 0 or\n meteor.rect.left > WIDTH or\n meteor.rect.top > (HEIGHT)):\n meteors.remove(meteor)\n","sub_path":"game_objects.py","file_name":"game_objects.py","file_ext":"py","file_size_in_byte":3778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"224847977","text":"# Class Dictionary Validator\n\nsample_board = {'1a': 'brook', '1b': 'bknight', '1c': 'bbishop', '1d': 'bqueen', '1e': 'bking', '1f': 'bbishop', '1g': 'bknight', '1h': 'brook', '2a': 'bpawn', '2b': 'bpawn', '2c': 'bpawn', '2d': 'bpawn', '2e': 'bpawn', '2f': 'bpawn', '2g': 'bpawn', '2h': 'bpawn', '8a': 'wrook', '8b': 'wknight', '8c': 'wbishop', '8d': 'wqueen', '8e': 'wking', '8f': 'wbishop', '8g': 'wknight', '8h': 'wrook', '7a': 'wpawn', '7b': 'wpawn', '7c': 'wpawn', '7d': 'wpawn', '7e': 'wpawn', '7f': 'wpawn', '7g': 'wpawn', '7h': 'wpawn'}\n\ndef isValidChessBoard(board):\n value_list = list(sample_board.values())\n key_list = list(sample_board.keys())\n value_count = {}\n #key_count = {}\n for value in value_list:\n value_count.setdefault(value, 0)\n value_count[value] += 1\n #for key in key_list:\n #key_count.setdefault(key, 0)\n #key_count[key] += 1\n if value_count['bking'] != 1:\n return('False.')\n return('Incorrect count of black king pieces.')\n if value_count['wking'] != 1:\n return('False.')\n return('Incorrect count of wlack king pieces.')\n \n# Create value_count as global variable for testing\nvalue_count = {}\nfor value in value_list:\n value_count.setdefault(value, 0)\n value_count[value] += 1\n\n# Create key_count as global variable for testing\nkey_list = list(sample_board.keys())\nkey_count = {}\nfor key in key_list:\n key_count.setdefault(key, 0)\n key_count[key] += 1\n\n# Check if 'pawn' in values\nfor value in value_list:\n 'pawn' in 
value\n \n# Check first character of values\nfor value in value_list:\n print(value[0])\n \n#1 count of 'bking' and 'wking' == 1\n#2 count of dict values (pieces) <= 16\n#3 count of dict values containing 'pawn' <= 8\n#4 first character of dict keys must range from 1 to 8\n#5 second character of dict keys must range from 'a' to 'h'\n#6 dict values begin with 'w' or 'b'\n#7 dict values must contain ['pawn', 'knight', 'bishop', 'rook', 'queen', 'king']\n# Return True or False\n# Give error message when 'bug has resulted in an improper chess board'\n\n# use for k in board.items() to parse keys and check first and second characters (may need to convert to strings)\n# use for v in board.items() to parse keys and check first and remaining characters characters (may need to convert to strings)\n# evaluate in final if statement?\n# how to account for error message?\n","sub_path":"ch5_practice.py","file_name":"ch5_practice.py","file_ext":"py","file_size_in_byte":2400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"126844607","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('article', '0014_auto_20160831_1504'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='blog',\n name='status',\n field=models.CharField(default=b'DRAFT', max_length=7, verbose_name=b'Status', choices=[(b'DRAFT', b'Draft'), (b'PUBLIC', b'Public'), (b'PRIVATE', b'Private')]),\n ),\n ]\n","sub_path":"article/migrations/0015_blog_status.py","file_name":"0015_blog_status.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"515442231","text":"s1=input().split()\ns1=[int(i) for i in s1]\ns2=input().split()\ns2=[int(i) for i in s2]\nn=int(input())\nsum1=sum(s1)\nsum2=sum(s2)\n#print(sum1,\" \",sum2)\na=sum1//5\nb=sum2//10\nc=a+b\nif sum1%5 > 0:\n\tc+=1\nif sum2%10 > 0:\n\tc+=1\n#print(c)\nif n>=c:\n\tprint(\"YES\")\nelse:\n\tprint(\"NO\")\n","sub_path":"CODEFORCES/Rewards 800.py","file_name":"Rewards 800.py","file_ext":"py","file_size_in_byte":271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"357100749","text":"from django.conf import settings\nfrom django.utils import translation\nfrom django.utils.translation import ugettext_lazy as _\nfrom rest_framework import serializers\nimport urllib\nimport json\nfrom tourismiran.utils.tokens import generate_verification_code\nfrom django.contrib.auth.models import User\nfrom tourismiran.models.users import UserVerification, UserProfile\nfrom tourismiran.models.geo import Country\nfrom tourismiran.models.tourism import TouristPlace\nfrom tourismiran.models.stories import Story\nfrom visitiranapi.serializers.geo import CountrySerializer\nfrom django.core.mail import EmailMessage, EmailMultiAlternatives\nfrom django.contrib.auth.hashers import check_password\nfrom django.template.loader import render_to_string\nfrom django.contrib.auth import login, authenticate\nfrom django.utils import timezone\nfrom rest_framework_jwt.serializers import JSONWebTokenSerializer as jwt_serializer\nfrom rest_framework_jwt.settings import api_settings\nfrom rest_framework import exceptions\n\njwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER\njwt_encode_handler = api_settings.JWT_ENCODE_HANDLER\n\nclass FavPlacesSerializer(serializers.ModelSerializer):\n 
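\"\"\"Compact serializer for a user's favourite places, exposing only id, name and cover_image_path (docstring added editorially; not in the original source).\"\"\"\n    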
class Meta:\n model = TouristPlace\n fields=('id', 'name', 'cover_image_path')\n\nclass MyStorySerializer(serializers.ModelSerializer):\n cover_image = serializers.ImageField(read_only=True)\n class Meta:\n model = Story\n fields=('id', 'title', 'cover_image', 'views_num', 'publish', 'date_created', 'local_date_created', 'date_published', 'local_date_published')\n\nclass UserSerializer(serializers.HyperlinkedModelSerializer):\n avatar_image = serializers.ImageField(source='profile.image', required=False)\n gender = serializers.ChoiceField(choices=UserProfile.GENDERS, source='profile.gender', required=False)\n get_gender_display = serializers.CharField(source='profile.get_gender_display', read_only=True)\n persona = serializers.ChoiceField(choices=UserProfile.PERSONAS, source='profile.persona', required=False)\n get_persona_display = serializers.CharField(source='profile.get_persona_display', read_only=True)\n birth_date = serializers.DateField(source='profile.birth_date', required=False)\n local_birth_date = serializers.DateField(source='profile.local_birth_date', read_only=True)\n country = CountrySerializer(source='profile.country', read_only=True)\n my_stories = MyStorySerializer(many=True, source='stories_authored', read_only=True)\n fav_places = FavPlacesSerializer(many=True, source='profile.fav_places', read_only=True)\n fav_stories = MyStorySerializer(many=True, source='profile.fav_stories', read_only=True)\n country_id = serializers.PrimaryKeyRelatedField(queryset=Country.objects.all(), source='profile.country', write_only=True, required=False)\n class Meta:\n model = User\n \"\"\"fields = ('url', 'id', 'username', 'first_name', 'last_name', 'avatar_image', 'gender', 'get_gender_display', 'persona', 'get_persona_display',\n 'birth_date')\"\"\"\n exclude = ('email', 'password', 'is_superuser', 'groups', 'user_permissions')\n read_only_fields = ('username', 'is_staff', 'is_active', 'last_login', 'date_joined')\n\n def __init__(self, *args, **kwargs):\n # Don't pass the 'fields' arg up to the superclass\n fields = kwargs.pop('fields', None)\n\n # Instantiate the superclass normally\n super(UserSerializer, self).__init__(*args, **kwargs)\n\n if fields is not None:\n # Drop any fields that are not specified in the `fields` argument.\n allowed = set(fields)\n existing = set(self.fields)\n for field_name in existing - allowed:\n self.fields.pop(field_name)\n def validate(self, data):\n profile_data = data.get('profile', None)\n if profile_data:\n image_data = profile_data.get('image',None)\n if image_data and image_data._size > 3*1024*1024:\n raise exceptions.NotAcceptable(_('Image file maximum size exceeded.'))\n return super().validate(data)\n\n def update(self, instance, validated_data):\n profile_data = validated_data.get('profile', None)\n first_name_data = validated_data.get('first_name', None)\n last_name_data = validated_data.get('last_name', None)\n if first_name_data:\n instance.first_name = first_name_data\n if last_name_data:\n instance.last_name = last_name_data\n instance.save() \n if profile_data:\n #image_data = profile_data.get('image',None)\n #if image_data:\n #profile, created = UserProfile.objects.update_or_create(user__id=instance.id, defaults={'image':image_data})\n profile, created = UserProfile.objects.update_or_create(user__id=instance.id, defaults=profile_data)\n #instance.profile = profile\n #instance.save()\n return instance\n\nclass RegisterSerializer(serializers.Serializer):\n email = serializers.EmailField()\n\n def save(self):\n email = 
self.validated_data['email'].lower()\n user, created = User.objects.get_or_create(username__iexact=email, defaults={'username': email, 'email': email})\n code, hashed_code, expiry = generate_verification_code()\n verification, created = UserVerification.objects.update_or_create(user=user, defaults={'code': hashed_code, 'code_expire_time': expiry,\n 'code_verified': None})\n subject = _('Your Visit IRAN Account Activation Code')\n html_content = render_to_string('registration/verification_code_email_body.html', {\n 'user': user, \n 'verification_code': code\n })\n to_email = email\n msg = EmailMultiAlternatives(subject, html_content, to=[to_email])\n msg.attach_alternative(html_content, \"text/html\")\n msg.send()\n\n \"\"\"def validate(self, data):\n ''' Begin reCAPTCHA validation '''\n recaptcha_response = self.context.get('request').POST.get('g-recaptcha-response')\n url = 'https://www.google.com/recaptcha/api/siteverify'\n values = {\n 'secret': settings.GOOGLE_RECAPTCHA_SECRET_KEY,\n 'response': recaptcha_response\n }\n recaptcha_data = urllib.parse.urlencode(values).encode()\n req = urllib.request.Request(url, data=recaptcha_data)\n response = urllib.request.urlopen(req)\n result = json.loads(response.read().decode())\n ''' End reCAPTCHA validation '''\n \n if not result['success']:\n msg = _('Captcha Validation Failed.')\n raise serializers.ValidationError(msg)\n else:\n return data\"\"\" \n\nclass JSONWebTokenSerializer(jwt_serializer):\n def validate(self, attrs):\n credentials = {\n self.username_field: attrs.get(self.username_field).lower(),\n 'password': attrs.get('password')\n }\n\n if all(credentials.values()):\n user = authenticate(**credentials)\n\n if user:\n if not user.is_active:\n msg = _('User account is disabled.')\n raise serializers.ValidationError(msg)\n\n payload = jwt_payload_handler(user)\n login(self.context.get('request'), user)\n return {\n 'token': jwt_encode_handler(payload),\n 'user': user\n }\n else:\n msg = _('Unable to log in with provided credentials.')\n raise serializers.ValidationError(msg)\n else:\n msg = _('Must include \"{username_field}\" and \"password\".')\n msg = msg.format(username_field=self.username_field)\n raise serializers.ValidationError(msg)\n\n\n \n","sub_path":"visitiranapi/serializers/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":7888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"168626256","text":"import copy\nimport random\n\n\ndef crossover_linear(p1, p2, alpha):\n c1 = copy.deepcopy(p1)\n c2 = copy.deepcopy(p2)\n\n for i in range(len(p1)):\n c1[i] = round(p1[i] + alpha * (p2[i] - p1[i]), 2)\n c2[i] = round(p2[i] - alpha * (p2[i] - p1[i]), 2)\n\n return [c1, c2]\n\n\nrandom.seed(3)\n\np1 = [round(random.uniform(0, 10), 2) for _ in range(6)]\np2 = [round(random.uniform(0, 10), 2) for _ in range(6)]\n\noffspring = crossover_linear(p1, p2, 0.3)\n\nprint(f'Parent 1: {p1}')\nprint(f'Parent 2: {p2}')\nprint(f'Child 1: {offspring[0]}')\nprint(f'Child 2: {offspring[1]}')\n","sub_path":"ch4/linear.py","file_name":"linear.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"90923018","text":"\"\"\"\r\nThis contains the main editor window:\r\n\r\nEditorFrame - The main editor window.\r\n\"\"\"\r\n#---logging---------------------------------------------------------------------\r\nimport logging\r\nlog = 
logging.getLogger(__name__)\r\n#log.setLevel(logging.DEBUG)\r\n\r\n#---imports---------------------------------------------------------------------\r\nimport wx #for gui elements\r\nimport wx.aui as aui\r\n\r\nfrom ptk_lib import VERSION\r\nfrom ptk_lib.controls import aui_addons\r\nfrom ptk_lib.controls import toolpanel\r\n\r\nfrom ptk_lib.resources import common22\r\nfrom ptk_lib.misc import open_help\r\nfrom ptk_lib.core_tools.fileio import FileDrop\r\nfrom ptk_lib.core_tools.taskicon import PTKInfoDialog\r\n\r\nfrom ptk_lib.core_tools.console import console_icons\r\n\r\n\r\nimport editor_icons\r\nfrom editor_notebook import EditorNotebook\r\nfrom search_panel import SearchPanel\r\nfrom dbg_controls import DebugEditorTools, BreakPointListPanel\r\n\r\n\r\n#ids for run menu items\r\nID_RUNMENU_SEL = wx.NewId()\r\nID_RUNMENU_CUR = wx.NewId()\r\nID_RUNMENU_NEW = wx.NewId()\r\nID_RUNMENU_EXT = wx.NewId()\r\n\r\n#---Editor frame ---------------------------------------------------------------\r\nclass EditorFrame(aui_addons.AUIFrame):\r\n \"\"\"Top level Editor window\"\"\"\r\n def __init__(self, tool):\r\n \"\"\"Create editor window\"\"\"\r\n aui_addons.AUIFrame.__init__(self, None, -1, \"PTK Editor\",size=(800,600),pos=(-1,-1))\r\n\r\n #store a reference to the editor tool in the frame.\r\n self.tool = tool\r\n\r\n #set the window icons\r\n ib = wx.IconBundle()\r\n ib.AddIcon(editor_icons.editor16.GetIcon())\r\n ib.AddIcon(editor_icons.editor32.GetIcon())\r\n ib.AddIcon(editor_icons.editor48.GetIcon())\r\n self.SetIcons(ib)\r\n\r\n #create statusbar\r\n self.CreateStatusBar()\r\n self.SetStatusText(\"Python toolkit v\"+VERSION)\r\n\r\n #create the menu\r\n self._CreateMenu()\r\n self.SetToolbarsMenu(self.menubar.toolbars_menu)\r\n self.SetPanesMenu(self.menubar.view_menu)\r\n\r\n #create aui panes\r\n self._CreateNotebook()\r\n self._CreateSearchPane() \r\n self._CreateDebuggerPane()\r\n\r\n #create the main tool bar\r\n self._CreateTools()\r\n\r\n #list of recent files:\r\n self.filehistory = wx.FileHistory(9)\r\n cfg = wx.GetApp().GetConfig()\r\n cfg.SetPath(\"Editor//\")\r\n self.filehistory.Load(cfg)\r\n self.filehistory.UseMenu(self.menubar.recent_menu)\r\n self.filehistory.AddFilesToMenu()\r\n self.Bind(wx.EVT_MENU_RANGE, self.OnFileHistory, id=wx.ID_FILE1, id2=wx.ID_FILE9)\r\n\r\n #create a droptarget\r\n self.dt = FileDrop(self.notebook.OpenFile)\r\n self.SetDropTarget(self.dt) \r\n\r\n #get window close events\r\n self.Bind( wx.EVT_CLOSE, self.OnClose)\r\n\r\n #load the frame settings adding layout checkitems to the view menu\r\n self.SetSavePath('Editor')\r\n self.SetLayoutsMenu(self.menubar.layouts_menu)\r\n\r\n #update aui manager\r\n self.auimgr.Update()\r\n\r\n log.info('Done Initialising Editor Frame')\r\n\r\n def _CreateMenu(self):\r\n self.menubar = EditorMenu(self)\r\n #finally create the menubar\r\n self.SetMenuBar(self.menubar)\r\n \r\n def _CreateTools(self):\r\n self.tools = EditorTools(self)\r\n #add this toolbar to aui manager\r\n pane = (aui.AuiPaneInfo().Name('Editor toolbar')\r\n .Caption('Editor toolbar').ToolbarPane().CloseButton(True)\r\n .CaptionVisible(False)\r\n .DestroyOnClose(False).Top().Row(0).LeftDockable(False)\r\n .RightDockable(False))\r\n\r\n #add to the window using aui manager\r\n self.AddToolbar( self.tools, 'Editor toolbar', pane,\r\n helpstring = 'Show/Hide the editor toolbar' )\r\n\r\n self.formattools = FormatTools(self)\r\n #add this toolbar to aui manager\r\n pane = ( aui.AuiPaneInfo().Name('Format toolbar')\r\n .Caption('Format 
toolbar').ToolbarPane().CloseButton(True)\r\n .CaptionVisible(False)\r\n .DestroyOnClose(False).Top().Row(0).Position(1)\r\n .LeftDockable(False)\r\n .RightDockable(False) )\r\n\r\n #add to the window using aui manager\r\n self.AddToolbar( self.formattools, 'Format toolbar', pane, \r\n helpstring = 'Show/Hide the Format toolbar' )\r\n\r\n self.dbgtools = DebugEditorTools(self, self.tool)\r\n #add this toolbar to aui manager\r\n pane = ( aui.AuiPaneInfo().Name('Debugger toolbar')\r\n .Caption('Debugger toolbar').ToolbarPane().CloseButton(True)\r\n .CaptionVisible(False)\r\n .DestroyOnClose(False).Top().Row(1).Position(0)\r\n .LeftDockable(False)\r\n .RightDockable(False) )\r\n\r\n #add to the window using aui manager\r\n self.AddToolbar( self.dbgtools, 'Debugger toolbar', pane,\r\n helpstring = 'Show/Hide the debugger toolbar' )\r\n\r\n def _CreateNotebook(self):\r\n self.notebook = EditorNotebook(self)\r\n #setup how to display this in the aui\r\n pane = aui.AuiPaneInfo()\r\n name='Notebook'\r\n pane.Name(name) #id name\r\n pane.CentrePane()\r\n #add the pane\r\n self.auimgr.AddPane(self.notebook,pane)\r\n\r\n def _CreateSearchPane(self):\r\n ctrl = SearchPanel(self)\r\n pane = aui.AuiPaneInfo()\r\n name='Find and Replace'\r\n pane.Name(name) #id name\r\n pane.Caption('Find and Replace')\r\n pane.CloseButton(True) #close button\r\n pane.DestroyOnClose(False)\r\n pane.Floatable(True)\r\n pane.Resizable(True)\r\n pane.MinSize( (-1,65))\r\n pane.MaxSize( (-1,65))\r\n pane.Bottom()\r\n pane.Hide()\r\n\r\n #add the pane and menu item (see aui frame class) and store a pane \r\n #reference\r\n self.search= self.AddPane( ctrl, pane, None)\r\n\r\n\r\n def _CreateDebuggerPane(self):\r\n ctrl = BreakPointListPanel(self, self.tool)\r\n pane = aui.AuiPaneInfo()\r\n name='Debugger Breakpoints'\r\n pane.Name(name) #id name\r\n pane.Caption('Debugger Breakpoints')\r\n pane.CloseButton(True) #close button\r\n pane.MaximizeButton(True)\r\n pane.DestroyOnClose(False)\r\n pane.Floatable(True)\r\n pane.Resizable(True)\r\n pane.MinSize( (100,200))\r\n pane.MaxSize( (-1,-1))\r\n pane.Left()\r\n pane.Dock()\r\n pane.Hide()\r\n\r\n #add the pane and menu item (see aui frame class) and store a pane \r\n #reference\r\n self.bp_pane= self.AddPane( ctrl, pane, None)\r\n\r\n #---Interface methods-------------------------------------------------------\r\n def OpenFile(self,filepath):\r\n \"\"\"Opens the file in the editor\"\"\"\r\n self.notebook.OpenFile(filepath)\r\n self.Show()\r\n self.Raise()\r\n\r\n def GetMenu(self,menu):\r\n \"\"\"\r\n Return a reference to the menu/menubar:\r\n menu = 'menubar','file','edit','format','view','toolbars','tools','help'\r\n \"\"\"\r\n menu = menu.lower()\r\n if menu=='menubar':\r\n return self.menubar\r\n if menu=='file':\r\n return self.menubar.file_menu #file menu\r\n if menu=='edit':\r\n return self.menubar.edit_menu #edit menu\r\n if menu=='format':\r\n return self.menubar.format_menu #format menu\r\n if menu=='view':\r\n return self.menubar.view_menu #view menu\r\n if menu=='toolbars':\r\n return self.menubar.toolbars_menu #toolbar submenu\r\n if menu=='tools':\r\n return self.menubar.tool_menu #tool menu\r\n if menu=='help':\r\n return self.menubar.help_menu #help menu\r\n\n def ToggleFind(self):\n \"\"\"\n Show/Hide the search panel\n \"\"\"\n if self.search.IsShown():\n self.search.Hide()\n page = self.notebook.GetCurrentPage()\n if page is not None:\n page.SetFocus()\n else:\n self.search.Show()\n self.search.window.SetFocus()\n self.auimgr.Update()\n \r\n #---Event 
handlers----------------------------------------------------------\r\n def OnClose(self,event):\r\n \"\"\"Editor frame close event handler\"\"\"\r\n res = self.notebook.CloseAll()\r\n if res is not True:\n return\n\n #check if console is hidden too\n #console = self.toolmgr.get_tool('Console')\n #if console.frame.IsShown() is False: \n # dlg = wx.MessageDialog(self, \"The Console is also hidden.\\nDo you want to exit PTK?\", \"Hide editor\",\n # wx.YES_NO | wx.YES_DEFAULT | wx.ICON_QUESTION)\n # result=dlg.ShowModal()\n # dlg.Destroy()\n # if result==wx.ID_YES:\n # self.ExecFile(num)\r\n self.Hide()\n \r\n\r\n def OnFileHistory(self, event):\r\n filenum = event.GetId() - wx.ID_FILE1\r\n path = self.filehistory.GetHistoryFile(filenum)\r\n self.OpenFile(path)\r\n\r\n#-------------------------------------------------------------------------------\r\nclass EditorTools(toolpanel.ToolPanel):\r\n def __init__(self,parent):\r\n toolpanel.ToolPanel.__init__(self, parent, -1)\r\n self.SetStatusBar(parent.StatusBar)\r\n\r\n #set the icon size\r\n self.SetToolBitmapSize( (22,22) )\r\n\r\n #load some icons\r\n new_bmp = common22.document_new.GetBitmap()\r\n open_bmp = common22.document_open.GetBitmap()\r\n save_bmp = common22.document_save.GetBitmap()\r\n\r\n cut_bmp = common22.edit_cut.GetBitmap()\r\n copy_bmp = common22.edit_copy.GetBitmap()\r\n paste_bmp = common22.edit_paste.GetBitmap()\r\n\r\n undo_bmp = common22.edit_undo.GetBitmap()\r\n redo_bmp = common22.edit_redo.GetBitmap()\r\n search_bmp = common22.edit_find.GetBitmap()\r\n\r\n untab_bmp = editor_icons.format_indent_less.GetBitmap()\r\n tab_bmp = editor_icons.format_indent_more.GetBitmap()\r\n com_bmp = editor_icons.edit_comment.GetBitmap()\r\n uncom_bmp = editor_icons.edit_uncomment.GetBitmap()\r\n sep_bmp = editor_icons.add_separator.GetBitmap()\r\n \r\n #new\r\n self.AddTool( wx.ID_NEW, new_bmp,wx.ITEM_NORMAL, \r\n 'Create a new file', \r\n 'Create a new file to edit')\r\n self.Bind(wx.EVT_TOOL, self.OnNew, id=wx.ID_NEW)\r\n\r\n #open\r\n self.AddTool( wx.ID_OPEN, open_bmp, toolpanel.ITEM_DROPDOWN, \r\n 'Open / Open recent',\r\n 'Open an existing file to edit')\r\n self.Bind(wx.EVT_TOOL, self.OnOpen, id=wx.ID_OPEN)\r\n\r\n #save\r\n self.AddTool( wx.ID_SAVE, save_bmp, wx.ITEM_NORMAL,\r\n 'Save file',\r\n 'Save file to disk')\r\n self.Bind(wx.EVT_TOOL, self.OnSave, id=wx.ID_SAVE)\r\n self.AddSeparator()\r\n \r\n #cut\r\n self.AddTool(wx.ID_CUT, cut_bmp, wx.ITEM_NORMAL,\r\n 'Cut',\r\n 'Cut selection to clipboard')\r\n self.Bind(wx.EVT_TOOL, self.OnCut, id=wx.ID_CUT)\r\n\r\n #copy\r\n self.AddTool(wx.ID_COPY, copy_bmp, wx.ITEM_NORMAL,\r\n 'Copy',\r\n 'Copy selection to clipboard')\r\n self.Bind(wx.EVT_TOOL, self.OnCopy, id=wx.ID_COPY)\r\n\r\n #paste\r\n self.AddTool(wx.ID_PASTE, paste_bmp, wx.ITEM_NORMAL,\r\n 'Paste',\r\n 'Paste selection from clipboard')\r\n self.Bind(wx.EVT_TOOL, self.OnPaste, id=wx.ID_PASTE)\r\n self.AddSeparator()\r\n\r\n #undo\r\n self.AddTool(wx.ID_UNDO, undo_bmp, wx.ITEM_NORMAL,\r\n 'Undo changes',\r\n 'Undo changes')\r\n self.Bind(wx.EVT_TOOL, self.OnUndo, id=wx.ID_UNDO)\r\n \r\n #redo\r\n self.AddTool(wx.ID_REDO, redo_bmp, wx.ITEM_NORMAL,\r\n 'Redo changes',\r\n 'Redo changes')\r\n self.Bind(wx.EVT_TOOL, self.OnRedo, id=wx.ID_REDO)\r\n self.AddSeparator()\r\n\r\n #search and replace\r\n self.AddTool(wx.ID_FIND, search_bmp, wx.ITEM_CHECK,\r\n 'Find and replace', \r\n 'Find and replace')\r\n self.Bind(wx.EVT_TOOL, self.OnFind, id=wx.ID_FIND)\r\n\r\n #bind to the search pane events to update the toggle button\r\n # 
when shown/hidden\r\n search_pane = self.Parent.search\r\n search_pane.window.Bind( wx.EVT_SHOW, self.OnSearchShow)\r\n self.Realize()\r\n\r\n #---event handlers----------------------------------------------------------\r\n def OnNew(self,event):\r\n \"\"\"New event handler\"\"\"\r\n self.Parent.notebook.New()\r\n\r\n def OnOpen(self,event):\r\n \"\"\"Open event handler\"\"\"\r\n if event.IsChecked():\r\n #Show dropdown menu\r\n but = event.GetEventObject()\n \r\n #create a menu and add recent editor files\n menu = wx.Menu()\r\n self.Parent.filehistory.AddFilesToThisMenu(menu)\r\n menu.Bind(wx.EVT_MENU_RANGE, self.OnMenuOpen, id=wx.ID_FILE1, \r\n id2=wx.ID_FILE9)\n but.PopupMenu(menu)\r\n else:\r\n self.Parent.notebook.Open()\n \r\n def OnMenuOpen(self, event):\n \"\"\"recent files menu handler\"\"\"\r\n filenum = event.GetId() - wx.ID_FILE1\r\n path = self.Parent.filehistory.GetHistoryFile(filenum)\r\n self.Parent.OpenFile(path)\n \r\n def OnSave(self,event):\r\n \"\"\"Save event handler\"\"\"\r\n self.Parent.notebook.Save()\r\n\r\n def OnCut(self,event):\r\n \"\"\"Cut event handler\"\"\"\r\n self.Parent.notebook.Cut()\r\n\r\n def OnCopy(self,event):\r\n \"\"\"Copy event handler\"\"\"\r\n self.Parent.notebook.Copy()\r\n\r\n def OnPaste(self,event):\r\n \"\"\"Paste event handler\"\"\"\r\n self.Parent.notebook.Paste()\r\n\r\n def OnUndo(self,event):\r\n \"\"\"Undo event handler\"\"\"\r\n self.Parent.notebook.Undo()\r\n\r\n def OnRedo(self,event):\r\n \"\"\"Redo event handler\"\"\"\r\n self.Parent.notebook.Redo()\r\n\r\n def OnFind(self,event):\r\n \"\"\"Opens the find/replace pane event handler\"\"\"\r\n self.Parent.ToggleFind()\r\n\r\n def OnSearchShow(self, event):\r\n #The search pane has been shown/hidden\r\n shown = event.GetShow()\r\n self.ToggleTool(wx.ID_FIND, shown)\r\n self.Refresh()\r\n event.Skip()\r\n\r\n#-------------------------------------------------------------------------------\r\nclass FormatTools(toolpanel.ToolPanel):\r\n def __init__(self,parent):\r\n toolpanel.ToolPanel.__init__(self, parent, -1)\r\n self.SetStatusBar(parent.StatusBar)\r\n\r\n self.parent = parent\r\n\r\n #set the icon size\r\n self.SetToolBitmapSize( (22,22) )\r\n\r\n #load some icons\r\n untab_bmp = editor_icons.format_indent_less.GetBitmap()\r\n tab_bmp = editor_icons.format_indent_more.GetBitmap()\r\n com_bmp = editor_icons.edit_comment.GetBitmap()\r\n uncom_bmp = editor_icons.edit_uncomment.GetBitmap()\r\n sep_bmp = editor_icons.add_separator.GetBitmap()\r\n\r\n #unindent\r\n self.AddTool(wx.ID_UNINDENT, untab_bmp, wx.ITEM_NORMAL, \r\n 'Unindent selection',\r\n 'Unindent selection')\r\n self.Bind(wx.EVT_TOOL, self.OnUndent, id=wx.ID_UNINDENT)\r\n \r\n #indent\r\n self.AddTool(wx.ID_INDENT, tab_bmp, wx.ITEM_NORMAL, \r\n 'Indent selection',\r\n 'Indent selection')\r\n self.Bind(wx.EVT_TOOL, self.OnIndent, id=wx.ID_INDENT)\r\n \r\n #comment\r\n id = wx.NewId()\r\n self.AddTool(id, com_bmp, wx.ITEM_NORMAL, \r\n 'Comment selection', \r\n 'Comment selection')\r\n self.Bind(wx.EVT_TOOL, self.OnComment, id=id)\r\n \r\n #uncomment\r\n id = wx.NewId()\r\n self.AddTool(id, uncom_bmp, wx.ITEM_NORMAL,\r\n 'Uncomment selection',\r\n 'Uncomment selection')\r\n self.Bind(wx.EVT_TOOL, self.OnUnComment, id=id)\r\n \r\n #insert cell separator\r\n id = wx.NewId()\r\n self.AddTool(id, sep_bmp, wx.ITEM_NORMAL, \r\n 'Insert cell separator', \r\n 'Insert cell separator')\r\n self.Bind(wx.EVT_TOOL, self.OnInsertCellSeparator, id=id)\r\n \r\n self.Realize()\r\n\r\n #---event 
handlers----------------------------------------------------------\r\n def OnUndent(self,event):\r\n \"\"\"Undent event handler\"\"\"\r\n self.parent.notebook.Undent()\r\n\r\n def OnIndent(self,event):\r\n \"\"\"Indent event handler\"\"\"\r\n self.parent.notebook.Indent()\r\n\r\n def OnComment(self,event):\r\n \"\"\"Comment event handler\"\"\"\r\n self.parent.notebook.Comment()\r\n\r\n def OnUnComment(self,event):\r\n \"\"\"Uncomment event handler\"\"\"\r\n self.parent.notebook.UnComment()\r\n\r\n def OnInsertCellSeparator(self,event):\r\n \"\"\"Insert separator event handler\"\"\"\r\n self.parent.notebook.InsertCellSeparator()\r\n \r\n#-------------------------------------------------------------------------------\r\nclass EditorMenu(wx.MenuBar):\r\n def __init__(self,parent):\r\n wx.MenuBar.__init__(self)\r\n\r\n self.parent = parent\r\n\r\n self.file_menu = wx.Menu() #file menu\r\n self.edit_menu = wx.Menu() #edit menu\r\n self.format_menu = wx.Menu() #format menu\r\n self.view_menu = wx.Menu() #view menu\r\n self.toolbars_menu = wx.Menu() # toolbar submenu\r\n self.layouts_menu = wx.Menu() # layouts submenu\r\n self.tool_menu = wx.Menu() #tools menu\r\n self.help_menu = wx.Menu() #help menu\r\n \r\n ##add the menus to the menu bar\r\n self.Append(self.file_menu, \"&File\")\r\n self.Append(self.edit_menu, \"&Edit\")\r\n self.Append(self.format_menu, \"&Format\")\r\n self.Append(self.view_menu, \"&View\")\r\n self.Append(self.tool_menu, \"&Tools\")\r\n self.Append(self.help_menu, \"&Help\")\r\n\r\n ##file menu\r\n self.file_menu.Append(wx.ID_NEW, '&New\\tCtrl+N', 'Create a new file') \r\n self.file_menu.Append(wx.ID_OPEN, '&Open\\tCtrl+O', 'Open a file') \r\n self.file_menu.Append(wx.ID_SAVE, '&Save\\tCtrl+S', 'Save the curent file') \r\n self.file_menu.Append(wx.ID_SAVEAS, 'Save &As\\tCtrl+Alt+S', 'Save the current file with a differnt name') \r\n self.file_menu.AppendSeparator()\r\n self.recent_menu = wx.Menu()\r\n self.file_menu.AppendMenu(wx.ID_ANY, \"&Recent Files\", self.recent_menu)\r\n self.file_menu.AppendSeparator()\r\n self.file_menu.Append(wx.ID_CLOSE, \"&Close Editor\\tCtrl+H\",'Closes the editor window')\r\n self.file_menu.Append(wx.ID_EXIT, 'E&xit\\tCtrl+Q','Exit PTK')\r\n\r\n #event bindings\r\n self.parent.Bind(wx.EVT_MENU, self.OnNew, id=wx.ID_NEW)\r\n self.parent.Bind(wx.EVT_MENU, self.OnOpen, id=wx.ID_OPEN) \r\n self.parent.Bind(wx.EVT_MENU, self.OnSave, id=wx.ID_SAVE)\r\n self.parent.Bind(wx.EVT_MENU, self.OnSaveAs, id=wx.ID_SAVEAS) \r\n self.parent.Bind(wx.EVT_MENU, self.OnMenuClose, id=wx.ID_CLOSE)\r\n self.parent.Bind(wx.EVT_MENU, self.OnExit, id=wx.ID_EXIT)\r\n \r\n ##edit menu\r\n self.edit_menu.Append(wx.ID_CUT, 'Cu&t\\tCtrl+X', 'Cut selection') \r\n self.edit_menu.Append(wx.ID_COPY, '&Copy\\tCtrl+C', 'Copy selection') \r\n self.edit_menu.Append(wx.ID_PASTE, '&Paste\\tCtrl+V', 'Paste from clipboard') \r\n self.edit_menu.AppendSeparator()\r\n self.edit_menu.Append(wx.ID_UNDO, '&Undo\\tCtrl+Z', 'Undo past actions') \r\n self.edit_menu.Append(wx.ID_REDO, '&Redo\\tCtrl+Y', 'Redo undone actions') \r\n self.edit_menu.AppendSeparator()\r\n self.edit_menu.Append(wx.ID_REPLACE, '&Find and replace\\tCtrl+F', 'Open the find pane')\r\n #event bindings\r\n self.parent.Bind(wx.EVT_MENU, self.OnCut, id=wx.ID_CUT)\r\n self.parent.Bind(wx.EVT_MENU, self.OnCopy, id=wx.ID_COPY) \r\n self.parent.Bind(wx.EVT_MENU, self.OnPaste, id=wx.ID_PASTE)\r\n self.parent.Bind(wx.EVT_MENU, self.OnUndo, id=wx.ID_UNDO)\r\n self.parent.Bind(wx.EVT_MENU, self.OnRedo, id=wx.ID_REDO) \r\n 
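# Editor's note (added comment): the 'Find and replace' item above was appended with the stock wx.ID_REPLACE id, so the next binding routes its Ctrl+F accelerator straight to the search-pane toggle (OnFind).\r\n        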
self.parent.Bind(wx.EVT_MENU, self.OnFind, id=wx.ID_REPLACE) \r\n\r\n ##format menu\r\n indentid=wx.NewId()\r\n self.format_menu.Append(indentid, 'Indent\\tTab', 'Indent selection')\r\n undentid=wx.NewId()\r\n self.format_menu.Append(undentid, 'Undent\\tShift+Tab', 'Undent selection') \r\n comid=wx.NewId()\r\n self.format_menu.Append(comid, 'Comment\\tCtrl+#', 'Comment selection') \r\n uncomid=wx.NewId()\r\n self.format_menu.Append(uncomid, 'Uncomment\\tCtrl+Shift+#', 'Uncomment selection') \r\n self.format_menu.AppendSeparator()\r\n sepid=wx.NewId()\r\n self.format_menu.Append(sepid, 'Insert cell separator\\tCtrl+Enter', 'Insert a separator comment')\r\n #event bindings\r\n self.parent.Bind(wx.EVT_MENU, self.OnIndent, id=indentid) \r\n self.parent.Bind(wx.EVT_MENU, self.OnUndent, id=undentid) \r\n self.parent.Bind(wx.EVT_MENU, self.OnComment, id=comid) \r\n self.parent.Bind(wx.EVT_MENU, self.OnUnComment, id=uncomid) \r\n self.parent.Bind(wx.EVT_MENU, self.OnInsertCellSeparator, id=sepid) \r\n \r\n ##view menu\r\n self.view_menu.AppendSubMenu(self.toolbars_menu , 'Toolbars...','Show Toolbars')\r\n self.view_menu.AppendSubMenu(self.layouts_menu , 'Layouts...','Save/Restore window layouts')\r\n self.view_menu.AppendSeparator()\r\n #layouts added in auimix\r\n\r\n ##tools menu\r\n #Run selection in current engine\r\n item = wx.MenuItem( self.tool_menu, ID_RUNMENU_SEL,\r\n 'Run selection/cell in current engine \\tF9', \r\n 'Run the selected code or current cell in the current engine as if typed at the console',\r\n wx.ITEM_NORMAL)\r\n item.SetBitmap(console_icons.run_sel.GetBitmap())\r\n self.tool_menu.AppendItem(item)\r\n self.parent.Bind(wx.EVT_MENU, self.OnRunSelection, id=ID_RUNMENU_SEL) \r\n self.parent.Bind(wx.EVT_UPDATE_UI, self.OnUpdateRunSel, id = ID_RUNMENU_SEL)\r\n\r\n #run in current engine\r\n item = wx.MenuItem( self.tool_menu, ID_RUNMENU_CUR, \r\n 'Run file in current engine \\tF10', \r\n 'Run the file in the current engine',\r\n wx.ITEM_NORMAL) \r\n item.SetBitmap(console_icons.run.GetBitmap())\r\n self.tool_menu.AppendItem(item)\r\n self.parent.Bind(wx.EVT_MENU, self.OnRunFile, id=ID_RUNMENU_CUR) \r\n\r\n #run in new engine\r\n item = wx.MenuItem( self.tool_menu, ID_RUNMENU_NEW, \r\n 'Run file in a new engine \\tF11', \r\n 'Run file in a new engine',\r\n wx.ITEM_NORMAL) \r\n item.SetBitmap(console_icons.run_neweng.GetBitmap())\r\n self.tool_menu.AppendItem(item)\r\n self.parent.Bind(wx.EVT_MENU, self.OnRunNewEng, id=ID_RUNMENU_NEW)\r\n\r\n #Run in external process\r\n item = wx.MenuItem( self.tool_menu, ID_RUNMENU_EXT, \r\n 'Run file as an external process \\tF12', \r\n 'Run file as an external process',\r\n wx.ITEM_NORMAL) \r\n item.SetBitmap(console_icons.run_ext.GetBitmap())\r\n self.tool_menu.AppendItem(item)\r\n self.parent.Bind(wx.EVT_MENU, self.OnRunExt, id=ID_RUNMENU_EXT) \r\n\r\n ##help menu\r\n self.help_menu.Append(wx.ID_HELP, 'Help', 'Open the python documentation...') \r\n tipid=wx.NewId()\r\n self.help_menu.Append(tipid, 'Show tips', 'Show tips') \r\n self.help_menu.Append(wx.ID_ABOUT, 'About...', 'About this program...') \r\n #bindings\r\n self.parent.Bind(wx.EVT_MENU, self.OnHelp, id=wx.ID_HELP)\r\n self.parent.Bind(wx.EVT_MENU, self.OnTip, id=tipid)\r\n self.parent.Bind(wx.EVT_MENU, self.OnAbout, id=wx.ID_ABOUT)\r\n\r\n #---Event handlers----------------------------------------------------------\r\n #file menu\r\n def OnNew(self,event):\r\n \"\"\"New file event handler\"\"\"\r\n self.parent.notebook.New()\r\n\r\n def OnOpen(self,event):\r\n \"\"\"Open file 
event handler\"\"\"\r\n self.parent.notebook.Open()\r\n\r\n def OnSave(self,event):\r\n \"\"\"Save file event handler\"\"\"\r\n self.parent.notebook.Save()\r\n\r\n def OnSaveAs(self,event):\r\n \"\"\"Save as event handler\"\"\"\r\n self.parent.notebook.SaveAs()\r\n\r\n def OnMenuClose(self,event):\r\n \"\"\"Menu close event handler\"\"\"\r\n self.parent.Close()\r\n\r\n def OnExit(self, event):\r\n \"\"\"Called when exit menu item selected\"\"\"\r\n app = wx.GetApp()\r\n app.Exit()\r\n\r\n #edit menu\r\n def OnCut(self,event):\r\n \"\"\"Edit.Cut event handler\"\"\"\r\n self.parent.notebook.Cut()\r\n\r\n def OnCopy(self,event):\r\n \"\"\"Edit.Copy event handler\"\"\"\r\n self.parent.notebook.Copy()\r\n\r\n def OnPaste(self,event):\r\n \"\"\"Edit.Paste event handler\"\"\"\r\n self.parent.notebook.Paste()\r\n\r\n def OnUndo(self,event):\r\n \"\"\"Edit.Undo event handler\"\"\"\r\n self.parent.notebook.Undo()\r\n\r\n def OnRedo(self,event):\r\n \"\"\"Edit.Redo event handler\"\"\"\r\n self.parent.notebook.Redo()\r\n\r\n def OnIndent(self,event):\r\n \"\"\"Edit.Indent event handler\"\"\"\r\n self.parent.notebook.Indent()\r\n\r\n def OnUndent(self,event):\r\n \"\"\"Edit.Undent event handler\"\"\"\r\n self.parent.notebook.Undent()\r\n\r\n def OnComment(self,event): \r\n \"\"\"Edit.Comment event handler\"\"\"\r\n self.parent.notebook.Comment()\r\n\r\n def OnUnComment(self,event):\r\n \"\"\"Edit.UnComment event handler\"\"\"\r\n self.parent.notebook.UnComment()\r\n\r\n def OnInsertCellSeparator(self,event):\r\n \"\"\"Edit.Insert Separator event handler\"\"\"\r\n self.parent.notebook.InsertCellSeparator()\r\n \r\n def OnFind(self,event):\r\n \"\"\"Opens the find/replace pane event handler\"\"\"\n self.parent.ToggleFind()\r\n\r\n #view menu\r\n #done in auimixin\r\n\r\n #tools menu\r\n def OnRunSelection(self,event):\r\n \"\"\"Run selection event handler\"\"\"\r\n self.parent.notebook.Run()\r\n\r\n def OnUpdateRunSel(self, event):\r\n \"\"\"Enable/disable the run selection menu item\"\"\"\r\n num = self.parent.notebook.GetSelection()\r\n if num==-1:\r\n enable = False\r\n else:\r\n page = self.parent.notebook.GetPage(num)\r\n cmd = page.GetSelectedText()\r\n if len(cmd)==0:\r\n enable = False\r\n else:\r\n enable = True\r\n #self.tool_menu.Enable(ID_RUNMENU_SEL, enable)\r\n\r\n #tools menu\r\n def OnRunFile(self,event):\r\n \"\"\"Run file (execfile) event handler\"\"\"\r\n self.parent.notebook.ExecFile()\r\n\r\n def OnRunExt(self,event):\r\n \"\"\"Run as external process event handler\"\"\"\r\n self.parent.notebook.ExtRun()\r\n\r\n def OnRunNewEng(self, event):\r\n \"\"\"Run as new engine event handler\"\"\"\r\n self.parent.notebook.RunNewEngine()\r\n\r\n #help menu\r\n def OnHelp(self,event):\r\n \"\"\"Open the help browser\"\"\"\r\n open_help() \r\n\r\n def OnTip(self,event):\r\n \"\"\"Open the help tips\"\"\"\r\n app=wx.GetApp()\r\n app.ShowTips(override=True)\r\n\r\n def OnAbout(self,event):\r\n \"\"\"Opens the about box\"\"\"\r\n PTKInfoDialog()\r\n","sub_path":"PythonToolkit-14.04.04/ptk_lib/core_tools/editor/editor_frame.py","file_name":"editor_frame.py","file_ext":"py","file_size_in_byte":27876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"165951310","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jan 6 14:59:08 2021\n\n@author: tcai\n\"\"\"\n\n\nfrom gensim.models import Phrases\nfrom gensim.corpora import Dictionary, MmCorpus\nfrom gensim.models.word2vec import LineSentence\nfrom gensim.models.ldamulticore import 
LdaMulticore\nimport json\n\nimport os\nimport codecs\n\nimport warnings\nimport _pickle as pickle\nimport pandas as pd\nimport datetime\nimport numpy as np\nimport itertools\nimport spacy\nimport nltk\nimport nltk.classify.util\nfrom nltk.tokenize import sent_tokenize, word_tokenize, RegexpTokenizer\nfrom nltk.stem import PorterStemmer, WordNetLemmatizer\nfrom nltk.classify import NaiveBayesClassifier\nfrom nltk.corpus import stopwords, names\nfrom nltk.util import ngrams\nimport s3fs\nimport boto3\nfrom botocore.exceptions import ClientError\n\n\nimport nltk\nimport nltk.classify.util\nfrom nltk.tokenize import sent_tokenize, word_tokenize, RegexpTokenizer\n\nfrom nltk.corpus import stopwords, names\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\nfrom gensim import corpora, models, similarities, matutils\nfrom sklearn.decomposition import TruncatedSVD\nfrom sklearn.decomposition import NMF\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom sklearn import preprocessing\nimport json\nimport pyodbc\nfrom pyodbc import DataError\nfrom pyodbc import IntegrityError\nfrom bs4 import BeautifulSoup\nimport requests\nimport threading\nimport re\nimport datetime\nfrom datetime import timedelta\nimport pytz\nimport time\nimport random\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nimport sqlalchemy\nfrom sqlalchemy import create_engine\nimport urllib\nfrom ast import literal_eval\nimport yfinance as yf\nimport urllib\ntry:\n import Queue\nexcept:\n import queue as Queue\nimport json\nimport logging\nfrom get_all_tickers import get_tickers as gt\n\nfrom collections import deque\nimport praw\n\n\nACCESS_KEY = 'AKIAWYG6AKIP2FMXUUXK'\nSECRET_KEY = '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'\nREGION = 'us-east-2'\n\nbase_url = 'http://webhose.io'\nrun_date = str(datetime.date.today())\nCLIENT_ID = 'MakOOBfK0bVAtw'\nSECRET_TOKEN = '76MTpF6ZVdJQ2uuApZC17kRbIdXIgQ'\nusername = 'tcai95'\npassword = 'Working1'\nreddit = praw.Reddit(user_agent=\"Comment Extraction (by /u/USERNAME)\",\n client_id=CLIENT_ID, client_secret=SECRET_TOKEN,\n username=username, password=password)\n\nticker_action_dict = dict() \nlist_of_tickers = []\n\ndef create_bucket(bucket_name, region=None):\n \"\"\"Create an S3 bucket in a specified region\n\n If a region is not specified, the bucket is created in the S3 default\n region (us-east-1).\n\n :param bucket_name: Bucket to create\n :param region: String region to create bucket in, e.g., 'us-west-2'\n :return: True if bucket created, else False\n \"\"\"\n\n # Create bucket\n try:\n if region is None:\n s3_client = boto3.client('s3', aws_access_key_id=ACCESS_KEY, aws_secret_access_key=SECRET_KEY)\n s3_client.create_bucket(Bucket=bucket_name)\n else:\n s3_client = boto3.client('s3', region_name=region, aws_access_key_id=ACCESS_KEY, aws_secret_access_key=SECRET_KEY)\n location = {'LocationConstraint': region}\n s3_client.create_bucket(Bucket=bucket_name,\n CreateBucketConfiguration=location)\n except ClientError as e:\n logging.error(e)\n return False\n return True\n\n# Output the all bucket names\ndef put_file_s3(bucket_name, local_dir, result_dict = None, bucket_folder = None):\n \n s3_client = boto3.client('s3', aws_access_key_id=ACCESS_KEY, aws_secret_access_key=SECRET_KEY)\n\n # Retrieve the list of existing buckets\n response = s3_client.list_buckets()\n if bucket_name not in [bucket['Name'] for bucket in response['Buckets']]:\n create_bucket(bucket_name, 'us-east-2')\n content = open(local_dir, 
'rb')\n \n fileName = local_dir.split('/')[-1]\n \n if bucket_folder != None:\n key = bucket_folder + '/' + fileName\n else:\n key = fileName\n \n s3_client = boto3.client('s3', aws_access_key_id=ACCESS_KEY, aws_secret_access_key=SECRET_KEY)\n if result_dict == None:\n \n s3_client.put_object(\n Bucket=bucket_name, \n Key = key,\n Body=content\n )\n \n # directly upload file from memory\n else:\n \n s3_client.put_object(\n Bucket = bucket_name,\n Key = key,\n Body = (bytes(json.dumps(result_dict).encode('UTF-8')))\n )\n\nkinesis_client = boto3.client('kinesis', \n region_name=REGION, # enter the region\n aws_access_key_id=ACCESS_KEY, # fill your AWS access key id\n aws_secret_access_key=SECRET_KEY) # fill you aws secret access key\nbase_url = 'https://www.reddit.com'\n\n\ndef get_all_tickers():\n #file_path = 'C:/Users/tcai/Desktop/MIG_Capital/wstBet'\n file_path = '/home/ec2-user/WSB/ticker_list'\n #file_path = 'C:/Users/tcai/Desktop/MIG_Capital/wstBet/ticker_list'\n files = None\n for root, dirs, files in os.walk(file_path):\n root = root\n dirs = dirs\n files = deque(files)\n df_list = []\n \n while len(files) >0:\n file = files.popleft()\n full_file_path = os.path.join(root, file).replace(\"\\\\\", \"/\")\n df = pd.read_csv(full_file_path)\n df_list.append(df)\n # concatenate each df with file date into one single large df for insertion\n df_final = pd.concat(df_list)\n return df_final['Symbol'].values.tolist()\n\nlist_of_tickers = get_all_tickers()\n\n\ndef ticker_lookup(row):\n \n if pd.isnull(row['ticker']) :\n full_text = row[\"body\"]\n tokenized = word_tokenize(full_text)\n \n # Tokenize and compare to the list of U.S. tickers\n for token in tokenized:\n #print(token)\n if ':' in token:\n token = token.split(':')[1]\n token = token.strip('()') \n if token in list_of_tickers:\n ticker = token\n return ticker\n\ndef insert_data(df, schema, table_name):\n creds = dict(driver = '{ODBC Driver 13 for SQL SERVER}', server = 'bc5756vevd.database.windows.net', port = 1433, database = 'MIG', uid = 'garquette', pwd = 'Working1!')\n params = 'DRIVER=' + creds['driver']+';'\\\n 'SERVER=' + creds['server']+';'\\\n 'DATABASE=' + creds['database']+ ';'\\\n 'UID=' + creds['uid']+ ';'\\\n 'PWD=' + creds['pwd']+ ';'\\\n 'PORT=' + str(creds['port']) + ';'\n params = urllib.parse.quote_plus(params)\n db = create_engine('mssql+pyodbc:///?odbc_connect=%s' % params)\n \n # using df.to_sql we could avoid of having nan insertion issue\n df.to_sql(name = table_name, con = db, schema = schema, if_exists = 'append', index = False)\n\ndef get_post_comment():\n global reddit, kinesis_client, ticker_action_dict\n \n subreddit = reddit.subreddit(\"wallstreetbets\")\n #df = pd.DataFrame()\n df = pd.DataFrame()\n while True:\n try:\n \n for comment in subreddit.stream.comments():\n #print(submission.title) # Output: the submission's title\n #print(submission.score) # Output: the submission's score\n #print(submission.id) # Output: the submission's ID\n #print(submission.url)\n \n print(datetime.datetime.fromtimestamp(comment.created_utc) + datetime.timedelta(hours = -8))\n #pprint.pprint(vars(submission))\n #pprint.pprint(vars(comment))\n \n #reddit.submission(comment.link_id.replace('t3_', '')).num_comments\n try:\n print('title: ')\n print(comment.link_title)\n parent_id = str(comment.parent())\n original = reddit.comment(parent_id)\n #link_id = comment.link_id.replace('t3_', '')\n #if link_id not in link_id_dict.keys():\n #link_id_dict[link_id] = reddit.submission(comment.link_id.replace('t3_', '')).permalink\n 
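# Editor's note (added comment): 'original' now holds the parent comment fetched above; its body rides along with every streamed reply below as submission_body, so each row keeps its thread context.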
\n #print('parent: ')\n #print(original.body)\n #print('reply:')\n #print(comment.body)\n except praw.exceptions.PRAWException as e:\n pass\n try:\n comment_name = comment.name\n except:\n comment_name = None\n print('cannot find comment name')\n continue\n try:\n comment_body = comment.body\n except:\n comment_body = None\n print('cannot find comment body')\n continue\n try:\n comment_likes = comment.likes\n except:\n comment_likes = None\n print('cannot find comment likes')\n \n try:\n comment_author = comment.author.name\n except:\n comment_author = None\n print('cannot find comment author')\n \n try:\n comment_id = comment.id\n except:\n comment_id = None\n print('cannot find comment id')\n \n try:\n comment_ups = comment.ups\n except:\n comment_ups = None\n print('cannot find comment ups')\n \n try:\n comment_downs = comment.downs\n except:\n comment_downs = None\n print('cannot find comment downs')\n \n try:\n comment_score = comment.score\n except:\n comment_score = None\n print('cannot find comment score')\n \n try:\n comment_creation_utc = str(datetime.datetime.fromtimestamp(comment.created_utc) + datetime.timedelta(hours = -8))\n except:\n comment_creation_utc = None\n print('cannot find comment creation time')\n \n try:\n comment_permalink = base_url + comment.permalink\n except:\n comment_permalink = None\n print('cannot find comment permalink')\n \n try:\n comment_link_title = comment.link_title\n except:\n comment_link_title = None\n print('cannot find comment link title')\n \n \n try:\n original_body = original.body\n except:\n original_body = None\n print('cannot find post body')\n \n try:\n original_id = original.id\n except:\n original_id = None\n print('cannot find original id')\n \n try:\n original_link = base_url + original.permalink\n except:\n original_link = None\n print('cannot find original link')\n \n try:\n original_ups = original.ups\n except:\n original_ups = None\n print('cannot find original ups')\n \n try:\n original_downs = original.downs\n except:\n original_downs = None\n print('cannot find original downs')\n \n try:\n original_likes = original.likes\n except:\n original_likes = None\n print('cannot find original likes')\n \n \n \n payload = {\n 'name': comment_name,\n 'comment_body':comment_body,\n 'comment_likes':comment_likes,\n 'comment_author':comment_author,\n 'comment_id': comment_id,\n 'comment_ups': comment_ups,\n 'comment_downs': comment_downs,\n 'comment_score': comment_score,\n 'created_utc': comment_creation_utc,\n 'comment_permalink': comment_permalink,\n 'link_title': comment_link_title,\n #'link_permalink':base_url + link_id_dict[link_id],\n 'submission_body': original_body,\n 'submission_id': original_id,\n 'submission_permalink': original_link,\n 'submission_ups': original_ups,\n 'submission_downs':original_downs,\n 'submission_likes':original_likes,\n \n \n 'post_type': 'comment'}\n \n payload_temp = payload\n if payload_temp['submission_body']:\n payload_temp['submission_body'] = payload_temp['submission_body'][:1000]\n if payload_temp['comment_body']:\n payload_temp['comment_body'] = payload_temp['comment_body'][:1000]\n df= df.append(payload_temp, ignore_index = True)\n \n \n if len(df)>=100:\n df['created_utc'] = pd.to_datetime(df['created_utc'])\n df = df[pd.notnull(df['comment_body'])]\n df.reset_index(inplace = True, drop = True)\n df = df.apply(process_entities, axis = 1)\n df = df[(df['ticker'] != '[]') & (pd.notnull(df['ticker']))]\n df.drop_duplicates(inplace = True)\n posts_df2_filter = df\n posts_df2_filter.loc[:, 
'ticker'] = posts_df2_filter.loc[:,'ticker'].apply(lambda x: literal_eval(x))\n posts_df2_final_expand = pd.DataFrame({col:np.repeat(posts_df2_filter[col].values, posts_df2_filter['ticker'].str.len()) for col in posts_df2_filter.columns.drop('ticker')}).assign(**{'ticker':np.concatenate(posts_df2_filter['ticker'].values)})\n ticker_action_ls = []\n \n for publish_date in list(ticker_action_dict.keys()):\n ticker_action_df = pd.DataFrame(ticker_action_dict[publish_date]).transpose()\n ticker_action_df.reset_index( inplace =True)\n ticker_action_df.rename(columns = {'index':'ticker'}, inplace = True)\n ticker_action_df['publish_date'] = publish_date\n ticker_action_ls.append(ticker_action_df)\n ticker_action_df = pd.concat(ticker_action_ls) \n insert_data(posts_df2_final_expand, schema = 'WSTBET', table_name = 'reddit_streaming')\n insert_data(ticker_action_df, schema = 'WSTBET', table_name = 'ticker_action_streaming')\n df = pd.DataFrame()\n posts_df2_filter = None\n posts_df2_final_expand = None\n ticker_action_df = None\n ticker_action_dict = dict()\n \n \n try:\n kinesis_client.put_record(StreamName = 'reddit_streaming2', Data = json.dumps(payload), \\\n PartitionKey = str(comment.parent()))\n #print('enter here')\n except (AttributeError, Exception) as e:\n print (e)\n pass\n except (AttributeError, Exception) as e:\n print(e)\n pass\n '''\n df = df.append({\n 'name': submission.name,\n 'body':submission.title,\n 'likes':submission.likes,\n 'author':submission.author,\n 'id': submission.id,\n 'permalink': submission.permalink,\n \n 'ups': submission.ups,\n 'downs': submission.downs,\n 'score': submission.score,\n 'created_utc': datetime.datetime.fromtimestamp(submission.created_utc),\n \n \n 'post_type': 'post'\n }, ignore_index=True)\n submission.comments.replace_more(limit=None)\n for comment in submission.comments.list():\n comment_create_time = datetime.datetime.fromtimestamp(comment.created_utc)\n prev_date = datetime.datetime.today()+timedelta(days = -1)\n if comment_create_time >= prev_date:\n df = df.append({\n 'name': comment.name,\n 'body':comment.body,\n 'likes':comment.likes,\n 'author':comment.author,\n 'id': comment.id,\n 'permalink': comment.permalink,\n \n 'ups': comment.ups,\n 'downs': comment.downs,\n 'score': comment.score,\n 'created_utc': datetime.datetime.fromtimestamp(comment.created_utc),\n \n \n 'post_type': 'comment'}, ignore_index = True)\n return df\n '''\ndef process_entities(row):\n global ticker_action_dict, list_of_tickers\n wordnet_lemmatizer = WordNetLemmatizer()\n \n \n publish_date = row['created_utc']\n publish_date = publish_date.date()\n ticker_ls = []\n full_text = row['comment_body'].strip()\n if row['submission_body']:\n full_text = row['submission_body'].strip() + ' ' + full_text\n \n \n tokenized = word_tokenize(full_text)\n \n tokenized = [token2.strip('()') for token2 in [token1.split(':')[1] if ':' in token1 else token1 for token1 in tokenized]]\n # lemmatize token and perform part of speech tagging\n original_token = [token for token in tokenized if token]\n pos_tag_tokens = nltk.pos_tag([wordnet_lemmatizer.lemmatize(token.lower()) for token in tokenized if token])\n \n for token_pos in pos_tag_tokens: \n # only consider certain part of speech of tokens\n if token_pos[1] in ['NN', 'NNS', 'NNP', 'NNPS']:\n token = original_token[pos_tag_tokens.index(token_pos)]\n if (token == 'RH' and 'restoration hardware' in full_text.lower()) or token =='$RH' or (token == 'RH' and '¢' in full_text) or (token == 'RH' and '$' in full_text) or (token != 
'RH' and token in list_of_tickers):\n            \n                ticker_ls.append(token)\n            \n                keyword_freq = {'put': 0, 'call':0, 'pump':0, 'dump':0}\n                ticker_index = pos_tag_tokens.index(token_pos)\n                action = None\n                # find posters' action \n                for keyword in keyword_freq.keys():\n                    if ticker_index +1 < len(pos_tag_tokens):\n                        # AAPL puts\n                        if keyword in original_token[ticker_index +1].lower():\n                            action = keyword\n                            break\n                    # puts on AAPL or put AAPL\n                    if ticker_index -1 >=0:\n                        if keyword in original_token[ticker_index -1].lower():\n                            action = keyword\n                            break\n                    \n                    if ticker_index -2 >=0:\n                        if keyword in original_token[ticker_index -2].lower():\n                            action = keyword\n                            break\n                \n                if publish_date and publish_date in ticker_action_dict.keys():\n                \n                    if token in ticker_action_dict[publish_date].keys():\n                        if action != None:\n                            ticker_action_dict[publish_date][token][action] = ticker_action_dict[publish_date][token][action]+1\n                        \n                            #print('enter here')\n                    else:\n                        ticker_action_dict[publish_date].update({token: keyword_freq})\n                else:\n                    ticker_action_dict.update({publish_date:{token: keyword_freq}})\n        \n    row['ticker'] = json.dumps(list(set(ticker_ls)))\n    return row\n\ndef main():\n    global reddit\n    run_time = datetime.datetime.today()\n    get_post_comment()\n    \n\nif __name__ == '__main__':\n    main() \n\n","sub_path":"reddit_stream_process.py","file_name":"reddit_stream_process.py","file_ext":"py","file_size_in_byte":20649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"555274792","text":"import time\nfrom turtle import *\n# imports functions from turtle\nprint(\"Hi, I can draw a triangle\")\ntrekantRettning = input(\"Do you want the tip up or down? (O/N)? \")\npennfarge = input (\"Choose pen colour, NTNU yellow (G) or NTNU turquoise (T)\")\nfyllfarge = input(\"Choose fill colour, NTNU blue (B), NTNU pink (R) or NTNU orange (O)\")\n\n\n\nif (pennfarge == 'G') :\n    pencolor(\"#f1d282\")\nelse :\n    pencolor(\"#5cbec9\")\npensize(7) # make the pen 7 pixels thick\nbgcolor(\"#00509e\")\nif (fyllfarge == 'B') :\n    fillcolor(\"#00509e\")\nelif(fyllfarge == 'R') :\n    fillcolor(\"#ad208e\")\nelse :\n    fillcolor(\"#f58025\")\n# Draw a filled triangle\nbegin_fill()\n\nif (trekantRettning == \"O\") :\n    forward(200) # move 200 pixels forward\n    left(120) # turn 120 degrees left\n    forward(200) \n    left(120) \n    forward(200)\nelse :\n    forward(200) # move 200 pixels forward\n    left(-120) \n    forward(200) \n    left(-120) \n    forward(200)\nend_fill()\n \n# Keeps the window with the drawing open for 10 seconds. 
Keep this as the last line of your code\ntime.sleep(10)","sub_path":"ITGK/Øving2/trekant.py","file_name":"trekant.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"173607650","text":"#generalized\ndef max_end3(nums):\n    if nums[0] > nums[-1]:\n        for i in range(len(nums)):\n            nums[i] = nums[0]\n    \n    else:\n        for i in range(len(nums)):\n            nums[i]=nums[-1]\n    return nums\n\n#their\n# def max_end3(nums):\n#   big = max(nums[0], nums[2])\n#   nums[0] = big\n#   nums[1] = big\n#   nums[2] = big\n#   return nums\n\nprint(max_end3([1, 2, 3]) )\nprint(max_end3([11, 5, 9]) )\nprint(max_end3([2, 11, 3]) )","sub_path":"List-1/max_end3.py","file_name":"max_end3.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"260524049","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nFuture Daily Data Fetch\nCreated on 2020/1/4\n@author: langqing2017\n@group : pylon\n\"\"\"\n\nimport re\nimport datetime\nimport json\nfrom pylon.tools.urllib_utils import *\nfrom pylon.model.future import FutureBarData\n\nregex_code = \"^[a-zA-Z0-9]{5,6}$\"\nregex_delivery_month = \"^\\d{4}$\"\n\ndef __parse_float(str, value):\n    if str == \"\" or str == \"0\":\n        return value\n    return float(str)\n\ndef __parse_int(str):\n    if str == \"\":\n        return 0\n    return int(str)\n\ndef fetch_future_daily_shfe(date):\n    url = \"http://www.shfe.com.cn/data/dailydata/kx/kx%s.dat\" % date.strftime(\"%Y%m%d\")\n    txt = fetch_url(url)\n    data = json.loads(txt)\n    md_map = {}\n    for item in data[\"o_curinstrument\"]:\n        product_id = item[\"PRODUCTID\"].strip().split(\"_\")[0]\n        if not re.match(regex_delivery_month, item[\"DELIVERYMONTH\"]):\n            continue\n        instrument = \"%s%s\" % (product_id, item[\"DELIVERYMONTH\"])\n        pre_clear_price = float(item[\"PRESETTLEMENTPRICE\"])\n        pre_close_price = pre_clear_price\n        open_price = __parse_float(item[\"OPENPRICE\"], pre_clear_price)\n        high_price = __parse_float(item[\"HIGHESTPRICE\"], pre_clear_price)\n        low_price = __parse_float(item[\"LOWESTPRICE\"], pre_clear_price)\n        close_price = __parse_float(item[\"CLOSEPRICE\"], pre_clear_price)\n        clear_price = __parse_float(item[\"SETTLEMENTPRICE\"], pre_clear_price)\n        volumn = __parse_int(item[\"VOLUME\"])\n        money = 0.0\n        position = int(item[\"OPENINTEREST\"])\n        data = FutureBarData(date, open_price, high_price, low_price, \\\n            close_price, clear_price, pre_close_price, pre_clear_price, volumn, money, position)\n        md_map[instrument] = data\n    return md_map\n\ndef fetch_future_daily_dce(date, product_map):\n    url = \"http://www.dce.com.cn/publicweb/quotesdata/exportDayQuotesChData.html\"\n    data = {\n        \"dayQuotes.variety\": \"all\",\n        \"dayQuotes.trade_type\": \"0\",\n        \"year\": date.year,\n        \"month\": date.month-1,\n        \"day\": date.day,\n        \"exportFlag\": \"txt\"\n    }\n    txt = fetch_url(url, encode=\"utf8\", post=data)\n    md_map = {}\n    for line in txt.split(\"\\n\"):\n        line = line.strip()\n        ss = [s.strip() for s in line.split(\"\\t\") if s != \"\"]\n        if len(ss) != 14:\n            continue\n        product = ss[0]\n        if product not in product_map:\n            continue\n        instrument = \"%s%s\" % (product_map[product], ss[1])\n        pre_clear_price = float(ss[6].strip().replace(\",\", \"\"))\n        pre_close_price = pre_clear_price\n        open_price = __parse_float(ss[2].strip().replace(\",\", \"\"), pre_clear_price)\n        high_price = __parse_float(ss[3].strip().replace(\",\", \"\"), pre_clear_price)\n        low_price = __parse_float(ss[4].strip().replace(\",\", \"\"), 
pre_clear_price)\n        close_price = __parse_float(ss[5].strip().replace(\",\", \"\"), pre_clear_price)\n        clear_price = float(ss[7].strip().replace(\",\", \"\"))\n        volumn = int(ss[10].strip().replace(\",\", \"\"))\n        money = float(ss[13].strip().replace(\",\", \"\"))\n        position = int(ss[11].strip().replace(\",\", \"\"))\n        data = FutureBarData(date, open_price, high_price, low_price, \\\n            close_price, clear_price, pre_close_price, pre_clear_price, volumn, money, position)\n        md_map[instrument] = data\n    return md_map\n\ndef fetch_future_daily_czce(date):\n    url = \"http://www.czce.com.cn/cn/DFSStaticFiles/Future/%s/%s/FutureDataDaily.txt\" \\\n        % (date.strftime(\"%Y\"), date.strftime(\"%Y%m%d\"))\n    txt = fetch_url(url, allow_404=True)\n    md_map = {}\n    for line in txt.split(\"\\n\"):\n        line = line.strip()\n        ss = line.split(\"|\")\n        if len(ss) != 14:\n            continue\n        if not re.match(regex_code, ss[0].strip()):\n            continue\n        instrument = ss[0].strip()\n        pre_clear_price = float(ss[1].strip().replace(\",\", \"\"))\n        pre_close_price = pre_clear_price\n        open_price = __parse_float(ss[2].strip().replace(\",\", \"\"), pre_clear_price)\n        high_price = __parse_float(ss[3].strip().replace(\",\", \"\"), pre_clear_price)\n        low_price = __parse_float(ss[4].strip().replace(\",\", \"\"), pre_clear_price)\n        close_price = __parse_float(ss[5].strip().replace(\",\", \"\"), pre_clear_price)\n        clear_price = float(ss[6].strip().replace(\",\", \"\"))\n        volumn = int(ss[9].strip().replace(\",\", \"\"))\n        money = float(ss[12].strip().replace(\",\", \"\"))\n        position = int(ss[10].strip().replace(\",\", \"\"))\n        data = FutureBarData(date, open_price, high_price, low_price, \\\n            close_price, clear_price, pre_close_price, pre_clear_price, volumn, money, position)\n        md_map[instrument] = data\n    return md_map\n\nif __name__ == \"__main__\":\n    # print(fetch_future_daily_czce(datetime.datetime.strptime(\"20191223\", \"%Y%m%d\")))\n    # print(fetch_future_daily_dce(datetime.datetime.strptime(\"20200103\", \"%Y%m%d\"), {\"豆一\": \"a\"}))\n    print(fetch_future_daily_shfe(datetime.datetime.strptime(\"20191223\", \"%Y%m%d\")))\n","sub_path":"pylon/stone/future/fetch_daily.py","file_name":"fetch_daily.py","file_ext":"py","file_size_in_byte":5068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"302751602","text":"'''Creates a class for white backgammon pieces\nCreated Spring 2017\n@author: Nate Gamble (neg6)\n'''\n\nclass Backgammon_White:\n    def __init__(self):\n        self._pieces = [1, 1,\n                        12, 12, 12, 12, 12,\n                        17, 17, 17,\n                        19, 19, 19, 19, 19]\n    \n    def get_pieces(self):\n        return self._pieces\n    \n    def set_pieces(self, list_of_pieces):\n        self._pieces = list_of_pieces\n        self.order()\n    \n    def move_piece(self, distance, piece, other):\n        if distance <= 0:\n            raise ValueError('Distance must be greater than 0')\n        if piece != 0 and self.capturedPiece():\n            raise ValueError('You must move your captured piece first')\n        if piece in self._pieces:\n            idx = self._pieces.index(piece)\n            if self.validMove(piece + distance, other):\n                self._pieces[idx] = piece + distance\n            else:\n                raise ValueError('That is an invalid place to move your piece')\n        else:\n            raise ValueError('The chosen piece is not valid')\n        self.capture(other)\n        self.order()\n    \n    def __str__(self):\n        return 'Your pieces are at: ' + str(self._pieces)\n    \n    def order(self):\n        self._pieces.sort()\n    \n    def capturedPiece(self):\n        if 0 in self._pieces:\n            return True\n        else:\n            return False\n    \n    def capture(self, other):\n        op = other.get_pieces()[:]\n        
for piece in self._pieces:\n            if piece in op:\n                op[op.index(piece)] = 25\n        other.set_pieces(op)\n\n    def validMove(self, position, other):\n        res = 0\n        #The following line was taken almost straight from stackoverflow (http://stackoverflow.com/questions/9542738/python-find-in-list)\n        idx = [i for i,x in enumerate(other.get_pieces()) if x==position]\n        if len(idx) <= 1:\n            res += 1\n        if position >= 25:\n            if self._pieces[0] >= 19:\n                res += 1\n        else:\n            res += 1\n        if res == 2:\n            return True\n        else:\n            return False\n    \n    \n    def win(self):\n        return self._pieces == [25,25,25,25,25,25,25,25,25,25,25,25,25,25,25]\n    \n    \n    \n    \n    \nif __name__ == '__main__':\n    player = Backgammon_White()\n    p1 = Backgammon_White()\n    p1.set_pieces([])\n    assert player.get_pieces() == [1, 1, 12, 12, 12, 12, 12, 17, 17, 17, 19, 19, 19, 19, 19]\n    player.move_piece(10, 1, p1)\n    assert player.get_pieces() == [1, 11, 12, 12, 12, 12, 12, 17, 17, 17, 19, 19, 19, 19, 19]\n    print(player)\n    try:\n        player.move_piece(0, 1, p1)\n        print('Error with move_piece')\n    except:\n        print('move_piece working well with distance')\n    try:\n        player.move_piece(1, 20, p1)\n        print('Error with move_piece')\n    except:\n        print('move_piece working well with pieces')\n    player.set_pieces([0,0,0,1,0,0,0])\n    assert player.get_pieces() == [0,0,0,0,0,0,1]\n    try:\n        player.move_piece(1, 1, p1)\n    except:\n        print('capturedPiece is working well')\n    player.move_piece(1, 0, p1)\n    assert player.get_pieces() == [0,0,0,0,0,1,1]\n    p1.set_pieces([1,3,3,4,5])\n    player.capture(p1)\n    assert p1.get_pieces() == [3,3,4,5,25]\n    p1.set_pieces([2,2])\n    assert player.validMove(2, p1) == False\n    assert player.validMove(3, p1) == True\n    p1.set_pieces([5,5])\n    try:\n        player.move_piece(4, 1, p1)\n    except:\n        print('validMove is working inside of move_piece')\n    player.move_piece(4, 0, p1)\n    assert player.get_pieces() == [0,0,0,0,1,1,4]\n    player.set_pieces([25,25,25,25,25,25,25,25,25,25,25,25,25,25,25])\n    assert player.win() == True\n    \n    \n    print('All tests passed')","sub_path":"exe/backgammon_white.py","file_name":"backgammon_white.py","file_ext":"py","file_size_in_byte":3804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"341183379","text":"from typing import Tuple\n\nimport torch\n\n\ndef create_mask(src: torch.Tensor,\n                trg: torch.Tensor,\n                src_pad_idx: int,\n                trg_pad_idx: int) -> Tuple[torch.Tensor, torch.Tensor]:\n    src_mask = _create_padding_mask(src, src_pad_idx)\n    trg_mask = None\n    if trg is not None:\n        trg_mask = _create_padding_mask(trg, trg_pad_idx)  # (256, 1, 33)\n        nopeak_mask = _create_nopeak_mask(trg)  # (1, 33, 33)\n        trg_mask = trg_mask & nopeak_mask  # (256, 33, 33)\n\n    return src_mask, trg_mask\n\n\ndef _create_padding_mask(seq: torch.Tensor, pad_idx: int) -> torch.Tensor:\n    \"\"\"\n    Reshapes seq from (256, 33) to (256, 1, 33).\n\n    Positions holding the padding index are set to False, as shown below 
(in the returned tensor).\n    Each vector below corresponds to one sentence; True means a real word is present.\n    tensor([[[ True,  True,  True,  True, False, False, False]],\n            [[ True,  True, False, False, False, False, False]],\n            [[ True,  True,  True,  True,  True,  True, False]]])\n    \"\"\"\n    return (seq != pad_idx).unsqueeze(-2)\n\n\ndef _create_nopeak_mask(trg) -> torch.Tensor:\n    \"\"\"\n    NO PEAK MASK\n    For the target, mask out the following words so each position cannot peek ahead\n    \"\"\"\n    batch_size, seq_len = trg.size()\n    nopeak_mask = (1 - torch.triu(torch.ones(1, seq_len, seq_len, device=trg.device), diagonal=1)).bool()\n    return nopeak_mask\n","sub_path":"300 Transformer/transformer/mask.py","file_name":"mask.py","file_ext":"py","file_size_in_byte":1451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"200857941","text":"##################################################################\n# Created by Marco Soto on 8/21/18\n# Solves HackerRank problem 'Beautiful Binary String'\n# Removes all instances of '010' substring in given binary string\n# by using minimum flipping of bits.\n# Complexity: O(n)\n##################################################################\n\nimport math\n\nN = input()\nB = input()\nflips = seqCount = 0\ninSequence = False\ni = 1\n\nwhile i < len(B)-1:\n\tsubstring = B[i-1:i+2] # 3 character substring\n\tif substring == '010':\n\t\tinSequence = True if not inSequence else inSequence\n\t\tseqCount += 1\n\t\ti += 1\n\telse:\n\t\tinSequence = False\n\t\tflips += math.ceil(seqCount/2)\n\t\tseqCount = 0\n\ti += 1\n\nflips += math.ceil(seqCount/2)\nprint(flips)","sub_path":"HackerRank/Medium/binaryString.py","file_name":"binaryString.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"70123806","text":"from collatz import Collatz\nimport csv\nimport re\nimport os\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport math\n\nINT_SIZE = 32\nMAX_CHECK_NUMBER = 10000\nDATA_CSV_NAME = 'Collatz_Iterations'\n\ndef main():\n    while True:\n        print('1. find iterations')\n        print('2. visualize iterations')\n        print('0. to exit')\n        cho = int(input())\n        if cho == 1 :\n            findIteration_init()\n            if MAX_CHECK_NUMBER < 1_000_000:\n                findIteration()\n            else:\n                findIteration_fast()\n        elif cho == 2 :\n            visualize_init()\n        elif cho == 0 :\n            exit()\n        else:\n            print('invalid input') \n\ndef visualize_init():\n    dir=\"data\\\\\"\n    files=[]\n    for filename in os.listdir(dir):\n        if filename.endswith('.csv'):\n            files.append(filename)\n    print('choose csv file')\n    for i,filename in enumerate(files):\n        print('{}. 
{}'.format(i+1,filename))\n cho = int(input())\n visualize(files[cho-1])\n\ndef visualize(filename):\n maxNum, intsize = extractFilename(filename)\n csv = pd.read_csv('data\\\\'+filename,dtype={'Integer Overflow':str})\n maxValue = max(csv['Iteration'])\n corr = round(csv.corr()['Iteration']['X'],3)\n overflows = 0\n try:\n overflows = csv['Integer Overflow'].value_counts()['overflow']\n except KeyError:\n pass\n stdDeviation = round(csv['Iteration'].std(),3)\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n csv.plot(x='X',y='Iteration',ax=ax)\n ax.set_ylabel('Iteration')\n ax.set_title('X vs Iteration '+maxNum+' '+intsize)\n fig.text(.01,.01,'Max: {} Overflows: {} Correlation: {} stdDeviation: {} '.format(maxValue,overflows,corr,stdDeviation))\n\n freq = csv['Iteration'].value_counts()\n freq.sort_index(inplace=True)\n fig2 = plt.figure()\n ax2 = fig2.add_subplot(111)\n freq.plot(ax=ax2)\n ax2.set_xlabel('Iteration')\n ax2.set_ylabel('Frequency')\n ax2.set_title('Iteration vs Frequency '+maxNum+' '+intsize)\n\n plt.show()\n\n\n\ndef extractFilename(filename):\n filename = re.sub(r'\\.csv$','',filename)\n arr = filename.split('_')\n return arr[-2],arr[-1]\n#---------------------------------------------------\ndef findIteration_init():\n print('enter max value to run to: ')\n m = re.sub(r'[^\\d]','',input())\n if len(m) == 0:\n m = 1000\n print(' using default value %d'%(m))\n print('enter integer size: ')\n n = re.sub(r'[^\\d]','',input())\n if len(n) == 0:\n n = 32\n print(' using default value %d'%(n))\n global MAX_CHECK_NUMBER,INT_SIZE\n MAX_CHECK_NUMBER = int(m)\n INT_SIZE = int(n)\n\ndef findIteration():\n print('Calculating number of iterations in collatz conjecture')\n print('Highest input : %d' % (MAX_CHECK_NUMBER))\n print('Integer Size : %d' % (INT_SIZE))\n\n dataHeading = ('X','Iteration','Integer Overflow','First Overflow Index','Overflow Number')\n dataTable = list()\n for i in range(1,MAX_CHECK_NUMBER+1):\n seq = Collatz.collatzSequence(i);\n numRounds = len(seq)\n isOverflow,overflowIndex,overflowNum = checkIntegerOverflow(seq)\n dataRow = [i,numRounds]\n if isOverflow:\n dataRow.extend(['overflow',overflowIndex,overflowNum])\n dataTable.append(dataRow)\n print(i,end='\\r')\n f = writeDataCSV(dataTable,dataHeading)\n print('Data File : ' + f)\n\ndef findIteration_fast():\n global MAX_CHECK_NUMBER\n print('Calculating number of iterations in collatz conjecture')\n print('Highest input : %d' % (MAX_CHECK_NUMBER))\n print('Integer Size : %d' % (INT_SIZE))\n \n dataHeading = ('X','Iteration','Integer Overflow','First Overflow Index','Overflow Number')\n dataTable = list()\n int_size_mask = (1<< INT_SIZE) - 1\n MAX_CHECK_NUMBER = MAX_CHECK_NUMBER & int_size_mask\n print_mask = ((1 << 20)-1)\n\t\n for i in range(1,MAX_CHECK_NUMBER+1):\n n = i & int_size_mask\n numRounds = 0\n while n > 1:\n if (n & 1) == 1:\n n = 3*n + 1\n else:\n n = n // 2\n numRounds += 1\n n = n & int_size_mask\n if (i & print_mask) == 0: \n print(i,end=\"\\r\")\n\t\t\n dataRow = [i,numRounds]\n dataTable.append(dataRow)\n #print(i,end=\"\\r\")\n \n f = writeDataCSV(dataTable,dataHeading)\n print('Data File : ' + f)\n \n\ndef checkIntegerOverflow(seq):\n for index,num in enumerate(seq):\n if num >= pow(2,INT_SIZE):\n return True,index,num\n return False,0,0\n\ndef writeDataCSV(data,heading = None):\n fileName = \"data\\{}_{}_{}.csv\".format(DATA_CSV_NAME,MAX_CHECK_NUMBER,INT_SIZE)\n with open(fileName,'w',newline='') as f:\n writer = csv.writer(f)\n if heading: writer.writerow(heading)\n 
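# note (added): write every collected data row beneath the optional heading\n        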
writer.writerows(data)\n return fileName\n\n#----------------------------------------\n\nif __name__ == '__main__':\n main();","sub_path":"complexity_collatz/collatzComplexityAnalysis.py","file_name":"collatzComplexityAnalysis.py","file_ext":"py","file_size_in_byte":4956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"252609735","text":"\nmy_lst = [1, 1, 3, 4, 4, 5, 6, 8, 8, 8, 9]\n\ndef frequency_array(lst):\n high_num = max(lst)\n new_lst_len = high_num + 1\n new_lst = []\n my_dict = {}\n for elem in lst:\n num_count = lst.count(elem)\n my_dict.update({elem: num_count})\n print(my_dict)\n\nx = frequency_array(my_lst)","sub_path":"algorithms/sorting/1_frequency_array/solution/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"367615981","text":"#!/usr/bin/env python3\n\nimport json\nimport os\nimport pandas as pd\nimport pickle\ninput_file_path = '/data/opencorpus-dataset/papers-2017-02-21.json'\noutput_directory = '/data/split_opencorpus/'\n\ns = 1\nX = []\nshard_size = 100000\nfor line in open(input_file_path):\n X.append(line)\n if s % shard_size == 0:\n with open((output_directory + '{}.json').format(s//shard_size), 'w') as f:\n for line in X:\n f.write(\"%s\" % line)\n X = []\n s += 1\n","sub_path":"utils/split_dataset.py","file_name":"split_dataset.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"444463339","text":"from studentfile import StudentFileReader, StudentCSVFileReader, \\\n StudentFileWriter, StudentFileTerminalWriter\n\nCSV_FILE_NAME = \"students.csv\"\nCSV_FILE_OUT_NAME = \"students_outtest.csv\"\n\ndef main():\n reader = StudentCSVFileReader( CSV_FILE_NAME )\n reader.open()\n studentList = reader.fetchAll()\n reader.close()\n\n sortKey = raw_input( \"Input the sort key:\" )\n studentList.sort( key = lambda rec: getattr( rec, sortKey ) )\n\n writer = StudentFileWriter( CSV_FILE_OUT_NAME )\n studentDisplay = StudentFileTerminalWriter( \"terminal\" )\n writer.open()\n studentDisplay.open()\n writer.writeRecord( studentList[ 0 ] )\n studentDisplay.writeRecord( studentList[ 0 ] )\n writer.writeAll( studentList[ 1: ] )\n studentDisplay.writeAll( studentList[ 1: ] )\n writer.close()\n studentDisplay.close()\n\nmain()\n","sub_path":"data-structures-and-algorithms-using-python/chap1/1.5_studentrecords_writer_test.py","file_name":"1.5_studentrecords_writer_test.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"594511743","text":"from django.core import mail\nfrom django.core.management import call_command\nfrom django.urls import reverse\nfrom guardian.shortcuts import get_users_with_perms\n\nfrom ephios.core.models import AbstractParticipation, LocalParticipation, Notification\nfrom ephios.core.services.notifications.backends import enabled_notification_backends\nfrom ephios.core.services.notifications.types import (\n ConsequenceApprovedNotification,\n ConsequenceDeniedNotification,\n CustomEventParticipantNotification,\n EventReminderNotification,\n NewEventNotification,\n NewProfileNotification,\n ParticipationConfirmedNotification,\n ParticipationRejectedNotification,\n ProfileUpdateNotification,\n ResponsibleParticipationRequested,\n enabled_notification_types,\n)\n\n\nclass 
TestNotifications:\n    def _enable_all_notifications(self, user):\n        preferences = {}\n        backends = [backend.slug for backend in enabled_notification_backends()]\n        for notification_type in enabled_notification_types():\n            if notification_type.unsubscribe_allowed:\n                preferences[notification_type.slug] = backends\n        user.preferences[\"notifications__notifications\"] = preferences\n\n    def test_notification_form_render(self, django_app, volunteer):\n        form = django_app.get(reverse(\"core:profile_notifications\"), user=volunteer).form\n        types = filter(\n            lambda notification_type: notification_type.unsubscribe_allowed,\n            enabled_notification_types(),\n        )\n        assert all(notification_type.slug in form.fields.keys() for notification_type in types)\n\n    def test_notification_form_submit(self, django_app, volunteer):\n        form = django_app.get(reverse(\"core:profile_notifications\"), user=volunteer).form\n        form[\"ephios_new_event\"] = [\"ephios_backend_email\"]\n        form.submit()\n        assert (\n            \"ephios_backend_email\"\n            in volunteer.preferences[\"notifications__notifications\"][\"ephios_new_event\"]\n        )\n\n    def test_user_notification_sending(self, volunteer):\n        NewProfileNotification.send(volunteer)\n        ProfileUpdateNotification.send(volunteer)\n        assert Notification.objects.count() == 2\n        self._enable_all_notifications(volunteer)\n        call_command(\"send_notifications\")\n        assert Notification.objects.count() == 0\n\n    def test_event_notification_sending(self, event, volunteer):\n        self._enable_all_notifications(volunteer)\n        NewEventNotification.send(event)\n        EventReminderNotification.send(event)\n        assert Notification.objects.count() == 2 * len(\n            get_users_with_perms(event, only_with_perms_in=[\"view_event\"])\n        )\n        call_command(\"send_notifications\")\n        assert Notification.objects.count() == 0\n\n    def test_participation_notification_sending(self, event, qualified_volunteer):\n        self._enable_all_notifications(qualified_volunteer)\n        participation = LocalParticipation.objects.create(\n            shift=event.shifts.first(),\n            user=qualified_volunteer,\n            state=AbstractParticipation.States.CONFIRMED,\n        )\n        ParticipationConfirmedNotification.send(participation)\n        ParticipationRejectedNotification.send(participation)\n        ResponsibleParticipationRequested.send(participation)\n        CustomEventParticipantNotification.send(event, \"hi\")\n        assert Notification.objects.count() == 3 + len(\n            get_users_with_perms(event, only_with_perms_in=[\"change_event\"])\n        )\n        call_command(\"send_notifications\")\n        assert Notification.objects.count() == 0\n\n    def test_inactive_user(self, volunteer):\n        self._enable_all_notifications(volunteer)\n        volunteer.is_active = False\n        volunteer.save()\n        ProfileUpdateNotification.send(volunteer)\n        assert Notification.objects.count() == 1\n        call_command(\"send_notifications\")\n        assert len(mail.outbox) == 0\n\n    def test_consequence_notifications(self, volunteer, workinghours_consequence):\n        self._enable_all_notifications(volunteer)\n        ConsequenceApprovedNotification.send(workinghours_consequence)\n        ConsequenceDeniedNotification.send(workinghours_consequence)\n        assert Notification.objects.count() == 2\n        call_command(\"send_notifications\")\n        assert Notification.objects.count() == 0\n","sub_path":"tests/core/test_notifications.py","file_name":"test_notifications.py","file_ext":"py","file_size_in_byte":4366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"442276280","text":"# part1: crawl the target links on a single page; only collects the image name [0] and its link [1]\n# from bs4 import BeautifulSoup\n# import requests\n# import time\n# 
time_start=time.time()\n# if __name__=='__main__':\n# \turl='http://www.shuaia.net/index.html'\n# \theaders={\"User-Agent\":\"Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36\"\n# }\n# \thtml=requests.get(url,headers=headers)\n# \thtml.encoding='utf-8'\n# \tsoup=BeautifulSoup(html.text,'lxml')\n# \t#print(soup)\n# \ttar_url=soup.find_all(class_='item-img')\n# \tlist_url=[]\n# \tfor each in tar_url:\n# \t\tlist_url.append(each.img.get('alt')+'='+each.get('href'))\n\t#print(list_url) # ends here; the next 7 lines are just messing around\n# \t\tfor each_img in list_url:\n# \t\t\timg_info=each_img.split('=')\n# \t\t\tfilename=img_info[0]+'.jpg'\n# \t\tprint(filename)\n# time_end=time.time()\n# time_spend=(time_end)-(time_start)\n# print(time_spend)\n\n# part2: crawl targets across multiple pages; again only the image name [0] and link [1]\n# the paging pattern turns out to be simple: only the number suffix changes\n\n# from bs4 import BeautifulSoup\n# import requests\n# if __name__=='__main__':\n# \tlist_url=[]\n# \tfor num in range(1,11): # see, the first page is special; the other pages differ only by the number\n# \t\tif num==1:\n# \t\t\turl='http://www.shuaia.net/index.html'\n# \t\telse:\n# \t\t\turl='http://www.shuaia.net/index_%d.html' %num\n# \t\theaders={\"User-Agent\":\"Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36\"\n# }\n# \t\treq=requests.get(url,headers=headers)\n# \t\treq.encoding='utf-8'\n# \t\thtml=req.text\n\n# \t\tsoup=BeautifulSoup(html,'lxml')\n# \t\ttar_url=soup.find_all(class_='item-img')\n# \t\tfor each in tar_url:\n# \t\t\tlist_url.append(each.img.get('alt')+'='+each.get('href'))\n\t#print(list_url)# ends here; the following lines are just messing around\n\t\t\t# for each_img in list_url:\n\t\t\t# \t\t\t\timg_info=each_img.split('=')\n\t\t\t# \t\t\t\tfilename=img_info[0]+'.jpg'\n\t\t\t# \t\t\tprint(filename)\n\n# part3: follow each image link and crawl the full-size HD image\n# from urllib.request import urlretrieve\n# from bs4 import BeautifulSoup\n# import requests\n# import os,time\n# target_url = 'http://www.shuaia.net/mote/2017-09-01/14900.html'\n# filename = '张根硕拍摄机车型男写真帅气十足' + '.jpg'\n# headers = {\n# \"User-Agent\":\"Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36\"\n# }\n# img_req = requests.get(url = target_url,headers = headers)\n# img_req.encoding = 'utf-8'\n# img_html = img_req.text\n# img_bf_1 = BeautifulSoup(img_html, 'lxml')\n# img_url = img_bf_1.find_all('div', class_='wr-single-content-list')\n# img_bf_2 = BeautifulSoup(str(img_url), 'lxml')\n# top='http://www.shuaia.net'\n# img_url2 = top + img_bf_2.div.img.get('src')\n# if 'images' not in os.listdir():\n#     os.makedirs('images')\n# urlretrieve(url = img_url2,filename = 'images/' + filename)\n# print('Download finished!')\n\n# part4: the complete version of the code\nfrom bs4 import BeautifulSoup\nfrom urllib.request import urlretrieve\nimport requests,os,time\n\ntime_start=time.time()# record the start of the run\nif __name__=='__main__':\n\t\n\tlist_url=[]\n\tfor num in range(1,3):# crawl just the first 2 pages\n\t\tif num ==1:\n\t\t\turl='http://www.shuaia.net/index.html'\n\t\telse:\n\t\t\turl='http://www.shuaia.net/index_%d.html' %num\t\n\t\theaders={\"User-Agent\":\"Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36\"}\n\n\t\t# the formatting here differs a tiny bit from my earlier style, on lines 3 and 4\n\t\treq=requests.get(url=url,headers=headers)\n\t\treq.encoding='utf-8'\n\t\thtml=req.text\n\t\tsoup=BeautifulSoup(html,'lxml')\n\t\ttargets_url=soup.find_all(class_='item-img')\n\t\t# this is the result from part1: got the image name [0] and the link to follow [1]\n\t\tfor each in 
targets_url:\n\t\t\tlist_url.append(each.img.get('alt')+'='+each.get('href'))\n\n\tprint('Link collection finished')\n\t# moving on to parts 2 and 3; paging is already implemented\n\tfor each_img in list_url:\n\t\timg_info=each_img.split('=')\n\t\tfilename=img_info[0]+'.jpg'\n\t\ttarget_url=img_info[1]\n\t\tprint('Downloading: '+filename)\n\t\t# open each image page link; tweak the get request, etc.\n\t\theaders={\"User-Agent\":\"Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36\"}\n\t\timg_req=requests.get(url=target_url,headers=headers)\n\t\timg_req.encoding='utf-8'\n\t\thtml2=img_req.text\n\t\tsoup2=BeautifulSoup(html2,'lxml')\n\t\t# locate the tag that holds the image, and refine once more\n\t\timg_url=soup2.find_all('div',class_='wr-single-content-list')\n\t\tsoup3=BeautifulSoup(str(img_url),'lxml')\n\t\t# the image URL found for download is missing its host prefix, so add it\n\t\ttop='http://www.shuaia.net'\n\t\timg_url2=top+soup3.div.img.get('src')\n\t\t# create the folder if it does not already exist on the drive we run from\n\t\tif 'pmages' not in os.listdir():\n\t\t\tos.makedirs('pmages')\n\t\t# urlretrieve() downloads the remote data straight to the local disk.\n\t\t# url is the download link (an image here); filename sets the save path\n\t\turlretrieve(url=img_url2,filename='pmages/'+filename)\n\t\ttime.sleep(1)# sleep for 1s between downloads to avoid getting banned\n\tprint('Download finished')\ntime_end=time.time()\ntime_spend=(time_end)-(time_start)\nprint(time_spend)\n\n# line 128 errors out on Chinese-encoded content; the image can be written in binary instead:\n# with open(filename,'wb')as f:\n#     response=requests.get(url=imgurl)\n#     for aaa in response.iter_content(1024):\n#         if aaa:\n#             f.write(aaa)\n#\t\t\t f.close() ","sub_path":"爬取帅哥图片(新方法-sublime 上运行.py","file_name":"爬取帅哥图片(新方法-sublime 上运行.py","file_ext":"py","file_size_in_byte":5396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"559912536","text":"\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\nfrom updates.views import(\njson_example_view,\nSerializeDetailView,\nSerializeListView) \nurlpatterns = [\n    \n    url(r'^admin/', admin.site.urls),\n    url(r'^api/status/', include('statuses.api.urls')),\n    \n]\n","sub_path":"cfeapi/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"555494985","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Feb 10 15:08:14 2020\r\n\r\n@author: leoring_le@hotmail.com\r\n\"\"\"\r\n\r\nimport numpy as np\r\nfrom scipy.optimize import leastsq\r\nimport pylab as pl\r\n\r\ndef func(x, p):\r\n    \"\"\"\r\n    Model function used for the fit: a * np.power(x, b)\r\n    \"\"\"\r\n    a, b = p\r\n    return a * np.power(x, b)\r\n\r\ndef residuals(p, y, x):\r\n    \"\"\"\r\n    Difference between the observed data x, y and the fitted function; p holds the coefficients the fit must find\r\n    \"\"\"\r\n    return y - func(x, p)\r\n\r\ndef SSE(p,y,x):\r\n    sse = 0\r\n    \r\n    xlen = len(x)\r\n    for i in range(xlen):\r\n        sse += (y[i] - func(x[i],p))*(y[i] - func(x[i],p))\r\n    return sse\r\n    \r\ndef SST(y):\r\n    sst = 0\r\n    \r\n    average = np.mean(y)\r\n    for item in y:\r\n        sst += (item - average) * (item - average)\r\n    return sst \r\n\r\ndef main():\r\n    \r\n    x= [1,2,3,4,\r\n        5,6,7,8,9,\r\n        10,11,12,13,\r\n        14,15,16,17,\r\n        18,19,20]\r\n\r\n    y= [27,26,393,1118,1309,3806,\r\n        2077,3248,4148,4812,5019,\r\n        4562,5173,5072,3971,5328,\r\n        4833,4214,3916,4008]\r\n\r\n    x = np.array(x)\r\n    y = np.array(y) \r\n\r\n    p0 = [1, 1] # initial guess for the fit parameters\r\n\r\n    # call leastsq to fit the data\r\n    # residuals is the function that computes the error\r\n    # p0 is the initial value of the fit parameters\r\n    # args passes the observed data to fit\r\n    plsq = leastsq(residuals, p0, args=(y, x))\r\n\r\n    print (u\"Fitted parameters\", plsq[0]) # parameters after fitting the observed data\r\n    print(u\"R squared\", 1-SSE(plsq[0],y,x)/SST(y))\r\n\r\n    pl.plot(x, y, label= \"Data with Noise\")\r\n    pl.plot(x, func(x, plsq[0]), label= \"Fitted Value\")\r\n    pl.legend()\r\n    pl.show()\r\n\r\nif 
__name__ == '__main__':\r\n main()","sub_path":"exopnentEstimation.py","file_name":"exopnentEstimation.py","file_ext":"py","file_size_in_byte":1658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"185519805","text":"#!/usr/bin/env python\n\nfrom __future__ import print_function\nimport sys\nimport re\nfrom collections import defaultdict\nfrom itertools import combinations\nimport argparse\n\nfrom Bio.Seq import Seq\nfrom Bio import SeqIO\n\nfrom common import (Prsm, GeneMatch, Interval, parse_msalign_output,\n gene_match_serialize, MakeSet, Find, Union)\n\n\ndef assign_intervals(records):\n CONV_SHIFT = 1\n for rec in records:\n meta = rec.prot_name.split(\"::\")[1]\n direction, shift_len, genome_pos = meta.split(\"_\")\n\n if direction == \"fwd\":\n genomic_start = int(genome_pos) + (rec.first_res - 1) * 3\n genomic_end = int(genome_pos) + (rec.last_res - 1) * 3\n elif direction == \"rev\":\n genomic_start = int(genome_pos) - (rec.last_res - 1) * 3 + 1 #why +1??\n genomic_end = int(genome_pos) - (rec.first_res - 1) * 3 + 1 #why +1??\n\n assert genomic_end >= genomic_start\n\n rec.interval = Interval(genomic_start + CONV_SHIFT,\n genomic_end + CONV_SHIFT,\n 1 if direction == \"fwd\" else -1)\n\n\ndef assign_genome_seqs(records, genome_file):\n FLANK_LEN = 20\n genome = get_fasta(genome_file)\n\n for record in records:\n if record.interval is None:\n continue\n\n seq_name = record.prot_name.split(\"::\")[0]\n flank_start = (record.interval.start - 1) - (FLANK_LEN * 3)\n flank_end = (record.interval.end - 1) + (FLANK_LEN * 3)\n genome_seq = genome[seq_name].seq[flank_start:flank_end]\n\n if record.interval.strand < 0:\n genome_seq = genome_seq.reverse_complement()\n\n translated = str(genome_seq.translate())\n translated = \".\".join([translated[0:FLANK_LEN].lower(),\n translated[FLANK_LEN:-FLANK_LEN+1],\n translated[-FLANK_LEN+1:].lower()])\n record.genome_seq = translated\n\n\ndef assign_families(records):\n sets = {r.prsm_id : MakeSet(r) for r in records}\n for rec_1, rec_2 in combinations(records, 2):\n int_1 = rec_1.interval\n int_2 = rec_2.interval\n #test for overlapping\n if ((int_1.start <= int_2.start and int_2.end <= int_1.end) or\n (int_2.start <= int_1.start and int_1.end <= int_2.end)):\n Union(sets[rec_1.prsm_id], sets[rec_2.prsm_id])\n\n by_family = defaultdict(list)\n for s in sets.values():\n by_family[Find(s)].append(s.data)\n\n for fam_id, prsms in enumerate(by_family.values()):\n for prsm in prsms:\n prsm.family = fam_id\n\n\ndef filter_evalue(records, e_value):\n return list(filter(lambda r: r.e_value < e_value, records))\n\n\ndef filter_spectras(records):\n groups = defaultdict(list)\n for rec in records:\n groups[rec.spec_id].append(rec)\n\n to_keep = set()\n for group in groups.itervalues():\n by_eval = sorted(group, key=lambda r: r.e_value)\n to_keep.add(by_eval[0])\n\n return [r for r in records if r in to_keep]\n\n\ndef get_fasta(filename):\n return {r.id : r for r in SeqIO.parse(filename, \"fasta\")}\n\n\ndef get_matches(table_file, genome_file, e_value):\n prsms = parse_msalign_output(table_file)\n prsms = filter_spectras(prsms)\n trusted_prsms = filter_evalue(prsms, e_value)\n\n assign_intervals(prsms)\n assign_genome_seqs(prsms, genome_file)\n assign_families(trusted_prsms)\n\n matches = []\n for p in prsms:\n matches.append(GeneMatch(p.family, p.prsm_id, p.spec_id, p.p_value,\n p.e_value, p.interval.start,\n p.interval.end, p.interval.strand,\n p.peptide, p.genome_seq, p.html))\n\n return 
matches\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"Processing MSAlign genome run\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n parser.add_argument(\"msalign_output\", metavar=\"msalign_output\",\n help=\"path to result_table.txt\")\n parser.add_argument(\"genome_fasta\", metavar=\"genome_fasta\",\n help=\"path to genome file in FASTA format\")\n parser.add_argument(\"-f\", \"--family\", action=\"store_const\",\n dest=\"family\", default=False, const=True,\n help=\"group by families\")\n parser.add_argument(\"-e\", \"--eval\", dest=\"e_value\",\n help=\"custom e-value threshold\",\n default=\"0.01\")\n\n args = parser.parse_args()\n\n gene_match = get_matches(args.msalign_output, args.genome_fasta,\n float(args.e_value))\n gene_match_serialize(gene_match, sys.stdout, args.family)\n return 0\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"process_genome.py","file_name":"process_genome.py","file_ext":"py","file_size_in_byte":4787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"481889581","text":"\nimport torch \nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom collections import namedtuple\nfrom PIL import Image, ImageSequence\nimport dlib\n\nfrom viewport_matrix import get_V\nfrom perspective_projection_matrix import get_perspective, get_P\nfrom morphable_model import get_face_point_cloud, read_pca_model, U, random_face_point_cloud\nimport matplotlib.pyplot as plt\n\ndef rotation_matrix(w, is_numpy=False):\n if is_numpy:\n w = torch.from_numpy(w)\n\n theta1, theta2, theta3 = w[0], w[1], w[2]\n \n zero = theta1.detach()*0\n one = zero.clone()+1\n \n cosx, sinx, cosy, siny, cosz, sinz = theta1.cos(), theta1.sin(), theta2.cos(), theta2.sin(), theta3.cos(), theta3.sin()\n \n r_x = torch.stack([one, zero, zero,\n zero, cosx, sinx,\n zero, -sinx, cosx]).view( 3, 3)\n \n r_y = torch.stack([cosy, zero, -siny,\n zero, one, zero,\n siny, zero, cosy]).view( 3, 3)\n \n r_z = torch.stack([cosz, -sinz, zero,\n sinz, cosz, zero,\n zero, zero, one]).view( 3, 3)\n \n R = r_x @ r_y @ r_z\n \n if is_numpy:\n R = R.numpy()\n return R\n \ndef get_P(n, f, t, b, is_numpy = False):\n if is_numpy:\n return np.array([[(2 * n) / (t-b), 0, 0, 0],\n [0, (2 * n) / (t - b), 0, 0],\n [0, 0, -(f + n) / (f - n), -(2 * f * n) / (f - n)],\n [0, 0, -1, 0]])\n else:\n return torch.Tensor([[(2 * n) / (t-b), 0, 0, 0],\n [0, (2 * n) / (t - b), 0, 0],\n [0, 0, -(f + n) / (f - n), -(2 * f * n) / (f - n)],\n [0, 0, -1, 0]])\n\n\n\ndef normalise(landmarks, is_ground = False, values =None):\n \n max_x = torch.max(landmarks[:,0].detach())\n max_y = torch.max(landmarks[:,1].detach())\n min_x = torch.min(landmarks[:,0].detach())\n min_y = torch.min(landmarks[:,1].detach())\n\n \n scale=torch.sqrt((max_x-min_x).pow(2) + (max_y-min_y).pow(2))\n \n if values!=None:\n length, min_x, min_y = values\n landmarks[:,0] = (landmarks[:,0] - min_x)/scale \n landmarks[:,1] = (landmarks[:,1] - min_y)/scale\n if is_ground:\n return landmarks, [scale, min_x, min_y]\n return landmarks\n\n\n\ndef denormalise(estimated_landmarks, target_landmarks, is_numpy = False):\n if is_numpy:\n estimated_landmarks, target_landmarks = torch.from_numpy(estimated_landmarks) , torch.form_numpy(target_landmarks)\n landmarks, values = normalise(target_landmarks, is_ground = True)\n \n estimated_landmarks = normalise(estimated_landmarks)\n estimated_landmarks[:,0] = estimated_landmarks[:,0]*values[0]+values[1]\n estimated_landmarks[:,1] = 
estimated_landmarks[:,1]*values[0]+values[2]\n    estimated_landmarks = estimated_landmarks.detach().numpy()\n    \n    return estimated_landmarks\n\n\ndef get_face_point_cloud_torch(p, alpha, delta): \n    \"\"\"\n    Get face point cloud for given alpha and delta.\n\n    :param p: PCA model received with read_pca_model()\n    :param alpha: size 30\n    :param delta: size 20\n    :return: 3D point cloud of size [num_points x 3]\n    \"\"\"\n    G_id = torch.from_numpy(p[\"mu_id\"]) + torch.from_numpy(p[\"E_id\"]) @ ( torch.from_numpy(p[\"sigma_id\"]) * alpha)\n    G_ex = torch.from_numpy(p[\"mu_ex\"]) + torch.from_numpy(p[\"E_ex\"]) @ ( torch.from_numpy(p[\"sigma_ex\"]) * delta)\n    return (G_id+G_ex).view((-1, 3))\n\n\n\n\n\ndef facial_landmarks_torch(alpha, delta, w, t):\n    \"\"\"\n    Construct facial landmarks from facial geometry latent parameters alpha, delta and object transformation w, t.\n\n    :param alpha: array, 30dim\n    :param delta: array, 20dim\n    :param w: rotation angles around x,y, z. Given as list [theta_x, theta_y, theta_z].\n    :param t: translation in x,y,z space. Given as list [translation_x, translation_y, translation_z]\n    :return:\n    \"\"\"\n    landmarks_idx = np.loadtxt(\"Landmarks68_model2017-1_face12_nomouth.anl\", dtype=int)\n\n    pca = read_pca_model()\n    G = get_face_point_cloud_torch(pca, alpha, delta)[landmarks_idx].t()\n    G_h = [G , torch.ones(G.shape[1]).view((1, -1))]\n    G_h = torch.cat(G_h, dim=0)\n    \n    # get T matrix\n    T = torch.eye(4)\n    T[:3, :3] = rotation_matrix(w)#rotation_tensor(w, 1)#get_rotation_matrix_torch(w) #torch.tensor([[1, 0, 0], [0, 1, 0], [0, 0, 1]])#\n    T[:3, 3] = t\n    \n    # Get V and P matrices\n    W = 172\n    H = 162\n\n    image_aspect_ratio = W / H\n    angle = 10\n    near = .1\n    far = 10\n\n    right, left, top, bottom = get_perspective(image_aspect_ratio, angle, near, far)\n    \n    V = get_V(right, left, top, bottom)\n\n    \n    [V] = list(map(torch.from_numpy, [V]))\n    V = V.to(dtype = torch.float32)\n    n,f, t, b = near, far, top, bottom\n    P = torch.Tensor([[(2 * n) / (t-b), 0, 0, 0],\n                  [0, (2 * n) / (t - b), 0, 0],\n                  [0, 0, -(f + n) / (f - n), -(2 * f * n) / (f - n)],\n                  [0, 0, -1, 0]])\n    i = V @ P @ T @ G_h\n\n    # homo to cartesian\n    i = i/i[3,:].clone()\n\n    # two-dimensional\n    return i[:2, :].t()\n\n\n\ndef get_final_landmarks(alpha, delta, w, t, target_landmarks):\n    estimated_landmarks = facial_landmarks_torch(alpha, delta, w, t)\n    estimated_landmarks = denormalise(estimated_landmarks, target_landmarks)\n    return estimated_landmarks","sub_path":"Assignment3/.ipynb_checkpoints/changed_functions-checkpoint.py","file_name":"changed_functions-checkpoint.py","file_ext":"py","file_size_in_byte":5285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"648336351","text":"# coding:utf-8\r\nfrom flask import make_response\r\nimport base64\r\n\r\nimport openFlashChart\r\nfrom openFlashChart_varieties import Bar_3d,Line\r\nfrom openFlashChart_varieties import bar_3d_value,dot_value,x_axis_labels,x_axis_label\r\n\r\ncolorArr = ['#66b032','#d0ea2b','#fefe33','#fabc02','#fb9902',\r\n\t\t\t'#fd5308','#fe2712','#a7194b','#8601af','#3d01a4',\r\n\t\t\t'#4b4bfc','#0247fe']\r\n\r\n# export flash chart images (base64 format)\r\nclass flash_pic():\r\n\tdef bar_3d(self,search_result,y_legend,column_text):\r\n\t\t# define the chart\r\n\t\tchart = openFlashChart.template(u'Bar chart')\r\n\t\tchart.set_y_legend(y_legend, style = '{font-size: 12px}')# set the y axis\r\n\t\t# x axis labels\r\n\t\tx_labels = []\r\n\t\tfor row in search_result:\r\n\t\t\tx_labels.append(row[0])\r\n\t\tchart.set_x_axis(colour = '#736AFF', three_d = 5, labels = 
x_axis_labels(labels = x_labels))\r\n\r\n\t\trow_num = len(search_result)# total number of records\r\n\t\tcolumn_num = len(search_result[0])# number of columns per record\r\n\t\t# collect the values\r\n\t\tmymax = 0\r\n\t\t\r\n\t\tfor column_i in range(1,column_num):\r\n\t\t\tplot = Bar_3d()\r\n\t\t\tvalues = []\r\n\t\t\tfor row in search_result:\r\n\t\t\t\tif int(row[column_i]) > mymax:\r\n\t\t\t\t\tmymax = int(row[column_i])\r\n\t\t\t\tvalues.append(bar_3d_value(int(row[column_i]), colorArr[column_i-1]))\r\n\t\t\tplot.set_values(values=values)\r\n\t\t\tplot.set_colour(colorArr[column_i-1])\r\n\t\t\tplot['on-show'] = dict([['type','pop-up'],['cascade',1],['delay',0.5]])# the api has no on-show option, add it manually\r\n\t\t\tplot['text'] = column_text[column_i-1]\r\n\t\t\tchart.add_element(plot)\r\n\t\t\tvalues = []\r\n\t\tchart.set_y_axis(min = 0, max = mymax)\r\n\t\treturn chart.encode()\r\n\r\n\tdef line_hollow(self,search_result,y_legend,line_text):\r\n\t\t# define the chart\r\n\t\tchart = openFlashChart.template(u'Line chart')\r\n\t\tchart.set_y_legend(y_legend, style = '{font-size: 12px}')# set the y axis\r\n\t\t# x axis labels\r\n\t\tx_labels = []\r\n\t\tfor row in search_result:\r\n\t\t\tx_labels.append(row[0])\r\n\t\tchart.set_x_axis(colour = '#736AFF', three_d = 5, labels = x_axis_labels(labels = x_labels))\r\n\r\n\t\trow_num = len(search_result)# total number of records\r\n\t\tcolumn_num = len(search_result[0])# number of columns per record\r\n\t\t# collect the values\r\n\t\tmymax = 0\r\n\t\t\r\n\t\tfor column_i in range(1,column_num):\r\n\t\t\tplot = Line()\r\n\t\t\tvalues = []\r\n\t\t\tfor row in search_result:\r\n\t\t\t\tif int(row[column_i]) > mymax:\r\n\t\t\t\t\tmymax = int(row[column_i])\r\n\t\t\t\tvalues.append(int(row[column_i]))\r\n\t\t\tplot.set_values(values=values)\r\n\t\t\tplot.set_colour(colorArr[column_i-1])\r\n\t\t\tplot['on-show'] = dict([['type','shrink-in'],['cascade',1],['delay',0.5]])# the api has no on-show option, add it manually\r\n\t\t\tplot['dot-style'] = dict([['type','star'],['colour','#a44a80'],['dot-size',5]])# the api has no dot-style option, add it manually\r\n\t\t\tplot['text'] = line_text[column_i-1]\r\n\t\t\tchart.add_element(plot)\r\n\t\t\tvalues = []\r\n\t\tchart.set_y_axis(min = 0, max = mymax)\r\n\t\treturn chart.encode()\r\n\r\n\tdef export(self, imageData,filename):\r\n\t\timgData = base64.b64decode(imageData)\r\n\t\t#return send_file(io.BytesIO(imgData))\r\n\t\tresponse = make_response(imgData)\r\n\t\tresponse.headers['Content-Type'] = 'image/jpeg'\r\n\t\tresponse.headers['Content-Disposition'] = 'attachment; filename='+filename+'.jpg'\r\n\t\treturn response","sub_path":"scapp/tools/flash_pic.py","file_name":"flash_pic.py","file_ext":"py","file_size_in_byte":3045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"375446661","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Dec 08 21:25:40 2017\n\n@author: Chinmay Rawool\n\"\"\"\n\n#Neural Network\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\n\n\ndef cost_function(z):\n    e = math.e\n    m,n = z.shape\n    \n    a = np.array([[1]], dtype='float')\n    for i in range(0,m):\n        z[i]\n        value = 1/(1+e**z[i])\n        print(value)\n        \n        a = np.concatenate((a,[value]),axis=0)\n    \n    cost = a\n    cost\n    \n    return cost\n\ndef ddx_cost(z):\n    return z * (1-z)\n\ndef forward_propagation(x,y,theta_1,theta_2):\n\n    one = np.array([[1]],dtype='float')\n    #array([[1, 2, 3]]).T\n    inputMat = x.transpose()\n    inputMat\n    \n    y = y.transpose()\n    \n    #Add bias unit to a1\n    a1 = np.concatenate((one,inputMat),axis=0)\n    a1\n    #Input layer: x0,x1,x2\n    z2 = np.matmul(theta_1,a1)\n    z2\n    \n    a2 = cost_function(z2)\n    a2\n    \n    z3 = np.matmul(theta_2,a2)\n    z3\n    \n    a3 = cost_function(z3)\n    a3\n    \n    a3 = 
np.delete(a3,0,0)\n a3\n \n #Total Cost \n Total = 0.5*((y[0]-a3[0])**2 + (y[1]-a3[1])**2)\n Total\n \n return a1,a2,a3,Total\n\ndef back_propagation(a1,a2,a3,theta_1,theta_2,y):\n alpha =0.5\n \n error = a3 - y.T\n #delta 3\n d3 = error * ddx_cost(a3)\n d3\n \n m,n = a2.shape\n a2_row = np.reshape(a2,(n,m))\n a2_row.shape\n new_theta_2 = np.matmul(d3,a2_row) * alpha\n new_theta_2\n \n #(a3-y) * (a3*(1-a3)) * theta_2(:,1) *(a2*(1-a2))\n t21 = theta_2[:,1]\n t21.shape\n t21 = np.reshape(t21,(2,1))\n t21\n \n t22 = theta_2[:,2]\n t22.shape\n t22 = np.reshape(t22,(2,1))\n t22\n \n der_a21 = ddx_cost(a2[1])\n der_a22 = ddx_cost(a2[2])\n \n d21 = error * ddx_cost(a3) * t21 * der_a21\n d22 = error * ddx_cost(a3) * t22 * der_a22\n \n #delta 2\n d2 = np.zeros((2,1),dtype='float')\n d2[0,0] = d21.sum(axis=0)\n d2[1,0] = d22.sum(axis=0)\n d2\n \n m,n = a1.shape\n a1_row = np.reshape(a1,(n,m))\n a1_row.shape\n new_theta_1 = np.matmul(d2,a1_row) * alpha\n new_theta_1\n \n new_theta_1 = theta_1 + new_theta_1\n new_theta_2 = theta_2 + new_theta_2\n \n return new_theta_1,new_theta_2,d3,d2\n\n\ndef main():\n\n # ----------------------------------------------------------------\n # Q.2. Neural Networks Implementation\n # ----------------------------------------------------------------\n \n no_of_input_nodes = 2\n no_of_hidden_nodes = 2\n no_of_output_nodes = 2\n \n #Initialize the input and the ouput\n x = np.array([[0.05,0.1]],dtype='float')\n y = np.array([[0.01,0.99]],dtype='float')\n \n np.random.seed(10)\n #Size of theta array: (s_j+1,(s_j + 1)) = 2x3\n #Theta 1: size(2,3) of uniformly distributed random values between 0 and 1\n theta_1 = np.random.uniform(low=0.0,high=1.0,size=(no_of_hidden_nodes,no_of_input_nodes+1))\n theta_1\n #Theta 2: size(2,3) of uniformly distributed random values between 0 and 1\n theta_2 = np.random.uniform(low=0.0,high=1.0,size=(no_of_output_nodes,no_of_hidden_nodes+1))\n theta_2\n \n total_cost = np.zeros((40001,2),dtype='float')\n theta1_value = np.zeros((40001,7),dtype='float')\n theta2_value = np.zeros((40001,7),dtype='float')\n \n for i in range(40001):\n #Forward Propagation\n a1,a2,a3,Total = forward_propagation(x,y,theta_1,theta_2)\n #Total Cost per iteration\n total_cost[i,0]=i+1\n total_cost[i,1]=Total\n #Theta 1 parameters \n theta1_value[i,0]=i+1\n theta1_value[i,1]=theta_1[0,0]\n theta1_value[i,2]=theta_1[0,1]\n theta1_value[i,3]=theta_1[0,2]\n theta1_value[i,4]=theta_1[1,0]\n theta1_value[i,5]=theta_1[1,1]\n theta1_value[i,6]=theta_1[1,2]\n #Theta 2 parameters\n theta2_value[i,0]=i+1\n theta2_value[i,1]=theta_2[0,0]\n theta2_value[i,2]=theta_2[0,1]\n theta2_value[i,3]=theta_2[0,2]\n theta2_value[i,4]=theta_2[1,0]\n theta2_value[i,5]=theta_2[1,1]\n theta2_value[i,6]=theta_2[1,2]\n \n #Backward Propagation\n new_t1, new_t2, delta_3, delta_2 = back_propagation(a1,a2,a3,theta_1,theta_2,y)\n #Update Theta values for next iteration\n theta_1 = new_t1\n theta_2 = new_t2\n \n #Plot\n #Plot total cost vs iterations\n fig = plt.figure(figsize=(8, 6))\n ax = fig.add_subplot(1, 1, 1)\n ax.scatter(total_cost[:,0], total_cost[:,1], color='red',s=1, label='Total Cost vs iterations') \n ax.legend()\n fig.show() \n \n #Plot theta1 parameters vs iterations\n fig = plt.figure(figsize=(16,10))\n ax = fig.add_subplot(2, 3, 1)\n ax.scatter(theta1_value[:,0], theta1_value[:,1], color='black',s=1, label='Theta1_10 vs iterations') \n ax.legend()\n \n ax = fig.add_subplot(2, 3, 2)\n ax.scatter(theta1_value[:,0], theta1_value[:,2], color='black',s=1, label='Theta1_11 vs iterations') \n 
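# note (added): one subplot per theta1 parameter; the legend names the curve\n    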
ax.legend()\n \n ax = fig.add_subplot(2, 3, 3)\n ax.scatter(theta1_value[:,0], theta1_value[:,3], color='black',s=1, label='Theta1_12 vs iterations') \n ax.legend()\n \n ax = fig.add_subplot(2, 3, 4)\n ax.scatter(theta1_value[:,0], theta1_value[:,4], color='black',s=1, label='Theta1_20 vs iterations') \n ax.legend()\n ax = fig.add_subplot(2, 3, 5)\n ax.scatter(theta1_value[:,0], theta1_value[:,5], color='black',s=1, label='Theta1_21 vs iterations') \n ax.legend()\n ax = fig.add_subplot(2, 3, 6)\n ax.scatter(theta1_value[:,0], theta1_value[:,6], color='black',s=1, label='Theta1_22 vs iterations') \n ax.legend()\n fig.show() \n \n \n #Plot Theta2 parameters vs iterations \n fig = plt.figure(figsize=(16,9))\n ax = fig.add_subplot(2, 3, 1)\n ax.scatter(theta2_value[:,0], theta2_value[:,1], color='black',s=1, label='Theta2_10 vs iterations') \n ax.legend()\n \n ax = fig.add_subplot(2, 3, 2)\n ax.scatter(theta2_value[:,0], theta2_value[:,2], color='black',s=1, label='Theta2_11 vs iterations') \n ax.legend()\n \n ax = fig.add_subplot(2, 3, 3)\n ax.scatter(theta2_value[:,0], theta2_value[:,3], color='black',s=1, label='Theta2_12 vs iterations') \n ax.legend()\n \n ax = fig.add_subplot(2, 3, 4)\n ax.scatter(theta2_value[:,0], theta2_value[:,4], color='black',s=1, label='Theta2_20 vs iterations') \n ax.legend()\n ax = fig.add_subplot(2, 3, 5)\n ax.scatter(theta2_value[:,0], theta2_value[:,5], color='black',s=1, label='Theta2_21 vs iterations') \n ax.legend()\n ax = fig.add_subplot(2, 3, 6)\n ax.scatter(theta2_value[:,0], theta2_value[:,6], color='black',s=1, label='Theta2_22 vs iterations') \n ax.legend()\n fig.show() \n\n \nif __name__ == '__main__':\n main() \n \n \n \n ","sub_path":"Neural Networks/neural_networks.py","file_name":"neural_networks.py","file_ext":"py","file_size_in_byte":6687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"87990320","text":"class Solution(object):\n def maxNumberOfBalloons(self, text):\n \"\"\"\n :type text: str\n :rtype: int\n \"\"\"\n mp = {}\n for c in text:\n if not c in mp: mp[c] = 0\n mp[c] += 1\n res = 1000000\n for c,n in [['b',1],['a',1],['l',2],['o',2],['n',1]]:\n if not c in mp:\n res = 0\n else:\n res = min(res,mp[c]//n)\n return res\n","sub_path":"154/5189.py","file_name":"5189.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"308273915","text":"import json\nimport urllib\nfrom urllib.request import Request\n\nfrom django.http import HttpResponse, JsonResponse, StreamingHttpResponse, HttpResponseRedirect\nfrom django.shortcuts import render\nfrom django.template.context_processors import csrf\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom main_app import config\nfrom main_app.file_utils import file_iterator, check_file_exist\n\nconfig.job_status = config.JOB_STATUS_STOP\nconfig.upload_success = False\n\n\ndef index(request):\n # if config.job_status == config.JOB_STATUS_RUNNING:\n # return HttpResponseRedirect('/job/result')\n # if config.job_status == config.JOB_STATUS_SUCCESS:\n # return HttpResponseRedirect('/job/result')\n # return HttpResponseRedirect('/upload_file')\n return render(request, 'index.html', {'exit_status': config.job_status})\n\n\ndef upload_file(request):\n if request.method == 'GET':\n error = request.GET.get('error', '')\n return render(request, 'upload.html', {'error': error, 'exit_status': config.job_status})\n elif request.method == 'POST':\n files = 
request.FILES.getlist(config.FILENAME)\n        count = 0\n        for current_file in files:\n            for filename in config.UPLOAD_FILENAMES:\n                if current_file.name == filename:\n                    count += 1\n                    try:\n                        print(config.UPLOAD_DIR + current_file.name)\n                        with open(config.UPLOAD_DIR + current_file.name, 'wb') as destination:\n                            for chunk in current_file.chunks():\n                                destination.write(chunk)\n                    except Exception as e:\n                        print(\"ERROR while writing files! \" + str(e))\n                        return HttpResponseRedirect('/upload_file?error=' + 'ERROR while writing files! ' + str(e))\n        if count < len(config.UPLOAD_FILENAMES):\n            return HttpResponseRedirect('/upload_file?error=Some files missed')\n        config.upload_success = True\n        return HttpResponseRedirect('/job/startPage')\n\n\ndef download_file(request):\n    download_filename = config.DOWNLOAD_DIR + config.DOWNLOAD_FILENAME\n\n    if not check_file_exist(download_filename):\n        return HttpResponse(\"no file\")\n\n    response = StreamingHttpResponse(file_iterator(download_filename))\n    response['Content-Type'] = 'application/octet-stream'\n    response['Content-Disposition'] = 'attachment;filename=\"{0}\"'.format(config.DOWNLOAD_FILENAME)\n    return response\n\n\ndef start_page(request):\n    error = request.GET.get('error', '')\n    return render(request, 'start.html', {'upload_success': config.upload_success, 'error': error, 'exit_status': config.job_status})\n\n\ndef start_jop(request):\n    start_jop_req = Request(url=config.JOB_START_URL, method='POST')\n    raw_response = urllib.request.urlopen(start_jop_req).read()\n    str_response = raw_response.decode('utf-8')\n    json_response = json.loads(str_response)\n    if json_response['status'] == config.RESPONSE_STATUS_ERROR:\n        return HttpResponseRedirect('/job/startPage?error=Start job failed!')\n    else:\n        config.upload_success = False\n        config.job_status = config.JOB_STATUS_RUNNING\n        return HttpResponseRedirect('/job/result')\n\n\ndef stop_jop(request):\n    stop_jop_req = Request(url=config.JOB_STOP_URL, method='POST')\n    raw_response = urllib.request.urlopen(stop_jop_req).read()\n    str_response = raw_response.decode('utf-8')\n    json_response = json.loads(str_response)\n    if json_response['status'] == config.RESPONSE_STATUS_ERROR:\n        return HttpResponseRedirect('/job/result?error=Stop failed!')\n    else:\n        config.job_status = config.JOB_STATUS_STOP\n        return HttpResponseRedirect('/upload_file')\n\n\n@csrf_exempt\ndef job_callback(request):\n    data = json.loads(request.body.decode('utf-8'))\n    exit_status = data['exitStatus']\n    error = data['error']\n    print('job_callback')\n    print('exitStatus: ' + str(exit_status) + ', error: ' + error)\n\n    if exit_status == 0:\n        config.job_status = config.JOB_STATUS_SUCCESS\n    else:\n        config.job_status = config.JOB_STATUS_FAILED\n    return HttpResponse(exit_status)\n\n\ndef result(request):\n    error = request.GET.get('error', '')\n    return render(request,\n                  'result.html',\n                  {'exit_status': config.job_status, 'error': error})\n\n\n@csrf_exempt\ndef job_run_test(request):\n    if request.method == 'POST':\n        return JsonResponse({\"msg\": \"\", \"status\": 0})\n    else:\n        return JsonResponse({'msg': 'method is GET', 'status': 1})\n\n\ndef callback_test(request):\n    data = urllib.parse.urlencode({'exitStatus': 0}).encode('utf-8')\n    stop_jop_req = Request(url='http://localhost:8000/api/job/callback', method='POST', data=data)\n    raw_response = urllib.request.urlopen(stop_jop_req).read()\n    return 
HttpResponse(raw_response)\n","sub_path":"main_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"484220040","text":"from django.conf.urls import patterns, url\n\nfrom shop.views import ShopListView\nfrom shop.views.product import ProductDetailView\nfrom shop.models.productmodel import Product\n\n\nurlpatterns = patterns('',\n    url(r'^$',\n        ShopListView.as_view(model=Product),\n        name='product_list'\n    ),\n    url(r'^(?P<slug>[0-9A-Za-z-_.//]+)/$',\n        ProductDetailView.as_view(),\n        name='product_detail'\n    ),\n    )\n","sub_path":"shop/urls/catalog.py","file_name":"catalog.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"5371306","text":"import math\nfrom functools import lru_cache\n\n@lru_cache(maxsize=None)\ndef factors(n):\n    if n == 1:\n        return set([1])\n    for f in range(2, int(math.sqrt(n))+1):\n        if n%f == 0: #f is smallest factor of n\n            of = factors(n//f)\n            return of.union(set(g*f for g in of))\n    else: #prime\n        return set([1, n])\n\ndef sum_of_proper_divisors(n):\n    return sum(factors(n))-n\n\nabundant_numbers = [n for n in range(12, 28123+1) if sum_of_proper_divisors(n) > n]\n\nabundant_sums = set([an1+an2 for an1 in abundant_numbers for an2 in abundant_numbers if an1+an2 < 28123+1])\n\nprint(sum([n for n in range(1, 28123+1) if n not in abundant_sums]))\n","sub_path":"P0023.py","file_name":"P0023.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"464730082","text":"import os\nimport shutil\nimport subprocess\nimport tempfile\nfrom os.path import join, exists\n\nimport mock\nfrom testify import setup_teardown, TestCase, test_program\nfrom testify.assertions import assert_equal, assert_raises, assert_in\nfrom optparse import OptionParser\n\n\nclass OptionParserErrorException(Exception):\n    pass\n\n\nclass ParseTestRunnerCommandLineArgsTest(TestCase):\n    @setup_teardown\n    def patch_OptionParser_error(self):\n        def new_error(*args, **kwargs):\n            raise OptionParserErrorException(*args, **kwargs)\n        with mock.patch.object(OptionParser, 'error', side_effect=new_error):\n            yield\n\n    def test__parse_test_runner_command_line_module_method_overrides_empty_input(self):\n        \"\"\"Make sure _parse_test_runner_command_line_module_method_overrides\n        returns something sensible if you pass it an empty list of arguments.\n        \"\"\"\n        assert_equal(test_program._parse_test_runner_command_line_module_method_overrides([]), (None, {}))\n\n    def test_parse_test_runner_command_line_args_rerun_test_file(self):\n        \"\"\"Make sure that when --rerun-test-file is passed,\n        parse_test_runner_command_line_args doesn't complain about a missing\n        test path.\n        \"\"\"\n        test_program.parse_test_runner_command_line_args([], ['--rerun-test-file', '-'])\n\n    def test_parse_test_runner_command_line_args_connect(self):\n        \"\"\"Make sure that when --connect is passed,\n        parse_test_runner_command_line_args doesn't complain about a missing\n        test path.\n        \"\"\"\n        test_program.parse_test_runner_command_line_args([], ['--connect', 'localhost:65537'])\n\n    def test_parse_test_runner_command_line_args_replay_json_inline(self):\n        \"\"\"Make sure that when --replay-json-inline is passed,\n        parse_test_runner_command_line_args doesn't complain about a missing\n        test path.\n        \"\"\"\n        test_program.parse_test_runner_command_line_args([], 
['--replay-json-inline', '{something that obviously isnt json}'])\n\n def test_parse_test_runner_command_line_args_replay_json(self):\n \"\"\"Make sure that when --replay-json-inline is passed,\n parse_test_runner_command_line_args doesn't complain about a missing\n test path.\n \"\"\"\n test_program.parse_test_runner_command_line_args([], ['--replay-json', 'somejsonfile.txt'])\n\n def test_parse_test_runner_command_line_args_no_test_path(self):\n \"\"\"Make sure that if no options and no arguments are passed,\n parse_test_runner_command_line_args DOES complain about a missing test\n path.\n \"\"\"\n with assert_raises(OptionParserErrorException):\n test_program.parse_test_runner_command_line_args([], [])\n\n\ndef test_call(command):\n proc = subprocess.Popen(command, stdout=subprocess.PIPE)\n stdout, stderr = proc.communicate()\n if proc.returncode:\n raise subprocess.CalledProcessError(proc.returncode, command)\n return stdout.strip().decode('UTF-8')\n\n\nclass TestifyRunAcceptanceTestCase(TestCase):\n\n expected_list = (\n 'testing_suite.example_test ExampleTestCase.test_one\\n'\n 'testing_suite.example_test ExampleTestCase.test_two\\n'\n 'testing_suite.example_test SecondTestCase.test_one'\n )\n\n expected_tests = 'PASSED. 3 tests'\n\n def test_help(self):\n output = test_call(['python', '-m', 'testify.test_program', '--help'])\n assert_in('Usage:', output)\n\n def test_run_testify_from_bin_list_tests(self):\n output = test_call(['bin/testify', '--list-tests', 'testing_suite'])\n assert_equal(output, self.expected_list)\n\n def test_run_testify_as_module_list_tests(self):\n output = test_call([\n 'python', '-m', 'testify.test_program',\n '--list-tests', 'testing_suite'])\n assert_equal(output, self.expected_list)\n\n def test_run_testify_from_bin(self):\n output = test_call(['bin/testify', 'testing_suite', '-v'])\n assert_in(self.expected_tests, output)\n\n def test_run_testify_test_module(self):\n output = test_call(['python', '-m', 'testing_suite.example_test', '-v'])\n assert_in(self.expected_tests, output)\n\n def test_run_testify_test_file(self):\n output = test_call(['python', 'testing_suite/example_test.py', '-v'])\n assert_in(self.expected_tests, output)\n\n def test_run_testify_test_file_class(self):\n output = test_call([\n 'python', 'testing_suite/example_test.py', '-v',\n 'ExampleTestCase'])\n assert_in('PASSED. 2 tests', output)\n\n def test_run_testify_test_file_class_and_method(self):\n output = test_call([\n 'python', 'testing_suite/example_test.py', '-v',\n 'ExampleTestCase.test_one'])\n assert_in('PASSED. 
1 test', output)\n\n def test_run_testify_with_failure(self):\n assert_raises(\n subprocess.CalledProcessError,\n test_call,\n ['python', 'testing_suite/example_test.py', 'DoesNotExist'])\n\n def test_failure_on_interrupt(self):\n with assert_raises(subprocess.CalledProcessError):\n test_call([\n 'python', '-m', 'testify.test_program',\n 'test.failing_test_interrupt'\n ])\n\n\nclass TestClientServerReturnCode(TestCase):\n def test_client_returns_zero_on_success(self):\n server_process = subprocess.Popen(\n [\n 'python', '-m', 'testify.test_program',\n 'testing_suite.example_test',\n '--serve', '9001',\n ],\n stdout=open(os.devnull, 'w'),\n stderr=open(os.devnull, 'w'),\n )\n # test_call has the side-effect of asserting the return code is 0\n ret = test_call([\n 'python', '-m', 'testify.test_program',\n '--connect', 'localhost:9001',\n ])\n assert_in('PASSED', ret)\n assert_equal(server_process.wait(), 0)\n\n\nclass TestClientScheduling(TestCase):\n @setup_teardown\n def create_temporary_directory(self):\n self.tempdir = tempfile.mkdtemp()\n shutil.copy(\n join('test', 'failing_test_after_signal.py'), self.tempdir,\n )\n try:\n yield\n finally:\n shutil.rmtree(self.tempdir)\n\n def test_client_returns_nonzero_on_failure(self):\n tempdir = self.tempdir\n server_process = subprocess.Popen(\n [\n 'python', '-m', 'testify.test_program',\n 'failing_test_after_signal',\n '--serve', '9001',\n '--server-timeout', '10',\n '-v',\n ],\n stdout=open(os.devnull, 'w'),\n stderr=open(os.devnull, 'w'),\n cwd=tempdir,\n )\n\n # Read from both of the processes until we get some output\n # Then send sigint to that process\n # We're doing this as a synchronization mechanism to guarantee\n # two clients are connected to the server when the test fails.\n # The expected behaviour is that the failed test is not re-run on\n # the same client\n\n class Client(object):\n def __init__(self, number):\n filename = join(tempdir, str(number) + '.stdout')\n self.proc = subprocess.Popen(\n [\n 'python', '-m', 'testify.test_program',\n '--connect', 'localhost:9001',\n '--runner-timeout', '5',\n '-v',\n ],\n stdout=open(filename, 'w'),\n stderr=open(os.devnull, 'w'),\n env=dict(os.environ, client_num=str(number)),\n cwd=tempdir,\n )\n self.number = number\n self.ready = False\n self.outfile = filename\n\n clients = [Client(1), Client(2)]\n while True:\n for client in clients:\n client.ready = exists(join(tempdir, str(client.number)))\n\n if all(client.ready for client in clients):\n # all ready!\n break\n\n # All of our tests are ready, send them 'go!' so they continue\n with open(join(tempdir, 'go!'), 'w'):\n pass\n\n assert_equal(clients[0].proc.wait(), 1)\n assert_equal(clients[1].proc.wait(), 1)\n assert_equal(server_process.wait(), 1)\n\n # The failing test should have been run on both clients\n for client in clients:\n client.output = open(client.outfile).read()\n assert_in('Intentional failure!', client.output)\n\n # The passing test should have been run on just one clients\n assert_equal(set([True, False]), set([\n 'test_pass' in client.output\n for client in clients\n ]))\n\n for client in clients:\n if 'test_pass' in client.output:\n assert_in(\n 'FAILED. 2 tests / 2 cases: 1 passed, 1 failed.',\n client.output\n )\n else:\n assert_in(\n 'FAILED. 
1 test / 1 case: 0 passed, 1 failed.',\n                    client.output,\n                )\n\n\nclass Test240Regression(TestCase):\n    def _test(self, testname):\n        \"\"\"Regression test for #240.\"\"\"\n        server_process = subprocess.Popen(\n            [\n                'python', '-m', 'testify.test_program',\n                testname,\n                '--serve', '9001',\n                '-v',\n            ],\n            stdout=open(os.devnull, 'w'),\n            stderr=open(os.devnull, 'w'),\n        )\n        client_1 = subprocess.Popen(\n            [\n                'python', '-m', 'testify.test_program',\n                '--connect', 'localhost:9001',\n                '-v',\n            ],\n            stdout=open(os.devnull, 'w'),\n            stderr=open(os.devnull, 'w'),\n        )\n        assert_equal(client_1.wait(), 1)\n        assert_equal(server_process.wait(), 1)\n\n    def test_one_test_one_fail(self):\n        self._test('test.failing_test')\n\n    def test_lots_of_fails(self):\n        \"\"\"Not sure why three fails triggers this behaviour as well...\"\"\"\n        self._test('test.lots_of_fail')\n","sub_path":"test/test_program_test.py","file_name":"test_program_test.py","file_ext":"py","file_size_in_byte":10236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"584766019","text":"from django import forms\nfrom django.conf import settings\nfrom django_comments.forms import CommentForm, ContentType\n\nimport datetime\n\nclass BasicCommentForm(CommentForm):\n    \"\"\"\n    A comment form which matches the default django.contrib.comments one, but\n    doesn't have a URL field.\n\n    \"\"\"\n    def __init__(self, *args, **kwargs):\n        # first call parent's constructor\n        super(BasicCommentForm, self).__init__(*args, **kwargs)\n        # there's a `fields` property now\n        self.fields.pop('name')\n        self.fields.pop('email')\n        self.fields.pop('url')\n        self.fields.pop('honeypot')\n\n    def get_comment_create_data(self, site_id):\n        # Use the data of the superclass, and add in the title field\n        return dict(\n            content_type = ContentType.objects.get_for_model(self.target_object),\n            object_pk = self.target_object._get_pk_val(),\n            comment = self.cleaned_data[\"comment\"],\n            submit_date = datetime.datetime.now(),\n            site_id = settings.SITE_ID,\n            is_public = True,\n            is_removed = False,\n        )\n","sub_path":"django/azotobacter/comments/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"488355959","text":"from sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine\nimport datetime\nimport sensitive\n\n\nxkom = 'x-kom'\nalto = 'alto'\nkomputronik = 'komputronik'\nproline = 'proline'\nmorele = 'morele'\nhelion = 'helion'\nmall = 'mall'\nibood = 'ibood'\nonepress = 'one press'\nsensus = 'sensus'\nseptem = 'septem'\nothertees = 'othertees'\n\nxkomURL = 'http://www.x-kom.pl/'\naltoURL = 'http://www.al.to/'\nkomputronikURL = 'http://www.komputronik.pl/'\nprolineURL = 'https://proline.pl/'\nmoreleURL = 'https://www.morele.net/'\nhelionURL = 'http://helion.pl/'\nmallURL = 'https://www.mall.pl/'\niboodURL = 'http://www.ibood.com/pl/pl/'\nonepressURL = 'http://onepress.pl/'\nsensusURL = 'http://sensus.pl/'\nseptemURL = 'http://septem.pl/'\notherteesURL = 'http://www.othertees.com/?lang=pl'\n\ncategoryElectonics = 'electronics'\ncategoryBooks = 'books'\ncategoryOther = 'other'\ncategoryClothes = 'clothes'\n\ncategoryList = [categoryElectonics,categoryBooks,categoryOther]\n\nclass DatabaseManager():\n\n\tdef __init__(self):\n\t\tBase = automap_base()\n\t\tdbAdress = sensitive.dbconnection\n\t\tengine = 
create_engine(dbAdress,echo=False)\n\n\t\tBase.prepare(engine, reflect=True)\n\n\t\tself.WebPages = Base.classes.web_pages\n\t\tself.WebPagesCategory = Base.classes.web_page_category\n\t\tself.HotShotList = Base.classes.hot_shot_list\n\n\t\tself.session = Session(engine)\n\n\tdef GetAllRecordsFromWebPage(self, webPage):\n\t\twebPage = self.session.query(self.WebPages).filter(self.WebPages.name_web_page == webPage)\n\t\thotShots = self.session.query(self.HotShotList).filter(self.HotShotList.web_page_id == webPage[0].id_web_page)\n\t\treturn hotShots\n\n\tdef AddNewHotShot(self, webPage, productName, oldPrice, newPrice, productURL, imgUrl):\n\t\ttime = datetime.datetime.now()\n\t\twebPage = self.session.query(self.WebPages).filter(self.WebPages.name_web_page == webPage)\n\t\tself.session.add(self.HotShotList(product_name = productName, old_price = oldPrice, new_price = newPrice,\n\t\t\t\t\tweb_page_id = webPage[0].id_web_page, product_url = productURL, img_url = imgUrl, last_check = time))\n\t\tself.session.commit()\n\t\tself.session.flush()\n\n\tdef UpgradeExistingHotShot(self, hotShotId, webPage, productName, oldPrice, newPrice, productURL, imgUrl):\n\t\ttime = datetime.datetime.now()\n\t\twebPage = self.session.query(self.WebPages).filter(self.WebPages.name_web_page == webPage)\n\t\tself.session.merge(self.HotShotList(id_hot_shot = hotShotId, product_name = productName, old_price = oldPrice, new_price = newPrice,\n\t\t\t\t\tweb_page_id = webPage[0].id_web_page, product_url = productURL, img_url = imgUrl, last_check = time))\n\t\tself.session.commit()\n\t\tself.session.flush()\n\n\tdef IfWebPageDoesNotExistCreate(self, pageName,pageURL,isActive,category):\n\t\tq = self.session.query(self.WebPages).filter(self.WebPages.name_web_page == pageName)\n\t\tcategory = self.session.query(self.WebPagesCategory).filter(self.WebPagesCategory.category_type == category)\n\t\tif not q.count() > 0:\n\t\t\tprint(\"adding new web page - \" + pageName)\n\t\t\tself.session.add(self.WebPages(name_web_page = pageName,url_web_page = pageURL, is_active_page = isActive, web_page_category = category[0].idweb_page_category ))\n\t\t\tself.session.commit()\n\t\t\tself.session.flush()\n\n\t\telse:\n\t\t\tprint(\"this web page - \" + pageName + \" - already exists\")\n\n\tdef CreateCategoryIfDoesNotExist(self, category):\n\t\tq = self.session.query(self.WebPagesCategory).filter(self.WebPagesCategory.category_type == category)\n\t\tif not q.count() > 0:\n\t\t\tprint(\"adding new category - \" + category)\n\t\t\tself.session.add(self.WebPagesCategory(category_type = category))\n\t\t\tself.session.commit()\n\t\t\tself.session.flush()\n\n\t\telse:\n\t\t\tprint(\"this category - \" + category + \" - already exists\")\n\n\tdef AddAllCategories(self):\n\t\tfor category in categoryList:\n\t\t\tself.CreateCategoryIfDoesNotExist(category)\n\n\tdef AddAllWebPages(self):\n\t\tself.IfWebPageDoesNotExistCreate(xkom,xkomURL,True,categoryElectonics)\n\t\tself.IfWebPageDoesNotExistCreate(alto, altoURL, True, categoryOther)\n\t\tself.IfWebPageDoesNotExistCreate(morele, moreleURL, True, categoryElectonics)\n\t\tself.IfWebPageDoesNotExistCreate(proline, prolineURL, True, categoryElectonics)\n\t\tself.IfWebPageDoesNotExistCreate(komputronik, komputronikURL, True, categoryElectonics)\n\t\tself.IfWebPageDoesNotExistCreate(mall, mallURL, True, categoryOther)\n\t\tself.IfWebPageDoesNotExistCreate(ibood, iboodURL, True, categoryOther)\n\t\tself.IfWebPageDoesNotExistCreate(onepress, onepressURL, True, 
categoryBooks)\n\t\tself.IfWebPageDoesNotExistCreate(helion, helionURL, True, categoryBooks)\n\t\tself.IfWebPageDoesNotExistCreate(sensus, sensusURL, True, categoryBooks)\n\t\tself.IfWebPageDoesNotExistCreate(septem, septemURL, True, categoryBooks)\n\t\tself.IfWebPageDoesNotExistCreate(othertees, otherteesURL, True, categoryClothes)\n","sub_path":"PythonWebPage/MySQL/DatabaseManager.py","file_name":"DatabaseManager.py","file_ext":"py","file_size_in_byte":4764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"595078406","text":"\"\"\"\nThis script uses libraries in arcgis to create connections from\na series of points (buildings) to the closest street\n\"\"\"\n\nimport os\nimport cea.globalvar\nimport cea.inputlocator\nfrom cea.interfaces.arcgis.modules import arcpy\nimport cea.config\n\n__author__ = \"Jimeno A. Fonseca\"\n__copyright__ = \"Copyright 2017, Architecture and Building Systems - ETH Zurich\"\n__credits__ = [\"Jimeno A. Fonseca\"]\n__license__ = \"MIT\"\n__version__ = \"0.1\"\n__maintainer__ = \"Daren Thomas\"\n__email__ = \"cea@arch.ethz.ch\"\n__status__ = \"Production\"\n\ndef calc_connectivity_network(path_arcgis_db, path_streets_shp, path_connection_point_buildings_shp, path_potential_network):\n    \"\"\"\n    This script outputs a potential network connecting a series of building points to the closest street network\n    the street network is assumed to be a good path to the district heating or cooling network\n\n    :param path_arcgis_db: path to default ArcGIS database\n    :param path_streets_shp: path to street shapefile\n    :param path_connection_point_buildings_shp: path to substations in buildings (or close by)\n    :param path_potential_network: output path shapefile\n    :return:\n    \"\"\"\n    # first add distribution network to each building from the roads\n\n    arcpy.env.overwriteOutput = True\n    spatialReference = arcpy.Describe(path_connection_point_buildings_shp).spatialReference\n    memorybuildings = path_arcgis_db + \"\\\\\" + \"points\"\n    merge = path_arcgis_db + \"\\\\\" + \"merge\"\n    Newlines = path_arcgis_db + \"\\\\\" + \"linesToerase\"\n    Finallines = path_arcgis_db + \"\\\\\" + \"final_line\"\n\n    arcpy.CopyFeatures_management(path_connection_point_buildings_shp, memorybuildings)\n    arcpy.Near_analysis(memorybuildings, path_streets_shp, location=True, angle=True)\n    arcpy.MakeXYEventLayer_management(memorybuildings, \"NEAR_X\", \"NEAR_Y\", \"Line_Points_Layer\", spatialReference)\n    arcpy.FeatureClassToFeatureClass_conversion(\"Line_Points_Layer\", path_arcgis_db, \"Line_points\")\n    arcpy.Append_management(path_arcgis_db + '\\\\' + \"Line_points\", memorybuildings, \"No_Test\")\n    arcpy.MakeFeatureLayer_management(memorybuildings, \"POINTS_layer\")\n    arcpy.env.workspace = path_arcgis_db\n    arcpy.PointsToLine_management(memorybuildings, Newlines, \"Name\", \"#\", \"NO_CLOSE\")\n    arcpy.Merge_management([path_streets_shp, Newlines], merge)\n    arcpy.FeatureToLine_management(merge, path_potential_network) # necessary to match vertices\n\ndef main(config):\n    assert os.path.exists(config.scenario), 'Scenario not found: %s' % config.scenario\n    locator = cea.inputlocator.InputLocator(scenario=config.scenario)\n\n    path_streets_shp = locator.get_street_network() # shapefile with the stations\n    path_connection_point_buildings_shp = locator.get_connection_point() # substation, it can be the centroid of the building\n    path_potential_network = locator.get_connectivity_potential() # shapefile, location of output.\n    path_default_arcgis_db = 
os.path.expanduser(os.path.join('~', 'Documents', 'ArcGIS', 'Default.gdb'))\n calc_connectivity_network(path_default_arcgis_db, path_streets_shp, path_connection_point_buildings_shp,\n path_potential_network)\n\nif __name__ == '__main__':\n main(cea.config.Configuration())","sub_path":"cea/technologies/thermal_network/network_layout/connectivity_potential.py","file_name":"connectivity_potential.py","file_ext":"py","file_size_in_byte":3235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"377927146","text":"import numpy as np\nimport gym\nimport random\nimport matplotlib\nmatplotlib.use('Qt5Agg')\nimport matplotlib.pyplot as plt\n\n#PART-1\n#Creating environment\nenv = gym.make('Taxi-v2')\nenv.render()\n\n#PART-2\n#Q-table\naction_size = env.action_space.n\nstate_size = env.observation_space.n\n\nprint('action :', action_size, 'state :', state_size)\n\nqtable = np.zeros((state_size, action_size))\n\n#PART-3\n#Hyperparameters\ntotal_episodes = 50000\ntotal_test_episodes = 100\nmax_steps = 99\t\t\t#maximum steps per episode\n\nlearning_rate = 0.7\ngamma = 0.618\t\t\t#discounting rate\n\n#Exploration parameter\nepsilon = 1\nmax_epsilon = 1\nmin_epsilon = 0.01\ndecay_rate = 0.01\n\n#PART-4\n#Q-Learning Algorithm\nfor episode in range(total_episodes):\n\tstate = env.reset()\n\tstep = 0\n\tdone = False\n\t\n\tfor step in range(max_steps):\n\t\texplore_exploit_tradeoff = random.uniform(0, 1)\n\t\t#If this value > epsilon, exploit the already gained knowledge\n\t\tif explore_exploit_tradeoff > epsilon:\n\t\t\taction = np.argmax(qtable[state, :])\n\t\t\n\t\t#Else take random action\n\t\telse:\n\t\t\taction = env.action_space.sample()\n\t\t\n\t\t#Take the action and enter new state and receive reward\n\t\tnew_state, reward, done, info = env.step(action)\n\t\t\n\t\t#Update qtable\n\t\tqtable[state, action] = qtable[state, action] + learning_rate * (reward + gamma * np.max(qtable[new_state, :]) - qtable[state, action])\n\t\tstate = new_state\n\t\t\n\t\tif done == True:\t\t\t#episode finished\n\t\t\tbreak\n\t\n\t#Reduce epsilon\n\tepsilon = min_epsilon + (max_epsilon - min_epsilon) * np.exp(-decay_rate * episode)\n\n#PART-5\n#Test qtable to play the game\nenv.reset()\nrewards = []\n\nfor episode in range(total_test_episodes):\n\tprint('*******************')\n\tprint('Episode ', episode)\n\tstate = env.reset()\n\tstep = 0\n\tdone = False\n\ttotal_rewards = 0\n\t\n\tfor step in range(max_steps):\n\t\tenv.render()\n\t\taction = np.argmax(qtable[state, :])\n\t\t\n\t\tnew_state, reward, done, info = env.step(action)\n\t\t\n\t\ttotal_rewards += reward\n\t\t\n\t\tstate = new_state\n\t\t\n\t\tif done == True:\n\t\t\trewards.append(total_rewards)\n\t\t\tbreak\n\nenv.close()\nprint('Avg. 
reward', str(sum(rewards)/total_test_episodes))\n\t\n\n\t\t\n","sub_path":"Taxi_v2.py","file_name":"Taxi_v2.py","file_ext":"py","file_size_in_byte":2071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"456099807","text":"# -*- coding: utf-8 -*-\nimport re\n\n\ndef re_start_end(s: str, k: str) -> None:\n    \"\"\"\n    >>> re_start_end('aaadaa', 'aa')\n    (0, 1)\n    (1, 2)\n    (4, 5)\n    \"\"\"\n    pattern = re.compile(k)\n    r = pattern.search(s)\n    if not r:\n        print(\"(-1, -1)\")\n    while r:\n        print(\"({0}, {1})\".format(r.start(), r.end() - 1))\n        r = pattern.search(s, r.start() + 1)\n\n\nif __name__ == '__main__':\n    s, k = (input() for _ in range(2))\n    re_start_end(s, k)\n","sub_path":"python/easy/regex-and-parsing/re_start_end.py","file_name":"re_start_end.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"49328751","text":"from django.urls import path\nfrom api import views\n\nfrom rest_framework_simplejwt.views import (\n    TokenObtainPairView,\n    TokenRefreshView,\n)\n\nurlpatterns = [\n    path('login/', TokenObtainPairView.as_view(), name=\"apilogin\"),\n\n    path('register/', views.Register.as_view(), name='register'),\n    path('profile/', views.Profile.as_view(), name='profile'),\n\n    path('eventlist/', views.OwnerEventList.as_view(), name='owner-list'),\n    path('eventlist/', views.EventList.as_view(), name='owner-list'),\n    path('upcomingeventlist/', views.UpEventList.as_view(), name='owner-list'),\n\n    path('bookedlist/', views.BookedEventsList.as_view(), name='user-booking-list'),\n    path('create/', views.CreateEvent.as_view(), name='create-event-api'),\n\n    path('<int:pk>/update/', views.UpdateEvent.as_view(), name='update-event-api'),\n\n    path('<int:pk>/book/', views.BookEvent.as_view(), name='book-event-api'),\n\n    path('event/<int:pk>/detail/', views.EventDetails.as_view(), name=\"event-detail-api\"),\n    path('event/<int:pk>/attendants/', views.AttendantsView.as_view(), name=\"event-attendants\"),\n]\n","sub_path":"api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"602158523","text":"#imports\nimport sys\n\n#Declare a main function\ndef main():\n\n    while True:\n\n        try:\n            #if the user did not enter the file name as a parameter\n            if len(sys.argv) == 1:\n                #prompt the user to enter an input file name\n                fileName = input(\"\\nInput a file name: \")\n                #open the file\n                inFile = open(fileName, \"r+\")\n                #Declare a file to be the output file\n                outputFile = open(\"output.txt\", \"w+\")\n\n            else:\n\n                fileName = sys.argv[1]\n                #open the file\n                inFile = open(fileName, \"r+\")\n                #Declare a file to be the output file\n                outputFile = open(\"output.txt\", \"w+\")\n\n            outputFileMethod(inFile, outputFile)\n\n            #Close the files\n            inFile.close()\n            outputFile.close()\n\n            break\n\n        except IOError:\n            print(\"File could not be found.\")\n\n\ndef outputFileMethod(inFile, outputFile):\n    #Create a variable to print the line count\n    line_count = 0\n\n    #using a for loop, write each line from the\n    #input file to the output file\n\n    for line in inFile.readlines():\n        line_count += 1\n        outputFile.write(\"/*\" + str(line_count) + \"*/\" + line)\n\n\nmain()\n","sub_path":"Homework-7/7_4.py","file_name":"7_4.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"359150783","text":"import pygame, sys\nfrom pygame.locals import *\n\n\n\ndef window():\n pygame.init()\n size = 800, 600\n speed = [2, 2]\n white = 255, 255, 255\n screen = pygame.display.set_mode(size) \n return screen\n \n\ndef chatBox(screen):\n size_rect = pygame.Rect((0, 500), (600,100))\n chatbox = screen.subsurface(size_rect)\n return chatbox\n\n\n\ndef userInterface(screen):\n size_rect = pygame.Rect((600,0), (200, 600))\n UI = screen.subsurface(size_rect)\n return UI\n\n\ndef game(windows):\n screen = windows[\"screen\"]\n chatbox = windows[\"chatbox\"]\n UI = windows[\"userinterface\"]\n while 1:\n chatbox.fill((255,255,255))\n UI.fill((255,255,255))\n pygame.display.flip()\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n return 0\n \n\n\ndef main():\n windows = dict()\n windows[\"screen\"] = window()\n screen = windows[\"screen\"]\n windows[\"chatbox\"] = chatBox(screen)\n windows[\"userinterface\"] = userInterface(screen)\n game(windows)\n\nif __name__ == '__main__':\n main()\n","sub_path":"User Interface.py","file_name":"User Interface.py","file_ext":"py","file_size_in_byte":1103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"127291035","text":"from __future__ import absolute_import\nfrom __future__ import print_function\nimport sys\nimport os\nimport re\nimport collections\nimport tempfile\nsys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))\n\nimport module\nimport vtypes\nimport function\nimport task\n\nimport pyverilog.vparser.ast as vast\nfrom pyverilog.vparser.parser import VerilogCodeParser\nfrom pyverilog.dataflow.modulevisitor import ModuleVisitor\n\n#-------------------------------------------------------------------------------\n# User interfaces to read Verilog source code\n#-------------------------------------------------------------------------------\ndef read_verilog_stubmodule(*filelist, **opt):\n module_dict = to_module_dict(*filelist, **opt)\n stubs = collections.OrderedDict([ (name, module.StubModule(name)) \n for name in module_dict.keys() ])\n return stubs\n \ndef read_verilog_module(*filelist, **opt):\n module_dict = to_module_dict(*filelist, **opt)\n visitor = VerilogReadVisitor()\n modules = collections.OrderedDict([ (name, visitor.visit(m) )\n for name, m in module_dict.items() ])\n return modules\n\ndef read_verilog_module_str(code, encode='utf-8'):\n tmp = tempfile.NamedTemporaryFile()\n tmp.write(code.encode(encode))\n tmp.read()\n filename = tmp.name\n ret = read_verilog_module(filename)\n tmp.close()\n return ret\n \ndef read_verilog_stubmodule_str(code, encode='utf-8'):\n tmp = tempfile.NamedTemporaryFile()\n tmp.write(code.encode(encode))\n tmp.read()\n filename = tmp.name\n ret = read_verilog_stubmodule(filename)\n tmp.close()\n return ret\n \n#-------------------------------------------------------------------------------\ndef to_module_dict(*filelist, **opt):\n ast = to_ast(*filelist, **opt)\n \n module_visitor = ModuleVisitor()\n module_visitor.visit(ast)\n module_names = module_visitor.get_modulenames()\n moduleinfotable = module_visitor.get_moduleinfotable()\n moduleinfo = moduleinfotable.getDefinitions()\n module_dict = collections.OrderedDict([ (n, d.definition) for n, d in moduleinfo.items() ])\n\n return module_dict\n\n#-------------------------------------------------------------------------------\ndef to_ast(*filelist, **opt):\n include = opt['include'] if 'include' in opt else ()\n define = opt['define'] if 'define' 
in opt else ()\n    if not isinstance(include, tuple) and not isinstance(include, list):\n        raise TypeError('\"include\" option of read_verilog must be tuple or list, not %s' %\n                        type(include))\n    if not isinstance(define, tuple) and not isinstance(define, list):\n        raise TypeError('\"define\" option of read_verilog must be tuple or list, not %s' %\n                        type(define))\n    \n    code_parser = VerilogCodeParser(filelist,\n                                    preprocess_include=include,\n                                    preprocess_define=define)\n    ast = code_parser.parse()\n\n    return ast\n\n#-------------------------------------------------------------------------------\ndef str_to_signed(s):\n    targ = s.replace('_','')\n    match = re.search(r's(.+)', targ)\n    if match is not None:\n        return True\n    return False\n\ndef str_to_value(s):\n    targ = s.replace('_','')\n    match = re.search(r'h(.+)', targ)\n    if match is not None:\n        return int(match.group(1), 16), 16\n    match = re.search(r'd(.+)', targ)\n    if match is not None:\n        return int(match.group(1), 10), 10\n    match = re.search(r'o(.+)', targ)\n    if match is not None:\n        return int(match.group(1), 8), 8\n    match = re.search(r'b(.+)', targ)\n    if match is not None:\n        return int(match.group(1), 2), 2\n    return int(targ, 10), None\n    \ndef str_to_width(s):\n    targ = s.replace('_','')\n    match = re.search(r'(.+)\\'h.+', targ)\n    if match is not None:\n        return int(match.group(1), 10)\n    match = re.search(r'(.+)\\'d.+', targ)\n    if match is not None:\n        return int(match.group(1), 10)\n    match = re.search(r'(.+)\\'o.+', targ)\n    if match is not None:\n        return int(match.group(1), 10)\n    match = re.search(r'(.+)\\'b.+', targ)\n    if match is not None:\n        return int(match.group(1), 10)\n    return None\n\ndef to_tuple(s):\n    if not isinstance(s, (list, tuple)):\n        return tuple([s])\n    return s\n\n#-------------------------------------------------------------------------------\nclass VerilogReadVisitor(object):\n    def __init__(self):\n        self.m = None\n        self.module_stack = []\n\n    def push_module(self, m):\n        self.module_stack.append(self.m)\n        self.m = m\n\n    def pop_module(self):\n        self.m = self.module_stack.pop()\n    \n    def add_object(self, obj):\n        if isinstance(self.m, module.Module):\n            self.m.add_object(obj)\n    \n    def generic_visit(self, node):\n        for c in node.children():\n            self.visit(c)\n        #raise TypeError(\"Unsupported object '%s'\" % str(type(node)))\n    \n    def visit(self, node):\n        method = 'visit_' + node.__class__.__name__\n        visitor = getattr(self, method, self.generic_visit)\n        return visitor(node)\n\n    def visit_ModuleDef(self, node):\n        # create new Verilog module\n        m = module.Module(node.name)\n        self.push_module(m)\n        self.generic_visit(node)\n        self.pop_module()\n        return m\n\n    def visit_Paramlist(self, node):\n        params = []\n        for param in node.params:\n            p = self.visit(param)\n            params.append(p)\n        return params\n    \n    def visit_Portlist(self, node):\n        ports = []\n        for port in node.ports:\n            p = self.visit(port)\n            ports.append(p)\n        return ports\n    \n    def visit_Port(self, node):\n        if node.type is None: return None\n        name = node.name\n        width = self.visit(node.width) if node.width is not None else None\n        _type = getattr(vtypes, node.type, None)\n        if _type is None:\n            raise TypeError(\"No such port type '%s'\" % node.type)\n        p = _type(name, width)\n        self.add_object(p)\n        return p\n    \n    def visit_Width(self, node):\n        msb = self.visit(node.msb)\n        width = msb + 1\n        return width\n    \n    def visit_Length(self, node):\n        lsb = self.visit(node.lsb)\n        length = lsb + 1\n        return length\n    \n    def visit_Identifier(self, node):\n        if node.scope is not None:\n            labels = self.visit(node.scope)\n            
labels.append(node.name)\n return vtypes.Scope(*labels)\n if not isinstance(self.m, module.Module):\n return vtypes.AnyType(node.name)\n return self.m.find_identifier(node.name)\n \n def visit_IntConst(self, node):\n value, base = str_to_value(node.value)\n width = str_to_width(node.value)\n signed = str_to_signed(node.value)\n return vtypes.Int(value, width, base, signed)\n\n def visit_FloatConst(self, node):\n return vtypes.Float(node.value)\n \n def visit_StringConst(self, node):\n return vtypes.Str(node.value)\n \n def visit_Input(self, node):\n name = node.name\n width = self.visit(node.width) if node.width is not None else None\n signed = node.signed\n obj = vtypes.Input(name, width, signed=signed)\n self.add_object(obj)\n return obj\n \n def visit_Output(self, node):\n name = node.name\n width = self.visit(node.width) if node.width is not None else None\n signed = node.signed\n obj = vtypes.Output(name, width, signed=signed)\n self.add_object(obj)\n return obj\n \n def visit_Inout(self, node):\n name = node.name\n width = self.visit(node.width) if node.width is not None else None\n signed = node.signed\n obj = vtypes.Inout(name, width, signed=signed)\n self.add_object(obj)\n return obj\n \n def visit_Tri(self, node):\n name = node.name\n width = self.visit(node.width) if node.width is not None else None\n signed = node.signed\n obj = vtypes.Tri(name, width, signed=signed)\n self.add_object(obj)\n return obj\n \n def visit_Wire(self, node):\n name = node.name\n width = self.visit(node.width) if node.width is not None else None\n signed = node.signed\n obj = vtypes.Wire(name, width, signed=signed)\n self.add_object(obj)\n return obj\n \n def visit_Reg(self, node):\n name = node.name\n width = self.visit(node.width) if node.width is not None else None\n signed = node.signed\n obj = vtypes.Reg(name, width, signed=signed)\n self.add_object(obj)\n return obj\n \n def visit_WireArray(self, node):\n name = node.name\n width = self.visit(node.width) if node.width is not None else None\n length = self.visit(node.length)\n signed = node.signed\n obj = vtypes.Wire(name, width, length=length, signed=signed)\n self.add_object(obj)\n return obj\n \n def visit_RegArray(self, node):\n name = node.name\n width = self.visit(node.width) if node.width is not None else None\n length = self.visit(node.length)\n signed = node.signed\n obj = vtypes.Reg(name, width, length=length, signed=signed)\n self.add_object(obj)\n return obj\n \n def visit_Integer(self, node):\n name = node.name\n width = self.visit(node.width) if node.width is not None else None\n signed = node.signed\n obj = vtypes.Integer(name, width, signed=signed)\n self.add_object(obj)\n return obj\n \n def visit_Real(self, node):\n name = node.name\n width = self.visit(node.width) if node.width is not None else None\n obj = vtypes.Real(name, width)\n self.add_object(obj)\n return obj\n \n def visit_Genvar(self, node):\n name = node.name\n width = self.visit(node.width) if node.width is not None else None\n obj = vtypes.Genvar(name, width)\n self.add_object(obj)\n return obj\n \n def visit_Ioport(self, node):\n first = self.visit(node.first)\n second = self.visit(node.second) if node.second is not None else None\n return (first, second)\n \n def visit_Parameter(self, node):\n name = node.name\n value = self.visit(node.value)\n width = self.visit(node.width) if node.width is not None else None\n signed = node.signed\n param = vtypes.Parameter(name, value, width, signed)\n self.add_object(param)\n return param\n\n def visit_Localparam(self, 
node):\n        name = node.name\n        value = self.visit(node.value)\n        width = self.visit(node.width) if node.width is not None else None\n        signed = node.signed\n        param = vtypes.Localparam(name, value, width, signed)\n        self.add_object(param)\n        return param\n    \n    def visit_Supply(self, node):\n        name = node.name\n        value = self.visit(node.value)\n        width = self.visit(node.width) if node.width is not None else None\n        signed = node.signed\n        param = vtypes.Supply(name, value, width, signed)\n        self.add_object(param)\n        return param\n    \n    def visit_Decl(self, node):\n        decl = [ self.visit(d) for d in node.list ]\n        return decl\n    \n    def visit_Concat(self, node):\n        vars = [ self.visit(var) for var in node.list ]\n        return vtypes.Cat(*vars)\n    \n    def visit_LConcat(self, node):\n        vars = [ self.visit(var) for var in node.list ]\n        return vtypes.Cat(*vars)\n    \n    def visit_Repeat(self, node):\n        var = self.visit(node.value)\n        times = self.visit(node.times)\n        return vtypes.Repeat(var, times)\n    \n    def visit_Partselect(self, node):\n        var = self.visit(node.var)\n        msb = self.visit(node.msb)\n        lsb = self.visit(node.lsb)\n        return vtypes.Slice(var, msb, lsb)\n    \n    def visit_Pointer(self, node):\n        var = self.visit(node.var)\n        pos = self.visit(node.ptr)\n        return vtypes.Pointer(var, pos)\n    \n    def visit_Lvalue(self, node):\n        return self.visit(node.var)\n    \n    def visit_Rvalue(self, node):\n        return self.visit(node.var)\n    \n    def visit_Uplus(self, node):\n        return vtypes.Uplus(self.visit(node.right))\n    \n    def visit_Uminus(self, node):\n        return vtypes.Uminus(self.visit(node.right))\n    \n    def visit_Ulnot(self, node):\n        return vtypes.Ulnot(self.visit(node.right))\n    \n    def visit_Unot(self, node):\n        return vtypes.Unot(self.visit(node.right))\n    \n    def visit_Uand(self, node):\n        return vtypes.Uand(self.visit(node.right))\n\n    def visit_Unand(self, node):\n        return vtypes.Unand(self.visit(node.right))\n    \n    def visit_Uor(self, node):\n        return vtypes.Uor(self.visit(node.right))\n\n    def visit_Unor(self, node):\n        return vtypes.Unor(self.visit(node.right))\n    \n    def visit_Uxor(self, node):\n        return vtypes.Uxor(self.visit(node.right))\n\n    def visit_Uxnor(self, node):\n        return vtypes.Uxnor(self.visit(node.right))\n    \n    def visit_Power(self, node):\n        left = self.visit(node.left)\n        right = self.visit(node.right)\n        return vtypes.Power(left, right)\n    \n    def visit_Times(self, node):\n        left = self.visit(node.left)\n        right = self.visit(node.right)\n        return vtypes.Times(left, right)\n    \n    def visit_Divide(self, node):\n        left = self.visit(node.left)\n        right = self.visit(node.right)\n        return vtypes.Divide(left, right)\n    \n    def visit_Mod(self, node):\n        left = self.visit(node.left)\n        right = self.visit(node.right)\n        return vtypes.Mod(left, right)\n    \n    def visit_Plus(self, node):\n        left = self.visit(node.left)\n        right = self.visit(node.right)\n        return vtypes.Plus(left, right)\n    \n    def visit_Minus(self, node):\n        left = self.visit(node.left)\n        right = self.visit(node.right)\n        return vtypes.Minus(left, right)\n    \n    def visit_Sll(self, node):\n        left = self.visit(node.left)\n        right = self.visit(node.right)\n        return vtypes.Sll(left, right)\n    \n    def visit_Srl(self, node):\n        left = self.visit(node.left)\n        right = self.visit(node.right)\n        return vtypes.Srl(left, right)\n    \n    def visit_Sra(self, node):\n        left = self.visit(node.left)\n        right = self.visit(node.right)\n        return vtypes.Sra(left, right)\n    \n    def visit_LessThan(self, node):\n        left = self.visit(node.left)\n        right = self.visit(node.right)\n        return vtypes.LessThan(left, right)\n    \n    def visit_GreaterThan(self, node):\n        left = 
self.visit(node.left)\n right = self.visit(node.right)\n return vtypes.GreaterThan(left, right)\n \n def visit_LessEq(self, node):\n left = self.visit(node.left)\n right = self.visit(node.right)\n return vtypes.LessEq(left, right)\n \n def visit_GreaterEq(self, node):\n left = self.visit(node.left)\n right = self.visit(node.right)\n return vtypes.GreaterEq(left, right)\n \n def visit_Eq(self, node):\n left = self.visit(node.left)\n right = self.visit(node.right)\n return vtypes.Eq(left, right)\n \n def visit_NotEq(self, node):\n left = self.visit(node.left)\n right = self.visit(node.right)\n return vtypes.NotEq(left, right)\n \n def visit_Eql(self, node):\n left = self.visit(node.left)\n right = self.visit(node.right)\n return vtypes.Eql(left, right)\n \n def visit_NotEql(self, node):\n left = self.visit(node.left)\n right = self.visit(node.right)\n return vtypes.NotEql(left, right)\n \n def visit_And(self, node):\n left = self.visit(node.left)\n right = self.visit(node.right)\n return vtypes.And(left, right)\n \n def visit_Xor(self, node):\n left = self.visit(node.left)\n right = self.visit(node.right)\n return vtypes.Xor(left, right)\n \n def visit_Xnor(self, node):\n left = self.visit(node.left)\n right = self.visit(node.right)\n return vtypes.Xnor(left, right)\n \n def visit_Or(self, node):\n left = self.visit(node.left)\n right = self.visit(node.right)\n return vtypes.Or(left, right)\n \n def visit_Land(self, node):\n left = self.visit(node.left)\n right = self.visit(node.right)\n return vtypes.Land(left, right)\n \n def visit_Lor(self, node):\n left = self.visit(node.left)\n right = self.visit(node.right)\n return vtypes.Lor(left, right)\n \n def visit_Cond(self, node):\n condition = self.visit(node.cond)\n true_value = self.visit(node.true_value)\n false_value = self.visit(node.false_value)\n return vtypes.Cond(condition, true_value, false_value)\n \n def visit_Assign(self, node):\n left = self.visit(node.left)\n right = self.visit(node.right)\n ldelay= self.visit(node.ldelay.value) if node.ldelay is not None else None\n rdelay = self.visit(node.rdelay.value) if node.rdelay is not None else None\n subst = vtypes.Subst(left, right, ldelay=ldelay, rdelay=rdelay)\n assign = vtypes.Assign(subst)\n self.add_object(assign)\n return assign\n \n def visit_Always(self, node):\n sensitivity = self.visit(node.sens_list)\n statement = to_tuple(self.visit(node.statement))\n always = vtypes.Always(*sensitivity)\n always = always(*statement)\n self.add_object(always)\n return always\n \n def visit_SensList(self, node):\n return [ self.visit(s) for s in node.list ]\n \n def visit_Sens(self, node):\n if node.type == 'posedge':\n sig = self.visit(node.sig)\n return vtypes.Posedge(sig)\n if node.type == 'negedge':\n sig = self.visit(node.sig)\n return vtypes.Negedge(sig)\n if node.type == 'all':\n return vtypes.SensitiveAll()\n if node.type == 'level':\n sig = self.visit(node.sig)\n return sig\n \n def visit_BlockingSubstitution(self, node):\n left = self.visit(node.left)\n right = self.visit(node.right)\n ldelay= self.visit(node.ldelay.delay) if node.ldelay is not None else None\n rdelay = self.visit(node.rdelay.delay) if node.rdelay is not None else None\n return vtypes.Subst(left, right, blk=True, ldelay=ldelay, rdelay=rdelay)\n \n def visit_NonblockingSubstitution(self, node):\n left = self.visit(node.left)\n right = self.visit(node.right)\n ldelay= self.visit(node.ldelay.delay) if node.ldelay is not None else None\n rdelay = self.visit(node.rdelay.delay) if node.rdelay is not None else None\n 
return vtypes.Subst(left, right, blk=False, ldelay=ldelay, rdelay=rdelay)\n    \n    def visit_IfStatement(self, node):\n        if isinstance(self.m, (module.GenerateFor, module.GenerateIf)):\n            return self._visit_GenerateIf(node)\n        condition = self.visit(node.cond)\n        true_statement = self.visit(node.true_statement)\n        false_statement = (self.visit(node.false_statement)\n                           if node.false_statement is not None else None)\n        true_statement = to_tuple(true_statement)\n        false_statement = (to_tuple(false_statement)\n                           if false_statement is not None else None)\n        _if = vtypes.If(condition)\n        _if = _if(*true_statement)\n        if false_statement is not None:\n            _if = _if(*false_statement)\n        return _if\n    \n    def visit_ForStatement(self, node):\n        if isinstance(self.m, (module.GenerateFor, module.GenerateIf)):\n            return self._visit_GenerateFor(node)\n        pre = self.visit(node.pre)\n        condition = self.visit(node.cond)\n        post = self.visit(node.post)\n        statement = to_tuple(self.visit(node.statement))\n        _for = vtypes.For(pre, condition, post)\n        _for = _for(*statement)\n        return _for\n    \n    def visit_WhileStatement(self, node):\n        condition = self.visit(node.cond)\n        statement = to_tuple(self.visit(node.statement))\n        _while = vtypes.While(condition)\n        _while = _while(*statement)\n        return _while\n    \n    def visit_CaseStatement(self, node):\n        comp = self.visit(node.comp)\n        statement = tuple([ self.visit(case) for case in node.caselist ])\n        case = vtypes.Case(comp)\n        case = case(*statement)\n        return case\n    \n    def visit_CasexStatement(self, node):\n        comp = self.visit(node.comp)\n        statement = tuple([ self.visit(case) for case in node.caselist ])\n        case = vtypes.Casex(comp)\n        case = case(*statement)\n        return case\n    \n    def visit_Case(self, node):\n        condition = tuple([ self.visit(c) for c in node.cond ])\n        statement = to_tuple(self.visit(node.statement))\n        when = vtypes.When(*condition)\n        when = when(*statement)\n        return when\n    \n    def visit_Block(self, node):\n        statements = [ self.visit(statement) for statement in node.statements ]\n        return statements\n    \n    def visit_Initial(self, node):\n        statement = to_tuple(self.visit(node.statement))\n        initial = vtypes.Initial(*statement)\n        self.add_object(initial)\n        return initial\n    \n    def visit_EventStatement(self, node):\n        sensitivity = self.visit(node.sens_list)\n        event = vtypes.Event(*sensitivity)\n        return event\n    \n    def visit_WaitStatement(self, node):\n        condition = self.visit(node.cond)\n        statement = to_tuple(self.visit(node.statement)) if node.statement else None\n        wait = vtypes.Wait(condition)\n        if statement: wait = wait(*statement)\n        return wait\n    \n    def visit_ForeverStatement(self, node):\n        statement = to_tuple(self.visit(node.statement))\n        forever = vtypes.Forever(*statement)\n        return forever\n    \n    def visit_DelayStatement(self, node):\n        value = self.visit(node.delay)\n        delay = vtypes.Delay(value)\n        return delay\n    \n    def visit_InstanceList(self, node):\n        return [ self.visit(instance) for instance in node.instances ]\n    \n    def visit_Instance(self, node):\n        m = module.StubModule(node.module)\n        instname = node.name\n        params = [ self.visit(param) for param in node.parameterlist ]\n        ports = [ self.visit(port) for port in node.portlist ]\n        if node.array is not None:\n            raise ValueError(\"Instance array is not currently supported.\")\n        instance = module.Instance(m, instname, params, ports)\n        self.add_object(instance)\n        return instance\n    \n    def visit_ParamArg(self, node):\n        paramname = node.paramname\n        argname = self.visit(node.argname)\n        return (paramname, argname)\n    \n    def visit_PortArg(self, node):\n        
portname = node.portname\n argname = self.visit(node.argname) if node.argname is not None else None\n return (portname, argname)\n \n def visit_Function(self, node):\n name = node.name\n width = self.visit(node.retwidth) if node.retwidth is not None else None\n func = function.Function(name, width)\n statement = [ self.visit(s) for s in node.statement ]\n body = []\n for s in statement:\n if isinstance(s, vtypes.Input):\n func.Input(s.name, s.width, s.length, s.signed, s.value)\n elif isinstance(s, vtypes.Reg):\n func.Reg(s.name, s.width, s.length, s.signed, s.value)\n elif isinstance(s, vtypes.Integer):\n func.Integer(s.name, s.width, s.length, s.signed, s.value)\n else:\n body.append(s)\n func.Body(*body)\n self.add_object(func)\n return func\n \n def visit_FunctionCall(self, node):\n name = self.visit(node.name)\n args = tuple([ self.visit(arg) for arg in node.args ])\n call = function.FunctionCall(name, args)\n return call\n \n def visit_Task(self, node):\n name = node.name\n _task = task.Task(name)\n statement = [ self.visit(s) for s in node.statement ]\n body = []\n for s in statement:\n if isinstance(s, vtypes.Input):\n _task.Input(s.name, s.width, s.length, s.signed, s.value)\n elif isinstance(s, vtypes.Reg):\n _task.Reg(s.name, s.width, s.length, s.signed, s.value)\n elif isinstance(s, vtypes.Integer):\n _task.Integer(s.name, s.width, s.length, s.signed, s.value)\n else:\n body.append(s)\n _task.Body(*body)\n self.add_object(_task)\n return _task\n \n def visit_TaskCall(self, node):\n name = self.visit(node.name)\n args = tuple([ self.visit(arg) for arg in node.args ])\n call = task.TaskCall(name, args)\n return call\n\n def _visit_GenerateFor(self, item):\n pre = self.visit(item.pre)\n cond = self.visit(item.cond)\n post = self.visit(item.post)\n scope = (item.statement.scope\n if isinstance(item.statement, vast.Block)\n else None)\n _for = module.GenerateFor(pre, cond, post, scope)\n ret = _for\n self.add_object(_for)\n self.push_module(_for)\n statement = self.visit(item.statement)\n self.pop_module()\n return ret\n \n def _visit_GenerateIf(self, item):\n cond = self.visit(item.cond)\n true_scope = (item.true_statement.scope\n if isinstance(item.true_statement, vast.Block)\n else None)\n false_scope = (item.false_statement.scope\n if isinstance(item.false_statement, vast.Block)\n else None)\n _if_true = module.GenerateIf(cond, true_scope)\n ret = _if_true\n self.add_object(_if_true)\n self.push_module(_if_true)\n statement = self.visit(item.true_statement)\n self.pop_module()\n _if_false = _if_true.Else(false_scope)\n self.push_module(_if_false)\n statement = (self.visit(item.false_statement)\n if item.false_statement is not None else None)\n self.pop_module()\n return ret\n \n def visit_GenerateStatement(self, node):\n ret = []\n for item in node.items:\n if isinstance(item, vast.ForStatement):\n ret.append( self._visit_GenerateFor(item) )\n elif isinstance(item, vast.IfStatement):\n ret.append( self._visit_GenerateIf(item) )\n else:\n raise TypeError(\"Only generate-for and generate-if statements are supported.\")\n return ret\n \n def visit_SystemCall(self, node):\n cmd = node.syscall\n args = tuple([ self.visit(arg) for arg in node.args ])\n systask = vtypes.SystemTask(cmd, *args)\n return systask\n \n def visit_IdentifierScopeLabel(self, node):\n if node.loop is None:\n return node.name\n index = self.visit(node.loop)\n return vtypes.ScopeIndex(node.name, index)\n \n def visit_IdentifierScope(self, node):\n args = [ self.visit(label) for label in node.labellist ]\n return 
args\n    \n    def visit_Pragma(self, node):\n        raise TypeError(\"Pragma is not currently supported.\")\n    \n    def visit_PragmaEntry(self, node):\n        raise TypeError(\"Pragma is not currently supported.\")\n    \n    def visit_Disable(self, node):\n        raise TypeError(\"Disable is not currently supported.\")\n    \n    def visit_ParallelBlock(self, node):\n        raise TypeError(\"Fork/Join is not currently supported.\")\n    \n    def visit_SingleStatement(self, node):\n        statement = self.visit(node.statement)\n        if isinstance(statement, vtypes.Delay): return statement\n        return vtypes.SingleStatement(statement)\n","sub_path":"veriloggen/from_verilog.py","file_name":"from_verilog.py","file_ext":"py","file_size_in_byte":28364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"127344217","text":"import sys\nimport wget\n\ninpFile = sys.argv[1]\n\nwith open(inpFile, 'r') as f:\n    for line in f:\n        l = line.strip()\n        l = l.split('|')\n        code = l[3].split('.')[0]\n        url = \"https://www.uniprot.org/uniprot/\"+code+\".fasta\"\n        print(url)\n        wget.download(url)","sub_path":"Scripts/retreive.py","file_name":"retreive.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"489863798","text":"#! python3\n# excelToCsv.py - converts a number of .xlsx to .csv\n\nimport csv, openpyxl, os\n\nos.chdir(r'C:\PythonScripts\Chapter 14 - CSV and JSON\excelToCsv')\n\nfor excelFile in os.listdir('.'):\n    #Skip non-xlsx files, load the workbook object.\n    if not excelFile.endswith('.xlsx'):\n        continue\n    wb = openpyxl.load_workbook(excelFile)\n    for sheetName in wb.get_sheet_names():\n        # Loop through every sheet in the workbook.\n        sheet = wb.get_sheet_by_name(sheetName) \n        \n        # Create the CSV filename from the Excel filename and sheet title.\n        csvName = os.path.splitext(excelFile)[0]+'_'+sheetName+'.csv'\n\n        # Create csv.writer object for this CSV file.\n        outputFile = open(csvName, 'w', newline='')\n        outputWriter = csv.writer(outputFile)\n        \n        # Loop through every row in the sheet.\n        for rowNum in range(1,sheet.max_row+1):\n            rowData = [] #append each cell to this list\n            # Loop through each cell in the row.\n            for colNum in range(1, sheet.max_column+1):\n                #Append each cell's data to row Data.\n                rowData.append(sheet.cell(row = rowNum,column = colNum).value) \n            # Write the rowData list to the CSV file.\n            outputWriter.writerow(rowData)\n        \n        outputFile.close()\n","sub_path":"How_to_Automate_Stuff_w_Python/Chapter 14 - CSV and JSON/excelToCsv/excelToCsv.py","file_name":"excelToCsv.py","file_ext":"py","file_size_in_byte":1352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"261650448","text":"# --------------------------------------------------------\n# Tensorflow Faster R-CNN\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Xinlei Chen\n# --------------------------------------------------------\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nimport tensorflow.contrib.slim as slim\nfrom tensorflow.contrib.slim import losses\nfrom tensorflow.contrib.slim import arg_scope\nimport numpy as np\n\nfrom nets.network import Network\nfrom model.config import cfg\n\n\nclass vgg16(Network):\n    def __init__(self):\n        Network.__init__(self)\n        self._feat_stride = [16, ]\n        self._feat_compress = [1. 
/ float(self._feat_stride[0]), ]\n self._scope = 'vgg_16'\n\n def _image_to_head(self, is_training, reuse=None):\n with tf.variable_scope(self._scope, self._scope, reuse=reuse):\n net = slim.repeat(self._image, 2, slim.conv2d, 64, [3, 3],\n trainable=False, scope='conv1')\n net = slim.max_pool2d(net, [2, 2], padding='SAME', scope='pool1')\n net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3],\n trainable=False, scope='conv2')\n net = slim.max_pool2d(net, [2, 2], padding='SAME', scope='pool2')\n net = slim.repeat(net, 3, slim.conv2d, 256, [3, 3],\n trainable=False, scope='conv3')\n net = slim.max_pool2d(net, [2, 2], padding='SAME', scope='pool3')\n net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3],\n trainable=False, scope='conv4')\n net = slim.max_pool2d(net, [2, 2], padding='SAME', scope='pool4')\n net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3],\n trainable=False, scope='conv5')\n\n self._act_summaries.append(net)\n self._layers['head'] = net\n\n return net\n\n def _head_to_tail(self, pool5, is_training, prefix=None, reuse=None, open_scope=True):\n prefix = '' if prefix is None else prefix+'_'\n if open_scope:\n with tf.variable_scope(self._scope, self._scope, reuse=reuse):\n pool5_flat = slim.flatten(pool5, scope=prefix+'flatten')\n fc6 = slim.fully_connected(pool5_flat, 4096, scope=prefix+'fc6')\n if is_training:\n fc6 = slim.dropout(fc6, keep_prob=0.5, is_training=True,\n scope=prefix+'dropout6')\n fc7 = slim.fully_connected(fc6, 4096, scope=prefix+'fc7')\n if is_training:\n fc7 = slim.dropout(fc7, keep_prob=0.5, is_training=True,\n scope=prefix+'dropout7')\n else:\n pool5_flat = slim.flatten(pool5, scope=prefix + 'flatten')\n fc6 = slim.fully_connected(pool5_flat, 4096, scope=prefix + 'fc6')\n if is_training:\n fc6 = slim.dropout(fc6, keep_prob=0.5, is_training=True,\n scope=prefix + 'dropout6')\n fc7 = slim.fully_connected(fc6, 4096, scope=prefix + 'fc7')\n if is_training:\n fc7 = slim.dropout(fc7, keep_prob=0.5, is_training=True,\n scope=prefix + 'dropout7')\n return fc7\n\n def get_variables_to_restore(self, variables, var_keep_dic):\n variables_to_restore = []\n\n for v in variables:\n # exclude the conv weights that are fc weights in vgg16\n if v.name == (self._scope + '/fc6/weights:0') or \\\n v.name == (self._scope + '/fc7/weights:0'):\n self._variables_to_fix[v.name] = v\n continue\n # exclude the first conv layer to swap RGB to BGR\n if v.name == (self._scope + '/conv1/conv1_1/weights:0'):\n self._variables_to_fix[v.name] = v\n continue\n if v.name.split(':')[0] in var_keep_dic:\n print('Variables restored: %s' % v.name)\n variables_to_restore.append(v)\n\n return variables_to_restore\n\n def restore_variables_from_npz(self, sess, pretrained_model):\n data_dict = np.load(pretrained_model, encoding=\"latin1\").item()\n for key in data_dict:\n with tf.variable_scope(self._scope, reuse=True):\n with tf.variable_scope(key.split('_')[0], reuse=True):\n if key.startswith(\"conv\"):\n with tf.variable_scope(key, reuse=True):\n for subkey, data in zip(('weights', 'biases'), data_dict[key]):\n var = tf.get_variable(subkey) # vgg_16/conv1/conv1_1/weights\n sess.run(var.assign(data))\n else:\n #continue# for CBAM\n for subkey, data in zip(('weights', 'biases'), data_dict[key]):\n var = tf.get_variable(subkey) # vgg_16/fc6/weights\n sess.run(var.assign(data))\n\n def get_variables_to_restore_nofix(self, variables, var_keep_dic):\n variables_to_restore = []\n for v in variables:\n # exclude learning rate\n if v.name =='Variable:0':\n continue\n if v.name.split(':')[0] in 
var_keep_dic:\n # exclude the prediction layer\n if 'cls_score' not in v.name and 'bbox_pred' not in v.name and \\\n list(v.shape) == var_keep_dic[v.name.split(':')[0]]:\n print('Variables restored: %s' % v.name)\n variables_to_restore.append(v)\n else:\n print(\"Variables skipped: %s\" % v.name)\n return variables_to_restore\n\n def fix_variables(self, sess, pretrained_model):\n print('Fix VGG16 layers..')\n with tf.variable_scope('Fix_VGG16') as scope:\n with tf.device(\"/cpu:0\"):\n # fix the vgg16 issue from conv weights to fc weights\n # fix RGB to BGR\n fc6_conv = tf.get_variable(\"fc6_conv\", [7, 7, 512, 4096], trainable=False)\n fc7_conv = tf.get_variable(\"fc7_conv\", [1, 1, 4096, 4096], trainable=False)\n conv1_rgb = tf.get_variable(\"conv1_rgb\", [3, 3, 3, 64], trainable=False)\n restorer_fc = tf.train.Saver({self._scope + \"/fc6/weights\": fc6_conv,\n self._scope + \"/fc7/weights\": fc7_conv,\n self._scope + \"/conv1/conv1_1/weights\": conv1_rgb})\n restorer_fc.restore(sess, pretrained_model)\n\n sess.run(tf.assign(self._variables_to_fix[self._scope + '/fc6/weights:0'], tf.reshape(fc6_conv,\n self._variables_to_fix[\n self._scope + '/fc6/weights:0'].get_shape())))\n sess.run(tf.assign(self._variables_to_fix[self._scope + '/fc7/weights:0'], tf.reshape(fc7_conv,\n self._variables_to_fix[\n self._scope + '/fc7/weights:0'].get_shape())))\n sess.run(tf.assign(self._variables_to_fix[self._scope + '/conv1/conv1_1/weights:0'],\n tf.reverse(conv1_rgb, [2])))\n","sub_path":"lib/nets/vgg16.py","file_name":"vgg16.py","file_ext":"py","file_size_in_byte":7678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"150714284","text":"class Solution(object):\n def findLadders(self, beginWord, endWord, wordList):\n \"\"\"\n :type beginWord: str\n :type endWord: str\n :type wordList: List[str]\n :rtype: List[List[str]]\n \"\"\"\n words = set(wordList)\n if endWord not in words:\n return []\n dist = 1\n toVisit, visited = [beginWord], {beginWord: dist}\n backtrace, found = {beginWord: []}, False\n while len(toVisit) > 0 and not found:\n cnt = len(toVisit)\n dist += 1\n for i in xrange(cnt):\n word = toVisit[i]\n for j in xrange(len(word)):\n for ch in xrange(ord(\"a\"), ord(\"z\")+1):\n candidate = word[:j] + chr(ch) + word[j+1:]\n if candidate not in words:\n continue\n if candidate == endWord:\n found = True\n if candidate not in visited or visited[candidate] == dist:\n if candidate in backtrace:\n backtrace[candidate].append(word)\n else:\n backtrace[candidate] = [word]\n toVisit.append(candidate)\n visited[candidate] = dist\n toVisit = toVisit[cnt:]\n if not found:\n return []\n else:\n path, res = [], []\n self.dfs(backtrace, endWord, path, res)\n return res\n \n def dfs(self, backtrace, word, path, res):\n path.append(word)\n if len(backtrace[word]) == 0:\n res.append(path[::-1])\n for neighbor in backtrace[word]:\n self.dfs(backtrace, neighbor, path, res)\n path.pop()\n","sub_path":"hard/126_Word_Ladder_II/ladder.py","file_name":"ladder.py","file_ext":"py","file_size_in_byte":1830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"577667086","text":"# -------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for\n# license information.\n# --------------------------------------------------------------------------\n\nimport time\nfrom azure.iot.hub.devicesdk import ModuleClient, Message\nfrom azure.iot.hub.devicesdk.auth.authentication_provider_factory import from_environment\nimport uuid\n\n# The \"Authentication Provider\" is the object in charge of creating authentication \"tokens\" for the module client.\nauth_provider = from_environment()\n# For now, the SDK only supports MQTT as a protocol.\n# Inputs/Outputs are only supported in the context of Azure IoT Edge and module client\n# The module client object acts as an Azure IoT Edge module and interacts with an Azure IoT Edge hub\n# It needs an Authentication Provider to secure the communication with the Edge hub.\n# This authentication provider is created from environment & delegates token generation to iotedged.\nmodule_client = ModuleClient.from_authentication_provider(auth_provider, \"mqtt\")\n\n# Connect the client.\nmodule_client.connect()\n\n# send 5 messages with a 1 second pause between each message\nfor i in range(0, 5):\n print(\"sending message #\" + str(i))\n msg = Message(\"test wind speed \" + str(i))\n msg.message_id = uuid.uuid4()\n msg.correlation_id = \"correlation-1234\"\n msg.custom_properties[\"tornado-warning\"] = \"yes\"\n module_client.send_to_output(msg, \"twister\")\n time.sleep(1)\n\n# send only string messages\nfor i in range(5, 10):\n print(\"sending message #\" + str(i))\n module_client.send_to_output(\"test payload message \" + str(i), \"tracking\")\n time.sleep(1)\n\n\n# finally, disconnect\nmodule_client.disconnect()\n","sub_path":"azure-iot-hub-devicesdk/samples/send_to_output_module.py","file_name":"send_to_output_module.py","file_ext":"py","file_size_in_byte":1797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"227608933","text":"# Copyright 2017 Neural Networks and Deep Learning lab, MIPT\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom pathlib import Path\nfrom typing import Dict, List, Tuple\n\nfrom deeppavlov.core.commands.utils import expand_path\nfrom deeppavlov.core.common.registry import register\nfrom deeppavlov.core.data.dataset_reader import DatasetReader\n\n\n@register('insurance_reader')\nclass InsuranceReader(DatasetReader):\n \"\"\"The class to read the InsuranceQA V1 dataset from files.\n\n Please, see https://github.com/shuzi/insuranceQA.\n \"\"\"\n\n def read(self, data_path: str, **kwargs) -> Dict[str, List[Tuple[List[str], int]]]:\n \"\"\"Read the InsuranceQA V1 dataset from files.\n\n Args:\n data_path: A path to a folder with dataset files.\n \"\"\"\n\n data_path = expand_path(data_path)\n dataset = {'train': None, 'valid': None, 'test': None}\n train_fname = data_path / 'insuranceQA-master/V1/question.train.token_idx.label'\n valid_fname = data_path / 'insuranceQA-master/V1/question.dev.label.token_idx.pool'\n
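 # the test pool file uses the same label / question-tokens / candidate-pool layout as the dev file\n test_fname = data_path / 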
'insuranceQA-master/V1/question.test1.label.token_idx.pool'\n int2tok_fname = data_path / 'insuranceQA-master/V1/vocabulary'\n response2ints_fname = data_path / 'insuranceQA-master/V1/answers.label.token_idx'\n self.int2tok_vocab = self._build_int2tok_vocab(int2tok_fname)\n self.idxs2cont_vocab = self._build_context2toks_vocab(train_fname, valid_fname, test_fname)\n self.response2str_vocab = self._build_response2str_vocab(response2ints_fname)\n dataset[\"valid\"] = self._preprocess_data_valid_test(valid_fname)\n dataset[\"train\"] = self._preprocess_data_train(train_fname)\n dataset[\"test\"] = self._preprocess_data_valid_test(test_fname)\n\n return dataset\n\n def _build_context2toks_vocab(self, train_f: Path, val_f: Path, test_f: Path) -> Dict[int, str]:\n contexts = []\n with open(train_f, 'r') as f:\n data = f.readlines()\n for eli in data:\n eli = eli[:-1]\n c, _ = eli.split('\\t')\n contexts.append(c)\n with open(val_f, 'r') as f:\n data = f.readlines()\n for eli in data:\n eli = eli[:-1]\n _, c, _ = eli.split('\\t')\n contexts.append(c)\n with open(test_f, 'r') as f:\n data = f.readlines()\n for eli in data:\n eli = eli[:-1]\n _, c, _ = eli.split('\\t')\n contexts.append(c)\n idxs2cont_vocab = {el[1]: el[0] for el in enumerate(contexts)}\n return idxs2cont_vocab\n\n def _build_int2tok_vocab(self, fname: Path) -> Dict[int, str]:\n with open(fname, 'r') as f:\n data = f.readlines()\n int2tok_vocab = {int(el.split('\\t')[0].split('_')[1]): el.split('\\t')[1][:-1] for el in data}\n return int2tok_vocab\n\n def _build_response2str_vocab(self, fname: Path) -> Dict[int, str]:\n with open(fname, 'r') as f:\n data = f.readlines()\n response2idxs_vocab = {int(el.split('\\t')[0]) - 1:\n (el.split('\\t')[1][:-1]).split(' ') for el in data}\n response2str_vocab = {el[0]: ' '.join([self.int2tok_vocab[int(x.split('_')[1])]\n for x in el[1]]) for el in response2idxs_vocab.items()}\n return response2str_vocab\n\n def _preprocess_data_train(self, fname: Path) -> List[Tuple[List[str], int]]:\n positive_responses_pool = []\n contexts = []\n responses = []\n labels = []\n with open(fname, 'r') as f:\n data = f.readlines()\n for k, eli in enumerate(data):\n eli = eli[:-1]\n q, pa = eli.split('\\t')\n q_tok = ' '.join([self.int2tok_vocab[int(el.split('_')[1])] for el in q.split()])\n pa_list = [int(el) - 1 for el in pa.split(' ')]\n pa_list_tok = [self.response2str_vocab[el] for el in pa_list]\n for elj in pa_list_tok:\n contexts.append(q_tok)\n responses.append(elj)\n positive_responses_pool.append(pa_list_tok)\n labels.append(k)\n train_data = list(zip(contexts, responses))\n train_data = list(zip(train_data, labels))\n return train_data\n\n def _preprocess_data_valid_test(self, fname: Path) -> List[Tuple[List[str], int]]:\n pos_responses_pool = []\n neg_responses_pool = []\n contexts = []\n pos_responses = []\n with open(fname, 'r') as f:\n data = f.readlines()\n for eli in data:\n eli = eli[:-1]\n pa, q, na = eli.split('\\t')\n q_tok = ' '.join([self.int2tok_vocab[int(el.split('_')[1])] for el in q.split()])\n pa_list = [int(el) - 1 for el in pa.split(' ')]\n pa_list_tok = [self.response2str_vocab[el] for el in pa_list]\n nas = [int(el) - 1 for el in na.split(' ')]\n nas_tok = [self.response2str_vocab[el] for el in nas]\n for elj in pa_list_tok:\n contexts.append(q_tok)\n pos_responses.append(elj)\n pos_responses_pool.append(pa_list_tok)\n neg_responses_pool.append(nas_tok)\n data = [[el[0]] + el[1] for el in zip(contexts, neg_responses_pool)]\n data = [(el[0], len(el[1])) for el in zip(data, 
pos_responses_pool)]\n return data\n","sub_path":"deeppavlov/dataset_readers/insurance_reader.py","file_name":"insurance_reader.py","file_ext":"py","file_size_in_byte":5918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"446923122","text":"# coding: UTF-8\nimport glob\nimport pandas as pd\n\ncsvlist = []\ncsvfiles = glob.glob(\"./output/*.csv\")\n\nfor csvfile in csvfiles:\n csvlist.append(pd.read_csv(csvfile))\n\ndf = pd.concat(csvlist, sort=False)\ndf.to_csv(\"all.csv\", encoding=\"utf_8\", index=False)\n","sub_path":"marge-csv.py","file_name":"marge-csv.py","file_ext":"py","file_size_in_byte":257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"75347939","text":"MAX_COLOR_VALUE = 254.0\nBASE_QUALITY_CAP = 40.0\nMAP_QUALITY_CAP = 60.0\nMAP_QUALITY_FILTER = 5.0\nMIN_DELETE_QUALITY = 20.0\nMATCH_CIGAR_CODE = 0\nINSERT_CIGAR_CODE = 1\nDELETE_CIGAR_CODE = 2\nIMAGE_DEPTH_THRESHOLD = 120\n\nglobal_base_color_dictionary = {'A': 254.0, 'C': 100.0, 'G': 180.0, 'T': 30.0, '*': 60.0, '.': 150.0, 'N': 5.0}\nglobal_cigar_color_dictionary = {0: MAX_COLOR_VALUE, 1: MAX_COLOR_VALUE*0.6, 2: MAX_COLOR_VALUE*0.3}\nglobal_allele_support_dictionary = {0: 5.0, 1: 254.0, 2: 254.0}\nglobal_allele_type_dictionary = {0: 2.0, 1: 150.0, 2: 254.0, 3: 50.0}\n\n\nclass ImageChannels:\n \"\"\"\n Handles how many channels to create for each base and their way of construction.\n \"\"\"\n\n def __init__(self, pileup_attributes, ref_base):\n \"\"\"\n Initialize a base with it's attributes\n :param pileup_attributes: Attributes of a pileup base\n :param ref_base: Reference base corresponding to that pileup base\n \"\"\"\n self.pileup_base = pileup_attributes[0]\n self.base_qual = pileup_attributes[1]\n self.map_qual = pileup_attributes[2]\n self.cigar_code = pileup_attributes[3]\n self.is_rev = pileup_attributes[4]\n self.supports_allele = pileup_attributes[5]\n self.support_allele_type = pileup_attributes[6]\n self.ref_base = ref_base\n self.is_match = True if self.ref_base == self.pileup_base and self.ref_base != '*' else False\n\n @staticmethod\n def get_total_number_of_channels():\n return len(ImageChannels.get_empty_channels())\n\n @staticmethod\n def get_empty_channels():\n \"\"\"\n Get empty channel values\n :return:\n \"\"\"\n return [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n\n def get_channels(self):\n \"\"\"\n Get a bases's channel construction\n :return: [color spectrum of channels based on base attributes]\n \"\"\"\n base_color = global_base_color_dictionary[self.pileup_base] \\\n if self.pileup_base in global_base_color_dictionary else 0.0\n\n base_qual_color = (MAX_COLOR_VALUE * min(self.base_qual, BASE_QUALITY_CAP)) / BASE_QUALITY_CAP\n\n map_qual_color = (MAX_COLOR_VALUE * min(self.map_qual, MAP_QUALITY_CAP)) / MAP_QUALITY_CAP\n\n strand_color = 254.0 if self.is_rev else 70.0\n\n match_color = MAX_COLOR_VALUE * 0.2 if self.is_match is True else MAX_COLOR_VALUE * 1.0\n\n cigar_color = global_cigar_color_dictionary[self.cigar_code] \\\n if self.cigar_code in global_cigar_color_dictionary else 0.0\n\n support_color1, support_type_color1, support_color2, support_type_color2 = 5.0, 2.0, 5.0, 2.0\n if self.supports_allele == 1:\n support_color1 = global_allele_support_dictionary[self.supports_allele]\n support_type_color1 = global_allele_type_dictionary[self.support_allele_type]\n elif self.supports_allele == 2:\n support_color2 = global_allele_support_dictionary[self.supports_allele]\n support_type_color2 = 
global_allele_type_dictionary[self.support_allele_type]\n\n return [base_color, base_qual_color, map_qual_color, strand_color, match_color, cigar_color, support_color1,\n support_type_color1, support_color2, support_type_color2]\n\n @staticmethod\n def get_channels_for_ref(pileup_base):\n \"\"\"\n Get a reference bases's channel construction\n :param pileup_base: Reference base\n :return: [color spectrum of channels based on some default values]\n \"\"\"\n cigar_code = MATCH_CIGAR_CODE if pileup_base != '*' else INSERT_CIGAR_CODE\n base_qual = BASE_QUALITY_CAP\n map_qual = 60.0\n is_rev = False\n is_match = True\n supports_allele = 1\n support_type = 0\n\n base_color = global_base_color_dictionary[pileup_base] \\\n if pileup_base in global_base_color_dictionary else 0.0\n\n base_qual_color = (MAX_COLOR_VALUE * min(base_qual, BASE_QUALITY_CAP)) / BASE_QUALITY_CAP\n\n map_qual_color = (MAX_COLOR_VALUE * min(map_qual, MAP_QUALITY_CAP)) / MAP_QUALITY_CAP\n\n strand_color = 240.0 if is_rev else 70.0\n\n match_color = MAX_COLOR_VALUE * 0.2 if is_match is True else MAX_COLOR_VALUE * 1.0\n\n cigar_color = global_cigar_color_dictionary[cigar_code] \\\n if cigar_code in global_cigar_color_dictionary else 0.0\n\n support_color = global_allele_support_dictionary[supports_allele] \\\n if supports_allele in global_allele_support_dictionary else 0.0\n\n support_type_color = global_allele_type_dictionary[support_type] \\\n if support_type in global_allele_type_dictionary else 0.0\n\n return [base_color, base_qual_color, map_qual_color, strand_color, match_color, cigar_color, support_color,\n support_type_color, support_color, support_type_color]\n","sub_path":"modules/handlers/ImageChannels.py","file_name":"ImageChannels.py","file_ext":"py","file_size_in_byte":4828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"628264770","text":"import datetime\nimport json\nfrom typing import List\nfrom frameworkAG.Entities import Assignment\n\nclass MapperToSemana():\n \n @staticmethod\n def mapperSemana(data):\n solucion = sorted(data,key=lambda assignment: (assignment.slot.week_day.value, assignment.slot.hour))\n json_data = json.dumps(solucion, skipkeys=True, check_circular=False,\n default=lambda o: MapperToSemana.json_default(o), indent=4)\n print(\"--------------------inicio solucion --------------------------\")\n print(json_data)\n print(\"--------------------fin solucion --------------------------\")\n \n\n @staticmethod\n def json_default(value):\n if isinstance(value, Assignment):\n return {\"day\": value.slot.week_day.name,\n \"hora\": value.slot.hour,\n \"subject\": value.listResourceId[0],\n \"teacher \":value.listResourceId[1]\n }\n else:\n return value.__dict__","sub_path":"schoolSchedule/mapperToSemana.py","file_name":"mapperToSemana.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"87749132","text":"\"\"\"Numpy-related utilities.\"\"\"\nfrom __future__ import division, absolute_import, print_function\n\nimport numpy as np\n\n\ndef rand_replace_elem(x, ratio, fill=0, inplace=True):\n \"\"\"Random replace array with specific value at certain ratio.\n\n Parameters\n ----------\n x : numpy.ndarray\n 2D array.\n ratio : float\n Filled value's ratio. 
\n It must be between 0 and 1.\n fill : x.dtype.type, default 0\n Filled value.\n inplace : bool, default True\n Whether to create a copy of `x` (False) or to replace values\n in-place (True).\n\n Returns\n -------\n new_x : numpy.ndarray\n When inplace is False, it returns a copy of x\n with the selected elements replaced.\n \"\"\"\n shape = x.shape\n nb_true = round(ratio * shape[1]) # replacements per row; ratio * x.size would overfill every row\n mask = np.zeros(shape, dtype=bool)\n for i in range(shape[0]):\n mask[i, :] = rand_bool_array(nb_true, (1, shape[1]))\n if inplace:\n x[mask] = fill\n return\n else:\n new_x = x.copy()\n new_x[mask] = fill\n return new_x\n\n\ndef rand_bool_array(nb_true, out_shape):\n \"\"\"Generate random bool array.\n\n Parameters\n ----------\n nb_true : int\n Number of True.\n out_shape : tuple\n Random bool array's shape.\n\n Returns\n -------\n arr : numpy.array\n Random bool array.\n \"\"\"\n nb_element = 1\n for i in out_shape:\n nb_element = nb_element * i\n arr = np.zeros(nb_element, dtype=bool)\n nb_true = int(nb_true)\n arr[:nb_true] = True\n np.random.shuffle(arr)\n arr = arr.reshape(out_shape)\n\n return arr\n\n\ndef crop_zeros(array, remain=0, return_bound=False):\n \"\"\"\n Crop the all-zero edges of the input array.\n\n Parameters\n ----------\n array : numpy.ndarray\n 2D numpy array.\n remain : int\n The number of all-zero edge rows/columns to keep.\n return_bound : str or bool\n Select the return mode.\n True: return array and bound.\n 'only_bound': return bound.\n Others: return array.\n\n Returns\n -------\n out : np.ndarray, optional\n Cropped array.\n left_bound : int, optional\n The edge of the left cropping.\n right_bound : int, optional\n The edge of the right cropping.\n upper_bound : int, optional\n The edge of the upper cropping.\n lower_bound : int, optional\n The edge of the lower cropping.\n\n References\n ----------\n https://stackoverflow.com/questions/48987774/how-to-crop-a-numpy-2d-array-to-non-zero-values\n \"\"\"\n row = array.any(1)\n if row.any():\n row_size, col_size = array.shape\n col = array.any(0)\n\n left_bound = np.max([col.argmax() - remain, 0])\n right_bound = np.min([col_size - col[::-1].argmax() + remain, col_size - 1]) # col[::-1] is reverse of col\n upper_bound = np.max([row.argmax() - remain, 0])\n lower_bound = np.min([row_size - row[::-1].argmax() + remain, row_size - 1]) # row[::-1] is reverse of row\n out = array[upper_bound:lower_bound, left_bound:right_bound]\n else:\n left_bound = None\n right_bound = None\n upper_bound = None\n lower_bound = None\n out = np.empty((0, 0), dtype=bool)\n\n if isinstance(return_bound, bool) and return_bound:\n return out, (left_bound, right_bound, upper_bound, lower_bound)\n elif return_bound == 'only_bound':\n return left_bound, right_bound, upper_bound, lower_bound\n else:\n return out\n\n\ndef _new_view(arr):\n \"\"\"\n View the array as a dtype that contains the entire column per row.\n\n Parameters\n ----------\n arr : numpy.ndarray, Iterable, int, float\n\n Returns\n -------\n arr : numpy.ndarray\n New view of the input arr.\n\n References\n ----------\n https://stackoverflow.com/questions/22699756/python-version-of-ismember-with-rows-and-index\n https://stackoverflow.com/questions/8317022/get-intersecting-rows-across-two-2d-numpy-arrays\n \"\"\"\n arr = np.ascontiguousarray(arr) # Return a contiguous array (ndim >= 1) in memory (C order)\n ncols = arr.shape[-1]\n dtype = {'names': ['f{}'.format(i) for i in range(ncols)],\n 'formats': [arr[0, i].dtype for i in range(ncols)]}\n
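 # viewing each row as a single structured element lets the 1-D set routines compare whole rows\n return 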
arr.view(dtype)\n\n\ndef in2d(arr1, arr2, assume_unique=False, invert=False):\n \"\"\"\n Test whether each row of a 2-D array is also present in a second array.\n\n Returns a boolean array the same length as the number of rows of arr1 that is True\n where a row of arr1 is in arr2 and False otherwise.\n\n Parameters\n ----------\n arr1 : array_like\n Input 2D array.\n arr2 : array_like\n The 'rows' against which to test each row of arr1.\n assume_unique : bool, optional\n If True, the 'rows' of input arrays are both assumed to be unique, which can speed up the calculation.\n Default is False.\n invert : bool, optional\n If True, the 'rows' in the returned array are inverted\n (that is, False where an element of arr1 is in arr2 and True otherwise).\n Default is False.\n\n Returns\n -------\n arr3 : numpy.ndarray\n The rows arr1[in2d, :] are in arr2.\n\n References\n ----------\n https://stackoverflow.com/questions/22699756/python-version-of-ismember-with-rows-and-index\n https://stackoverflow.com/questions/8317022/get-intersecting-rows-across-two-2d-numpy-arrays\n \"\"\"\n # view the array as a dtype that contains the entire column per row.\n tmp1, tmp2 = map(_new_view, (arr1, arr2))\n return np.in1d(tmp1, tmp2, assume_unique=assume_unique, invert=invert)\n\n\ndef intersect2d(arr1, arr2, assume_unique=False, return_indices=False):\n \"\"\"\n Find the intersection of two arrays.\n\n Return the sorted, unique 'rows' that are in both of the input arrays.\n\n Parameters\n ----------\n arr1, arr2 : array_like\n Input 2D arrays.\n assume_unique : bool\n If True, the 'rows' of input arrays are both assumed to be unique, which can speed up the calculation.\n Default is False.\n return_indices : bool\n If True, the indices which correspond to the intersection of 'rows' of the two arrays are returned.\n The first instance of a value is used if there are multiple. Default is False.\n\n Returns\n -------\n arr3 : numpy.ndarray\n Sorted 2D array of common and unique 'rows'.\n comm1 : numpy.ndarray\n The indices of the first occurrences of the common values in arr1. Only provided if return_indices is True.\n comm2 : numpy.ndarray\n The indices of the first occurrences of the common values in arr2. 
Only provided if return_indices is True.\n\n References\n ----------\n https://stackoverflow.com/questions/22699756/python-version-of-ismember-with-rows-and-index\n https://stackoverflow.com/questions/8317022/get-intersecting-rows-across-two-2d-numpy-arrays\n \"\"\"\n # view the array as a dtype that contains the entire column per row.\n tmp1, tmp2 = map(_new_view, (arr1, arr2))\n if return_indices:\n arr3, comm1, comm2 = np.intersect1d(tmp1, tmp2,\n assume_unique=assume_unique, return_indices=return_indices)\n # reshape the structured array with original view.\n arr3 = arr3.view(arr1.dtype).reshape(-1, arr1.shape[-1])\n return arr3, comm1, comm2\n else:\n arr3 = np.intersect1d(tmp1, tmp2, assume_unique=assume_unique)\n # reshape the structured array with original view.\n arr3 = arr3.view(arr1.dtype).reshape(-1, arr1.shape[-1])\n return arr3\n\n\ndef setdiff2d(arr1, arr2, assume_unique=False):\n \"\"\"\n Find the set difference of two arrays.\n\n Return the sorted, unique 'rows' in arr1 that are not in arr2.\n\n Parameters\n ----------\n arr1 : array_like\n Input 2D array.\n arr2 : array_like\n Input comparison 2D array.\n assume_unique : bool\n If True, the 'rows' of input arrays are both assumed to be unique, which can speed up the calculation.\n Default is False.\n\n Returns\n -------\n arr3 : numpy.ndarray\n Sorted 2D array of 'rows' in arr1 that are not in arr2.\n\n References\n ----------\n https://stackoverflow.com/questions/22699756/python-version-of-ismember-with-rows-and-index\n https://stackoverflow.com/questions/8317022/get-intersecting-rows-across-two-2d-numpy-arrays\n \"\"\"\n # view the array as a dtype that contains the entire column per row.\n tmp1, tmp2 = map(_new_view, (arr1, arr2))\n arr3 = np.setdiff1d(tmp1, tmp2, assume_unique=assume_unique)\n # reshape the structured array with original view.\n arr3 = arr3.view(arr1.dtype).reshape(-1, arr1.shape[-1])\n return arr3\n\n\ndef setxor2d(arr1, arr2, assume_unique=False):\n \"\"\"\n Find the set exclusive-or of rows of two arrays.\n\n Return the sorted, unique 'rows' that are in only one (not both) of the input arrays.\n\n Parameters\n ----------\n arr1, arr2 : array_like\n Input 2D arrays.\n assume_unique : bool\n If True, the 'rows' of input arrays are both assumed to be unique, which can speed up the calculation.\n Default is False.\n Returns\n -------\n arr3 : numpy.ndarray\n Sorted 2D array of unique 'rows' that are in only one of the input arrays.\n\n References\n ----------\n https://stackoverflow.com/questions/22699756/python-version-of-ismember-with-rows-and-index\n https://stackoverflow.com/questions/8317022/get-intersecting-rows-across-two-2d-numpy-arrays\n \"\"\"\n # view the array as a dtype that contains the entire column per row.\n tmp1, tmp2 = map(_new_view, (arr1, arr2))\n arr3 = np.setxor1d(tmp1, tmp2, assume_unique=assume_unique)\n # reshape the structured array with original view.\n arr3 = arr3.view(arr1.dtype).reshape(-1, arr1.shape[-1])\n return arr3\n\n\ndef union2d(arr1, arr2):\n \"\"\"\n Return the unique, sorted array of 'rows' that are in either of the two input arrays.\n\n Parameters\n ----------\n arr1, arr2 : array_like\n Input 2D arrays.\n\n Returns\n -------\n arr3 : numpy.ndarray\n Unique, sorted union of the input arrays.\n\n References\n ----------\n https://stackoverflow.com/questions/22699756/python-version-of-ismember-with-rows-and-index\n https://stackoverflow.com/questions/8317022/get-intersecting-rows-across-two-2d-numpy-arrays\n \"\"\"\n # view the array as a dtype that contains the 
entire column per row.\n tmp1, tmp2 = map(_new_view, (arr1, arr2))\n arr3 = np.union1d(tmp1, tmp2)\n # reshape the structured array with original view.\n arr3 = arr3.view(arr1.dtype).reshape(-1, arr1.shape[-1])\n return arr3\n\n\ndef set_ops(arr1, arr2, relation, dim=2, **kwargs):\n \"\"\"\n Set operation.\n The operation contains 'in', 'intersection', 'difference', 'exclusive-or' and 'union'.\n\n Parameters\n ----------\n arr1, arr2 : array_like\n Input array.\n For more details, see 'in1d', 'intersect1d', 'isin',\n 'setdiff1d', 'setxor1d', 'union1d' and the corresponding 2d version.\n relation : str\n Set relationship.\n dim : int, optional\n 1D or 2D set operation.\n kwargs : bool, optional\n For more details, see 'in1d', 'intersect1d', 'isin',\n 'setdiff1d', 'setxor1d', 'union1d' and the corresponding 2d version.\n\n Returns\n -------\n For more details, see 'in1d', 'intersect1d', 'isin',\n 'setdiff1d', 'setxor1d', 'union1d' and the corresponding 2d version.\n\n References\n ----------\n https://stackoverflow.com/questions/22699756/python-version-of-ismember-with-rows-and-index\n https://stackoverflow.com/questions/8317022/get-intersecting-rows-across-two-2d-numpy-arrays\n\n Examples\n --------\n >>> import numpy as np\n >>> from erinn.python.utils.np_utils import set_ops\n >>> a = np.array([[5, 1, 2, 3, -1.6, 1000, 0],\n ... [5, 1, 2, 4, -3.2, 1000, 0],\n ... [5, 1, 2, 6, -3.6, 1000, 0],\n ... [5, 1, 2, 7, -2.5, 1000, 0],\n ... [5, 1, 2, 8, -2.1, 1000, 0],\n ... [5, 1, 2, 10, -1.5, 1000, 0],\n ... [5, 1, 2, 11, -1.8, 1000, 0],\n ... [5, 1, 2, 12, -1.8, 1000, 0],\n ... [5, 1, 2, 14, -1.8, 1000, 0],\n ... [5, 1, 2, 15, -1.6, 1000, 0]])\n >>> b = a[::2, :].copy()\n >>> b = np.vstack((b, a[-1, :], a[0, :]))\n >>> print(set_ops(a, b, 'in', dim=2))\n [ True False True False True False True False True True]\n \"\"\"\n\n if dim == 1:\n if relation.lower() == 'in':\n return np.in1d(arr1, arr2, **kwargs)\n elif relation.lower() == 'intersect':\n return np.intersect1d(arr1, arr2, **kwargs)\n elif relation.lower() == 'isin':\n return np.isin(arr1, arr2, **kwargs) # the original fell through without returning the result\n elif relation.lower() == 'diff':\n return np.setdiff1d(arr1, arr2, **kwargs)\n elif relation.lower() == 'xor':\n return np.setxor1d(arr1, arr2, **kwargs)\n elif relation.lower() == 'union':\n return np.union1d(arr1, arr2)\n else:\n raise ValueError(\"The positional argument 'relation' is wrong.\"\n + \" Please input 'in', 'intersect', 'isin', 'diff', 'xor' or 'union'.\")\n elif dim == 2:\n if relation.lower() == 'in':\n return in2d(arr1, arr2, **kwargs)\n elif relation.lower() == 'intersect':\n return intersect2d(arr1, arr2, **kwargs)\n elif relation.lower() == 'diff':\n return setdiff2d(arr1, arr2, **kwargs)\n elif relation.lower() == 'xor':\n return setxor2d(arr1, arr2, **kwargs)\n elif relation.lower() == 'union':\n return union2d(arr1, arr2)\n else:\n raise ValueError(\"The positional argument 'relation' is wrong.\"\n + \" Please input 'in', 'intersect', 'diff', 'xor' or 'union'.\")\n else:\n raise ValueError(\"The keyword argument 'dim' is wrong. 
Please input 1 or 2.\")\n","sub_path":"erinn/python/utils/np_utils.py","file_name":"np_utils.py","file_ext":"py","file_size_in_byte":14004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"39968421","text":"# Copyright (c) 2021\n# @Author: xiaoweixiang\nimport collections\nfrom typing import List\n\n\nclass Solution:\n def canArrange(self, arr: List[int], k: int) -> bool:\n \"\"\"\n python 和 Java 在取余上的计算方式不同\n :param arr:\n :param k:\n :return:\n \"\"\"\n m = collections.defaultdict(int)\n for a in arr:\n b = a % k\n # if a < 0: b = k - b\n m[b] += 1\n for i in range(1, k // 2 + 1):\n if m[i] != m[k - i]: return False\n return m[0] % 2 == 0\n\n\nif __name__ == '__main__':\n solution = Solution()\n arr = [1, 2, 3, 4, 5, 10, 6, 7, 8, 9]\n k = 5\n res = solution.canArrange(arr, k)\n print(\"res=\", res)\n","sub_path":"src/greedy/1497.py","file_name":"1497.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"480086902","text":"\"\"\"\nMomos Market\nSend Feedback\nShreya loves to eat momos. Her mother gives her money to buy vegetables but she manages to save some money out of it daily. After buying vegetables, she goes to \"Momos Market\", where there are ‘n’ number of shops of momos. Each of the shops of momos has a rate per momo. She visits the market and starts buying momos (one from each shop) starting from the first shop. She will visit the market for ‘q’ days. You have to tell that how many momos she can buy each day if she starts buying from the first shop daily. She cannot use the remaining money of one day on some other day. But she will save them for other expenses in the future, so, you also need to tell the sum of money left with her at the end of each day.\nInput Format:\nFirst line will have an integer ‘n’ denoting the number of shops in market.\nNext line will have ‘n’ numbers denoting the price of one momo of each shop.\nNext line will have an integer ‘q’ denoting the number of days she will visit the market.\nNext ‘q’ lines will have one integer ‘X’ denoting the money she saved after buying vegetables.\nConstraints:\n1 <= n <= 10^5\n1 <= q <= 10^5\n1 <= X <= 10^9\nOutput:\nThere will be ‘q’ lines of output each having two space separated integers denoting number of momos she can buy and amount of money she saved each day.\nSample Input:\n4\n2 1 6 3\n1\n11\nSample Output:\n3 2\nExplanation:\nShreya visits the \"Momos Market\" for only one day. She has 11 INR to spend. She can buy 3 momos, each from the first 3 shops. 
She would spend 9 INR (2 + 1 + 6) for the same and hence, she will save 2 INR.\n\"\"\"\n\n\n\ndef main():\n n = int(input())\n prices = list(map(int, input().strip().split()))\n q = int(input())\n amount = []\n for i in range(q):\n a = int(input().strip())\n amount.append(a)\n for i in range(q):\n money_spent = 0\n count = 0 # momos bought so far; enumerate's index undercounts by one when she can afford every shop\n for price in prices:\n if money_spent + price > amount[i]:\n break\n money_spent += price\n count += 1\n print(count, amount[i]-money_spent)\n \n\n\nif __name__ == \"__main__\":\n main()","sub_path":"searching_and_sorting/momos_market.py","file_name":"momos_market.py","file_ext":"py","file_size_in_byte":2123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"461684938","text":"import sys\nimport requests\nimport json\nfrom PyQt4.QtGui import *\nfrom PyQt4.QtCore import *\n\n\nclass MoviesQT(QMainWindow):\n def __init__(self):\n super(MoviesQT, self).__init__()\n\n self.setWindowTitle(\"Movie Recommender\")\n self.resize(500,500)\n \n self.central = MoviesCentral(parent = self)\n self.setCentralWidget(self.central)\n\n \n \nclass MoviesCentral(QWidget):\n def __init__(self, parent=None):\n super(MoviesCentral, self).__init__(parent)\n\n menubar = parent.menuBar()\n self.fileMenu = menubar.addMenu('&File')\n self.userMenu = menubar.addMenu('&User')\n\n self.userMenu.addAction(\"View Profile\", self.userInfo)\n self.userMenu.addAction(\"Set User\", self.setUser)\n \n self.upButton = QPushButton('UP', self)\n self.upButton.move(10, 250)\n self.downButton = QPushButton('DOWN', self)\n self.downButton.move(415, 250)\n\n self.connect(self.upButton, SIGNAL(\"clicked()\"),self.upVoteMovie)\n self.connect(self.downButton, SIGNAL(\"clicked()\"),self.downVoteMovie)\n \n \n self.titleText = QLabel(\"Empty Now\", self)\n self.titleText.setGeometry(75,25, 350, 15)\n self.titleText.setAlignment(Qt.AlignCenter)\n\n self.Image = QLabel(self)\n self.Image.setGeometry(160,50,300,400)\n pixmap = QPixmap(200, 300)\n self.Image.setPixmap(pixmap)\n self.Image.show()\n \n self.genreText = QLabel(\"Empty Now\", self)\n self.genreText.setGeometry(75,410, 350, 15)\n self.genreText.setAlignment(Qt.AlignCenter)\n\n self.ratingText = QLabel(\"Empty Now\", self)\n self.ratingText.setGeometry(75, 440, 350, 15)\n self.ratingText.setAlignment(Qt.AlignCenter)\n\n self.SITE_URL = 'http://ash.campus.nd.edu:40001'\n self.RECOMMENDATIONS_URL = self.SITE_URL + '/recommendations/'\n self.MOVIES_URL = self.SITE_URL + '/movies/'\n self.USER_URL = self.SITE_URL + '/users/'\n self.IMG_DIR = '/home/scratch/paradigms/data/images'\n self.RATINGS_URL = self.SITE_URL + '/ratings/'\n\n self.uid = 50\n \n self.rateOneMovie()\n \n def rateOneMovie(self):\n r = requests.get(self.RECOMMENDATIONS_URL + str(self.uid))\n resp = json.loads(r.content.decode())\n mid = resp['movie_id'] #mid is now the ID of the recommended movie\n self.mid = mid\n movie = self.get_movie(mid)\n #print(movie)\n title = movie['title']\n imgExt = movie['img']\n genre = movie['genres']\n self.titleText.setText(title)\n self.ImageName = QPixmap(self.IMG_DIR + imgExt)\n #self.setPixmap(self.Image)\n pixmap = QPixmap(self.ImageName)\n self.Image.setPixmap(pixmap)\n self.genreText.setText(genre)\n rating = self.get_rating(self.mid)\n self.ratingText.setText(\"{0:.2f}\".format(rating))\n \n def get_movie(self, movie_id):\n #gets info on given movie id\n r = requests.get(self.MOVIES_URL + str(movie_id))\n resp = json.loads(r.content.decode('utf-8'))\n return resp\n\n def get_rating(self, movie_id):\n
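 # the ratings endpoint returns JSON with a single 'rating' field\n r = 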
requests.get(self.RATINGS_URL + str(movie_id))\n resp = json.loads(r.content.decode('utf-8'))\n return resp['rating']\n \n def upVoteMovie(self):\n data = {\"movie_id\":str(self.mid), \"rating\":\"5\"}\n data = json.dumps(data)\n r = requests.put(self.RECOMMENDATIONS_URL + str(self.uid), data)\n self.rateOneMovie()\n\n def downVoteMovie(self):\n data = {\"movie_id\":str(self.mid), \"rating\":\"1\"}\n data = json.dumps(data)\n r = requests.put(self.RECOMMENDATIONS_URL + str(self.uid), data)\n self.rateOneMovie()\n\n def userInfo(self):\n return\n\n def setUser(self):\n return\n \n \nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n gui = MoviesQT()\n gui.show()\n sys.exit(app.exec_())\n\n \n","sub_path":"old/pyqt/rater.py","file_name":"rater.py","file_ext":"py","file_size_in_byte":3985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"374950920","text":"'''\nGiven an Undirected Graph. Check whether it contains a cycle or not. \n\nhttps://www.geeksforgeeks.org/detect-cycle-undirected-graph/\n\n'''\ndef dfs(g,node,parent,vis):\n \n vis[node] = 1\n \n # visit only the neighbours of the current vertex; iterating over g itself\n # would wrongly treat every vertex as adjacent\n for nodes in g[node]:\n \n if(vis[nodes] == 0):\n \n if(dfs(g, nodes, node, vis) == 1):\n return 1\n \n else:\n if(nodes != parent):\n return 1\n \n return 0\n\ndef isCyclic(g,n):\n \n # mark each vertex as not visited\n vis = [0 for i in range(n)]\n \n for node in range(n):\n if(vis[node] == False):\n if(dfs(g, node, -1, vis)):\n return 1\n return 0","sub_path":"geeksforgeeks/graph/5_Detect_cycle_in_undirected_graph_difficult.py","file_name":"5_Detect_cycle_in_undirected_graph_difficult.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"244944716","text":"import typing\nfrom collections import OrderedDict\n\nimport pytest\n\nfrom flytekit.common.exceptions.user import FlyteValidationException\nfrom flytekit.common.translator import get_serializable\nfrom flytekit.core import context_manager\nfrom flytekit.core.context_manager import Image, ImageConfig\nfrom flytekit.core.launch_plan import LaunchPlan\nfrom flytekit.core.task import task\nfrom flytekit.core.workflow import ImperativeWorkflow, get_promise, workflow\nfrom flytekit.models import literals as literal_models\n\ndefault_img = Image(name=\"default\", fqn=\"test\", tag=\"tag\")\nserialization_settings = context_manager.SerializationSettings(\n project=\"project\",\n domain=\"domain\",\n version=\"version\",\n env=None,\n image_config=ImageConfig(default_image=default_img, images=[default_img]),\n)\n\n\ndef test_imperative():\n @task\n def t1(a: str) -> str:\n return a + \" world\"\n\n @task\n def t2():\n print(\"side effect\")\n\n wb = ImperativeWorkflow(name=\"my.workflow\")\n wb.add_workflow_input(\"in1\", str)\n node = wb.add_entity(t1, a=wb.inputs[\"in1\"])\n wb.add_entity(t2)\n wb.add_workflow_output(\"from_n0t1\", node.outputs[\"o0\"])\n\n assert wb(in1=\"hello\") == \"hello world\"\n\n wf_spec = get_serializable(OrderedDict(), serialization_settings, wb)\n assert len(wf_spec.template.nodes) == 2\n assert wf_spec.template.nodes[0].task_node is not None\n assert len(wf_spec.template.outputs) == 1\n assert wf_spec.template.outputs[0].var == \"from_n0t1\"\n assert len(wf_spec.template.interface.inputs) == 1\n assert len(wf_spec.template.interface.outputs) == 1\n\n # Create launch plan from wf, that can also be serialized.\n lp = LaunchPlan.create(\"test_wb\", wb)\n
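 # the serialized launch plan should reference the imperative workflow by name\n lp_model = get_serializable(OrderedDict(), serialization_settings, 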
lp)\n assert lp_model.spec.workflow_id.name == \"my.workflow\"\n\n\ndef test_imperative_list_bound():\n @task\n def t1(a: typing.List[int]) -> int:\n return sum(a)\n\n wb = ImperativeWorkflow(name=\"my.workflow.a\")\n wb.add_workflow_input(\"in1\", int)\n wb.add_workflow_input(\"in2\", int)\n node = wb.add_entity(t1, a=[wb.inputs[\"in1\"], wb.inputs[\"in2\"]])\n wb.add_workflow_output(\"from_n0t1\", node.outputs[\"o0\"])\n\n assert wb(in1=3, in2=4) == 7\n\n\ndef test_imperative_map_bound():\n @task\n def t1(a: typing.Dict[str, typing.List[int]]) -> typing.Dict[str, int]:\n return {k: sum(v) for k, v in a.items()}\n\n wb = ImperativeWorkflow(name=\"my.workflow.a\")\n in1 = wb.add_workflow_input(\"in1\", int)\n wb.add_workflow_input(\"in2\", int)\n in3 = wb.add_workflow_input(\"in3\", int)\n node = wb.add_entity(t1, a={\"a\": [in1, wb.inputs[\"in2\"]], \"b\": [wb.inputs[\"in2\"], in3]})\n wb.add_workflow_output(\"from_n0t1\", node.outputs[\"o0\"])\n\n assert wb(in1=3, in2=4, in3=5) == {\"a\": 7, \"b\": 9}\n\n\ndef test_imperative_with_list_io():\n @task\n def t1(a: int) -> typing.List[int]:\n return [1, a, 3]\n\n @task\n def t2(a: typing.List[int]) -> int:\n return sum(a)\n\n wb = ImperativeWorkflow(name=\"my.workflow.a\")\n t1_node = wb.add_entity(t1, a=2)\n t2_node = wb.add_entity(t2, a=t1_node.outputs[\"o0\"])\n wb.add_workflow_output(\"from_n0t2\", t2_node.outputs[\"o0\"])\n\n assert wb() == 6\n\n\ndef test_imperative_wf_list_input():\n @task\n def t1(a: int) -> typing.List[int]:\n return [1, a, 3]\n\n @task\n def t2(a: typing.List[int], b: typing.List[int]) -> int:\n return sum(a) + sum(b)\n\n wb = ImperativeWorkflow(name=\"my.workflow.a\")\n wf_in1 = wb.add_workflow_input(\"in1\", typing.List[int])\n t1_node = wb.add_entity(t1, a=2)\n t2_node = wb.add_entity(t2, a=t1_node.outputs[\"o0\"], b=wf_in1)\n wb.add_workflow_output(\"from_n0t2\", t2_node.outputs[\"o0\"])\n\n assert wb(in1=[5, 6, 7]) == 24\n\n wf_spec = get_serializable(OrderedDict(), serialization_settings, wb)\n assert len(wf_spec.template.nodes) == 2\n assert wf_spec.template.nodes[0].task_node is not None\n\n\ndef test_imperative_scalar_bindings():\n @task\n def t1(a: typing.Dict[str, typing.List[int]]) -> typing.Dict[str, int]:\n return {k: sum(v) for k, v in a.items()}\n\n wb = ImperativeWorkflow(name=\"my.workflow.a\")\n node = wb.add_entity(t1, a={\"a\": [3, 4], \"b\": [5, 6]})\n wb.add_workflow_output(\"from_n0t1\", node.outputs[\"o0\"])\n\n assert wb() == {\"a\": 7, \"b\": 11}\n\n wf_spec = get_serializable(OrderedDict(), serialization_settings, wb)\n assert len(wf_spec.template.nodes) == 1\n assert wf_spec.template.nodes[0].task_node is not None\n\n\ndef test_imperative_list_bound_output():\n @task\n def t1() -> int:\n return 3\n\n @task\n def t2(a: typing.List[int]) -> int:\n return sum(a)\n\n wb = ImperativeWorkflow(name=\"my.workflow.a\")\n t1_node = wb.add_entity(t1)\n t2_node = wb.add_entity(t2, a=[1, 2, 3])\n wb.add_workflow_output(\"wf0\", [t1_node.outputs[\"o0\"], t2_node.outputs[\"o0\"]], python_type=typing.List[int])\n\n assert wb() == [3, 6]\n\n\ndef test_call_normal():\n @task\n def t1(a: int) -> (int, str):\n return a + 2, \"world\"\n\n @workflow\n def my_functional_wf(a: int) -> (int, str):\n return t1(a=a)\n\n my_functional_lp = LaunchPlan.create(\"my_functional_wf.lp0\", my_functional_wf, default_inputs={\"a\": 3})\n\n wb = ImperativeWorkflow(name=\"imperio\")\n node = wb.add_entity(my_functional_wf, a=3)\n wb.add_workflow_output(\"from_n0_1\", node.outputs[\"o0\"])\n 
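# my_functional_wf returns t1's (int, str) tuple; expose its second element too\n 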
wb.add_workflow_output(\"from_n0_2\", node.outputs[\"o1\"])\n\n assert wb() == (5, \"world\")\n\n wb_lp = ImperativeWorkflow(name=\"imperio\")\n node = wb_lp.add_entity(my_functional_lp)\n wb_lp.add_workflow_output(\"from_n0_1\", node.outputs[\"o0\"])\n wb_lp.add_workflow_output(\"from_n0_2\", node.outputs[\"o1\"])\n\n assert wb_lp() == (5, \"world\")\n\n\ndef test_imperative_call_from_normal():\n @task\n def t1(a: str) -> str:\n return a + \" world\"\n\n wb = ImperativeWorkflow(name=\"my.workflow\")\n wb.add_workflow_input(\"in1\", str)\n node = wb.add_entity(t1, a=wb.inputs[\"in1\"])\n wb.add_workflow_output(\"from_n0t1\", node.outputs[\"o0\"])\n\n assert wb(in1=\"hello\") == \"hello world\"\n\n @workflow\n def my_functional_wf(a: str) -> str:\n x = wb(in1=a)\n return x\n\n assert my_functional_wf(a=\"hello\") == \"hello world\"\n\n # Create launch plan from wf\n lp = LaunchPlan.create(\"test_wb_2\", wb, fixed_inputs={\"in1\": \"hello\"})\n\n @workflow\n def my_functional_wf_lp() -> str:\n x = lp()\n return x\n\n assert my_functional_wf_lp() == \"hello world\"\n\n\ndef test_codecov():\n with pytest.raises(FlyteValidationException):\n get_promise(literal_models.BindingData(), {})\n\n with pytest.raises(FlyteValidationException):\n get_promise(literal_models.BindingData(promise=3), {})\n\n @task\n def t1(a: str) -> str:\n return a + \" world\"\n\n wb = ImperativeWorkflow(name=\"my.workflow\")\n wb.add_workflow_input(\"in1\", str)\n node = wb.add_entity(t1, a=wb.inputs[\"in1\"])\n wb.add_workflow_output(\"from_n0t1\", node.outputs[\"o0\"])\n\n assert wb(in1=\"hello\") == \"hello world\"\n\n with pytest.raises(AssertionError):\n wb(3)\n\n with pytest.raises(ValueError):\n wb(in2=\"hello\")\n","sub_path":"tests/flytekit/unit/core/test_imperative.py","file_name":"test_imperative.py","file_ext":"py","file_size_in_byte":7116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"68674532","text":"lines = open('input.txt').readlines()\n\n\nfor x in range(0, len(lines) - 1):\n templines = lines.copy()\n if templines[x][0: 3] == 'nop':\n templines[x] = 'jmp ' + templines[x][4:]\n elif templines[x][0: 3] == 'jmp':\n templines[x] = 'nop ' + templines[x][4:]\n index = 0\n accumulator = 0\n cmdsExecuted = []\n while index > -1:\n if index > len(templines) - 1:\n print(accumulator)\n break\n if index in cmdsExecuted:\n break\n cmdsExecuted.append(index)\n cmd = templines[index][0: 3]\n loc = int(templines[index][4:])\n if cmd == 'nop':\n index += 1\n elif cmd == 'acc':\n accumulator += loc\n index += 1\n elif cmd == 'jmp':\n index += loc\n\n","sub_path":"2020/day08/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"5024208","text":"#! 
/usr/bin/python\r\n\r\n\"\"\"\r\n Program: debug4_3.py\r\n Function: reverse the matrix\r\n\"\"\"\r\n\r\n\r\nq = [ [ 1, 2, 3],\r\n [ 4, 5, 6],\r\n [ 7, 8, 9] ]\r\n\r\n[ q[i].reverse() for i in range(len(q)) ] # reverse each row (range objects have no .reverse() method)\r\nq.reverse() # and reverse the row order\r\n\r\nprint(\"q =\", q)\r\n#print \"k =\", k\r\n\r\nprint(\"That's all folks!\")\r\n\r\n\r\n\r\n","sub_path":"Ch04_lists/debug4_3.py","file_name":"debug4_3.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"85751416","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Sep 9 09:51:36 2020\n\n@author: JiaoJy\n\"\"\"\n# encoding: utf-8\nimport fcm_fnn\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef loadDataSet(filepath):\n with open(filepath) as f:\n rawList = list(map(lambda line: float(line.strip()), f.readlines()))\n labelSet = []\n testSet = []\n for i in range(8, 8 + 500):\n labelSet.append((rawList[i:i+4], rawList[i+1:i+5]))\n # labelSet.append((rawList[i], rawList[i+1]))\n for i in range(8 + 500, 8 + 1000):\n testSet.append((rawList[i:i+4], rawList[i+1:i+5]))\n # testSet.append((rawList[i], rawList[i+1]))\n return labelSet, testSet\n\ndef loadDataSets(filepath):\n data = pd.read_csv('./data/dataProcess.csv',index_col = 0)\n data = np.array(data).tolist()\n labelSet = []\n testSet = []\n for i in range(8,8 + 500):\n labelSet.append((data[i][:],data[i+1][:]))\n for i in range(8 + 500, 8 + 1000):\n testSet.append((data[i][:],data[i+1][:]))\n return labelSet,testSet\n\ndef errorLp(p,data_pre,data_real):\n dist = np.linalg.norm(data_pre-data_real,ord=p)/(len(data_pre))\n return dist\n\ndef MSE(data_pre,data_real):\n if data_pre.ndim == 1:\n num = 1\n elif data_pre.ndim == 2:\n num = data_pre.shape[data_pre.ndim-1]\n dist = [0] * num\n\n dist_temp = (np.power((data_pre - data_real),2))/len(data_pre)\n dist = dist_temp.sum(axis=0)\n return dist\n\n\ndef RMSE(M):\n dist = np.power(M,0.5)\n return dist\n\n\ndef drawPre(title,preData,realData,dataNum=50):\n plt.title(title)\n plt.plot(range(dataNum), preData, color='green', label='predict');\n plt.plot(range(dataNum), realData, color='red', label='real');\n plt.legend();\n plt.xlabel('time');\n plt.ylabel('value');\n plt.show();\n\nif __name__ == \"__main__\":\n # data = pd.read_csv('./data/dataProcess.csv',index_col = 0)\n # data = pd.DataFrame(np.array(data.iloc[:,1]))\n # data.to_csv('./data/datatest.csv',index=None)\n labelSet, testSet = loadDataSet(\"./data/datatest.csv\")\n\n # with open(\"fnn.bin\", \"rb\") as f:\n # fnn = pickle.loads(f.read())\n\n fnn = fcm_fnn.FCM_FNN(4,4,4,4)\n # fnn = fcm_fnn.FCM_FNN(6)\n\n for i in range(25):\n print(\"train err: %s test err: %s\" % (fnn.train(labelSet, 0.02), fnn.test(testSet)))\n\n for oj, oconcept in enumerate(fnn.concepts):\n for omj in range(oconcept.numOfTerms):\n print(\"(%s, %s) C:%s W:%s Xi:%s\" % (oj, omj, oconcept.C[omj], oconcept.sigma[omj], oconcept.xi[omj]))\n print()\n \n # print(\"write fnn data\")\n # with open(\"fnn.bin\", \"wb\") as f:\n # f.write(pickle.dumps(fnn))\n\n print(\"write test files\")\n testData = []\n for X, D in testSet:\n testData.append(D[0])\n testData += testSet[-1][1][1:4]\n with open(\"testData.csv\", \"w\") as f:\n f.write(\"\\n\".join(map(lambda v:str(v), testData)))\n\n print(\"write predict files\")\n predictData = []\n for X, D in testSet:\n predictData.append(fnn.predict(X)[0])\n predictData += fnn.predict(testSet[-1][0])[1:4]\n
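 # dump the one-step-ahead predictions so they can be compared with the test series\n with 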
open(\"predictData.csv\", \"w\") as f:\n f.write(\"\\n\".join(map(lambda v:str(v), predictData)))\n \n Mwucha = MSE(np.array(predictData), np.array(testData))\n Rwucha = RMSE(Mwucha)\n drawPre(\"CO\",predictData,testData,len(predictData))\n print(\"欧式距离:%s\" % errorLp(2,np.array(predictData),np.array(testData)))\n print(\"MSE:%s\" % Mwucha)\n print(\"RMSE:%s\" % Rwucha)","sub_path":"FCM_FNN/test_one.py","file_name":"test_one.py","file_ext":"py","file_size_in_byte":3532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"296349862","text":"from datetime import datetime\nfrom bson.objectid import ObjectId\nimport graphene\n\nfrom lib.mongo import db\nfrom lib.schemas.types.article import Article, ArticleStatus\n\n\nclass UpdateArticle(graphene.Mutation):\n class Meta:\n output = Article\n\n class Arguments:\n id = graphene.ID(required=True)\n title = graphene.String()\n content = graphene.String()\n\n async def mutate(self, info, id, title=None, content=None):\n if not info.context.logged_in:\n raise Exception('You must log in to update an article.')\n\n article = await info.context.loaders.article.load(id)\n if article is None:\n raise Exception('The article does not exist.')\n\n me = info.context.user\n if me.id != article.author_id or article.status != ArticleStatus.DRAFT:\n raise Exception('Access denied.')\n\n now = datetime.now()\n fields_set = {'updated_at': now}\n article.updated_at = now\n\n if title is not None:\n if len(title) == 0:\n raise Exception('The title must not be empty.')\n if len(title) > 100:\n raise Exception('Tht title must not more than 64 words.')\n fields_set['title'] = title\n article.title = title\n\n if content is not None:\n if len(content) == 0:\n raise Exception('The content must not be empty.')\n fields_set['content'] = content\n article.content = content\n\n db().articles.update_one({'_id': ObjectId(id)}, {'$set': fields_set})\n\n return article\n","sub_path":"lib/schemas/mutations/update_article.py","file_name":"update_article.py","file_ext":"py","file_size_in_byte":1594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"69518291","text":"from django.conf.urls import include, url\nfrom adminPortal import views\nfrom django.urls import path\nurlpatterns=[\n url(r'^admin.login$',views.index,name='index'),\n url(r'^index$', views.index, name='index'),\n url(r'^login$', views.login, name='login'),\n url(r'^verfiyCar$', views.verfiyCar, name='verfiyCar'),\n url(r'^verfiyID$', views.verfiyID, name='verfiyID'),\n url(r'^Promotion$', views.Promotion, name='Promotion'),\n url(r'^SendNotification$', views.SendNotification, name='SendNotification'),\n #url(r'^admin$', views.admin, name='verfiyID'),\n url(r'^logout$', views.logout, name='logout'),\n url(r'^appDis$', views.appDis, name='appDis'),\n url(r'^CarVer$', views.CarVer, name='appDisCar'),\n url(r'^users$', views.users, name='users'),\n url(r'^tripRequest$', views.tripRequest, name='users'),\n url(r'^userinfo$', views.userinfo, name='userinfo'),\n url(r'^tripInfo$', views.tripInfo, name='tripInfo'),\n url(r'^offer$', views.offer, name='offer'),\n url(r'^groupInfo$', views.groupInfo, name='offer'),\n url(r'^groupall$', views.groupall, name='groupall'),\n]","sub_path":"adminPortal/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"210776610","text":"\n\narr = list(map(int, 
input(\"Enter The List :\").split()))\n\n\ndef swap(i, j):\n tmp = arr[i]\n arr[i] = arr[j]\n arr[j] = tmp\n\n\ndef bubSort(arr):\n size = len(arr)\n for i in range(0, size):\n for j in range(0, size-i-1):\n \n if arr[j] > arr[j+1]:\n swap(j+1, j)\n\n\nbubSort(arr)\nprint(\"Sorted List :\", *arr)\n","sub_path":"bubleSort.py","file_name":"bubleSort.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"604769587","text":"#!/usr/bin/env python\n\nimport sys, logging\nimport ast\nfrom bandit import result_store as b_result_store\nfrom bandit import node_visitor as b_node_visitor\nfrom bandit import test_set as b_test_set\nfrom bandit import meta_ast as b_meta_ast\n\nclass BanditManager():\n\n scope = []\n progress = 50\n\n def __init__(self, test_config, debug=False):\n self.logger = self._init_logger(debug)\n self.b_ma = b_meta_ast.BanditMetaAst(self.logger)\n self.b_rs = b_result_store.BanditResultStore(self.logger)\n self.b_ts = b_test_set.BanditTestSet(self.logger, test_config)\n\n def get_logger(self):\n return self.logger\n\n def get_resultstore(self):\n return self.b_rs\n\n def output_results(self, lines, level):\n self.b_rs.report(scope=self.scope, lines=lines, level=level)\n\n def output_metaast(self):\n self.b_ma.report()\n\n def run_scope(self, scope):\n if scope:\n self.scope = scope\n if len(scope) > self.progress:\n sys.stdout.write(\"%s [\" % len(scope))\n for i, fname in enumerate(scope):\n self.logger.debug(\"working on file : %s\" % fname)\n if len(scope) > self.progress:\n if i % self.progress == 0:\n sys.stdout.write(\"%s.. \" % i)\n sys.stdout.flush()\n try:\n with open(fname, 'rU') as fdata:\n try:\n self._execute_ast_visitor(fname, fdata, self.b_ma, self.b_rs, self.b_ts)\n except KeyboardInterrupt as e:\n sys.exit(2)\n except IOError as e:\n self.b_rs.skip(fname, e.strerror)\n if len(scope) > self.progress:\n sys.stdout.write(\"]\\n\")\n sys.stdout.flush()\n else:\n self.logger.info(\"no filename/s provided, working from stdin\")\n try:\n self._execute_ast_visitor('STDIN', sys.stdin, self.b_ma, self.b_rs)\n except KeyboardInterrupt:\n self.logger.debug(\"exiting\")\n sys.exit(1)\n\n def _execute_ast_visitor(self, fname, fdata, b_ma, b_rs, b_ts):\n if fdata != None:\n res = b_node_visitor.BanditNodeVisitor(fname, self.logger, b_ma, b_rs, b_ts)\n try:\n res.visit(ast.parse(\"\".join(fdata.readlines())))\n except SyntaxError as e:\n b_rs.skip(fname, \"syntax error while parsing AST from file\")\n\n def _init_logger(self, debug=False):\n log_level = logging.INFO\n if debug:\n log_level = logging.DEBUG\n log_format = '[%(module)s]\\t%(levelname)s\\t%(message)s'\n logger = logging.getLogger()\n logger.setLevel(log_level)\n handler = logging.StreamHandler(sys.stdout)\n handler.setFormatter(logging.Formatter(log_format))\n logger.addHandler(handler)\n logger.debug(\"logging initialized\")\n return logger\n\n","sub_path":"bandit/manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":3024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"512653821","text":"import abjad\nimport baca\nfrom desir.legatissimo.transpose import transpose_bass_clarinet\n\n\ndef populate_star_chain_measures(score):\n print(\"populating star chain measures ...\")\n violin_staff = score[\"Violin\"]\n clarinet_staff = score[\"Clarinet\"]\n\n violin_measures = abjad.iterate.measures_forward_in(violin_staff, 69 - 1)\n violin_measures = 
list(violin_measures)\n assert len(violin_measures) == 5\n\n bass_clarinet_measures = abjad.iterate.measures_forward_in(\n clarinet_staff, 69 - 1\n )\n bass_clarinet_measures = list(bass_clarinet_measures)\n assert len(bass_clarinet_measures) == 5\n\n violin_pitch = abjad.NamedPitch(9)\n\n leaves = [abjad.Note(violin_pitch, (8, 8)), abjad.Rest((1, 8))]\n tuplet = abjad.Tuplet.from_duration(1, leaves)\n tuplet.leaves[0].note_head.style = \"la\"\n tuplet.leaves[0].dynamics.mark = \"f\"\n violin_measures[0][:] = [tuplet]\n\n markup = abjad.Markup(r\"\\italic { gridato prolungato \\hspace #0.3 }\")\n leaves = violin_measures[0].leaves\n spanner = abjad.solid_text_spanner_below_with_nib_at_right(markup, leaves)\n spanner.thickness = 1.5\n spanner.bound_details__left__stencil_align_dir_y = 0\n\n leaves = [abjad.Rest((8, 8))]\n tuplet = abjad.Tuplet.from_duration(1, leaves)\n violin_measures[1][:] = [tuplet]\n\n leaves = [abjad.Note(violin_pitch, (8, 8)), abjad.Rest((1, 8))]\n tuplet = abjad.Tuplet.from_duration(1, leaves)\n tuplet.leaves[0].note_head.style = \"la\"\n tuplet.leaves[0].dynamics.mark = \"f\"\n violin_measures[2][:] = [tuplet]\n\n markup = abjad.Markup(r\"\\italic { gridato prolungato \\hspace #0.3 }\")\n leaves = violin_measures[2].leaves\n spanner = abjad.solid_text_spanner_below_with_nib_at_right(markup, leaves)\n spanner.thickness = 1.5\n spanner.bound_details__left__stencil_align_dir_y = 0\n\n leaves = [abjad.Rest((8, 8))]\n tuplet = abjad.Tuplet.from_duration(1, leaves)\n violin_measures[3][:] = [tuplet]\n\n leaves = [\n abjad.Note(violin_pitch, (16, 8)),\n abjad.Rest((8, 8)),\n abjad.Rest((3, 8)),\n ]\n tuplet = abjad.Tuplet.from_duration((24, 8), leaves)\n tuplet.leaves[0].note_head.style = \"la\"\n tuplet.leaves[0].dynamics.mark = \"f\"\n violin_measures[4][:] = [tuplet]\n\n markup = abjad.Markup(r\"\\italic { gridato prolungato \\hspace #0.3 }\")\n leaves = violin_measures[4].leaves[:2]\n spanner = abjad.solid_text_spanner_below_with_nib_at_right(markup, leaves)\n spanner.thickness = 1.5\n spanner.bound_details__left__stencil_align_dir_y = 0\n\n bass_clarinet_pitch = abjad.NamedPitch(-23)\n\n leaves = [abjad.Rest((8, 8))]\n tuplet = abjad.Tuplet.from_duration(1, leaves)\n bass_clarinet_measures[0][:] = tuplet * 1\n bass_clarinet_measures[1][:] = tuplet * 1\n bass_clarinet_measures[2][:] = tuplet * 1\n bass_clarinet_measures[3][:] = tuplet * 1\n\n note = abjad.Note(bass_clarinet_pitch, (16, 8))\n leaves = [abjad.Rest((8, 8)), note, abjad.Rest((3, 8))]\n tuplet = abjad.Tuplet.from_duration((24, 8), leaves)\n bass_clarinet_measures[4][:] = [tuplet]\n\n leaves = bass_clarinet_measures[4].leaves[1:3]\n abjad.hairpin(\"niente o< ff\", leaves)\n\n transpose_bass_clarinet(bass_clarinet_measures)\n","sub_path":"desir/etc/legatissimo/py/populate_star_chain_measures.py","file_name":"populate_star_chain_measures.py","file_ext":"py","file_size_in_byte":3206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"487900244","text":"#!/usr/bin/env python\n\n\"\"\"Downsample volume by blockwise reduction.\n\n\"\"\"\n\nimport sys\nimport argparse\n\nimport numpy as np\nfrom skimage.util import view_as_blocks\nfrom scipy.stats import mode as scipy_mode\n\nfrom wmem import parse, utils\n\n\ndef main(argv):\n \"\"\"Downsample volume by blockwise reduction.\"\"\"\n\n parser = argparse.ArgumentParser(\n description=__doc__,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter\n )\n parser = parse.parse_downsample_blockwise(parser)\n parser = 
parse.parse_common(parser)\n args = parser.parse_args()\n\n downsample_blockwise(\n args.inputfile,\n args.blockreduce,\n args.func,\n args.dataslices,\n args.outputfile,\n args.save_steps,\n args.protective,\n )\n\n\ndef downsample_blockwise(\n h5path_in,\n blockreduce=[3, 3, 3],\n func='np.amax',\n dataslices=None,\n h5path_out='',\n save_steps=False,\n protective=False,\n ):\n \"\"\"Downsample volume by blockwise reduction.\"\"\"\n\n # Check if any output paths already exist.\n outpaths = {'out': h5path_out}\n status = utils.output_check(outpaths, save_steps, protective)\n if status == \"CANCELLED\":\n return\n\n # Open the inputfile for reading.\n # TODO: option to get the input data passed\n h5file_in, ds_in, elsize, axlab = utils.h5_load(h5path_in)\n\n # Get the matrix size and resolution of the outputdata.\n outsize, elsize = get_new_sizes(func, blockreduce, ds_in.shape, elsize)\n\n # Open the outputfile for writing and create the dataset or output array.\n h5file_out, ds_out = utils.h5_write(None, outsize, ds_in.dtype,\n h5path_out,\n element_size_um=elsize,\n axislabels=axlab)\n\n # Get the slice objects for the input data.\n slices = utils.get_slice_objects_prc(dataslices, ds_in.shape)\n\n # Reformat the data to the outputsize.\n if func == 'expand':\n out = ds_in[slices[0], ...]\n for axis in range(0, ds_out.ndim):\n out = np.repeat(out, blockreduce[axis], axis=axis)\n ds_out[slices[0], ...] = out\n else:\n \"\"\" TODO: flexible mapping from in to out\n now:\n the reduction factor of the first axis must be 1;\n the extent of the remaining axes must be full\n \"\"\"\n ds_out[slices[0], ...] = block_reduce(ds_in[slices[0], ...],\n block_size=tuple(blockreduce),\n func=eval(func))\n\n # Close the h5 files or return the output array.\n try:\n h5file_in.close()\n h5file_out.close()\n except (ValueError, AttributeError):\n return ds_out\n\n\ndef get_new_sizes(func, blockreduce, dssize, elsize):\n \"\"\"Calculate the reduced dataset size and voxelsize.\"\"\"\n\n if func == 'expand':\n fun_dssize = lambda d, b: int(np.ceil(float(d) * b))\n fun_elsize = lambda e, b: float(e) / b\n else:\n fun_dssize = lambda d, b: int(np.ceil(float(d) / b))\n fun_elsize = lambda e, b: float(e) * b\n\n dssize = [fun_dssize(d, b) for d, b in zip(dssize, blockreduce)]\n elsize = [fun_elsize(e, b) for e, b in zip(elsize, blockreduce)]\n\n return dssize, elsize\n\n\n# NOTE: adapted version of scikit-image-dev0.13 block_reduce\n# it uses flattened blocks to calculate the (scipy) mode\ndef block_reduce(image, block_size, func=np.sum, cval=0):\n \"\"\"Down-sample image by applying function to local blocks.\n Parameters\n ----------\n image : ndarray\n N-dimensional input image.\n block_size : array_like\n Array containing down-sampling integer factor along each axis.\n func : callable\n Function object which is used to calculate the return value for each\n local block. 
This function must implement an ``axis`` parameter such\n as ``numpy.sum`` or ``numpy.min``.\n cval : float\n Constant padding value if image is not perfectly divisible by the\n block size.\n Returns\n -------\n image : ndarray\n Down-sampled image with same number of dimensions as input image.\n Examples\n --------\n >>> from skimage.measure import block_reduce\n >>> image = np.arange(3*3*4).reshape(3, 3, 4)\n >>> image # doctest: +NORMALIZE_WHITESPACE\n array([[[ 0, 1, 2, 3],\n [ 4, 5, 6, 7],\n [ 8, 9, 10, 11]],\n [[12, 13, 14, 15],\n [16, 17, 18, 19],\n [20, 21, 22, 23]],\n [[24, 25, 26, 27],\n [28, 29, 30, 31],\n [32, 33, 34, 35]]])\n >>> block_reduce(image, block_size=(3, 3, 1), func=np.mean)\n array([[[ 16., 17., 18., 19.]]])\n >>> image_max1 = block_reduce(image, block_size=(1, 3, 4), func=np.max)\n >>> image_max1 # doctest: +NORMALIZE_WHITESPACE\n array([[[11]],\n [[23]],\n [[35]]])\n >>> image_max2 = block_reduce(image, block_size=(3, 1, 4), func=np.max)\n >>> image_max2 # doctest: +NORMALIZE_WHITESPACE\n array([[[27],\n [31],\n [35]]])\n \"\"\"\n\n if len(block_size) != image.ndim:\n raise ValueError(\"`block_size` must have the same length \"\n \"as `image.shape`.\")\n\n pad_width = []\n for i in range(len(block_size)):\n if block_size[i] < 1:\n raise ValueError(\"Down-sampling factors must be >= 1. Use \"\n \"`skimage.transform.resize` to up-sample an \"\n \"image.\")\n if image.shape[i] % block_size[i] != 0:\n after_width = block_size[i] - (image.shape[i] % block_size[i])\n else:\n after_width = 0\n pad_width.append((0, after_width))\n\n image = np.pad(image, pad_width=pad_width, mode='constant',\n constant_values=cval)\n\n out = view_as_blocks(image, block_size)\n\n if func is mode:\n # TODO: implement restriding here instead of reshape?\n outshape = tuple(out.shape[:3]) + tuple([-1])\n out = np.reshape(out, outshape)\n out = scipy_mode(out)\n else:\n for i in range(len(out.shape) // 2):\n out = func(out, axis=-1)\n\n return out\n\n\ndef mode(array, axis=None): # axis argument needed for block_reduce\n \"\"\"Calculate the blockwise mode.\"\"\"\n\n smode = np.zeros_like(array[:, :, :, 0])\n for i in range(array.shape[0]):\n for j in range(array.shape[1]):\n for k in range(array.shape[2]):\n block = array[i, j, k, :].ravel()\n smode[i, j, k] = np.argmax(np.bincount(block))\n\n return smode\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n","sub_path":"wmem/downsample_blockwise.py","file_name":"downsample_blockwise.py","file_ext":"py","file_size_in_byte":6685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"581731817","text":"print('Chapter 13: Extracting Data from XML')\r\n\r\n\r\n#In this assignment you will write a Python program somewhat similar to http://www.py4e.com/code3/geoxml.py. 
The program will prompt for a URL, read the XML data from that URL using urllib and then parse and extract the comment counts from the XML data, compute the sum of the numbers in the file.\r\n\r\nimport urllib.request, urllib.parse, urllib.error\r\nimport xml.etree.ElementTree as ET\r\nimport ssl\r\n\r\nsample = ' http://py4e-data.dr-chuck.net/comments_42.xml'\r\nactual = 'http://py4e-data.dr-chuck.net/comments_1119258.xml'\r\n\r\ninputURL = input('Please provide a website URL: ',)\r\n#inputURL = sample\r\n#inputURL = actual\r\n\r\nopenURL = urllib.request.urlopen(inputURL).read()\r\nprint('URL successfully opened', type(openURL))\r\n\r\ndata = openURL\r\nprint('Retrieved', len(data), 'characters')\r\n\r\ntree = ET.fromstring(data)\r\n\r\nresults = tree.findall('.//count')\r\n\r\nprint('Total items counted', len(results))\r\n#print('results type', type(results))\r\n\r\nsum = 0\r\nfor numbers in results:\r\n sum = int(numbers.text) + sum\r\n #print(item.text)\r\n #print(type(item.text))\r\n\r\n\r\nprint('Sum is:', sum)\r\n\r\n#The following two loops are just looking to find different ways to print out the information from the URL and practice XML more\r\n#results2 = tree.findall('comments/comment/name')\r\n#print('Names count 2', len(results2))\r\n\r\n#namelist = []\r\n#for names in results2:\r\n #namelist.append(names.text)\r\n#print(namelist)\r\n\r\n#results3 = tree.findall('comments/comment')\r\n#for item in results3:\r\n #print(item.find('count').text)\r\n #print(item.find('name').text)\r\n","sub_path":"pfe1302_Extract_XML.py","file_name":"pfe1302_Extract_XML.py","file_ext":"py","file_size_in_byte":1601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"281834403","text":"\nfrom collections import defaultdict\nimport json\nimport difflib\nimport re\n\nfrom mhapi import skills\nfrom mhapi.model import SharpnessLevel, _break_find\n\n\nWEAKPART_WEIGHT = 0.5\n\n\ndef raw_damage(true_raw, sharpness, affinity, monster_hitbox, motion):\n \"\"\"\n Calculate raw damage to a monster part with the given true raw,\n sharpness, monster raw weakness, and weapon motion value.\n \"\"\"\n return (true_raw\n * SharpnessLevel.raw_modifier(sharpness)\n * (1 + (affinity / 400.0))\n * motion / 100.0\n * monster_hitbox / 100.0)\n\n\ndef element_damage(element, sharpness, monster_ehitbox):\n \"\"\"\n Calculate elemental damage to a monster part with the given elemental\n attack, the given sharpness, and the given monster elemental weakness.\n Note that this is independent of the motion value of the attack.\n \"\"\"\n return (element / 10.0\n * SharpnessLevel.element_modifier(sharpness)\n * monster_ehitbox / 100.0)\n\n\nclass MotionType(object):\n CUT = \"cut\"\n IMPACT = \"impact\"\n FIXED = \"fixed\"\n\n\nclass MotionValue(object):\n def __init__(self, name, types, powers):\n self.name = name\n self.types = types\n self.powers = powers\n self.average = sum(self.powers) / len(self.powers)\n\n\nclass WeaponTypeMotionValues(object):\n def __init__(self, weapon_type, motion_data):\n self.weapon_type = weapon_type\n self.motion_values = dict()\n for d in motion_data:\n name = d[\"name\"]\n self.motion_values[name] = MotionValue(name, d[\"type\"], d[\"power\"])\n\n self.average = (sum(mv.average\n for mv in self.motion_values.itervalues())\n / len(self))\n\n def __len__(self):\n return len(self.motion_values)\n\n def keys(self):\n return self.motion_values.keys()\n\n def __getitem__(self, key):\n return self.motion_values[key]\n\n\nclass 
MotionValueDB(object):\n def __init__(self, json_path):\n with open(json_path) as f:\n self._raw_data = json.load(f)\n\n self.motion_values_map = dict()\n\n for d in self._raw_data:\n wtype = d[\"name\"]\n if wtype == \"Sword\":\n wtype = \"Sword and Shield\"\n self.motion_values_map[wtype] = WeaponTypeMotionValues(wtype,\n d[\"motions\"])\n\n def __getitem__(self, weapon_type):\n return self.motion_values_map[weapon_type]\n\n def keys(self):\n return self.motion_values_map.keys()\n\n def __len__(self):\n return len(self.motion_values_map)\n\n\nclass WeaponType(object):\n \"\"\"\n Enumeration for weapon types.\n \"\"\"\n SWITCH_AXE = \"Switch Axe\"\n HAMMER = \"Hammer\"\n HUNTING_HORN = \"Hunting Horn\"\n GREAT_SWORD = \"Great Sword\"\n CHARGE_BLADE = \"Charge Blade\"\n LONG_SWORD = \"Long Sword\"\n INSECT_GLAIVE = \"Insect Glaive\"\n LANCE = \"Lance\"\n GUNLANCE = \"Gunlance\"\n HEAVY_BOWGUN = \"Heavy Bowgun\"\n SWORD_AND_SHIELD = \"Sword and Shield\"\n DUAL_BLADES = \"Dual Blades\"\n LIGHT_BOWGUN = \"Light Bowgun\"\n BOW = \"Bow\"\n\n IMPACT = \"impact\"\n CUT = \"cut\"\n SHOT = \"shot\"\n MIXED = \"cut/impact\"\n\n _multiplier = {\n \"Switch Axe\": 5.4,\n \"Hammer\": 5.2,\n \"Hunting Horn\": 5.2,\n \"Great Sword\": 4.8,\n \"Charge Blade\": 3.6,\n \"Long Sword\": 3.3,\n \"Insect Glaive\": 3.1,\n \"Lance\": 2.3,\n \"Gunlance\": 2.3,\n \"Heavy Bowgun\": 1.5,\n \"Sword and Shield\": 1.4,\n \"Dual Blades\": 1.4,\n \"Light Bowgun\": 1.3,\n \"Bow\": 1.2,\n }\n\n @classmethod\n def all(cls):\n return cls._multiplier.keys()\n\n @classmethod\n def damage_type(cls, weapon_type):\n if weapon_type in (cls.HAMMER, cls.HUNTING_HORN):\n return cls.IMPACT\n elif weapon_type == cls.LANCE:\n return cls.MIXED\n elif weapon_type in (cls.LIGHT_BOWGUN, cls.HEAVY_BOWGUN, cls.BOW):\n return cls.SHOT\n else:\n return cls.CUT\n\n @classmethod\n def multiplier(cls, weapon_type):\n return cls._multiplier[weapon_type]\n\n\nclass WeaponMonsterDamage(object):\n \"\"\"\n Class for calculating how much damage a weapon does to a monster.\n Does not include overall monster defense.\n \"\"\"\n def __init__(self, weapon_row, monster_row, monster_damage, motion,\n sharp_plus=False, breakable_parts=None,\n attack_skill=skills.AttackUp.NONE,\n critical_eye_skill=skills.CriticalEye.NONE,\n element_skill=skills.ElementAttackUp.NONE,\n awaken=False):\n self.weapon = weapon_row\n self.monster = monster_row\n self.monster_damage = monster_damage\n self.motion = motion\n self.sharp_plus = sharp_plus\n self.breakable_parts = breakable_parts\n self.attack_skill = attack_skill\n self.critical_eye_skill = critical_eye_skill\n self.element_skill = element_skill\n self.awaken = awaken\n\n self.damage_map = defaultdict(PartDamage)\n self.average = 0\n self.weakness_weighted = 0\n self.best_weighted = 0\n self.break_weighted = 0\n\n self.weapon_type = self.weapon[\"wtype\"]\n self.true_raw = (self.weapon[\"attack\"]\n / WeaponType.multiplier(self.weapon_type))\n if sharp_plus:\n self.sharpness = self.weapon.sharpness_plus.max\n else:\n self.sharpness = self.weapon.sharpness.max\n #print \"sharpness=\", self.sharpness\n self.affinity = int(self.weapon[\"affinity\"] or 0)\n self.damage_type = WeaponType.damage_type(self.weapon_type)\n self.etype = self.weapon[\"element\"]\n self.eattack = self.weapon[\"element_attack\"]\n if not self.etype and self.awaken:\n self.etype = self.weapon.awaken\n self.eattack = self.weapon.awaken_attack\n\n if self.eattack:\n self.eattack = int(self.eattack)\n else:\n self.eattack = 0\n\n self.true_raw = 
skills.AttackUp.modified(attack_skill,\n self.true_raw)\n self.affinity = skills.CriticalEye.modified(critical_eye_skill,\n self.affinity)\n self.eattack = skills.ElementAttackUp.modified(element_skill,\n self.eattack)\n\n self.parts = []\n self.break_count = 0\n\n self.averages = dict(\n uniform=0,\n raw=0,\n element=0,\n weakpart_raw=0,\n weakpart_element=0,\n )\n self.max_raw_part = (None, 0)\n self.max_element_part = (None, 0)\n self._calculate_damage()\n\n @property\n def attack(self):\n return self.true_raw * WeaponType.multiplier(self.weapon_type)\n\n def _calculate_damage(self):\n for row in self.monster_damage._rows:\n # TODO: refactor to take advantage of new model\n part = row[\"body_part\"]\n alt = None\n m = re.match(r\"([^(]+) \\(([^)]+)\\)\", part)\n if m:\n part = m.group(1)\n alt = m.group(2)\n #print part, alt\n hitbox = 0\n hitbox_cut = int(row[\"cut\"])\n hitbox_impact = int(row[\"impact\"])\n if self.damage_type == WeaponType.CUT:\n hitbox = hitbox_cut\n elif self.damage_type == WeaponType.IMPACT:\n hitbox = hitbox_impact\n elif self.damage_type == WeaponType.MIXED:\n hitbox = max(hitbox_cut, hitbox_impact)\n\n raw = raw_damage(self.true_raw, self.sharpness, self.affinity,\n hitbox, self.motion)\n\n element = 0\n ehitbox = 0\n if self.etype in \"Fire Water Ice Thunder Dragon\".split():\n ehitbox = int(row[str(self.etype.lower())])\n element = element_damage(self.eattack, self.sharpness, ehitbox)\n\n part_damage = self.damage_map[part]\n part_damage.set_damage(raw, element, hitbox, ehitbox, state=alt)\n if not part_damage.part:\n part_damage.part = part\n if alt is None:\n if (self.breakable_parts\n and _break_find(part, self.monster_damage.parts.keys(),\n self.breakable_parts)):\n part_damage.breakable = True\n if hitbox > self.max_raw_part[1]:\n self.max_raw_part = (part, hitbox)\n if ehitbox > self.max_element_part[1]:\n self.max_element_part = (part, ehitbox)\n for d in self.damage_map.values():\n if d.is_breakable():\n self.break_count += 1\n self.parts = self.damage_map.keys()\n self.averages[\"uniform\"] = self.uniform()\n self.averages[\"raw\"] = self.weighted_raw()\n self.averages[\"element\"] = self.weighted_element()\n self.averages[\"weakpart_raw\"] = self.weakpart_weighted_raw()\n self.averages[\"weakpart_element\"] = self.weakpart_weighted_element()\n self.averages[\"break_raw\"] = self.break_weakpart_raw()\n self.averages[\"break_element\"] = self.break_weakpart_element()\n self.averages[\"break_only\"] = self.break_only()\n\n def uniform(self):\n average = 0.0\n for part, damage in self.damage_map.iteritems():\n average += damage.average()\n return average / len(self.damage_map)\n\n def weighted_raw(self):\n \"\"\"\n Average damage weighted by non-broken raw hitbox. 
For each part the\n damage is averaged across broken vs non-broken, weighted by the\n default of broken for 25% of the hits.\n \"\"\"\n average = 0.0\n total_hitbox = 0.0\n for part, damage in self.damage_map.iteritems():\n average += damage.average() * damage.hitbox\n total_hitbox += damage.hitbox\n if total_hitbox == 0:\n return 0\n return average / total_hitbox\n\n def weighted_element(self):\n \"\"\"\n Average damage weighted by non-broken element hitbox.\n \"\"\"\n average = 0.0\n total_ehitbox = 0.0\n for part, damage in self.damage_map.iteritems():\n average += damage.average() * damage.ehitbox\n total_ehitbox += damage.ehitbox\n if total_ehitbox == 0:\n return 0\n return average / total_ehitbox\n\n def weakpart_weighted_raw(self, weak_weight=WEAKPART_WEIGHT):\n other_weight = (1 - weak_weight) / (len(self.parts) - 1)\n average = 0\n for part, damage in self.damage_map.iteritems():\n if part == self.max_raw_part[0]:\n weight = weak_weight\n else:\n weight = other_weight\n average += damage.average() * weight\n return average\n\n def weakpart_weighted_element(self, weak_weight=WEAKPART_WEIGHT):\n other_weight = (1 - weak_weight) / (len(self.parts) - 1)\n average = 0\n for part, damage in self.damage_map.iteritems():\n if part == self.max_element_part[0]:\n weight = weak_weight\n else:\n weight = other_weight\n average += damage.average() * weight\n return average\n\n def break_weakpart_raw(self):\n \"\"\"\n Split evenly among break parts and weakest raw part.\n \"\"\"\n if not self.break_count:\n return 0\n average = 0.0\n count = self.break_count + 1\n for part, damage in self.damage_map.iteritems():\n if part == self.max_raw_part[0]:\n average += damage.average()\n if damage.is_breakable():\n count -= 1\n elif damage.is_breakable():\n # for breaks, assume attack until broken, unless it's a\n # weak part and covered above\n average += damage.total\n return average / count\n\n def break_weakpart_element(self):\n \"\"\"\n Split evenly among break parts and weakest element part.\n \"\"\"\n if not self.break_count:\n return 0\n average = 0.0\n count = self.break_count + 1\n for part, damage in self.damage_map.iteritems():\n if part == self.max_element_part[0]:\n # If weakpart is also a break, assume continue attacking\n # even after broken\n average += damage.average()\n if damage.is_breakable():\n count -= 1\n elif damage.is_breakable():\n # for breaks that aren't the weakpart, assume attack until\n # broken and then go back to weakpart\n average += damage.total\n return average / count\n\n def break_only(self):\n \"\"\"\n Split evenly among break parts. 
If there are breaks that are weak\n to element but not to raw or vice versa, this will represent that\n when comparing weapons.\n \"\"\"\n if not self.break_count:\n return 0\n average = 0.0\n for part, damage in self.damage_map.iteritems():\n if damage.is_breakable():\n # attack until broken, then move to next break\n average += damage.total\n return average / self.break_count\n\n def __getitem__(self, key):\n return self.damage_map[key]\n\n def keys(self):\n return self.parts\n\n\nclass PartDamageState(object):\n def __init__(self, raw, element, hitbox, ehitbox, state=None):\n self.raw = raw\n self.element = element\n self.hitbox = hitbox\n self.ehitbox = ehitbox\n self.state = state\n\n\nclass PartDamage(object):\n \"\"\"\n Class to represent the damage done to a single hitzone on a monster,\n default state and alternate state (broken, enraged, etc).\n \"\"\"\n def __init__(self):\n self.states = dict()\n self.part = None\n self.breakable = False\n\n @property\n def raw(self):\n return self.states[None].raw\n\n @property\n def element(self):\n return self.states[None].element\n\n @property\n def hitbox(self):\n return self.states[None].hitbox\n\n @property\n def ehitbox(self):\n return self.states[None].ehitbox\n\n @property\n def break_raw(self):\n if \"Break Part\" in self.states:\n return self.states[\"Break Part\"].raw\n else:\n return self.raw\n\n @property\n def break_element(self):\n if \"Break Part\" in self.states:\n return self.states[\"Break Part\"].element\n else:\n return self.element\n\n @property\n def rage_raw(self):\n if \"Enraged\" in self.states:\n return self.states[\"Enraged\"].raw\n else:\n return self.raw\n\n @property\n def rage_element(self):\n if \"Enraged\" in self.states:\n return self.states[\"Enraged\"].element\n else:\n return self.element\n\n @property\n def total(self):\n return self.raw + self.element\n\n @property\n def total_break(self):\n return self.break_raw + self.break_element\n\n @property\n def total_rage(self):\n return self.rage_raw + self.rage_element\n\n def break_diff(self):\n return self.total_break - self.total\n\n def rage_diff(self):\n return self.total_rage - self.total\n\n def is_breakable(self):\n # If the part has a hitbox with different damage in the break\n # rows from the db, or if it's explicitly marked as breakable\n # (done by checking hunt rewards for breaks).\n return self.break_diff() > 0 or self.breakable\n\n def average(self, break_weight=0.25, rage_weight=0.5):\n if self.break_diff():\n assert not self.rage_diff()\n return self.average_break(break_weight)\n else:\n return self.average_rage(rage_weight)\n\n def average_break(self, break_weight=0.25):\n return (self.total_break * break_weight\n + self.total * (1 - break_weight))\n\n def average_rage(self, rage_weight=0.5):\n return (self.total_rage * rage_weight\n + self.total * (1 - rage_weight))\n\n def set_damage(self, raw, element, hitbox, ehitbox, state=None):\n if state == \"Without Hide\":\n state = \"Break Part\"\n self.states[state] = PartDamageState(raw, element,\n hitbox, ehitbox, state)\n\n\ndef element_attack_up(value):\n return value * 1.1\n\n\ndef element_x_attack_up(value, level=1):\n value = value * (1 + .05 * level)\n if level == 1:\n value += 40\n elif level == 2:\n value += 60\n elif level == 3:\n value += 90\n else:\n raise ValueError(\"level must be 1, 2, or 3\")\n","sub_path":"mhapi/damage.py","file_name":"damage.py","file_ext":"py","file_size_in_byte":16874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} 
+{"seq_id":"307424982","text":"from collections import namedtuple\n\n\ndef goal_test(state):\n return str(state) == str(range(0, 9))\n\n\n# BFS Search\ndef bfs(start):\n \"\"\" \n Performs breadth-first search starting with the 'start' as the beginning\n node. Returns a namedtuple 'Success' which contains namedtuple 'position'\n (includes: node, cost, depth, prev), 'max_depth' and 'nodes_expanded'\n if a node that passes the goal test has been found.\n\n \"\"\"\n\n # SearchPos used for bookeeping and finding the path:\n SearchPos = namedtuple('SearchPos', 'node, cost, depth, prev')\n\n # Initial position does not have a predecessor\n position = SearchPos(start, 0, 0, None)\n\n\n # frontier contains unexpanded positions\n frontier = [position]\n explored = set()\n while len(frontier) > 0:\n\n # current position is the first position in the frontier\n position = frontier.pop(0)\n\n node = position.node\n\n # goal test: return success if True\n if goal_test(node):\n max_depth = max([pos.depth for pos in frontier])\n Success = namedtuple('Success', \n 'position, max_depth, nodes_expanded')\n success = Success(position, max_depth, len(explored))\n return success\n\n # expanded nodes are added to explored set\n explored.add(node)\n\n # All reachable positions from current postion is added to frontier\n for neighbor in node.successors():\n new_position = SearchPos(neighbor, position.cost + 1,\n position.depth + 1, position)\n frontier_check = neighbor in [pos.node for pos in frontier]\n if neighbor not in explored and not frontier_check:\n frontier.append(new_position)\n\n # the goal could not be reached.\n return None\n\n\n","sub_path":"search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":1815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"431633216","text":"from django.db import models\nfrom guardian.shortcuts import (assign, remove_perm)\nfrom django.contrib.auth.models import User\nfrom django.dispatch import receiver\nfrom flying_frogs.friends.signals import (friend_request_accepted,\n friend_request_sent, friend_removed)\nfrom flying_frogs.notifications.models import (NOTIFICATION_MESSAGE,\n NOTIFICATIONS,\n Notification)\n\n\nclass FriendList(models.Model):\n owner = models.ForeignKey(User, null=True)\n pending_sent_list = models.ManyToManyField(User, null=True,\n related_name='pending_requests')\n pending_received_list = models.ManyToManyField(User, null=True,\n related_name='pending')\n user_list = models.ManyToManyField(User, null=True,\n related_name='accepted')\n\n def __unicode__(self):\n return \"%s's Friend's List\" % self.owner.username\n\n def accept_friend_request(self, friend):\n friends = friend.get_profile().friends\n\n self.pending_received_list.remove(friend)\n self.user_list.add(friend)\n friends.pending_sent_list.remove(self.owner)\n friends.user_list.add(self.owner)\n self.save()\n friend_request_accepted.send_robust(\n sender=self.accept_friend_request.__name__,\n user=self.owner,\n friend=friend,\n permission=assign)\n\n def decline_friend_request(self, friend):\n self.pending_received_list.remove(friend)\n friend.get_profile().friends.pending_sent_list.remove(self.owner)\n self.save()\n friend.save()\n\n def send_friend_request(self, friend):\n if((friend not in self.pending_sent_list.all()) and\n (not self.is_friend(friend))):\n self.pending_sent_list.add(friend)\n friend.get_profile().friends.pending_received_list.add(self.owner)\n self.save()\n friend.save()\n 
friend_request_sent.send_robust(\n sender=self.send_friend_request.__name__,\n user=self.owner,\n friend=friend)\n\n def remove_friend(self, friend):\n self.user_list.remove(friend)\n friend.get_profile().friends.user_list.remove(self.owner)\n self.save()\n friend.save()\n friend_removed.send_robust(sender=self.__class__,\n user=self.owner,\n friend=friend,\n permission=remove_perm)\n\n def is_friend(self, friend):\n if(friend in self.user_list.all()):\n return True\n else:\n return False\n\n def check_sent_pending(self, friend):\n if(User.objects.get(username=friend) in self.pending_sent_list.all()):\n return True\n else:\n return False\n\n def check_received_pending(self, friend):\n if(User.objects.get(username=friend) in\n self.pending_received_list.all()):\n return True\n else:\n return False\n\n def get_friends_subset(self, friend_type):\n friend_subset = []\n for friend in self.user_list.all():\n if(friend.get_profile().user_type == friend_type):\n friend_subset.append(friend)\n return friend_subset\n\n\n@receiver(friend_request_sent)\n@receiver(friend_request_accepted)\ndef friend_request_notification(sender, **kw):\n notification_type, message = NOTIFICATIONS[sender]\n user = kw['user']\n friend = kw['friend']\n\n if(user.get_full_name()):\n message = NOTIFICATION_MESSAGE(user.get_full_name(), message)\n else:\n message = NOTIFICATION_MESSAGE(user.username, message)\n notification = Notification(owner=user,\n message=message,\n notification_type=notification_type)\n notification.save()\n friend.get_profile().notifications.add(notification)\n friend.save()\n\n\n@receiver(friend_removed)\n@receiver(friend_request_accepted)\ndef assign_friend_perms(sender, **kw):\n user = kw['user']\n friend = kw['friend']\n permission = kw['permission']\n\n permission('view_friend_profile', user, friend.profile)\n permission('view_friend_profile', friend, user.profile)\n","sub_path":"friends/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"503569667","text":"import logging\nimport asyncio\nimport sys\n\nfrom kademlia.network import Server\n\nimport math\nimport random\nimport time\n\ndef nextTime(rateParameter = 1/2):\n return -math.log(1.0 - random.random()) / rateParameter\n\nloop = asyncio.get_event_loop()\nloop.set_debug(True)\n\nvalue = 0\n# server = Server()\nbootstrap_node = (\"0.0.0.0\", 8469 + int(sys.argv[1]))\nfirst = [True] * 10\nstart_device_id = int(sys.argv[1]) * 10\nwhile True:\n\tserver = Server()\n\tloop.run_until_complete(server.listen(8470 + int(sys.argv[1])))\n\tloop.run_until_complete(server.bootstrap([bootstrap_node]))\n\tnext_time = time.time() + nextTime()\n\twhile time.time() < next_time:\n\t# while True:\n\t\tfor device in range(start_device_id, start_device_id + 10):\n\t\t\tresult = loop.run_until_complete(server.set(str(device), value))\n\t\t\tif result and first[device - start_device_id]:\n\t\t\t\tfirst[device - start_device_id] = False\t\n\t\t\t\tprint(\"writing\", sys.argv[1])\n\t\t\t\tf = open(\"devicefile.txt\", \"a\")\n\t\t\t\tf.write(\"1\")\n\t\t\t\tf.close()\n\t\t\t\tprint(\"done writing\", sys.argv[1])\n\t\t\telse:\n\t\t\t\ttime.sleep(0.25)\n\t\tvalue += 1\n\tserver.stop()\nloop.close()\n","sub_path":"kademlia-dht/run_wireless_device.py","file_name":"run_wireless_device.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} 
+{"seq_id":"34016080","text":"\"\"\" \n Summary line. \n Downloads files(log_data & song_data) from Udacity Bucket \n and merges respective files into a single file\n\"\"\"\nimport os\nimport glob\nimport json\n\nimport boto3\nimport configparser\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom time import time\n\n#Functions\ndef download_files_from_s3(Bucket, prefix, limit):\n \"\"\"\n Summary line. \n Download files from AWS_S3\n \n Parameters: \n arg1 (prefix of files to be downloaded)\n arg2 (number of files to be downloaded)\n \n Returns: \n None\n \"\"\" \n i = 0\n files = [] \n tenPercentOfTotal = round(limit/10)\n \n for obj in Bucket.objects.filter(Prefix=prefix):\n if(len(files) % tenPercentOfTotal == 0 and i>1):\n print('Downloaded {}/{}'.format(len(files), limit) )\n \n if i>limit:\n break\n if obj.key.rsplit('/', 1)[1] != '': \n folder = obj.key.rsplit('/', 1)[0]\n file= obj.key.rsplit('/', 1)[1]\n files.append(obj.key)\n #print(obj.key)\n os.makedirs(folder, exist_ok=True) # succeeds even if directory exists.\n Bucket.download_file(obj.key, obj.key) \n i+=1\n \ndef get_all_files(folder):\n \"\"\"\n Summary line. \n Scans folder and prepares files list\n \n Parameters: \n arg1 (folder path)\n \n Returns: \n Array of filepath\n \"\"\" \n # 1. checking your current working directory\n print('Current Working Directory : ',os.getcwd())\n\n # Get your current folder and subfolder event data\n filepath = os.getcwd() + folder\n print('Scanning Directory : ',filepath)\n\n # 2. Create a for loop to create a list of files and collect each filepath\n # join the file path and roots with the subdirectories using glob\n # get all files matching extension from directory\n all_files = []\n for root, dirs, files in os.walk(filepath):\n files = glob.glob(os.path.join(root,'*.json'))\n for f in files :\n all_files.append(os.path.abspath(f))\n\n # 3. get total number of files found\n num_files = len(all_files)\n print('{} files found'.format(num_files))\n #print(all_files)\n return all_files\n \n \ndef merge_song_files(folder, mergedfile): \n \"\"\"\n Summary line. \n Merges all song data into one file. Each song file contains only one JSON object.\n \n Parameters: \n arg1 (folder path)\n arg2 (merged output filename)\n \n Returns: \n None\n \"\"\" \n output_list = []\n all_files = get_all_files(folder)\n if len(all_files) > 0:\n for f in all_files:\n with open(f, \"rb\") as infile:\n output_list.append(json.load(infile))\n\n with open(mergedfile, \"w\", encoding=\"utf8\") as outfile:\n json.dump(output_list, outfile) \n\n \ndef merge_log_files(folder, mergedfile):\n \"\"\"\n Summary line. \n Merges all log data into one file. Each log file may contain more than one JSON objects and there will be only one JSON object per line.\n \n Parameters: \n arg1 (folder path)\n arg2 (merged output filename)\n \n Returns: \n None\n \"\"\" \n output_list = []\n all_files = get_all_files(folder)\n if len(all_files) > 0:\n for f in all_files:\n with open(f, 'r') as f:\n for line in f:\n output_list.append(json.loads(line))\n\n with open(mergedfile, \"w\", encoding=\"utf8\") as outfile:\n json.dump(output_list, outfile) \n\n \ndef json_to_dataframe(infile):\n \"\"\"\n Summary line. 
\n Reads a JSON file and loads it into a pandas dataframe\n \n Parameters: \n arg1 (filename)\n \n Returns: \n dataframe\n \"\"\" \n with open(infile) as datafile:\n data = json.load(datafile)\n\n df = pd.DataFrame(data)\n print(\"df shape {}\".format(df.shape))\n return df\n \n \ndef main():\n config = configparser.ConfigParser()\n config.read_file(open('aws/credentials.cfg'))\n KEY=config.get('AWS','AWS_ACCESS_KEY_ID')\n SECRET= config.get('AWS','AWS_SECRET_ACCESS_KEY')\n\n s3 = boto3.resource('s3',\n region_name=\"us-west-2\",\n aws_access_key_id=KEY,\n aws_secret_access_key=SECRET\n )\n\n sampleDbBucket = s3.Bucket(\"udacity-dend\")\n \n #print('Downloading song_data')\n download_files_from_s3(sampleDbBucket, 'song_data', 20000)\n \n #print('Downloading log_data')\n download_files_from_s3(sampleDbBucket, 'log_data', 20000)\n \n print('Downloading log_json_path.json')\n sampleDbBucket.download_file('log_json_path.json', 'log_json_path.json')\n \n print('Merging song_data')\n merge_song_files('/song_data', 'merged_song_data.json')\n \n print('Merging log_data')\n merge_log_files('/log_data', 'merged_log_data.json')\n \n\nif __name__ == \"__main__\":\n main()\n","sub_path":"Project 4 - Cloud Lake with Spark(AWS)/S3Download.py","file_name":"S3Download.py","file_ext":"py","file_size_in_byte":4931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"67589790","text":"import dask.array as da\nimport numpy as np\nfrom numba import guvectorize\nfrom xarray import Dataset\n\nfrom sgkit import variables\nfrom sgkit.typing import ArrayLike\nfrom sgkit.utils import conditional_merge_datasets, create_dataset\n\n\n@guvectorize( # type: ignore\n [\n \"void(float64[:], uint8[:], float64, int8[:])\",\n \"void(float32[:], uint8[:], float64, int8[:])\",\n ],\n \"(p),(k),()->(k)\",\n nopython=True,\n cache=True,\n)\ndef _convert_probability_to_call(\n gp: ArrayLike, _: ArrayLike, threshold: float, out: ArrayLike\n) -> None: # pragma: no cover\n \"\"\"Generalized U-function for converting genotype probabilities to hard calls\n\n Parameters\n ----------\n gp\n Genotype probabilities of shape (genotypes,) containing unphased, biallelic\n probabilities in the order homozygous reference, heterozygous, homozygous alternate.\n _\n Dummy variable of type `uint8` and shape (ploidy,) used to define\n the ploidy of the resulting array\n threshold\n Probability threshold that must be met or exceeded by at least one genotype\n probability in order for any calls to be made -- all values will be -1 (missing)\n otherwise. 
Setting this value to less than 0 disables any effect it has.\n out\n Hard calls array of shape (ploidy,).\n \"\"\"\n # Ignore singleton array inputs used for metadata inference by dask\n if gp.shape[0] == 1 and out.shape[0] == 1:\n return\n if gp.shape[0] != 3 or out.shape[0] != 2:\n raise NotImplementedError(\n \"Hard call conversion only supported for diploid, biallelic genotypes.\"\n )\n out[:] = -1 # (ploidy,)\n # Return no call if any probability is absent\n if np.any(np.isnan(gp)):\n return\n i = np.argmax(gp)\n # Return no call if max probability does not exceed threshold\n if threshold > 0 and gp[i] < threshold:\n return\n # Return no call if max probability is not unique\n if (gp[i] == gp).sum() > 1:\n return\n # Homozygous reference\n if i == 0:\n out[:] = 0\n # Heterozygous\n elif i == 1:\n out[0] = 1\n out[1] = 0\n # Homozygous alternate\n else:\n out[:] = 1\n\n\ndef convert_probability_to_call(\n ds: Dataset,\n call_genotype_probability: str = variables.call_genotype_probability,\n threshold: float = 0.9,\n merge: bool = True,\n) -> Dataset:\n \"\"\"\n Convert genotype probabilities to hard calls.\n\n Parameters\n ----------\n ds\n Dataset containing genotype probabilities, such as from :func:`sgkit.io.bgen.read_bgen`.\n call_genotype_probability\n Genotype probability variable to be converted as defined by\n :data:`sgkit.variables.call_genotype_probability_spec`.\n threshold\n Probability threshold in [0, 1] that must be met or exceeded by at least one genotype\n probability in order for any calls to be made -- all values will be -1 (missing)\n otherwise. Setting this value to less than or equal to 0 disables any effect it has.\n Default value is 0.9.\n merge\n If True (the default), merge the input dataset and the computed\n output variables into a single dataset, otherwise return only\n the computed output variables.\n See :ref:`dataset_merge` for more details.\n\n Returns\n -------\n A dataset containing the following variables:\n\n - `call_genotype` (variants, samples, ploidy): Converted hard calls.\n Defined by :data:`sgkit.variables.call_genotype_spec`.\n\n - `call_genotype_mask` (variants, samples, ploidy): Mask for converted hard calls.\n Defined by :data:`sgkit.variables.call_genotype_mask_spec`.\n \"\"\"\n if not (0 <= threshold <= 1):\n raise ValueError(f\"Threshold must be float in [0, 1], not {threshold}.\")\n variables.validate(\n ds, {call_genotype_probability: variables.call_genotype_probability_spec}\n )\n if ds.dims[\"genotypes\"] != 3:\n raise NotImplementedError(\n f\"Hard call conversion only supported for diploid, biallelic genotypes; \"\n f\"num genotypes in provided probabilities array = {ds.dims['genotypes']}.\"\n )\n GP = da.asarray(ds[call_genotype_probability])\n # Remove chunking in genotypes dimension, if present\n if len(GP.chunks[2]) > 1:\n GP = GP.rechunk((None, None, -1))\n K = da.empty(2, dtype=np.uint8)\n GT = _convert_probability_to_call(GP, K, threshold)\n new_ds = create_dataset(\n {\n variables.call_genotype: ((\"variants\", \"samples\", \"ploidy\"), GT),\n variables.call_genotype_mask: ((\"variants\", \"samples\", \"ploidy\"), GT < 0),\n }\n )\n return conditional_merge_datasets(ds, new_ds, merge)\n","sub_path":"sgkit/stats/conversion.py","file_name":"conversion.py","file_ext":"py","file_size_in_byte":4711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"466966115","text":"#!/usr/bin/env python\n\nimport subprocess as sp\nimport shlex\n\n\nclass Sh(object):\n\n\n def 
__init__(self, *args, stdin=None, stdout=None):\n '''\n initialize piped together shell processes.\n '''\n self.stdin, self.stdout = stdin, stdout\n self.cmds = tuple([arg for arg in args])\n\n def __or__(self, other):\n '''pipe method'''\n args = self.cmds + other.cmds\n return Sh(*args, stdin=self.stdin, stdout=other.stdout)\n \n def run(self):\n '''\n run a bunch of shell commands\n '''\n cmds, procs = [shlex.split(x) for x in self.cmds], []\n\n if len(cmds) == 1:\n # keep a handle on the single process so it can be waited on below\n procs.append(sp.Popen(cmds[0], stdin=self.stdin, stdout=self.stdout))\n else:\n for k, cmd in enumerate(cmds):\n if k == 0:\n procs.append(sp.Popen(cmd, stdin=self.stdin, stdout=sp.PIPE))\n elif k == len(cmds) - 1:\n procs.append(sp.Popen(cmd, stdin=procs[k-1].stdout, stdout=self.stdout))\n else:\n procs.append(sp.Popen(cmd, stdin=procs[k-1].stdout, stdout=sp.PIPE))\n try:\n self.stdin.close()\n except Exception:\n pass\n \n procs[-1].wait()\n","sub_path":"sh.py","file_name":"sh.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"293714261","text":"import argparse\nimport pkgutil\n\nfrom gameanalysis import script\n\n\ndef create_parser():\n modules = [imp.find_module(name).load_module(name) for imp, name, _\n in pkgutil.iter_modules(script.__path__)]\n parser = argparse.ArgumentParser(\n description=\"\"\"Command line access to the game analysis toolkit.\"\"\")\n subparsers = parser.add_subparsers(\n title='commands', dest='command', metavar='', help=\"\"\"The\n commands to execute. Available commands are:\"\"\")\n for module in modules:\n subparser = module.add_parser(subparsers)\n subparser.main = module.main\n return parser, subparsers.choices\n\n\ndef main():\n parser, commands = create_parser()\n args = parser.parse_args()\n commands[args.command].main(args)\n","sub_path":"gameanalysis/ga.py","file_name":"ga.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"540762057","text":"# 3.\tGiven the coordinates of two points entered by the user, print\n# the equation of the straight line y = kx + b passing through them.\n\nUSER_X1 = float(input(\"A(x= \"))\nUSER_Y1 = float(input(\"A(y= \"))\nUSER_X2 = float(input(\"B(x= \"))\nUSER_Y2 = float(input(\"B(y= \"))\n\nK = (USER_Y1 - USER_Y2) / (USER_X1 - USER_X2)\nB = USER_Y2 - K * USER_X2\n\nprint(f'y = {round(K,2)}x + {round(B,2)}')\n","sub_path":"Lesson_1/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"254411905","text":"from numpy import loadtxt, zeros, ones, array, linspace, logspace\nfrom pylab import scatter, show, title, xlabel, ylabel, plot, contour\n\n#################################################\n# Data Initialization\n#################################################\n\n# Load data\ndata = loadtxt('extdata1.csv', delimiter=',')\n\n# Split data for arithmetic\nx = data[:, 0]\ny = data[:, 1]\n\n# Set Constants\niterations = 1500\nalpha = 0.01\n\n#Insert Bias Column to the data and rename y\nfeatures = ones(shape=(y.size, 2))\nfeatures[:, 1] = x\ntargets = y\n\n#Initialize theta parameters\ntheta = zeros(shape=(2, 1))\n\n\n#################################################\n# Function Definitions\n#################################################\n\ndef computeCost(x, y, theta):\n #Number of training samples\n predictions = x.dot(theta).flatten()\n sqErrors = (predictions - y) ** 2\n cost = 
(1.0 / (2 * y.size)) * sqErrors.sum()\n return cost\n\ndef gradientDescent(x, y, theta, alpha, iterations):\n costHistory = zeros(shape=(iterations, 1))\n\n for i in range(iterations):\n predictions = x.dot(theta).flatten()\n x1Errors = (predictions - y) * x[:, 0]\n x2Errors = (predictions - y) * x[:, 1]\n theta[0][0] = theta[0][0] - alpha * (1.0 / y.size) * x1Errors.sum()\n theta[1][0] = theta[1][0] - alpha * (1.0 / y.size) * x2Errors.sum()\n costHistory[i, 0] = computeCost(x, y, theta)\n\n return theta, costHistory\n\n\n#################################################\n# Run\n#################################################\n\ntheta, costHistory = gradientDescent(features, targets, theta, alpha, iterations)\nresults = features.dot(theta).flatten()\n\n#Plot the data\nscatter(x, y, marker='o', c='b')\ntitle ('Profits distribution')\nxlabel('Population of City in 10,000s')\nylabel('Profit in $10,000s')\nplot(x, results)\nshow()\n","sub_path":"CPSC4383/LinearRegression/Python Implementation/linearRegression.py","file_name":"linearRegression.py","file_ext":"py","file_size_in_byte":1881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"446310868","text":"import time\r\nimport csv\r\nimport threading\r\nimport requests\r\nimport os\r\n\r\ndef benchmark(_func=None, *, warmups=0, iter=1, verbose=False, csv_file=None):\r\n \"\"\"\r\n Parametric decorator that runs some warmups before the actual executions\r\n of the decorated function. It then computes the mean and the variance of\r\n the runs and prints a summary table on standard output. If requested it\r\n also prints the timings obtained and saves them to a file.\r\n \"\"\"\r\n def benchmark_dec(f):\r\n def benchmark_wrap(*args, **kwargs):\r\n #TODO: benchmark\r\n warmups_times = []\r\n for _ in range(0, warmups):\r\n start_time = time.perf_counter() \r\n f(*args, **kwargs)\r\n end_time = time.perf_counter()\r\n run_time = end_time - start_time\r\n warmups_times.append(run_time)\r\n \r\n iter_times = []\r\n for _ in range(0, iter):\r\n start_time = time.perf_counter() \r\n f(*args, **kwargs)\r\n end_time = time.perf_counter()\r\n run_time = end_time - start_time\r\n iter_times.append(run_time)\r\n\r\n if verbose:\r\n for (i, w) in enumerate(warmups_times):\r\n print(\"Warmup #\" + str(i+1) + \" took: \" + str(w))\r\n for (i, t) in enumerate(iter_times):\r\n print(\"Iteration #\" + str(i+1) + \" took: \" + str(t))\r\n\r\n average = sum(iter_times) / iter\r\n variance = sum([(i - average) ** 2 for i in iter_times]) / iter\r\n\r\n print(\"| Iter\\t| Warm\\t| Aver\\t| Var\\t|\")\r\n print(\"| \" + str(iter) + \"\\t| \" + str(warmups) + \"\\t| \" + str(average) + \"s\\t| \" + str(variance) + \"s\\t|\")\r\n\r\n if csv_file is not None:\r\n with open(csv_file, 'w', newline='') as csvf:\r\n csv_writer = csv.writer(csvf, delimiter=',')\r\n csv_writer.writerow([\"run num\", \"is warmup\", \"timing\"])\r\n for (i, w) in enumerate(warmups_times):\r\n csv_writer.writerow([str(i+1), \"yes\", str(w) + \"s\"])\r\n for (i, t) in enumerate(iter_times):\r\n csv_writer.writerow([str(i+1), \"no\", str(t) + \"s\"])\r\n\r\n return benchmark_wrap\r\n \r\n if _func is None:\r\n return benchmark_dec\r\n else:\r\n return benchmark_dec(_func)\r\n\r\ndef fibo(n):\r\n \"\"\"\r\n Function for the recursive computation of the n-th Fibonacci number\r\n \"\"\"\r\n if n <= 0:\r\n return 0\r\n elif n == 1:\r\n return 1\r\n else:\r\n return fibo(n - 1) + fibo(n - 2)\r\n\r\nclass fibo_call:\r\n \"\"\"\r\n Callable class for 
computing the n-th Fibonacci number\r\n \"\"\"\r\n def __init__(self, n):\r\n self.n = n\r\n\r\n def __call__(self):\r\n fibo(self.n)\r\n\r\ndef th_aux(nthreads, fibo_par):\r\n \"\"\"\r\n Function that runs the Fibonacci function on nthreads parallel\r\n threads at the same time.\r\n \"\"\"\r\n clb = fibo_call(fibo_par)\r\n ths = []\r\n for _ in range(0, nthreads):\r\n ths.append(threading.Thread(target=clb))\r\n\r\n for th in ths:\r\n th.start()\r\n\r\n for th in ths:\r\n th.join()\r\n\r\ndef loadAndExec(url):\r\n \"\"\"\r\n Downloads a Python script and executes it. Saves the script to a\r\n temporary file and deletes the file after execution.\r\n \"\"\"\r\n if(url == \"\"): \r\n return\r\n\r\n tmp = \"tmp\" + str(time.time()) + \".py\"\r\n r = requests.get(url)\r\n with open(tmp, \"w\") as f:\r\n f.write(r.text)\r\n \r\n os.system(\"python3 \" + tmp)\r\n os.remove(tmp)\r\n\r\ndef prepost(_func=None, *, preurl=\"\", posturl=\"\"):\r\n \"\"\"\r\n Decorator that runs two scripts, downloaded from the two URLs passed as\r\n parameters, before and after the execution of the decorated function.\r\n \"\"\"\r\n def prepost_dec(f):\r\n def prepost_wrap(*args, **kwargs):\r\n loadAndExec(preurl)\r\n f(*args, **kwargs)\r\n loadAndExec(posturl)\r\n \r\n return prepost_wrap\r\n \r\n if _func is None:\r\n return prepost_dec\r\n else:\r\n return prepost_dec(_func)\r\n\r\n\r\n@prepost(preurl=\"http://pages.di.unipi.it/corradini/Didattica/AP-19/PROG-ASS/02/pre.py\", posturl=\"http://pages.di.unipi.it/corradini/Didattica/AP-19/PROG-ASS/02/post.py\")\r\ndef test(fibo_par):\r\n \"\"\"\r\n Function that runs the required tests.\r\n \"\"\"\r\n\r\n #f-1-16\r\n f_1_16 = benchmark(th_aux, iter=16, csv_file=\"f-1-16.csv\")\r\n f_1_16(1, fibo_par)\r\n\r\n #f-2-8\r\n f_2_8 = benchmark(th_aux, iter=8, csv_file=\"f-2-8.csv\")\r\n f_2_8(2, fibo_par)\r\n\r\n #f-4-4\r\n f_4_4 = benchmark(th_aux, iter=4, csv_file=\"f-4-4.csv\")\r\n f_4_4(4, fibo_par)\r\n\r\n #f-8-2\r\n f_8_2 = benchmark(th_aux, iter=2, csv_file=\"f-8-2.csv\")\r\n f_8_2(8, fibo_par)\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n test(30)\r\n\r\n\r\n\r\n# In practice no apparent advantage is observed from parallelizing the\r\n# executions. On the contrary, each thread in the multithreaded runs seems\r\n# to be executed sequentially. On top of this comes the overhead of creating\r\n# new threads, so running the function on a smaller number of threads\r\n# probably leads to better timings. 
This is due to the way Python\r\n# handles multithreading in relation to the garbage collector, which is\r\n# one of the major criticisms of the language.","sub_path":"source/assignment2/part3/benchmark.py","file_name":"benchmark.py","file_ext":"py","file_size_in_byte":5402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"185153472","text":"#!/usr/bin/env python\n#author:https://github.com/TravellerXi\n# coding:utf-8\n\nimport socket\ntimeout=100\nsocket.setdefaulttimeout(timeout)\nprint('Please put \"ip.txt\" and \"port.txt\" in the root directory of the D drive.')\ndef get_ip_status(ip, port):\n server = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n try:\n ADDR = (ip, port)\n server.connect(ADDR)\n server.close()\n print('connect to ip {0} on port {1} success'.format(ip, port))\n file= open('D:\\\\sucess.txt','a+')\n file.write('connect to ip {0} on port {1} success\\n'.format(ip, port))\n file.close()\n\n except Exception as err:\n print('connect to ip {0} on port {1} failed'.format(ip, port))\n file = open(\"D:\\\\failed.txt\", 'a+')\n file.write('connect to ip {0} on port {1} failed\\n'.format(ip, port))\n file.close()\n\n finally:\n server.close()\n\n#data=[]\n\n#print (data)\n\nip=[]\nport=[]\nwith open(\"D:\\port.txt\",\"r\") as f:\n porta=f.read().split('\\n')\n #print('Host list:')\n #print (ipa)\n for port in porta:\n #print (host)\n port=int(port)\n with open(\"D:\\ip.txt\", \"r\") as p:\n ipa = p.read().split('\\n')\n #print('Port list:')\n # print(porta)\n for host in ipa:\n host=str(host)\n get_ip_status(host, port)\n #print(host)\n # print('\\n')\nwhile(1):\n print(\"Done! Press 1 to quit!\")\n exist=str(input())\n if exist=='1':\n quit()\n else:\n continue\n\n","sub_path":"UDP.py","file_name":"UDP.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"544780866","text":"#!/usr/bin/env python\n\"\"\"\nCopyright 2019, Yao Yao, HKUST.\nTest script.\n\"\"\"\nfrom __future__ import print_function\nimport os\nimport time\nimport sys\nimport tensorflow as tf\nimport numpy as np\nimport mvsnet.utils as mu\nimport mvsnet.predictlib as pl\n\nlogger = mu.setup_logger('mvsnet-inference')\nsys.path.append(\"../\")\n\n# dataset parameters\ntf.app.flags.DEFINE_string('input_dir', None,\n \"\"\"Path to data to run inference on\"\"\")\ntf.app.flags.DEFINE_string('output_dir', None,\n \"\"\"Path to dir to output results\"\"\")\ntf.app.flags.DEFINE_string('model_dir',\n 'gs://mvs-training-mlengine/trained-models/08-19-2019/',\n \"\"\"Path to restore the model.\"\"\")\ntf.app.flags.DEFINE_integer('ckpt_step', 400000,\n \"\"\"ckpt step.\"\"\")\n# input parameters\ntf.app.flags.DEFINE_integer('view_num', 8,\n \"\"\"Number of images (1 ref image and view_num - 1 view images).\"\"\")\ntf.app.flags.DEFINE_integer('max_d', 256,\n \"\"\"Maximum depth step when testing.\"\"\")\ntf.app.flags.DEFINE_integer('width', 1024,\n \"\"\"Maximum image width when testing.\"\"\")\ntf.app.flags.DEFINE_integer('height', 768,\n \"\"\"Maximum image height when testing.\"\"\")\ntf.app.flags.DEFINE_float('sample_scale', 0.25,\n \"\"\"Downsample scale for building cost volume (W and H).\"\"\")\ntf.app.flags.DEFINE_float('interval_scale', 1.0,\n \"\"\"Downsample scale for building cost volume (D).\"\"\")\ntf.app.flags.DEFINE_float('base_image_size', 8,\n \"\"\"Base image size\"\"\")\ntf.app.flags.DEFINE_integer('batch_size', 1,\n \"\"\"Testing batch 
size.\"\"\")\ntf.app.flags.DEFINE_bool('adaptive_scaling', True,\n \"\"\"Let image size to fit the network, including 'scaling', 'cropping'\"\"\")\n\n# network architecture\ntf.app.flags.DEFINE_string('regularization', '3DCNN',\n \"\"\"Regularization method, including '3DCNN' and 'GRU'\"\"\")\ntf.app.flags.DEFINE_boolean('refinement', False,\n \"\"\"Whether to apply depth map refinement for MVSNet\"\"\")\ntf.app.flags.DEFINE_bool('inverse_depth', False,\n \"\"\"Whether to apply inverse depth for R-MVSNet\"\"\")\ntf.app.flags.DEFINE_string('network_mode', 'normal',\n \"\"\"One of 'normal', 'lite' or 'ultralite'. If 'lite' or 'ultralite' then networks have fewer params\"\"\")\ntf.app.flags.DEFINE_string('refinement_network', 'original',\n \"\"\"Specifies network to use for refinement. One of 'original' or 'unet'.\n If 'original' then the original mvsnet refinement network is used, otherwise a unet style architecture is used.\"\"\")\ntf.app.flags.DEFINE_boolean('upsample_before_refinement', False,\n \"\"\"Whether to upsample depth map to input resolution before the refinement network.\"\"\")\ntf.app.flags.DEFINE_boolean('refine_with_confidence', False,\n \"\"\"Whether or not to concatenate the confidence map as an input channel to refinement network\"\"\")\n\n# Parameters for writing and benchmarking output\ntf.app.flags.DEFINE_bool('visualize', False,\n \"\"\"If visualize is true, the inference script will write some auxiliary files for visualization and debugging purposes.\n This is useful when developing and debugging, but should probably be turned off in production\"\"\")\ntf.app.flags.DEFINE_bool('benchmark', False,\n \"\"\"If benchmark is True, the network results will be benchmarked against GT.\n This should only be used if the input_dir contains GT depth maps\"\"\")\ntf.app.flags.DEFINE_bool('write_output', True,\n \"\"\"When benchmarking you can set this to False if you don't need the output\"\"\")\ntf.app.flags.DEFINE_bool('reuse_vars', False,\n \"\"\"A global flag representing whether variables should be reused. This should be\n set to False by default and is switched on or off by individual methods\"\"\")\ntf.app.flags.DEFINE_integer('max_clusters_per_session', None,\n \"\"\"The maximum number of clusters to benchmark per session. 
If not benchmarking this should probably be set to None\"\"\")\n\nFLAGS = tf.app.flags.FLAGS\n\n\ndef compute_depth_maps(input_dir, **kwargs):\n \"\"\" Performs inference using trained MVSNet model on data located in input_dir and writes data to disk\"\"\"\n output_dir = pl.init_inference(input_dir, **kwargs)\n mvs_iterator, sample_size = pl.setup_data_iterator(input_dir)\n scaled_images, full_images, scaled_cams, full_cams, image_index = mvs_iterator.get_next()\n\n depth_start, depth_end, depth_interval, depth_num = pl.set_shapes(\n scaled_images, full_images, scaled_cams, full_cams)\n\n depth_map, prob_map, residual_depth_map = pl.get_depth_and_prob_map(\n full_images, scaled_cams, depth_start, depth_interval)\n\n # init option\n var_init_op = tf.local_variables_initializer()\n init_op, config = mu.init_session()\n with tf.Session(config=config) as sess:\n # initialization\n sess.run(var_init_op)\n sess.run(init_op)\n pl.load_model(sess)\n sess.run(mvs_iterator.initializer)\n num_batches = int(np.ceil(float(sample_size) / float(FLAGS.batch_size)))\n for step in range(num_batches):\n start_time = time.time()\n out_residual_depth_map = None\n fetches = [depth_map, prob_map, scaled_images,\n scaled_cams, full_cams, full_images, image_index, residual_depth_map]\n try:\n out_depth_map, out_prob_map, out_images, out_cams, out_full_cams, out_full_images, out_index, out_residual_depth_map = sess.run(\n fetches)\n pass\n except tf.errors.OutOfRangeError:\n logger.info(\"all dense finished\") # ==> \"End of dataset\"\n break\n logger.info('Depth inference {}/{} finished. ({:.3f} sec/step)'.format(step*FLAGS.batch_size, sample_size, time.time() - start_time))\n pl.write_output(output_dir, out_depth_map, out_prob_map, out_images,\n out_cams, out_full_cams, out_full_images, out_index, out_residual_depth_map)\n\ndef main(_): # pylint: disable=unused-argument\n \"\"\"\n Program entrance for running inference with MVSNet\n Acceptable input for the input_dir are (1) a single test folder, or (2) a folder containing multiple\n test folders. We check to see which one it is\n \"\"\"\n run_dir = os.path.isfile(os.path.join(\n FLAGS.input_dir, 'covisibility.json'))\n sub_dirs = [f for f in tf.gfile.ListDirectory(\n FLAGS.input_dir) if not f.startswith('.') if not f.endswith('.txt')]\n if run_dir:\n compute_depth_maps(FLAGS.input_dir)\n else:\n for f in sub_dirs:\n data_dir = os.path.join(\n FLAGS.input_dir, f)\n logger.info('Computing depth maps on dir {}'.format(data_dir))\n compute_depth_maps(data_dir)\n # By setting reuse_vars = True this ensures that the second time compute_depth_maps\n # is run that the computational graph is not re-initialized\n tf.app.flags.FLAGS.reuse_vars = True\n\n\nif __name__ == '__main__':\n tf.app.run()\n","sub_path":"mvsnet/inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":7516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"374633816","text":"'''\nGiven a Linked List Representation of Complete Binary Tree. 
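The inference loop in mvsnet/inference.py above walks the dataset in num_batches = ceil(sample_size / batch_size) steps and lets tf.errors.OutOfRangeError end the epoch early. A minimal, framework-free sketch of that batching arithmetic (batch_bounds is a hypothetical helper name):

import math

def batch_bounds(sample_size, batch_size):
    # num_batches = ceil(sample_size / batch_size); the last batch may be short.
    num_batches = math.ceil(sample_size / batch_size)
    return [(step * batch_size, min((step + 1) * batch_size, sample_size))
            for step in range(num_batches)]

print(batch_bounds(10, 4))  # [(0, 4), (4, 8), (8, 10)]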
The task is to construct the Binary tree.\nNote : The complete binary tree is represented as a linked list in a way where if root node is stored at position i, its left, \nand right children are stored at position 2*i+1, 2*i+2 respectively.\n \nGiven tree is \n 1\n / \\\n 2 3\n / \\\n 4 5\nNow, the level order traversal of the above tree is 1 2 3 4 5.\n'''\nclass ListNode:\n \n def __init__(self, data):\n self.data = data\n self.next = None\n \nclass BinaryTreeNode:\n \n def __init__(self, data):\n self.data = data\n self.left = None\n self.right = None\n \nclass Conversion:\n \n def __init__(self, data = None):\n \n self.head = None\n self.root = None\n \n def push(self, new_data):\n \n node = ListNode(new_data)\n node.next = self.head\n self.head = node\n \n def convertList2Binary(self):\n \n q = []\n \n if(self.head is None):\n self.head = None\n self.root = None\n \n self.root = BinaryTreeNode(self.head.data)\n \n q.append(self.root)\n \n self.head = self.head.next\n \n while(self.head):\n \n parent = q.pop(0)\n \n leftChild = None\n rightChild = None\n \n leftChild = BinaryTreeNode(self.head.data)\n q.append(leftChild)\n self.head = self.head.next\n \n if(self.head):\n rightChild = BinaryTreeNode(self.head.data)\n q.append(rightChild)\n self.head = self.head.next\n \n parent.left = leftChild\n parent.right = rightChild\n \n def inOrderTraversal(self, root):\n \n if(root is None):\n return\n \n self.inOrderTraversal(root.left)\n print(root.data, end = \" \")\n self.inOrderTraversal(root.right)\n \nconv = Conversion() \nconv.push(36) \nconv.push(30) \nconv.push(25) \nconv.push(15) \nconv.push(12) \nconv.push(10)\n\nconv.convertList2Binary()\nconv.inOrderTraversal(conv.root)\n \n ","sub_path":"geeksforgeeks/tree/21_Make_Binary_Tree_From_Linked_List.py","file_name":"21_Make_Binary_Tree_From_Linked_List.py","file_ext":"py","file_size_in_byte":2350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"541597900","text":"import nltk\nimport json\n\ndef download_data():\n nltk.download()\n nltk.download('punkt')\n\n\ndef main():\n words_by_speech_parts = {}\n\n with open(\"words_dictionary.json\", \"r\") as dictionary:\n data = json.load(dictionary)\n for key in data.keys():\n print(f'Processing word: {key}')\n words_by_speech_parts[key] = nltk.pos_tag(nltk.word_tokenize(key))[0][1]\n\n with open(\"dictionary_processed.json\", \"w\") as dictionary_processed:\n json.dump(words_by_speech_parts, dictionary_processed, sort_keys=True, indent=4)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"dictionary.py","file_name":"dictionary.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"495119411","text":"import json\nimport os\n\nimport boto3\n\n\nSSM_PS_PATH = os.getenv(\"SSM_PS_PATH\")\n\n\ndef callback_handler(event, context):\n\n client = boto3.client('ssm')\n response = client.get_parameter(\n Name=SSM_PS_PATH,\n WithDecryption=True\n )\n\n body = {\n \"message\": \"Go Serverless v1.0! 
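The note above pins down the layout: a complete binary tree stored level-order keeps node i's children at indices 2*i + 1 and 2*i + 2. The queue-based convertList2Binary can therefore also be written as plain index arithmetic; a short sketch reusing the BinaryTreeNode class defined in the snippet above:

def build(values, i=0):
    # Node i's children live at indices 2*i + 1 and 2*i + 2.
    if i >= len(values):
        return None
    node = BinaryTreeNode(values[i])
    node.left = build(values, 2 * i + 1)
    node.right = build(values, 2 * i + 2)
    return node

# build([1, 2, 3, 4, 5]) reproduces the example tree, whose level-order
# traversal is 1 2 3 4 5, matching the note above.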
Your function executed successfully!\",\n        \"existingToken\": response['Parameter']['Value']\n    }\n\n    response = {\n        \"statusCode\": 200,\n        \"body\": json.dumps(body)\n    }\n\n    return response\n\n","sub_path":"dearie-hue/lambda/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"488912711","text":"# Write a program that reads a number from 0 to 9999 and prints each of its digits separately\n# e.g.: enter the number 1834\n# units: 4\n# tens: 3\n# hundreds: 8\n# thousands: 1\n\nnum = int(input('Enter a number: '))\nu = num // 1 % 10  # Integer-divide (//) the number by 1, then take the remainder (%) modulo 10: the units digit\nd = num // 10 % 10  # Integer-divide (//) by 10, then take the remainder (%) modulo 10: the tens digit\nc = num // 100 % 10  # Integer-divide (//) by 100, then take the remainder (%) modulo 10: the hundreds digit\nm = num // 1000 % 10  # Integer-divide (//) by 1000, then take the remainder (%) modulo 10: the thousands digit\nprint('Units:{} \\nTens:{} \\nHundreds:{} \\nThousands:{}'.format(u, d, c, m))\n","sub_path":"desafio23.py","file_name":"desafio23.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"509786216","text":"import simuPOP as sim\nimport random\nfrom simuPOP.utils import saveCSV\n\npop = sim.Population(size=[6000], loci=1000, infoFields=[\"qtrait\"])\n\n\ndef qtrait(geno):\n    trait = random.normalvariate(sum(geno)*5, random.uniform(0.0001, 3))\n    if trait <= 0:\n        trait = random.uniform(0.0001, 1)\n    return trait\n\npop.evolve(\n    initOps=[\n        sim.InitSex(),\n        sim.InitGenotype(prop=[0.7, 0.3])\n    ],\n    matingScheme=sim.RandomMating(),\n    postOps=[sim.PyQuanTrait(loci=(0, 1, 2, 3, 4, 5, 6, 10, 100), func=qtrait, infoFields=[\"qtrait\"])],\n    gen=10\n)\n\ngeno = list()\nfor i in pop.individuals():\n    geno.append(i.genotype())\n\npheno = list()\nfor i in pop.individuals():\n    pheno.append(i.qtrait)\n\nf = open(\"qtrait1.txt\", \"w\")\nf.write(\"\\n\".join(map(lambda x: str(x), pheno)))\nf.close()\n\nsaveCSV(pop,\"sample.csv\")","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"146358176","text":"# Aerolith 2.0: A web-based word game website\n# Copyright (C) 2011 Cesar Del Solar\n# \n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n# \n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n# \n# You should have received a copy of the GNU General Public License\n# along with this program.
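The digit-extraction exercise above peels off digits with paired // and % operations. An equivalent formulation with divmod, assuming 0 <= num <= 9999 as the exercise states:

num = 1834
thousands, rest = divmod(num, 1000)
hundreds, rest = divmod(rest, 100)
tens, units = divmod(rest, 10)
print(units, tens, hundreds, thousands)  # 4 3 8 1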
If not, see .\n\n# To contact the author, please email delsolar at gmail dot com\n\nfrom wordwalls.models import DailyChallenge, DailyChallengeLeaderboard, DailyChallengeLeaderboardEntry, SavedList, DailyChallengeName\nfrom wordwalls.models import WordwallsGameModel\nfrom django.contrib import admin\n\nclass DailyChallengeAdmin(admin.ModelAdmin):\n fields = ['lexicon', 'date', 'name']\n search_fields = ['name']\n list_display = ('date', 'name', 'lexicon')\n readonly_fields = ('date',)\n\nclass DailyChallengeLeaderboardAdmin(admin.ModelAdmin):\n fields = ['challenge', 'maxScore']\n\nclass DailyChallengeLeaderboardEntryAdmin(admin.ModelAdmin):\n fields = ['user', 'score', 'timeRemaining', 'board']\n\n\nadmin.site.register(DailyChallengeLeaderboard, DailyChallengeLeaderboardAdmin)\nadmin.site.register(DailyChallengeLeaderboardEntry, DailyChallengeLeaderboardEntryAdmin)\nadmin.site.register(DailyChallenge, DailyChallengeAdmin)\nadmin.site.register(DailyChallengeName)\n\nclass WordwallsGameAdmin(admin.ModelAdmin):\n fields = ['host', 'playing', 'inTable', 'lastActivity', 'currentGameState', 'gameType', 'playerType']\n search_fields = ['host', 'lastActivity']\n readonly_fields = ('lastActivity', )\n\nadmin.site.register(WordwallsGameModel, WordwallsGameAdmin)\n\nclass WordwallsSavedListAdmin(admin.ModelAdmin):\n fields = ['user', 'name', 'lexicon', 'created', 'lastSaved', 'numAlphagrams', 'goneThruOnce']\n \n readonly_fields = ('lastSaved', 'created')\n\nadmin.site.register(SavedList, WordwallsSavedListAdmin)","sub_path":"djAerolith/wordwalls/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":2234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"268107203","text":"'''\nprogram\n'''\n\nfrom random import *\n\njedynka = 0\ndwójka = 0\ntrójka = 0\nczwórka = 0\npiątka = 0\nszóstka = 0\n\nfor i in range(100):\n x = randint(1, 6)\n if x == 1:\n jedynka += 1\n elif x == 2:\n dwójka += 1\n elif x == 3:\n trójka += 1\n elif x == 4:\n czwórka += 1\n elif x == 5:\n piątka += 1\n else:\n szóstka += 1 \n\nprint(f'jedynka = {jedynka}\\ndwójka = {dwójka}\\ntrójka = {trójka}\\nczwórka = {czwórka}\\npiątka = {piątka}\\nszóstka = {szóstka}') \n \n\n","sub_path":"02-ControlStructures/CS40.py","file_name":"CS40.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"456361140","text":"#!/usr/bin/python3\n# SPDX-License-Identifier: Apache-2.0\n\n\nimport mwopenstackclients\n\nclients = mwopenstackclients.clients()\n\nnova = clients.novaclient()\nglance = clients.glanceclient()\nimages = glance.images.list()\nimagedict = {f.id: f for f in images}\nbeforeimagedict = imagedict.copy()\n\nif False:\n projects = clients.allprojects()\n for project in projects:\n glance_per_project = clients.glanceclient(project.id)\n project_images = glance_per_project.images.list()\n for project_image in project_images:\n if project_image.id not in imagedict:\n imagedict[project_image.id] = project_image\n\nfor id, image in imagedict.items():\n image.usage = 0\n\ninstances = clients.allinstances(allregions=True)\ni = 0\nfor instance in instances:\n if instance.image[\"id\"] not in imagedict:\n print(\" -- unknown image %s\" % instance.image[\"id\"])\n else:\n imagedict[instance.image[\"id\"]].usage += 1\n\nsorted = {k: v for k, v in sorted(imagedict.items(), key=lambda item: item[1].usage)}\n\nfor id, image in sorted.items():\n print(\"%s: %s, %s\" % (id, image.name, 
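CS40.py above tallies 100 die rolls with six named counters and an if/elif chain. A hedged, equivalent sketch using collections.Counter instead:

from collections import Counter
from random import randint

rolls = Counter(randint(1, 6) for _ in range(100))
for face in range(1, 7):
    print(face, '=', rolls[face])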
image.usage))\n","sub_path":"modules/openstack/files/zed/admin_scripts/wmcs-imageusage.py","file_name":"wmcs-imageusage.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"65536669","text":"\nimport numpy as np\nfrom scipy import signal\nimport matplotlib.pyplot as plt\nimport argparse\nfrom rram import rram\n\nt_ramp = 1e-6\nvdd = 1.\ndt = 1e-10\n\n############################\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--gap_min', type=float, default=2e-10)\nparser.add_argument('--gap_max', type=float, default=19e-10)\nparser.add_argument('--I0', type=float, default=1e-6)\nparser.add_argument('--g0', type=float, default=0.375e-9)\nargs = parser.parse_args()\n\nprint (args)\n\ngap_min=args.gap_min\ngap_max=args.gap_max\nI0=args.I0\ng0=args.g0\ndeltaGap0=1e-4\nmodel_switch=0\n\n############################\n\nr = rram(shape=(2, 2), gap_min=gap_min, gap_max=gap_max, gap_ini=gap_max, I0=I0, g0=g0, deltaGap0=deltaGap0, model_switch=model_switch)\n\nVs = np.concatenate((np.linspace(0., vdd, t_ramp/dt), np.linspace(vdd, vdd, 1e-6/dt), np.linspace(vdd, 0., t_ramp/dt)))\nsteps = np.shape(Vs)[0] \nTs = np.linspace(0., steps*dt, steps)\nIs = []\nRs = []\n\nfor v in Vs:\n Rs.append(r.R())\n i = r.step(v, dt)\n Is.append(i)\n \nVs1 = np.copy(Vs)\nIs1 = np.copy(Is)\nRs1 = np.copy(Rs)\n\n############################\n\n# r = rram(shape=(2, 2), gap_min=gap_min, gap_max=gap_max, gap_ini=gap_min, I0=I0, g0=g0, deltaGap0=deltaGap0, model_switch=model_switch)\n\nVs = np.concatenate((np.linspace(0., -vdd, t_ramp/dt), np.linspace(-vdd, -vdd, 1e-6/dt), np.linspace(-vdd, 0., t_ramp/dt)))\nsteps = np.shape(Vs)[0] \nTs = np.linspace(0., steps*dt, steps)\nIs = []\nRs = []\n\nfor v in Vs:\n Rs.append(r.R())\n i = r.step(v, dt)\n Is.append(i)\n \nIs = np.array(Is) * -1.\n\nVs2 = np.copy(Vs)\nIs2 = np.copy(Is)\nRs2 = np.copy(Rs)\n\n############################\n \nTs = np.linspace(0., 2*steps*dt, 2*steps)\nVs = np.concatenate((Vs1, Vs2))\nIs = np.concatenate((Is1, Is2))\nRs = np.concatenate((Rs1, Rs2))\n\n############################\n\nratio = np.max(Rs) / np.min(Rs)\n\nflag = True\nflag = flag and (np.min(Rs) > 5e5) and (np.min(Rs) < 5e6) \nflag = flag and (np.max(Rs) > 5e7) and (np.max(Rs) < 5e8)\nflag = flag and (ratio > 90.) 
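One caveat about the ramp construction in test2.py above: np.linspace(0., vdd, t_ramp/dt) passes a float as the sample count, which recent NumPy versions reject with a TypeError. A corrected sketch of the same waveform, assuming the parameter values defined at the top of that script:

import numpy as np

t_ramp, dt, vdd = 1e-6, 1e-10, 1.0
n = int(round(t_ramp / dt))  # np.linspace requires an integer sample count
Vs = np.concatenate((np.linspace(0.0, vdd, n),
                     np.full(int(round(1e-6 / dt)), vdd),
                     np.linspace(vdd, 0.0, n)))
print(Vs.shape)  # (30000,)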
and (ratio < 150.)\n\nprint (np.min(Rs) / 1e6, np.max(Rs) / 1e6, ratio)\n\nflag = True\n\nif flag:\n plt.rcParams['font.sans-serif'] = \"Arial\"\n plt.rcParams['font.family'] = \"sans-serif\"\n plt.rcParams['font.size'] = 10.\n f, ax = plt.subplots(2, 1)\n f.set_size_inches(3.5, 3.5)\n\n ax[0].semilogy(Vs, Is)\n ax[1].semilogy(Ts, Rs)\n\n name = '%0.12f_%0.12f_%0.12f_%0.12f.png' % (args.gap_min, args.gap_max, args.I0, args.g0)\n plt.show()\n \n############################\n\n","sub_path":"1.0/v1/sweep/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":2498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"615048573","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport csv\nimport math\n\ndef std_derivation(list_of_values, med=None):\n\n rel_med = 1\n #berechnet den Mittelwert\n if not med:\n avg_vals = 0.0\n for val in list_of_values:\n avg_vals = avg_vals + val\n med = avg_vals / len(list_of_values)\n rel_med = 0 \n\n #berechnet Standardabweichung\n sum_vals = 0.0\n if len(list_of_values) > 1:\n for val in list_of_values:\n sum_vals = sum_vals + (val - med)*(val - med)\n res = math.sqrt(sum_vals / (len(list_of_values) - rel_med))\n elif len(list_of_values) == 1:\n res = list_of_values[0]\n else:\n print(\"empty list\")\n res = 0.0\n return res\n\n\ndef daten_aufbereiten(filename, kraft_schwellwert):\n \"\"\"\n\n \"\"\"\n #setzt den initialen wdh_counter auf 0\n wdh_counter = 0\n\n #csv reader, r bedeutet read, durch quote_nonnumeric werden werte als numersich dargestellt\n with open(filename, \"r\") as csvfile:\n csvreader = csv.reader(csvfile, quoting=csv.QUOTE_NONNUMERIC)\n\n #Leere Liste wird erstellt\n row_old = []\n #beginnend ab Zeile 14\n zeile = 14\n # für jede Zeile der Datei\n for row in csvreader:\n\n #Iterator, so lange die vorherige Zeile nicht der aktuellen entspricht, mache weiter\n if not row_old:\n row_old = row\n #yield is a keyword that is used like return, except the function will return a generator.\n yield 0.0, 0.0, 0.0, 0\n continue\n\n # definition der Parameter für Liste\n kraft, zeit = row\n kraft_old, zeit_old = row_old\n zeitdelta = zeit - zeit_old\n kraft_integral = zeitdelta*kraft\n \n #wenn Kraft größer gleich dem Kraftschwellwert und vorherige Kraft kleiner Kraftschwellwert erhöhe \n #wdh counter um 1\n if kraft >= kraft_schwellwert and kraft_old < kraft_schwellwert:\n wdh_counter = wdh_counter + 1\n\n yield kraft, zeitdelta, kraft_integral, wdh_counter\n\n # hebe die aktuelle Zeile auf\n row_old = row\n \n\n\n# funktion zur Analyse der einzelnen Intervalle mit filename und maximal_kraft als Eingabeparameter\ndef analyse_intervals(filename, maximal_kraft):\n\n kraft_target = maximal_kraft * 0.6\n min_TZ = kraft_target - (kraft_target * 0.05)\n max_TZ = kraft_target + (kraft_target * 0.05)\n min_TZ_10 = kraft_target - (kraft_target * 0.1)\n max_TZ_10 = kraft_target + (kraft_target * 0.1)\n\n kraft_schwellwert = kraft_target * 0.2\n\n #erstelle leeres dictionary\n summup = {}\n \n for prep in daten_aufbereiten(filename, kraft_schwellwert):\n kraft, delta_t, _int_k, wdh = prep\n #wenn \n if wdh not in summup:\n #definiere Einträge für dictionary\n summup[wdh] = {\n 'bT': 0.0, 'pT': 0.0, 'gT': 0.0, 'dTZ': 0.0, 'dTZ_10': 0.0, \n 'aK': 0.0, 'bC': 0.0, 'sK': 0.0}\n\n #wenn Bedingung erfüllt:\n if kraft >= kraft_schwellwert:\n #summiere in Abhängigkeit der Wdh vorherige Summe + Zeitdelta\n summup[wdh]['bT'] = summup[wdh]['bT'] + delta_t\n # ansonsten\n else:\n #summiere in Abhängigkeit der 
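Note the divisor convention in std_derivation above: when no reference mean is passed, rel_med becomes 0 and the squared deviations are divided by n (a population standard deviation around the computed mean); only when an external mean is supplied does it divide by n - 1. The no-argument case can therefore be cross-checked against statistics.pstdev:

import statistics

vals = [4.8, 5.2, 5.0, 4.9]  # made-up sample
print(statistics.pstdev(vals))  # should match std_derivation(vals)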
Wdh vorherige Summe + Zeitdelta\n summup[wdh]['pT'] = summup[wdh]['pT'] + delta_t\n\n #summiere in Abhängigkeit der Wdh vorherige Summe + Zeitdelta\n summup[wdh]['gT'] = summup[wdh]['gT'] + delta_t\n\n # zeit in TargetZone 5%\n if kraft >= min_TZ and kraft <= max_TZ:\n # summup[wdh]['kTZ'].append(kraft)\n summup[wdh]['dTZ'] = summup[wdh]['dTZ'] + delta_t\n\n # zeit in TargetZone 10%\n if kraft >= min_TZ_10 and kraft <= max_TZ_10:\n # summup[wdh]['kTZ'].append(kraft)\n summup[wdh]['dTZ_10'] = summup[wdh]['dTZ_10'] + delta_t\n \n\n # Kraftdurchschnitt über Schwellwert\n if kraft >= kraft_schwellwert:\n #sk = summe Kraft\n summup[wdh]['sK'] = summup[wdh]['sK'] + kraft * delta_t\n #Zählt die Anzahl der Werte\n summup[wdh]['bC'] = summup[wdh]['bC'] + 1.0\n # ak = average Kraft\n summup[wdh]['aK'] = summup[wdh]['sK'] / summup[wdh]['bT']\n\n return summup\n\n\ndef kraftausdauer_auswertung(Wiederholungen, Maximalkraft,\n Zeitbeschränkung=None, Zeitschwellwert=2.0,\n Kraftbeschränkung=None):\n counter = 0\n summup_bT = 0.0\n summup_dTZ = 0.\n summup_dTZ_10 = 0.0\n\n Liste_bT = []\n Liste_avgKraft = []\n Zeitschwellwert = 2\n\n zeit_wdh = -1\n kraft_wdh = -1\n \n kraft_target = Maximalkraft * 0.6\n Kraftschwellwert = kraft_target * 0.2\n\n wdh_list = []\n\n for wdh, data_dict in Wiederholungen.items():\n\n # skip the first interval as it contains nothing relevant\n if wdh == 0:\n continue\n\n belastungs_kraft = data_dict.get('aK')\n belastungs_zeit = data_dict.get('bT')\n\n # nimm zu kleine Werte nicht mit in die Berechnung\n if Zeitschwellwert != 0 and belastungs_zeit <= Zeitschwellwert:\n continue\n\n # nimm zu kleine Werte nicht mit in die Berechnung\n if Kraftschwellwert != 0 and belastungs_kraft <= Kraftschwellwert:\n continue\n\n\n if Zeitbeschränkung != 0 and belastungs_zeit <= Zeitbeschränkung:\n if wdh - zeit_wdh == 1:\n break\n zeit_wdh = wdh\n \n\n if Kraftbeschränkung != 0 and belastungs_kraft <= kraft_target - kraft_target * Kraftbeschränkung:\n if wdh - kraft_wdh == 1:\n break\n kraft_wdh = wdh\n\n #print('%d: %r' % (wdh, data_dict))\n counter = counter + 1\n summup_bT = summup_bT + data_dict.get('bT')\n summup_dTZ = summup_dTZ + data_dict.get('dTZ')\n summup_dTZ_10 = summup_dTZ_10 + data_dict.get('dTZ_10')\n \n Liste_bT.append(data_dict.get('bT'))\n Liste_avgKraft.append(data_dict.get('aK'))\n wdh_list.append(wdh)\n\n avg_bT = summup_bT / counter\n avg_dTZ = summup_dTZ / counter\n avg_dTZ_10 = summup_dTZ_10 / counter\n\n std_bT = std_derivation(Liste_bT, Kraftschwellwert)\n std_avgKraft = std_derivation(Liste_avgKraft, kraft_target)\n\n results = {}\n results['Kraftbeschränkung (%)'] = Kraftbeschränkung\n \n results['effektive Kraftbeschränkung (kg)'] = (kraft_target - kraft_target * Kraftbeschränkung)\n results['Zeitbeschränkung'] = Zeitbeschränkung\n\n results[\"Abruch bei Interval\"] = wdh\n results['gültige Intervalle'] = counter\n results['durchschnittliche Belastungszeit'] = avg_bT \n results['Durchschnitt in Targetzone 5%'] = avg_dTZ\n results['Durchschnitt in Targetzone 10%'] = avg_dTZ_10\n results['Standardabweichung Belastungszeit'] = std_bT\n results['Standartabweichung Krafttarget'] = std_avgKraft\n\n idx = \"k%.2f:t%.2f\" % (Kraftbeschränkung, Zeitbeschränkung)\n return {idx: {\n 'detail': results,\n 'valid': counter,\n 'wdhs': wdh_list,\n }\n }\n\n\ndef get_kraftausdauer(filename, maximalkraft):\n \"\"\"\n Kraftausdauer Auswertung mit verschiedenen Zeit und Kraft Beschränkungen\n \"\"\"\n\n Zeitbeschränkungen = [0.0, 6.0, 5.0, 4.0]\n Kraftbeschränkungen = [0.0, 0.1, 
0.07]\n\n    wiederholungen = analyse_intervals(filename, maximal_kraft=maximalkraft)\n\n    for Kraftbeschränkung in Kraftbeschränkungen:\n        for Zeitbeschränkung in Zeitbeschränkungen:\n            Ergebniss = kraftausdauer_auswertung(wiederholungen,\n                                                 Maximalkraft=maximalkraft,\n                                                 Zeitbeschränkung=Zeitbeschränkung,\n                                                 Kraftbeschränkung=Kraftbeschränkung\n                                                 )\n\n            yield Ergebniss\n\n    return\n\n\n\n\n\n\n\n# %%\n","sub_path":"kraftausdauer.py","file_name":"kraftausdauer.py","file_ext":"py","file_size_in_byte":7926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"42973071","text":"import math \nfrom random import shuffle \nfrom random import randint\n\nglobal vetorClasses\nvetorClasses = []\n\n\n# Function that reads the file.\ndef readDataFun(fileName): \n    \n    data = [] \n    f = open(fileName, 'r') \n    lines = f.read().splitlines()\n    f.close() \n\n    for i in range(0, len(lines)): \n        line = lines[i];\n        data.append(line) \n\n    return data \n\n# Function that computes the Euclidean distance between points.\ndef EuclideanDistance(x,y):\n    distance = 0\n\n    a = x.split(' ')\n    b = y.split(' ')[:-1]\n\n    for i in range(0,(len(a)-1)):\n        distance += math.pow(float(a[i]) - float(b[i]),2) \n\n    return (math.sqrt(distance), y[-1])\n\n# Classification function.\ndef classify(feature,k,training):\n    global vetorClasses\n    distancias = []\n    for i in training:\n        distancias.append(EuclideanDistance(feature,i))\n    distancias.sort() \n\n    for i in range(0,k):\n        vetorClasses[int(distancias[i][1])] += 1\n    \n    maior = 0\n    for i in range(len(vetorClasses)):\n        if vetorClasses[i] >= vetorClasses[maior]:\n            maior = i\n    clearArray() \n    return maior\n\n# Function that clears the class vector.\ndef clearArray():\n    for i in range(len(vetorClasses)):\n        vetorClasses[i] = 0\n\n# Function that splits the data into training and test sets.\ndef FoldsFun(k,data,iterations):\n    corrects = 0.0\n    total = len(data)\n    matrix = [[0 for x in range(10)] for y in range(10)] \n\n    training = data[iterations:len(data)]\n    test = data[0:iterations]\n\n    for item in test:\n        itemClass = item[-1]\n        feature = item[:-1]\n        \n        deduction = classify(feature,k,training)\n\n        if(float(deduction) == float(itemClass)):\n            corrects += 1\n            matrix[int(itemClass)][int(deduction)] += 1\n        else:\n            matrix[int(itemClass)][int(deduction)] += 1\n\n    print(\"\")\n    print(\"------Confusion Matrix------\")\n    print(\" 0 1 2 3 4 5 6 7 8 9 = LABEL\")\n    for i in range(0,10):\n        print(matrix[i], end=\"\")\n        print(\" =\",i)\n    print(\"\")\n    \n    accuracy = corrects / iterations\n    return accuracy\n\n# Function that returns the accuracy percentage.\ndef accuracyFun(k,data,iterations):\n    accuracy = 0.0\n    shuffle(data)\n\n    accuracy = FoldsFun(k, data,iterations)\n    print(\"Accuracy: %f\" % accuracy)\n    \n# Main function.\ndef main():\n    global vetorClasses\n\n    data = readDataFun('treinamento.txt')\n\n    print(\"Enter a value for [K]:\", end=\"\")\n    k = input()\n\n    print(\"\\n[1] choose the folds yourself [2] random among 25%,50%,75%:\", end=\"\")\n    tmp = input()\n\n    if(int(tmp) == 1):\n        print(\"Enter a value for [Folds]:\", end=\"\")\n        folds = input()\n    else:\n        tmp = (randint(0,100))\n\n        if(tmp < 33):\n            var = 25\n            folds = (25 * len(data))/100\n        elif(33 < tmp < 66):\n            var = 50\n            folds = (50 * len(data))/100\n        elif(66 < tmp):\n            var = 75\n            folds = (75 * len(data))/100\n\n        print(\"Chosen percentage: %d\" % var, end=\"\")\n        print(\"%\")\n\n    for i in range(0,10):\n        vetorClasses.append(0)\n\n    accuracyFun(int(k),data,int(folds))\n    \nif __name__ == '__main__': \n    main()
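classify() in knn.py above tallies the k nearest labels in a global vetorClasses array that must be cleared after every call. A hedged alternative with collections.Counter, assuming rows are space-separated feature strings with a trailing class label, as EuclideanDistance above expects:

from collections import Counter
import math

def classify(feature, k, training):
    x = [float(v) for v in feature.split()[:-1]]
    dists = []
    for row in training:
        *vals, label = row.split()
        # math.dist (Python 3.8+) computes the Euclidean distance directly.
        dists.append((math.dist(x, [float(v) for v in vals]), label))
    # Majority vote over the k closest rows; no global state needed.
    votes = Counter(label for _, label in sorted(dists)[:k])
    return int(votes.most_common(1)[0][0])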
","sub_path":"knn.py","file_name":"knn.py","file_ext":"py","file_size_in_byte":3192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"180155222","text":"import json\r\nfrom datetime import datetime\r\nfrom requests import Request, Session\r\nfrom requests.exceptions import ConnectionError, Timeout, TooManyRedirects\r\n\r\n# set currency and API url for global metrics\r\nwhile True:\r\n limit = input(\"How many cryptos would you like to return?\");\r\n currency = 'USD'\r\n quotes_url = 'https://pro-api.coinmarketcap.com/v1/cryptocurrency/listings/latest?limit='+str(limit)\r\n\r\n #set headers and API key\r\n headers = {\r\n 'Accepts': 'application/json',\r\n 'X-CMC_PRO_API_KEY': '5ba046a8-ce16-4013-8b5c-7370e96a4e1a',\r\n }\r\n\r\n session = Session()\r\n session.headers.update(headers)\r\n\r\n try:\r\n response = session.get(quotes_url)\r\n data = json.loads(response.text)\r\n # print(json.dumps(data, sort_keys=True, indent=4))\r\n except (ConnectionError, Timeout, TooManyRedirects) as e:\r\n print(e)\r\n\r\n data = data['data']\r\n for currency in data:\r\n rank = currency['cmc_rank']\r\n name = currency['name']\r\n symbol = currency['symbol']\r\n \r\n circ_supply = int(currency['circulating_supply'])\r\n total_supply = int(currency['total_supply'])\r\n\r\n quotes = currency['quote']['USD']\r\n market_cap = quotes['market_cap']\r\n hour_change = quotes[\"percent_change_1h\"]\r\n day_change = quotes[\"percent_change_24h\"]\r\n week_change = quotes[\"percent_change_7d\"]\r\n price = quotes['price']\r\n volume = quotes['volume_24h']\r\n\r\n volume_string = '{:,}'.format(volume)\r\n market_cap_string = '{:,}'.format(market_cap)\r\n circ_supply_string = '{:,}'.format(circ_supply)\r\n total_supply_string = '{:,}'.format(total_supply)\r\n\r\n print(str(rank) + ': ' + name + '(' + symbol + ')')\r\n print('Market cap: \\t\\t$' + market_cap_string)\r\n print('Price: \\t\\t\\t$' + str(price))\r\n print('24Hr Volume: \\t\\t$' + volume_string)\r\n print('Hour change: \\t\\t' + str(hour_change) + '%')\r\n print('Day change: \\t\\t' + str(day_change) + '%')\r\n print('Week change: \\t\\t' + str(week_change) + '%')\r\n print(\"Total supply: \\t\\t$\" + total_supply_string)\r\n print('Circulating supply: \\t$' + circ_supply_string)\r\n print('Percentage of coins in circulation: ' + str(int(circ_supply/total_supply)) + '%')\r\n print('\\n')\r\n\r\n\r\n choice = input('Again? (y/n)')\r\n\r\n if choice == 'n':\r\n break","sub_path":"src/funStuff/coincap_ticker.py","file_name":"coincap_ticker.py","file_ext":"py","file_size_in_byte":2251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"83005228","text":"'''\nauthor: shane caldwell \ninput:\nplain text file containing contets of academic paper\nreturns:\ndictionary of metadata\n\ntitle:\nauthor: for first author\njournal: \ndate: publish date. 
datettime object\nabstract: \ncitations: \nkeywords: \n'''\nimport re \nfrom helpers import date_parser\nfrom helpers.keyword_finder import Keyword_Finder\n\nclass Node_Parser:\n\n\tdef __init__(self, filename):\n\t\tself.metadata = {}\n\t\tself.filename = filename\n\t\n\tdef parse_node(self):\n\t\ttxt = open(self.filename)\n\t\traw_text = txt.read()\n\t\t#begin parsing\n\t\tself.metadata['title'] = self.parse_title(raw_text)\n\t\tself.metadata['authors'] = self.parse_authors(raw_text, self.metadata['title'])\n\t\tself.metadata['author'] = self.parse_first_author(self.metadata['authors'])\n\t\tself.metadata['abstract'] = self.parse_abstract(raw_text)\n\t\tself.metadata['date'] = self.parse_date(raw_text)\n\t\tself.metadata['citations'] = self.parse_citations(raw_text)\n\t\tself.metadata['keywords'] = self.parse_keywords(raw_text)\n\t\treturn self.metadata\n\n\tdef parse_abstract(self, raw_text):\n\t\tstart_index = raw_text.find(\"ABSTRACT\")\n\t\tabstract_start = raw_text[start_index + len(\"ABSTRACT\") + len(\"\\n\"):]\n\t\tend = abstract_start.find(\"\\n\")\n\t\treturn abstract_start[:end]\n\n\tdef parse_title(self, raw_text):\n\t\ttitle_end = raw_text.find(\"\\n\")\n\t\ttitle = raw_text[:title_end]\n\t\treturn title\n\n\tdef parse_authors(self, raw_text, title):\n\t\tauthors = []\n\t\tstart_index = raw_text.find(title)\n\t\tauthor_start = raw_text[start_index + len(title) + len(\"\\n\"):]\n\t\tauthor_end = author_start.find('\\n')\n\t\tauthor_string = author_start[:author_end]\n\t\tsplit = re.split('(\\d+)', author_string)\n\t\tfor entry in split:\n\t\t\tif entry.isdigit():\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tauthors.append(entry)\n\t\tfor i in range(len(authors)):\n\t\t\tauthors[i] = authors[i].replace(\")\", \"\")\n\t\t\tauthors[i] = authors[i].replace(\",\", \"\")\n\t\tauthors = filter(None, authors)\n\t\treturn authors\n\n\tdef parse_first_author(self, authors):\n\t\tFIRST = 0 \n\t\tLAST = 1\n\t\tfirst_author = authors[FIRST]\n\t\tfirst_author = first_author.split()\n\t\treturn (first_author[FIRST], first_author[LAST])\n\n\n\tdef parse_date(self, raw_text):\n\t\t'''\n\t\tNaively assuming the first date pulled will be the publish date.\n\t\tOthers are probably nonense - it'd be nice to be able to know\n\t\tthe difference between first review of paper and final review though \n\t\t'''\n\t\tFIRST_DATE = 0\n\t\tdates = []\n\t\tcandidate_dates = date_parser.find_dates(raw_text, 1)\n\t\tfor entry in candidate_dates:\n\t\t\tdates.append(entry)\n\t\treturn dates[FIRST_DATE]\n\n\tdef parse_citations(self, raw_text):\n\t\tstart_index = raw_text.find(\"REFERENCES\")\n\t\tcitations_start = raw_text[start_index + len(\"REFERENCES\") + len(\"\\n\"):]\n\t\tcitations = citations_start.split('\\n')\n\t\treturn citations\n\n\tdef parse_keywords(self, raw_text):\n\t\tkeyword_finder = Keyword_Finder(['../helpers/corpus/science_essays.txt', '../helpers/corpus/science_paper.txt',\n '../helpers/corpus/sherlock.txt'], raw_text)\n\t\treturn keyword_finder.keywords\n\n\nif __name__ == \"__main__\":\n\tnode_parser = Node_Parser(\"test_docs/paper1.txt\")\n\tnode_parser.parse_node()\n\tnode_parser.metadata['keywords']\n","sub_path":"src/node_parser.py","file_name":"node_parser.py","file_ext":"py","file_size_in_byte":3244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"481216806","text":"import inspect\nfrom functools import wraps\nfrom urllib.parse import urljoin\n\nfrom requests import Session\n\nfrom .config_manager import v\nfrom .utils import 
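parse_authors above leans on a detail of re.split: a capturing group in the pattern keeps the matched affiliation digits as separate list entries, which are then skipped. A small self-contained demo (the author string is made up):

import re

author_string = 'Jane Doe1), John Smith2)'
parts = re.split(r'(\d+)', author_string)
# parts == ['Jane Doe', '1', '), John Smith', '2', ')']
authors = []
for p in parts:
    if p.isdigit():
        continue
    name = p.replace(')', '').replace(',', '').strip()
    if name:
        authors.append(name)
print(authors)  # ['Jane Doe', 'John Smith']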
format_json\n\n\nclass Method:\n    GET = 'GET'\n    POST = 'POST'\n    PUT = 'PUT'\n    DELETE = 'DELETE'\n\n\ndef request_mapping(path='', method=Method.GET):\n    def decorate(cls_or_func):\n        @wraps(cls_or_func)\n        def wrapper_cls(*args, **kwargs):\n            cls = cls_or_func\n            obj = cls(*args, **kwargs)\n            try:\n                obj.path = path.format(**v.wrapped_v)\n            except KeyError as e:\n                raise ValueError('Error while formatting %s: configuration key %s not found, please check' % (path, e))\n            obj.session = Session()\n            return obj\n\n        @wraps(cls_or_func)\n        def wrapper_func(*args, **kwargs):\n            func = cls_or_func\n            requester = func(*args, **kwargs) or Requester()\n            func_args = inspect.getfullargspec(func).args\n            if len(func_args) > 0 and func_args[0] == 'self':\n                self = args[0]\n                requester.base_path = getattr(self, 'path', '')\n                requester.session = getattr(self, 'session', Session())\n            requester.func = func\n            requester.path = path\n            requester.method = method\n            requester.do()\n            return requester\n\n        if isinstance(cls_or_func, type):\n            return wrapper_cls\n        else:\n            return wrapper_func\n\n    return decorate\n\n\ndef add_header(self, key, value):\n    session = getattr(self, 'session', None)\n    if not session:\n        raise ValueError('add_header can only be used after the request_mapping decorator is applied to the API class')\n    session.headers[key] = value\n\n\ndef add_headers(self, headers):\n    session = getattr(self, 'session', None)\n    if not session:\n        raise ValueError('add_headers can only be used after the request_mapping decorator is applied to the API class')\n    session.headers.update(headers)\n\n\ndef get_session(self) -> Session:\n    return getattr(self, 'session', None)\n\n\nclass Requester:\n    def __init__(self, **kwargs):\n        self.func = None\n        self.session = None\n        self.base_path = ''\n        self.path = ''\n        self.method = ''\n        self.kwargs = kwargs\n        self.res = None\n        self.url = ''\n\n    def __assemble_url(self):\n        if self.path.startswith('http'):\n            self.url = self.path\n        else:\n            self.url = urljoin(self.base_path, self.path)\n\n        pv = self.kwargs.pop('path_var', {})\n        self.url = self.url.format(**pv)\n\n    def __prepare_request(self):\n        self.__assemble_url()\n\n    def __log(self):\n        print('\\n******************************************************')\n        print('1. Request URL:\\n%s\\n' % self.res.request.url)\n        print('2. API description:\\n%s\\n' % (self.func.__doc__ or self.func.__name__).strip())\n        print('3. Request headers:\\n%s\\n' % format_json(dict(self.res.request.headers)))\n        print('4. Request body:\\n%s\\n' % format_json(self.res.request.body))\n        print('5. Response:\\n%s\\n' % format_json(self.res.content))\n\n    def do(self):\n        self.__prepare_request()\n        self.res = (self.session or Session()).request(self.method, self.url, **self.kwargs)\n        self.__log()\n\n    def __getattr__(self, item):\n        return getattr(self.res, item)\n\n    @property\n    def content(self):\n        return self.res.content\n\n    def json(self):\n        return self.res.json()\n","sub_path":"src/walnuts/api_decorator.py","file_name":"api_decorator.py","file_ext":"py","file_size_in_byte":3466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"225075284","text":"from .models import *\n\nfrom rest_framework import serializers\n\nimport random\nimport re\n\n\nclass AttackSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = Attack\n        fields = '__all__'\n\n\nclass EdgeSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = Edge\n        fields = '__all__'\n\n\nclass EvolutionSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = Evolution\n        fields = '__all__'\n\n\nclass FeatureSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = Feature\n        fields = '__all__'\n\n\nclass
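A hypothetical usage sketch of the decorators above (example.com, UserApi and get_user are invented for illustration, the walnuts config is assumed to be initialised, and calling the method fires a real HTTP request via Requester.do):

@request_mapping('http://example.com/api/')
class UserApi:
    @request_mapping('users/{uid}', method=Method.GET)
    def get_user(self, uid):
        # Returning a Requester lets the decorator substitute path_var
        # into the 'users/{uid}' template before sending the request.
        return Requester(path_var={'uid': uid})

# UserApi().get_user(42) would GET http://example.com/api/users/42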
ItemSerializer(serializers.ModelSerializer):\n class Meta:\n model = Item\n fields = '__all__'\n\n\nclass TrainerItemSerializer(serializers.ModelSerializer):\n item = ItemSerializer()\n class Meta:\n model = TrainerItem\n fields = '__all__'\n\n\nclass TrainerItemSimpleSerializer(serializers.ModelSerializer):\n class Meta:\n model = TrainerItem\n fields = '__all__'\n\n def update(self, instance, validated_data):\n instance.number = validated_data.get('number', instance.number)\n if instance.number <= 0:\n instance.delete()\n else:\n instance.save()\n return instance\n\n\nclass SpeciesAttackSerializer(serializers.ModelSerializer):\n attack = AttackSerializer()\n\n class Meta:\n model = SpeciesAttack\n fields = '__all__'\n\n\nclass SpeciesSerializer(serializers.ModelSerializer):\n speciesAttack = SpeciesAttackSerializer(many=True, read_only=True)\n\n class Meta:\n model = Species\n fields = '__all__'\n\n\nclass PokemonAttackSerializer(serializers.ModelSerializer):\n attack = AttackSerializer()\n\n class Meta:\n model = PokemonAttack\n fields = '__all__'\n\n\nclass PokemonAttackSimpleSerializer(serializers.ModelSerializer):\n class Meta:\n model = PokemonAttack\n fields = '__all__'\n\n\nclass PokemonSerializer(serializers.ModelSerializer):\n pokemonAttack = PokemonAttackSerializer(many=True, read_only=True)\n species = SpeciesSerializer()\n\n class Meta:\n model = Pokemon\n fields = '__all__'\n\n\nclass PokemonSimpleSerializer(serializers.ModelSerializer):\n class Meta:\n model = Pokemon\n fields = '__all__'\n\n def update(self, instance, validated_data):\n instance.experience = validated_data.get('experience', instance.experience)\n while instance.experience >= instance.exp_to_level(instance.level + 1):\n instance.level += 1\n instance.name = validated_data.get('name', instance.name)\n instance.trainer = validated_data.get('trainer', instance.trainer)\n instance.species = validated_data.get('species', instance.species)\n instance.nature = validated_data.get('nature', instance.nature)\n instance.inParty = validated_data.get('inParty', instance.inParty)\n instance.constitution = validated_data.get('constitution', instance.constitution)\n instance.attack = validated_data.get('attack', instance.attack)\n instance.defense = validated_data.get('defense', instance.defense)\n instance.special_attack = validated_data.get('special_attack', instance.special_attack)\n instance.special_defense = validated_data.get('special_defense', instance.special_defense)\n instance.speed = validated_data.get('speed', instance.speed)\n instance.current_hp = validated_data.get('current_hp', instance.current_hp)\n instance.save()\n return instance\n\n def create(self, validated_data):\n pokemon = Pokemon()\n\n pokemon.name = validated_data['name']\n pokemon.species = validated_data['species']\n pokemon.nature = random.randint(0, 35)\n pokemon.level = validated_data['level']\n pokemon.game = validated_data['game']\n pokemon.experience = pokemon.exp_to_level(pokemon.level)\n\n pokemon.constitution = 0\n pokemon.attack = 0\n pokemon.defense = 0\n pokemon.special_attack = 0\n pokemon.special_defense = 0\n pokemon.speed = 0\n pokemon.current_hp = 0\n\n species = pokemon.species\n base_stat_total = species.base_constitution + species.base_attack + species.base_defense + \\\n species.base_special_attack + species.base_special_defense + species.base_speed\n\n for i in range(10 + pokemon.level):\n stat = random.randint(0, base_stat_total)\n if stat < species.base_constitution:\n pokemon.constitution = pokemon.constitution + 1\n 
continue\n stat = stat - species.base_constitution\n if stat < species.base_attack:\n pokemon.attack = pokemon.attack + 1\n continue\n stat = stat - species.base_attack\n if stat < species.base_defense:\n pokemon.defense = pokemon.defense + 1\n continue\n stat = stat - species.base_defense\n if stat < species.base_special_attack:\n pokemon.special_attack = pokemon.special_attack + 1\n continue\n stat = stat - species.base_special_attack\n if stat < species.base_special_defense:\n pokemon.special_defense = pokemon.special_defense + 1\n continue\n pokemon.speed = pokemon.speed + 1\n\n pokemon.save()\n\n species_attacks = SpeciesAttack.objects.filter(species=pokemon.species,\n level__lte=pokemon.level).select_related('attack').order_by(\n '-level')[:6]\n for speciesAttack in species_attacks:\n pokemon_attack = PokemonAttack()\n pokemon_attack.attack = speciesAttack.attack\n pokemon_attack.pokemon = pokemon\n pokemon_attack.save()\n\n return pokemon\n\n\nclass TrainerAttackSerializer(serializers.ModelSerializer):\n attack = AttackSerializer()\n\n class Meta:\n model = TrainerAttack\n fields = '__all__'\n\n\nclass TrainerAttackSimpleSerializer(serializers.ModelSerializer):\n class Meta:\n model = TrainerAttack\n fields = '__all__'\n\n\nclass TrainerEdgeSerializer(serializers.ModelSerializer):\n edge = EdgeSerializer()\n\n class Meta:\n model = TrainerEdge\n fields = '__all__'\n\n\nclass TrainerEdgeSimpleSerializer(serializers.ModelSerializer):\n class Meta:\n model = TrainerEdge\n fields = '__all__'\n\n\nclass TrainerFeatureSerializer(serializers.ModelSerializer):\n feature = FeatureSerializer()\n\n class Meta:\n model = TrainerFeature\n fields = '__all__'\n\n\nclass TrainerFeatureSimpleSerializer(serializers.ModelSerializer):\n class Meta:\n model = TrainerFeature\n fields = '__all__'\n\n def create(self, validated_data):\n trainer_feature = TrainerFeature.objects.create(\n trainer=validated_data['trainer'],\n feature=validated_data['feature']\n )\n trainer_feature.save()\n if '+' in trainer_feature.feature.tags:\n tags_split = trainer_feature.feature.tags.split(',')\n for tag in tags_split:\n m = re.search('(?<=\\+)\\w+', tag)\n if m is not None:\n attribute = m.group(0)\n attribute_value = getattr(trainer_feature.trainer, attribute, 0)\n attribute_value += 1\n setattr(trainer_feature.trainer, attribute, attribute_value)\n trainer_feature.trainer.save()\n\n return trainer_feature\n\n\nclass TrainerSerializer(serializers.ModelSerializer):\n pokemon = PokemonSerializer(many=True, read_only=True)\n trainerEdge = TrainerEdgeSerializer(many=True, read_only=True)\n trainerFeature = TrainerFeatureSerializer(many=True, read_only=True)\n item = TrainerItemSerializer(many=True, read_only=True)\n trainerAttack = TrainerAttackSerializer(many=True, read_only=True)\n\n class Meta:\n model = Trainer\n fields = '__all__'\n\n\nclass TrainerSimpleSerializer(serializers.ModelSerializer):\n class Meta:\n model = Trainer\n fields = ['id', 'name']\n\n\nclass GameSerializer(serializers.ModelSerializer):\n trainer = TrainerSerializer(many=True, read_only=True)\n pokemon = PokemonSerializer(many=True, read_only=True)\n\n class Meta:\n model = Game\n fields = '__all__'\n\n\nclass GameSimpleSerializer(serializers.ModelSerializer):\n class Meta:\n model = Game\n fields = ['id', 'title']\n","sub_path":"ptu/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":8508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"648915056","text":"\n# 
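The stat-rolling loop in PokemonSimpleSerializer.create above is a hand-rolled weighted sampler: each of the 10 + level points lands on a stat with probability proportional to its base value. A hedged equivalent using random.choices (the base values below are made up):

import random

def roll_stats(level, base_stats):
    stats = dict.fromkeys(base_stats, 0)
    names = list(base_stats)
    weights = [base_stats[n] for n in names]
    # Each of the (10 + level) points picks a stat with probability
    # proportional to its base value, as in the loop above.
    for name in random.choices(names, weights=weights, k=10 + level):
        stats[name] += 1
    return stats

print(roll_stats(5, {'constitution': 5, 'attack': 8, 'defense': 7,
                     'special_attack': 6, 'special_defense': 6, 'speed': 9}))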
coding: utf-8\n\n# In[2]:\n\n\nfrom sklearn.model_selection import RepeatedStratifiedKFold\nfrom sklearn.svm import LinearSVC\nfrom sklearn.feature_selection import SelectFromModel\nimport numpy as np\nimport pandas as pd\nfrom pprint import pprint\nfrom os import listdir\nfrom os.path import isfile, join\nfrom sklearn.model_selection import StratifiedKFold\nfrom scipy import stats\nimport numpy as np\nfrom sklearn import linear_model, svm\nimport re\nfrom sklearn.metrics import roc_curve, auc,f1_score\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.ensemble import RandomForestClassifier\n#s = \"../braindata/data_1_mor_select_100.csv\"\nimport os \nfrom sklearn import linear_model, svm\nfrom sklearn.ensemble import ExtraTreesClassifier\n#import xgboost as xgb\n\nprint('finished this block')\n\n\n# In[3]:\n\n\nos.getcwd()\nos.chdir('../data/adni')\nprint('finished this block')\n\n\n# In[4]:\n\n\ndd =pd.read_csv(\"combine_new_biomarker_correct.csv\",header=0)\nprint('the original training data dimension is')\nprint(dd.shape)\nimport csv\n\n\n# In[29]:\n\n\nwith open('combine_new_biomarker_correct.csv', 'r') as f:\n d_reader = csv.DictReader(f)\n #get fieldnames from DictReader object and store in list\n headers = d_reader.fieldnames\n\nos.getcwd()\nos.chdir('../../Chenxiao_Results_Output')\n\ndata=np.array(dd)\nidx_IN_columns = np.append(np.array(range(1,6)),np.array([14,22]))\nidx_IN_columns = np.append(idx_IN_columns,np.array(range(23,data.shape[1])))\n\nX=data[:,idx_IN_columns]\nX_biomarker=X[:,0:5]\n\ny=data[:,9]\ny_dxbl=data[:,10]\n\nind_num_matrix=np.isnan(X_biomarker)\nind_num_vector=np.any(ind_num_matrix,axis=1)\ny_dxbl_no_nan = y_dxbl[~ind_num_vector]\n\n\n\n\nX_no_nan=X[~ind_num_vector,:]\ny_no_nan=y[~ind_num_vector]\n\nX=X_no_nan\ny=y_no_nan\ny_dxbl=y_dxbl_no_nan\n\nMCI = (y_dxbl==2)\nMCI_index =[ i for i in range(0, MCI.shape[0]) if MCI[i]]\n\nX = stats.zscore(X)\n\nnp.isnan(X).any()\nX[np.isnan(X)] = np.median(X[~np.isnan(X)])\n\nprint(\"after the precoessing, the X.shape is \")\nprint(X.shape)\nprint(\"the y.shape is\")\nprint(y.shape)\nprint(\"the number of MCI case is\")\nprint(len(MCI_index))\n\n\n\ntest_number=1\n\nC_all=[0.01, 0.1, 1.0, 10.0, 100.0, 1000.0, 10000.0]\n#C_all=[0.01, 0.1]\nGamma_all=[0.001, 0.01, 0.1, 1.0]\n#Gamma_all=[0.001]\nKernel='linear'\n\ntextfile_name='SVM_grid_search'+Kernel+str(test_number)+'.txt'\n\nsep1 = '*' * 100\nsep2 = '*' * 50\nsep3 = '*' * 30\n\n# In[ ]:\n\n\n\nprint(\"Begin\")\nf = open(textfile_name, 'w')\nf.write(\"begin!!\\n\")\nf.close()\n\n\n\n# In[30]:\n\nfor C_select in C_all:\n for Gamma_select in Gamma_all:\n sep1 = '*' * 100\n sep2 = '*' * 50\n sep3 = '*' * 30\n \n print(\"beginning selection best number of features\")\n n_features = [10,30,50,70,80,100,1000,2000]\n# n_features = [10]\n rbf = svm.SVC(C=C_select,kernel=Kernel,gamma=Gamma_select)\n\n accr_feature = []\n f1s_feature = []\n accr_MCI_feature = []\n f1s_MCI_feature = [] \n\n for i in n_features:\n print(\"\\n\\n Number of Feature: {} {} \\n\".format(i, sep1))\n \n accr_run = []\n f1s_run = []\n accr_MCI_run = []\n f1s_MCI_run = []\n base_labels=[]\n \n for runs in range(10):\n counter=0\n print(\"\\n RUN: {} {} \\n\".format(runs, sep3))\n \n accr_CV = []\n f1s_CV=[]\n\n test_labels_MCI_CV = []\n y_pred_MCI_CV = []\n\n strat_labels = []\n \n skf = StratifiedKFold(n_splits=10, shuffle=True, random_state=int(runs))\n for train_index, test_index in skf.split(X, y): \n print(\"\\n Fold: {} {} \\n\".format(counter, sep2)) \n counter=counter+1\n \n train_data_origin, 
test_data_origin=X[train_index], X[test_index]\n train_labels, test_labels = y[train_index], y[test_index]\n strat_labels = np.append(strat_labels, test_labels)\n\n # print(\"Random forest for feature selection\")\n clf = ExtraTreesClassifier(n_estimators=250,random_state=0)\n clf = clf.fit(train_data_origin, train_labels)\n importances = clf.feature_importances_\n indices = np.argsort(importances)[::-1] \n \n index=indices[0:i]\n train_data=train_data_origin[:, index]\n test_data=test_data_origin[:, index]\n\n test_index_MCI = [test_index[i] for i in range(0, len(test_index)) if test_index[i] in MCI_index]\n\n\n \n #SVM\n rbf = rbf.fit(train_data, train_labels)\n acc = rbf.score(test_data, test_labels)\n \n #f1 calculation\n y_pred = rbf.predict(test_data)\n f1 = f1_score(test_labels, y_pred)\n \n print('SVM Accuracy: %f' % acc)\n print('SVM F1 score: %f' % f1)\n \n accr_CV = np.append(accr_CV, acc)\n f1s_CV=np.append(f1s_CV, f1)\n #MCI_case \n if (len(test_index_MCI)==0):\n print(\"There is no MCI in this shuffling test set, so we skip it\")\n else:\n test_data_MCI_origin = X[test_index_MCI]\n test_data_MCI = test_data_MCI_origin[:, index] \n test_labels_MCI = y[test_index_MCI]\n \n y_pred_MCI = rbf.predict(test_data_MCI)\n \n print(y_pred_MCI)\n print(test_labels_MCI)\n \n y_pred_MCI_CV = np.append(y_pred_MCI_CV, y_pred_MCI)\n test_labels_MCI_CV = np.append(test_labels_MCI_CV, test_labels_MCI)\n\n\n base_labels=np.append(base_labels, strat_labels) \n accr_run=np.append(accr_run, np.mean(accr_CV))\n f1s_run =np.append(f1s_run, np.mean(f1s_CV))\n accr_MCI_run = np.append(accr_MCI_run, np.sum(test_labels_MCI_CV==y_pred_MCI_CV)/len(test_labels_MCI_CV))\n f1s_MCI_run = np.append(f1s_MCI_run, f1_score(test_labels_MCI_CV, y_pred_MCI_CV))\n\n print(\"the mean accr_CV is\")\n print(np.mean(accr_CV))\n print(\"the mean f1s_CV is\")\n print(np.mean(f1s_CV))\n print(\"the accr_MCI_CV is\")\n print(np.sum(test_labels_MCI_CV==y_pred_MCI_CV)/len(test_labels_MCI_CV))\n print(\"the f1_score_MCI is\")\n print(f1_score(test_labels_MCI_CV, y_pred_MCI_CV))\n \n accr_feature=np.append(accr_feature, np.mean(accr_run))\n f1s_feature=np.append(f1s_feature, np.mean(f1s_run))\n accr_MCI_feature=np.append(accr_MCI_feature, np.mean(accr_MCI_run))\n f1s_MCI_feature=np.append(f1s_MCI_feature, np.mean(f1s_MCI_run))\n\n f = open(textfile_name,'a')\n f.write('\\nC=%f. \\n' % C_select )\n f.write('Gamma=%f. 
\\n' % Gamma_select ) \n f.write(\"Runs Max Accuracies: {}\".format(np.max(accr_feature)))\n f.write(\" Happens at:\")\n f.write(str(np.argmax(accr_feature)))\n f.write(\"\\nRuns Max F1s: {}\".format(np.max(f1s_feature)))\n f.write(\" Happens at:\")\n f.write(str(np.argmax(f1s_feature)))\n f.write(\"\\nRuns Max MCI Accuracies: {}\".format(np.max(accr_MCI_feature)))\n f.write(\" Happens at:\")\n f.write(str(np.argmax(accr_MCI_feature)))\n f.write(\"\\nRuns Max MCI F1s: {}\".format(np.max(f1s_MCI_feature)))\n f.write(\" Happens at:\")\n f.write(str(np.argmax(f1s_MCI_feature))) \n\n f.close()\n\n print(\"finished one loop\")\n \n","sub_path":"src_adni/CX_finaldx_prediction_SVM_linear_grid_search.py","file_name":"CX_finaldx_prediction_SVM_linear_grid_search.py","file_ext":"py","file_size_in_byte":7939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"302313502","text":"import numpy as np\nimport pandas as pd\n\nsamsung = np.load('./samsung/data/samsung.npy')\nkospi200 = np.load('./samsung/data/kospi200.npy')\n\nprint(samsung) #(426, 5)\nprint(kospi200) #(426, 5)\nprint(samsung.shape) #(426, 5)\nprint(kospi200.shape) #(426, 5)\n\ndef split_xy5(dataset, time_steps, y_column):\n x, y = list(), list()\n for i in range(len(dataset)):\n x_end_number = i + time_steps \n y_end_number = x_end_number + y_column \n \n if y_end_number > len(dataset) :\n break\n tmp_x = dataset[i:x_end_number, :] # x값(5x5)\n tmp_y = dataset[x_end_number : y_end_number, 3] # y값(종가)\n x.append(tmp_x) \n y.append(tmp_y)\n return np.array(x), np.array(y)\n\nx1, y1 = split_xy5(samsung, 5, 1)\nx2, y2 = split_xy5(kospi200, 5, 1)\nprint(x1.shape) #(421, 5, 5)\nprint(y1.shape) #(421, 1)\nprint(x1[0, :], '\\n', y1[0])\n\n# 데이터셋 나누기\nfrom sklearn.model_selection import train_test_split\nx1_train, x1_test, y1_train, y1_test = train_test_split(x1, y1, random_state = 1, test_size = 0.3, shuffle = False)\nx2_train, x2_test, y2_train, y2_test = train_test_split(x2, y2, random_state = 1, test_size = 0.3, shuffle = False)\n\nprint(x1_train.shape) #(294, 5, 5)\nprint(x1_test.shape) #(127, 5, 5)\nprint(x2_train.shape) #(294, 5, 5)\nprint(x2_test.shape) #(127, 5, 5)\nprint(y1_train.shape) #(294, 5, 5)\nprint(y1_test.shape) #(127, 5, 5)\n\n# 데이터 전처리\n# StandardScaler\nx1_train = x1_train.reshape(x1_train.shape[0], x1_train.shape[1]*x1_train.shape[2])\nx1_test = x1_test.reshape(x1_test.shape[0], x1_test.shape[1]*x1_test.shape[2])\nx2_train = x2_train.reshape(x2_train.shape[0], x2_train.shape[1]*x2_train.shape[2])\nx2_test = x2_test.reshape(x2_test.shape[0], x2_test.shape[1]*x2_test.shape[2])\n\nfrom sklearn.preprocessing import StandardScaler\nscaler = StandardScaler()\nscaler.fit(x1_train)\nx1_train = scaler.transform(x1_train)\nx1_test = scaler.transform(x1_test)\nprint(x1_train[0, :])\n\nscaler.fit(x2_train)\nx2_train = scaler.transform(x2_train)\nx2_test = scaler.transform(x2_test)\nprint(x2_train[0, :])\n\nx1_train = x1_train.reshape(294, 5, 5)\nx1_test = x1_test.reshape(127, 5, 5)\nx2_train = x2_train.reshape(294, 5, 5)\nx2_test = x2_test.reshape(127, 5, 5)\n\n# 모델\nfrom keras.models import Sequential, Model\nfrom keras.layers import Dense, LSTM, Input\n\n# model 1\ninput1 = Input(shape=(5, 5))\ndense1 = LSTM(64)(input1)\ndense1 = Dense(32)(dense1)\ndense1 = Dense(32)(dense1)\ndense1 = Dense(32)(dense1)\noutput1 = Dense(1)(dense1)\n\n# model 2\ninput2 = Input(shape=(5, 5))\ndense2 = LSTM(64)(input2)\ndense2 = Dense(32)(dense2)\ndense2 = Dense(32)(dense2)\ndense2 = 
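The grid-search script above re-ranks features inside every fold with an ExtraTreesClassifier and keeps the top i columns. That per-fold selection, pulled out as a standalone helper (the name and defaults are illustrative):

import numpy as np
from sklearn.ensemble import ExtraTreesClassifier

def top_k_features(X_train, y_train, k, seed=0):
    clf = ExtraTreesClassifier(n_estimators=250, random_state=seed)
    clf.fit(X_train, y_train)
    # Indices of the k most important features, best first.
    return np.argsort(clf.feature_importances_)[::-1][:k]

# idx = top_k_features(X_tr, y_tr, 50); X_tr = X_tr[:, idx]; X_te = X_te[:, idx]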
Dense(32)(dense2)\noutput2 = Dense(1)(dense2)\n\nfrom keras.layers import Concatenate, concatenate\nmerge1 = concatenate([output1, output2])\nmiddle1 = Dense(16)(merge1)\nmiddle2 = Dense(8)(middle1)\noutput = Dense(1)(middle2)\n\nmodel = Model(inputs = [input1, input2], outputs = output)\nmodel.summary()\n\nmodel.compile(loss='mse', optimizer='adam', metrics=['mse']) # mse, mae 사용\nfrom keras.callbacks import EarlyStopping\nearly_stopping = EarlyStopping(patience=20)\nmodel.fit([x1_train, x2_train], y1_train, epochs=50, batch_size = 1, validation_split = 0, callbacks=[early_stopping]) \n\nloss, mse = model.evaluate([x1_test, x2_test], y1_test, batch_size = 1)\nprint('loss:', loss)\n\ny_pred = model.predict([x1_test, x2_test])\n\nfor i in range(5):\n print('종가:', y1_test[i], 'y예측값:', y_pred[i])\n \n# RMSE\nfrom sklearn.metrics import mean_squared_error\ndef RMSE(y_test, y_pred):\n return np.sqrt(mean_squared_error(y1_test, y_pred))\nprint('RMSE : ', RMSE(y1_test, y_pred))","sub_path":"삼성_예측/samsung05_ensemble_LSTM.py","file_name":"samsung05_ensemble_LSTM.py","file_ext":"py","file_size_in_byte":3617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"152243046","text":"from django.contrib import admin\nfrom django.urls import include, path\n\nfrom ftman import views\n\nurlpatterns = [\n # ACME URL for LetsEncrypt\n path(\n \".well-known/acme-challenge/\",\n views.acme_challenge,\n name=\"acme-challenge\",\n ),\n # Tournament URLs\n path(\"\", include(\"tournament.urls\")),\n # Authentication URLs\n path(\"users/\", include(\"django.contrib.auth.urls\")),\n path(\"auth/\", include(\"allauth.urls\")),\n # Admin URLs\n path(\"admin/\", admin.site.urls),\n]\n","sub_path":"ftman/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"321986848","text":"__author__ = 'Robin'\nfrom django.conf.urls import url\nfrom . 
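split_xy5 above slides a 5-day window over the price matrix and takes the following day's closing price as the target. A simplified 1-D version of the same indexing, for intuition (the real function returns a (n, 1) target column rather than a flat vector):

import numpy as np

def make_windows(series, time_steps=5):
    # X[i] = series[i : i + time_steps], y[i] = series[i + time_steps]
    X, y = [], []
    for i in range(len(series) - time_steps):
        X.append(series[i:i + time_steps])
        y.append(series[i + time_steps])
    return np.array(X), np.array(y)

X, y = make_windows(np.arange(10))
print(X.shape, y.shape)  # (5, 5) (5,)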
import views\n\nurlpatterns = [\n url(r'^logout/$', views.logout_page),\n url(r'^register/$', views.register_page),\n url(r'^login/$', 'django.contrib.auth.views.login'),\n url(r'^park', views.park_page),\n url(r'^contact$', views.contact_page),\n url(r'', views.main_page, name='main_page'),\n\n]\n","sub_path":"parkowanie/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"258114896","text":"from fastapi import FastAPI, Depends\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom fastapi.middleware.httpsredirect import HTTPSRedirectMiddleware\nfrom starlette.requests import Request\nfrom fastapi.staticfiles import StaticFiles\nimport uvicorn\n\n\n############################# Routers ###########################################\nfrom app.api.api_v1.routers.pessoas import pessoas_router\nfrom app.api.api_v1.routers.projeto import projeto_router\nfrom app.api.api_v1.routers.experiencia.profissional import (\n experiencia_prof_router,\n)\nfrom app.api.api_v1.routers.experiencia.academica import experiencia_acad_router\nfrom app.api.api_v1.routers.experiencia.projeto import experiencia_proj_router\nfrom app.api.api_v1.routers.habilidade import habilidades_router\nfrom app.api.api_v1.routers.papel import papel_router\nfrom app.api.api_v1.routers.pesquisa.projeto import pesquisa_projeto_router\nfrom app.api.api_v1.routers.pesquisa.pessoa import pesquisa_pessoa_router\nfrom app.api.api_v1.routers.area import area_router\nfrom app.api.api_v1.routers.auth import auth_router\nfrom app.api.api_v1.routers.pessoa_projeto import pessoa_projeto_router\nfrom app.api.api_v1.routers.papel import papel_router\nfrom app.api.api_v1.routers.tipo_acordo import tipo_acordo_router\nfrom app.api.api_v1.routers.experiencia.academica import experiencia_acad_router\nfrom app.api.api_v1.routers.experiencia.projeto import experiencia_proj_router\nfrom app.api.api_v1.routers.habilidade import habilidades_router\nfrom app.api.api_v1.routers.area import area_router\nfrom app.api.api_v1.routers.auth import auth_router\nfrom app.api.api_v1.routers.pesquisa.pessoa import pesquisa_pessoa_router\nfrom app.api.api_v1.routers.pesquisa.projeto import pesquisa_projeto_router\nfrom app.api.api_v1.routers.pessoa_projeto import pessoa_projeto_router\nfrom app.api.api_v1.routers.reacoes import reacoes_router\nfrom app.api.api_v1.routers.notificacao import notificacao_router\n############################# Routers ###########################################\n\n# from app.core import config\n# from app.db.session import SessionLocal\n# from app.core.auth import get_current_active_pessoa\nfrom core import config\nfrom db.session import SessionLocal\nfrom core.auth import get_current_active_pessoa\n\nimport os\n\nDEV_ENV = os.getenv(\"DEV_ENV\")\napp = FastAPI(\n title=config.PROJECT_NAME, docs_url=\"/api/docs\", openapi_url=\"/api\"\n)\n\napp.mount(\"/api/uploads\", StaticFiles(directory=\"uploads\"), name=\"uploads\")\n\n# Go to localhost:8000/api/coverage/index.html to see coverage report\n# app.mount(\"/api/coverage\", StaticFiles(directory=\"htmlcov\"), name=\"htmlcov\")\n\n# Use HTTPS in production\nif not DEV_ENV:\n app.add_middleware(HTTPSRedirectMiddleware)\n origins = [\n \"https://conectar-frontend.vercel.app\",\n \"conectar-frontend.vercel.app\",\n \"https://boraconectar.com\",\n ]\n\n app.add_middleware(\n CORSMiddleware,\n allow_origins=origins,\n allow_credentials=True,\n allow_methods=[\"*\"],\n 
allow_headers=[\"Content-Type\", \"Accept\", \"authorization\"],\n )\n\n\n@app.middleware(\"http\")\nasync def db_session_middleware(request: Request, call_next):\n request.state.db = SessionLocal()\n response = await call_next(request)\n request.state.db.close()\n return response\n\n\n# Routers\napp.include_router(\n pessoas_router,\n prefix=\"/api/v1\",\n tags=[\"pessoas\"]\n)\n\napp.include_router(\n projeto_router,\n prefix=\"/api/v1\",\n tags=[\"projeto\"],\n)\n\napp.include_router(\n experiencia_prof_router,\n prefix=\"/api/v1\",\n tags=[\"experiencia profissional\"],\n dependencies=[Depends(get_current_active_pessoa)],\n)\n\napp.include_router(\n experiencia_acad_router,\n prefix=\"/api/v1\",\n tags=[\"experiencia academica\"],\n dependencies=[Depends(get_current_active_pessoa)],\n)\n\napp.include_router(\n experiencia_proj_router,\n prefix=\"/api/v1\",\n tags=[\"experiencia projeto\"],\n dependencies=[Depends(get_current_active_pessoa)],\n)\n\napp.include_router(\n area_router,\n prefix=\"/api/v1\",\n tags=[\"area\"],\n dependencies=[Depends(get_current_active_pessoa)],\n)\n\napp.include_router(\n habilidades_router,\n prefix=\"/api/v1\",\n tags=[\"habilidade\"],\n dependencies=[Depends(get_current_active_pessoa)],\n)\n\napp.include_router(\n papel_router,\n prefix=\"/api/v1\",\n tags=[\"papel\"],\n dependencies=[Depends(get_current_active_pessoa)],\n)\n\napp.include_router(\n pesquisa_projeto_router,\n prefix=\"/api/v1\",\n tags=[\"pesquisa_projeto\"],\n)\n\napp.include_router(\n pesquisa_pessoa_router,\n prefix=\"/api/v1\",\n tags=[\"pesquisa_pessoa\"],\n)\n\napp.include_router(\n pessoa_projeto_router,\n prefix=\"/api/v1\",\n tags=[\"pessoa_projeto\"],\n)\n\napp.include_router(\n notificacao_router,\n prefix=\"/api/v1\",\n tags=[\"notificacao\"],\n)\n\napp.include_router(\n tipo_acordo_router,\n prefix=\"/api/v1\",\n tags=[\"tipo_acordo\"],\n)\n\napp.include_router(\n reacoes_router,\n prefix=\"/api/v1\",\n tags=[\"reacoes\"]\n)\n\napp.include_router(auth_router, prefix=\"/api\", tags=[\"auth\"])\n\nif __name__ == \"__main__\":\n uvicorn.run(\"main:app\", host=\"0.0.0.0\", reload=True, port=8888)","sub_path":"{{cookiecutter.project_slug}}/backend/app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"604972680","text":" # -*- coding: utf-8 -*-\r\n\"\"\"\r\nweather\r\nu:cd \r\np:b\r\n\"\"\"\r\n\r\nfrom flask import Flask, render_template, request\r\nimport json\r\nimport requests\r\nimport sqlite3\r\n\r\n#list of city codes\r\ndata_file = open('countries.json')\r\ndata = json.load(data_file)\r\ndata_file.close()\r\n#print json.dumps(data)\r\n\r\nf=\"cities.db\"\r\ndb = sqlite3.connect(f, check_same_thread = False) #open\r\nc = db.cursor()\r\n\r\n#get a json from an url\r\ndef getjson(url):\r\n result = requests.get(url).json()\r\n return result\r\n \r\nkey = \"058432795340dade2316e429dcb44099\"\r\n\r\napp = Flask(__name__)\r\n\r\n@app.route('/')\r\ndef hello_world():\r\n return render_template(\"countrylist.html\", lst=sorted(data))\r\n \r\n \r\n@app.route('/cities')\r\ndef cities():\r\n country= request.args[\"country\"]\r\n command = \"SELECT * FROM \" + country\r\n c.execute(command)\r\n temp = c.fetchall()\r\n return render_template(\"citylist.html\", lst=temp, country=request.args[\"country\"])\r\n \r\n@app.route('/query')\r\ndef query():\r\n #create a link\r\n link = \"http://api.openweathermap.org/data/2.5/weather?id=\" \\\r\n + request.args[\"id\"] 
+ \"&appid=\" + key\r\n jsn = requests.get(link).json()\r\n '''Test\r\n jsn = requests.get(\"http://samples.openweathermap.org/data/2.5/weather?id=2172797&appid=b1b15e88fa797225412429c1c50c122a1\").json()\r\n ''' \r\n return render_template(\"weather.html\", entry=jsn)\r\n\r\nif __name__ == \"__main__\":\r\n app.debug = True\r\n app.run()\r\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"176723965","text":"import dialogflow\nimport numpy as np\nimport os\nimport random\n\nfrom collections import OrderedDict\nfrom flask import Flask\nfrom flask import render_template\nfrom flask import session\nfrom flask import send_from_directory\nfrom flask_cors import CORS\nfrom flask_socketio import disconnect\nfrom flask_socketio import emit\nfrom flask_socketio import SocketIO\nfrom google.cloud import speech\nfrom google.cloud import texttospeech\nfrom google.cloud.speech import enums\nfrom google.cloud.speech import types\nfrom scipy.io import wavfile\n\n\napp = Flask(__name__, static_url_path='')\nCORS(app)\napp.config['SECRET_KEY'] = 'rezzi secrets'\nsocketio = SocketIO(app, async_mode='eventlet')\n\n\nPROJECT_ID = 'rezzi-72934'\nSESSION_ID = 44\nSAMPLE_RATE = 44100\nLANGUAGE = 'en'\nAUDIO_OUTPUT_PATH = os.path.join(os.path.abspath('./static/audio'), 'out.wav')\nAUDIO_RESPONSE_BASE = os.path.abspath('./static/audio')\n\nSESSION_CLIENT = dialogflow.SessionsClient()\nSESSION = SESSION_CLIENT.session_path(PROJECT_ID, SESSION_ID)\n\nDATA = {}\n\n\n@app.route('/')\ndef index():\n return render_template('rezzi.html')\n\n\n@app.route('/static/audio/<path:path>') # the <path:path> converter is required so Flask passes the filename argument\ndef send_mp3(path):\n return send_from_directory('static', path)\n\n\n@socketio.on('connect')\ndef connect():\n session['audio'] = []\n\n\n@socketio.on('sample_rate')\ndef handle_my_sample_rate(sampleRate):\n session['sample_rate'] = sampleRate\n\n\n@socketio.on('audio')\ndef handle_my_custom_event(audio):\n values = OrderedDict(\n sorted(audio.items(), key=lambda t: int(t[0]))).values()\n session['audio'] += values\n\n\n@socketio.on('disconnect_request')\ndef test_disconnect():\n sample_rate = session['sample_rate']\n my_audio = np.array(session['audio'], np.float32)\n sindata = np.sin(my_audio)\n scaled = np.round(32767 * sindata)\n newdata = scaled.astype(np.int16)\n wavfile.write(AUDIO_OUTPUT_PATH, sample_rate, newdata)\n\n query, params, fulfillment = detect_intent_audio(sample_rate=sample_rate)\n for p_k, p_v in params.items():\n if p_k in [\"language\", \"jobs\", \"date-period\"]:\n DATA[p_k] = str(p_v)\n else:\n DATA[p_k] = [p_v]\n\n print(\"NEW DATA\", DATA)\n\n filename = synthesize_text(fulfillment)\n emit('my_response', {'data': f\"/audio/{filename}\", 'query': query,\n 'answer': fulfillment})\n session['audio'] = []\n disconnect()\n\n\ndef speech_to_text(sample_rate=SAMPLE_RATE, file_name=AUDIO_OUTPUT_PATH):\n client = speech.SpeechClient()\n\n with open(file_name, 'rb') as audio_file:\n content = audio_file.read()\n audio = types.RecognitionAudio(content=content)\n\n config = types.RecognitionConfig(\n encoding=enums.RecognitionConfig.AudioEncoding.LINEAR16,\n sample_rate_hertz=sample_rate,\n language_code='en-US')\n\n response = client.recognize(config, audio)\n\n text = []\n for result in response.results:\n text.append(result.alternatives[0].transcript)\n print('Transcript: {}'.format(result.alternatives[0].transcript))\n\n return text\n\n\ndef detect_intent_audio(project_id=PROJECT_ID, 
session_id=SESSION_ID,\n audio_file_path=AUDIO_OUTPUT_PATH,\n language_code=LANGUAGE, sample_rate=SAMPLE_RATE):\n \"\"\"Returns the result of detect intent with an audio file as input.\n\n Using the same `session_id` between requests allows continuation\n of the conversation.\n \"\"\"\n\n # Note: hard coding audio_encoding and sample_rate_hertz for simplicity.\n audio_encoding = dialogflow.enums.AudioEncoding.AUDIO_ENCODING_LINEAR_16\n sample_rate_hertz = sample_rate\n\n with open(audio_file_path, 'rb') as audio_file:\n input_audio = audio_file.read()\n\n audio_config = dialogflow.types.InputAudioConfig(\n audio_encoding=audio_encoding, language_code=language_code,\n sample_rate_hertz=sample_rate_hertz)\n query_input = dialogflow.types.QueryInput(audio_config=audio_config)\n\n response = SESSION_CLIENT.detect_intent(\n session=SESSION, query_input=query_input,\n input_audio=input_audio)\n\n print('=' * 20)\n print('Query text: {}'.format(response.query_result.query_text))\n print('Detected intent: {} (confidence: {})\\n'.format(\n response.query_result.intent.display_name,\n response.query_result.intent_detection_confidence))\n print('Fulfillment text: {}\\n'.format(\n response.query_result.fulfillment_text))\n\n return (response.query_result.query_text, response.query_result.parameters,\n response.query_result.fulfillment_text)\n\n\ndef detect_intent_texts(project_id=PROJECT_ID, session_id=SESSION_ID,\n texts=['hello'], language_code=LANGUAGE):\n \"\"\"Returns the result of detect intent with texts as inputs.\n\n Using the same `session_id` between requests allows continuation\n of the conversation.\n \"\"\"\n session_client = dialogflow.SessionsClient()\n\n session = session_client.session_path(project_id, session_id)\n print('Session path: {}\\n'.format(session))\n\n for text in texts:\n text_input = dialogflow.types.TextInput(\n text=text, language_code=language_code)\n\n query_input = dialogflow.types.QueryInput(text=text_input)\n\n response = session_client.detect_intent(\n session=session, query_input=query_input)\n\n print('=' * 20)\n print('Query text: {}'.format(response.query_result.query_text))\n print('Detected intent: {} (confidence: {})\\n'.format(\n response.query_result.intent.display_name,\n response.query_result.intent_detection_confidence))\n print('Fulfillment text: {}\\n'.format(\n response.query_result.fulfillment_text))\n\n\ndef synthesize_text(text):\n \"\"\"Synthesizes speech from the input string of text.\"\"\"\n client = texttospeech.TextToSpeechClient()\n\n input_text = texttospeech.types.SynthesisInput(text=text)\n\n # Note: the voice can also be specified by name.\n # Names of voices can be retrieved with client.list_voices().\n voice = texttospeech.types.VoiceSelectionParams(\n language_code='en-US',\n ssml_gender=texttospeech.enums.SsmlVoiceGender.FEMALE)\n\n audio_config = texttospeech.types.AudioConfig(\n audio_encoding=texttospeech.enums.AudioEncoding.MP3)\n\n response = client.synthesize_speech(input_text, voice, audio_config)\n\n filename = f'resp_{random.randint(0, 999999)}.mp3'\n filepath = os.path.join(AUDIO_RESPONSE_BASE, filename)\n\n with open(filepath, 'wb') as out:\n out.write(response.audio_content)\n print(f'Audio content written to file {filepath}')\n\n return filename\n\n\nif __name__ == '__main__':\n socketio.run(app, debug=True)\n","sub_path":"pythonScripts/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} 
+{"seq_id":"602039900","text":"# -*- coding: utf-8 -*-\n\"\"\"\nProduce a report of the daily revenue.\nInput is the file produced by clean.py.\n\"\"\"\nimport argparse\nimport datetime as dt\nimport os.path\nimport pandas as pd\nimport sys\n\nOUTDIR = '/Users/mlg/pyprj/hrm/results/tickets'\n\n\ndef getargs():\n parser = argparse.ArgumentParser(\n description='''\n Produce a report of the daily revenue.\n Input is the file produced by clean.py.\n ''')\n parser.add_argument('infile', help='''\n The CSV file that has been cleaned by tickets.clean.py''')\n parser.add_argument('-o', '--outdir', default=OUTDIR,\n help='''Directory to contain the\n output report file. If omitted, the default is the directory\n \"~/pyprj/hrm/results/tickets\".\n ''')\n parser.add_argument('-m', '--month', type=int,\n choices=list(range(1, 13)), help='''\n If specified, limit reporting to the given month in the current year.\n If the month specified is greater than the current month, the year is\n last year. This is only used in January when reporting the December data.\n ''')\n args = parser.parse_args()\n if args.month:\n today = dt.date.today()\n args.year = today.year\n if args.month > today.month:\n args.year -= 1\n return args\n\n\ndef main(args):\n incsvfile = args.infile\n basename = 'tickets_'\n if args.month:\n basename += f'{args.year:04d}-{args.month:-02d}'\n basename += '_daily.xlsx'\n outreport = os.path.join(args.outdir, basename)\n df = pd.read_csv(incsvfile,\n usecols=(0, 1, 2, 4),\n names='date quantity type totprice'.split(),\n index_col=False)\n df.date = pd.to_datetime(df.date, format='%d/%m/%Y')\n if args.month:\n m = df.date.dt.month\n y = df.date.dt.year\n df = df[(m == args.month) & (y == args.year)]\n assert len(df.date) > 0, f'No data in {args.year}-{args.month:02}.'\n g = df.groupby(['date', 'type'])\n gg = g.sum().unstack().fillna('')\n gg['datetot'] = df.groupby('date').sum()['totprice']\n print(f\"Writing to: {outreport}.\")\n gg.to_excel(outreport)\n\n\nif __name__ == '__main__':\n assert sys.version_info >= (3, 8)\n _args = getargs()\n main(_args)\n print('End daily.')\n","sub_path":"src/tickets/daily.py","file_name":"daily.py","file_ext":"py","file_size_in_byte":2328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"326713662","text":"###############################################################################\n# Author: Wasi Ahmad\n# Project: Learning Vision to Language\n# Date Created: 4/02/2017\n#\n# File Description: This script contains code related to the sequence-to-sequence\n# network.\n###############################################################################\n\nimport torch, helper, nn_layer\nimport torch.nn as nn\nfrom torch.autograd import Variable\n\n\nclass Sequence2Sequence(nn.Module):\n \"\"\"Sequence-to-sequence network with attention.\"\"\"\n\n def __init__(self, dictionary, embedding_index, args):\n \"\"\"Constructor of the class.\"\"\"\n super(Sequence2Sequence, self).__init__()\n self.dictionary = dictionary\n self.embedding_index = embedding_index\n self.config = args\n self.embedding = nn_layer.EmbeddingLayer(len(dictionary), self.config.emsize, self.config.dropout)\n self.encoder = nn_layer.RNN(self.config.model, self.config.emsize, self.config.nhid, self.config.nlayers,\n self.config.dropout, True)\n self.decoder = nn_layer.RNN(self.config.model, self.config.emsize + self.config.nhid, self.config.nhid,\n self.config.nlayers, self.config.dropout)\n self.attention = 
nn_layer.ApplyAttention(len(dictionary), self.config.nhid)\n\n # Initializing the weight parameters for the embedding layer.\n self.embedding.init_embedding_weights(self.dictionary, self.embedding_index, self.config.emsize)\n\n @staticmethod\n def compute_loss(logits, target, seq_idx, length, regularization_param=None):\n # logits: batch x vocab_size, target: batch x 1\n losses = -torch.gather(logits, dim=1, index=target.unsqueeze(1))\n # mask: batch x 1\n mask = helper.mask(length, seq_idx)\n losses = losses * mask.float()\n num_non_zero_elem = torch.nonzero(mask.data).size()\n if not num_non_zero_elem:\n loss = losses.sum()\n else:\n loss = losses.sum() / num_non_zero_elem[0]\n if regularization_param:\n regularized_loss = logits.exp().mul(logits).sum(1).squeeze() * regularization_param\n loss += regularized_loss.mean()\n return loss\n\n def forward(self, batch_sentence1, batch_sentence2, length):\n \"\"\"Defines the forward computation of the sequence-to-sequence network.\"\"\"\n embedded = self.embedding(batch_sentence1)\n if self.config.model == 'LSTM':\n init_hidden, init_cell = self.encoder.init_weights(batch_sentence1.size(0))\n encoder_output, encoder_hidden = self.encoder(embedded, (init_hidden, init_cell))\n else:\n init_hidden = self.encoder.init_weights(batch_sentence1.size(0))\n encoder_output, encoder_hidden = self.encoder(embedded, init_hidden)\n\n if self.config.bidirection:\n encoder_hidden = torch.mean(encoder_hidden[0], 0), torch.mean(encoder_hidden[1], 0)\n encoder_output = torch.div(\n torch.add(encoder_output[:, :, 0:self.config.nhid],\n encoder_output[:, :, self.config.nhid:2 * self.config.nhid]), 2)\n\n # Initialize hidden states of decoder with the last hidden states of the encoder\n decoder_hidden = encoder_hidden\n context_vector = Variable(torch.zeros(batch_sentence2.size(0), 1, self.config.nhid))\n if self.config.cuda:\n context_vector = context_vector.cuda()\n\n loss = 0\n for idx in range(batch_sentence2.size(1) - 1):\n # Use the real target outputs as each next input (teacher forcing)\n input_variable = batch_sentence2[:, idx]\n target_variable = batch_sentence2[:, idx + 1]\n\n embedded_input = self.embedding(input_variable).unsqueeze(1)\n embedded_input = torch.cat((embedded_input, context_vector), 2)\n decoder_output, decoder_hidden = self.decoder(embedded_input, decoder_hidden)\n output, context_vector, attn_weights = self.attention(decoder_output, encoder_output)\n loss += self.compute_loss(output, target_variable, idx, length)\n\n return loss\n","sub_path":"seq_to_seq_model/seq2seq.py","file_name":"seq2seq.py","file_ext":"py","file_size_in_byte":4178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"606183265","text":"# from linkedlist import LinkedList\r\nclass LinkedList:\r\n def __init__(self):\r\n self.length = 0\r\n self.head = None\r\n self.tail = self.head\r\n \r\n def printList(self):\r\n values = []\r\n curNode = self.head\r\n while curNode != None:\r\n values.append(curNode['value'])\r\n curNode = curNode['next']\r\n return values\r\n def append(self, value):\r\n node = {\r\n 'value' : value,\r\n 'next' : None\r\n }\r\n if self.length == 0:\r\n self.head = node\r\n self.tail = node\r\n else:\r\n self.tail['next'] = node\r\n self.tail = node\r\n self.length += 1\r\n def prepend(self, value):\r\n node = {\r\n 'value' : value,\r\n 'next' : None\r\n }\r\n if self.length == 0:\r\n self.tail = node\r\n self.head = node\r\n else:\r\n node['next'] = self.head\r\n self.head = node\r\n self.length += 
1\r\n def insert(self, index, value):\r\n if self.length == 0:\r\n return self.prepend(value)\r\n if index == self.length:\r\n return self.append(value)\r\n prevNode = None\r\n curNode = self.head\r\n node = {\r\n 'value' : value,\r\n 'next' : None\r\n }\r\n i = 0\r\n while i < index:\r\n prevNode = curNode\r\n curNode = curNode['next']\r\n i+=1\r\n if prevNode == None:\r\n node['next'] = self.head\r\n self.head = node\r\n else:\r\n if index == self.length:\r\n return self.append(value)\r\n node['next'] = curNode\r\n prevNode['next'] = node\r\n self.length+=1 \r\n def remove(self, index):\r\n i = 0\r\n curNode = self.head\r\n prevNode = None\r\n while i < index:\r\n prevNode = curNode\r\n curNode = curNode['next']\r\n i += 1\r\n if index == self.length:\r\n self.tail = curNode\r\n elif prevNode == None:\r\n self.head = self.head['next']\r\n else:\r\n prevNode['next'] = curNode['next']\r\n curNode = curNode['next']\r\n self.length-=1\r\n\r\n def delete_by_value(self, value):\r\n prevNode = None\r\n curNode = self.head\r\n if self.head['value'] == value:\r\n self.head = self.head['next']\r\n else:\r\n prevNode = curNode\r\n curNode = curNode['next']\r\n while curNode:\r\n if curNode['value'] == value:\r\n prevNode['next'] = curNode['next']\r\n curNode = curNode['next']\r\n self.length-=1\r\n break\r\n else:\r\n prevNode = curNode\r\n curNode = curNode['next']\r\n\r\n def get(self, value):\r\n if self.length == 0:\r\n return\r\n curNode = self.head\r\n while curNode:\r\n if curNode['value'] == value:\r\n return curNode\r\n \r\n curNode = curNode['next']\r\n return None\r\n\r\n def reverse(self):\r\n if self.length == 1:\r\n return self.head\r\n first = self.head\r\n second = first['next']\r\n while second:\r\n third = second['next']\r\n second['next'] = first\r\n first = second\r\n second = third\r\n self.head['next'] = None\r\n self.head = first \r\n def get_by_index(self,index):\r\n curNode = self.head\r\n while index>0:\r\n curNode = curNode['next']\r\n index -= 1\r\n return curNode['value']\r\n def remove_all(self):\r\n self.head = None\r\n self.tail = None\r\n self.length = 0\r\n\r\n\r\n\r\nclass Stack:\r\n def __init__(self):\r\n self.ll = LinkedList()\r\n def peek(self):\r\n return self.ll.head\r\n def push(self,value):\r\n self.ll.prepend(value)\r\n def pop(self):\r\n self.ll.remove(0)\r\n def get(self):\r\n stack = {\r\n 'top':self.ll.head,\r\n 'bottom':self.ll.tail,\r\n 'length':self.ll.length\r\n }\r\n return stack\r\n\r\n\r\n\r\n \r\n\r\n\r\n\r\n","sub_path":"stack_impl_using_linkedlists.py","file_name":"stack_impl_using_linkedlists.py","file_ext":"py","file_size_in_byte":4209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"313862510","text":"import numpy as np\nimport math\nimport cv2\nimport torch\n\nimage_normalize_mean = [0.485, 0.456, 0.406]\nimage_normalize_std = [0.229, 0.224, 0.225]\n\n\nclass Utils(object):\n def __init__(self):\n pass\n\n @staticmethod\n def get_angle(center_coor, coor2, coor3):\n L1 = Utils.cal_dis(coor2,coor3)\n L2 = Utils.cal_dis(center_coor,coor3)\n L3 = Utils.cal_dis(center_coor,coor2)\n Angle = Utils.cal_angle(L1,L2,L3)\n return Angle\n\n @staticmethod\n def cal_dis(coor1, coor2):\n out = np.square(coor1[0] - coor2[0]) + np.square(coor1[1] - coor2[1])\n return np.sqrt(out)\n\n @staticmethod\n def cal_angle(L1, L2, L3):\n out = (np.square(L2) + np.square(L3) - np.square(L1)) / (2 * L2 * L3)\n try:\n return math.acos(out) * (180 / math.pi)\n except ValueError:\n return 180\n\n @staticmethod\n def 
image_normalize(image, size=224):\n image_array = cv2.resize(image, (size, size))\n image_array = np.ascontiguousarray(image_array[..., ::-1], dtype=np.float32)\n image_array = image_array.transpose((2, 0, 1))\n for channel, _ in enumerate(image_array):\n image_array[channel] /= 255.0\n image_array[channel] -= image_normalize_mean[channel]\n image_array[channel] /= image_normalize_std[channel]\n image_tensor = torch.from_numpy(image_array).float()\n return image_tensor\n\n\ndef cut_image(img, bottom=0, top=0, left=0, right=0):\n height, width = img.shape[0], img.shape[1]\n return np.asarray(img[top: height - bottom, left: width - right])\n\n\ndef box2str(boxes):\n string = \"\"\n for box in boxes:\n sub_str = \"\"\n for coor in box:\n sub_str += str(coor)\n sub_str += \" \"\n string += sub_str[:-1]\n string += \",\"\n return string[:-1]\n\n\ndef str2box(string):\n if string == \"\":\n return None\n tmp = string.split(\",\")\n boxes = []\n for item in tmp:\n boxes.append([float(i) for i in item.split(\" \")])\n return boxes\n\ndef score2str(scores):\n if isinstance(scores, float):\n return str(scores)\n string = \"\"\n for s in scores:\n string += str(s)\n string += \",\"\n return string[:-1]\n\n\ndef str2score(string):\n if string == \"\":\n return None\n return [float(item) for item in string.split(\",\")]\n\n\ndef write_file(res, box_f, score_f):\n (_, box, score) = res\n if box is not None:\n box_str = box2str(box.tolist())\n score_str = score2str(score.squeeze().tolist())\n box_f.write(box_str)\n box_f.write(\"\\n\")\n score_f.write(score_str)\n score_f.write(\"\\n\")\n else:\n box_f.write(\"\\n\")\n score_f.write(\"\\n\")\n\n\nif __name__ == '__main__':\n ut = Utils()\n # res = ut.time_to_string(\"10.0000\")\n # print(res)\n _ = ut.get_angle([0, 0], [1, -1], [0, 1])\n print(_)","sub_path":"utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"150139504","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom collections import deque\n\n\nclass SearchManager:\n '''The SearchManager combines the data from the Flowfactory and the\n MetaManager to guarantee best search results based on queries.\n\n Queries will be logical AND-combined. 
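A query part without a tag matches when any of the fields below accepts it (logical OR).\n 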
The type of the query can\n be written with a colon followed by the tag.\n\n :as=Foerder\n :asn=2323\n :bgp=123.123.123.123/24\n :src_ip=10.0.0.1\n :dst_ip=10.0.0.1\n :sport=12345\n :dport=80\n :port=2323\n :rating=upload\n :file=my-snip.pcap\n '''\n\n def __init__(self, flow_factory, meta_manager, flow_store):\n self.flow_factory = flow_factory\n self.meta_manager = meta_manager\n self.flow_store = flow_store\n self._history = deque([], maxlen=5)\n\n def _data_pair_dict(self):\n 'Returns a dict of pcap_path -> (MetaFlow, Ratings)'\n mm_data = self.meta_manager.meta_store.copy()\n ff_data = map(lambda f: ('{}/{}'.format(self.flow_store, f[0]), f[1]),\n self.flow_factory.all_flows(self.flow_store))\n\n for ff in ff_data:\n if ff[0] not in mm_data:\n print('{} does not appear in the MetaManager!'.format(ff[0]))\n continue\n mm_data.update({ff[0]: (mm_data[ff[0]], ff[1])})\n\n for k, v in list(mm_data.items()):\n if type(v) is not tuple:\n print('{} has no partner in the Flowfactory!'.format(k))\n del mm_data[k]\n\n return mm_data\n\n def data_pairs(self):\n 'Returns a list of (MetaFlow, Ratings) for each (stored) pcap.'\n return self._data_pair_dict().values()\n\n @staticmethod\n def _query_splitter(query):\n 'Creates a list of tuples (key, query) for search keywords.'\n import re\n\n pattern = re.compile(':([a-z_]+)=(\\\S+)')\n for part in re.split('\\\s+', query):\n part_match = pattern.search(part)\n if part_match:\n yield part_match.group(1, 2)\n else:\n yield (None, part)\n\n @staticmethod\n def _filter_fun(data_pair, queries):\n 'Checks if a data_pair matches a query from _query_splitter.'\n chks = {\n # str/None -> ((data_pair, query) -> bool)\n 'as': lambda dp, q: dp[0].as_name and q in dp[0].as_name,\n 'asn': lambda dp, q: str(dp[0].asn) == q,\n 'bgp': lambda dp, q: dp[0].bgp_prefix and q in dp[0].bgp_prefix,\n 'src_ip': lambda dp, q: q in dp[0].src_ip,\n 'dst_ip': lambda dp, q: q in dp[0].dst_ip,\n 'ip': lambda dp, q: q in dp[0].dst_ip or q in dp[0].src_ip,\n 'sport': lambda dp, q: str(dp[0].src_port) == q,\n 'dport': lambda dp, q: str(dp[0].dst_port) == q,\n 'port': lambda dp, q: str(dp[0].src_port) == q or str(dp[0].dst_port) == q,\n 'rating': lambda dp, q: q in dp[1],\n 'file': lambda dp, q: q in dp[0].filename\n }\n\n for query in queries:\n if query[0] in chks.keys():\n chk = chks[query[0]]\n if not chk(data_pair, query[1]):\n return False\n else:\n flag = False\n for chk in chks.values():\n if chk(data_pair, query[1]):\n flag = True\n if not flag:\n return False\n return True\n\n def search(self, query):\n 'Returns a list of matching data_pairs for a query.'\n if query and query not in self.history():\n self._history.appendleft(query)\n\n queries = list(self._query_splitter(query))\n return filter(lambda dp: self._filter_fun(dp, queries), self.data_pairs())\n\n def history(self):\n 'Returns the search history for the last entries.'\n return list(self._history)\n","sub_path":"traffic_ui/searchmanager.py","file_name":"searchmanager.py","file_ext":"py","file_size_in_byte":3904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"6943924","text":"# -*- coding: utf-8 -*-\n\n\"\"\"PyOBO's Mapping Service.\n\nRun with ``python -m pyobo.apps.mapper``.\n\"\"\"\n\nimport os\nfrom functools import lru_cache\nfrom typing import Iterable, List, Mapping, Optional, Union\n\nimport click\nimport networkx as nx\nimport pandas as pd\nfrom flasgger import Swagger\nfrom flask import Blueprint, Flask, current_app, jsonify, render_template, url_for\nfrom 
flask_bootstrap import Bootstrap\nfrom werkzeug.local import LocalProxy\n\nfrom pyobo.apps.utils import gunicorn_option, host_option, port_option, run_app\nfrom pyobo.cli_utils import verbose_option\nfrom pyobo.identifier_utils import normalize_curie, normalize_prefix\nfrom pyobo.xrefdb.xrefs_pipeline import (\n Canonicalizer, all_shortest_paths, get_graph_from_xref_df, get_xref_df, single_source_shortest_path,\n summarize_xref_df,\n)\n\n__all__ = [\n 'get_app',\n 'main',\n]\n\nsummary_df = LocalProxy(lambda: current_app.config['summary'])\ngraph: nx.Graph = LocalProxy(lambda: current_app.config['graph'])\ncanonicalizer: Canonicalizer = LocalProxy(lambda: current_app.config['canonicalizer'])\n\n\n@lru_cache()\ndef _single_source_shortest_path(curie: str) -> Optional[Mapping[str, List[Mapping[str, str]]]]:\n return single_source_shortest_path(graph=graph, curie=curie)\n\n\n@lru_cache()\ndef _all_shortest_paths(source_curie: str, target_curie: str) -> List[List[Mapping[str, str]]]:\n return all_shortest_paths(graph=graph, source_curie=source_curie, target_curie=target_curie)\n\n\n#: The blueprint that gets added to the app\nsearch_blueprint = Blueprint('search', __name__)\n\n\n@search_blueprint.route('/')\ndef home():\n \"\"\"Show the home page.\"\"\"\n return render_template('mapper_home.html')\n\n\n@search_blueprint.route('/mappings/')\ndef single_source_mappings(curie: str):\n \"\"\"Return all length xrefs from the given identifier.\"\"\"\n if curie not in graph:\n return jsonify(\n success=False,\n query=dict(curie=curie),\n message='could not find curie',\n )\n return jsonify(_single_source_shortest_path(curie))\n\n\n@search_blueprint.route('/mappings//')\ndef all_mappings(source_curie: str, target_curie: str):\n \"\"\"Return all shortest paths of xrefs between the two identifiers.\"\"\"\n if source_curie not in graph:\n return jsonify(\n success=False,\n query=dict(source_curie=source_curie, target_curie=target_curie),\n message='could not find source curie',\n )\n if target_curie not in graph:\n return jsonify(\n success=False,\n query=dict(source_curie=source_curie, target_curie=target_curie),\n message='could not find target curie',\n )\n\n return jsonify(_all_shortest_paths(source_curie, target_curie))\n\n\n@search_blueprint.route('/mappings/summarize')\ndef summarize():\n \"\"\"Summarize the mappings.\"\"\"\n return summary_df.to_html(index=False)\n\n\n@search_blueprint.route('/mappings/summarize_by/')\ndef summarize_one(prefix: str):\n \"\"\"Summarize the mappings.\"\"\"\n prefix = normalize_prefix(prefix)\n in_df = summary_df.loc[summary_df['target_ns'] == prefix, ['source_ns', 'count']]\n out_df = summary_df.loc[summary_df['source_ns'] == prefix, ['target_ns', 'count']]\n return f'''\n
 <h2>Incoming Mappings to {prefix}</h2>\n {in_df.to_html(index=False)}\n <h2>Outgoing Mappings from {prefix}</h2>
\n {out_df.to_html(index=False)}\n '''\n\n\n@search_blueprint.route('/canonicalize/')\ndef canonicalize(curie: str):\n \"\"\"Return the best CURIE.\"\"\"\n # TODO maybe normalize the curie first?\n norm_prefix, norm_identifier = normalize_curie(curie)\n if norm_prefix is None or norm_identifier is None:\n return jsonify(\n query=curie,\n normalizable=False,\n )\n\n norm_curie = f'{norm_prefix}:{norm_identifier}'\n\n rv = dict(query=curie)\n if norm_curie != curie:\n rv['norm_curie'] = norm_curie\n\n if norm_curie not in graph:\n rv['found'] = False\n else:\n result_curie = canonicalizer.canonicalize(norm_curie)\n rv.update(\n found=True,\n result=result_curie,\n mappings=url_for(\n f'.{all_mappings.__name__}',\n source_curie=norm_curie,\n target_curie=result_curie,\n ),\n )\n\n return jsonify(rv)\n\n\ndef get_app(paths: Union[None, str, Iterable[str]] = None) -> Flask:\n \"\"\"Build the Flask app.\"\"\"\n if paths is None:\n paths = os.path.join(os.path.expanduser('~'), 'Desktop', 'all_xrefs.tsv')\n if os.path.exists(paths):\n df = pd.read_csv(paths, sep='\\t', dtype=str)\n else:\n df = get_xref_df()\n df.to_csv(paths, sep='\\t', index=False)\n elif isinstance(paths, str):\n df = pd.read_csv(paths, sep='\\t', dtype=str)\n else:\n df = pd.concat(\n pd.read_csv(path, sep='\\t', dtype=str)\n for path in paths\n )\n\n df['source_ns'] = df['source_ns'].map(normalize_prefix)\n df['target_ns'] = df['target_ns'].map(normalize_prefix)\n return _get_app_from_xref_df(df)\n\n\ndef _get_app_from_xref_df(df: pd.DataFrame):\n app = Flask(__name__)\n Swagger(app)\n Bootstrap(app)\n app.config['summary'] = summarize_xref_df(df)\n app.config['graph'] = get_graph_from_xref_df(df)\n # TODO allow for specification of priorities in the canonicalizer\n app.config['canonicalizer'] = Canonicalizer(graph=app.config['graph'])\n app.register_blueprint(search_blueprint)\n return app\n\n\n@click.command()\n@click.option('-x', '--mappings-file')\n@port_option\n@host_option\n@gunicorn_option\n@verbose_option\ndef main(mappings_file, host: str, port: int, gunicorn: bool):\n \"\"\"Run the mappings app.\"\"\"\n app = get_app(mappings_file)\n run_app(app=app, host=host, port=port, gunicorn=gunicorn)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"src/pyobo/apps/mapper/mapper.py","file_name":"mapper.py","file_ext":"py","file_size_in_byte":5873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"355387912","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 8 20:51:50 2018\n\n@author: john\n\"\"\"\n\nimport numpy as np\nimport tensorflow as tf\n\nX_train = np.load('omniglot/X_train.npy')\ny_train = np.load('omniglot/y_train.npy')\nX_test = np.load('omniglot/X_test.npy')\ny_test = np.load('omniglot/y_test.npy')\n\nX_train = np.expand_dims(X_train,axis=3)\nX_test = np.expand_dims(X_test,axis=3)\n\n\n#n_select = int(.1*len(X_train))\n#\n#list_select = list(range(n_select))\n#np.random.shuffle(list_select)\n#X_train = X_train[list_select]\n#y_train = y_train[list_select]\n\nn_train = X_train.shape[0]\nn_test = X_test.shape[0]\n\n\nbatch_size_train = 32\nbatch_size_test = 16\n\n\nn_batch_test = int(n_test/batch_size_test)\n\ninds_train = list(range(n_train))\ninds_test = list(range(n_test))\n\nn_epoch = 1000\n###############################################################################\ntf.reset_default_graph()\nsess = tf.Session()\n\nX1 = tf.placeholder(tf.float32,[None,105,105,1],name = 'X1')\nX2 = 
tf.placeholder(tf.float32,[None,105,105,1],name = 'X2')\ny = tf.placeholder(tf.float32,[None],name = 'y')\n\n##############################################################################\nn_filter1 = 16\nkernel1 = 3\nn_filter2 = 16 \nkernel2 = 3\nn_filter_end = n_filter2\nn_final_features = 16\n \nwith tf.variable_scope('conv'):\n \n conv1X1 = tf.layers.conv2d(X1,n_filter1,kernel1,activation = tf.nn.relu,reuse=None,padding='SAME',name='conv1') \n conv2X1 = tf.layers.conv2d(conv1X1,n_filter2,kernel2,padding = 'SAME',name='conv2') \n\n XfX1 = tf.reshape(conv2X1,[-1,n_filter_end*105*105])\n \n logits11 = tf.layers.dense(XfX1,n_final_features) \n \n logits1 = tf.nn.l2_normalize(logits11,axis=1)\n\nwith tf.variable_scope('conv',reuse=True):\n \n conv1X2 = tf.layers.conv2d(X2,n_filter1,kernel1,activation = tf.nn.relu,reuse=None,padding='SAME',name='conv1') \n conv2X2 = tf.layers.conv2d(conv1X2,n_filter2,kernel2,padding = 'SAME',name='conv2') \n\n\n XfX2 = tf.reshape(conv2X2,[-1,n_filter_end*105*105])\n\n logits22 = tf.layers.dense(XfX2,n_final_features) \n \n logits2 = tf.nn.l2_normalize(logits22,axis=1)\n\n##############################################################################\n\nm = .1\n\ndiff = logits1 - logits2 + 1e-16\ndist = tf.norm((diff),axis=1)\n\nLs = dist**2\n\nz = tf.zeros_like(dist)\n\nLd = tf.maximum(z,m - dist)**2\n\nwith tf.variable_scope('loss'):\n loss = (1-y)*Ls + y*Ld\n\ndiff_margin = m - dist\ny_preds = tf.greater(0.0,m - dist)\ncorrect_preds = tf.equal(tf.cast(y_preds,tf.float32),y)\nn_correct = tf.reduce_sum(tf.cast(correct_preds,tf.float32))\n\n\nreduced_loss = tf.reduce_mean(loss,name = 'reduced_loss')\noptimizer = tf.train.AdamOptimizer(1e-3).minimize(reduced_loss)\nsess.run(tf.initialize_all_variables())\n\ninds_test1 = inds_test.copy()\ninds_test2 = inds_test.copy()\n\nnp.random.shuffle(inds_test1)\nnp.random.shuffle(inds_test2) \n\ninds_train1 = inds_train.copy()\ninds_train2 = inds_train.copy()\nn_batch_train = int(len(inds_train1)/batch_size_train)\n\n#n_batch_train = int(len(inds_train)/batch_size_train)\n\nfor e in range(n_epoch):\n\n\n #print(len(inds_train1),len(inds_train2))\n np.random.shuffle(inds_train1)\n np.random.shuffle(inds_train2)\n \n total_loss = 0\n\n \n for i in range(n_batch_train):\n \n inds_batch1 = inds_train1[i*batch_size_train:(i+1)*batch_size_train]\n inds_batch2 = inds_train2[i*batch_size_train:(i+1)*batch_size_train]\n \n X_batch1 = X_train[inds_batch1]\n y_batch1 = y_train[inds_batch1]\n\n X_batch2 = X_train[inds_batch2]\n y_batch2 = y_train[inds_batch2]\n \n y_batch = np.abs(y_batch1 - y_batch2)\n y_batch[y_batch>0] = 1\n \n \n feed_dict = {X1: X_batch1, X2: X_batch2, y: y_batch} \n sess.run(optimizer,feed_dict)\n #loss_batch = sess.run(reduced_loss,feed_dict) \n \n #total_loss += loss_batch\n #print(i,loss_batch)\n #print(i)\n \n# if math.isnan(loss_batch):\n# STOP\n \n\n# inds_test1 = inds_test.copy()\n# inds_test2 = inds_test.copy()\n \n# np.random.shuffle(inds_test1)\n# np.random.shuffle(inds_test2)\n\n total_tp = 0\n total_tn = 0 \n total_fp = 0\n total_fn = 0\n \n total_correct = 0\n for i in range(n_batch_test):\n \n inds_batch1 = inds_test1[i*batch_size_test:(i+1)*batch_size_test]\n inds_batch2 = inds_test2[i*batch_size_test:(i+1)*batch_size_test]\n \n X_batch1 = X_test[inds_batch1]\n y_batch1 = y_test[inds_batch1]\n\n X_batch2 = X_test[inds_batch2]\n y_batch2 = y_test[inds_batch2]\n \n y_batch = np.abs(y_batch1 - y_batch2)\n y_batch[y_batch>0] = 1\n \n feed_dict = {X1: X_batch1, X2: X_batch2, y: y_batch} \n 
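# note: y_preds above flags a pair as different when dist exceeds the margin m, so n_correct counts predictions that agree with y_batch\n 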
total_correct += sess.run(n_correct,feed_dict) \n preds = sess.run(y_preds,feed_dict) \n preds = preds.astype(int)\n true_minus_pred = y_batch - preds\n \n n_false_negative = len(np.where(true_minus_pred==-1)[0]) \n n_false_positive = len(np.where(true_minus_pred==1)[0]) \n true_plus_pred = y_batch + preds\n n_true_negative = len(np.where(true_plus_pred==2)[0]) \n n_true_positive = batch_size_test - n_false_negative - n_false_positive - n_true_negative \n\n# print(y_batch1)\n# print(y_batch2)\n# print(y_batch)\n# print(preds)\n# print(preds.astype(int))\n# print(n_false_negative)\n# print(\"=\"*20) \n \n total_tp += n_true_positive\n total_tn += n_true_negative \n total_fp += n_false_positive\n total_fn += n_false_negative\n\n \n\n\n n_test_local = n_batch_test*batch_size_test\n \n# print('tp',total_tp/n_test_local,'tn',total_tn/n_test_local)\n# print('fp',total_fp/n_test_local,'fn',total_fn/n_test_local)\n print('epoch',e,total_correct/n_test_local) \n# print('='*batch_size_test*2)\n","sub_path":"contrastive_omniglot.py","file_name":"contrastive_omniglot.py","file_ext":"py","file_size_in_byte":5987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"633195739","text":"import sys\nimport os\nimport subprocess\n\nbatcmd = \"dmesg | tail -100\"\ndmesg = subprocess.check_output(batcmd, shell=True)\n\ncheck = 0\nfor line in dmesg.split('\\n') :\n\tif line.find(\"ZFS function profiling END\") != -1 :\n\t\tcheck = 1\n\tif line.find(\"SPL: Unloaded module\") != -1 and check == 1 :\n\t\tcheck = 0\n\tif check == 1 :\n\t\tfor zfs in [\"[__zio_execute]\", \"[zio_write_bp_init]\", \"[zio_issue_async]\", \"[zio_write_compress]\", \"[zio_checksum_generate]\", \"[zio_dva_throttle]\", \"[zio_dva_allocate]\", \n\t\t\t\"[zio_ready]\", \"[zio_vdev_io_start]\", \"[zio_vdev_io_done]\", \"[zio_vdev_io_assess]\", \"[zio_done]\", \"[abd_iterate_func]\", \"[abd_a]\", \"[abd_c]\", \"[ISSUE_spa_taskq]\", \"[INTERRUPT_spa_taskq]\"] :\n\t\t\tif line.find(zfs) != -1 :\n\t\t\t\tchunks = line.split(' ')\n\t\t\t\tfor words in chunks :\n\t\t\t\t\tif words.find(\"time\") != -1 :\n\t\t\t\t\t\ttime = words.split(':')[1]\n\t\t\t\t\tif words.find(\"count\") != -1 :\n\t\t\t\t\t\tcount = words.split(':')[1]\n\t\t\t\tprint(time + ' ' + count)\n\n\t\t\"\"\"\n\t\tif line.find(\"__zio_execute\") != -1 :\n\t\t\tchunks = line.split(' ')\n\t\t\tfor words in chunks :\n\t\t\t\tif words.find(\"time\") != -1 :\n\t\t\t\t\ttime = words.split(':')[1]\n\t\t\t\tif words.find(\"count\") != -1 :\n\t\t\t\t\tcount = words.split(':')[1]\nprint(time)\nprint(count)\n\"\"\"\n\n","sub_path":"backup/parse_dmesg.py","file_name":"parse_dmesg.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"199822662","text":"from collections import deque\n\ndef urlify(s, n):\n s = list(s)\n j = len(s) - 1\n\n for i in range(n - 1, -1, -1):\n if s[i] == ' ':\n s[j - 2:j + 1] = '%20'\n j -= 3\n else:\n s[j] = s[i]\n j -= 1\n\n return ''.join(s)\n\n\ndef main():\n print(urlify('Mr A John Qwerty ', 16))\n\n\nif __name__ == '__main__':\n main()","sub_path":"solutions/arrays_and_strings/question_1_3.py","file_name":"question_1_3.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"592924666","text":"# coding = utf-8\nimport re\nimport csv\nimport json\nimport itertools\nimport scipy.stats\nimport numpy as np\nimport networkx as 
nx\nimport matplotlib.pyplot as plt\nfrom sklearn import linear_model\nfrom pretreatment.load_data import *\nfrom pretreatment.evaluation import *\n\nproj_path = '/Users/DerekChiang/Documents/Github repo/social_influencer/'\ndatasets = ['Python_dataset/21631/', 'Python_dataset/55326/', 'Java_dataset/6273/']\ndataset_path = proj_path + datasets[1]\n\n\ndef getBaselines(userList):\n\n\t# check input\n\tif not userList:\n\t\traise TypeError('It\\'s an empty user list.')\n\n\tuserNum = len(userList)\n\ttruth = [x + 1 for x in range(userNum)]\n\n\t# initialize a table to save the results\n\theader = ['', 'spearman', 'kendall']\n\ttable = list()\n\ttable.append(header)\n\n\t# load repositories information\n\tfname = dataset_path + 'repo_info.json'\n\ttry:\n\t\tfhand = open(fname, 'r')\n\t\trepo_dicts = json.load(fhand)\n\texcept:\n\t\tprint('Could not read file', fname)\n\n\n\t# load user information\n\tfname = dataset_path + 'user_info.json'\n\ttry:\n\t\tfhand = open(fname, 'r')\n\t\tuser_dicts = json.load(fhand)\n\texcept:\n\t\tprint('Could not read file', fname)\n\n\n\t# Baseline 1: # Owned repositories (binary)\n\ttry:\n\t\trepoList = get_userInfo(userList, dataset_path, mode = 'o')\n\t\t# print([len(x) for x in repoList])\n\texcept:\n\t\tprint('Fail to load the repositories of users.')\n\telse:\n\t\town = rank_of_list([len(i) for i in repoList])\n\t\ttable.append(['Owned', \"{0:.6f}\".format(spearman(truth, own)), \"{0:.6f}\".format(kendall(truth, own))])\n\n\t# Baseline 1: # Owned repositories (weighted)\n\towned_c = [0 for x in range(userNum)]\n\towned_cb = [0 for x in range(userNum)]\n\towned_f = [0 for x in range(userNum)]\n\towned_w = [0 for x in range(userNum)]\n\towned_s = [0 for x in range(userNum)]\n\n\tfor i in range(len(repoList)):\n\t\tfor repo in repoList[i]:\n\t\t\tif repo in repo_dicts:\n\t\t\t\towned_c[i] += repo_dicts[repo]['commits']\n\t\t\t\towned_cb[i] += len(repo_dicts[repo]['contributors'])\n\t\t\t\tif 'stars' in repo_dicts[repo]:\n\t\t\t\t\towned_f[i] += repo_dicts[repo]['forks']\n\t\t\t\t\towned_w[i] += repo_dicts[repo]['watchers']\n\t\t\t\t\towned_s[i] += repo_dicts[repo]['stars']\n\t\t\telse:\n\t\t\t\tprint('Could not find owned repo', repo)\t\t\t\n\n\t# with open('stars.txt', 'w') as out:\n\t# \tfor x in owned_s:\n\t# \t\tout.write(str(x)+'\\n')\n\t\t\n\townedRank_c = rank_of_list(owned_c)\n\townedRank_cb = rank_of_list(owned_cb)\n\townedRank_f = rank_of_list(owned_f)\n\townedRank_w = rank_of_list(owned_w)\n\townedRank_s = rank_of_list(owned_s)\n\n\ttable.append(['Owned(c)', \"{0:.6f}\".format(spearman(truth, ownedRank_c)), \"{0:.6f}\".format(kendall(truth, ownedRank_c))])\n\ttable.append(['Owned(cb)', \"{0:.6f}\".format(spearman(truth, ownedRank_cb)), \"{0:.6f}\".format(kendall(truth, ownedRank_cb))])\n\ttable.append(['Owned(f)', \"{0:.6f}\".format(spearman(truth, ownedRank_f)), \"{0:.6f}\".format(kendall(truth, ownedRank_f))])\n\ttable.append(['Owned(w)', \"{0:.6f}\".format(spearman(truth, ownedRank_w)), \"{0:.6f}\".format(kendall(truth, ownedRank_w))])\n\ttable.append(['Owned(s)', \"{0:.6f}\".format(spearman(truth, ownedRank_s)), \"{0:.6f}\".format(kendall(truth, ownedRank_s))])\n\ttable.append(list()) # as a blank row\n\n\n\t# Baseline 2: # Written repositories (binary)\n\ttry:\n\t\trepoList = get_userInfo(userList, dataset_path, mode = 'w')\n\t\t# print([len(x) for x in repoList])\n\texcept:\n\t\tprint('Could not load the repositories of users.')\t\n\telse:\n\t\twrittenRank = rank_of_list([len(i) for i in repoList])\n\t\ttable.append(['Written', 
\"{0:.6f}\".format(spearman(truth, writtenRank)), \"{0:.6f}\".format(kendall(truth, writtenRank))])\n\n\n\t# Baseline 2: # Written repositories (weighted)\n\twritten_c = [0 for x in range(userNum)]\n\twritten_cb = [0 for x in range(userNum)]\n\twritten_f = [0 for x in range(userNum)]\n\twritten_w = [0 for x in range(userNum)]\n\twritten_s = [0 for x in range(userNum)]\n\tfor i in range(len(repoList)):\n\t\tfor repo in repoList[i]:\n\t\t\tif repo in repo_dicts:\n\t\t\t\twritten_c[i] += repo_dicts[repo]['commits']\n\t\t\t\twritten_cb[i] += len(repo_dicts[repo]['contributors'])\n\t\t\t\tif 'stars' in repo_dicts[repo]:\n\t\t\t\t\twritten_f[i] += repo_dicts[repo]['forks']\n\t\t\t\t\twritten_w[i] += repo_dicts[repo]['watchers']\n\t\t\t\t\twritten_s[i] += repo_dicts[repo]['stars']\n\t\t\telse:\n\t\t\t\tprint('Could not find written repo', repo)\t\n\twrittenRank_c = rank_of_list(written_c)\n\twrittenRank_cb = rank_of_list(written_cb)\n\twrittenRank_f = rank_of_list(written_f)\n\twrittenRank_w = rank_of_list(written_w)\n\twrittenRank_s = rank_of_list(written_s)\n\n\ttable.append(['Written(c)', \"{0:.6f}\".format(spearman(truth, writtenRank_c)), \"{0:.6f}\".format(kendall(truth, writtenRank_c))])\n\ttable.append(['Written(cb)', \"{0:.6f}\".format(spearman(truth, writtenRank_cb)), \"{0:.6f}\".format(kendall(truth, writtenRank_cb))])\n\ttable.append(['Written(f)', \"{0:.6f}\".format(spearman(truth, writtenRank_f)), \"{0:.6f}\".format(kendall(truth, writtenRank_f))])\n\ttable.append(['Written(w)', \"{0:.6f}\".format(spearman(truth, writtenRank_w)), \"{0:.6f}\".format(kendall(truth, writtenRank_w))])\n\ttable.append(['Written(s)', \"{0:.6f}\".format(spearman(truth, writtenRank_s)), \"{0:.6f}\".format(kendall(truth, writtenRank_s))])\n\ttable.append(list()) # as a blank row\n\n\n\n\twritten_c_s = [0 for x in range(userNum)]\n\tfor i in range(userNum):\n\t\tfor repo in repoList[i]:\n\t\t\tif 'stars' in repo_dicts[repo]:\n\t\t\t\tval = user_dicts[userList[i]]['written repos'][repo] / repo_dicts[repo]['commits']\n\t\t\t\tweight = repo_dicts[repo]['watchers']\n\t\t\t\twritten_c_s[i] += val * weight\n\t\t\telse:\n\t\t\t\tprint(i, repo) \t\n\n\twrittenRank_c_s = rank_of_list(written_c_s)\n\ttable.append(['Written(c_s)', \"{0:.6f}\".format(spearman(truth, writtenRank_c_s)), \"{0:.6f}\".format(kendall(truth, writtenRank_c_s))])\n\n\n\n\n\t# Baseline 3: # Commits\n\ttry:\n\t\tuser_commit = get_userInfo(userList, dataset_path, mode = 'c')\n\t\t# print([x for x in user_commit])\n\texcept:\n\t\tprint('Could not load the number of commits of users.')\t\n\telse:\t\n\t\tcommitRank = rank_of_list(user_commit)\n\t\ttable.append(['Commits', \"{0:.6f}\".format(spearman(truth, commitRank)), \"{0:.6f}\".format(kendall(truth, commitRank))])\n\n\n\t# Baseline 4: # Collaborators\n\ttry:\n\t\tcollabList = get_collaborators(userList, dataset_path)\n\texcept:\n\t\tprint('Could not load the number of collaborators of users.')\t\n\telse:\t\n\t\tcollabRank = rank_of_list([len(i) for i in collabList])\n\t\ttable.append(['Collaborators', \"{0:.6f}\".format(spearman(truth, collabRank)), \"{0:.6f}\".format(kendall(truth, collabRank))])\n\n\n\t# Baseline 5: # Related projects\n\ttry:\n\t\trelatedList = get_relatedRepos(userList, dataset_path)\n\texcept:\n\t\tprint('Could not load the number of related repositories of users.')\t\n\telse:\t\t\n\t\trelatedRank = rank_of_list([len(i) for i in relatedList])\n\t\ttable.append(['Related repos', \"{0:.6f}\".format(spearman(truth, relatedRank)), \"{0:.6f}\".format(kendall(truth, 
relatedRank))])\n\t\n\n\treturn table\n\n\n##################################################\n################## Main Command ##################\n##################################################\n\n# top30 = top30_users(dataset_path, 'f')\n\n# result_table = list()\n# table1 = getBaselines(top30[:10])\n# table2 = getBaselines(top30[10:20])\n# table3 = getBaselines(top30[:20])\n# table4 = getBaselines(top30[20:30])\n# table5 = getBaselines(top30)\n\n# for r1, r2, r3, r4, r5 in zip(table1, table2, table3, table4, table5):\n# \tresult_table.append(r1 + r2[1:] + r3[1:] + r4[1:] + r5[1:])\n\n# # save the result in a .csv file\t\n# with open('results.csv', 'w') as outfile:\n# \tcsvout = csv.writer(outfile)\n# \tcsvout.writerows(result_table)\n# print(\"The results.csv file is written and saved.\")\n\n\n#################################################\n################### PageRank ####################\n#################################################\t\n\n# names = list()\n# # load user name\n# fname = dataset_path + 'userList.json'\n# try:\n# \tfhand = open(fname, 'r')\n# \tuserlist = json.load(fhand)\n# except:\n# \tprint('Could not read file', fname)\n# else:\n# \tfor name in userlist:\n# \t\tnames.append(name)\n# \tfhand.close()\n\n\n# G = nx.Graph()\n# G.add_nodes_from(names)\n\n\n\n# fname = dataset_path + 'repo_info.json'\n# try:\n# \tfhand = open(fname, 'r')\n# \tdicts = json.load(fhand)\n# except:\n# \tprint('Could not read file', fname)\n# else:\n# \tfor repo in dicts:\n# \t\tcontributors = dicts[repo]['contributors']\n# \t\tfor x, y in itertools.combinations(contributors, 2):\n# # adj[x][y] = 1\n# # adj[y][x] = 1\n# # if (x in users or y in users):\n# \t\t\te = (x, y)\n# \t\t\tG.add_edge(*e)\n# \t\t\t# if 'weight' not in G[x][y]:\n# \t\t\t# \tG[x][y]['weight'] = repo['stars'] \t\n# \t\t\t# else:\n# \t\t\t# \tG[x][y]['weight'] += repo['stars'] \t\n \n\t\n# \tpr = nx.pagerank(G)\n\n# \ttop30 = top30_users(dataset_path, 'f')\n# \tlists = [top30[:10], top30[10:20], top30[:20], top30[20:30], top30]\n# \tfor users in lists:\n# \t\ttruth = [x + 1 for x in range(len(users))]\n\n# \t\tscore = list()\n# \t\tfor user in users:\n# \t\t\tscore.append(pr[user])\n\n# \t\tRank = rank_of_list(score)\n# \t\tprint('Pagerank : rho = %.6f, tau = %.6f, %s' % (spearman(truth, Rank), kendall(truth, Rank), Rank))\n \n\n \n# finally:\n# fhand.close()\n\n##################################################\n########### Calulate repo correlation ############\n##################################################\n# stars = list()\n# forks = list()\n# watches = list()\n# commits = list()\n# contributors = list()\n\n# # load repositories information\n# fname = dataset_path + 'repo_info.json'\t\n# try:\n# \tfhand = open(fname, 'r')\n# \trepo_dicts = json.load(fhand)\n# except:\n# \traise OSError('Could not read file', fname)\n\n# s = 0\n# for repo, dicts in repo_dicts.items():\t\n# \tif 'stars' in dicts:\n# \t\tstars.append(dicts['stars'])\n# \t\tforks.append(dicts['forks'])\n# \t\twatches.append(dicts['watchers'])\n# \t\tcommits.append(dicts['commits'])\n# \t\tcontributors.append(len(dicts['contributors']))\n\t\t\n# # \t\t# \"\"\"Pearson coefficient\"\"\"\n# # \t\t# x = load.regression_of_repos([x for x in range(21631)])\n# # \t\t# y = load.stars_of_repos([x for x in range(21631)])\n# x = stars\n# y = contributors\n# r, pval = scipy.stats.pearsonr(x, y)\n# print(\"r = %.3f\" % r)\n\n\"\"\" Simple linear regression \"\"\"\n# ssxm, ssxym, ssyxm, ssym = np.cov(x, y, bias=1).flat\n# slope, intercept, r_value, p_value, 
std_err_slope = scipy.stats.linregress(x, y)\t\n# std_err = std_err_slope * np.sqrt(len(x) * ssxm)\n# hx = [xi * slope + intercept for xi in x]\n# print(ssxm, ssym ,ssxym)\n# print(slope, intercept, std_err)\n# print(r_value) \t\n\n# \"\"\" Multiple linear regression \"\"\"\n# x = [[forks[i], watches[i], commits[i], contributors[i]] for i in range(len(forks))]\n# x = np.array(x)\n# # x = np.array(watches)\n# # x = x.reshape(len(x), 1)\n# y = np.array(stars)\n# y = y.reshape(len(y), 1)\n# clf = linear_model.LinearRegression()\n# clf.fit(x, y)\n# print(clf.coef_)\n# print(np.sqrt(clf.residues_ / (len(stars)-2)))\n# print(clf.intercept_)\n# y_est = clf.predict(x)\n# print(y_est)\n\n# \"\"\" Display the plot \"\"\"\n# inx = np.argsort(x)\n# xs = np.array(x)[inx]\n# hxs = np.array(hx)[inx]\n# ys = np.array(y)[inx] \n# x_max = np.amax(xs)\n# y_max = np.amax(ys)\n# x_min = np.amin(xs)\n# y_min = np.amin(ys)\n# axes = plt.gca()\n# axes.set_xlim([x_min,x_max])\n# axes.set_ylim([y_min,y_max])\n# plt.scatter(xs, ys, label = 'Data sets (21516)')\n# plt.scatter(xs, hxs, color = 'red', label = 'Estimated values')\n# plt.plot(xs, slope*xs + intercept, color = 'red', label = 'Regrssion line', linewidth = 2)\n# plt.xlabel('# of contributors')\n# plt.ylabel('# of stars')\n# plt.title(r'Contributor-star distribution:')\n# plt.legend(loc = 'lower right')\n# plt.show() \n\n# file = open(dataset_path+'repoList.txt', 'w')\n# fhand = open(dataset_path+'repo_info.json', 'r')\n# dicts = json.load(fhand)\n# i = 0\n# for repo in dicts:\n# \tif not 'forks' in dicts[repo]:\n# \t\ti +=1\n\t# file.write(repo[0]+'\\n')\n# file.close()\n# print(i)\n# fhand.close()\t\n\n\n\n# ### new top30\n# fname = dataset_path + 'repo_info.json'\t\n# try:\n# \tfhand = open(fname, 'r')\n# \trepo_dicts = json.load(fhand)\n# except:\n# \tprint('Could not read file', fname)\t\n\n\n# top30 = top30_users(dataset_path)\n# stars = [0 for x in range(30)]\n# repoList = get_userInfo(top30, dataset_path, mode = 'o')\n\n# for i in range(len(repoList)):\n# \tfor repo in repoList[i]:\n# \t\tif repo in repo_dicts:\n# \t\t\tif 'stars' in repo_dicts[repo]:\n# \t\t\t\tstars[i] += repo_dicts[repo]['stars']\n# \t\telse:\n# \t\t\tprint('Could not find owned repo', repo)\t\n\n# fhand = open(dataset_path+'top30-stars.txt', 'w')\n# indices = list(range(30))\n# indices.sort(key = lambda x: stars[x], reverse = True)\n# print(indices)\n# newtop30 = list()\n# for user in indices:\n# \tnewtop30.append(top30[user])\n# \tfhand.write(top30[user]+'\\n')\n# print(newtop30)\n\n\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"392596200","text":"import warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\n\nimport numpy as np\nimport time, os\n\nimport tvm\nfrom tvm import relay\nfrom tvm import autotvm\nfrom tvm.relay import testing\nfrom tvm.autotvm.tuner import XGBTuner, GATuner, RandomTuner, GridSearchTuner\nfrom tvm.autotvm.graph_tuner import DPTuner, PBQPTuner\nimport tvm.contrib.graph_runtime as runtime\n\nimport tensorflow as tf\nimport tvm.relay.testing.tf as tf_testing\n\nfrom tensorflow import nn\n\nnp.random.seed(0)\n\n\"\"\" \n Network parameters\n\"\"\"\nBATCH_SIZE = 8\nN = 56\nFIN = 32\nFOUT = 32\n\nK_Y = 3\nK_X = 3\n\nNB_TESTS = 101\n\n\"\"\" \n Target settings\n\"\"\"\ntarget = \"llvm -mcpu=core-avx2\"\ntarget_host = \"llvm\"\nlayout = None\n\ninput_shape = (BATCH_SIZE, FIN, N + 2, N + 
2)\n\n\"\"\" \n Create the graph in TensorFlow \n\"\"\"\ndef Convolution(X, weights, bias):\n conv = nn.conv2d(X, weights, strides=[1, 1, 1, 1], padding=\"VALID\", data_format=\"NCHW\")\n conv_bias = nn.bias_add(conv, bias, data_format=\"NCHW\")\n\n return conv_bias\n\nweights = np.random.rand(K_Y, K_X, FIN, FOUT)\nbias = np.random.rand(FOUT)\n\nX = tf.compat.v1.placeholder(tf.float32, [BATCH_SIZE, FIN, N + 2, N + 2], name=\"X\")\nactivations = Convolution(X, weights, bias)\n\nmodel_path = \"tf_model.pb\"\ntf.io.write_graph(tf.compat.v1.get_default_graph(), \"\", model_path, as_text=False)\n\n\"\"\" \n Create TF graph definition \n\"\"\"\nwith tf.io.gfile.GFile(model_path, \"rb\") as f:\n graph_def = tf.compat.v1.GraphDef()\n graph_def.ParseFromString(f.read())\n graph = tf.import_graph_def(graph_def, name=\"\")\n\n graph_def = tf_testing.ProcessGraphDefParam(graph_def)\n\n# Import TF graph definition to Relay frontend\nshape_dict = {\"X\": input_shape}\nmod, parameters = relay.frontend.from_tensorflow(graph_def, layout=layout, shape=shape_dict)\n\n\"\"\" \n AutoTune the network (taken from https://docs.tvm.ai/tutorials/autotvm/tune_relay_x86.html) \n\"\"\"\nlog_file = \"tvm_autotuning.log\"\n\ntuner_type = \"random\"\ninput_name = \"X\"\ndtype = \"float32\"\n\ntuning_option = {\n \"log_filename\": log_file,\n \"tuner\": tuner_type,\n \"early_stopping\": None,\n\n \"measure_option\": autotvm.measure_option(\n builder=autotvm.LocalBuilder(),\n runner=autotvm.LocalRunner(number=10, repeat=1,\n min_repeat_ms=1000),\n ),\n}\n\n# Set number of threads used for tuning\nnum_threads = 4\nos.environ[\"TVM_NUM_THREADS\"] = str(num_threads)\n\n# Tune a set of convolutions\ndef tune_kernels(tasks,\n measure_option,\n tuner=\"gridsearch\",\n early_stopping=None,\n log_filename=\"tuning.log\"):\n\n for i, tsk in enumerate(tasks):\n prefix = \"[Task %2d/%2d] \" % (i+1, len(tasks))\n\n # converting conv2d tasks to conv2d_NCHWc tasks\n op_name = tsk.workload[0]\n if op_name == \"conv2d\":\n func_create = \"topi_x86_conv2d_NCHWc\"\n elif op_name == \"depthwise_conv2d_nchw\":\n func_create = \"topi_x86_depthwise_conv2d_NCHWc_from_nchw\"\n else:\n raise ValueError(\"Tuning {} is not supported on x86\".format(op_name))\n\n task = autotvm.task.create(func_create, args=tsk.args,\n target=target, template_key=\"direct\")\n task.workload = tsk.workload\n\n # create tuner\n if tuner == \"xgb\" or tuner == \"xgb-rank\":\n tuner_obj = XGBTuner(task, loss_type='rank')\n elif tuner == \"ga\":\n tuner_obj = GATuner(task, pop_size=50)\n elif tuner == \"random\":\n tuner_obj = RandomTuner(task)\n elif tuner == \"gridsearch\":\n tuner_obj = GridSearchTuner(task)\n else:\n raise ValueError(\"Invalid tuner: \" + tuner)\n\n # do tuning\n n_trial = len(task.config_space)\n tuner_obj.tune(n_trial=n_trial,\n early_stopping=early_stopping,\n measure_option=measure_option,\n callbacks=[\n autotvm.callback.progress_bar(n_trial, prefix=prefix),\n autotvm.callback.log_to_file(log_filename)])\n\n# Call this function with tuning options to start tuning\ndef tune_and_evaluate(tuning_opt):\n # extract workloads from relay program\n print(\"Extract tasks...\")\n tasks = autotvm.task.extract_from_program(mod[\"main\"], target=target,\n params=parameters, ops=(relay.op.nn.conv2d,))\n\n # run tuning tasks\n print(\"Tuning...\")\n tune_kernels(tasks, **tuning_opt)\n\n # compile kernels with best records\n with autotvm.apply_history_best(log_file):\n print(\"Compile...\")\n with relay.build_config(opt_level=3):\n graph, lib, params = 
relay.build_module.build(\n mod, target=target, params=parameters)\n\n # upload parameters to device\n ctx = tvm.cpu()\n data_tvm = tvm.nd.array((np.random.uniform(size=input_shape)).astype(dtype))\n module = runtime.create(graph, lib, ctx)\n module.set_input(input_name, data_tvm)\n module.set_input(**params)\n\n # evaluate\n print(\"Evaluate inference time cost...\")\n ftimer = module.module.time_evaluator(\"run\", ctx, number=NB_TESTS, repeat=1)\n prof_res = np.array(ftimer().results) * 1000 # convert to millisecond\n\n print(\"Tuned network execution time : \", np.median(prof_res))\n\ntune_and_evaluate(tuning_option)\n","sub_path":"benchmarks/DNN/layers/convolution/direct/cpu_optimal_unrolled/conv_tvm.py","file_name":"conv_tvm.py","file_ext":"py","file_size_in_byte":5387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"449619463","text":"import os\n\n\ndef find_file(name, path):\n \"\"\"\n Finds file with the specified name in the given path.\n\n Args:\n name (str): file name.\n path (str): starting path for search.\n\n Returns:\n str: absolute file path (if found)\n \"\"\"\n for root, dirs, files in os.walk(path, followlinks=True):\n for file in files:\n if name in file:\n return os.path.join(root, file)\n","sub_path":"express/parsers/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"349865451","text":"import socket\nimport errno\nimport time\nimport random\nfrom os import system\n\n\n_ = system('clear')\n\n# Choosing Nickname\nprint(\"-------------------------------------------------------------------\")\nprint(\"| Welcome to the Snake and Ladder Game |\")\nprint(\"-------------------------------------------------------------------\")\n\nprint (\"Name will display on server\")\nnickname = input(\"Choose your nickname: \")\n\n# Connecting To Server\nclient = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nclient.connect(('192.168.1.15', 55555))\n\n# just of effects. add a delay of 1 second before performing any action\nWAIT = 1\nMAX_VAL = 50\n\n# snake takes you down\nsnakes = {\n 39: 3,\n 46: 14,\n 49: 13,\n}\n\n# ladder takes you up\nladders = {\n 5: 43,\n 9: 30,\n 20: 41,\n}\n\n\nplayer_turn_text = [\n \"Your turn.\",\n \"Go.\",\n \"Please proceed.\",\n \"Lets win this.\",\n \"Are you ready?\",\n \"\",\n]\n\n\nsnake_bite = [\n \"Oops\",\n \"OMG !\",\n \"you got snake bite\",\n \"oh no !!\",\n \"auchhhhhh \"\n]\n\n\nladder_jump = [\n \"Phew\",\n \"woww\",\n \"nailed it\",\n \"oh my God...\",\n \"yaayyy\"\n]\n\n\n\ndef welcome_msg():\n msg = \"\"\"\n Welcome to Snake and Ladder Game.\n 41 42 43 44 45 46 47 48 49 50\n ^ ^ * *\n 40 39 38 37 36 35 34 33 32 31\n | * \\ \\ /\n 21 22 23 24 25 26 27 28 29 30\n | \\ \\ \\ / ^\n 20 19 18 17 16 15 14 13 12 11\n \\ \\ /\n 1 2 3 4 5 6 7 8 9 10\n Snake Ladder\n 49->13 20->41\n 46->14 9->30\n 39->3 5->43\n \"\"\"\n print(msg)\n\ndef get_dice_value():\n time.sleep(WAIT)\n dice_value = random.randint(1,6)\n print(\"Its a \" + str(dice_value))\n return dice_value\n\n\ndef got_snake_bite(old_value, current_value, player_name):\n print(\"\\n\" + random.choice(snake_bite).upper() + \" ~~~~~~~~>\")\n print(\"\\n\" + player_name + \" got a snake bite. 
Down from \" + str(old_value)+ \" to \" + str(current_value))\n\ndef got_ladder_jump(old_value, current_value, player_name):\n print(\"\\n\" + random.choice(ladder_jump).upper() + \" ########\")\n print(\"\\n\" + player_name + \" climbed the ladder from \" + str(old_value) + \" to \" + str(current_value))\n\n\n\n\ndef snake_ladder(player_name, current_value, dice_value):\n time.sleep(WAIT)\n old_value = current_value\n current_value = current_value + dice_value\n\n if current_value > MAX_VAL:\n print(\"You need \" + str(MAX_VAL - old_value) + \" to win this game. Keep trying,OKAY !\" )\n return old_value\n\n print(\"\\n\" + player_name + \" moved from \" + str(old_value) + \" to \" + str(current_value))\n if current_value in snakes:\n final_value = snakes.get(current_value)\n got_snake_bite(current_value, final_value, player_name)\n\n elif current_value in ladders:\n final_value = ladders.get(current_value)\n got_ladder_jump(current_value, final_value, player_name)\n\n else:\n final_value = current_value\n\n return final_value\n\n\ndef check_win(player_name, position):\n time.sleep(WAIT)\n if MAX_VAL == position:\n print(\"\\n\\n\\nThats it.\\n\\n\" + player_name + \" won the game.\")\n print(\"Congratulations \" + player_name)\n print(\"\\nThank you for playing the game.\\n\\n\")\n again=input(\"\\nDo you want to play again? (y/n) \")\n if again == 'n':\n sys.exit(1)\n\n#gamestart\ndef startgame():\n while True:\n try:\n # Receive Message From Server\n # If 'NICK' Send Nickname\n message = client.recv(1024).decode('ascii')\n if message == 'NICK':\n client.send(nickname.encode('ascii'))\n else:\n time.sleep(WAIT)\n player_name = nickname\n time.sleep(WAIT)\n player_current_position = 0\n round = 0\n while True:\n welcome_msg()\n time.sleep(WAIT)\n print(\"Round:\")\n print(round)\n print(\"\\nCurrent position :\")\n print(player_current_position)\n input_1 = input(\"\\n\" + player_name + \": \" + random.choice(player_turn_text) + \" Hit the enter to roll dice: \")\n print(\"\\nRolling dice...\")\n dice_value = get_dice_value()\n time.sleep(WAIT)\n print(player_name + \" moving....\")\n player_current_position = snake_ladder(player_name, player_current_position, dice_value)\n time.sleep(3)\n round=round+1\n\n\n check_win(player_name, player_current_position)\n\n _ = system('clear')\n\n except socket.error as e:\n print (str(e))\n sys.exit()\n\n\nstartgame()\n","sub_path":"client2.py","file_name":"client2.py","file_ext":"py","file_size_in_byte":4833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"486293219","text":"#coding: utf-8\nfrom flask import Flask, request, render_template\nimport bm\n\nlisten = '0.0.0.0'\nport = 5000\ndebug = False\ndbfile = 'store.db'\n\napp = Flask(__name__)\n\n# index paged\n@app.route('/')\ndef view_index():\n bm.createDBfile(dbfile)\n return render_template('index.html')\n\n\n# login\n@app.route('/login', methods=['GET', 'POST'])\ndef view_login():\n if request.method == 'POST':\n userData = request.form.to_dict()\n username = userData['username']\n password = userData['password']\n\n loginResult, loginInfo = bm.login(dbfile, username, password)\n return render_template('result.html', info=loginInfo)\n\n return render_template('login.html')\n\n\n# register\n@app.route('/register', methods=['GET', 'POST'])\ndef view_register():\n if request.method == 'POST':\n userData = request.form.to_dict()\n registerResult, registerInfo = bm.register(dbfile, userData)\n return render_template('result.html', 
info=registerInfo)\n return render_template('register.html')\n \n\n# show usres\n@app.route('/users')\ndef view_users():\n bm.createDBfile(dbfile)\n users = bm.loadData(dbfile)\n status = None\n if users:\n for k, v in users.iteritems():\n if v[0] == 0:\n status = u'正常'\n else:\n status = u'锁定'\n return render_template('users.html', users=users, status=status)\n\n\nif __name__ == '__main__':\n app.run(host=listen, port=port, debug=debug)\n","sub_path":"lesson5/zhanghe/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"588442233","text":"class Solution:\n def twoSum1(self, nums, target):\n for i in range(len(nums)):\n for j in range(i+1,len(nums)):\n if nums[i]+nums[j] == target:\n return [i,j]\n def twoSum(self, nums, target):\n d = {}\n for i in range(len(nums)):\n if target - nums[i] in d:\n return [d[target-nums[i]],i]\n else:\n d[nums[i]] = i\n\n\nif __name__ == \"__main__\":\n a = Solution()\n print(a.twoSum([2,7,11,15],9))","sub_path":"两数之和.py","file_name":"两数之和.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"188373459","text":"\"\"\"The tests for the signal_messenger platform.\"\"\"\n\nimport os\nimport tempfile\nimport unittest\nfrom unittest.mock import patch\n\nfrom pysignalclirestapi import SignalCliRestApi\nimport requests_mock\n\nimport homeassistant.components.signal_messenger.notify as signalmessenger\nfrom homeassistant.setup import async_setup_component\n\nBASE_COMPONENT = \"notify\"\n\n\nasync def test_signal_messenger_init(hass):\n \"\"\"Test that service loads successfully.\"\"\"\n\n config = {\n BASE_COMPONENT: {\n \"name\": \"test\",\n \"platform\": \"signal_messenger\",\n \"url\": \"http://127.0.0.1:8080\",\n \"number\": \"+43443434343\",\n \"recipients\": [\"+435565656565\"],\n }\n }\n\n with patch(\"pysignalclirestapi.SignalCliRestApi.send_message\", return_value=None):\n assert await async_setup_component(hass, BASE_COMPONENT, config)\n await hass.async_block_till_done()\n\n # Test that service loads successfully\n assert hass.services.has_service(BASE_COMPONENT, \"test\")\n\n\nclass TestSignalMesssenger(unittest.TestCase):\n \"\"\"Test the signal_messenger notify.\"\"\"\n\n def setUp(self):\n \"\"\"Set up things to be run when tests are started.\"\"\"\n recipients = [\"+435565656565\"]\n number = \"+43443434343\"\n client = SignalCliRestApi(\"http://127.0.0.1:8080\", number)\n self._signalmessenger = signalmessenger.SignalNotificationService(\n recipients, client\n )\n\n @requests_mock.Mocker()\n def test_send_message(self, mock):\n \"\"\"Test send message.\"\"\"\n message = \"Testing Signal Messenger platform :)\"\n mock.register_uri(\n \"POST\",\n \"http://127.0.0.1:8080/v2/send\",\n status_code=201,\n )\n mock.register_uri(\n \"GET\",\n \"http://127.0.0.1:8080/v1/about\",\n status_code=200,\n json={\"versions\": [\"v1\", \"v2\"]},\n )\n with self.assertLogs(\n \"homeassistant.components.signal_messenger.notify\", level=\"DEBUG\"\n ) as context:\n self._signalmessenger.send_message(message)\n self.assertIn(\"Sending signal message\", context.output[0])\n self.assertTrue(mock.called)\n self.assertEqual(mock.call_count, 2)\n\n @requests_mock.Mocker()\n def test_send_message_should_show_deprecation_warning(self, mock):\n \"\"\"Test send message.\"\"\"\n message = \"Testing Signal Messenger platform with attachment :)\"\n mock.register_uri(\n 
\"POST\",\n \"http://127.0.0.1:8080/v2/send\",\n status_code=201,\n )\n mock.register_uri(\n \"GET\",\n \"http://127.0.0.1:8080/v1/about\",\n status_code=200,\n json={\"versions\": [\"v1\", \"v2\"]},\n )\n with self.assertLogs(\n \"homeassistant.components.signal_messenger.notify\", level=\"WARNING\"\n ) as context, tempfile.NamedTemporaryFile(\n suffix=\".png\", prefix=os.path.basename(__file__)\n ) as tf:\n data = {\"data\": {\"attachment\": tf.name}}\n self._signalmessenger.send_message(message, **data)\n self.assertIn(\n \"The 'attachment' option is deprecated, please replace it with 'attachments'. This option will become invalid in version 0.108\",\n context.output[0],\n )\n self.assertTrue(mock.called)\n self.assertEqual(mock.call_count, 2)\n\n @requests_mock.Mocker()\n def test_send_message_with_attachment(self, mock):\n \"\"\"Test send message.\"\"\"\n message = \"Testing Signal Messenger platform :)\"\n mock.register_uri(\n \"POST\",\n \"http://127.0.0.1:8080/v2/send\",\n status_code=201,\n )\n mock.register_uri(\n \"GET\",\n \"http://127.0.0.1:8080/v1/about\",\n status_code=200,\n json={\"versions\": [\"v1\", \"v2\"]},\n )\n with self.assertLogs(\n \"homeassistant.components.signal_messenger.notify\", level=\"DEBUG\"\n ) as context, tempfile.NamedTemporaryFile(\n suffix=\".png\", prefix=os.path.basename(__file__)\n ) as tf:\n data = {\"data\": {\"attachments\": [tf.name]}}\n self._signalmessenger.send_message(message, **data)\n self.assertIn(\"Sending signal message\", context.output[0])\n self.assertTrue(mock.called)\n self.assertEqual(mock.call_count, 2)\n","sub_path":"tests/components/signal_messenger/test_notify.py","file_name":"test_notify.py","file_ext":"py","file_size_in_byte":4395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"639653619","text":"from models_utility.param_gp import Param\r\nfrom kernels.kernel import StationaryKernel\r\nimport torch\r\nimport numpy as np\r\n\r\n\r\ntorch.set_default_tensor_type(torch.FloatTensor)\r\ntorch.set_default_dtype(torch.float32)\r\n\r\n\r\nclass Periodic(StationaryKernel):\r\n\r\n def __init__(self, variance, length_scale, periodic, device):\r\n super(Periodic, self).__init__(device)\r\n self.kernel_name = 'PERIODIC'\r\n self._assign_Periodic_param(variance, length_scale, periodic)\r\n\r\n def _assign_Periodic_param(self, variance, length_scale, periodic):\r\n self.variance = Param(torch.tensor(variance).to(self.device),\r\n requires_grad=True, requires_transform=True, param_name='periodic_variance')\r\n\r\n self.length_scales = Param(torch.tensor(length_scale).to(self.device),\r\n requires_grad=True, requires_transform=True, param_name='periodic_length')\r\n\r\n self.periodic = Param(torch.tensor(periodic).to(self.device),\r\n requires_grad=True, requires_transform=True, param_name='periodic_period')\r\n\r\n return\r\n\r\n def K(self, x1, x2=None):\r\n pi = np.pi\r\n x1, x2 = self.check_tensortype(x1, x2)\r\n x1 = pi * x1.div(self.periodic.transform())\r\n x2 = pi * x2.div(self.periodic.transform())\r\n outs = ((torch.sin(x1 - x2.t())) / self.length_scales.transform()).pow(2)\r\n\r\n return self.variance.transform() * torch.exp(-2 * outs)\r\n\r\n","sub_path":"kernels/.ipynb_checkpoints/Periodic_kernel-checkpoint.py","file_name":"Periodic_kernel-checkpoint.py","file_ext":"py","file_size_in_byte":1486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"485753456","text":"'''\nAuthor: Bazil Muzaffar Kotriwala\nTimestamp: 25 
- Mar - 17 11:24PM\n'''\n\nfrom quicksort import quick_sort\n\nclass QuadraticProbeHashTable:\n\n def __init__(self, size):\n\n '''\n When an object is created using the QuadraticProbeHashTable, a table with linear probing of a default size is created.\n :param: Size of the table (size is a prime number)\n :precondition: None\n :postcondition: None\n :complexity: Best Case = Worst Case = O(1), constant time complexity, since there are single statements being executed.\n '''\n\n self.array = [None] * size\n self.table_size = size\n self.count = 0\n\n def hash(self, key):\n '''\n This function is the universal hash function which calculates the hash value for the given key.\n :param: The keys in the hash table\n :precondition: None\n :postcondition: None\n :complexity: Best Case = Worst Case = O(n), where n is the length of hash table\n '''\n\n value = 0\n a = 31415\n b = 27183\n for i in range(len(key)):\n value = (ord(key[i]) + a * value) % self.table_size\n a = a * b % (self.table_size - 1)\n return value\n\n def __setitem__(self, key, value):\n '''\n This function puts the value corresponding to the key at the position which is empty in the Hash Table or\n updates a value if there is something in the position with the same key or finds a new empty spot if there is\n already something there with a different key. (Uses linear search)\n :param: Key and its corresponding value\n :precondition: None\n :postcondition: None\n :complexity: Best Case O(1), constant time complexity, if the code does not enter the for loop and raises exception\n Worst Case O(n), linear time complexity, where n is the size of the hash table.\n '''\n\n position = self.hash(key)\n fixed_position = position\n copyPos = position\n if self.count == self.table_size:\n self.rehash(size)\n for i in range(self.table_size):\n if self.array[position] is None: # if the slot is empty in the table it sets the value at that position\n self.array[position] = (key, value)\n self.count += 1\n return\n elif self.array[position][0] == key: # if the same key is found it updates the value at that position\n self.array[position] = (key, value)\n return\n else:\n position = (fixed_position + ((i + 1) ** 2)) % self.table_size # if key is not found, look for the next position\n if copyPos == position:\n raise Exception('High load factor')\n\n def __getitem__(self, key):\n '''\n This function returns the value corresponding to the key at the position in the Hash Table. 
The function raises\n a KeyError if the key does not exist in the Hash Table\n :param: The key which may or may not exist in the Hash Table\n :precondition: None\n :postcondition: The value is returned corresponding to the key in the hash table or a KeyError if key does not exist\n :complexity: Best Case = Worst Case = O(n), where n is the size of the table\n '''\n\n position = self.hash(key)\n fixed_position = position\n for i in range(self.table_size):\n if self.array[position] is None: # at that key position there is an empty slot\n break\n elif self.array[position][0] == key: # if the same key is found we return the value at that key\n return self.array[position][1]\n else:\n position = (fixed_position + ((i+1) ** 2)) % self.table_size # if the key is not found, we look for the next position\n raise KeyError(key) # if the for loop exits without returning means key does\n # not exist the hash table\n\n def __contains__(self, key):\n '''\n This function returns True if key is in the table and False otherwise.\n :param: The key which may or may not exist in the table\n :precondition: A hash table must already exist\n :postcondition: Returns true or false depending on whether the key exists in the table or not\n :complexity: Best Case = Worst Case = O(n), where n is the size of the hash table\n '''\n\n position = self.hash(key)\n fixed_position = position\n for i in range(self.table_size):\n if self.array[position] is None: # at that key position there is an empty slot, hence false since key does not exist there\n return False\n elif self.array[position][0] == key: # if the same key is found we return true as the key is found\n return True\n else:\n position = (fixed_position + ((i+1) ** 2)) % self.table_size # if the key is not found, we look for the next position in the hash table\n return False\n\n def delete(self, key):\n '''\n This function takes key as the input and then deletes the entry corresponding to the key, raises a keyerror if key not in the table\n :param: key\n :precondition: None\n :postcondition: None\n :complexity: Best Case O(1) and Worst Case O(n)\n '''\n\n if key not in self:\n raise KeyError\n position = self.hash(key)\n fixed_position = position\n for i in range(self.table_size):\n if self.array[position][0] == key:\n self.array[position] = (None, None)\n return\n else:\n position = (fixed_position + ((i + 1) ** 2)) % self.table_size\n\n def rehash(self, size):\n '''\n This function changes the size of the table and reinserts all the key value pairs. 
It raises a valuerror if size < 1\n        :param: size\n        :precondition: None\n        :postcondition: None\n        :complexity: Best Case O(1) and Worst Case O(n)\n        '''\n\n        if size < 1:\n            raise ValueError\n        resize_table = QuadraticProbeHashTable(size)\n        for i in range(self.table_size):\n            if self.array[i] is None or self.array[i] == (None, None):\n                continue\n            else:\n                resize_table[self.array[i][0]] = self.array[i][1]\n        self.array = resize_table.array\n        self.table_size = resize_table.table_size\n\ndef read_file(L):\n    '''\n    This function reads the file splitwords, adds the distinct words in the file into a Quadratic Probe Hash Table and appends them into a list as well\n    :param: We pass the Hashtable through as an argument\n    :precondition: The file splitwords.txt must already exist\n    :postcondition: The HashTable is filled up with the respective keys(words) and their initial frequency values of 0 as a counter.\n    :return: The word list and the hashtable is returned\n    :complexity: Best Case = Worst Case = O(n), where n is the length of the contents (no of rows in the txt file)\n    '''\n\n    word_list = []\n    file = open('splitwords.txt', 'r')\n    contents = file.readlines()\n    for word in range(len(contents)):\n        word_list.append(contents[word].rstrip('\\n'))\n        L[contents[word].rstrip('\\n')] = 0\n    return word_list, L\n\ndef calc_freq(L):\n    '''\n    This function sorts the word list and increments the frequency of the words in the hashtable, creates a final unique word list and sorts it\n    :param: We pass the hashtable through as an argument\n    :precondition: The word_list and hashtable must exist with the respective words and values\n    :postcondition: The hashtable now contains the words with their respective frequencies and the word_list is also sorted, and a new unique sorted list is created\n    :return: Sorted word list and the hashtable\n    :complexity: Best Case = Worst Case = O(n), where n is the length of the list\n    '''\n\n    word_list, L = read_file(L)\n    final_word_list = []\n    for word in word_list:\n        L[word] += 1\n        if word not in final_word_list:\n            final_word_list.append(word)\n    quick_sort(final_word_list)\n    return final_word_list, L\n\ndef write_file(L):\n    '''\n    This function writes the words in sorted order with their respective frequencies to an output text file\n    :param: The hash table\n    :precondition: None\n    :postcondition: The output is written to a text file\n    :return: None\n    :complexity: Best Case = Worst Case = O(n), where n is the length of the final word list\n    '''\n\n    file = open('frequencies.txt', 'w')\n    final_word_list, L = calc_freq(L)\n    for word in final_word_list:\n        file.write(word + '\\t' + str(L[word]) + '\\n')\n    file.close()\n\nif __name__ == '__main__':\n    size = 402221\n    h = QuadraticProbeHashTable(size)\n    write_file(h)","sub_path":"Algorithms/Dynamic Programming/Minimum Cost BST/calcfreq.py","file_name":"calcfreq.py","file_ext":"py","file_size_in_byte":9093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"113662704","text":"from django.contrib import admin\nfrom django.contrib.auth import views as auth_views\nfrom django.urls import path\nfrom django.conf.urls import url\nfrom django.conf.urls.static import static\nfrom django.conf import settings\nfrom .views import index, login_frame, recovery, agregar_mascota, reg_admin, reg_user, cerrar, listar_mascota, eliminar_mascota, actualizar_mascota\nfrom rest_framework.urlpatterns import format_suffix_patterns\nfrom api_perris import views \n\nurlpatterns = [\n    # ! 
STANDARD URLS\n    url(r'^$', login_frame, name='login'),\n    url(r'^index/$', index, name='index'),\n    url(r'^login_frame/$', login_frame, name='login'),\n    url(r'^add_mascota/$', agregar_mascota, name='add'),\n    url(r'^reg_adopt/$', reg_user, name='reg_ad'),\n    url(r'^cerrar/$', cerrar, name='cerrar'),\n    url(r'^pw_recover/$', recovery, name='recuperar'),\n    \n    # ! CRUD MASCOTAS\n    url(r'^list_mascota/$', listar_mascota, name='list'),\n    url(r'^del_mascota/$', eliminar_mascota, name='delete'),\n    url(r'^upd_mascota/$', actualizar_mascota, name='update'),\n    \n    # ! API URLS\n    url(r'^lista_vol/$',views.VoluntariosList.as_view()),\n    url(r'^lista_res/$',views.RescatadosList.as_view()),\n    url(r'^lista_raz/$',views.RazasList.as_view()),\n    url(r'^lista_est/$',views.EstadosList.as_view()),\n    url(r'^detalle_res/(?P<pk>[0-9]+)$',views.RescatadosDetail.as_view()),\n    url(r'^detalle_est/(?P<pk>[0-9]+)$',views.EstadosDetail.as_view()),\n    url(r'^filtro_res/(?P<pk>[0-9]+)$',views.EstadosFilter.as_view()),\n    url(r'^get_raza/(?P<pk>[0-9]+)$',views.NombreRaza.as_view()), \n\n    # ! SERVICE WORKER\n]\n\nurlpatterns +=static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT)\n\n","sub_path":"core/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"329574470","text":"'''\nApplication init\n'''\nimport os\nfrom flask import request, Blueprint, Flask\nfrom sqlalchemy import MetaData\nfrom flask_restplus import Api\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_bcrypt import Bcrypt\nfrom .config import config_by_name\n\n\n_org_url: str = os.environ.get('OKTA_ORG_URL')\n_token_url: str = f'{_org_url}/oauth2/default/v1/token'\n_auth_url: str = f'{_org_url}/oauth2/v1/authorize'\n\nauthorizations = {\n    'apikey': {\n        'type': 'apiKey',\n        'in': 'header',\n        'name': 'X-API'\n    },\n    'oauth2': {\n        'type': 'oauth2',\n        'flow': 'accessCode',\n        'tokenUrl': _token_url,\n        'authorizationUrl': _auth_url,\n        'redirect_uri': 'http://localhost:3330/authorization-code/callback',\n        'scopes': {\n            'read': 'Grant read-only access',\n            'write': 'Grant read-write access',\n        }\n    }\n}\n\nnaming_convention = {\n    \"ix\": 'ix_%(column_0_label)s',\n    \"uq\": \"uq_%(table_name)s_%(column_0_name)s\",\n    \"ck\": \"ck_%(table_name)s_%(column_0_name)s\",\n    \"fk\": \"fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s\",\n    \"pk\": \"pk_%(table_name)s\"\n}\ndb = SQLAlchemy(metadata=MetaData(naming_convention=naming_convention))\n\nflask_bcrypt = Bcrypt()\n\nblueprint = Blueprint('api', __name__, url_prefix='/api')\n\napi = Api(blueprint,\n          title='Flask RESPLUS API BOILER-PLATE WITH JWT',\n          version='1.0',\n          description='a boilerplate for flask restplus web service',\n          validate=True,\n          authorizations=authorizations,\n          security=['apikey', {'oauth2': 'read'}]\n          )\n\n\ndef register_namespaces(pdarineApi: Api) -> None:\n    '''Flask namespaces registration'''\n    from .controller.auth_controller import api as auth_ns\n    from .controller.user_controller import API as user_ns\n    pdarineApi.add_namespace(user_ns, path='/users')\n    pdarineApi.add_namespace(auth_ns, path='/auth')\n\n\ndef register_exceptions() -> None:\n    '''Global exceptions handlers registration'''\n    from app.main.exception import application_exception\n\n\ndef create_app(config_name: str = 'dev') -> Flask:\n    ''' APP factory '''\n    app = Flask(__name__)\n    app.config.from_object(config_by_name[config_name])\n    db.init_app(app)\n    flask_bcrypt.init_app(app)\n    app.register_blueprint(blueprint)\n    
app.url_map.strict_slashes = False\n app.app_context().push()\n register_exceptions()\n return app\n","sub_path":"app/main/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"584033075","text":"#!/usr/bin/env python\n\nimport rospy\nfrom std_msgs.msg import Int32MultiArray, Float32MultiArray, String\nfrom geometry_msgs.msg import PoseStamped\nimport tf\nimport numpy as np\n\ndef pose_to_xyth(pose):\n th = tf.transformations.euler_from_quaternion((pose.orientation.x,\n pose.orientation.y,\n pose.orientation.z,\n pose.orientation.w))[2]\n return [pose.position.x, pose.position.y, th]\n\n\nclass Supervisor:\n\n def __init__(self):\n rospy.init_node('turtlebot_supervisor', anonymous=True)\n self.trans_listener = tf.TransformListener()\n self.trans_broad = tf.TransformBroadcaster()\n\n rospy.Subscriber('/move_base_simple/goal', PoseStamped, self.rviz_goal_callback) # rviz \"2D Nav Goal\"\n\n self.waypoint_locations = {} # dictionary that caches the most updated locations of each mission waypoint\n self.waypoint_offset = PoseStamped()\n self.waypoint_offset.pose.position.z = .4 # waypoint is located 40cm in front of the AprilTag, facing it\n quat = tf.transformations.quaternion_from_euler(0., np.pi/2, np.pi/2)\n self.waypoint_offset.pose.orientation.x = quat[0]\n self.waypoint_offset.pose.orientation.y = quat[1]\n self.waypoint_offset.pose.orientation.z = quat[2]\n self.waypoint_offset.pose.orientation.w = quat[3]\n\n def rviz_goal_callback(self, msg):\n pose_to_xyth(msg.pose) # example usage of the function pose_to_xyth (defined above)\n # this callback does nothing... yet!\n\n def update_waypoints(self):\n for tag_number in self.mission:\n try:\n self.waypoint_offset.header.frame_id = \"/tag_{0}\".format(tag_number)\n self.waypoint_locations[tag_number] = self.trans_listener.transformPose(\"/map\", self.waypoint_offset)\n except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):\n pass\n\n def run(self):\n rate = rospy.Rate(1) # 1 Hz, change this to whatever you like\n while not rospy.is_shutdown():\n self.update_waypoints()\n\n # FILL ME IN!\n\n rate.sleep()\n\nif __name__ == '__main__':\n sup = Supervisor()\n sup.run()\n","sub_path":"scripts/supervisor.py","file_name":"supervisor.py","file_ext":"py","file_size_in_byte":2327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"326838627","text":"def fatorial (n):\n fat=1\n while n>0:\n fat*=n\n n-=1\n return fat\n \ndef calcula_euler(x,n):\n a=0\n while a 100:\n umidade = \"Erro de leitura\"\n temperatura = \"Erro de leitura\"\n elif temperatura >= 70:\n SalvarAlarme(sala_id, \"Temperatura maior que 70ºC\")\n return umidade, temperatura\n# /Função para ler a temperatura e umidade\n\n# Função para ler a luminosidade\ndef lerLuminosidade(luminosidade, sala_id):\n if luminosidade == 6:\n # Nenhum sensor\n nivel_luminosidade = \"Não usado\"\n elif luminosidade == 7:\n # Sensor de luminosidade BH1750\n import smbus\n DEVICE = 0x23 # Endereço I2C para o sensor de luminosidade\n POWER_DOWN = 0x00\n POWER_ON = 0x01\n RESET = 0x07\n CONTINUOUS_LOW_RES_MODE = 0x13\n CONTINUOUS_HIGH_RES_MODE_1 = 0x10\n CONTINUOUS_HIGH_RES_MODE_2 = 0x11\n ONE_TIME_HIGH_RES_MODE_1 = 0x20\n ONE_TIME_HIGH_RES_MODE_2 = 0x21\n ONE_TIME_LOW_RES_MODE = 0x23\n bus = smbus.SMBus(1)\n try:\n leitura = bus.read_i2c_block_data(DEVICE,ONE_TIME_HIGH_RES_MODE_1)\n 
nivel_luminosidade = ((leitura[1] + (256 * leitura[0])) / 1.2)\n except:\n nivel_luminosidade = \"Erro de leitura\"\n else:\n # Caso seja passado um parâmetro incorreto\n nivel_luminosidade = \"Erro de Config\"\n return nivel_luminosidade\n# /Função para ler a luminosidade\n\n# Função para ler as portas de expansão\ndef lerPortaExpansao(porta, funcao, sala_id):\n if funcao == 1:\n # Nenhum sensor instalado\n status = \"Não usado\"\n elif funcao == 2:\n # Sensor de abertura de janela (Magnético)\n GPIO.setup(porta, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n if GPIO.input(porta) == False:\n status = \"Janela aberta\"\n else:\n status = \"Janela fechada\"\n elif funcao == 3:\n # Sensor de abertura de porta (Magnético)\n GPIO.setup(porta, GPIO.IN, pull_up_down=GPIO.PUD_UP) \n if GPIO.input(porta) == False:\n status = \"Porta aberta\"\n else:\n status = \"Porta fechada\"\n elif funcao == 4:\n # Sensor de gás e fumaça (MQ-2)\n GPIO.setup(porta, GPIO.IN)\n if GPIO.input(porta) == False:\n status = \"Fumaça detectada\"\n SalvarAlarme(sala_id, \"Fumaça detectada\")\n else:\n status = \"Sem fumaça\"\n elif funcao == 5:\n # Sensor de movimentação (HC-SR501)\n GPIO.setup(porta, GPIO.IN)\n # GPIO.add_event_detect(pino_PIR, GPIO.RISING, callback=LIGHTS)\n if GPIO.input(porta) == True:\n status = \"Presença detectada\"\n else:\n status = \"Sem movimentação\"\n else:\n # Caso seja passado um parâmetro incorreto\n status = \"Erro de Config\"\n return status\n# /Função para ler as portas de expansão\n\n# Função para ler os sensores internos e as portas de expansão\ndef lerSensores(sala_id, dht, luminosidade, funcao1, funcao2, funcao3, funcao4, funcao5, funcao6, funcao7, funcao8, funcao9, funcao10, funcao11, funcao12, funcao13, funcao14, funcao15, funcao16, funcao17, intervalo_leitura):\n Timer(intervalo_leitura , lerSensores, args=(sala_id, dht, luminosidade, funcao1, funcao2, funcao3, funcao4, funcao5, funcao6, funcao7, funcao8, funcao9, funcao10, funcao11, funcao12, funcao13, funcao14, funcao15, funcao16, funcao17,intervalo_leitura,)).start()\n\n umidade,temperatura=lerTemperaturaUmidade(dht, sala_id) # Leitura de temperatura e umidade\n nivel_luminosidade = lerLuminosidade(luminosidade, sala_id) # Leitura da luminosidade\n\n PortasExpansao = {\n #\"porta\": [GPIO,função da porta]\n \"porta01\": [26,funcao1],\n \"porta02\": [19,funcao2],\n \"porta03\": [6,funcao3],\n \"porta04\": [13,funcao4],\n \"porta05\": [5,funcao5],\n \"porta06\": [11,funcao6],\n \"porta07\": [9,funcao7],\n \"porta08\": [10,funcao8],\n \"porta09\": [22,funcao9],\n \"porta10\": [27,funcao10],\n \"porta11\": [8,funcao11],\n \"porta12\": [25,funcao12],\n \"porta13\": [23,funcao13],\n \"porta14\": [18,funcao14],\n \"porta15\": [4,funcao15],\n \"porta16\": [21,funcao16],\n \"porta17\": [20,funcao17]\n }\n status = list() # variável para armazenar o status das 17 portas de expansão\n status.append(0) # entre status[1] e status[17]\n for porta,funcao in sorted(PortasExpansao.items()):\n status.append(lerPortaExpansao(funcao[0],funcao[1],sala_id)) # Leitura das 17 portas de expansão\n\n \"\"\"\n # Print de conferência\n print (\" ____________________\")\n print (\" | - Resultado -\")\n print (\" | Sala ID:\",sala_id)\n print (\" | Umidade:\",umidade,\"%UR\")\n print (\" | Temperatura:\",temperatura,\"ºC\")\n print (\" | Luminosidade: \",nivel_luminosidade)\n for i in range(1,18):\n print (\" | Sensor %d: %s\" %(i,status[i]))\n print (\" |___________________\")\n \"\"\"\n\n # Salva valores lidos dos sensores no banco de dados local\n 
SalvarSalasLog(sala_id, umidade, temperatura, nivel_luminosidade, status[1], status[2], status[3], status[4], status[5], status[6], status[7], status[8], status[9], status[10], status[11], status[12], status[13], status[14], status[15], status[16], status[17])\n# /Função para ler os sensores internos e as portas de expansão\n\nprint (\"= = = Fim LerSensores.py = = =\")\n","sub_path":"unidade-de-monitoramento/LerSensores.py","file_name":"LerSensores.py","file_ext":"py","file_size_in_byte":7642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"463826153","text":"# Copyright 2015-2016 ARM Limited\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"This module contains the class for plotting and\ncustomizing Line/Linear Plots with :mod:`trappy.trace.FTrace`\nThis plot only works when run from an IPython notebook\n\"\"\"\n\nimport matplotlib.pyplot as plt\nfrom trappy.plotter import AttrConf\nfrom trappy.plotter import Utils\nfrom trappy.plotter.Constraint import ConstraintManager\nfrom trappy.plotter.ILinePlotGen import ILinePlotGen\nfrom trappy.plotter.AbstractDataPlotter import AbstractDataPlotter\nfrom trappy.plotter.ColorMap import ColorMap\nfrom trappy.plotter import IPythonConf\nimport pandas as pd\n\nif not IPythonConf.check_ipython():\n raise ImportError(\"Ipython Environment not Found\")\n\nclass ILinePlot(AbstractDataPlotter):\n \"\"\"\n This class uses :mod:`trappy.plotter.Constraint.Constraint` to\n represent different permutations of input parameters. These\n constraints are generated by creating an instance of\n :mod:`trappy.plotter.Constraint.ConstraintManager`.\n\n :param trace: The input data\n :type trace: :mod:`trappy.trace.FTrace` or :mod:`pandas.DataFrame`, list or single\n\n :param column: specifies the name of the column to\n be plotted.\n :type column: (str, list(str))\n\n :param templates: TRAPpy events\n\n .. note::\n\n This is not required if a :mod:`pandas.DataFrame` is\n used\n\n :type templates: :mod:`trappy.base.Base`\n\n :param filters: Filter the column to be plotted as per the\n specified criteria. For Example:\n ::\n\n filters =\n {\n \"pid\": [ 3338 ],\n \"cpu\": [0, 2, 4],\n }\n :type filters: dict\n\n :param per_line: Used to control the number of graphs\n in each graph subplot row\n :type per_line: int\n\n :param concat: Draw all the pivots on a single graph\n :type concat: bool\n\n :param permute: Draw one plot for each of the traces specified\n :type permute: bool\n\n :param fill: Fill the area under the plots\n :type fill: bool\n\n :param drawstyle: Set the drawstyle to a matplotlib compatible\n drawing style.\n\n .. note::\n\n Only \"steps-post\" is supported as a valid value for\n the drawstyle. This creates a step plot.\n\n :type drawstyle: str\n\n :param signals: A string of the type event_name:column\n to indicate the value that needs to be plotted\n\n .. 
note::\n\n - Only one of `signals` or both `templates` and\n `columns` should be specified\n - Signals format won't work for :mod:`pandas.DataFrame`\n input\n\n :type signals: str\n \"\"\"\n\n def __init__(self, traces, templates=None, **kwargs):\n # Default keys, each can be overridden in kwargs\n self._layout = None\n super(ILinePlot, self).__init__(traces=traces,\n templates=templates)\n\n self.set_defaults()\n\n for key in kwargs:\n self._attr[key] = kwargs[key]\n\n if \"signals\" in self._attr:\n self._describe_signals()\n\n self._check_data()\n\n if \"column\" not in self._attr:\n raise RuntimeError(\"Value Column not specified\")\n\n if self._attr[\"drawstyle\"] and self._attr[\"drawstyle\"].startswith(\"steps\"):\n self._attr[\"step_plot\"] = True\n\n zip_constraints = not self._attr[\"permute\"]\n\n self.c_mgr = ConstraintManager(traces, self._attr[\"column\"], self.templates,\n self._attr[\"pivot\"],\n self._attr[\"filters\"], zip_constraints)\n\n\n def savefig(self, *args, **kwargs):\n raise NotImplementedError(\"Not Available for ILinePlot\")\n\n def view(self, test=False):\n \"\"\"Displays the graph\"\"\"\n\n # Defer installation of IPython components\n # to the .view call to avoid any errors at\n # when importing the module. This facilitates\n # the importing of the module from outside\n # an IPython notebook\n IPythonConf.iplot_install(\"ILinePlot\")\n\n if self._attr[\"concat\"]:\n self._plot_concat()\n else:\n self._plot(self._attr[\"permute\"])\n\n def set_defaults(self):\n \"\"\"Sets the default attrs\"\"\"\n self._attr[\"per_line\"] = AttrConf.PER_LINE\n self._attr[\"concat\"] = AttrConf.CONCAT\n self._attr[\"filters\"] = {}\n self._attr[\"pivot\"] = AttrConf.PIVOT\n self._attr[\"permute\"] = False\n self._attr[\"drawstyle\"] = None\n self._attr[\"step_plot\"] = False\n self._attr[\"fill\"] = AttrConf.FILL\n self._attr[\"draw_line\"] = True\n self._attr[\"scatter\"] = AttrConf.PLOT_SCATTER\n self._attr[\"point_size\"] = AttrConf.POINT_SIZE\n self._attr[\"map_label\"] = {}\n\n def _plot(self, permute):\n \"\"\"Internal Method called to draw the plot\"\"\"\n pivot_vals, len_pivots = self.c_mgr.generate_pivots(permute)\n\n self._layout = ILinePlotGen(self._attr[\"per_line\"],\n len_pivots,\n **self._attr)\n plot_index = 0\n for p_val in pivot_vals:\n data_frame = pd.Series()\n for constraint in self.c_mgr:\n\n if permute:\n trace_idx, pivot = p_val\n if constraint.trace_index != trace_idx:\n continue\n title = constraint.get_data_name() + \":\"\n legend = constraint._column\n else:\n pivot = p_val\n title = \"\"\n legend = str(constraint)\n\n result = constraint.result\n if pivot in result:\n data_frame[legend] = result[pivot]\n\n if pivot == AttrConf.PIVOT_VAL:\n title += \",\".join(self._attr[\"column\"])\n else:\n title += \"{0}: {1}\".format(self._attr[\"pivot\"], self._attr[\"map_label\"].get(pivot, pivot))\n\n self._layout.add_plot(plot_index, data_frame, title)\n plot_index += 1\n\n self._layout.finish()\n\n def _plot_concat(self):\n \"\"\"Plot all lines on a single figure\"\"\"\n\n pivot_vals, _ = self.c_mgr.generate_pivots()\n plot_index = 0\n\n self._layout = ILinePlotGen(self._attr[\"per_line\"], len(self.c_mgr),\n **self._attr)\n\n for constraint in self.c_mgr:\n result = constraint.result\n title = str(constraint)\n data_frame = pd.Series()\n\n for pivot in pivot_vals:\n if pivot in result:\n if pivot == AttrConf.PIVOT_VAL:\n key = \",\".join(self._attr[\"column\"])\n else:\n key = \"{0}: {1}\".format(self._attr[\"pivot\"], self._attr[\"map_label\"].get(pivot, 
pivot))\n\n data_frame[key] = result[pivot]\n\n self._layout.add_plot(plot_index, data_frame, title)\n plot_index += 1\n\n self._layout.finish()\n","sub_path":"trappy/plotter/ILinePlot.py","file_name":"ILinePlot.py","file_ext":"py","file_size_in_byte":7632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"234231183","text":"from random import randrange\n\ndef list(length, max):\n list=[]\n for i in range(0,length):\n list.append(randrange(max))\n return list\n\ndef DialogList():\n print(\"Введите значение для длины списка:\")\n length = int(input())\n print(\"Введите значение, которое будет максимальным:\")\n max = int(input())\n\n print(list(length, max))\n\nDialogList()\n","sub_path":"6 - list.py","file_name":"6 - list.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"643524505","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Aug 12 11:04:33 2018\r\n\r\n@author: bhaumik\r\n\"\"\"\r\n\r\nimport pycuda.autoinit\r\nimport pycuda.driver as drv\r\nimport numpy\r\nfrom pycuda.compiler import SourceModule\r\n\r\n# Kernel function\r\nmod = SourceModule(\"\"\"\r\n #include \r\n __global__ void add_num(float *d_result, float *d_a, float *d_b)\r\n {\r\n const int i = threadIdx.x; \r\n d_result[i] = d_a[i] + d_b[i];\r\n }\r\n\"\"\")\r\nadd_num = mod.get_function(\"add_num\")\r\n\r\n# Defining host variables\r\nh_a = numpy.random.randn(1).astype(numpy.float32)\r\nh_b = numpy.random.randn(1).astype(numpy.float32)\r\nh_result = numpy.zeros_like(h_a)\r\n\r\n# Allocating memory on Device\r\nd_a = drv.mem_alloc(h_a.nbytes)\r\nd_b = drv.mem_alloc(h_b.nbytes)\r\nd_result = drv.mem_alloc(h_result.nbytes)\r\n\r\n# Coping value of host variables in device memory\r\ndrv.memcpy_htod(d_a, h_a)\r\ndrv.memcpy_htod(d_b, h_b)\r\n\r\n# Calling kernel\r\nadd_num(\r\n d_result, d_a, d_b,\r\n block=(1, 1, 1), grid=(1, 1))\r\n\r\n# Coping result from device memory to host\r\ndrv.memcpy_dtoh(h_result, d_result)\r\n\r\nprint(\"Addition on GPU:\")\r\nprint(h_a[0], \"+\", h_b[0], \"=\", h_result[0])\r\n","sub_path":"Chapter11/04_add_num.py","file_name":"04_add_num.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"116681956","text":"# ------------------------------------\n# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT License.\n# ------------------------------------\nfrom collections import defaultdict\nimport functools\nfrom typing import TYPE_CHECKING\n\nfrom azure.core.exceptions import DecodeError, HttpResponseError, ResourceExistsError, ResourceNotFoundError\nfrom azure.core.pipeline.policies import ContentDecodePolicy\n\nif TYPE_CHECKING:\n # pylint:disable=unused-import,ungrouped-imports\n from typing import Optional, Type\n from azure.core.pipeline.transport import HttpResponse\n\n\ndef _get_exception_for_key_vault_error(cls, response):\n # type: (Type[HttpResponseError], HttpResponse) -> HttpResponseError\n \"\"\"Construct cls (HttpResponseError or subclass thereof) with Key Vault's error message.\"\"\"\n\n try:\n body = ContentDecodePolicy.deserialize_from_http_generics(response)\n message = \"({}) {}\".format(body[\"error\"][\"code\"], body[\"error\"][\"message\"]) # type: Optional[str]\n except (DecodeError, KeyError):\n # Key Vault error response bodies should have the expected shape and be deserializable.\n # If we somehow land here, 
we'll take HttpResponse's default message.\n message = None\n\n return cls(message=message, response=response)\n\n\n# errors map to HttpResponseError...\n_default = functools.partial(_get_exception_for_key_vault_error, HttpResponseError)\n\n# ...unless this mapping specifies another type\n_code_to_core_error = {404: ResourceNotFoundError, 409: ResourceExistsError}\n\n\nclass _ErrorMap(defaultdict):\n \"\"\"A dict whose 'get' method returns a default value.\n\n defaultdict would be preferable but defaultdict.get returns None for keys having no value\n (azure.core.exceptions.map_error calls error_map.get)\n \"\"\"\n\n def get(self, key, value=None): # pylint:disable=unused-argument\n return self[key]\n\n\n# map status codes to callables returning appropriate azure-core errors\nerror_map = _ErrorMap(lambda: _default,\n {\n status_code: functools.partial(_get_exception_for_key_vault_error, cls)\n for status_code, cls in _code_to_core_error.items()\n }\n)\n","sub_path":"sdk/keyvault/azure-keyvault-secrets/azure/keyvault/secrets/_shared/exceptions.py","file_name":"exceptions.py","file_ext":"py","file_size_in_byte":2145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"449448418","text":"#!\r\n# -*- coding: utf-8 -*-\r\n\r\nfrom __future__ import absolute_import\r\nfrom lxml import etree\r\n\r\nimport requests\r\nimport datetime\r\nimport sys\r\nimport re\r\n\r\nsys.path.append('.')\r\n\r\nfrom utils import Connect2MySQL\r\n\r\n\r\nclass Main_Lottery():\r\n def __init__(self):\r\n self.sql = Connect2MySQL()\r\n\r\n def get_all_content(self):\r\n history_url = ''\r\n\r\n a = requests.get(history_url)\r\n b = etree.HTML(a.text)\r\n\r\n results = []\r\n\r\n results.append(b.xpath('//*[@id=\"tdata\"]/tr[@class=\"t_tr1\"]/td[1]')) # 期号\r\n results.append(b.xpath('//*[@id=\"tdata\"]/tr[@class=\"t_tr1\"]/td[2]')) # 第1个蓝球\r\n results.append(b.xpath('//*[@id=\"tdata\"]/tr[@class=\"t_tr1\"]/td[3]')) # 第2个蓝球\r\n results.append(b.xpath('//*[@id=\"tdata\"]/tr[@class=\"t_tr1\"]/td[4]')) # 第3个蓝球\r\n results.append(b.xpath('//*[@id=\"tdata\"]/tr[@class=\"t_tr1\"]/td[5]')) # 第4个蓝球\r\n results.append(b.xpath('//*[@id=\"tdata\"]/tr[@class=\"t_tr1\"]/td[6]')) # 第5个蓝球\r\n results.append(b.xpath('//*[@id=\"tdata\"]/tr[@class=\"t_tr1\"]/td[7]')) # 第6个蓝球\r\n results.append(b.xpath('//*[@id=\"tdata\"]/tr[@class=\"t_tr1\"]/td[8]')) # 第1个红球\r\n results.append(b.xpath('//*[@id=\"tdata\"]/tr[@class=\"t_tr1\"]/td[10]')) # 奖池\r\n results.append(b.xpath('//*[@id=\"tdata\"]/tr[@class=\"t_tr1\"]/td[11]')) # 一等奖注数\r\n results.append(b.xpath('//*[@id=\"tdata\"]/tr[@class=\"t_tr1\"]/td[12]')) # 一等奖奖金\r\n results.append(b.xpath('//*[@id=\"tdata\"]/tr[@class=\"t_tr1\"]/td[13]')) # 二等奖注数\r\n results.append(b.xpath('//*[@id=\"tdata\"]/tr[@class=\"t_tr1\"]/td[14]')) # 二等奖奖金\r\n results.append(b.xpath('//*[@id=\"tdata\"]/tr[@class=\"t_tr1\"]/td[15]')) # 奖总投注额\r\n results.append(b.xpath('//*[@id=\"tdata\"]/tr[@class=\"t_tr1\"]/td[16]')) # 日期 \r\n\r\n length1 = len(results[0])\r\n length2 = len(results)\r\n\r\n # print(type(results[1][1].xpath('string(.)')))\r\n # with open('./lottery.csv', 'w', encoding='utf-8') as fp:\r\n # for i in range(length1):\r\n # for j in range(length2):\r\n # if j <= length2-2:\r\n # fp.write('%s,' % (results[j][i].xpath('string(.)')))\r\n # elif j == length2-1:\r\n # fp.write('%s\\n' % (results[j][i].xpath('string(.)')))\r\n # else:\r\n # break\r\n\r\n for i in range(length1):\r\n temp = []\r\n for j in range(length2):\r\n if j <= length2-2:\r\n 
temp.append(results[j][i].xpath('string(.)'))\r\n elif j == length2-1:\r\n temp.append(results[j][i].xpath('string(.)'))\r\n else:\r\n break\r\n \r\n date_ = datetime.datetime(int(str(temp[-1])[:4]), int(str(temp[-1])[5:7]), int(str(temp[-1])[8:])).strftime('%Y%m%d')\r\n temp_ = [int(W.replace(',', '')) for W in temp[:-1]]\r\n temp_.append(date_)\r\n # print(temp)\r\n # print('******')\r\n self.sql._insert(temp_)\r\n\r\n\r\n print('获取完毕!')\r\n\r\n def update_lottery(self):\r\n url = ''\r\n reg = re.compile('\\d{1,50}')\r\n\r\n content = requests.get(url).text\r\n content_e = etree.HTML(content)\r\n \r\n value = []\r\n \r\n value.append(int(''.join(content_e.xpath('//*[@class=\"td_title01\"]//font[@class=\"cfont2\"]//text()')).replace(',', '')))\r\n if self.sql._exist_data(value[0]):\r\n return 0\r\n \r\n red_ball_list = [int(i.xpath('string(.)')) for i in content_e.xpath('//*[@class=\"ball_red\"]')]\r\n for num in red_ball_list:\r\n value.append(num)\r\n\r\n value.append(int(''.join(content_e.xpath('//*[@class=\"ball_blue\"]/text()'))))\r\n value.append(int(''.join(content_e.xpath('//table[@class=\"kj_tablelist02\"]/tr[3]/td/span[2]/text()'))[:-2].replace(',', '')))\r\n value.append(int(''.join(reg.findall(''.join(content_e.xpath('//table[@class=\"kj_tablelist02\"]/tr[3]/td[2]/text()')))).replace(',', '')))\r\n value.append(int(''.join(reg.findall(''.join(content_e.xpath('//table[@class=\"kj_tablelist02\"]/tr[3]/td[3]/text()')))).replace(',' , '')))\r\n value.append(int(''.join(reg.findall(''.join(content_e.xpath('//table[@class=\"kj_tablelist02\"]/tr[4]/td[2]/text()')))).replace(',', '')))\r\n value.append(int(''.join(reg.findall(''.join(content_e.xpath('//table[@class=\"kj_tablelist02\"]/tr[4]/td[3]/text()')))).replace(',', '')))\r\n value.append(int(''.join(content_e.xpath('//table[@class=\"kj_tablelist02\"]/tr[3]/td/span[1]/text()'))[:-2].replace(',', '')))\r\n\r\n date_ = reg.findall(''.join(content_e.xpath('//td[@class=\"td_title01\"]/span[@class=\"span_right\"]/text()')))[:-3]\r\n value.append(datetime.datetime(int(date_[0]), int(date_[1]), int(date_[2])).strftime('%Y%m%d'))\r\n\r\n # print(value)\r\n self.sql._insert(value)\r\n\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n lo_run = Main_Lottery()\r\n\r\n if lo_run.sql._is_existed():\r\n lo_run.get_all_content()\r\n\r\n lo_run.update_lottery()\r\n","sub_path":"lottery/lottery.py","file_name":"lottery.py","file_ext":"py","file_size_in_byte":5192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"281890566","text":"from .verilog_modeling import Site, Bel\n\n\ndef get_ioi_site(db, grid, tile, site):\n \"\"\"\n Returns a prxjray.tile.Site object for given ILOGIC/OLOGIC/IDELAY site.\n \"\"\"\n\n gridinfo = grid.gridinfo_at_tilename(tile)\n tile_type = db.get_tile_type(gridinfo.tile_type)\n\n site_type, site_y = site.split(\"_\")\n\n sites = tile_type.get_instance_sites(gridinfo)\n sites = [s for s in sites if site_type in s.name]\n sites.sort(key=lambda s: s.y)\n\n if len(sites) == 1:\n iob_site = sites[0]\n else:\n iob_site = sites[1 - int(site[-1])]\n\n return iob_site\n\n\ndef process_idelay(top, features):\n\n aparts = features[0].feature.split('.')\n # tile_name = aparts[0]\n ioi_site = get_ioi_site(top.db, top.grid, aparts[0], aparts[1])\n\n site = Site(features, ioi_site)\n\n if site.has_feature(\"IN_USE\") and (site.has_feature(\"IDELAY_VALUE\")\n or site.has_feature(\"ZIDELAY_VALUE\")):\n bel = Bel('IDELAYE2')\n\n if site.has_feature(\"CINVCTRL_SEL\"):\n 
bel.parameters[\"CINVCTRL_SEL\"] = '\"TRUE\"'\n\n if site.has_feature(\"PIPE_SEL\"):\n bel.parameters['PIPE_SEL'] = '\"TRUE\"'\n\n if site.has_feature(\"HIGH_PERFORMANCE_MODE\"):\n bel.parameters['HIGH_PERFORMANCE_MODE'] = '\"TRUE\"'\n\n if site.has_feature(\"DELAY_SRC_DATAIN\"):\n bel.parameters['DELAY_SRC'] = '\"DATAIN\"'\n site.add_sink(bel, 'DATAIN', 'DATAIN')\n elif site.has_feature(\"DELAY_SRC_IDATAIN\"):\n bel.parameters['DELAY_SRC'] = '\"IDATAIN\"'\n site.add_sink(bel, 'IDATAIN', 'IDATAIN')\n\n if site.has_feature(\"IDELAY_VALUE\"):\n idelay_value = site.decode_multi_bit_feature('IDELAY_VALUE')\n bel.parameters['IDELAY_VALUE'] = idelay_value\n\n if site.has_feature(\"IS_DATAIN_INVERTED\"):\n bel.parameters['IS_DATAIN_INVERTED'] = 1\n\n if site.has_feature(\"IS_IDATAIN_INVERTED\"):\n bel.parameters['IS_IDATAIN_INVERTED'] = 1\n\n if site.has_feature(\"IDELAY_TYPE_VARIABLE\"):\n bel.parameters['IDELAY_TYPE'] = '\"VARIABLE\"'\n elif site.has_feature(\"IDELAY_TYPE_VAR_LOAD\"):\n bel.parameters['IDELAY_TYPE'] = '\"VAR_LOAD\"'\n else:\n bel.parameters['IDELAY_TYPE'] = '\"FIXED\"'\n\n # Adding sinks\n site.add_sink(bel, 'C', 'C')\n site.add_sink(bel, 'CE', 'CE')\n site.add_sink(bel, 'CINVCTRL', 'CINVCTRL')\n site.add_sink(bel, 'INC', 'INC')\n site.add_sink(bel, 'LD', 'LD')\n site.add_sink(bel, 'LDPIPEEN', 'LDPIPEEN')\n site.add_sink(bel, 'REGRST', 'REGRST')\n\n # Adding sources\n site.add_source(bel, 'DATAOUT', 'DATAOUT')\n\n site.add_bel(bel)\n\n # TODO: handle CNTVALUEIN and CNTVALUEOUT\n\n top.add_site(site)\n\n\ndef process_ilogic_idelay(top, features):\n\n ilogic_features = features['ILOGIC']\n idelay_features = features['IDELAY']\n\n ilogic_aparts = ilogic_features[0].feature.split('.')\n idelay_aparts = idelay_features[0].feature.split('.')\n\n # tile_name = aparts[0]\n ioi_ilogic_site = get_ioi_site(\n top.db, top.grid, ilogic_aparts[0], ilogic_aparts[1]\n )\n ioi_idelay_site = get_ioi_site(\n top.db, top.grid, idelay_aparts[0], idelay_aparts[1]\n )\n\n site = Site(ilogic_features, ioi_ilogic_site)\n\n # Get idelay site corresponding to this tile and check if it is used\n idelay_site = None\n if len(idelay_features):\n idelay_site = Site(idelay_features, ioi_idelay_site)\n\n if site.has_feature(\"ISERDES.IN_USE\") and site.has_feature(\n \"IDDR_OR_ISERDES.IN_USE\"):\n # ISERDES\n bel = Bel('ISERDESE2')\n\n data_rate = None\n if site.has_feature(\"ISERDES.DATA_RATE.SDR\"):\n data_rate = '\"SDR\"'\n else:\n data_rate = '\"DDR\"'\n bel.parameters['DATA_RATE'] = data_rate\n\n # TODO: There shouldn't be mixed width in FASM features.\n # Probably it is worth revisiting the fuzzer, as it\n # is not possible to determine the width in case there\n # is a multiple choice in the fasm features.\n data_width = None\n if site.has_feature(\"ISERDES.DATA_WIDTH.W3\"):\n data_width = 3\n elif site.has_feature(\"ISERDES.DATA_WIDTH.W4_6\"):\n data_width = 6\n elif site.has_feature(\"ISERDES.DATA_WIDTH.W5_7\"):\n data_width = 7\n elif site.has_feature(\"ISERDES.DATA_WIDTH.W8\"):\n data_width = 8\n else:\n data_width = 2\n\n bel.parameters['DATA_WIDTH'] = data_width\n\n interface = None\n if site.has_feature(\"ISERDES.INTERFACE_TYPE.MEMORY_DDR3\"):\n interface = '\"MEMORY_DDR3\"'\n elif site.has_feature(\"ISERDES.INTERFACE_TYPE.NOT_MEMORY\"\n ) and site.has_feature(\n \"ISERDES.INTERFACE_TYPE.Z_MEMORY\"):\n interface = '\"NETWORKING\"'\n elif site.has_feature(\"ISERDES.INTERFACE_TYPE.OVERSAMPLE\"):\n interface = '\"OVERSAMPLE\"'\n else:\n assert False\n\n bel.parameters['INTERFACE_TYPE'] = interface\n\n 
site.add_source(bel, 'O', 'O')\n\n site.add_sink(bel, 'CLK', 'CLK')\n site.add_sink(bel, 'CLKB', 'CLKB')\n site.add_sink(bel, 'CLKDIV', 'CLKDIV')\n\n site.add_sink(bel, 'RST', 'SR')\n\n if site.has_feature('ZINV_D'):\n bel.parameters['IS_D_INVERTED'] = 0\n else:\n bel.parameters['IS_D_INVERTED'] = 1\n\n if site.has_feature('IFF.ZINV_C'):\n bel.parameters['IS_CLK_INVERTED'] = 0\n bel.parameters['IS_CLKB_INVERTED'] = 1\n else:\n bel.parameters['IS_CLK_INVERTED'] = 1\n bel.parameters['IS_CLKB_INVERTED'] = 0\n\n num_ce = None\n if site.has_feature('ISERDES.NUM_CE.N2'):\n num_ce = 2\n else:\n num_ce = 1\n\n bel.parameters['NUM_CE'] = num_ce\n\n if site.has_feature('IDELMUXE3.P0') and site.has_feature(\n 'IFFDELMUXE3.P0'):\n bel.parameters['IOBDELAY'] = '\"BOTH\"'\n elif site.has_feature('IFFDELMUXE3.P0'):\n bel.parameters['IOBDELAY'] = '\"IFD\"'\n elif site.has_feature('IDELMUXE3.P0'):\n bel.parameters['IOBDELAY'] = '\"IBUF\"'\n\n site.add_sink(bel, 'CE1', 'CE1')\n site.add_sink(bel, 'CE2', 'CE2')\n site.add_sink(bel, 'BITSLIP', 'BITSLIP')\n\n if idelay_site and idelay_site.has_feature(\"IN_USE\") and (\n idelay_site.has_feature(\"IDELAY_VALUE\")\n or idelay_site.has_feature(\"ZIDELAY_VALUE\")):\n site.add_sink(bel, 'DDLY', 'DDLY')\n else:\n site.add_sink(bel, 'D', 'D')\n\n for i in range(1, 9):\n port_q = 'Q{}'.format(i)\n site.add_source(bel, port_q, port_q)\n\n site.add_bel(bel)\n else:\n site.sources['O'] = None\n site.sinks['D'] = []\n site.outputs['O'] = 'D'\n\n top.add_site(site)\n\n\ndef process_ologic(top, features):\n\n aparts = features[0].feature.split('.')\n # tile_name = aparts[0]\n ioi_site = get_ioi_site(top.db, top.grid, aparts[0], aparts[1])\n\n site = Site(features, ioi_site)\n\n if site.has_feature(\"OSERDES.IN_USE\"):\n # OSERDES\n bel = Bel('OSERDESE2')\n\n data_rate_oq = None\n if site.has_feature(\"OSERDES.DATA_RATE_OQ.DDR\"):\n data_rate_oq = '\"DDR\"'\n elif site.has_feature(\"OSERDES.DATA_RATE_OQ.SDR\"):\n data_rate_oq = '\"SDR\"'\n else:\n assert False\n bel.parameters['DATA_RATE_OQ'] = data_rate_oq\n\n data_rate_tq = None\n if site.has_feature(\"OSERDES.DATA_RATE_TQ.DDR\"):\n data_rate_tq = '\"DDR\"'\n elif site.has_feature(\"OSERDES.DATA_RATE_TQ.SDR\"):\n data_rate_tq = '\"SDR\"'\n elif site.has_feature(\"OSERDES.DATA_RATE_TQ.BUF\"):\n data_rate_tq = '\"BUF\"'\n else:\n assert False\n bel.parameters['DATA_RATE_TQ'] = data_rate_tq\n\n data_width = None\n if site.has_feature(\"OSERDES.DATA_WIDTH.W2\"):\n data_width = 2\n elif site.has_feature(\"OSERDES.DATA_WIDTH.W3\"):\n data_width = 3\n elif site.has_feature(\"OSERDES.DATA_WIDTH.W4\"):\n data_width = 4\n elif site.has_feature(\"OSERDES.DATA_WIDTH.W5\"):\n data_width = 5\n elif site.has_feature(\"OSERDES.DATA_WIDTH.W6\"):\n data_width = 6\n elif site.has_feature(\"OSERDES.DATA_WIDTH.W7\"):\n data_width = 7\n elif site.has_feature(\"OSERDES.DATA_WIDTH.W8\"):\n data_width = 8\n else:\n assert False\n\n bel.parameters['DATA_WIDTH'] = data_width\n\n bel.parameters['TRISTATE_WIDTH'] = \"4\" if site.has_feature(\n \"OSERDES.TRISTATE_WIDTH.W4\"\n ) else \"1\"\n bel.parameters['SERDES_MODE'] = '\"SLAVE\"' if site.has_feature(\n \"OSERES.SERDES_MODE.SLAVE\"\n ) else '\"MASTER\"'\n\n site.add_source(bel, 'OQ', 'OQ')\n site.add_source(bel, 'TQ', 'TQ')\n\n site.add_sink(bel, 'CLK', 'CLK')\n site.add_sink(bel, 'CLKDIV', 'CLKDIV')\n\n for i in range(1, 9):\n site.add_sink(bel, 'D{}'.format(i), 'D{}'.format(i))\n\n inverted = (\"IS_D{}_INVERTED\".format(i))\n if site.has_feature(inverted):\n bel.parameters[inverted] = 1\n\n for i 
in range(1, 5):\n site.add_sink(bel, 'T{}'.format(i), 'T{}'.format(i))\n\n if not site.has_feature(\"ZINV_T{}\".format(i)):\n bel.parameters[\"IS_T{}_INVERTED\".format(i)] = 1\n\n site.add_sink(bel, 'OCE', 'OCE')\n site.add_sink(bel, 'TCE', 'TCE')\n\n site.add_sink(bel, 'RST', 'SR')\n\n site.add_bel(bel)\n\n else:\n # PASS THROUGH\n site.sources['OQ'] = None\n site.sinks['D1'] = []\n site.outputs['OQ'] = 'D1'\n\n site.sources['TQ'] = None\n site.sinks['T1'] = []\n site.outputs['TQ'] = 'T1'\n\n top.add_site(site)\n\n\ndef process_ioi(conn, top, tile, features):\n\n ilogic_idelay = {\n \"0\": {\n 'ILOGIC': [],\n 'IDELAY': []\n },\n \"1\": {\n 'ILOGIC': [],\n 'IDELAY': []\n },\n }\n idelay = {\n \"0\": [],\n \"1\": [],\n }\n ologic = {\n \"0\": [],\n \"1\": [],\n }\n\n for f in features:\n site = f.feature.split('.')[1]\n\n if site.startswith('IDELAY_Y'):\n ilogic_idelay[site[-1]]['IDELAY'].append(f)\n idelay[site[-1]].append(f)\n if site.startswith('ILOGIC_Y'):\n ilogic_idelay[site[-1]]['ILOGIC'].append(f)\n if site.startswith('OLOGIC_Y'):\n ologic[site[-1]].append(f)\n\n for features in idelay.values():\n if len(features):\n process_idelay(top, features)\n\n for features in ilogic_idelay.values():\n if len(features['ILOGIC']):\n process_ilogic_idelay(top, features)\n\n for features in ologic.values():\n if len(features):\n process_ologic(top, features)\n","sub_path":"xc/xc7/fasm2bels/ioi_models.py","file_name":"ioi_models.py","file_ext":"py","file_size_in_byte":11048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"577857557","text":"data=input(\"輸入月租型式及通話時間為:\").split(\",\")\nkind=[186,386,586,986]\nmon=[0.09, 0.08, 0.07, 0.06]\nless=[0.9,0.8,0.7,0.6]\nmany=[0.8,0.7,0.6,0.5]\n\npt=kind.index(int(data[0]))\nmoney=int(int(data[1])*mon[pt]+0.5)\n\nif money<=kind[pt]:\n print(\"通話費為:\",kind[pt])\nelse:\n if money*2>=kind[pt]:\n print(\"通話費為:\",int(money*many[pt]+0.5))\n else:\n print(\"通話費為:\",int(money*less[pt]+0.5))","sub_path":"109-2_First_Midterm/7.py","file_name":"7.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"265862632","text":"#!/usr/bin/env python3\nimport requests\nimport os\nimport sys\nfrom dotenv import load_dotenv\nimport argparse\nload_dotenv()\n\nheaders = {\"Authorization\": os.getenv(\"TOKEN\")}\n\n\ndef is_inactive_user(user):\n # not a github user\n if user is None:\n return False\n\n # take older accounts\n if int(user[\"createdAt\"][0:4]) > 2011:\n return False\n\n curr_user = []\n\n curr_user.append(user[\"starredRepositories\"][\"totalCount\"])\n curr_user.append(user[\"repositories\"][\"totalCount\"])\n curr_user.append(\n user[\"contributionsCollection\"][\"totalCommitContributions\"]\n )\n curr_user.append(\n user[\"contributionsCollection\"][\"restrictedContributionsCount\"]\n )\n curr_user.append(user[\"following\"][\"totalCount\"])\n curr_user.append(\n 0\n if user[\"contributionsCollection\"][\"hasAnyContributions\"] is False\n else 1\n )\n\n return sum(curr_user) < 1\n\ndef main():\n parser = argparse.ArgumentParser()\n\n parser.add_argument('-i', '--inp', help='input file')\n parser.add_argument('-o', '--out', help='output file')\n\n args = parser.parse_args()\n\n if not args.inp or not args.out:\n print(\"Must provide valid I/O files.\", file=sys.stderr)\n sys.exit(1)\n\n with open(args.inp, \"r\") as users:\n with open(args.out, \"w\") as output:\n\n for user in users:\n res = 
build_query(user.rstrip())[\"data\"]\n            if is_inactive_user(res[\"user\"]):\n                print(\"OK!\\nUser: {}\".format(user))\n                output.write(\"{}\\n\".format(user))\n\n\ndef run_query(query, variables):\n    request = requests.post(\n        \"https://api.github.com/graphql\",\n        json={\"query\": query, \"variables\": variables},\n        headers=headers,\n    )\n    if request.status_code == 200:\n        return request.json()\n    else:\n        raise requests.HTTPError(\n            \"Query failed to run by returning code of {}. {}\".format(\n                request.status_code, query\n            )\n        )\n\n\ndef build_query(user):\n    query = \"\"\"\n    query userInfo($login: String!) {\n        user(login: $login) {\n            name\n            login\n            createdAt\n            starredRepositories {\n                totalCount\n            }\n            repositories {\n                totalCount\n            }\n            following {\n                totalCount\n            }\n            contributionsCollection {\n                totalCommitContributions\n                restrictedContributionsCount\n                hasAnyContributions\n            }\n        }\n    }\"\"\"\n\n    variables = {\"login\": user}\n    return run_query(query, variables)\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"455043182","text":"import random\nimport pandas as pd\nfrom time import sleep\n\ndef makeAnnonyCode(code):\n    return code[:6] + \"***\"\n\ndef makeAnnonyName(name):\n    if(len(name)==2):\n        return \"*\"+name[1]\n    elif(len(name)==3):\n        return name[0]+ \"*\" +name[2]\n    elif(len(name) > 3):\n        return name[:2] + \"*\" + name[3:] \n\ndef printInfo(df,random_list):\n    for i in range(len(random_list)):\n        sleep(0.5)\n        grade = str(i + 1)+\"번\"\n\n        index = random_list[i]\n        name = 
makeAnnonyName(str(df[df.keys()[0]][index]))\n major = str(df[df.keys()[1]][index])\n code = makeAnnonyCode(str(df[df.keys()[2]][index]))\n \n print(\"{} / {} / {} / {}\".format(grade, name, major, code))\n\ndef Lots(number):\n random_list = []\n rand_num = random.randint(0, count-1)\n for _ in range(number):\n while rand_num in random_list:\n rand_num = random.randint(0, count-1)\n random_list.append(rand_num)\n random_list.sort()\n return random_list\n\nif __name__ == \"__main__\":\n print(\"*****************************************************\")\n name = input(\"Excel Name : \")\n df = pd.read_excel(name + '.xlsx')\n print(\"*****************************************************\")\n count = len(df.values)\n print(\"총 응답 개수 : \" + str(count)+\"개\")\n print(\"*****************************************************\")\n number = int(input(\"Lots Number : \"))\n print(str(number)+\"개를 추첨합니다\")\n print(\"*****************************************************\")\n random_list = Lots(number)\n printInfo(df,random_list)\n print(\"*****************************************************\")\n\n","sub_path":"lots.py","file_name":"lots.py","file_ext":"py","file_size_in_byte":1664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"589772463","text":"# coding=utf-8\n\"\"\"\n Copyright (c) 2018-present, Ant Financial Service Group\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n ------------------------------------------------------\n File Name : test_mesh_client\n Author : jiaqi.hjq\n\"\"\"\nimport json\nimport unittest\nimport attr\nimport requests_mock\n\nfrom anthunder.mesh.mesh_client import MeshClient, ApplicationInfo, PublishServiceRequest, ProviderMetaInfo\n\n\nclass TestMeshClient(unittest.TestCase):\n @requests_mock.Mocker()\n def test_start(self, session_mock):\n session_mock.post('http://127.0.0.1:13330/configs/application', text=json.dumps(dict(success=True)))\n mesh = MeshClient(ApplicationInfo(\"pybolt_test_app\", \"\", \"\", \"\"))\n mesh.startup()\n\n @requests_mock.Mocker()\n def test_pub(self, session_mock):\n session_mock.post('http://127.0.0.1:13330/services/publish', text=json.dumps(dict(success=True)))\n pubreq = PublishServiceRequest(serviceName=\"com.alipay.pybolt.test:1.0\",\n providerMetaInfo=ProviderMetaInfo(protocol=\"1\",\n version=\"4.0\",\n serializeType=\"protobuf\",\n appName=\"pybolt_test_app\"))\n print(attr.asdict(pubreq))\n mesh = MeshClient(ApplicationInfo(\"pybolt_test_app\"))\n mesh.publish(pubreq)\n\n @requests_mock.Mocker()\n def test_subscribe(self, session_mock):\n session_mock.post('http://127.0.0.1:13330/services/subscribe', text=json.dumps(dict(success=True)))\n mesh = MeshClient(ApplicationInfo(\"pybolt_test_app\"))\n mesh.subscribe(\"com.alipay.test\")\n\n @requests_mock.Mocker()\n def test_unpublish(self, session_mock):\n session_mock.post('http://127.0.0.1:13330/services/unpublish', text=json.dumps(dict(success=True)))\n mesh = MeshClient(ApplicationInfo(\"pybolt_test_app\"))\n 
mesh.unpublish(\"com.alipay.test\")\n\n @requests_mock.Mocker()\n def test_unsubscribe(self, session_mock):\n print(session_mock)\n session_mock.post('http://127.0.0.1:13330/services/unsubscribe', text=json.dumps(dict(success=True)))\n mesh = MeshClient(ApplicationInfo(\"pybolt_test_app\"))\n mesh.unsubscribe(\"com.alipay.test\")\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/test_mesh_client.py","file_name":"test_mesh_client.py","file_ext":"py","file_size_in_byte":2936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"232499322","text":"#!/usr/bin/env python\nfrom numpy import product\n\n'''\nMultiples of 3 and 5\nIf we list all the natural numbers below 10 that are multiples of 3 or 5, \nwe get 3, 5, 6 and 9. The sum of these multiples is 23. Find the sum of all the\nmultiples of 3 or 5 below 1000.\n'''\n\ndef getSumArithmeticSeries(differences, bound):\n ''' Return the sum of all multiples of 'differences' below 'bound' \n difference -- The common difference between consecutive terms in the \n arithmetic progression. \n bound -- The upper bound of a finite arithmetic progression. \n '''\n sumDivisors = sum(\n [getArithmeticSeries(difference, bound) for difference in differences]\n )\n\n '''\n Get the product of common differences. Subtract the arithmetic series \n generated by using this product as a common difference. This will remove\n the sum of common multiples. \n '''\n sumDivisorsProduct = getArithmeticSeries(product(differences), bound)\n\n return sumDivisors - sumDivisorsProduct\n\n\ndef getArithmeticSeries(difference, bound):\n ''' Return an arithmetic series, or sum of a finite arithmetic progression. \n difference -- The common difference between consecutive terms in the \n arithmetic progression. \n bound -- The upper bound of a finite arithmetic progression. \n '''\n highestMultiple = ((bound - 1) // difference) \n return (difference * highestMultiple * (highestMultiple + 1)) // 2\n","sub_path":"py/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"264052647","text":"from .constants import *\nimport pandas as pd\nimport numpy as np\n\ndef read_track_csv(arguments):\n \"\"\"\n This method reads the tracks file from highD data.\n\n :param arguments: the input path for the tracks csv file.\n :return: a list containing all tracks as dictionaries.\n \"\"\"\n # Read the csv file, convert it into a useful data structure\n df = pd.read_csv(arguments)\n\n # Use groupby to aggregate track info. 
Less error prone than iterating over the data.\n grouped = df.groupby([TRACK_ID], sort=False)\n # Efficiently pre-allocate an empty list of sufficient size\n tracks = [None] * grouped.ngroups\n current_track = 0\n for group_id, rows in grouped:\n bounding_boxes = np.transpose(np.array([rows[X].values,\n rows[Y].values,\n rows[WIDTH].values,\n rows[HEIGHT].values]))\n tracks[current_track] = {TRACK_ID: np.int64(group_id), # for compatibility, int would be more space efficient\n FRAME: rows[FRAME].values,\n BBOX: bounding_boxes,\n X: rows[X].values,\n Y: rows[Y].values,\n WIDTH: rows[WIDTH].values,\n X_VELOCITY: rows[X_VELOCITY].values,\n Y_VELOCITY: rows[Y_VELOCITY].values,\n X_ACCELERATION: rows[X_ACCELERATION].values,\n Y_ACCELERATION: rows[Y_ACCELERATION].values,\n FRONT_SIGHT_DISTANCE: rows[FRONT_SIGHT_DISTANCE].values,\n BACK_SIGHT_DISTANCE: rows[BACK_SIGHT_DISTANCE].values,\n THW: rows[THW].values,\n TTC: rows[TTC].values,\n DHW: rows[DHW].values,\n PRECEDING_X_VELOCITY: rows[PRECEDING_X_VELOCITY].values,\n PRECEDING_ID: rows[PRECEDING_ID].values,\n FOLLOWING_ID: rows[FOLLOWING_ID].values,\n LEFT_FOLLOWING_ID: rows[LEFT_FOLLOWING_ID].values,\n LEFT_ALONGSIDE_ID: rows[LEFT_ALONGSIDE_ID].values,\n LEFT_PRECEDING_ID: rows[LEFT_PRECEDING_ID].values,\n RIGHT_FOLLOWING_ID: rows[RIGHT_FOLLOWING_ID].values,\n RIGHT_ALONGSIDE_ID: rows[RIGHT_ALONGSIDE_ID].values,\n RIGHT_PRECEDING_ID: rows[RIGHT_PRECEDING_ID].values,\n LANE_ID: rows[LANE_ID].values\n }\n current_track = current_track + 1\n return tracks\n\n\ndef read_track_meta(arguments):\n \"\"\"\n This method reads the static info file from highD data.\n\n :param arguments: the input path for the static csv file.\n :return: the static dictionary - the key is the track_id and the value is the corresponding data for this track\n \"\"\"\n # Read the csv file, convert it into a useful data structure\n df = pd.read_csv(arguments)\n\n # Declare and initialize the static_dictionary\n static_dictionary = {}\n\n # Iterate over all rows of the csv because we need to create the bounding boxes for each row\n for i_row in range(df.shape[0]):\n track_id = int(df[TRACK_ID][i_row])\n static_dictionary[track_id] = {TRACK_ID: track_id,\n WIDTH: float(df[WIDTH][i_row]),\n HEIGHT: float(df[HEIGHT][i_row]),\n INITIAL_FRAME: int(df[INITIAL_FRAME][i_row]),\n FINAL_FRAME: int(df[FINAL_FRAME][i_row]),\n NUM_FRAMES: int(df[NUM_FRAMES][i_row]),\n CLASS: str(df[CLASS][i_row]),\n DRIVING_DIRECTION: float(df[DRIVING_DIRECTION][i_row]),\n TRAVELED_DISTANCE: float(df[TRAVELED_DISTANCE][i_row]),\n MIN_X_VELOCITY: float(df[MIN_X_VELOCITY][i_row]),\n MAX_X_VELOCITY: float(df[MAX_X_VELOCITY][i_row]),\n MEAN_X_VELOCITY: float(df[MEAN_X_VELOCITY][i_row]),\n MIN_TTC: float(df[MIN_TTC][i_row]),\n MIN_THW: float(df[MIN_THW][i_row]),\n MIN_DHW: float(df[MIN_DHW][i_row]),\n NUMBER_LANE_CHANGES: int(df[NUMBER_LANE_CHANGES][i_row])\n }\n return static_dictionary\n\n\ndef read_recording_meta(arguments):\n \"\"\"\n This method reads the video meta file from highD data.\n\n :param arguments: the input path for the video meta csv file.\n :return: the meta dictionary containing the general information of the video\n \"\"\"\n # Read the csv file, convert it into a useful data structure\n df = pd.read_csv(arguments)\n\n # Declare and initialize the extracted_meta_dictionary\n extracted_meta_dictionary = {ID: int(df[ID][0]),\n FRAME_RATE: int(df[FRAME_RATE][0]),\n LOCATION_ID: int(df[LOCATION_ID][0]),\n SPEED_LIMIT: float(df[SPEED_LIMIT][0]),\n MONTH: str(df[MONTH][0]),\n WEEKDAY: 
str(df[WEEKDAY][0]),\n START_TIME: str(df[START_TIME][0]),\n DURATION: float(df[DURATION][0]),\n TOTAL_DRIVEN_DISTANCE: float(df[TOTAL_DRIVEN_DISTANCE][0]),\n TOTAL_DRIVEN_TIME: float(df[TOTAL_DRIVEN_TIME][0]),\n N_VEHICLES: int(df[N_VEHICLES][0]),\n N_CARS: int(df[N_CARS][0]),\n N_TRUCKS: int(df[N_TRUCKS][0]),\n UPPER_LANE_MARKINGS: np.fromstring(df[UPPER_LANE_MARKINGS][0], sep=\";\"),\n LOWER_LANE_MARKINGS: np.fromstring(df[LOWER_LANE_MARKINGS][0], sep=\";\")}\n return extracted_meta_dictionary\n","sub_path":"Python/src/data_management/highd_reader.py","file_name":"highd_reader.py","file_ext":"py","file_size_in_byte":6473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"139336550","text":"\"\"\"\nUnit tests for hxl-proxy dao module\nDavid Megginson\nFebruary 2016\n\nLicense: Public Domain\n\"\"\"\n\nimport unittest, os\nfrom hxl_proxy import app, dao\nfrom . import base\n\n\nclass AbstractDAOTest(base.AbstractDBTest):\n \"\"\"Abstract base class for DAO tests.\"\"\"\n \n def setUp(self):\n super().setUp()\n\n def tearDown(self):\n super().tearDown()\n\n def assertEquiv(self, model, actual):\n \"\"\"Test equivalence where everything in model must be the same in actual\n (but actual can have extra values).\"\"\"\n for key in model:\n self.assertEqual(model.get(key), actual.get(key), key)\n\n\nclass TestUser(AbstractDAOTest):\n \"\"\"Test user DAO functionality\"\"\"\n\n NEW_USER = {\n 'user_id': 'user3',\n 'email': 'user3@example.org',\n 'name': 'User Three',\n 'name_given': 'User',\n 'name_family': 'Three'\n }\n\n def test_create(self):\n dao.users.create(self.NEW_USER)\n result = dao.users.read(self.NEW_USER['user_id'])\n self.assertEquiv(self.NEW_USER, result)\n assert result.get('last_login') is not None\n\n def test_read(self):\n user = {\n 'user_id': 'user1',\n 'email': 'user1@example.org',\n 'name': 'User One',\n 'name_given': 'User',\n 'name_family': 'One'\n }\n self.assertEquiv(user, dao.users.read('user1'))\n\n def test_update(self):\n user = dict(self.NEW_USER)\n user['user_id'] = 'user1'\n dao.users.update(user)\n self.assertEquiv(user, dao.users.read(user['user_id']))\n\n def test_delete(self):\n dao.users.create(self.NEW_USER)\n assert dao.users.read(self.NEW_USER['user_id']) is not None\n dao.users.delete(self.NEW_USER['user_id'])\n assert dao.users.read(self.NEW_USER['user_id']) is None\n\n\nclass TestRecipe(AbstractDAOTest):\n\n NEW_RECIPE = {\n 'recipe_id': 'XXXXX',\n 'passhash': '5f4dcc3b5aa765d61d8327deb882cf99',\n 'name': 'Recipe X',\n 'description': 'New test recipe',\n 'cloneable': 1,\n 'stub': 'recipex',\n 'args': {}\n }\n\n def test_create(self):\n dao.recipes.create(self.NEW_RECIPE)\n result = dao.recipes.read(self.NEW_RECIPE['recipe_id'])\n self.assertEquiv(self.NEW_RECIPE, result)\n assert result['date_created']\n self.assertEqual(result['date_created'], result['date_modified'])\n\n def test_read(self):\n recipe = {\n 'recipe_id': 'AAAAA',\n 'passhash': '5f4dcc3b5aa765d61d8327deb882cf99',\n 'name': 'Recipe #1',\n 'description': 'First test recipe',\n 'cloneable': 1,\n 'stub': 'recipe1',\n 'args': {'url':'http://example.org/basic-dataset.csv'}\n }\n self.assertEquiv(recipe, dao.recipes.read(recipe['recipe_id']))\n\n def test_update(self):\n recipe = dict(self.NEW_RECIPE)\n recipe['recipe_id'] = 'AAAAA'\n dao.recipes.update(recipe)\n result = dao.recipes.read('AAAAA')\n self.assertEquiv(recipe, result)\n self.assertNotEqual(result['date_created'], result['date_modified'])\n\n def test_delete(self):\n assert 
dao.recipes.read('AAAAA') is not None\n        dao.recipes.delete('AAAAA')\n        assert dao.recipes.read('AAAAA') is None\n","sub_path":"tests/test_dao.py","file_name":"test_dao.py","file_ext":"py","file_size_in_byte":3313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"91818212","text":"import multiprocessing\nimport sys\nimport pyvips\n\nNUM_WORKER = 16\nprint(NUM_WORKER)\n\ndef get_metadata(metadata_path='/home/gpzlx1/metadata'):\n    with open(metadata_path, \"r\") as f:\n        lines = f.readlines()\n    records = []\n    for line in lines:\n        line = line.split()\n        records.append((line[0] , int(line[1])))\n    return records\n\ndef work_process(q_in):\n    while True:\n        deq = q_in.get()\n        if deq is None:\n            break\n        path, label = deq\n        image = pyvips.Image.new_from_file(path, access='sequential')\n\n        image *= [1, 2, 1]\n\n\n\n        #process.process(path)\n        #insert your process function\n\n\ndef main(records):\n    if NUM_WORKER > 0:\n        q_in = [ multiprocessing.Queue(1024) for i in range(NUM_WORKER) ]\n        multiprocess = [ multiprocessing.Process(target=work_process, args=(q_in[i],)) for i in range(NUM_WORKER) ]\n        \n        for p in multiprocess:\n            p.start()\n\n        for i, record in enumerate(records):\n            q_in[i % NUM_WORKER].put(record)\n\n        for q in q_in:\n            q.put(None)\n\n        for p in multiprocess:\n            p.join()\n\nif __name__ == \"__main__\":\n    import time\n    #you should prepare your metadata \n    #and give your metadata path\n    records = get_metadata('/home/Adama/metadata')\n    # records = records[0:10000]\n    print(len(records))\n    begin = time.time()\n    main(records)\n    end = time.time()\n    print(\"speed:\", len(records) / (end - begin))","sub_path":"src/loading-test/raw copy.py","file_name":"raw copy.py","file_ext":"py","file_size_in_byte":1469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"649627533","text":"from __future__ import print_function\n\nimport regex\n\n# to convert DOC. 
Must already have antiword installed directly on system\nfrom subprocess import Popen, PIPE\n\n# to convert DOCX\nfrom docx import opendocx, getdocumenttext\n\n# to convert PDFs\nfrom pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter\nfrom pdfminer.converter import TextConverter\nfrom pdfminer.layout import LAParams\nfrom pdfminer.pdfpage import PDFPage\nfrom cStringIO import StringIO\n\n# to ocr pdfs\nfrom wand.image import Image\nfrom PIL import Image as PI\nimport pyocr\nimport pyocr.builders\nimport io\n\n#\n# TEXT CONVERSION FUNCTIONS\n#\n\ndef convert_pdf_to_txt(file_path):\n    rsrcmgr = PDFResourceManager()\n    retstr = StringIO()\n    laparams = LAParams()\n    device = TextConverter(rsrcmgr, retstr, laparams=laparams)\n    fp = file(file_path, 'rb')\n    interpreter = PDFPageInterpreter(rsrcmgr, device)\n    password = \"\"\n    maxpages = 0\n    caching = True\n    pagenos = set()\n    for page in PDFPage.get_pages(fp, pagenos, maxpages=maxpages, password=password, caching=caching, check_extractable=True):\n        interpreter.process_page(page)\n    fp.close()\n    device.close()\n    str = retstr.getvalue().decode('utf8')\n    retstr.close()\n\n    try:\n        if len(str) > 15:\n            print('')\n            return str\n        else:\n            print(', needs OCR...')\n            raise ValueError\n    except:\n        tool = pyocr.get_available_tools()[0]\n        lang = tool.get_available_languages()[0]\n\n        req_image = []\n        final_text = []\n\n        with Image(filename=file_path, resolution=150) as image_jpeg:\n            image_jpeg.compression_quality = 99\n            image_jpeg = image_jpeg.convert('jpeg')\n\n            for img in image_jpeg.sequence:\n                with Image(image=img) as img_page:\n                    req_image.append(img_page.make_blob('jpeg'))\n            image_jpeg.destroy()\n\n        for img in req_image:\n            txt = tool.image_to_string(\n                PI.open(io.BytesIO(img)),\n                lang=lang,\n                builder=pyocr.builders.TextBuilder()\n            )\n            final_text.append(txt)\n        ocr = ''.join(final_text)\n\n        return(ocr)\n\n\ndef getrtffields(t):\n    fieldsearch=regex.compile(r\"{\\\\field[^{]*?({(?>[^{}]+|(?1))*})({(?>[^{}]+|(?1))*})}\")\n\n    textboxes,drops,checks=[],[],[]\n    checkboxoptions=[\"No\",\"Yes\"]\n\n    #Deal with text boxes\n    m = fieldsearch.finditer(t)\n    if m:\n        for field in m:\n            if \"FORMTEXT\" in field[0]:\n                textboxes.append(field[0])\n            elif \"FORMDROPDOWN\" in field[0]:\n                drops.append(field[0])\n            elif \"FORMCHECKBOX\" in field[0]:\n                checks.append(field[0])\n            else:\n                pass\n\n    for textbox in textboxes:\n        try:\n            result = regex.search(r\"fldrslt ({(?>[^{}]+|(?1))*})}\",textbox)[1]\n            if result:\n                t=t.replace(textbox,result)\n        except:\n            pass\n    for drop in drops:\n        try:\n            ddresult = regex.search(r\"fftype2.*ffres([0-9]*)\",drop)[1]\n            if ddresult==\"25\":\n                ddresult=regex.search(r\"ffdefres([0-9]*)\",drop)[1]\n            ddlist = regex.findall(r\"ffl ([^}]*)}\",drop)\n            t=t.replace(drop,\"{\\\\rtlch \"+ddlist[int(ddresult)]+\"}\")\n        except:\n            pass\n    for check in checks:\n        try:\n            result = regex.search(r\"fftype1.*ffres([0-9]*)\",check)[1]\n            if result==\"25\":\n                result=regex.search(r\"ffdefres([0-9]*)\",check)[1]\n            t=t.replace(check,\"{\\\\rtlch \"+checkboxoptions[int(result)]+\"}\")\n        except:\n            pass\n\n    return t\n\n\ndef striprtf(text):\n    text=getrtffields(text)\n    pattern = regex.compile(r\"\\\\([a-z]{1,32})(-?\\d{1,10})?[ ]?|\\\\'([0-9a-f]{2})|\\\\([^a-z])|([{}])|[\\r\\n]+|(.)\", regex.I)\n    # control words which specify a \"destination\".\n    destinations = frozenset((\n        'aftncn','aftnsep','aftnsepc','annotation','atnauthor','atndate','atnicn','atnid',\n        'atnparent','atnref','atntime','atrfend','atrfstart','author','background',\n        
'bkmkend','bkmkstart','blipuid','buptim','category','colorschememapping',\n        'colortbl','comment','company','creatim','datafield','datastore','defchp','defpap',\n        'do','doccomm','docvar','dptxbxtext','ebcend','ebcstart','factoidname','falt',\n        'fchars','ffdeftext','ffentrymcr','ffexitmcr','ffformat','ffhelptext','ffl',\n        'ffname','ffstattext','field','file','filetbl','fldinst','fldrslt','fldtype',\n        'fname','fontemb','fontfile','fonttbl','footer','footerf','footerl','footerr',\n        'footnote','formfield','ftncn','ftnsep','ftnsepc','g','generator','gridtbl',\n        'header','headerf','headerl','headerr','hl','hlfr','hlinkbase','hlloc','hlsrc',\n        'hsv','htmltag','info','keycode','keywords','latentstyles','lchars','levelnumbers',\n        'leveltext','lfolevel','linkval','list','listlevel','listname','listoverride',\n        'listoverridetable','listpicture','liststylename','listtable','listtext',\n        'lsdlockedexcept','macc','maccPr','mailmerge','maln','malnScr','manager','margPr',\n        'mbar','mbarPr','mbaseJc','mbegChr','mborderBox','mborderBoxPr','mbox','mboxPr',\n        'mchr','mcount','mctrlPr','md','mdeg','mdegHide','mden','mdiff','mdPr','me',\n        'mendChr','meqArr','meqArrPr','mf','mfName','mfPr','mfunc','mfuncPr','mgroupChr',\n        'mgroupChrPr','mgrow','mhideBot','mhideLeft','mhideRight','mhideTop','mhtmltag',\n        'mlim','mlimloc','mlimlow','mlimlowPr','mlimupp','mlimuppPr','mm','mmaddfieldname',\n        'mmath','mmathPict','mmathPr','mmaxdist','mmc','mmcJc','mmconnectstr',\n        'mmconnectstrdata','mmcPr','mmcs','mmdatasource','mmheadersource','mmmailsubject',\n        'mmodso','mmodsofilter','mmodsofldmpdata','mmodsomappedname','mmodsoname',\n        'mmodsorecipdata','mmodsosort','mmodsosrc','mmodsotable','mmodsoudl',\n        'mmodsoudldata','mmodsouniquetag','mmPr','mmquery','mmr','mnary','mnaryPr',\n        'mnoBreak','mnum','mobjDist','moMath','moMathPara','moMathParaPr','mopEmu',\n        'mphant','mphantPr','mplcHide','mpos','mr','mrad','mradPr','mrPr','msepChr',\n        'mshow','mshp','msPre','msPrePr','msSub','msSubPr','msSubSup','msSubSupPr','msSup',\n        'msSupPr','mstrikeBLTR','mstrikeH','mstrikeTLBR','mstrikeV','msub','msubHide',\n        'msup','msupHide','mtransp','mtype','mvertJc','mvfmf','mvfml','mvtof','mvtol',\n        'mzeroAsc','mzeroDesc','mzeroWid','nesttableprops','nextfile','nonesttables',\n        'objalias','objclass','objdata','object','objname','objsect','objtime','oldcprops',\n        'oldpprops','oldsprops','oldtprops','oleclsid','operator','panose','password',\n        'passwordhash','pgp','pgptbl','picprop','pict','pn','pnseclvl','pntext','pntxta',\n        'pntxtb','printim','private','propname','protend','protstart','protusertbl','pxe',\n        'result','revtbl','revtim','rsidtbl','rxe','shp','shpgrp','shpinst',\n        'shppict','shprslt','shptxt','sn','sp','staticval','stylesheet','subject','sv',\n        'svb','tc','template','themedata','title','txe','ud','upr','userprops',\n        'wgrffmtfilter','windowcaption','writereservation','writereservhash','xe','xform',\n        'xmlattrname','xmlattrvalue','xmlclose','xmlname','xmlnstbl',\n        'xmlopen',\n    ))\n    # Translation of some special characters.\n    specialchars = {\n        'par': '\\n',\n        'sect': '\\n\\n',\n        'page': '\\n\\n',\n        'line': '\\n',\n        'tab': '\\t',\n        'emdash': u'\\u2014',\n        'endash': u'\\u2013',\n        'emspace': u'\\u2003',\n        'enspace': u'\\u2002',\n        'qmspace': u'\\u2005',\n        'bullet': u'\\u2022',\n        'lquote': u'\\u2018',\n        'rquote': u'\\u2019',\n        'ldblquote': u'\\u201C',\n        'rdblquote': u'\\u201D', \n    }\n    stack = []\n    ignorable = False       # Whether this group (and all inside it) are \"ignorable\".\n    ucskip = 1              # Number of ASCII characters to skip after a unicode 
character.\n    curskip = 0             # Number of ASCII characters left to skip\n    out = []                # Output buffer.\n    \n    for match in pattern.finditer(text):\n        word,arg,hex,char,brace,tchar = match.groups()\n        if brace:\n            curskip = 0\n            if brace == '{':\n                # Push state\n                stack.append((ucskip,ignorable))\n            elif brace == '}':\n                # Pop state\n                ucskip,ignorable = stack.pop()\n        elif char: # \\x (not a letter)\n            curskip = 0\n            if char == '~':\n                if not ignorable:\n                    out.append(u'\\xA0')\n            elif char in '{}\\\\':\n                if not ignorable:\n                    out.append(char)\n            elif char == '*':\n                ignorable = True\n        elif word: # \\foo\n            curskip = 0\n            if word in destinations:\n                ignorable = True\n            elif ignorable:\n                pass\n            elif word in specialchars:\n                out.append(specialchars[word])\n            elif word == 'uc':\n                ucskip = int(arg)\n            elif word == 'u':\n                c = int(arg)\n                if c < 0: c += 0x10000\n                if c > 127: out.append(unichr(c))\n                else: out.append(chr(c))\n                curskip = ucskip\n        elif hex: # \\'xx\n            if curskip > 0:\n                curskip -= 1\n            elif not ignorable:\n                c = int(hex,16)\n                if c > 127: out.append(unichr(c))\n                else: out.append(chr(c))\n        elif tchar:\n            if curskip > 0:\n                curskip -= 1\n            elif not ignorable:\n                out.append(tchar)\n    return ''.join(out)\n\n\n\ndef get_text(file_path):\n\n    try:\n        print('Filetype:',file_path[-3:],end='')\n        if (file_path[-4:] == \".doc\") or (file_path[-4:] == \".DOC\"):\n            p = Popen(['antiword', file_path], stdout=PIPE)\n            stdout, stderr = p.communicate()\n            return stdout.decode('utf8', 'ignore')\n        elif (file_path[-5:] == \".docx\") or (file_path[-5:] == \".DOCX\"):\n            document = opendocx(file_path)\n            paratextlist = getdocumenttext(document)\n            newparatextlist = []\n            for paratext in paratextlist:\n                newparatextlist.append(paratext)\n            return '\\n\\n'.join(newparatextlist)\n        elif (file_path[-4:] == \".odt\") or (file_path[-4:] == \".ODT\"):\n            cmd = ['odt2txt', file_path]\n            p = Popen(cmd, stdout=PIPE)\n            stdout, stderr = p.communicate()\n            return stdout.decode('ascii', 'ignore')\n        elif (file_path[-4:] == \".pdf\") or (file_path[-4:] == \".PDF\"):\n            return convert_pdf_to_txt(file_path)\n        elif (file_path[-4:] == \".rtf\") or (file_path[-4:] == \".RTF\"):\n            with open(file_path) as f:\n                return striprtf(f.read())\n        elif (file_path[-4:] == \".txt\") or (file_path[-4:] == \".TXT\"):\n            with open(file_path) as t:\n                return t.read().decode('latin-1')\n        else:\n            return 'Could not extract text from file (not recognized): ' + file_path\n    except:\n        return 'Could not extract text from file (extraction error): ' + file_path\n    \n\n","sub_path":"dudb/text_processing.py","file_name":"text_processing.py","file_ext":"py","file_size_in_byte":10804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"584200906","text":"\nfrom .error import ModelError\n\nfrom .db import sql\n\ndef deposit(r, user, amount):\n    txn = sql.create(r.db, 'transactions', {\n    \t'source_id': '',\n    \t'destination_id': user['_id'],\n    \t'amount': amount,\n    \t'notes': 'DevTest'\n    \t})\n    return txn\n","sub_path":"api-engine/engine/models/transaction.py","file_name":"transaction.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"309280445","text":"import load_data\nimport tune\n\nval_conv, val_emb, val_rec, val_spk, val_text, val_mfcc = load_data.dataset()\n\n\ndef classify():\n    \"\"\"\n    Method which calls for every layer the tune method in order to find the right parameters\n    :return:\n    \"\"\"\n    file = './data/tuning/flickr8k-speaker.txt'\n    tune.tune(val_mfcc, val_spk, file)\n    tune.tune(val_conv, val_spk, file)\n    amount_layers = val_rec.shape[1]\n    for i 
range(amount_layers):\n layer = val_rec[:, i, :]\n tune.tune(layer, val_spk, file)\n tune.tune(val_emb, val_spk, file)\n\nclassify()","sub_path":"classify_flickr8k.py","file_name":"classify_flickr8k.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"621404835","text":"# -*- coding: utf-8 -*-\nimport unittest\n\nfrom plone import api\nfrom plone.app.contenttypes.interfaces import ILink\nfrom plone.dexterity.interfaces import IDexterityFTI\nfrom zope.component import queryUtility\n\nfrom collective.videolink.interfaces import IVideoLinkThumb, IVideoLinkOembedable\nfrom collective.videolink.testing import COLLECTIVE_VIDEOLINK_INTEGRATION_TESTING\n\nclass ContentTypeTestCase(unittest.TestCase):\n\n layer = COLLECTIVE_VIDEOLINK_INTEGRATION_TESTING\n\n def setUp(self):\n self.portal = self.layer['portal']\n\n with api.env.adopt_roles(['Manager']):\n self.f1 = api.content.create(\n self.portal, 'Folder', 'f1')\n self.l1 = api.content.create(\n container=self.f1,\n type=\"Link\",\n id=\"l1\",\n safe_id=False,\n title='Bored of Approval',\n remoteUrl='https://www.youtube.com/watch?v=IZ1IS7_o0zQ'\n )\n self.l2 = api.content.create(\n container=self.f1,\n type=\"Link\",\n id=\"l2\",\n safe_id=False,\n title='Link to Plone.com',\n remoteUrl='https://www.plone.com'\n )\n\n def test_fti(self):\n fti = queryUtility(IDexterityFTI, name='Folder')\n self.assertIsNotNone(fti)\n\n def test_is_selectable_as_folder_default_view(self):\n self.portal.setDefaultPage('f1')\n self.assertEqual(self.portal.default_page, 'f1')\n\n def test_adding_l1(self):\n self.assertTrue(ILink.providedBy(self.l1))\n \n def test_adding_l2(self):\n self.assertTrue(ILink.providedBy(self.l2))\n \n def test_has_videolinkthumb_interface(self):\n self.assertTrue(IVideoLinkThumb.providedBy(self.l1))\n \n def test_does_not_have_videolinkthumb_interface(self):\n self.assertFalse(IVideoLinkThumb.providedBy(self.l2))\n \n def test_has_videolinkoembedable_interface(self):\n self.assertTrue(IVideoLinkOembedable.providedBy(self.l1))\n \n def test_does_not_have_videolinkoembedable_interface(self):\n self.assertFalse(IVideoLinkOembedable.providedBy(self.l2))\n \n def test_nolonger_has_thumbnail_after_adding_non_oembed_url(self):\n \"\"\" remove oembed thumb after changing remoteurl to a\n non-oembedable url\n \"\"\"\n # XXX Fix me\n self.l1.remoteUrl = \"http://plone.com\"\n self.l1.reindexObject()\n self.l1.reindexObject(idxs=['modified'])\n self.assertFalse(IVideoLinkThumb.providedBy(self.l1))\n\n def test_now_has_oembed_after_adding_oembed_url(self):\n \"\"\" add oembed content after changing remoteurl to\n an oembedable url\n \"\"\"\n self.l2.remoteUrl = \"https://www.youtube.com/watch?v=dQw4w9WgXcQ\"\n self.l2.title = \"Rick Rolled\"\n self.l2.reindexObject()\n self.l2.reindexObject(idxs=['modified'])\n #import pdb;pdb.set_trace()\n self.assertTrue(IVideoLinkOembedable.providedBy(self.l2))\n","sub_path":"src/collective.videolink/src/collective/videolink/tests/test_content.py","file_name":"test_content.py","file_ext":"py","file_size_in_byte":3040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"410093161","text":"import sys\n\nf1 = open(sys.argv[1], 'rb')\nrawdata1 = f1.read()\nf1.close()\n\nf2 = open('descomprimido-elmejorprofesor.txt', 'rb')\nrawdata2 = f2.read()\nf2.close()\n\nif rawdata1 == rawdata2:\n print('ok')\nelse:\n 
print('nok')","sub_path":"PC2_Compresor-master/verificador.py","file_name":"verificador.py","file_ext":"py","file_size_in_byte":223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"240677993","text":"from django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.views.generic.list import ListView\nfrom django.views.generic.detail import DetailView\n\nfrom django.views.generic import TemplateView\nfrom django.views.generic.edit import DeleteView\n\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.urls import reverse\nfrom django.shortcuts import redirect, render\nfrom django.contrib import messages\nfrom django.urls import reverse_lazy\nimport json\nfrom django.template.loader import render_to_string\n\nfrom .models import ImprovementPlan, ImprovementPlanPriority, ImprovementPlanTask, ImprovementPlanPriorityOutcome, \\\n HowWeKnow, ImprovementPlanTaskAction\n\nfrom base.models import ThemeFeature, BasePlanPriority, FeatureExemplification, BasePlanPriorityHeader, Theme\n\nfrom .forms import ImprovementPlanTaskForm, TaskProgressForm, NextStepsForm, \\\n CreateImprovementPlanForm, NewPlanPriorityForm, AddPriorityOutcomesForm, HowWeKnowSelectorForm, AddOutcomeHWWKForm,\\\n PriorityResponsibilityForm, PriorityNIFForm, NewPlanCustomPriorityForm, AllPlansForm, NewPlanPreviousPriorityForm, \\\n AllEvaluationsForm, NewPlanEvaluationPriorityForm, CustomPriorityThemeForm, SchoolInfoForm, VisionValuesAimsForm, \\\n ConsultationCollaborationForm, ToolsForMeasurementForm, FactorsInfluencingForm, TaskActionForm\n\nfrom evaluations.models import Evaluation\n\nfrom surveys.models import Survey\nimport operator\n\nfrom selfeval.forms import CustomQIForm\n\nfrom common import common_helpers\nfrom plans import helpers\n\nfrom django.core.paginator import Paginator\n\nSTATUSCREATE = 1\nSTATUSPRIORITIES = 2\nSTATUSRESPONSIBILITIES = 3\nSTATUSOUTCOMES = 4\nSTATUSHWWK = 5\nSTATUSNIF = 6\nSTATUSSCHOOLINFO = 7\nSTATUSVISIONSVALUESAIMS = 8\nSTATUSCONSULTATION = 9\nSTATUSTOOLS = 10\nSTATUSFACTORS = 11\nSTATUSCOMPLETE = 12\n\n\n\nclass ImprovementPlansHomeView(LoginRequiredMixin, TemplateView):\n template_name = 'plans/improvement-plan-home.html'\n\n def get(self, request, *args, **kwargs):\n\n current_user = self.request.user\n user_school = current_user.get_school()\n\n improvement_plans = ImprovementPlan.objects.get_school_plans(user_school)\n improvement_plan_list = []\n for plan in improvement_plans:\n sessions_shorthand = helpers.get_plan_sessions_shorthand(plan)\n improvement_plan_list.append({\n 'plan': plan,\n 'sessions_shorthand': sessions_shorthand\n })\n\n context = {\n 'improvement_plan_list': improvement_plan_list\n }\n\n return render(request, self.template_name, context)\n\n\n\nclass ImprovementPlanOverview(LoginRequiredMixin, DetailView):\n\n template_name = 'plans/add-plan-view.html'\n\n model = ImprovementPlan\n pk_url_kwarg = \"plan_pk\"\n\n def get_context_data(self, **kwargs):\n context = super(ImprovementPlanOverview, self).get_context_data(**kwargs)\n context['plan_pk'] = self.kwargs['plan_pk']\n context['plan_sessions_string'] = helpers.get_plan_sessions_string(self.object)\n return context\n\n\nclass CreateImprovementPlan(LoginRequiredMixin, TemplateView):\n\n template_name = 'plans/add-plan.html'\n\n def get(self, request, *args, **kwargs):\n plan_pk = kwargs.get('plan_pk')\n\n if plan_pk is not None:\n\n plan = ImprovementPlan.objects.get_improvement_plan(plan_pk)\n create_plan_form = 
CreateImprovementPlanForm(instance=plan)\n            base_template_name = 'plan-base-current-plan.html'\n        else:\n            create_plan_form = CreateImprovementPlanForm()\n            base_template_name = 'plan-base.html'\n\n        context = {\n            'plan_pk': plan_pk,\n            'create_plan_form': create_plan_form,\n            'base_template_name': base_template_name\n        }\n\n        return render(request, self.template_name, context)\n\n    def post(self, request, *args, **kwargs):\n\n        plan_pk = kwargs.get('plan_pk')\n\n        if 'save_plan' in request.POST:\n\n            create_plan_form = CreateImprovementPlanForm(request.POST)\n\n            if create_plan_form.is_valid():\n\n                if plan_pk is not None:\n                    plan_pk = kwargs.get('plan_pk')\n                    plan = ImprovementPlan.objects.get_improvement_plan(plan_pk)\n\n                    cleaned_created_plan_form = create_plan_form.cleaned_data\n                    plan.plan_name = cleaned_created_plan_form.get('plan_name')\n                    plan.plan_description = cleaned_created_plan_form.get('plan_description')\n                    plan.plan_session = cleaned_created_plan_form.get('plan_session')\n                    plan.plan_session_length = cleaned_created_plan_form.get('plan_session_length')\n                else:\n                    plan = create_plan_form.save(commit=False)\n                    plan.created_by = request.user\n\n                school = request.user.school\n                plan.school = school\n                plan.save()\n\n                if plan_pk is not None:\n                    return HttpResponseRedirect(reverse('plans:add-plan-view', args=(plan.pk,)))\n                else:\n                    return HttpResponseRedirect(reverse('plans:view-plan-menu', args=(plan.pk,)))\n\n            else:\n\n                messages.error(request, create_plan_form.non_field_errors())\n                return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))\n\n        if plan_pk is not None:\n            return HttpResponseRedirect(reverse('plans:add-plan-view', args=(plan_pk,)))\n        else:\n            return HttpResponseRedirect(reverse('plans:improvement-plan-home'))\n\n\n\n\n\nclass DeleteImprovementPlan(LoginRequiredMixin, DeleteView):\n\n    model = ImprovementPlan\n\n    def get_success_url(self):\n        return reverse_lazy('plans:improvement-plan-home')\n\n    def post(self, request, *args, **kwargs):\n        if 'cancel' in request.POST:\n            return HttpResponseRedirect(reverse('plans:view-plan-menu', args=(self.get_object().pk,)))\n        else:\n            return super(DeleteImprovementPlan, self).post(request, *args, **kwargs)\n\n    def get_context_data(self, **kwargs):\n        context = super(DeleteImprovementPlan, self).get_context_data(**kwargs)\n        context['plan_pk'] = self.kwargs['pk']\n        return context\n\n\n\nclass ImprovementPlanMenu(LoginRequiredMixin, TemplateView):\n\n    template_name = 'plans/view-plan-menu.html'\n\n    def get(self, request, *args, **kwargs):\n\n        plan_pk = kwargs.get('pk')\n\n        plan = ImprovementPlan.objects.get_improvement_plan(plan_pk)\n        plan_priorities = ImprovementPlanPriority.objects.get_improvement_plan_priorities(plan)\n\n        school_info_status = ImprovementPlan.objects.get_school_info_status(plan)\n        vision_values_aims_status = ImprovementPlan.objects.get_vision_values_aims_status(plan)\n        consultation_collaboration_status = ImprovementPlan.objects.get_consultation_collaboration_status(plan)\n        tools_for_measurement_status = ImprovementPlan.objects.get_tools_for_measurement_status(plan)\n        factors_influencing_status = ImprovementPlan.objects.get_factors_influencing_status(plan)\n\n        plan_priority_list = []\n        for plan_priority in plan_priorities:\n            priority_name = helpers.get_priority_name(plan_priority)\n\n            priority_status = ImprovementPlanPriority.objects.get_priority_status(plan_priority)\n\n            plan_priority_list.append({\n                'priority_pk': plan_priority.pk,\n                'priority_name': priority_name,\n                'priority_status': priority_status\n            })\n\n        context = {\n            'plan': 
plan,\n 'plan_priority_list': plan_priority_list,\n 'school_info_status':school_info_status,\n 'vision_values_aims_status': vision_values_aims_status,\n 'consultation_collaboration_status': consultation_collaboration_status,\n 'tools_for_measurement_status': tools_for_measurement_status,\n 'factors_influencing_status': factors_influencing_status\n\n }\n\n return render(request, self.template_name, context)\n\n\n\nclass PlanPriorityOverview(LoginRequiredMixin, TemplateView):\n\n template_name = 'plans/plan-priority-overview.html'\n\n def get(self, request, *args, **kwargs):\n priority_pk = kwargs.get('priority_pk')\n\n priority = ImprovementPlanPriority.objects.get_improvement_plan_priority(priority_pk)\n priority_name = helpers.get_priority_name(priority)\n plan_pk = priority.plan.pk\n priority_tasks = ImprovementPlanTask.objects.get_tasks(priority_pk)\n\n priority_responsibility_status = ImprovementPlanPriority.objects.get_priority_responsibility_status(priority)\n priority_outcome_status = ImprovementPlanPriorityOutcome.objects.get_priority_outcome_status(priority)\n priority_hwwk_status = HowWeKnow.objects.get_priority_hwwk_status(priority)\n priority_nif_status = ImprovementPlanPriority.objects.get_priority_nif_status(priority)\n\n context = {\n 'priority_pk': priority_pk,\n 'priority_name': priority_name,\n 'plan_pk': plan_pk,\n 'priority_tasks': priority_tasks,\n 'priority_responsibility_status': priority_responsibility_status,\n 'priority_outcome_status': priority_outcome_status,\n 'priority_hwwk_status': priority_hwwk_status,\n 'priority_nif_status': priority_nif_status\n }\n\n return render(request, self.template_name, context)\n\n\n\nclass PriorityResponsibilityView(LoginRequiredMixin, TemplateView):\n\n template_name = 'plans/priority-responsibility-view.html'\n\n def get(self, request, *args, **kwargs):\n priority_pk = kwargs.get('priority_pk')\n\n priority = ImprovementPlanPriority.objects.get_improvement_plan_priority(priority_pk)\n priority_name = helpers.get_priority_name(priority)\n priority_responsibility = priority.responsibility\n\n context = {\n 'plan_pk': priority.plan.pk,\n 'priority_pk': priority_pk,\n 'priority_name': priority_name,\n 'priority_responsibility': priority_responsibility\n }\n\n return render(request, self.template_name, context)\n\nclass PriorityResponsibility(LoginRequiredMixin, TemplateView):\n\n template_name = 'plans/priority-responsibility.html'\n\n def get(self, request, *args, **kwargs):\n priority_pk = kwargs.get('priority_pk')\n\n priority = ImprovementPlanPriority.objects.get_improvement_plan_priority(priority_pk)\n priority_name = helpers.get_priority_name(priority)\n\n priority_responsibility_form = PriorityResponsibilityForm()\n\n school = request.user.school\n priority_responsibility_form.populate_staff_roles(school)\n\n priority_responsibility_form.set_initial(priority.responsibility)\n\n\n context = {\n 'plan_pk': priority.plan.pk,\n 'priority_pk': priority_pk,\n 'priority_name': priority_name,\n 'priority_responsibility_form': priority_responsibility_form\n }\n\n return render(request, self.template_name, context)\n\n def post(self, request, *args, **kwargs):\n priority_pk = kwargs.get('priority_pk')\n\n priority_responsibility_form = PriorityResponsibilityForm(request.POST)\n\n if 'save_responsibility' in request.POST:\n\n if priority_responsibility_form.is_valid():\n cleaned_priority_responsibility_form = priority_responsibility_form.cleaned_data\n responsibility = cleaned_priority_responsibility_form.get('responsibility')\n 
priority = ImprovementPlanPriority.objects.get_improvement_plan_priority(priority_pk)\n\n ImprovementPlanPriority.objects.update_responsibility(priority, responsibility)\n\n return HttpResponseRedirect(reverse('plans:priority-responsibility-view', args=(priority_pk,)))\n\n\n\nclass ImprovementPlanPrioritySummaryView(LoginRequiredMixin, DetailView):\n template_name = 'plans/plan-priority-summary.html'\n\n model = ImprovementPlanPriority\n\n pk_url_kwarg = \"priority_pk\"\n\n\n def get_context_data(self, **kwargs):\n\n context = super().get_context_data(**kwargs)\n\n outcome_list = []\n outcomes = ImprovementPlanPriorityOutcome.objects.get_outcomes(self.object)\n for outcome_num, outcome in enumerate(outcomes):\n\n outcome_hwwk_list = []\n outcome_how_we_knows = HowWeKnow.objects.get_outcome_how_we_know(outcome)\n for hwwk in outcome_how_we_knows:\n outcome_hwwk_list.append({\n 'hwwk_name': helpers.get_hwwk_name(hwwk)\n })\n\n outcome_list.append({\n 'outcome_num': outcome_num + 1,\n 'outcome_name': helpers.get_outcome_name(outcome),\n 'outcome_hwwk_list': outcome_hwwk_list\n })\n\n context['priority_name'] = helpers.get_priority_name(self.object)\n context['plan_pk'] = self.object.plan.pk\n context['priority_qis'] = helpers.get_priority_qi_line(self.object)\n context['priority_outcomes'] = outcome_list #self.get_priority_outcomes()\n context['priority_progress'] = ImprovementPlanTask.objects.calculate_priority_progress(self.object)\n context['priority_next_steps'] = ImprovementPlanPriority.objects.get_priority_next_steps(self.object).splitlines()\n return context\n\n\n\n \"\"\"\n Get a display list of priority outcomes that can be shown by cols/rows in a table\n \"\"\"\n\n def get_priority_outcomes(self):\n\n priority_outcomes = []\n outcome_row = []\n TABLECOLS = 3\n outcome_no = 1\n outcomes = ImprovementPlanPriorityOutcome.objects.get_outcomes(self.object)\n\n for outcome in outcomes:\n outcome_how_we_know = HowWeKnow.objects.get_outcome_how_we_know(outcome)\n display_outcome = str(outcome_no) + '. 
'\n if outcome.is_custom is False:\n display_outcome += outcome.priority_outcome.exemplification_header\n else:\n display_outcome += outcome.custom_priority_outcome\n\n outcome_row.append({'outcome': display_outcome, 'outcome_how_we_know': outcome_how_we_know})\n outcome_no += 1\n\n # if the number of cols has reached the number in the table, start a new row\n if len(outcome_row) == TABLECOLS:\n priority_outcomes.append(outcome_row)\n outcome_row = []\n\n # if there are some widow cols, pad them out to the correct number for the table row and then append\n if len(outcome_row) > 0:\n\n blank_columns = TABLECOLS - len(outcome_row)\n for blank_col in range(blank_columns):\n outcome_row.append({'outcome': None, 'outcome_how_we_know': None})\n\n priority_outcomes.append(outcome_row)\n\n return priority_outcomes\n\nclass DeletePlanPriority(LoginRequiredMixin, DeleteView):\n\n model = ImprovementPlanPriority\n\n pk_url_kwarg = \"priority_pk\"\n\n def get_success_url(self):\n return reverse_lazy('plans:view-plan-menu', args=(self.get_object().plan.pk,))\n\n def post(self, request, *args, **kwargs):\n if 'cancel' in request.POST:\n return HttpResponseRedirect(reverse('plans:plan-priority-overview', args=(self.get_object().pk,)))\n else:\n return super(DeletePlanPriority, self).post(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n context = super(DeletePlanPriority, self).get_context_data(**kwargs)\n priority_pk = self.kwargs['priority_pk']\n plan_priority = ImprovementPlanPriority.objects.get_improvement_plan_priority(priority_pk)\n context['plan_pk'] = plan_priority.plan.pk\n context['priority_pk'] = priority_pk\n\n return context\n\nclass AddPlanPrioritiesMenu(LoginRequiredMixin, TemplateView):\n\n template_name = 'plans/add-plan-priorities-menu.html'\n\n def get(self, request, *args, **kwargs):\n\n plan_pk = kwargs.get('plan_pk')\n\n plan = ImprovementPlan.objects.get_improvement_plan(plan_pk)\n plan_priorities = ImprovementPlanPriority.objects.get_improvement_plan_priorities(plan)\n\n plan_priority_list = []\n for priority_num, priority in enumerate(plan_priorities):\n priority_name = helpers.get_priority_name(priority)\n plan_priority_list.append({\n 'priority_num': priority_num + 1,\n 'priority_name': priority_name\n })\n\n context = {\n 'plan_pk': plan_pk,\n 'plan_priority_list': plan_priority_list\n }\n\n return render(request, self.template_name, context)\n\n\nclass AddPlanPriorities(LoginRequiredMixin, TemplateView):\n\n template_name = 'plans/add-plan-priorities.html'\n\n def get(self, request, *args, **kwargs):\n plan_pk = kwargs.get('plan_pk')\n improvement_plan = ImprovementPlan.objects.get_improvement_plan(plan_pk)\n\n new_priority_form_list = []\n all_priority_headers = BasePlanPriorityHeader.objects.all()\n\n for priority_header in all_priority_headers:\n new_priority_form = NewPlanPriorityForm(priority_header)\n new_priority_form.prefix = priority_header.pk\n\n new_priority_form_list.append({\n 'priority_header': priority_header.priority_header,\n 'new_priority_form': new_priority_form\n })\n\n new_priority_form.set_initial(improvement_plan, priority_header)\n\n context = {\n 'plan_pk':plan_pk,\n 'new_priority_form_list': new_priority_form_list,\n }\n\n return render(request, self.template_name, context)\n\n def post(self, request, *args, **kwargs):\n plan_pk = kwargs.get('plan_pk')\n\n if 'save_priorities' in request.POST:\n improvement_plan = ImprovementPlan.objects.get_improvement_plan(plan_pk)\n\n all_priority_headers = 
BasePlanPriorityHeader.objects.all()\n\n            for priority_header in all_priority_headers:\n\n                form_key = priority_header.pk\n\n                new_priority_form = NewPlanPriorityForm(priority_header, request.POST, prefix=form_key)\n\n                if new_priority_form.is_valid():\n                    cleaned_new_priority_form = new_priority_form.cleaned_data\n\n                    priority_list = cleaned_new_priority_form.get('priority_field')\n\n                    ImprovementPlanPriority.objects.delete_old_priorities_by_header(improvement_plan, priority_header, priority_list)\n\n\n                    ImprovementPlanPriority.objects.create_plan_priority(improvement_plan, priority_list)\n\n                else:\n                    messages.error(request, new_priority_form.non_field_errors())\n                    return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))\n\n\n\n        return HttpResponseRedirect(reverse('plans:add-plan-priorities-menu', args=(plan_pk,)))\n\nclass PlanPrioritiesCustom(LoginRequiredMixin, TemplateView):\n\n    template_name = 'plans/add-plan-priorities-custom.html'\n\n    def get(self, request, *args, **kwargs):\n\n        plan_pk = kwargs.get('plan_pk')\n        improvement_plan = ImprovementPlan.objects.get_improvement_plan(plan_pk)\n\n        new_custom_priority_form = NewPlanCustomPriorityForm()\n\n        new_custom_priority_form.set_initial(improvement_plan)\n\n        context = {\n            'plan_pk': plan_pk,\n            'new_custom_priority_form': new_custom_priority_form\n        }\n\n        return render(request, self.template_name, context)\n\n    def post(self, request, *args, **kwargs):\n\n        plan_pk = kwargs.get('plan_pk')\n\n        if 'save_custom_priorities' in request.POST:\n\n            improvement_plan = ImprovementPlan.objects.get_improvement_plan(plan_pk)\n\n            new_custom_priority_form = NewPlanCustomPriorityForm(request.POST)\n\n            if new_custom_priority_form.is_valid():\n                cleaned_new_custom_priority_form = new_custom_priority_form.cleaned_data\n\n                custom_outcomes = cleaned_new_custom_priority_form.get('custom_field')\n\n                ImprovementPlanPriority.objects.delete_old_custom_priorities(improvement_plan, custom_outcomes)\n\n                ImprovementPlanPriority.objects.save_custom_priorities(improvement_plan, custom_outcomes)\n\n                if len(custom_outcomes) > 0:\n                    return HttpResponseRedirect(reverse('plans:custom-priority-themes', args=(plan_pk,)))\n\n\n            else:\n                messages.error(request, new_custom_priority_form.non_field_errors())\n                return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))\n\n\n        return HttpResponseRedirect(reverse('plans:add-plan-priorities-menu', args=(plan_pk,)))\n\n\nclass PlanPriorityCustomThemes(LoginRequiredMixin, TemplateView):\n\n    template_name = 'plans/custom-priority-themes.html'\n\n    def get(self, request, *args, **kwargs):\n\n        plan_pk = kwargs.get('plan_pk')\n\n        plan = ImprovementPlan.objects.get_improvement_plan(plan_pk)\n        custom_plan_priorities = ImprovementPlanPriority.objects.get_custom_plan_priorities(plan)\n\n        custom_plan_priority_theme_list = []\n\n        for custom_plan_priority_num, custom_plan_priority in enumerate(custom_plan_priorities):\n\n            theme = custom_plan_priority.custom_theme\n\n            if theme is None:\n                theme_pk = None\n            else:\n                theme_pk = theme.pk\n\n            custom_plan_priority_qi_form = CustomQIForm(theme_pk)\n            custom_plan_priority_qi_form.prefix = custom_plan_priority.pk\n\n            custom_plan_priority_theme_form = CustomPriorityThemeForm(theme_pk)\n            custom_plan_priority_theme_form.prefix = custom_plan_priority.pk\n\n            custom_plan_priority_theme_list.append({\n                'custom_plan_priority_num': custom_plan_priority_num + 1,\n                'custom_plan_priority': custom_plan_priority,\n                'custom_plan_priority_qi_form': custom_plan_priority_qi_form,\n                'custom_plan_priority_theme_form': 
custom_plan_priority_theme_form\n })\n\n custom_plan_priority_theme_rows = common_helpers.display_data_in_rows(custom_plan_priority_theme_list, 1)\n\n context = {\n 'custom_plan_priority_theme_rows': custom_plan_priority_theme_rows,\n 'plan_pk': plan_pk\n }\n\n return render(request, self.template_name, context)\n\n def post(self, request, *args, **kwargs):\n plan_pk = kwargs.get('plan_pk')\n\n if 'save_custom_priority_themes' in request.POST:\n\n plan = ImprovementPlan.objects.get_improvement_plan(plan_pk)\n\n custom_plan_priorities = ImprovementPlanPriority.objects.get_custom_plan_priorities(plan)\n for custom_plan_priority in custom_plan_priorities:\n\n form_key = custom_plan_priority.pk\n\n custom_plan_priority_theme_form = CustomPriorityThemeForm(None, request.POST, prefix=form_key)\n\n if custom_plan_priority_theme_form.is_valid():\n cleaned_custom_plan_priority_theme_form = custom_plan_priority_theme_form.cleaned_data\n\n custom_theme = cleaned_custom_plan_priority_theme_form.get('custom_theme')\n\n ImprovementPlanPriority.objects.update_custom_priority_theme(custom_plan_priority, custom_theme)\n else:\n print(custom_plan_priority_theme_form.errors)\n\n return HttpResponseRedirect(reverse('plans:add-plan-priorities-menu', args=(plan_pk,)))\n\n return HttpResponseRedirect(reverse('plans:add-plan-priorities-custom', args=(plan_pk,)))\n\n\ndef load_custom_priority_themes(request, *args, **kwargs):\n\n # either #id_623-qi or #id_623-custom_theme depending on which dropdown type was clicked\n # the number refers to the priority pk set as a prefix\n element_id = request.GET.get('element_id')\n\n # the qi id from the dropdown\n selection = request.GET.get('selection')\n\n # check if a qi dropdown and not a theme dropdown has been selectd\n if 'qi' in element_id:\n themes = Theme.objects.get_qi_themes(selection)\n\n theme_dropdown_html = render_to_string('base/theme_dropdown_list_options.html', {'themes': themes})\n\n # if it's a qi, the element id needs to be replaced to the theme id so that the theme dropdown can be updated\n prefix_element_id = element_id.replace('-qi', '')\n data = json.dumps({\n 'theme_dropdown_id': '#' + prefix_element_id + '-custom_theme',\n 'theme_dropdown_html': theme_dropdown_html,\n })\n else:\n data = json.dumps({\n 'theme_dropdown_id': '',\n 'theme_dropdown_html': '',\n })\n\n return HttpResponse(data, content_type='application/json')\n\n\n\nclass PlanPrioritiesPrevious(LoginRequiredMixin, TemplateView):\n\n template_name = 'plans/add-plan-priorities-previous.html'\n\n def get(self, request, *args, **kwargs):\n\n plan_pk = kwargs.get('plan_pk')\n previous_pk = kwargs.get('previous_pk')\n\n school = request.user.school\n\n all_plans_form = AllPlansForm(school, plan_pk)\n\n if previous_pk is not None:\n previous_plan = ImprovementPlan.objects.get_improvement_plan(previous_pk)\n all_plans_form.set_initial(previous_plan)\n\n previous_plan_name = previous_plan.plan_name\n previous_plan_priority_form = NewPlanPreviousPriorityForm(previous_plan)\n\n previous_plan_priority_form.set_initial(plan_pk)\n else:\n previous_plan_priority_form = None\n previous_plan_name = None\n\n\n context = {\n 'plan_pk': plan_pk,\n 'all_plans_form': all_plans_form,\n 'previous_plan_priority_form': previous_plan_priority_form,\n 'previous_plan_name': previous_plan_name\n }\n\n return render(request, self.template_name, context)\n\n def post(self, request, *args, **kwargs):\n plan_pk = kwargs.get('plan_pk')\n previous_pk = kwargs.get('previous_pk')\n\n school = request.user.school\n\n if 
'select_plan' in request.POST:\n all_plans_form = AllPlansForm(school, plan_pk, request.POST)\n\n if all_plans_form.is_valid():\n cleaned_all_plans_form = all_plans_form.cleaned_data\n plan_choice = cleaned_all_plans_form.get('plan_choice_field')\n\n return HttpResponseRedirect(reverse('plans:add-plan-priorities-previous', args=(plan_pk, plan_choice.pk)))\n\n else:\n messages.error(request, all_plans_form.non_field_errors())\n return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))\n\n elif 'save_previous_plan_priorities' in request.POST:\n\n if previous_pk is not None:\n improvement_plan = ImprovementPlan.objects.get_improvement_plan(plan_pk)\n previous_plan_priority_form = NewPlanPreviousPriorityForm(previous_pk, request.POST)\n\n if previous_plan_priority_form.is_valid():\n cleaned_previous_plan_priority_form = previous_plan_priority_form.cleaned_data\n\n priority_list = cleaned_previous_plan_priority_form.get('priority_field')\n\n ImprovementPlanPriority.objects.delete_priorities_previous_plan(improvement_plan, previous_pk, priority_list)\n\n ImprovementPlanPriority.objects.create_plan_priority(improvement_plan, priority_list)\n\n else:\n messages.error(request, previous_plan_priority_form.non_field_errors())\n return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))\n\n\n return HttpResponseRedirect(reverse('plans:add-plan-priorities-menu', args=(plan_pk,)))\n\n else:\n return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))\n\n else:\n return HttpResponseRedirect(reverse('plans:add-plan-priorities-menu', args=(plan_pk,)))\n\n\n\nclass PlanPrioritiesEvaluation(LoginRequiredMixin, TemplateView):\n\n template_name = 'plans/add-plan-priorities-evaluation.html'\n\n def get(self, request, *args, **kwargs):\n\n plan_pk = kwargs.get('plan_pk')\n eval_pk = kwargs.get('eval_pk')\n\n\n school = request.user.school\n\n all_evaluations_form = AllEvaluationsForm(school, False)\n\n if eval_pk is not None:\n evaluation = Evaluation.objects.get_evaluation(eval_pk)\n\n # get a dict of all priority keys from surveys in the evaluation along with a count\n priority_keys = Survey.objects.get_evaluation_survey_priority_count(evaluation)\n sorted_keys = self.get_sorted_keys(priority_keys)\n\n evaluation_plan_priority_form = NewPlanEvaluationPriorityForm(sorted_keys)\n\n sorted_votes = self.get_sorted_votes(priority_keys, sorted_keys, evaluation)\n\n evaluation_plan_priority_form.set_initial(plan_pk)\n\n all_evaluations_form.set_initial(evaluation)\n\n evaluation_name = evaluation.evaluation_name\n\n else:\n evaluation_plan_priority_form = None\n sorted_votes = None\n evaluation_name = None\n\n\n\n context = {\n 'plan_pk': plan_pk,\n 'all_evaluations_form': all_evaluations_form,\n 'evaluation_plan_priority_form': evaluation_plan_priority_form,\n 'sorted_votes': sorted_votes,\n 'evaluation_name': evaluation_name\n }\n\n return render(request, self.template_name, context)\n\n def post(self, request, *args, **kwargs):\n plan_pk = kwargs.get('plan_pk')\n eval_pk = kwargs.get('eval_pk')\n\n school = request.user.school\n\n if 'select_evaluation' in request.POST:\n all_evaluations_form = AllEvaluationsForm(school, False, request.POST)\n\n if all_evaluations_form.is_valid():\n cleaned_all_evaluations_form = all_evaluations_form.cleaned_data\n evaluation_choice = cleaned_all_evaluations_form.get('evaluation_choice_field')\n\n return HttpResponseRedirect(reverse('plans:add-plan-priorities-evaluation', args=(plan_pk, evaluation_choice.pk)))\n\n else:\n messages.error(request, 
all_evaluations_form.non_field_errors())\n return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))\n\n elif 'save_evaluation_priorities' in request.POST:\n\n if eval_pk is not None:\n evaluation = Evaluation.objects.get_evaluation(eval_pk)\n priority_keys = Survey.objects.get_evaluation_survey_priority_count(evaluation)\n sorted_keys = self.get_sorted_keys(priority_keys)\n\n evaluation_plan_priority_form = NewPlanEvaluationPriorityForm(sorted_keys, request.POST)\n\n if evaluation_plan_priority_form.is_valid():\n improvement_plan = ImprovementPlan.objects.get_improvement_plan(plan_pk)\n cleaned_evaluation_plan_priority_form = evaluation_plan_priority_form.cleaned_data\n\n priority_list = cleaned_evaluation_plan_priority_form.get('priority_field')\n\n ImprovementPlanPriority.objects.delete_priorities_evaluation_plan(improvement_plan, sorted_keys, priority_list)\n\n ImprovementPlanPriority.objects.create_plan_priority(improvement_plan, priority_list)\n\n else:\n messages.error(request, evaluation_plan_priority_form.non_field_errors())\n return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))\n\n return HttpResponseRedirect(reverse('plans:add-plan-priorities-menu', args=(plan_pk,)))\n\n else:\n return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))\n\n else:\n return HttpResponseRedirect(reverse('plans:add-plan-priorities-menu', args=(plan_pk,)))\n\n def get_sorted_keys(self, priority_keys):\n\n # sort the keys, and then convert them into ints\n sorted_keys = sorted(priority_keys, key=operator.itemgetter(1))\n sorted_keys = list(map(int, sorted_keys))\n\n return sorted_keys\n\n def get_sorted_votes(self, priority_keys, sorted_keys, evaluation):\n surveys = Survey.objects.count_evaluation_surveys(evaluation)\n\n sorted_votes = []\n for sorted_key in sorted_keys:\n vote = priority_keys[str(sorted_key)]\n percentage = int(round(vote / surveys * 100, 0))\n priority_name = BasePlanPriority.objects.get_base_plan_priority(sorted_key).plan_priority\n sorted_votes.append({'priority_name': priority_name, 'vote': vote, 'percentage': percentage})\n\n return sorted_votes\n\n\nclass PriorityNIFView(LoginRequiredMixin, TemplateView):\n\n template_name = 'plans/priority-nif-view.html'\n\n def get(self, request, *args, **kwargs):\n priority_pk = kwargs.get('priority_pk')\n\n priority = ImprovementPlanPriority.objects.get_improvement_plan_priority(priority_pk)\n priority_name = helpers.get_priority_name(priority)\n nif_priority = priority.nif_priority\n nif_driver = priority.nif_driver\n\n context = {\n 'plan_pk': priority.plan.pk,\n 'priority_pk': priority_pk,\n 'priority_name': priority_name,\n 'nif_priority': nif_priority,\n 'nif_driver': nif_driver\n }\n\n return render(request, self.template_name, context)\n\nclass PriorityNIF(LoginRequiredMixin, TemplateView):\n\n template_name = 'plans/priority-nif.html'\n\n def get(self, request, *args, **kwargs):\n priority_pk = kwargs.get('priority_pk')\n\n priority = ImprovementPlanPriority.objects.get_improvement_plan_priority(priority_pk)\n priority_name = helpers.get_priority_name(priority)\n\n priority_nif_form = PriorityNIFForm()\n priority_nif_form.set_initial(priority)\n\n\n context = {\n 'plan_pk': priority.plan.pk,\n 'priority_pk': priority_pk,\n 'priority_name': priority_name,\n 'priority_nif_form': priority_nif_form\n }\n\n return render(request, self.template_name, context)\n\n def post(self, request, *args, **kwargs):\n priority_pk = kwargs.get('priority_pk')\n\n priority_nif_form = PriorityNIFForm(request.POST)\n\n if 
'save_nif' in request.POST:\n\n if priority_nif_form.is_valid():\n cleaned_priority_nif_form = priority_nif_form.cleaned_data\n nif_priority = cleaned_priority_nif_form.get('nif_priority')\n nif_driver = cleaned_priority_nif_form.get('nif_driver')\n\n priority = ImprovementPlanPriority.objects.get_improvement_plan_priority(priority_pk)\n ImprovementPlanPriority.objects.update_nif(priority, nif_priority, nif_driver)\n\n return HttpResponseRedirect(reverse('plans:priority-nif-view', args=(priority_pk,)))\n\n\nclass PriorityOutcomeView(LoginRequiredMixin, TemplateView):\n\n template_name = 'plans/priority-outcome-view.html'\n\n def get(self, request, *args, **kwargs):\n priority_pk = kwargs.get('priority_pk')\n\n plan_priority = ImprovementPlanPriority.objects.get_improvement_plan_priority(priority_pk)\n priority_name = helpers.get_priority_name(plan_priority)\n outcomes = ImprovementPlanPriorityOutcome.objects.get_outcomes(plan_priority)\n\n outcome_list = []\n for outcome in outcomes:\n outcome_name = helpers.get_outcome_name(outcome)\n outcome_list.append({'outcome_name': outcome_name})\n\n context = {\n 'plan_pk': plan_priority.plan.pk,\n 'priority_pk': priority_pk,\n 'priority_name': priority_name,\n 'outcome_list': outcome_list\n }\n\n return render(request, self.template_name, context)\n\n\nclass AddPriorityOutcomes(LoginRequiredMixin, TemplateView):\n\n template_name = 'plans/add-priority-outcomes.html'\n\n def get(self, request, *args, **kwargs):\n priority_pk = kwargs.get('priority_pk')\n\n priority = ImprovementPlanPriority.objects.get_improvement_plan_priority(priority_pk)\n school = request.user.school\n add_outcomes_form = AddPriorityOutcomesForm(priority_pk, school)\n\n context = {\n 'plan_pk': priority.plan.pk,\n 'priority_pk': priority_pk,\n 'priority': priority,\n 'add_outcomes_form': add_outcomes_form\n }\n\n return render(request, self.template_name, context)\n\n def post(self, request, *args, **kwargs):\n priority_pk = kwargs.get('priority_pk')\n priority = ImprovementPlanPriority.objects.get_improvement_plan_priority(priority_pk)\n\n if 'save_outcomes' in request.POST:\n school = request.user.school\n add_outcomes_form = AddPriorityOutcomesForm(priority_pk, school, request.POST)\n\n if add_outcomes_form.is_valid():\n cleaned_add_outcomes_form = add_outcomes_form.cleaned_data\n\n outcomes = cleaned_add_outcomes_form.get('outcome_field')\n ImprovementPlanPriorityOutcome.objects.add_priority_outcomes(priority_pk, outcomes)\n\n custom_outcomes = cleaned_add_outcomes_form.get('custom_field')\n ImprovementPlanPriorityOutcome.objects.save_custom_priority_outcomes(priority_pk, custom_outcomes)\n\n else:\n messages.error(request, add_outcomes_form.non_field_errors())\n return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))\n\n return HttpResponseRedirect(reverse('plans:priority-outcome-view', args=(priority_pk,)))\n\n\nclass AddHowWeKnowsPriority(LoginRequiredMixin, TemplateView):\n\n template_name = 'plans/add-how-we-knows-priority.html'\n\n def get(self, request, *args, **kwargs):\n priority_pk = kwargs.get('priority_pk')\n plan_priority = ImprovementPlanPriority.objects.get_improvement_plan_priority(priority_pk)\n\n outcomes = ImprovementPlanPriorityOutcome.objects.get_outcomes(plan_priority)\n\n outcomes_with_hwwks = []\n\n for outcome_num, outcome in enumerate(outcomes):\n hwwks = HowWeKnow.objects.get_outcome_how_we_know(outcome)\n hwwk_names = []\n for hwwk in hwwks:\n hwwk_name = helpers.get_hwwk_name(hwwk)\n hwwk_names.append(hwwk_name)\n outcome_name = 
helpers.get_outcome_name(outcome)\n\n outcomes_with_hwwks.append({\n 'outcome_pk': outcome.pk,\n 'outcome_num': outcome_num + 1,\n 'outcome_name': outcome_name,\n 'hwwk_names': hwwk_names\n })\n\n outcome_hwwk_rows = common_helpers.display_data_in_rows(outcomes_with_hwwks, 2)\n\n priority_name = helpers.get_priority_name(plan_priority)\n\n context = {\n 'plan_pk': plan_priority.plan.pk,\n 'priority_pk': priority_pk,\n 'outcome_hwwk_rows': outcome_hwwk_rows,\n 'priority_name': priority_name\n }\n\n return render(request, self.template_name, context)\n\n\nclass AddOutcomeHowWeKnows(LoginRequiredMixin, TemplateView):\n\n template_name = 'plans/add-outcome-how-we-knows.html'\n\n def get(self, request, *args, **kwargs):\n\n outcome_pk = kwargs.get('outcome_pk')\n\n add_outcome_hwwk_form = AddOutcomeHWWKForm(outcome_pk)\n outcome = ImprovementPlanPriorityOutcome.objects.get_outcome(outcome_pk)\n\n context = {\n 'plan_pk': outcome.plan_priority.plan.pk,\n 'priority_pk': outcome.plan_priority.pk,\n 'outcome': outcome,\n 'add_outcome_hwwk_form': add_outcome_hwwk_form\n }\n\n return render(request, self.template_name, context)\n\n def post(self, request, *args, **kwargs):\n outcome_pk = kwargs.get('outcome_pk')\n outcome = ImprovementPlanPriorityOutcome.objects.get_outcome(outcome_pk)\n\n if 'save_hwwks' in request.POST:\n\n\n add_outcome_hwwk_form = AddOutcomeHWWKForm(outcome_pk, request.POST)\n\n if add_outcome_hwwk_form.is_valid():\n cleaned_add_outcome_hwwk_form = add_outcome_hwwk_form.cleaned_data\n\n hwwks = cleaned_add_outcome_hwwk_form.get('hwwk_field')\n HowWeKnow.objects.add_outcome_hwwks(outcome_pk, hwwks)\n\n custom_hwwks = cleaned_add_outcome_hwwk_form.get('custom_field')\n HowWeKnow.objects.save_custom_outcome_hwwks(outcome, custom_hwwks)\n\n else:\n messages.error(request, add_outcome_hwwk_form.non_field_errors())\n return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))\n\n priority_pk = outcome.plan_priority.pk\n return HttpResponseRedirect(reverse('plans:add-how-we-knows-priority', args=(priority_pk,)))\n\n\nclass PlanSchoolInfoView(LoginRequiredMixin, DetailView):\n\n template_name = 'plans/plan-school-info-view.html'\n\n model = ImprovementPlan\n pk_url_kwarg = \"plan_pk\"\n\n def get_context_data(self, **kwargs):\n context = super(PlanSchoolInfoView, self).get_context_data(**kwargs)\n context['plan_pk'] = self.kwargs['plan_pk']\n return context\n\n\nclass ImprovementPlanSchoolInfo(LoginRequiredMixin, TemplateView):\n\n template_name = 'plans/plan-school-info.html'\n\n def get(self, request, *args, **kwargs):\n plan_pk = kwargs.get('plan_pk')\n plan = ImprovementPlan.objects.get_improvement_plan(plan_pk)\n plan_name = plan.plan_name\n\n school_info_form = SchoolInfoForm(plan_pk)\n school_name = request.user.school.school_name\n\n context = {\n 'school_info_form': school_info_form,\n 'school_name': school_name,\n 'plan_pk': plan_pk,\n 'plan_name': plan_name,\n }\n return render(request, self.template_name, context)\n\n def post(self, request, *args, **kwargs):\n plan_pk = kwargs.get('plan_pk')\n\n if 'save_edited_school_info' in request.POST:\n school_info_form = SchoolInfoForm(plan_pk, request.POST)\n if school_info_form.is_valid():\n cleaned_school_info_form = school_info_form.cleaned_data\n school_information = cleaned_school_info_form.get('school_information')\n ImprovementPlan.objects.update_school_information(plan_pk, school_information)\n\n\n return HttpResponseRedirect(reverse('plans:plan-school-info-view', args=(plan_pk,)))\n\n\nclass 
PlanVisionValuesAims(LoginRequiredMixin, DetailView):\n\n template_name = 'plans/plan-vision-values-aims-view.html'\n\n model = ImprovementPlan\n pk_url_kwarg = \"plan_pk\"\n\n def get_context_data(self, **kwargs):\n context = super(PlanVisionValuesAims, self).get_context_data(**kwargs)\n context['plan_pk'] = self.kwargs['plan_pk']\n context['aims_lines'] = self.object.aims.splitlines()\n return context\n\n\nclass ImprovementPlanVisionValuesAims(LoginRequiredMixin, TemplateView):\n template_name = 'plans/plan-vision-values-aims.html'\n\n def get(self, request, *args, **kwargs):\n plan_pk = kwargs.get('plan_pk')\n plan = ImprovementPlan.objects.get_improvement_plan(plan_pk)\n plan_name = plan.plan_name\n\n vision_values_aims_form = VisionValuesAimsForm(plan_pk)\n school_name = request.user.school.school_name\n\n context = {\n 'vision_values_aims_form': vision_values_aims_form,\n 'school_name': school_name,\n 'plan_pk': plan_pk,\n 'plan_name': plan_name\n }\n return render(request, self.template_name, context)\n\n def post(self, request, *args, **kwargs):\n plan_pk = kwargs.get('plan_pk')\n\n if 'save_edited_vision_values_aims' in request.POST:\n vision_values_aims_form = VisionValuesAimsForm(plan_pk, request.POST)\n if vision_values_aims_form.is_valid():\n cleaned_vision_values_aims_form = vision_values_aims_form.cleaned_data\n vision_values = cleaned_vision_values_aims_form.get('vision_values')\n aims = cleaned_vision_values_aims_form.get('aims')\n ImprovementPlan.objects.update_vision_values_aims(plan_pk, vision_values, aims)\n\n\n return HttpResponseRedirect(reverse('plans:plan-vision-values-aims-view', args=(plan_pk,)))\n\nclass PlanConsultationCollaborationView(LoginRequiredMixin, DetailView):\n\n template_name = 'plans/plan-consultation-collaboration-view.html'\n\n model = ImprovementPlan\n pk_url_kwarg = \"plan_pk\"\n\n def get_context_data(self, **kwargs):\n context = super(PlanConsultationCollaborationView, self).get_context_data(**kwargs)\n context['plan_pk'] = self.kwargs['plan_pk']\n context['consultation_lines'] = self.object.consultation_collaboration.splitlines()\n return context\n\nclass ImprovementPlanConsultationCollaboration(LoginRequiredMixin, TemplateView):\n template_name = 'plans/plan-consultation-collaboration.html'\n\n def get(self, request, *args, **kwargs):\n plan_pk = kwargs.get('plan_pk')\n plan = ImprovementPlan.objects.get_improvement_plan(plan_pk)\n plan_name = plan.plan_name\n\n consultation_collaboration_form = ConsultationCollaborationForm(plan_pk)\n\n context = {\n 'consultation_collaboration_form': consultation_collaboration_form,\n 'plan_pk': plan_pk,\n 'plan_name': plan_name\n }\n return render(request, self.template_name, context)\n\n\n def post(self, request, *args, **kwargs):\n plan_pk = kwargs.get('plan_pk')\n\n if 'save_edited_consultation_collaboration' in request.POST:\n\n consultation_collaboration_form = ConsultationCollaborationForm(plan_pk, request.POST)\n\n if consultation_collaboration_form.is_valid():\n cleaned_consultation_collaboration_form = consultation_collaboration_form.cleaned_data\n consultation_collaboration = cleaned_consultation_collaboration_form.get('consultation_collaboration')\n ImprovementPlan.objects.update_consultation_collaboration(plan_pk, consultation_collaboration)\n\n return HttpResponseRedirect(reverse('plans:plan-consultation-collaboration-view', args=(plan_pk,)))\n\n\nclass PlanToolsForMeasurementView(LoginRequiredMixin, DetailView):\n\n template_name = 'plans/plan-tools-for-measurement-view.html'\n\n model = 
ImprovementPlan\n pk_url_kwarg = \"plan_pk\"\n\n def get_context_data(self, **kwargs):\n context = super(PlanToolsForMeasurementView, self).get_context_data(**kwargs)\n context['plan_pk'] = self.kwargs['plan_pk']\n context['tools'] = self.object.tools_for_measurement.splitlines()\n return context\n\nclass ImprovementPlanToolsForMeasurement(LoginRequiredMixin, TemplateView):\n template_name = 'plans/plan-tools-for-measurement.html'\n\n def get(self, request, *args, **kwargs):\n plan_pk = kwargs.get('plan_pk')\n plan = ImprovementPlan.objects.get_improvement_plan(plan_pk)\n plan_name = plan.plan_name\n\n tools_for_measurement_form = ToolsForMeasurementForm(plan_pk)\n\n context = {\n 'tools_for_measurement_form': tools_for_measurement_form,\n 'plan_pk': plan_pk,\n 'plan_name': plan_name\n }\n return render(request, self.template_name, context)\n\n def post(self, request, *args, **kwargs):\n plan_pk = kwargs.get('plan_pk')\n\n if 'save_edited_tools_for_measurement' in request.POST:\n\n tools_for_measurement_form = ToolsForMeasurementForm(plan_pk, request.POST)\n\n if tools_for_measurement_form.is_valid():\n cleaned_tools_for_measurement_form = tools_for_measurement_form.cleaned_data\n tools_for_measurement = cleaned_tools_for_measurement_form.get('tools_for_measurement')\n ImprovementPlan.objects.update_tools_for_measurement(plan_pk, tools_for_measurement)\n\n\n return HttpResponseRedirect(reverse('plans:plan-tools-for-measurement-view', args=(plan_pk,)))\n\n\nclass PlanFactorsInfluencingView(LoginRequiredMixin, DetailView):\n template_name = 'plans/plan-factors-influencing-view.html'\n\n model = ImprovementPlan\n pk_url_kwarg = \"plan_pk\"\n\n def get_context_data(self, **kwargs):\n context = super(PlanFactorsInfluencingView, self).get_context_data(**kwargs)\n context['plan_pk'] = self.kwargs['plan_pk']\n context['school_factors'] = self.object.factors_influencing_school.splitlines()\n context['authority_factors'] = self.object.factors_influencing_authority.splitlines()\n context['national_factors'] = self.object.factors_influencing_national.splitlines()\n return context\n\n\nclass ImprovementPlanFactorsInfluencing(LoginRequiredMixin, TemplateView):\n\n template_name = 'plans/plan-factors-influencing.html'\n\n def get(self, request, *args, **kwargs):\n plan_pk = kwargs.get('plan_pk')\n plan = ImprovementPlan.objects.get_improvement_plan(plan_pk)\n plan_name = plan.plan_name\n\n factors_influencing_form = FactorsInfluencingForm(plan_pk)\n\n context = {\n 'factors_influencing_form': factors_influencing_form,\n 'plan_pk': plan_pk,\n 'plan_name': plan_name\n }\n return render(request, self.template_name, context)\n\n def post(self, request, *args, **kwargs):\n plan_pk = kwargs.get('plan_pk')\n\n if 'save_edited_factors_influencing' in request.POST:\n\n factors_influencing_form = FactorsInfluencingForm(plan_pk, request.POST)\n if factors_influencing_form.is_valid():\n cleaned_factors_influencing_form = factors_influencing_form.cleaned_data\n factors_influencing_school = cleaned_factors_influencing_form.get('factors_influencing_school')\n factors_influencing_authority = cleaned_factors_influencing_form.get('factors_influencing_authority')\n factors_influencing_national = cleaned_factors_influencing_form.get('factors_influencing_national')\n\n ImprovementPlan.objects.update_factors_influencing_plan(\n plan_pk, factors_influencing_school, factors_influencing_authority, factors_influencing_national)\n\n\n return HttpResponseRedirect(reverse('plans:plan-factors-influencing-view', 
args=(plan_pk,)))\n\n\n\nclass ImprovementPlanPriorityTaskListView(LoginRequiredMixin, TemplateView):\n\n template_name = 'plans/plan-priority-task-list.html'\n\n def get(self, request, *args, **kwargs):\n priority_pk = kwargs.get('pk')\n priority = ImprovementPlanPriority.objects.get_improvement_plan_priority(priority_pk)\n priority_tasks = ImprovementPlanTask.objects.get_tasks(priority_pk)\n\n\n\n priority_task_list = []\n for task_num, task in enumerate(priority_tasks):\n\n if task.resources is not None:\n line_split_resources = task.resources.splitlines()\n else:\n line_split_resources = ['None']\n\n task_actions = ImprovementPlanTaskAction.objects.get_task_actions(task)\n\n priority_task_list.append({\n 'plan_pk': priority.plan.pk,\n 'task_pk': task.pk,\n 'task_num': task_num + 1,\n 'task_name': task.task_name,\n 'by_whom': task.by_whom,\n 'time_number': task.task_time_number,\n 'time_units': task.task_time_units,\n 'time_duration': task.task_time_duration,\n 'by_when': task.by_when,\n 'progress': task.progress,\n 'resources': line_split_resources,\n 'task_actions': task_actions\n\n })\n\n priority_name = helpers.get_priority_name(priority)\n\n priority_task_rows = common_helpers.display_data_in_rows(priority_task_list, 3)\n\n context = {\n 'plan_pk': priority.plan.pk,\n 'priority_pk': priority_pk,\n 'priority_name': priority_name,\n 'priority_task_rows':priority_task_rows,\n }\n return render(request, self.template_name, context)\n\n\nclass CreateImprovementPlanTask(LoginRequiredMixin, TemplateView):\n\n template_name = 'plans/add-plan-task.html'\n\n def get(self, request, *args, **kwargs):\n priority_pk = kwargs.get('priority_pk')\n task_pk = kwargs.get('task_pk')\n\n new_task_form = ImprovementPlanTaskForm()\n\n #edit task\n if priority_pk is None:\n task = ImprovementPlanTask.objects.get_task(task_pk)\n new_task_form.set_initial_edit(task)\n priority_pk = task.plan_priority.pk\n status = 'edit'\n # create_task\n else:\n new_task_form.set_initial_add()\n status = 'create'\n\n plan_priority = ImprovementPlanPriority.objects.get_improvement_plan_priority(priority_pk)\n\n\n\n plan_name = plan_priority.plan.plan_name\n priority_name = helpers.get_priority_name(plan_priority)\n\n context = {\n 'plan_pk': plan_priority.plan.pk,\n 'priority_pk': priority_pk,\n 'plan_name': plan_name,\n 'priority_name': priority_name,\n 'new_task_form':new_task_form,\n 'status': status\n }\n\n return render(request, self.template_name, context)\n\n def post(self, request, *args, **kwargs):\n plan_priority_pk = kwargs.get('priority_pk')\n priority_task_pk = kwargs.get('task_pk')\n\n new_task_form = ImprovementPlanTaskForm(request.POST)\n\n if 'save_task' in request.POST:\n if new_task_form.is_valid():\n cleaned_new_task_form = new_task_form.cleaned_data\n\n\n\n task_name = cleaned_new_task_form.get('task_name')\n by_whom = cleaned_new_task_form.get('by_whom')\n resources = cleaned_new_task_form.get('resources')\n task_time_number = cleaned_new_task_form.get('task_time_number')\n task_time_units = cleaned_new_task_form.get('task_time_units')\n task_time_duration = cleaned_new_task_form.get('task_time_duration')\n by_when = cleaned_new_task_form.get('by_when')\n progress = cleaned_new_task_form.get('progress')\n actions = cleaned_new_task_form.get('actions')\n\n if priority_task_pk is None:\n improvement_plan_priority = ImprovementPlanPriority.objects.get_improvement_plan_priority(plan_priority_pk)\n ImprovementPlanTask.objects.add_plan_task(\n improvement_plan_priority,\n task_name,\n by_whom,\n resources,\n 
task_time_number,\n task_time_units,\n task_time_duration,\n by_when,\n progress,\n actions\n )\n\n\n return HttpResponseRedirect(reverse('plans:view-plan-priority-task-list', args=(plan_priority_pk,)))\n\n else:\n current_task = ImprovementPlanTask.objects.get_task(priority_task_pk)\n ImprovementPlanTask.objects.edit_plan_task(\n current_task,\n task_name,\n by_whom,\n resources,\n task_time_number,\n task_time_units,\n task_time_duration,\n by_when,\n progress,\n actions\n )\n\n return HttpResponseRedirect(reverse('plans:view-plan-priority-task-list', args=(current_task.plan_priority.pk,)))\n\n else:\n messages.error(request, new_task_form.non_field_errors())\n return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))\n\n elif 'delete_task' in request.POST:\n return HttpResponseRedirect(reverse('plans:delete-plan-task', args=(priority_task_pk,)))\n\n else:\n # cancel from add task\n if priority_task_pk is None:\n return HttpResponseRedirect(reverse('plans:view-plan-priority-task-list', args=(plan_priority_pk,)))\n\n # return from edit task\n else:\n current_task = ImprovementPlanTask.objects.get_task(priority_task_pk)\n return HttpResponseRedirect(reverse('plans:view-plan-priority-task-list', args=(current_task.plan_priority.pk,)))\n\n\n\nclass DeleteImprovementPlanTask(LoginRequiredMixin, DeleteView):\n model = ImprovementPlanTask\n\n def get_success_url(self):\n priority = self.object.plan_priority\n return reverse_lazy('plans:view-plan-priority-task-list', args=(priority.id,))\n\n def post(self, request, *args, **kwargs):\n if 'cancel' in request.POST:\n return HttpResponseRedirect(reverse('plans:add-plan-task-edit', args=(self.get_object().pk,)))\n else:\n return super(DeleteImprovementPlanTask, self).post(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n context = super(DeleteImprovementPlanTask, self).get_context_data(**kwargs)\n task_pk = self.kwargs['pk']\n task = ImprovementPlanTask.objects.get_task(task_pk)\n context['plan_pk'] = task.plan_priority.plan.pk\n context['priority_pk'] = task.plan_priority.pk\n\n return context\n\n\nclass UpdatePriorityTaskProgress(LoginRequiredMixin, TemplateView):\n\n template_name = 'plans/update-priority-task-progress.html'\n\n def get(self, request, *args, **kwargs):\n priority_pk = kwargs.get('pk')\n priority = ImprovementPlanPriority.objects.get_improvement_plan_priority(priority_pk)\n priority_tasks = ImprovementPlanTask.objects.get_tasks(priority_pk)\n\n task_progress_list = []\n\n for task_num, task in enumerate(priority_tasks):\n task_progress_form = TaskProgressForm()\n\n task_progress_form.prefix = str(task.pk)\n task_progress_form.set_initial(task.progress)\n\n task_action_list = []\n task_actions = ImprovementPlanTaskAction.objects.get_task_actions(task)\n for action in task_actions:\n task_action_form = TaskActionForm()\n task_action_form.set_initial(action.is_complete)\n task_action_form.prefix = str(action.pk)\n task_action_list.append({'action_check': task_action_form, 'action_name': action.action})\n\n task_progress_list.append({\n 'task_num': task_num + 1,\n 'task_name': task.task_name,\n 'task_progress_form': task_progress_form,\n 'task_action_list': task_action_list\n })\n\n priority_name = helpers.get_priority_name(priority)\n\n context = {\n 'plan_pk': priority.plan.pk,\n 'priority_pk': priority_pk,\n 'priority_name': priority_name,\n 'task_progress_list':task_progress_list,\n }\n return render(request, self.template_name, context)\n\n def post(self, request, *args, **kwargs):\n\n priority_pk = 
kwargs.get('pk')\n\n if 'save_task_progress' in request.POST:\n\n priority_tasks = ImprovementPlanTask.objects.get_tasks(priority_pk)\n\n for task in priority_tasks:\n form_key = str(task.pk)\n task_progress_form = TaskProgressForm(request.POST, prefix=form_key)\n\n if task_progress_form.is_valid():\n cleaned_task_progress_form = task_progress_form.cleaned_data\n\n task_progress = cleaned_task_progress_form.get('progress')\n\n ImprovementPlanTask.objects.update_progress(task, task_progress)\n\n actions = ImprovementPlanTaskAction.objects.get_task_actions(task)\n for action in actions:\n task_action_form_key = str(action.pk)\n task_action_form = TaskActionForm(request.POST, prefix=task_action_form_key)\n\n if task_action_form.is_valid():\n cleaned_task_action_form = task_action_form.cleaned_data\n is_complete = cleaned_task_action_form.get('is_complete')\n ImprovementPlanTaskAction.objects.update_action(action, is_complete)\n\n else:\n messages.error(request, task_progress_form.non_field_errors())\n return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))\n\n return HttpResponseRedirect(reverse('plans:view-plan-priority-task-list', args=(priority_pk,)))\n\n\nclass ImprovementPlanNextSteps(LoginRequiredMixin, TemplateView):\n\n template_name = 'plans/priority-next-steps.html'\n\n def get(self, request, *args, **kwargs):\n priority_pk = kwargs.get('priority_pk')\n priority = ImprovementPlanPriority.objects.get_improvement_plan_priority(priority_pk)\n\n next_steps_form = NextStepsForm(priority_pk)\n\n priority_name = helpers.get_priority_name(priority)\n\n context = {\n 'plan_pk': priority.plan.pk,\n 'priority_pk': priority_pk,\n 'next_steps_form': next_steps_form,\n 'priority_name': priority_name\n }\n return render(request, self.template_name, context)\n\n def post(self, request, *args, **kwargs):\n priority_pk = kwargs.get('priority_pk')\n priority = ImprovementPlanPriority.objects.get_improvement_plan_priority(priority_pk)\n\n if 'save_next_steps' in request.POST:\n\n\n next_steps_form = NextStepsForm(priority_pk, request.POST)\n\n if next_steps_form.is_valid():\n cleaned_next_steps_form = next_steps_form.cleaned_data\n next_steps = cleaned_next_steps_form.get('next_steps')\n\n ImprovementPlanPriority.objects.update_next_steps(priority, next_steps)\n\n return HttpResponseRedirect(reverse('plans:plan-priority-overview', args=(priority.pk,)))\n\n","sub_path":"plans/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":62249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"569200991","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /users/payno/.local/share/virtualenvs/tomwer_venc/lib/python3.7/site-packages/orangecontrib/tomwer/widgets/reconstruction/DarkRefAndCopyOW.py\n# Compiled at: 2020-03-06 02:01:31\n# Size of source mod 2**32: 5238 bytes\n__authors__ = [\n    'C. Nemoz', 'H. 
Payno']\n__license__ = 'MIT'\n__date__ = '10/01/2018'\nimport logging\nfrom Orange.widgets import widget, gui\nfrom Orange.widgets.settings import Setting\nfrom Orange.widgets.widget import Input, Output\nfrom orangecontrib.tomwer.orange.settings import CallbackSettingsHandler\nfrom tomwer.core.process.reconstruction.darkref.darkrefs import DarkRefs, logger as DRLogger\nfrom tomwer.core.scan.scanbase import TomoBase\nfrom tomwer.gui.reconstruction.darkref.darkrefcopywidget import DarkRefAndCopyWidget\nfrom tomwer.synctools.ftseries import QReconsParams\nfrom tomwer.web.client import OWClient\nlogger = logging.getLogger(__name__)\n\nclass DarkRefAndCopyOW(widget.OWWidget, OWClient):\n __doc__ = '\\n    A simple widget managing the copy of an incoming folder to another one\\n\\n    :param parent: the parent widget\\n    '\n name = 'dark and flat field construction'\n id = 'orange.widgets.tomwer.darkrefs'\n description = 'This widget will generate dark refs for a received scan '\n icon = 'icons/darkref.svg'\n priority = 25\n category = 'esrfWidgets'\n keywords = ['tomography', 'dark', 'darks', 'ref', 'refs']\n want_main_area = True\n resizing_enabled = True\n compress_signal = False\n settingsHandler = CallbackSettingsHandler()\n _rpSetting = Setting(dict())\n assert len(DarkRefs.inputs) == 1\n\n class Inputs:\n data_in = Input(name=(DarkRefs.inputs[0].name), type=(DarkRefs.inputs[0].type),\n doc=(DarkRefs.inputs[0].doc))\n\n assert len(DarkRefs.outputs) == 1\n\n class Outputs:\n data_out = Output(name=(DarkRefs.outputs[0].name), type=(DarkRefs.outputs[0].type),\n doc=(DarkRefs.outputs[0].doc))\n\n def __init__(self, parent=None, _connect_handler=True, reconsparams=None):\n \"\"\"\n\n :param bool _connect_handler: True if we want to store the modifications\n on the setting. 
Needed for unit tests, since they\n keep Qt widgets alive.\n :param QReconsParams reconsparams: reconstruction parameters\n \"\"\"\n widget.OWWidget.__init__(self, parent)\n OWClient.__init__(self, (logger, DRLogger))\n reconsparams = reconsparams or QReconsParams()\n if self._rpSetting != dict():\n try:\n reconsparams.dkrf.load_from_dict(self._rpSetting)\n except:\n logger.warning('fail to load reconstruction settings')\n\n self.widget = DarkRefAndCopyWidget(parent=self, reconsparams=reconsparams)\n self._layout = gui.vBox(self.mainArea, self.name).layout()\n self._layout.addWidget(self.widget)\n self.setForceSync = self.widget.setForceSync\n self.hasRefStored = self.widget.hasRefStored\n self.setModeAuto = self.widget.setModeAuto\n self.setRefsFromScan = self.widget.setRefsFromScan\n self.widget.sigScanReady.connect(self.signalReady)\n if _connect_handler:\n self.settingsHandler.addCallback(self._updateSettingsVals)\n\n @Inputs.data_in\n def process(self, scanID):\n assert isinstance(scanID, TomoBase)\n return self.widget.process(scanID)\n\n def signalReady(self, scanID):\n assert isinstance(scanID, TomoBase)\n self.Outputs.data_out.send(scanID)\n\n def _updateSettingsVals(self):\n self._rpSetting = self.widget.recons_params.to_dict()\n\n @property\n def recons_params(self):\n return self.widget.recons_params\n\n def close(self):\n logger.info('close Dark refs')\n self.widget.close()\n super(DarkRefAndCopyOW, self).close()","sub_path":"pycfiles/tomwer-0.4.0.linux-x86_64.tar/DarkRefAndCopyOW.cpython-37.py","file_name":"DarkRefAndCopyOW.cpython-37.py","file_ext":"py","file_size_in_byte":3989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"582318216","text":"class Solution(object):\n    def isValidSudoku(self, board):\n        \"\"\"\n        :type board: List[List[str]]\n        :rtype: bool\n        \"\"\"\n        dict=set()\n        for i in xrange(9):\n            for j in xrange(9):\n                if board[i][j].isdigit():\n                    tmp=board[i][j]\n                    if (i, tmp) in dict or (tmp, j) in dict or (i/3, j/3, tmp) in dict:\n                        return False\n                    dict.add((i, tmp))\n                    dict.add((tmp, j))\n                    dict.add((i/3, j/3, tmp))\n        return True \n        ","sub_path":"36-Valid-Sudoku/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"48572488","text":"\"\"\"\nSelection Assignment 1, Databases Lab (Basdat)\nNIM/Name: 13517021/Abda Shaffan Diva\nFile Name: scrape.py\n\"\"\"\n\n# Main module for scraping\nfrom bs4 import BeautifulSoup\n\n# Module to convert word strings into numbers\nfrom word2number import w2n\n\n# Module for GET requests to URLs\nimport requests\nfrom requests.adapters import HTTPAdapter\nfrom requests.packages.urllib3.util.retry import Retry\n\n# Module for processing JSON data\nimport json\nfrom pandas.io.json import json_normalize\n\nimport time\n\nimport os\nfrom pathlib import Path\n\n\ndef requests_retry_session(\n    # Function to handle requests and their error handling\n    retries=3,\n    backoff_factor=0.3,\n    status_forcelist=(500, 502, 504),\n    session=None,\n):\n    session = session or requests.Session()\n    retry = Retry(\n        total=retries,\n        read=retries,\n        connect=retries,\n        backoff_factor=backoff_factor,\n        status_forcelist=status_forcelist,\n    )\n    adapter = HTTPAdapter(max_retries=retry)\n    session.mount(\"http://\", adapter)\n    session.mount(\"https://\", adapter)\n    return session\n\n\npage_num = 1\nbook_list = []\njson_filename = \"books.json\"\ncsv_filename = \"books_normalized.csv\"\nroot = Path(__file__).parent.parent\nbase_data_path 
= (root / \"data/\").resolve()\njson_path = (base_data_path / json_filename).resolve()\n\nwhile page_num <= 50:\n t0 = time.time()\n try:\n # Ambil semua isi HTML dari books.toscrape.com\n url = \"http://books.toscrape.com/catalogue/page-{}.html\".format(page_num)\n response = requests_retry_session().get(url)\n\n except Exception as e:\n # Gagal request\n print(\"{} Connection failed: {}\".format(url, e.__class__.__name__))\n\n else:\n print(\"{} connected. Status: {}\".format(url, response.status_code))\n soup = BeautifulSoup(response.text, \"lxml\")\n # Mencari data-data yang dibutuhkan dari container div \"product_pod\"\n books = soup.find_all(\"article\", class_=\"product_pod\")\n for book in books:\n # Dictionary untuk menyimpan data-data setiap buku\n book_dict = {}\n\n # Ambil Judul buku\n book_title = book.img[\"alt\"]\n book_dict[\"title\"] = book_title\n\n # Ambil rating buku\n rating_classes = book.p[\"class\"]\n book_rating = rating_classes[1]\n book_dict[\"rating\"] = w2n.word_to_num(book_rating)\n\n # Ambil link detail buku\n book_link = \"http://books.toscrape.com/catalogue/\"\n book_link += book.find(\"div\", class_=\"image_container\").a[\"href\"]\n book_dict[\"link\"] = book_link\n\n # Masukkan dict ke dalam list semua buku pada katalog\n book_list.append(book_dict)\n\n page_num += 1\n\n finally:\n # Waktu request\n t1 = time.time()\n print(\"took {} seconds\\n\\n\".format(t1 - t0))\n\n\nfor book in book_list:\n # Loop untuk mengambil detail tambahan dari setiap buku yang sudah diambil linknya\n t0 = time.time()\n try:\n # Ambil semua isi HTML dari buku\n url = book[\"link\"]\n response = requests_retry_session().get(url)\n\n except Exception as e:\n # Gagal request\n print(\"{} Connection failed: {}\".format(url, e.__class__.__name__))\n\n else:\n print(\"{} connected. 
Status: {}\".format(url, response.status_code))\n soup = BeautifulSoup(response.text, \"lxml\")\n\n # Ambil kategori buku dari ul dengan class breadcrumb\n breadcrumb = soup.find(\"ul\", class_=\"breadcrumb\")\n breadcrumb_list = breadcrumb.find_all(\"li\")\n book_category = breadcrumb_list[2].a.text\n book[\"category\"] = book_category\n\n # Ambil harga buku\n price = soup.find(\"p\", class_=\"price_color\").text\n book_price_in_euro = float(price[2:])\n book[\"price_in_euro\"] = book_price_in_euro\n\n # Ambil jumlah stok buku yang tersedia\n instock = soup.select(\"p.instock.availability\")[0].text.strip()\n book_stock = int(\"\".join(filter(str.isdigit, instock)))\n book[\"stock\"] = book_stock\n\n # Ambil summary dari buku\n summary_el = soup.find(\"div\", id=\"product_description\")\n if summary_el is None:\n summary = \"-\"\n else:\n summary_el = summary_el.findNext(\"p\")\n summary = summary_el.text.split(\"...\")[0].strip()\n book[\"summary\"] = summary\n\n # Ambil kode UPC buku\n upc = soup.find(\"tr\").td.text\n book[\"upc\"] = upc\n\n # Menyimpan dict buku ke dalam file \"books.json\"\n if not os.path.exists(base_data_path):\n os.makedirs(base_data_path)\n with open(json_path, \"w\") as f:\n json.dump(book_list, f, sort_keys=True, indent=4)\n\n finally:\n # Waktu request\n t1 = time.time()\n print(\"took {} seconds\\n\\n\".format(t1 - t0))\n\n\n# Buat data csv dari JSON\ncsv_path = (base_data_path / csv_filename).resolve()\nnormalized_data = json_normalize(book_list)\nnormalized_data.to_csv(csv_path, encoding=\"utf-8\")\nprint(\"Data disimpan di ./data/books.json dan ./data/books_normalized.csv\")\n","sub_path":"src/scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":5038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"529967339","text":"import time\nimport re\n# Import the twython library for Twitter APIs\nfrom twython import Twython\nfrom twython import TwythonError\n# Imports the Google Cloud client library\nfrom google.cloud import language\n#from google.cloud.language import enums\n#from google.cloud.language import types\n\nCOUNT = 100\n\n# filter the tweet text to delete the hash tag, urls, etc.\ndef filter(text):\n\ttext = re.sub('RT \\@+\\w+\\:','',text) #delete head of retweet\n\ttext = re.sub('\\#+\\w+\\s','',text) #delete hashtag\n\ttext = re.sub('https://t.co/+\\w+.','',text) #delete url\n\ttext = re.sub('\\@+\\w+(\\\\n|\\s)','',text) #delete @people\t\n\ttext = re.sub('\\n','',text) #delete \\n\n\treturn text\n\n# return a list of tweets and their attributes\n# Param:keywords=[keyword1,keyword2...];\n# Twitter API\ndef search_keyword(keyword_dic, function_name):\n\ttwitter_list = []\n\thash_list = []\n\t# Fill in your keys and tokens\n\tAPP_KEY= ''\n\tAPP_SECRET = ''\n\tOAUTH_TOKEN = ''\n\tOAUTH_TOKEN_SECRET = ''\n\ttwitter = Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)\n\n\tSUPPORTED_LANGUAGE = ['zh', 'zh-Hant', 'en', 'fr', 'de', 'it',\n\t\t\t\t\t'ja', 'ko', 'pt', 'es', \n\t\t\t\t\t]\n\tkeyword = keyword_dic[\"main_keyword\"] + ' \"' + keyword_dic[\"restriction\"] +'\"'\n\t\n\ttry:\n\t\tresults = twitter.cursor(twitter.search, q=keyword, result_type = 'recent'\n\t\t\t\t\t\t\t\t, count = COUNT, include_entities = True)\n\t\tif function_name == 'keyword_sentiment':\n\t\t\tMAX_TWEETS = 30\n\t\telif function_name == 'picture_list':\n\t\t\tMAX_TWEETS = COUNT * 10\n\t\telse:\n\t\t\tprint(\"wrong function name\")\n\t\tfor idx, status in enumerate(results): # 'results' is a 
generator. It yields tweet objects\n\t\t\tif idx < MAX_TWEETS:\n\t\t\t\t#print(idx)\n\t\t\t\tcontent={}\n\t\t\t\tcontent['lang'] = status['lang']\n\t\t\t\thashValue = hash(status[\"text\"]) #if texts are identical, hash value is same\n\t\t\t\tflag = False\n\t\t\t\tif function_name == 'keyword_sentiment':\n\t\t\t\t\tflag = content['lang'] in SUPPORTED_LANGUAGE\n\t\t\t\telif function_name == 'picture_list':\n\t\t\t\t\tflag = (content['lang'] in SUPPORTED_LANGUAGE) and ('media' in status['entities'])\n\t\t\t\telse:\n\t\t\t\t\tprint(\"wrong function name.\")\n\t\t\t\tif flag:\n\t\t\t\t\tif (hashValue not in hash_list) : #or (content[\"hash\"] in twitter_list and content['text'] not in twitter_list)\n\t\t\t\t\t\thash_list.append(hashValue)\n\t\t\t\t\t\tcontent[\"text\"] = filter(status['text'])\n\t\t\t\t\t\tcontent[\"entities\"] = status['entities']\n\t\t\t\t\t\tcontent[\"retweet_count\"] = status['retweet_count'] # return int\n\t\t\t\t\t\tcontent[\"favorite_count\"] = status['favorite_count'] #return integer or Nullable\n\t\t\t\t\t\ttwitter_list.append(content)\n\t\t\telse:\n\t\t\t\tbreak\n\texcept TwythonError as e:\n\t\tif e.error_code == 429:\n\t\t\tprint(\"Too many requests!\")\n\t\telse:\n\t\t\tprint(e.error_code)\n\t\t# print(\"begin sleep for 15 minutes. Please wait...\")\n\t\t# time.sleep(60 * 15)\n\t\t# print(\"wake up!\")\n\texcept StopIteration:\n\t\tpass\n\n\treturn twitter_list\n\n\n# Google API\ndef keyword_sentiment(tweet_dict):\n\tclient = language.LanguageServiceClient()\n\tdocument = types.Document(\n\t\tcontent=tweet_dict[\"text\"],\n\t\tlanguage=tweet_dict[\"lang\"],\n\t\ttype=enums.Document.Type.PLAIN_TEXT,\n\t\t)\n\t# Detects the sentiment of the text\n\tsentiment = client.analyze_sentiment(document=document).document_sentiment\n\treturn sentiment.score, sentiment.magnitude\n\nif __name__ == '__main__':\n\tkeyword_dic={'main_keyword': 'jhope', 'restriction': 'bts'}\n\ttwitter_list= search_keyword(keyword_dic, 'keyword_sentiment')\n\t\n\t# for i in range(len(twitter_list)):\n\t# \tprint(twitter_list[i])\n\t# \tprint('\\n')\n\tprint(len(twitter_list))\n","sub_path":"project2/communication.py","file_name":"communication.py","file_ext":"py","file_size_in_byte":3470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"551093918","text":"# The code used to prepare docs to align with BERT pretraining usage. \n# It uses the spacy module to do sentence separation.\n# Use the commands below to prepare the runtime env.\n# pip install spacy\n# python -m spacy download en_core_web_lg\n\n'''Example of adding a pipeline component to prohibit sentence boundaries\nbefore certain tokens.\n\nWhat we do is write to the token.is_sent_start attribute, which\ntakes values in {True, False, None}. The default value None allows the parser\nto predict sentence segments. The value False prohibits the parser from inserting\na sentence boundary before that token. 
Note that fixing the sentence segmentation\nshould also improve the parse quality.\n\nThe specific example here is drawn from https://github.com/explosion/spaCy/issues/2627\nOther versions of the model may not make the original mistake, so the specific\nexample might not be apt for future versions.\n'''\nimport plac\nimport spacy\nimport codecs\nimport os\n\ndef prevent_sentence_boundaries(doc):\n for token in doc:\n if not can_be_sentence_start(token):\n token.is_sent_start = False\n return doc\n\ndef can_be_sentence_start(token):\n if token.i == 0:\n return True\n elif token.is_title:\n return True\n elif token.nbor(-1).is_punct:\n return True\n elif token.nbor(-1).is_space:\n return True\n else:\n return False\n\ndef main():\n nlp = spacy.load('en_core_web_lg')\n nlp.add_pipe(prevent_sentence_boundaries, before='parser')\n nlp.max_length = 200000000\n input_dir = '/mnt/newcpfs/wikiextractor/wikiextractor/enwiki'\n for root, dirs, files in os.walk(input_dir):\n father, child = os.path.split(root)\n new_father = father+'_processed'\n new_root = os.path.join(new_father, child)\n if not os.path.exists(new_root):\n os.makedirs(new_root)\n for file in files:\n abs_file = os.path.join(root, file)\n new_abs_file = os.path.join(new_root, file)\n raw_text = codecs.open(abs_file, 'r', 'utf-8').read()\n fo = open(\"test.txt\", \"w\")\n # doc = nlp(raw_text)\n # sentences = [sent.string.strip() for sent in doc.sents]\n # print(sentences)\n # nlp.add_pipe(prevent_sentence_boundaries, before='parser')\n doc = nlp(raw_text)\n sentences = [sent.string.strip() for sent in doc.sents]\n with codecs.open(new_abs_file ,'w', 'utf-8') as f:\n for sentence in sentences:\n if '' in sentence:\n f.write('\\n')\n elif 'alert('Your account is disabled; please contact an administrator');location.href='/login/'\"\n\t\t\t\t# login_user(loginUser)\n\t\t\t\tsession['username'] = loginUser.username\n\t\t\t\tsession['id'] = loginUser.id\n\t\t\t\tif request.form.get('week'):\n\t\t\t\t\tapp.config['PERMANENT_SESSION_LIFETIME'] = timedelta(days=7) # remember the username and password at login\n\t\t\t\t\tsession.permanent = True\n\t\t\t\treturn redirect('/')\n\t\t\treturn \"\"\n\t\treturn \"\"\n\n#---------------------------------------- Logout ----------------------------------------\n@app.route('/logout')\ndef logout():\n\t# logout_user()\n\tsession.pop('username')\n\tsession.pop('id')\n\treturn redirect(url_for('login'))\n#---------------------------------------- Home ----------------------------------------\n@app.route('/')\n# @login_required\ndef index():\n\treturn render_template('index.html')\n\n@app.route('/head/')\ndef head():\n\treturn render_template('head.html')\n\n@app.route('/left/')\ndef left():\n\t# the left menu bar differs depending on the user\n\tid = session.get('id')\n\tuser_Menus = UserDaoObj.selectMenuByUser(id)\n\t# print(user_Menus)\n\treturn render_template('left.html',user_Menus=user_Menus)\n\n@app.route('/main/')\ndef main():\n\treturn render_template('main.html')\n\n@app.route('/bottom/')\ndef bottom():\n\treturn render_template('bottom.html')\n\n\n#---------------------------------------- Users ----------------------------------------\n'''Query all user info'''\n@app.route('/user_list/',methods=['get','post'])\n# @login_required\ndef user_list():\n\tif request.method == 'GET':\n\t\tallUser = UserDaoObj.selectAllUser('')\n\telif request.method == 'POST':\n\t\tsearch = request.form.get('search')\n\t\tallUser = UserDaoObj.selectAllUser(search)\n\treturn render_template('user/user_list.html', allUser=allUser)\n\n'''Add a user'''\n@app.route('/add_user/', methods=['get','post'])\ndef add_user():\n\tif request.method == 'GET':\n\t\treturn 
render_template('user/user_addInput.html')\n\telif request.method == 'POST':\n\t\tusername = request.form.get('username')\n\t\tif not re.match('[a-zA-Z]+\\w{2,21}$',username):\n\t\t\tresult = error(False,'Invalid username format',fail_url='add_user',success_url='')\n\t\t\treturn result\n\t\tpassword = request.form.get('password')\n\t\tif not re.match('\\w{3,10}$',password):\n\t\t\tresult = error(False,'Invalid password format',fail_url='add_user',success_url='')\n\t\t\treturn result\n\t\t# header = request.form.get('header')\n\t\tstatus = request.form.get('status')\n\t\tpassword = hashlib.md5(bytes(password, encoding='utf8')).hexdigest()\n\t\tresult = UserDaoObj.insert(username,password,int(status))\n\t\tresult = error(result, errorMsg='Failed to add user', fail_url='add_user', success_url='user_list')\n\t\treturn result\n\n'''Delete a user'''\n@app.route('/del_user/')\ndef del_user():\n\tid = request.args.get('id')\n\tresult = UserDaoObj.delete(id)\n\tresult = error(result, errorMsg='Failed to delete user', fail_url='user_list', success_url='user_list')\n\treturn result\n\n'''Update user info'''\n@app.route('/update_user/', methods=['get','post'])\ndef update_user():\n\tif request.method == 'GET':\n\t\tid = request.args.get('id')\n\t\tuser = UserDaoObj.selectById(id)\n\t\treturn render_template('user/user_updateInput.html', user=user)\n\telif request.method == 'POST':\n\t\tid = request.form.get('id')\n\t\tusername = request.form.get('username')\n\t\tif not re.match('[a-zA-Z]+\\w{2,21}$', username):\n\t\t\tresult = error(False, 'Invalid username format', fail_url='update_user', success_url='')\n\t\t\treturn result\n\t\tpassword = request.form.get('password')\n\t\tif not re.match('\\w{3,10}$', password):\n\t\t\tresult = error(False, 'Invalid password format', fail_url='update_user', success_url='')\n\t\t\treturn result\n\t\theader = request.form.get('header')\n\t\tstatus = request.form.get('status')\n\t\tpassword = hashlib.md5(bytes(password, encoding='utf8')).hexdigest()\n\t\tresult = UserDaoObj.update(id,username, password,header,int(status))\n\t\tresult = error(result, errorMsg='Failed to update user info', fail_url='user_list', success_url='user_list')\n\t\treturn result\n\n'''Bind roles to a user'''\n@app.route('/bind_role/',methods=['get','post'])\ndef bind_role():\n\tif request.method == 'GET':\n\t\tusername = request.args.get('username')\n\t\tid = request.args.get('id')\n\t\tallRole = RoleDaoObj.selecteAllRole()\n\t\told_haveRole = UserDaoObj.selectRole_byId(id)\n\t\tj = 0\n\t\tprint(old_haveRole,'----------')\n\t\tfor i in old_haveRole:\n\t\t\told_haveRole[j]=i[0]\n\t\t\tj += 1\n\t\treturn render_template('user/user_bindRoleInput.html', id=id, username=username, old_haveRole=old_haveRole, allRole=allRole)\n\telif request.method == 'POST':\n\t\tuser_id = request.form.get('user_id')\n\t\tnew_haveRole = request.form.getlist('role_id') # new list of roles bound to the user\n\t\tresult = UserDaoObj.bindRole(user_id,new_haveRole)\n\t\tresult = error(result, errorMsg='Failed to bind roles to user', fail_url='user_list', success_url='user_list')\n\t\treturn result\n\n\n\n#---------------------------------------- Roles ----------------------------------------\n'''Query all roles'''\n@app.route('/role_list/')\ndef role_list():\n\tallRole = RoleDaoObj.selecteAllRole()\n\treturn render_template('role/role_list.html', allRole = allRole)\n\n'''Add a role'''\n@app.route('/add_role/',methods=['get','post'])\ndef add_role():\n\tif request.method == 'GET':\n\t\treturn render_template('role/role_addInput.html')\n\telif request.method == 'POST':\n\t\tname = request.form.get('name')\n\t\tresult = RoleDaoObj.insert(name)\n\t\tresult = 
error(result,'Failed to add role',fail_url='add_role',success_url='role_list')\n\t\treturn result\n\n'''Delete a role'''\n@app.route('/del_role/')\ndef del_role():\n\tid = request.args.get('id')\n\tresult = RoleDaoObj.delete(id)\n\tresult = error(result,'Failed to delete role',fail_url='role_list',success_url='role_list')\n\treturn result\n\n'''Update a role'''\n@app.route('/update_role/',methods=['get','post'])\ndef update_role():\n\tif request.method == 'GET':\n\t\tid = request.args.get('id')\n\t\trole = RoleDaoObj.selectById(id)\n\t\treturn render_template('role/role_updateInput.html', role=role)\n\telif request.method == 'POST':\n\t\tid = request.form.get('id')\n\t\tname = request.form.get('name')\n\t\tresult = RoleDaoObj.update(id,name)\n\t\tresult = error(result, 'Failed to update role', fail_url='update_role', success_url='role_list')\n\t\treturn result\n\n'''Bind menus to a role'''\n@app.route('/bind_menu/',methods=['get','post'])\ndef bind_menu():\n\tif request.method == 'GET':\n\t\troleid = request.args.get('id')\n\t\tname = request.args.get('name')\n\t\tallMenu = MenuDaoObj.selectAllMenu()\n\t\told_haveMenu = RoleDaoObj.selectMenu_byId(roleid)\n\t\tprint(allMenu)\n\t\tj = 0\n\t\tfor i in old_haveMenu:\n\t\t\told_haveMenu[j] = i[0]\n\t\t\tj += 1\n\t\treturn render_template('role/role_bindMenuInput.html', roleid=roleid, name=name, allMenu=allMenu, old_haveMenu=old_haveMenu)\n\telif request.method == 'POST':\n\t\trole_id = request.form.get('role_id')\n\t\tnew_haveMenu = request.form.getlist('menu_id')\n\t\tresult = RoleDaoObj.bindMenu(role_id, new_haveMenu)\n\t\tresult = error(result, errorMsg='Failed to bind menus to role', fail_url='role_list', success_url='role_list')\n\t\treturn result\n\n#---------------------------------------- Menus ----------------------------------------\n'''Menu list'''\n@app.route('/menu_list/')\ndef menu_list():\n\tallMenu = MenuDaoObj.selectAllMenu()\n\tfor i in allMenu:\n\t\tprint(i)\n\treturn render_template('menu/menu_list.html', allMenu = allMenu)\n\n'''Add a menu'''\n@app.route('/add_menu/',methods=['get','post'])\ndef add_menu():\n\tif request.method == 'GET':\n\t\tallMenu = MenuDaoObj.selectAllMenu()\n\t\treturn render_template('menu/menu_addInput.html', allMenu = allMenu)\n\telif request.method == 'POST':\n\t\tname = request.form.get('name')\n\t\tparent_id = request.form.get('parent_id')\n\t\tif parent_id=='':\n\t\t\tparent_id = None\n\t\turl = request.form.get('url')\n\t\tresult = MenuDaoObj.insert(name,parent_id,url)\n\t\tresult = error(result,'Failed to add menu',fail_url='menu_list',success_url='menu_list')\n\t\treturn result\n\n'''Delete a menu'''\n@app.route('/del_menu/')\ndef del_menu():\n\tid = request.args.get('id')\n\tprint(id)\n\tresult = MenuDaoObj.delete(id)\n\tresult = error(result,'Failed to delete menu',fail_url='menu_list',success_url='menu_list')\n\treturn result\n\n'''Update a menu'''\n@app.route('/update_menu/',methods=['get','post'])\ndef update_menu():\n\tif request.method == 'GET':\n\t\tname = request.args.get('name')\n\t\tid = request.args.get('id')\n\t\tparentName = request.args.get('parentName')\n\t\turl = request.args.get('url')\n\t\tallMenu = MenuDaoObj.selectAllMenu()\n\t\treturn render_template('menu/menu_updateInput.html', id=id, name=name, parentName=parentName, url=url, allMenu=allMenu)\n\telif request.method == 'POST':\n\t\tid = request.form.get('id')\n\t\tname = request.form.get('name')\n\t\tparent_id = request.form.get('parent_id')\n\t\tif parent_id == '':\n\t\t\tparent_id = None\n\t\turl = request.form.get('url')\n\t\tresult = MenuDaoObj.update(id, name, parent_id, url)\n\t\tresult = error(result, 'Failed to update menu', fail_url='update_menu', 
success_url='menu_list')\n\t\treturn result\n\n# ---------------------------------------- Helper functions ----------------------------------------\n@app.route('/error/')\ndef error(result, errorMsg, fail_url, success_url):\n\tif result:\n\t\treturn redirect(url_for(success_url))\n\treturn render_template('error.html', errorMsg=errorMsg, fail_url=fail_url)\n\n\t\nif __name__ == '__main__':\n\tapp.run()\n","sub_path":"rbac2.0/rbac2.0.py","file_name":"rbac2.0.py","file_ext":"py","file_size_in_byte":10724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"28214371","text":"import collections\n\nclass Solution(object):\n    def uniqueOccurrences(self, arr):\n        \"\"\"\n        :type arr: List[int]\n        :rtype: bool\n        \"\"\"\n        res = list(collections.Counter(arr).values())\n        list.sort(res)\n        for i in range(len(res)-1):\n            if(res[i]==res[i+1]):\n                return False\n        return True\n","sub_path":"Leetcode/hashmap/1207_UniqueNumberOfOccurrences/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"653733734","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Dec 11 05:44:57 2020\nHad difficulty placing these bar graphs in a tkinter window; they show in the console only\nImplemented button for console\nShows mean and std for 3 sets of data\n@author: Michelle Umali\n\"\"\"\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport tkinter as tk\nfrom tkinter import ttk\n\n\nroot = tk.Tk()\ndef center_window(width=300, height=200):\n    s_width = root.winfo_screenwidth()\n    s_height = root.winfo_screenheight()\n    x = (s_width/2) - (width/2)\n    y = (s_height/2) - (height/2)\n    root.geometry('%dx%d+%d+%d' % (width, height, x, y))\n\ncenter_window(700,400)\n#first button\nfirstBtn=tk.Button(root,text=\"Submit for Bullock's Orioles mean and std graphs\",width=50,height=10,bg=\"orange\",fg=\"black\",command=lambda:LoadFirstGraph())\nfirstBtn.pack()\n#second button\nsecondBtn=tk.Button(root,text='Submit for Blue grosbeak mean and std',width=50,height=10,bg=\"blue\",fg=\"white\",command=lambda:LoadSecondGraph())\nsecondBtn.pack()\n\nplot1 = ttk.Labelframe(root, text='Plot Area')\nfig, ax = plt.subplots()\n\ndf = pd.read_csv('Ictterus bullockii.csv',\n                 skiprows=0,nrows=428000, encoding='utf-8-sig',\n                 parse_dates=['eventDate'])\nds=df.sort_values('eventDate')\ndt=ds.dropna(how='any')\n#----------------------------------------------------------------------\nmask1 = (dt['eventDate']>='2016-9-10')&(dt['eventDate']<='2017-4-14')\nmask2 = (dt['eventDate']>='2017-9-15')&(dt['eventDate']<='2018-3-16')\nmask3 = (dt['eventDate']>='2018-9-22')&(dt['eventDate']<='2019-3-14')\nx1,x2=dt[mask1].mean()\ny1,y2=dt[mask1].std()\nx3,x4=dt[mask2].mean()\ny3,y4=dt[mask2].std()\nx5,x6=dt[mask3].mean()\ny5,y6=dt[mask3].std() \ndef LoadFirstGraph():\n    \n    plt.figure(figsize=(7,4),dpi=100)\n    means=(x1,x3,x5)\n    positions=(0,1,2)\n    std=(y1,y3,y5)\n    bar_tick_label=['2016','2017','2018']\n    plt.bar(positions, means, tick_label=bar_tick_label, color=\"y\", yerr=std)\n    plt.xlabel(\"Icterrus bullockii or Bullock's Orioles\")\n    plt.ylabel('mean and std in latitudes')\n    plt.title('After nesting seasons')\n    #plt.legend()\n    plt.show()\n    \ndg = pd.read_csv('Passerina caerula 2.csv',\n                 skiprows=0,nrows=571402, encoding='utf-8-sig',\n                 parse_dates=['eventDate'])\ndu=dg.sort_values('eventDate')\ndv=du.dropna(how='any')\n#----------------------------------------------------------------------\nmask4 = 
(dv['eventDate']>='2016-11-5')&(dv['eventDate']<='2017-3-29')\nmask5 = (dv['eventDate']>='2017-11-13')&(dv['eventDate']<='2018-3-28')\nmask6 = (dv['eventDate']>='2018-11-12')&(dv['eventDate']<='2019-3-14')\nx7,x8=dv[mask4].mean()\ny7,y8=dv[mask4].std()\nx9,x10=dv[mask5].mean()\ny9,y10=dv[mask5].std()\nx11,x12=dv[mask6].mean()\ny11,y12=dv[mask6].std()\n\ndef LoadSecondGraph(): \n    plt.figure(figsize=(7,4),dpi=100)\n    means=(x7,x9,x11)\n    positions=(3,4,5)\n    std=(y7,y9,y11)\n    bar_tick_label=['2016','2017','2018'] \n    plt.bar(positions, means, tick_label=bar_tick_label,color=\"b\", yerr=std)\n    \n    plt.xlabel('Passerina caerula or Blue grosbeak')\n    plt.ylabel('mean and std in latitudes')\n    plt.title('After nesting seasons')\n    #plt.legend()\n    plt.show()\n    \n#used for statistics\nprint(dt[mask1].describe())\nprint(dt[mask2].describe())\nprint(dt[mask3].describe())\nprint(dv[mask4].describe())\nprint(dv[mask5].describe())\nprint(dv[mask6].describe())\nroot.mainloop()","sub_path":"describestats_an.py","file_name":"describestats_an.py","file_ext":"py","file_size_in_byte":3391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"85842105","text":"from __future__ import annotations\nimport asyncio\nimport dataclasses\nimport threading\n\nLINE_ENDING = b\"\\r\\n\"\n\nclass ThreadSafeDict(object):\n    def __init__(self):\n        self.lock = threading.Lock()\n        self.dict = {}\n    def set(self, key, value):\n        with self.lock:\n            self.dict[key] = value\n    def get(self, key):\n        with self.lock:\n            return self.dict.get(key)\n    def __getitem__(self, key):\n        return self.get(key)\n    def __setitem__(self, key, value):\n        self.set(key, value)\n\ndef get_senddata(data: bytes) -> bytes:\n    \"\"\"Return the data to send back to the client.\"\"\"\n    return b\"+\" + data + LINE_ENDING\ndef get_nildata() -> bytes:\n    return b\"$-1\" + LINE_ENDING\n\n\n@dataclasses.dataclass\nclass RedisCommand:\n    \"\"\"A Redis command.\"\"\"\n    command: bytes\n    args: list[bytes]\n    @staticmethod\n    def parse(data: bytes) -> RedisCommand:\n        \"\"\"Parse the data into a RedisCommand.\"\"\"\n        array = data.split(LINE_ENDING)\n        command = array[2].upper()\n        args = []\n        for i in range(3, len(array)):\n            if i % 2 == 0:\n                args.append(array[i])\n        return RedisCommand(command, args)\nasync def redis(reader, writer):\n    itemMap = ThreadSafeDict()\n    while True:\n        data = await reader.read(1024)\n        if not data:\n            break\n        cmd = RedisCommand.parse(data)\n        resp = get_nildata()\n        if cmd.command == b\"ECHO\":\n            resp = get_senddata(cmd.args[0])\n        elif cmd.command == b\"SET\":\n            itemMap[cmd.args[0]] = cmd.args[1]\n            resp = get_senddata(b\"OK\")\n        elif cmd.command == b\"GET\":\n            resp = itemMap.get(cmd.args[0])\n            if resp:\n                resp = get_senddata(resp)\n            else:\n                resp = get_nildata()\n        elif cmd.command == b\"PING\":\n            resp = get_senddata(b\"PONG\")\n        writer.write(resp)\n        await writer.drain()\n\nasync def main(host, port):\n    srv = await asyncio.start_server(redis, host, port)\n    async with srv:\n        await srv.serve_forever()\n\nif __name__ == \"__main__\":\n    asyncio.run(main(\"localhost\", 6379))\n\n","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"227639202","text":"from torch import nn\nfrom torchvision.models.alexnet import alexnet\n\n\nclass AlexNetFC6(nn.Module):\n\n    def __init__(self):\n        super().__init__()\n\n        base = alexnet(pretrained=True)\n        print(base)\n        self.conv = base.features\n
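        # classifier[:3] below keeps AlexNet's first dropout, the fc6 linear\n        # layer and its ReLU, so forward() yields 4096-d fc6 features\n        self.avgpool = 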
base.avgpool\n self.fc_6 = base.classifier[:3]\n\n self.eval()\n\n def forward(self, stimuli):\n x = self.conv(stimuli)\n x = self.avgpool(x)\n x = x.view(x.size(0), -1)\n x = self.fc_6(x) # shape: [10, 4096]\n return x\n\nclass AlexNetConv5(nn.Module):\n def __init__(self):\n super().__init__()\n\n base = alexnet(pretrained=True)\n self.conv = base.features\n self.conv1 = base.features[:2]\n self.avgpool = base.avgpool\n \n self.eval()\n\n def forward(self, stimuli):\n x = self.conv(stimuli) #shape: [stim, 256, 7, 7] / mean test set r = 0.7297\n #x = self.avgpool(x) #shape: [stim, 256, 6, 6]\n x = x.view(x.size(0), -1) #shape: [stim, 12544] / mean test set r = 0.7218\n #x = self.fc_6(x) #shape: [stim, 4096] / mean test set r = 0.7142\n return x","sub_path":"kh/object2vec_encoder_python/feature_extractors.py","file_name":"feature_extractors.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"167840846","text":"from tensorflow.python.keras.models import Model\nfrom tensorflow.python.keras.applications import ResNet50\nfrom tensorflow.python.keras.layers import Dense\nimport config\n\n\ndef get_age_model():\n\n age_model = ResNet50(\n include_top=False,\n weights=\"imagenet\",\n input_shape=(\n config.RESNET50_DEFAULT_IMG_WIDTH,\n config.RESNET50_DEFAULT_IMG_WIDTH,\n 3,\n ),\n pooling=\"avg\",\n )\n\n prediction = Dense(\n units=101,\n kernel_initializer=\"he_normal\",\n use_bias=False,\n activation=\"softmax\",\n name=\"pred_age\",\n )(age_model.output)\n\n age_model = Model(inputs=age_model.input, outputs=prediction)\n return age_model\n\n\ndef get_model(ignore_age_weights=False):\n\n base_model = get_age_model()\n if not ignore_age_weights:\n base_model.load_weights(config.AGE_TRAINED_WEIGHTS_FILE)\n print(\"Loaded weights from age classifier\")\n last_hidden_layer = base_model.get_layer(index=-2)\n\n base_model = Model(inputs=base_model.input, outputs=last_hidden_layer.output)\n prediction = Dense(1, kernel_initializer=\"normal\")(base_model.output)\n\n model = Model(inputs=base_model.input, outputs=prediction)\n return model\n","sub_path":"bmi-project-master/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"511722113","text":"\"\"\"plot general log file according to given indexes\"\"\"\nimport sys\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndef moving_average(data_set, periods=3):\n weights = np.ones(periods) / periods\n return np.convolve(data_set, weights, mode='valid')\n\ndef plot(filename):\n\n data = []\n\n with open(filename, 'r') as fin:\n for line in fin.readlines():\n items = line.split('\\t')\n\n row = []\n for item in items[1:]:\n t = eval(item.split(':')[1])\n if isinstance(t, list):\n for x in t:\n row.append(x)\n else:\n row.append(t)\n if len(row) > 0:\n data.append(row)\n\n data = np.array(data)\n print(data.shape)\n num = 100\n #print(data[-num:, 0].mean())\n print(data[-num:, 1].mean())\n print(data[-num:, 2].mean())\n\n for index in [1, 2]:\n index = int(index)\n plt.plot(moving_average(data[:, index], 500), label=filename+'agent' if index == 1 else filename+'agent_strong')\n\n #plt.show()\n\nif __name__=='__main__':\n import sys\n # for i in ['.5', '1', '1.5', '2', '2.5']:\n # for i in ['.5', '1.5', '2.5']:\n # plot('collab_shaping_bad_'+i+'.log')\n for i in ['0', '1', '2']:\n 
plot('collab_dim_'+i+'.log')\n\n\n print('saving ...')\n plt.legend()\n plt.savefig('tmp.png')\n","sub_path":"plot_log.py","file_name":"plot_log.py","file_ext":"py","file_size_in_byte":1438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"625658242","text":"import os\nimport sys\nimport yaml\nimport pickle\n\nfrom strange_case.registry import Registry\nfrom strange_case.nodes import Node\nfrom strange_case.config_dict import ConfigDict\nfrom strange_case import strange_case\n\n\ndef fancy_import(import_name):\n \"\"\"\n This takes a fully qualified object name, like\n 'strange_case.extensions.markdown', and returns the last\n object. equivalent to `from strange_case.extensions import markdown`.\n \"\"\"\n\n import_path, import_me = import_name.rsplit('.', 1)\n imported = __import__(import_path, globals(), locals(), [import_me], -1)\n return getattr(imported, import_me)\n\n\ndef run():\n import logging\n logging.basicConfig()\n\n # so that strange_case.py can be executed from any project folder, add CWD to the import paths\n sys.path.insert(0, os.getcwd())\n\n import argparse\n parser = argparse.ArgumentParser(description='Process some integers.')\n parser.add_argument('-w', '--watch', dest='watch', action='store_const',\n const=True, default=False,\n help='watch the site_path for changes (default: find the max)')\n conf_overrides = [\n 'project_path',\n 'site_path',\n 'deploy_path',\n 'remove_stale_files',\n 'config_file',\n ]\n parser.add_argument('-x', '--exclude', nargs='*', dest='exclude_paths', default=None)\n parser.add_argument('-p', '--project', dest='project_path')\n parser.add_argument('-s', '--site', dest='site_path')\n parser.add_argument('-d', '--deploy', dest='deploy_path')\n parser.add_argument('-r', '--remove', dest='remove_stale_files', action='store_true', default=None)\n parser.add_argument('-n', '--no-remove', dest='remove_stale_files', action='store_false', default=None)\n parser.add_argument('-c', '--config', dest='config_file')\n parser.add_argument('configs', nargs='*')\n\n # config section catches assertion errors and prints them as error messages\n try:\n if os.path.isfile(os.path.join(os.getcwd(), 'config.py')):\n from config import CONFIG\n if not isinstance(CONFIG, ConfigDict):\n CONFIG = ConfigDict(CONFIG)\n else:\n from strange_case.strange_case_config import CONFIG\n\n # normalize paths\n for conf in ['project_path', 'site_path', 'deploy_path']:\n if CONFIG[conf][0] == '~':\n CONFIG[conf] = os.path.expanduser(CONFIG[conf])\n elif CONFIG[conf][0] == '.':\n CONFIG[conf] = os.path.abspath(CONFIG[conf])\n\n # now we can look for the app config\n config_path = os.path.join(CONFIG['project_path'], CONFIG['config_file'])\n\n if os.path.isfile(config_path):\n with open(config_path, 'r') as config_file:\n yaml_config = yaml.load(config_file)\n if yaml_config:\n CONFIG.update(yaml_config)\n\n args = parser.parse_args()\n for conf in conf_overrides:\n if getattr(args, conf) is not None:\n CONFIG[conf] = getattr(args, conf)\n\n assign = None\n for confs in args.configs:\n if assign:\n CONFIG[assign] = confs\n assign = None\n elif ':' in confs:\n key, val = confs.split(':', 1)\n CONFIG[key] = val\n else:\n assign = confs\n\n if CONFIG['config_hook']:\n CONFIG['config_hook'](CONFIG)\n del CONFIG['config_hook']\n\n assert CONFIG['project_path'], \"project_path is required\"\n assert CONFIG['site_path'], \"site_path is required\"\n assert CONFIG['deploy_path'], \"deploy_path is required\"\n except AssertionError as 
e:\n        sys.stderr.write(\"\\033[1;31mError:\\033[0m \\033[1m\" + e.message + \"\\033[0m\\n\")\n        return\n\n    if not os.path.isdir(CONFIG['deploy_path']):\n        os.mkdir(CONFIG['deploy_path'])\n\n    from strange_case.support.jinja import StrangeCaseEnvironment\n\n    extensions = []\n    if 'extensions' in CONFIG:\n        for extension in CONFIG['extensions']:\n            if isinstance(extension, basestring):\n                try:\n                    extension = fancy_import(extension)\n                except ImportError:\n                    sys.stderr.write('Error in extensions: Could not find \"%s\"\\n' % extension)\n                    raise\n            extensions.append(extension)\n        del CONFIG['extensions']\n\n    if not Registry.get('jinja_environment', None):\n        jinja_environment = StrangeCaseEnvironment(extensions=extensions, project_path=CONFIG['project_path'])\n        Registry.set('jinja_environment', jinja_environment)\n    else:\n        jinja_environment = Registry.get('jinja_environment')\n\n    if 'filters' in CONFIG:\n        for filter_name, method in CONFIG['filters'].iteritems():\n            if isinstance(method, basestring):\n                try:\n                    method = fancy_import(method)\n                except ImportError:\n                    sys.stderr.write('Error in filters: Could not find \"%s\"\\n' % method)\n                    raise\n            jinja_environment.filters[filter_name] = method\n        del CONFIG['filters']\n\n    if 'processors' in CONFIG:\n        for processor in CONFIG['processors']:\n            try:\n                fancy_import(processor)\n            except ImportError:\n                sys.stderr.write('Error in processors: Could not find \"%s\"\\n' % processor)\n                raise\n        del CONFIG['processors']\n\n    configurators = []\n    if 'configurators' in CONFIG:\n        for configurator in CONFIG['configurators']:\n            if isinstance(configurator, basestring):\n                configurator = fancy_import(configurator)\n            configurators.append(configurator)\n            Registry.add_configurator(configurator)\n        del CONFIG['configurators']\n\n    # additional configurators, in addition to the all-important defaults\n    if 'configurators +' in CONFIG:\n        for configurator in CONFIG['configurators +']:\n            if isinstance(configurator, basestring):\n                configurator = fancy_import(configurator)\n            configurators.append(configurator)\n            Registry.add_configurator(configurator)\n        del CONFIG['configurators +']\n\n    # additional file_types\n    for entry in Registry.file_types:\n        CONFIG['file_types'].append(entry)\n\n    if 'file_types +' in CONFIG:\n        CONFIG['file_types'].extend(CONFIG['file_types +'])\n\n    # read timestamps file\n    timestamps_file = os.path.join(CONFIG['project_path'], '.timestamps')\n    if os.path.exists(timestamps_file):\n        CONFIG['file_mtimes'] = pickle.load(open(timestamps_file))\n\n    timestamps = {}\n    for file_tracked in Node.files_tracked:\n        f = os.path.abspath(file_tracked)\n        timestamps[f] = os.stat(file_tracked).st_mtime\n\n    if args.watch:\n        import time\n        from watchdog.observers import Observer\n        from watchdog.events import FileSystemEventHandler\n\n        class Regenerate(FileSystemEventHandler):\n            last_run = None\n\n            def on_any_event(self, event, alert=True):\n                if self.last_run and time.time() - self.last_run < .1:\n                    return\n\n                if alert:\n                    sys.stderr.write(\"Change detected. 
Running StrangeCase\\n\")\n strange_case(CONFIG)\n sys.stderr.write(\"StrangeCase generated at %i\\n\" % int(time.time()))\n self.last_run = time.time()\n\n exclude_paths = [\n os.path.abspath('.git'),\n os.path.abspath(CONFIG['deploy_path']),\n ]\n if args.exclude_paths:\n exclude_paths.extend([os.path.abspath(path) for path in args.exclude_paths])\n\n observer = Observer()\n handler = Regenerate()\n for path in os.listdir(os.getcwd()):\n path = os.path.abspath(path)\n if os.path.isdir(path) and path not in exclude_paths:\n sys.stderr.write('Watching \"%s\" for changes\\n' % path)\n observer.schedule(handler, path=path, recursive=True)\n observer.start()\n try:\n handler.on_any_event(None, False) # run the first time, no alert\n while True:\n time.sleep(1)\n except KeyboardInterrupt:\n sys.stderr.write(\"Stopping\\n\")\n observer.stop()\n observer.join()\n else:\n strange_case(CONFIG)\n\n\nif __name__ == '__main__':\n run()\n","sub_path":"strange_case/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":8460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"155099447","text":"\nimport os\n\nimport pandas as pd\nimport numpy as np\n\n\n# ---------------------------------------------------------------------\n# Question # 0\n# ---------------------------------------------------------------------\n\ndef consecutive_ints(ints):\n \"\"\"\n consecutive_ints tests whether a list contains two \n adjacent elements that are consecutive integers.\n\n :param ints: a list of integers\n :returns: a boolean value if ints contains two \n adjacent elements that are consecutive integers.\n\n :Example:\n >>> consecutive_ints([5,3,6,4,9,8])\n True\n >>> consecutive_ints([1,3,5,7,9])\n False\n \"\"\"\n\n if len(ints) == 0:\n return False\n\n for k in range(len(ints) - 1):\n diff = abs(ints[k] - ints[k+1])\n if diff == 1:\n return True\n\n return False\n\n\n# ---------------------------------------------------------------------\n# Question # 1 \n# ---------------------------------------------------------------------\n\ndef median(nums):\n \"\"\"\n median takes a non-empty list of numbers,\n returning the median element of the list.\n If the list has even length, it should return\n the mean of the two elements in the middle.\n\n :param nums: a non-empty list of numbers.\n :returns: the median of the list.\n \n :Example:\n >>> median([6, 5, 4, 3, 2]) == 4\n True\n >>> median([50, 20, 15, 40]) == 30\n True\n >>> median([1, 2, 3, 4]) == 2.5\n True\n \"\"\"\n arr = np.array(nums)\n \n return np.median(arr)\n\n\n# ---------------------------------------------------------------------\n# Question # 2\n# ---------------------------------------------------------------------\n\ndef same_diff_ints(ints):\n \"\"\"\n same_diff_ints tests whether a list contains\n two list elements i places apart, whose distance\n as integers is also i.\n\n :param ints: a list of integers\n :returns: a boolean value if ints contains two\n elements as described above.\n\n :Example:\n >>> same_diff_ints([5,3,1,5,9,8])\n True\n >>> same_diff_ints([1,3,5,7,9])\n False\n \"\"\"\n for i in range(len(ints)):\n for j in range(i+1,len(ints)):\n if (i - j) == (ints[i]-ints[j]):\n return True\n return False\n\n\n# ---------------------------------------------------------------------\n# Question # 3\n# ---------------------------------------------------------------------\n\ndef prefixes(s):\n \"\"\"\n prefixes returns a string of every \n consecutive prefix of the input 
string.\n\n    :param s: a string.\n    :returns: a string of every consecutive prefix of s.\n\n    :Example:\n    >>> prefixes('Data!')\n    'DDaDatDataData!'\n    >>> prefixes('Marina')\n    'MMaMarMariMarinMarina'\n    >>> prefixes('aaron')\n    'aaaaaraaroaaron'\n    \"\"\"\n    Str = \"\"\n    for i in range(len(s)+1):\n        Str = Str+s[:i]\n    return Str\n\n\n# ---------------------------------------------------------------------\n# Question # 4\n# ---------------------------------------------------------------------\n\ndef evens_reversed(N):\n    \"\"\"\n    evens_reversed returns a string containing \n    all even integers from 1 to N (inclusive)\n    in reversed order, separated by spaces. \n    Each integer is zero padded.\n\n    :param N: a non-negative integer.\n    :returns: a string containing all even integers \n    from 1 to N reversed, formatted as described above.\n\n    :Example:\n    >>> evens_reversed(7)\n    '6 4 2'\n    >>> evens_reversed(10)\n    '10 08 06 04 02'\n    \"\"\"\n    result = \"\"\n    length = len(str(N))\n    for i in range((N//2)*2,0,-2):\n        st = str(i)\n        while len(st) < length:\n            st = \"0\"+st\n        result = result+st+\" \"\n    return result.strip()\n\n\n# ---------------------------------------------------------------------\n# Question # 5\n# ---------------------------------------------------------------------\n\ndef last_chars(fh):\n    \"\"\"\n    last_chars takes a file handle and\n    returns a string made up of the last\n    character of each line in the file.\n\n    :param fh: a file handle.\n    :returns: a string of the last characters.\n\n    :Example:\n    >>> fp = os.path.join('data', 'chars.txt')\n    >>> last_chars(open(fp))\n    'hrg'\n    \"\"\"\n    result = \"\"\n    a = fh.readlines()\n    for i in range(len(a)):\n        result = result+a[i][-2]\n    \n    return result\n\n\n# ---------------------------------------------------------------------\n# Question # 6\n# ---------------------------------------------------------------------\n\ndef arr_1(A):\n    \"\"\"\n    arr_1 takes in a numpy array and\n    adds to each element the square-root of\n    the index of each element.\n\n    :param A: a 1d numpy array.\n    :returns: a 1d numpy array.\n\n    :Example:\n    >>> A = np.array([2, 4, 6, 7])\n    >>> out = arr_1(A)\n    >>> isinstance(out, np.ndarray)\n    True\n    >>> np.all(out >= A)\n    True\n    \"\"\"\n    a = np.arange(len(A))\n    a = a+1\n\n    return A+np.sqrt(a)\n\n\ndef arr_2(A):\n    \"\"\"\n    arr_2 takes in a numpy array of integers\n    and returns a boolean array (i.e. an array of booleans)\n    whose ith element is True if and only if the ith element\n    of the input array is divisible by 16.\n\n    :param A: a 1d numpy array.\n    :returns: a 1d numpy boolean array.\n\n    :Example:\n    >>> out = arr_2(np.array([1, 2, 16, 17, 32, 33]))\n    >>> isinstance(out, np.ndarray)\n    True\n    >>> out.dtype == np.dtype('bool')\n    True\n    \"\"\"\n\n    return A%16==0\n\n\ndef arr_3(A):\n    \"\"\"\n    arr_3 takes in a numpy array of stock\n    prices per share on successive days in\n    USD and returns an array of growth rates.\n\n    :param A: a 1d numpy array.\n    :returns: a 1d numpy array.\n\n    :Example:\n    >>> fp = os.path.join('data', 'stocks.csv')\n    >>> stocks = np.array([float(x) for x in open(fp)])\n    >>> out = arr_3(stocks)\n    >>> isinstance(out, np.ndarray)\n    True\n    >>> out.dtype == np.dtype('float')\n    True\n    >>> out.max() == 0.03\n    True\n    \"\"\"\n    a = A[:-1]\n    b = A[1:]\n\n    return np.round(((b-a)/a)*100)/100\n\n\ndef arr_4(A):\n    \"\"\"\n    Create a function arr_4 that takes in A and \n    returns the day on which you can buy at least \n    one share from 'left-over' money. If this never \n    happens, return -1. 
The first stock purchase occurs on day 0\n :param A: a 1d numpy array of stock prices.\n :returns: an integer of the total number of shares.\n\n :Example:\n >>> import numbers\n >>> stocks = np.array([3, 3, 3, 3])\n >>> out = arr_4(stocks)\n >>> isinstance(out, numbers.Integral)\n True\n >>> out == 1\n True\n \"\"\"\n a = np.full(len(A),20)\n b = a%A\n c = np.cumsum(b)\n d = c>A\n result = np.where(d == True)\n if (len(result[0]))==0:\n return -1\n else:\n return np.min(result)\n\n\n# ---------------------------------------------------------------------\n# Question # 7\n# ---------------------------------------------------------------------\n\ndef movie_stats(movies):\n \"\"\"\n movies_stats returns a series as specified in the notebook.\n\n :param movies: a dataframe of summaries of\n movies per year as found in `movies_by_year.csv`\n :return: a series with index specified in the notebook.\n\n :Example:\n >>> movie_fp = os.path.join('data', 'movies_by_year.csv')\n >>> movies = pd.read_csv(movie_fp)\n >>> out = movie_stats(movies)\n >>> isinstance(out, pd.Series)\n True\n >>> 'num_years' in out.index\n True\n >>> isinstance(out.loc['second_lowest'], str)\n True\n \"\"\"\n lst = []\n name = []\n try:\n a = max(movies[\"Year\"])-min(movies[\"Year\"])+1\n lst.append(a)\n name.append('num_years')\n except:\n pass\n try:\n b = sum(movies[\"Number of Movies\"])\n lst.append(b)\n name.append('tot_movies')\n except:\n pass\n try:\n c=movies[\"Year\"][pd.Index(movies[\"Number of Movies\"]).get_loc(min(movies[\"Number of Movies\"]))]\n lst.append(c)\n name.append('yr_fewest_movies')\n except:\n pass\n try:\n d = sum(movies[\"Total Gross\"])/len(movies)\n lst.append(d)\n name.append('avg_gross')\n except:\n pass\n try:\n temp = movies[\"Total Gross\"]/movies[\"Number of Movies\"]\n e = movies[\"Year\"][pd.Index(temp).get_loc(max(temp))]\n lst.append(e)\n name.append('highest_per_movie')\n except:\n pass\n try:\n second_smallest = movies[\"Total Gross\"].nsmallest(2).iloc[-1]\n f = movies[\"#1 Movie\"][pd.Index(movies[\"Total Gross\"]).get_loc(second_smallest)]\n lst.append(f)\n name.append('second_lowest')\n except:\n pass\n try:\n te = movies[\"#1 Movie\"].str.contains('Harry Potter', regex=False)\n index = max(list(te[te==True].index))\n g = sum(movies[\"Total Gross\"][:index+1])/(index+1)\n lst.append(g)\n name.append('avg_after_harry')\n except:\n pass\n data = np.array(lst)\n ser = pd.Series(data) \n ser.index = name\n return ser\n \n\n# ---------------------------------------------------------------------\n# Question # 8\n# ---------------------------------------------------------------------\n\ndef parse_malformed(fp):\n \"\"\"\n Parses and loads the malformed csv data into a \n properly formatted dataframe (as described in \n the question).\n\n :param fh: file handle for the malformed csv-file.\n :returns: a Pandas DataFrame of the data, \n as specificed in the question statement.\n\n :Example:\n >>> fp = os.path.join('data', 'malformed.csv')\n >>> df = parse_malformed(fp)\n >>> cols = ['first', 'last', 'weight', 'height', 'geo']\n >>> list(df.columns) == cols\n True\n >>> df['last'].dtype == np.dtype('O')\n True\n >>> df['height'].dtype == np.dtype('float64')\n True\n >>> df['geo'].str.contains(',').all()\n True\n >>> len(df) == 100\n True\n >>> dg = pd.read_csv(fp, nrows=4, skiprows=10, names=cols)\n >>> dg.index = range(9, 13)\n >>> (dg == df.iloc[9:13]).all().all()\n True\n \"\"\"\n a = open(fp)\n header = a.readline()\n header = header.replace('\\n','')\n ls = header.split(',')\n df = 
pd.DataFrame(columns = ls)\n lst = a.readlines()\n for i in range(len(lst)):\n lst[i] = lst[i].replace('\"','')\n lst[i] = lst[i].replace('\\n','')\n LiST = []\n for i in range(len(lst)):\n l = lst[i].split(',')\n if '' in l:\n l.remove('')\n LiST.append(l[0])\n LiST.append(l[1])\n LiST.append(float(l[2]))\n LiST.append(float(l[3]))\n LiST.append(l[4]+','+l[5])\n lst[i] = LiST\n LiST = []\n df = pd.DataFrame(lst, columns = ls, index = range(len(lst))) \n return df\n\n\n# ---------------------------------------------------------------------\n# DO NOT TOUCH BELOW THIS LINE\n# IT'S FOR YOUR OWN BENEFIT!\n# ---------------------------------------------------------------------\n\n\n# Graded functions names! DO NOT CHANGE!\n# This dictionary provides your doctests with\n# a check that all of the questions being graded\n# exist in your code!\n\nGRADED_FUNCTIONS = {\n 'q00': ['consecutive_ints'],\n 'q01': ['median'],\n 'q02': ['same_diff_ints'],\n 'q03': ['prefixes'],\n 'q04': ['evens_reversed'],\n 'q05': ['last_chars'],\n 'q06': ['arr_%d' % d for d in range(1, 5)],\n 'q07': ['movie_stats'],\n 'q08': ['parse_malformed']\n}\n\n\ndef check_for_graded_elements():\n \"\"\"\n >>> check_for_graded_elements()\n True\n \"\"\"\n \n for q, elts in GRADED_FUNCTIONS.items():\n for elt in elts:\n if elt not in globals():\n stmt = \"YOU CHANGED A QUESTION THAT SHOULDN'T CHANGE! \\\n In %s, part %s is missing\" % (q, elt)\n raise Exception(stmt)\n\n return True\n","sub_path":"labs/lab01/lab01.py","file_name":"lab01.py","file_ext":"py","file_size_in_byte":11617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"161320229","text":"import tempfile\nimport os\nimport subprocess\n\nfrom django.conf import settings\nfrom django.db import connection\n\nclass DbUtil(object):\n \"\"\"\n a set of utility method to access db\n \"\"\"\n def __init__(self):\n raise Exception(\"Utility class can't be instantiated.\")\n\n \n _database = settings.DATABASES[\"default\"]\n _env = os.environ.copy()\n _table_schema_dump_cmd = [\"pg_dump\", \"-h\", _database[\"HOST\"], \"-d\", _database[\"NAME\"], \"-U\", _database[\"USER\"], \"-F\", \"p\", \"-w\", \"-x\", \"-O\", \"--no-security-labels\", \"--no-tablespaces\", \"-s\"]\n if 'PASSWORD' in _database and _database['PASSWORD'].strip():\n _env[\"PGPASSWORD\"] = _database[\"PASSWORD\"]\n if _database[\"PORT\"]:\n _table_schema_dump_cmd += [\"-p\", str(_database[\"PORT\"])]\n\n _cursor=connection.cursor()\n\n _query_index_constraint_sql = \"\"\"\nSELECT s.conname ,s.contype\nFROM pg_constraint s JOIN pg_class c ON s.conrelid = c.oid JOIN pg_namespace n on c.relnamespace = n.oid \nWHERE n.nspname='{0}' and c.relname='{1}' and s.contype in ('p','u')\n\"\"\"\n _query_index_sql = \"\"\"\nSELECT ci.relname,i.indisprimary \nFROM pg_index i JOIN pg_class ci ON i.indexrelid = ci.oid JOIN pg_class ct ON i.indrelid = ct.oid JOIN pg_namespace np on ct.relnamespace = np.oid \nWHERE np.nspname='{0}' and ct.relname='{1}'\n\"\"\"\n\n @staticmethod\n def get_create_table_sql(schema,table):\n #get the input table structure\n f = tempfile.NamedTemporaryFile(delete=False)\n f.close()\n cmd = DbUtil._table_schema_dump_cmd + [\"-t\", schema + \".\" + table, \"-f\", f.name]\n output = subprocess.Popen(cmd,stdout=subprocess.PIPE,stderr=subprocess.PIPE, env=DbUtil._env).communicate()\n if output[1].strip() :\n raise Exception(output[1])\n try:\n reader = open(f.name,'r') \n return ''.join([s for s in reader if not (s.startswith('SET') or s.startswith('--')) 
and s.strip() ])\n        finally:\n            if reader:\n                reader.close()\n            os.unlink(f.name)\n\n    @staticmethod\n    def drop_all_indexes(schema,table,include_pk=False):\n        \"\"\"\n        drop all indexes.\n        drop primary key also if include_pk is true\n        \"\"\"\n        #drop related constraint first\n        #import ipdb;ipdb.set_trace()\n        sql_result = DbUtil._cursor.execute(DbUtil._query_index_constraint_sql.format(schema,table))\n        rows = None\n        if sql_result: \n            rows = sql_result.fetchall()\n        else:\n            rows = DbUtil._cursor.fetchall()\n        drop_constraint_sql = \"\\r\\n\".join([\"ALTER TABLE \\\"{0}\\\".{1} DROP CONSTRAINT IF EXISTS {2} CASCADE;\".format(schema,table,r[0]) for r in rows if r[1] != 'p' or include_pk ])\n        if drop_constraint_sql:\n            DbUtil._cursor.execute(drop_constraint_sql)\n\n        sql_result = DbUtil._cursor.execute(DbUtil._query_index_sql.format(schema,table))\n        rows = None\n        if sql_result: \n            rows = sql_result.fetchall()\n        else:\n            rows = DbUtil._cursor.fetchall()\n        drop_index_sql = \"\\r\\n\".join([\"DROP INDEX IF EXISTS \\\"{0}\\\".\\\"{1}\\\" CASCADE;\".format(schema,r[0]) for r in rows if not r[1] or include_pk ])\n        if drop_index_sql:\n            DbUtil._cursor.execute(drop_index_sql)\n\n","sub_path":"borg_utils/db_util.py","file_name":"db_util.py","file_ext":"py","file_size_in_byte":3275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"19698186","text":"#!/usr/bin/python3\n# Not tested\nimport socket\n\ndef banner(ip, port):\n    s = socket.socket()\n    s.connect((ip, int(port)))\n    s.settimeout(5)\n    print(str(s.recv(1024)).strip('b'))\ndef main():\n    ip = input(\"Enter IP: \")\n    port = str(input(\"Enter port: \"))\n    banner(ip,port)\nmain()","sub_path":"BannerGrabber/bannergrabber.py","file_name":"bannergrabber.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"521311385","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport utils\n\n\nclass GlobalAttention(nn.Module):\n    \"\"\"\n    attention types:\n    bilinear: H_j^T W_a q (bilinear)\n    mlp: v_a^T tanh(W_a q + U_a h_j)\n    \"\"\"\n\n\n    def __init__(self, query_dim, key_dim, attn_type=\"bilinear\"):\n        super(GlobalAttention, self).__init__()\n\n        self.query_dim = query_dim\n        self.key_dim = key_dim\n        assert attn_type in [\"bilinear\", \"mlp\"]\n\n        self.attn_type = attn_type\n\n        if self.attn_type == \"bilinear\":\n            self.linear_in = nn.Linear(query_dim, key_dim, bias=False)\n            self.linear_out = nn.Linear(query_dim + key_dim, query_dim, False)\n        elif self.attn_type == \"mlp\":\n            # mlp attention assumes the query and key share one size; expose\n            # it as self.dim so score() can reuse it\n            self.dim = dim = query_dim\n            self.linear_context = nn.Linear(dim, dim, bias=False)\n            self.linear_query = nn.Linear(dim, dim, bias=True)\n            self.v = nn.Linear(dim, 1, bias=False)\n            self.linear_out = nn.Linear(dim * 2, dim, True)\n\n    def score(self, h_t, h_s):\n        \"\"\"\n        Args:\n            h_t (FloatTensor): sequence of queries [batch x tgt_len x h_t_dim]\n            h_s (FloatTensor): sequence of sources [batch x src_len x h_s_dim]\n        Returns:\n            raw attention scores for each src index [batch x tgt_len x src_len]\n        \"\"\"\n\n        src_batch, src_len, src_dim = h_s.size()\n        tgt_batch, tgt_len, tgt_dim = h_t.size()\n        utils.aeq(src_batch, tgt_batch)\n        #utils.aeq(src_dim, tgt_dim)\n\n        if self.attn_type == \"bilinear\":\n            h_t_ = h_t.view(tgt_batch * tgt_len, tgt_dim)\n            h_t_ = self.linear_in(h_t_)\n            h_t = h_t_.view(tgt_batch, tgt_len, src_dim)\n            h_s_ = h_s.transpose(1, 2)\n            return torch.bmm(h_t, h_s_)\n        else:\n            dim = self.dim\n            wq = self.linear_query(h_t.view(-1, dim))\n
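            # broadcast the projected queries against every source position\n            wq = wq.view(tgt_batch, 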
tgt_len, 1, dim)\n            wq = wq.expand(tgt_batch, tgt_len, src_len, dim)\n\n            uh = self.linear_context(h_s.contiguous().view(-1, dim))\n            uh = uh.view(src_batch, 1, src_len, dim)\n            uh = uh.expand(src_batch, tgt_len, src_len, dim)\n\n            # (batch, t_len, s_len, d)\n            wquh = torch.tanh(wq + uh)\n\n            return self.v(wquh.view(-1, dim)).view(tgt_batch, tgt_len, src_len)\n\n    def forward(self, query, memory_bank, memory_lengths=None, use_softmax=True):\n        \"\"\"\n        Args:\n            query (FloatTensor): query vectors [batch x tgt_len x dim]\n            memory_bank (FloatTensor): source vectors [batch x src_len x dim]\n            memory_lengths (LongTensor): source context lengths [batch]\n            use_softmax (bool): use softmax to produce alignment score,\n                otherwise use sigmoid for each individual one\n        Returns:\n            (FloatTensor, FloatTensor)\n            computed attention weighted average: [batch x tgt_len x dim]\n            attention distribution: [batch x tgt_len x src_len]\n        \"\"\"\n        '''\n        print(\"memory_bank:\")\n        print(memory_bank.size())\n        '''\n\n        if query.dim() == 2:\n            one_step = True\n            query = query.unsqueeze(1)\n        else:\n            one_step = False\n\n        src_batch, src_len, src_dim = memory_bank.size()\n        query_batch, query_len, query_dim = query.size()\n        utils.aeq(src_batch, query_batch)\n        #utils.aeq(src_dim, query_dim)\n\n        align = self.score(query, memory_bank)\n        '''\n        print(\"memory_lengths:\")\n        print(memory_lengths.size())\n        print(memory_lengths)\n        '''\n\n        if memory_lengths is not None:\n            mask = utils.sequence_mask(memory_lengths, max_len=align.size(-1))\n            mask = mask.unsqueeze(1)\n            align.masked_fill_(1 - mask, -float('inf'))\n        '''\n        print(\"align:\")\n        print(align)\n        print(align.size())\n        '''\n        if use_softmax:\n            align_vectors = F.softmax(align.view(src_batch * query_len, src_len), -1)\n            align_vectors = align_vectors.view(src_batch, query_len, src_len)\n        else:\n            align_vectors = F.sigmoid(align)\n        '''\n        print(\"align after normalize:\")\n        print(align_vectors)\n        print(\"align_vectors:\")\n        print(align_vectors)\n        print(align_vectors.size())\n        print(\"memory_bank:\")\n        print(memory_bank)\n        print(memory_bank.size())\n        '''\n\n        c = torch.bmm(align_vectors, memory_bank)\n        # c is the attention weighted context representation\n        # [batch x tgt_len x hidden_size]\n        '''\n        print(\"c:\")\n        print(c.size())\n        print(\"query:\")\n        print(query.size())\n        '''\n\n        concat_c = torch.cat([c, query], 2).view(src_batch * query_len, src_dim + query_dim)\n        '''\n        print(\"concat_c:\")\n        print(concat_c.size())\n        '''\n        attn_h = self.linear_out(concat_c).view(src_batch, query_len, query_dim)\n        if self.attn_type == \"bilinear\":\n            attn_h = torch.tanh(attn_h)\n\n        if one_step:\n            attn_h = attn_h.squeeze(1)\n            align_vectors = align_vectors.squeeze(1)\n\n            batch_, dim_ = attn_h.size()\n            utils.aeq(src_batch, batch_)\n            utils.aeq(src_dim, dim_)\n            batch_, src_l_ = align_vectors.size()\n            utils.aeq(src_batch, batch_)\n            utils.aeq(src_len, src_l_)\n\n        else:\n\n            batch_, target_l_, dim_ = attn_h.size()\n            utils.aeq(target_l_, query_len)\n            utils.aeq(batch_, query_batch)\n            utils.aeq(dim_, query_dim)\n\n            batch_, target_l_, source_l_ = align_vectors.size()\n            utils.aeq(target_l_, query_len)\n            utils.aeq(batch_, query_batch)\n            utils.aeq(source_l_, src_len)\n\n        return attn_h, align_vectors, align","sub_path":"modules/attention.py","file_name":"attention.py","file_ext":"py","file_size_in_byte":5810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"238411350","text":"def sortIndexes(amount, values):\r\n    answer = []\r\n\r\n    #sorting\r\n    sortedValues = sorted(values)\r\n\r\n    
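# one possible linear-time variant (sketch, assuming distinct values):\r\n    #   positions = {v: i + 1 for i, v in enumerate(values)}\r\n    #   print(' '.join(str(positions[v]) for v in sortedValues))\r\n    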
#finding indexes\r\n    for sortedValue in sortedValues: #for x in each value in the list\r\n        for x in range(int(amount)): #for y in length of list\r\n            if sortedValue == values[x]: #if no[x]==no[y]\r\n                answer.append(str(x+1))\r\n    print(' '.join(answer))\r\nsortIndexes(input(),list(map(int,input().split(' '))))","sub_path":"sort_indexes.py","file_name":"sort_indexes.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"221019294","text":"#!/bin/python3\nimport hashlib\n\ninp = \"ckczppom\"\n\ndef hash(i):\n    return hashlib.md5(i.encode()).hexdigest()\n\ni=0\nwhile True:\n    if hash(f\"{inp}{i}\").startswith(\"00000\"):\n        print(i)\n        break\n    i += 1\n","sub_path":"2015/day04/p1.py","file_name":"p1.py","file_ext":"py","file_size_in_byte":214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"574662668","text":"#\n# @lc app=leetcode.cn id=169 lang=python3\n#\n# [169] Majority Element\n#\n# https://leetcode-cn.com/problems/majority-element/description/\n#\n# algorithms\n# Easy (65.15%)\n# Likes:    822\n# Dislikes: 0\n# Total Accepted:    243.1K\n# Total Submissions: 373.1K\n# Testcase Example:  '[3,2,3]'\n#\n# Given an array of size n, find the majority element - the element that\n# appears more than ⌊ n/2 ⌋ times in the array.\n# \n# You may assume the array is non-empty and that a majority element always\n# exists in the given array.\n# \n# \n# \n# Example 1:\n# \n# Input: [3,2,3]\n# Output: 3\n# \n# Example 2:\n# \n# Input: [2,2,1,1,1,2,2]\n# Output: 2\n# \n# \n#\n# count with a dict / \n# @lc code=start\nclass Solution:\n    def majorityElement(self, nums: List[int]) -> int:\n        '''\n        n = len(nums)\n        target = n // 2\n        cnt = {}\n        for i in range(n):\n            if nums[i] in cnt:\n                cnt[nums[i]] += 1\n            else: cnt[nums[i]] = 1\n            if cnt[nums[i]] > target:\n                return nums[i]\n        '''\n        # Boyer-Moore majority vote\n        l = len(nums)\n        if l == 1: return nums[0]\n\n        res = nums[0]\n        cnt = 1\n        for i in range(1,l):\n            if cnt == 0:\n                cnt += 1\n                res = nums[i]\n            elif res == nums[i]:\n                cnt += 1\n            else:\n                cnt -= 1\n        return res\n\n# @lc code=end\n\n","sub_path":"169.多数元素.py","file_name":"169.多数元素.py","file_ext":"py","file_size_in_byte":1389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"273170991","text":"#!/usr/bin/python3.4\n\nif __name__ == \"__main__\":\n    \"\"\"Given 2 integer arrays, determine if the 2nd array is a rotated version\n    of the 1st array. Ex. Original Array A={1,2,3,5,6,7,8}\n    Rotated Array B={5,6,7,8,1,2,3}\n    Assume there are no duplicated numbers in the array\"\"\"\n\n    A = [1, 2, 3, 4, 5, 6, 7, 8]\n    B = [8, 1, 2, 3, 4, 5, 6, 7]\n    B = [5, 6, 7, 8, 1, 2, 3, 4]\n\n    print(A, B)\n\n    index_A = 0\n    index_X = 0\n\n    # find pivot of A in B\n    for idx, x in enumerate(B):\n        if x == A[0]:\n            pivot = idx\n            break\n\n    if A == B[pivot:] + B[:pivot]:\n        print(\"2nd array is a rotated version of the 1st array\")\n","sub_path":"python/tasks/rotated_array.py","file_name":"rotated_array.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"618320282","text":"'''\n    Name: Vidit Maheshwari\n    Description:\n'''\n\nfrom django.conf.urls import url\n\nfrom . 
import services\n\nurlpatterns = [\n url(r'^addState', services.addState, name='addState'),\n url(r'^addCities', services.addCities, name='addCities'),\n url(r'^getStates', services.getStates, name='getStates'),\n url(r'^getCities', services.getCities, name='getCities'),\n]","sub_path":"indianStatesAndCities/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"526401845","text":"#Cooley–Tukey FFT algorithm\r\nimport math, time\r\nN = 1024\r\n\r\ndef W(N, r):\r\n return math.e**(-2*r*math.pi*1j/N)\r\n \r\ndef symme(a, N):\r\n s = str(bin(a))[2:]\r\n s = (int(math.log2(N)) - len(s))*\"0\" + s\r\n s = s[::-1]\r\n return int(''.join(s), 2)\r\n\r\ndef cal(li, m):\r\n y = [None for i in range(len(li))]\r\n\r\n for i in range(len(li)):\r\n if i%2**(m+1) == i%2**m:\r\n y[i] = li[i] + W(2**(m+1), i%(2**m))*li[i + 2**m]\r\n else:\r\n y[i] = li[i - 2**m] - W(2**(m+1), i%(2**m))*li[i]\r\n return y\r\n\r\ndef fft(li, N = None):\r\n if N == None:\r\n if int(math.log2(len(li))) == math.log2(len(li)):\r\n N = 2**(int(math.log2(len(li))))\r\n else:\r\n N = 2**(int(math.log2(len(li))) + 1)\r\n y = []\r\n if len(li) < N:\r\n temp = [0 for i in range(N - len(li))]\r\n li = li + temp\r\n for i in range(len(li)):\r\n y.append(li[symme(i, N)])\r\n for i in range(int(math.log2(N))):\r\n if i == 0:\r\n li = cal(y, i)\r\n else:\r\n li = cal(li, i)\r\n return li\r\n\r\nstart = time.time()\r\nli = [i for i in range(1024)]\r\ny = fft(li, 1024)\r\n#numpy.fft.fft(li)\r\nend = time.time() - start\r\n\r\n","sub_path":"fft.py","file_name":"fft.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"581854075","text":"##############################################################################\n#\n# Copyright (c) 2004-2006 TINY SPRL. (http://tiny.be) All Rights Reserved.\n# Copyright (c) 2007-2008 Albert Cervera i Areny \n#\n# WARNING: This program as such is intended to be used by professional\n# programmers who take the whole responsability of assessing all potential\n# consequences resulting from its eventual inadequacies and bugs\n# End users who are looking for a ready-to-use solution with commercial\n# garantees and support are strongly adviced to contract a Free Software\n# Service Company\n#\n# This program is Free Software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n#  GNU General Public License for more details.\n#\n#  You should have received a copy of the GNU General Public License\n#  along with this program; if not, write to the Free Software\n#  Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.\n#\n##############################################################################\n\nfrom Koo.Common import Common\nfrom PyQt5.QtWidgets import *\nfrom Koo.Common import Shortcuts\n\nfrom Koo.Common.Calendar import *\nfrom Koo.Search.AbstractSearchWidget import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtCore import *\nfrom Koo.Common.Ui import *\n\n(DateSearchWidgetUi, DateSearchWidgetBase) = loadUiType(\n    Common.uiPath('search_date.ui'))\n\n\nclass DateSearchWidget(AbstractSearchWidget, DateSearchWidgetUi):\n    def __init__(self, name, parent, attrs={}):\n        AbstractSearchWidget.__init__(self, name, parent, attrs)\n        DateSearchWidgetUi.__init__(self)\n        self.setupUi(self)\n\n        # Catch keyDownPressed\n        self.uiStart.installEventFilter(self)\n        self.uiEnd.installEventFilter(self)\n\n        # Add shortcuts\n        self.scStartSearch = QShortcut(self.uiStart)\n        self.scStartSearch.setKey(Shortcuts.SearchInField)\n        self.scStartSearch.setContext(Qt.WidgetShortcut)\n        self.scStartSearch.activated.connect(self.showStartCalendar)\n\n        self.scEndSearch = QShortcut(self.uiEnd)\n        self.scEndSearch.setKey(Shortcuts.SearchInField)\n        self.scEndSearch.setContext(Qt.WidgetShortcut)\n        self.scEndSearch.activated.connect(self.showEndCalendar)\n\n        self.widget = self\n        self.focusWidget = self.uiStart\n        self.pushStart.clicked.connect(self.showStartCalendar)\n        self.pushEnd.clicked.connect(self.showEndCalendar)\n\n    def showStartCalendar(self):\n        PopupCalendarWidget(self.uiStart)\n\n    def showEndCalendar(self):\n        PopupCalendarWidget(self.uiEnd)\n\n    def value(self):\n        res = []\n        date = textToDate(self.uiStart.text())\n        val = dateToStorage(date)\n        if val:\n            self.uiStart.setText(dateToText(date))\n            res.append((self.name, '>=', val))\n        else:\n            self.uiStart.clear()\n        date = textToDate(self.uiEnd.text())\n        val = dateToStorage(date)\n        if val:\n            self.uiEnd.setText(dateToText(date))\n            res.append((self.name, '<=', val))\n        else:\n            self.uiEnd.clear()\n        return res\n\n    def clear(self):\n        self.uiStart.clear()\n        self.uiEnd.clear()\n\n    def setValue(self, value):\n        if value:\n            self.uiStart.setText(str(value))\n            self.uiEnd.setText(str(value))\n        else:\n            self.uiStart.clear()\n            self.uiEnd.clear()\n","sub_path":"Koo/Search/Calendar/DateSearchWidget.py","file_name":"DateSearchWidget.py","file_ext":"py","file_size_in_byte":3768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"519274943","text":"from typing import List\nfrom collections import defaultdict\nimport copy\n\nclass ListNode:\n    def __init__(self, x):\n        self.val = x\n        self.next = None\n\nclass Solution:\n    def permute(self, nums: List[int]) -> List[List[int]]:\n        def dfs(nums, r, path):\n            if not nums: # len(p) = len(n)\n                r.append(path)\n            for i in range(len(nums)):\n                dfs(nums[:i] + nums[i+1:], r, path + [nums[i]])\n        res = []\n        dfs(nums, res, [])\n        return res\n    def groupAnagrams(self, strs: List[str]) -> List[List[str]]:\n        d = defaultdict(list)\n        for s in strs:\n            bag=[0] * 26\n            for c in s:\n                bag[ord(c) - ord('a')] += 1\n            d[tuple(bag)].append(s)\n        return d.values()\n    def myPow(self, x: float, n: int) -> float:\n        if not n:\n            return 1\n        if n < 0:\n            return 1 / self.myPow(x, -n)\n        if n % 2: # n is odd\n            return x * self.myPow(x, n - 1)\n        return self.myPow(x * x, n // 2)  # integer halving keeps n an int\n\n# 2.00000 -2147483648\n
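# ad-hoc fixtures: 'a' feeds groupAnagrams below; 'b' and 'c' are spare inputs\na = [\"eat\", 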
\"tea\", \"tan\", \"ate\", \"nat\", \"bat\"]\nb = [1,1,2]\nc = [\n [1,2,3],\n [4,5,6],\n [7,8,9]\n]\nres = Solution()\n#print(res.permute(a))\n#print(res.permuteUnique(b))\nans = res.groupAnagrams(a)\nprint(ans)","sub_path":"leetcode/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"258487213","text":"import sys\nimport numpy as np\nfrom sklearn.metrics import accuracy_score\n\nclass ResultAnalyzer(object):\n def __init__(self, file_pred, file_true):\n self.file_pred = file_pred\n self.file_true = file_true\n self.y_pred = []\n self.y_true = []\n self.load_data()\n\n def load_data(self):\n self.y_pred = self.load(self.file_pred)\n self.y_true = self.load(self.file_true)\n \n def load(self, file_name):\n y = []\n with open(file_name, 'r') as fp:\n for elem in fp:\n y.append(float(elem.strip()))\n return np.array(y,dtype=float)\n \n def calculate(self):\n accuracy = accuracy_score(self.y_true, self.y_pred)\n print(accuracy*100)\n\nif __name__ == '__main__':\n args = sys.argv[1:]\n if len(args) == 2:\n file_pred = args[0]\n file_true = args[1]\n ra = ResultAnalyzer(file_pred, file_true)\n ra.calculate()\n else:\n print(\"python Analyzer.py \")","sub_path":"Trabalho1/Analyzer.py","file_name":"Analyzer.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"224849149","text":"from flask.ext.lastuser import LastUser\nfrom flask_debugtoolbar import DebugToolbarExtension\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask import Flask, _app_ctx_stack\nfrom sqlite3 import dbapi2 as sqlite3\nfrom flask_mail import Mail\nimport logging\n\nlogging.basicConfig(filename = \"flaskr.log\", level=logging.INFO)\n\n__all__=['app','init_db', 'get_db', 'close_db_connection', 'lastuser', 'mail']\n\napp = Flask(__name__, instance_relative_config=True)\napp.config.from_object('config.DevelopmentConfig')\nmail = Mail(app)\nDebugToolbarExtension(app)\n\ndb = SQLAlchemy(app)\nlastuser = LastUser()\nlastuser.init_app(app)\n\ndef init_db():\n \"\"\"Creates the database tables.\"\"\"\n with app.app_context():\n db = get_db()\n with app.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()\n\ndef get_db():\n \"\"\"Opens a new database connection if there is none yet for the\n current application context.\n \"\"\"\n top = _app_ctx_stack.top\n if not hasattr(top, 'sqlite_db'):\n sqlite_db = sqlite3.connect(app.config['DATABASE'])\n sqlite_db.row_factory = sqlite3.Row\n top.sqlite_db = sqlite_db\n return top.sqlite_db\n\n@app.teardown_appcontext\ndef close_db_connection(exception):\n \"\"\"Closes the database again at the end of the request.\"\"\"\n top = _app_ctx_stack.top\n if hasattr(top, 'sqlite_db'):\n top.sqlite_db.close()\n\nfrom views import index,login","sub_path":"flaskr/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"326841246","text":"import os\nimport telebot\nfrom flask import Flask, request\n\nTOKEN = ''\nbot = telebot.TeleBot(token=TOKEN)\nserver = Flask(__name__)\n\n@bot.message_handler(commands=['start'])\ndef send_welcome(message):\n\tbot.reply_to(message, \"Howdy, how are you doing? 
\\n Answer with the following replies \\n /parade_state \\n /sarprogramme \\n /126parade_state \\n /flyingprogramme \\n Thank you\")\n\n@bot.message_handler(commands=['hello'])\ndef send_welcome(message):\n\tbot.send_message(message.chat.id, \"hello\")\n\n@bot.message_handler(commands=['parade_state'])\ndef get_ps(message):\n with open(\"sqnexcel.csv\",\"rb\") as misc:\n f=misc.read()\n bot.send_message(message.chat.id,f)\n\n\n\n@bot.message_handler(commands=['sarprogramme'])\ndef get_sar(message):\n with open(\"sarprogramme.csv\",\"rb\") as misc1:\n g=misc1.read()\n bot.send_message(message.chat.id,g)\n\n\n@bot.message_handler(commands=['126parade_state'])\ndef get_ps(message):\n with open(\"126sqnexcel.csv\",\"rb\") as misc2:\n h=misc2.read()\n bot.send_message(message.chat.id,h)\n\n@bot.message_handler(commands=['flyingprogramme'])\ndef get_ps(message):\n with open(\"flyingprogramme.csv\",\"rb\") as misc3:\n j=misc3.read()\n bot.send_message(message.chat.id,j)\n\n\n@server.route('/' + TOKEN, methods=['POST'])\ndef getMessage():\n bot.process_new_updates([telebot.types.Update.de_json(request.stream.read().decode(\"utf-8\"))])\n return \"!\", 200\n\n@server.route(\"/\")\ndef webhook():\n bot.remove_webhook()\n bot.set_webhook(url='' + TOKEN)\n return \"!\", 200\n\nif __name__ == \"__main__\":\n server.run(host=\"0.0.0.0\", port=int(os.environ.get('PORT', 5000)))\n","sub_path":"main[for github].py","file_name":"main[for github].py","file_ext":"py","file_size_in_byte":1644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"87636545","text":"import pandas as pd\nimport numpy as np\n\n\nposteriors = pd.read_csv(\"posterior.csv\")\nprint(posteriors.columns)\ndef highest_density_interval(pmf, p=.9, debug=False):\n print(pmf.describe())\n print(\"hello\")\n # If we pass a DataFrame, just call this recursively on the columns\n if(isinstance(pmf, pd.DataFrame)):\n return pd.DataFrame([highest_density_interval(pmf[col], p=p) for col in pmf],index=pmf.columns)\n \n cumsum = np.cumsum(pmf.values)\n \n # N x N matrix of total probability mass for each low, high\n total_p = cumsum - cumsum[:, None]\n \n # Return all indices with total_p > p\n lows, highs = (total_p > p).nonzero()\n \n # Find the smallest range (highest density)\n# print(\"total_p\\n{}lows\\n{}highs\\n{}\".format(total_p,lows,highs))\n# print(\"\\nhighs and lows: \",len(highs),len(lows))\n \n# print(total_p.max())\n if len(lows)>0:\n best = (highs - lows).argmin()\n\n low = pmf.index[lows[best]]\n high = pmf.index[highs[best]]\n \n return pd.Series([low, high],\n index=[f'Low_{p*100:.0f}',\n f'High_{p*100:.0f}'])\n \n\n# hdi = highest_density_interval(posteriors, debug=True)\n# hdi.tail()","sub_path":"highDensityInterval.py","file_name":"highDensityInterval.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"462593353","text":"\"\"\"\nDjango settings for alefone_web project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.7/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.7/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\n#/home/santosh/code/alefone_web/src\n\n\n\n#email related config\nEMAIL_HOST = 'smtp.gmail.com'\nEMAIL_HOST_USER = 'bsantoshraj@gmail.com'\nEMAIL_HOST_PASSWORD = 
'k1ngf1sh'\nEMAIL_PORT = 587\nEMAIL_USE_TLS = True\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = 'wuw--w$&_+mxadi(gn3)5-1dc9y^_2hxb3dx=6(4sntjosn3r9'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nTEMPLATE_DEBUG = True\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': ['/home/santosh/code/alefone_web/static/templates/'],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'django.core.context_processors.csrf',\n # 'allauth.account.context_processors.account',\n # 'allauth.socialaccount.context_processors.socialaccount',\n ],\n },\n },\n]\n\n\nAUTHENTICATION_BACKENDS = (\n \"django.contrib.auth.backends.ModelBackend\",\n \"allauth.account.auth_backends.AuthenticationBackend\"\n)\n\n\n\n\nALLOWED_HOSTS = []\n\n\n# Application definition\n\nINSTALLED_APPS = (\n# 'django_admin_bootstrapped',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.sites',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'profiles',\n 'polls',\n 'provider',\n 'provider.oauth2',\n 'tastypie',\n 'crispy_forms',\n 'allauth',\n 'allauth.account',\n 'allauth.socialaccount',\n)\n\nSITE_ID=1\n\nCRISPY_TEMPLATE_PACK = 'bootstrap3'\n#LOGIN_REDIRECT_URL = '/profile/'\n#LOGIN_URL='/acounts/login/'\nACCOUNT_AUTHENTICATION_METHOD=\"username_email\"\n#ACCOUNT_USERNAME_REQUIRED=False\n#ACCOUNT_EMAIL_REQUIRED=True\n#ACCOUNT_USER_MODEL_USERNAME_FIELD=None\n#ACCOUNT_EMAIL_VERIFICATION = 'mandatory'\nLOGIN_REDIRECT_URL='/'\n\nACCOUNT_AUTHENTICATION_METHOD = 'email'\nACCOUNT_LOGOUT_ON_GET = False\nACCOUNT_EMAIL_REQUIRED = True\nACCOUNT_UNIQUE_EMAIL = True\nACCOUNT_USERNAME_REQUIRED = False\nACCOUNT_USER_MODEL_USERNAME_FIELD = None\nACCOUNT_USER_MODEL_EMAIL_FIELD = \"email\"\nACCOUNT_USER_DISPLAY = lambda user: user.get_short_name()\nACCOUNT_EMAIL_VERIFICATION = \"optional\"\n\n#STRIPE INFO\n#TEST KEYS\nSTRIPE_PUBLISHABLE_KEY = 'pk_test_ewCzElJ9vuuc426wMqPUNMbn'\nSTRIPE_SECRET_KEY = 'sk_test_3iXHFWwjUtXOEICNCxf54bmy'\n\n\n#LIVE KEYS\n#STRIPE_PUBLISHABLE_KEY = 'pk_live_qEq33XtDyB2AonDjzeokYNhi'\n#STRIPE_SECRET_KEY = 'sk_live_Tvda7Kepq238nvTsyTpZvFnd'\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nROOT_URLCONF = 'alefone_web.urls'\n\nWSGI_APPLICATION = 'alefone_web.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.7/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n}\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.7/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\nTEMPLATE_DIRS = (\n 
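# NOTE: TEMPLATE_DIRS is the legacy (pre-Django 1.8) setting; it duplicates\n    # the TEMPLATES[0]['DIRS'] entry configured above\n    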
#'/home/santosh/code/alefone_web/static/templates',\n    os.path.join(os.path.dirname(BASE_DIR),\"static\",\"templates\"),\n)\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.7/howto/static-files/\n\n#media is anything uploaded by user\n\nMEDIA_ROOT = os.path.join(os.path.dirname(BASE_DIR), \"static\", \"media\")\n#/home/santosh/code/alefone_web/static/media\nMEDIA_URL = '/media/'\n\nSTATIC_URL = '/static/'\nSTATIC_ROOT = os.path.join(os.path.dirname(BASE_DIR), \"static\", \"root\")\n#STATIC_ROOT = '/home/santosh/code/alefone_web/static/root'\n#/home/santosh/code/alefone_web/static/root\nSTATICFILES_DIRS = (\n    os.path.join(os.path.dirname(BASE_DIR), \"static\",\"static\"),\n)\n#/home/santosh/code/alefone_web/static/static\n","sub_path":"src/alefone_web/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":4945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"466382079","text":"# -*- coding: utf-8 -*-\n\"\"\"\nInit module.\n\"\"\"\nimport socket\nimport flask\nfrom guest_book import config\n\n\napp = flask.Flask(__name__)\napp.config.from_object(config.Config)\n\n\ndef get_cur_addr() -> str:\n    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n    try:\n        sock.connect(('10.255.255.255', 1))\n        addr = sock.getsockname()[0]\n    except Exception:\n        addr = '127.0.0.1'\n    sock.close()\n    return addr\n\n\nCUR_ADDR = get_cur_addr()\n\nfrom guest_book import routes  # noqa E402\n","sub_path":"guest_book/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"449105366","text":"from django.db.models import Count\nfrom django.shortcuts import render\nfrom django.views.generic import View\nfrom .models import Post\n# Create your views here.\nfrom django.core.paginator import Paginator\n\nclass BlogView(View):\n    def get(self, request):\n        post_objects = Post.objects.filter(is_active=True)\n        paginator = Paginator(post_objects, 6)\n        page = request.GET.get('page')\n        try:\n            posts = paginator.page(page)\n        except:\n            posts = paginator.page(1)\n\n        current_page = posts.number - 1 # number is at least 1, and since our indexes start at 0 we subtract 1\n        start_index = current_page - 3\n        if start_index < 0:\n            start_index = 0\n        max_pages = paginator.num_pages # same as with current_page\n        end_index = current_page + 3\n        if end_index > max_pages:\n            end_index = max_pages\n        page_range = list(paginator.page_range)[start_index:end_index]\n\n        return render(request, 'blog.html', context={'posts': posts, 'page_range': page_range})\n\n\nclass PostView(View):\n    def get(self, request, post_slug):\n        post = Post.objects.get(is_active=True, slug__iexact=post_slug)\n        posts = Post.objects.filter(is_active=True)\n        most_viewed_posts = posts.order_by('-views')[:5]\n        #queryset = Post.objects.filter(is_active=True).values(\"pub_date\").order_by('-pub_date')\n        #queryset = Post.objects.filter(is_active=True).values(\"pub_date__date\") Dan Tyan's variant\n        #queryset = posts.objects.annotate(year=models.functions.ExtractYear('pub_date'), month=models.functions.ExtractMonth('pub_date'),).values('year', 'month').distinct('year', 'month').order_by('-year', '-month')\n        #queryset = Post.objects.filter(is_active=True).values(\"pub_date__date__month\", \"pub_date__date__year\").order_by()\n        queryset = Post.objects.filter(is_active=True).values(\"pub_date__date\").order_by().annotate(Count('id'))\n        
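# the commented-out queryset lines are earlier attempts at grouping posts\n        # by publication date for the archive; the active one above is kept\n        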
#queryset = Post.objects.annotate(year=models.functions.ExtractYear('pub_date'), month=models.functions.ExtractMonth('pub_date'), ).values('year', 'month').annotate(total_posts=Count('id')).order_by('-year', '-month')\n        for item in queryset:\n            for i in item:\n                print(item[i])\n        #for item in queryset:\n        #    print(queryset(item).strftime('%B'))\n\n        #print('End of queryset!')\n        #archive_dates = []\n        #for item in posts:\n        #    item_dates = (item.pub_date.month, item.pub_date.year)\n        #    if item_dates not in archive_dates:\n        #        archive_dates.append(item_dates)\n        #print(archive_dates)\n        #archive_dates = sorted(archive_dates, key=lambda x: x[::-1], reverse=True)\n        #print(archive_dates)\n        try:\n            post_views = request.session['post_views_id_{}'.format(post.id)]\n        except:\n            request.session['post_views_id_{}'.format(post.id)] = post.id\n            post.views += 1\n            post.save()\n        try:\n            year = post.pub_date.year\n            print(year)\n            month = post.pub_date.strftime('%B')\n            print(month)\n        except:\n            print('It did not work!')\n        return render(request, 'post_detail.html', context={'post': post, 'most_viewed_posts': most_viewed_posts, 'queryset': queryset})\n\nclass BlogArchiveView(View):\n    def get(self, request, month, year):\n        posts = Post.objects.filter(pub_date__month=month, pub_date__year=year)\n        print(posts)\n        return render(request, 'blog_archive.html', context={'posts': posts})\n\nclass BlogSearchView(View):\n    def get(self, request):\n        search_query = request.GET.get('blog-search-field', '')\n\n        if search_query:\n            posts = Post.objects.filter(post_title__icontains=search_query, is_active=True)\n            return render(request, 'blog_search.html', context={'posts': posts, 'search_query': search_query})\n        else:\n            return render(request, 'blog_search.html', context={'search_query': '- You did not specify anything!'})","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"213589853","text":"import logging\n\ndef set_log(file_level,stream_level):\n    logger=logging.getLogger(__name__)\n    logger.setLevel(logging.DEBUG)\n    formatter=logging.Formatter('%(asctime)s:%(levelname)s:%(message)s')\n\n    file_handler=logging.FileHandler('youtube.log')\n    file_handler.setLevel(file_level)\n    file_handler.setFormatter(formatter)\n\n    stream_handler=logging.StreamHandler()\n    stream_handler.setLevel(stream_level)\n    stream_handler.setFormatter(formatter)\n\n    logger.addHandler(file_handler)\n    logger.addHandler(stream_handler)\n\n","sub_path":"yt_concate/logs.py","file_name":"logs.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"534237355","text":"\n# coding: utf-8\n\n# # File Sorting For XRF Data\n\n# This code will search through all subdirectories in \"src_dir\", looking for folders with \"Run\" and \"{E}kV\" in the name, where {E} is a specified energy value. 
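For example, a folder named \"Run12_30kV\" would match energy 30. 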
It will then copy all files in those folders to the appropriate folders in \"out_dir\", based on {E}.\n\n# In[1]:\n\nimport os\nimport shutil\n\nsrc = 'C:\\\\Users\\\\levay_b\\\\Work\\\\XRF_Data\\\\Users\\\\367\\\\U1499A'\nsrc_dir = os.path.join(src,\"\")\nout_dir = os.path.join(os.getcwd(),\"out\")\n\n\n# In[2]:\n\ndef sort_by_energy(src_dir,out_dir,energies):\n    for energy in energies:\n        energy_path = os.path.join(out_dir,str(energy)+\"kV\")\n        if (os.path.isdir(energy_path) == False):\n            os.mkdir(energy_path)\n\n    for root, dirs, files in os.walk(src_dir):\n        dirname = os.path.split(root)[1]\n        run_loc = dirname.find(\"Run\")\n        if (run_loc) != -1:\n            for energy in energies:\n                energy_str = str(energy) + \"kV\"\n                energy_loc = dirname.find(energy_str)\n                if (energy_loc) != -1:\n                    dest_path = os.path.join(out_dir,energy_str)\n                    for file in files:\n                        orig_file_path = os.path.join(root,file)\n                        new_file_path = os.path.join(dest_path,file)\n                        if (os.path.isfile(new_file_path) == False):\n                            shutil.copy(orig_file_path, dest_path)\n    return None\n\n\n# In[3]:\n\nsort_by_energy(src_dir,out_dir,[10,30,50])\n\n","sub_path":"file_reorg/file_reorg.py","file_name":"file_reorg.py","file_ext":"py","file_size_in_byte":1490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"3517776","text":"#!/usr/bin/env python3\n\nfrom amplify import Solver\nfrom amplify.client import FixstarsClient\n\n\ndef solve_problem(model):\n    # set client and parameters\n    client = FixstarsClient()\n    client.token = \"xxxxx\"\n    client.parameters.timeout = 9999\n    # set solver\n    solver = Solver(client)\n    # get result\n    result = solver.solve(model)\n    # extract value of objective function and binary variables\n    obj, values = result[0].energy, result[0].values\n    # get values of constraints\n    broken = model.check_constraints(values)\n    return obj, values, broken\n","sub_path":"solve_problem.py","file_name":"solve_problem.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"147698531","text":"from marshmallow import fields\nfrom .....messaging.agent_message import AgentMessage, AgentMessageSchema\nfrom ..message_types import ACKNOWLEDGE_PROOF, PROTOCOL_PACKAGE\n\nHANDLER_CLASS = f\"{PROTOCOL_PACKAGE}.handlers.acknowledge_proof.AcknowledgeProofHandler\"\n\n\nclass AcknowledgeProof(AgentMessage):\n    class Meta:\n        handler_class = HANDLER_CLASS\n        schema_class = \"AcknowledgeProofSchema\"\n        message_type = ACKNOWLEDGE_PROOF\n\n    def __init__(\n        self,\n        _id: str = None,\n        *,\n        credential=None,\n        **kwargs,\n    ):\n        \"\"\"Initialize acknowledge proof object.\"\"\"\n        super().__init__(_id=_id, **kwargs)\n        self.credential = credential\n\n\nclass AcknowledgeProofSchema(AgentMessageSchema):\n    \"\"\"Acknowledge proof schema.\"\"\"\n\n    class Meta:\n        \"\"\"Acknowledge proof schema metadata.\"\"\"\n\n        model_class = AcknowledgeProof\n\n    credential = fields.Str(required=False)\n","sub_path":"aries_cloudagent/protocols/present_proof/v1_1/messages/acknowledge_proof.py","file_name":"acknowledge_proof.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"472423285","text":"from collections import defaultdict \nfrom itertools import combinations \n\nfrom util import timeit \n\nclass Eclat: \n    def __init__(self, minSupport, minConfidence): \n        self.minSupport = minSupport \n        self.minConfidence = minConfidence \n\n    @timeit \n    def generateBitvectors(self, db): \n        
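# Build one integer bitvector per item: bit t is set iff transaction t \n        # contains the item, so support counting later reduces to a popcount \n        # and candidate intersection to a bitwise AND. \n        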
bitvectors = defaultdict(int) \n        for tIdx, transaction in enumerate(db.transactions): \n            for itemIdx in transaction: \n                bitvectors[frozenset([itemIdx])] |= (1 << tIdx) \n        return bitvectors \n\n    @timeit \n    def prune(self, bitvectors, db): \n        pruned = defaultdict(int) \n        transactionCounts = len(db.transactions) \n        for itemset, bitvector in bitvectors.items(): \n            count = bin(bitvector).count('1') \n            if count / transactionCounts >= self.minSupport: \n                pruned[itemset] = count \n        return pruned \n\n    @timeit \n    def mining(self, bitvectors, db): \n        largeItemsets = defaultdict(int) \n        pruned = self.prune(bitvectors, db) \n        if not pruned: \n            return largeItemsets \n        largeItemsets.update(pruned) \n        itemsets = list(pruned.keys())\n        for i, itemsetI in enumerate(itemsets): \n            candidates = defaultdict(int) \n            for j, itemsetJ in enumerate(itemsets[i+1:]):\n                candidate = itemsetI | itemsetJ \n                candidates[candidate] = bitvectors.get(itemsetI) & bitvectors.get(itemsetJ) \n            largeItemsets.update(self.mining(candidates, db)) \n        return largeItemsets \n\n    @timeit \n    def generateLargeItemsets(self, db):\n        bitvectors = self.generateBitvectors(db) \n        largeItemsets = self.mining(bitvectors, db) \n        return largeItemsets \n","sub_path":"hw1/src/Eclat.py","file_name":"Eclat.py","file_ext":"py","file_size_in_byte":1755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"563487970","text":"#!/usr/bin/env python3\r\n\r\n##########################################################################\r\n#Calculating Fibonacci numbers\r\n#This is a famous mathematics series.\r\n#\r\n# 1 1 2 3 5 8 13 21 34\r\n#\r\n#The first two numbers in the series are 1.\r\n#Each following number is the sum of the previous two numbers.\r\n############################################################################\r\n\r\n\r\n###########################################################################\r\n#The traditional way to program the Fibonacci series\r\n###########################################################################\r\ndef trad_fib(n):\r\n    a = 1   #The first number in the series\r\n    b = 1   #The second number in the series\r\n\r\n    while b < n:\r\n        print(a, end=\" \")\r\n        old_b = b   #Keep the original value of b\r\n        b = a + b   #The new value of b\r\n        a = old_b   #The new value of a\r\n    print(a, end=\" \")\r\n    print()\r\n    return a\r\n\r\n\r\n#Call the function, to display the Fibonacci numbers less than 400\r\nx = trad_fib(400)\r\nprint(\"The largest Fibonacci number less than 400...\",x)\r\n\r\nprint()\r\n\r\n#Call the function, to display the Fibonacci numbers less than 2000\r\nx = trad_fib(2000)\r\nprint(\"The largest Fibonacci number less than 2000...\",x)\r\n
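\r\n\r\n###########################################################################\r\n#A compact sketch of the same loop using tuple assignment; it returns the\r\n#same value as trad_fib above and is shown only for comparison (it does\r\n#not print the series)\r\n###########################################################################\r\ndef fib_below(n):\r\n    a, b = 1, 1\r\n    while b < n:\r\n        a, b = b, a + b\r\n    return a\r\n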
","sub_path":"try_this Lab 34.py","file_name":"try_this Lab 34.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"323622342","text":"'''\nThis class has static functions to calculate Google Maps coordinates.\n'''\n\nimport math\n\nclass GoogleMapsCoord:\n    TITLE_SIZE = 256\n\n    @staticmethod\n    def latlngToWorld(lat, lng):\n        '''\n        Generates an X,Y world coordinate based on the latitude, longitude\n\n        Returns: An X,Y world coordinate\n        '''\n\n        siny = math.sin(lat * math.pi / 180)\n        x = GoogleMapsCoord.TITLE_SIZE * (0.5 + lng / 360)\n        y = GoogleMapsCoord.TITLE_SIZE * (0.5 - math.log((1 + siny) / (1 - siny)) / (4 * math.pi))\n\n        return x, y\n\n    @staticmethod\n    def latlngToPixel(lat, lng, zoom):\n        '''\n        Generates an X,Y pixel coordinate based on the latitude, longitude\n        and zoom level\n\n        Returns: An X,Y pixel coordinate\n        '''\n\n        scale = 1 << zoom\n\n        x, y = GoogleMapsCoord.latlngToWorld(lat, lng)\n        x = math.floor(x * scale)\n        y = math.floor(y * scale)\n        \n        return int(x), int(y)\n\n    @staticmethod\n    def latlngToTile(lat, lng, zoom):\n        '''\n        Generates an X,Y tile coordinate based on the latitude, longitude\n        and zoom level\n\n        Returns: An X,Y tile coordinate\n        '''\n\n        x, y = GoogleMapsCoord.latlngToPixel(lat, lng, zoom)\n        x = math.floor(x/GoogleMapsCoord.TITLE_SIZE)\n        y = math.floor(y/GoogleMapsCoord.TITLE_SIZE)\n        \n        return int(x), int(y)","sub_path":"AI for Robotics/Exercices/Lesson 4 - Search/GoogleMapsCoord.py","file_name":"GoogleMapsCoord.py","file_ext":"py","file_size_in_byte":1423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"529160368","text":"# -*- coding:utf-8 -*-\n# python version= python3.X\n# code lines count about 60\nfrom hashlib import md5\n\n\ndef _fingerprint(string):\n    \"\"\"\n    the core of the fingerprint functions\n    :param string:\n    :return: finger print string\n    \"\"\"\n    # either sha1 or md5 would work; we chose md5 because its digest is shorter.\n    fp = md5(string.encode())\n    finger = fp.hexdigest()\n    # print(finger)\n    return finger\n\n\ndef data_fingerprint(*args):\n    \"\"\"\n    builds a fingerprint from one or more data strings\n    :param args: data type must be 'string'\n    :return: finger print string\n    \"\"\"\n    data_tuple = args\n    target_str = \"\"\n\n    # check the data type: if any item is not a string, raise an error\n    for item in data_tuple:\n        data_type = type(item)\n        if data_type is not str:\n            raise TypeError(\"the arguments must be 'string' type but '{}' given\".format(data_type))\n        else:\n            target_str += item\n    return _fingerprint(target_str)\n\n\ndef _sort_url(url):\n    \"\"\"\n    sort the url query so that the same url is not requested repeatedly just because its query parameters appear in a different order.\n    :param url:\n    :return: sorted url string\n    \"\"\"\n    # if there is no query string, just return the url\n    # (a bare url route is already in canonical form)\n    if \"?\" not in url:\n        return url\n    url_splited = url.split(\"?\")\n    url_rote = url_splited[0]\n    query_str = url_splited[1]\n    # if there are many query keys and values, sort them and then rebuild the url\n    if \"&\" in query_str:\n        query_list = query_str.split(\"&\")\n        query_list.sort()\n        query_str = \"&\".join(query_list)\n    url_sorted = url_rote + \"?\" + query_str\n    return url_sorted\n\n\ndef url_fingerprint(url):\n    \"\"\"\n    first sort the url via '_sort_url', then create the fingerprint for this url\n    :param url:\n    :return: finger print string of url\n    \"\"\"\n    url_sorted = _sort_url(url)\n    return _fingerprint(url_sorted)\n\n\nif __name__ == '__main__':\n    # a = \"nihao\"\n    # b = \"xixi\"\n    # c = \"haha\"\n    # finger_print = data_fingerprint(a, b, c)\n    # print(finger_print)\n    #\n    # url1 = \"http://blog.csdn.net/hechaoyuyu/article/details/6690912\"\n    # url2 = \"https://www.baidu.com/s?ie=utf-8\"\n    # url3 = \"https://www.baidu.com/s?ie=utf-8&f=3&rsv_bp=1&tn=baidu&wd=python%20%E8%8E%B7%E5%8F%96ascii&oq=python%2520%25E8%258E%25B7%25E5%258F%2596ascii&rsv_pq=907d8b15000122f4&rsv_t=fe65VoqsncMJTT3Ce0%2FRaVBNOGRR4CBd2DL1K47IA57Gs84yPjCuCr2IpMA&rqlang=cn&rsv_enter=0&prefixsug=python%2520%25E8%258E%25B7%25E5%258F%2596ascii&rsp=0\"\n    # urlsorted = _sort_url(url3)\n    # print(urlsorted)\n    
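# quick sanity sketch (illustrative urls, not from the original):\n    # assert url_fingerprint(\"http://x/?a=1&b=2\") == url_fingerprint(\"http://x/?b=2&a=1\")\n    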
pass\n","sub_path":"customTools/fingerPrintHome.py","file_name":"fingerPrintHome.py","file_ext":"py","file_size_in_byte":2616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"360071773","text":"from PyQt4.QtCore import QObject, pyqtSignal, QEvent, Qt, QPoint\n\nclass ClickReportingInterpreter(QObject):\n rightClickReceived = pyqtSignal(object, QPoint) # list of indexes, global window coordinate of click\n leftClickReceived = pyqtSignal(object, QPoint) # ditto\n \n def __init__(self, navigationInterpreter, positionModel):\n QObject.__init__(self)\n self.baseInterpret = navigationInterpreter\n self.posModel = positionModel\n\n def start( self ):\n self.baseInterpret.start()\n\n def stop( self ):\n self.baseInterpret.stop()\n\n def eventFilter( self, watched, event ):\n if event.type() == QEvent.MouseButtonPress:\n pos = self.posModel.cursorPos\n pos = [int(i) for i in pos]\n pos = [0,] + pos + [0,]\n\n if event.button() == Qt.LeftButton:\n gPos = watched.mapToGlobal( event.pos() )\n self.leftClickReceived.emit( pos, gPos )\n if event.button() == Qt.RightButton:\n gPos = watched.mapToGlobal( event.pos() )\n self.rightClickReceived.emit( pos, gPos ) \n\n # Event is always forwarded to the navigation interpreter.\n return self.baseInterpret.eventFilter(watched, event)\n\n","sub_path":"volumina/clickReportingInterpreter.py","file_name":"clickReportingInterpreter.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"19572014","text":"import pickle\nimport openpyxl as xl\n\ndef loadAISC():\n\twb2 = xl.load_workbook('shapes.xlsx')\n\titem2pickle = wb2.get_sheet_by_name('Database v15.0')\n\treturn item2pickle\n\t\n\t\ndef database2list():\n\t\n\tsheet = xl.load_workbook('shapes.xlsx')['Database v15.0']\n\t\n\tdata = []\n\tlabels = []\n\tfor row in sheet.iter_rows(max_row=2092):\n\t\tlabels.append(row[2].value)\n\t\trow_data = []\n\t\tfor cell in row:\n\t\t\trow_data.append(cell.value)\n\t\t\n\t\tdata.append(row_data)\n\t\t\n\treturn (data, labels)\n\ndef pickleObject(item2pickle, filename='pickleditem.txt'):\n\tfileObject = open(filename, 'wb')\n\tpickle.dump(item2pickle, fileObject)\n\tfileObject.close()\n\ndef unPickleObject(filename):\n\tfileObject = open(filename, 'rb')\n\tb = pickle.load(fileObject)\n\treturn b\n\ndef main():\n\ta = database2list()\n\tpickleObject(a)\n\nif __name__ == '__main__':\n\tmain()\n","sub_path":"pickle_sections.py","file_name":"pickle_sections.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"426710390","text":"# -*- coding: utf-8 -*-\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.spiders import CrawlSpider, Rule\nimport time\nfrom lj.items import IJItem\n\n\nclass IJSpider(CrawlSpider):\n name = '5i5j'\n allowed_domains = ['bj.5i5j.com']\n\n def __init__(self, category=None, *args, **kwargs):\n if category is None:\n category = 'haidian'\n category = str(category)\n self.start_urls = [\n 'http://bj.5i5j.com/exchange/n1',\n ]\n self.rules = (\n Rule(LinkExtractor(allow='exchange/n[0-9]+$', ), callback='parse_item', follow=True),\n )\n super(IJSpider, self).__init__(*args, **kwargs)\n\n def parse_item(self, response):\n li = response.xpath('//ul[@class=\"list-body\"]/li')\n for i in li:\n item = IJItem()\n item['id'] = i.xpath('.//a/@href').extract()[0].split('/')[2]\n item['unitPrice'] = 
i.xpath('.//div[@class=\"list-info-r\"]/p/text()').extract()[0]\n item['time'] = time.strftime('%Y-%m-%d %H:%M:%S')\n item['totalPrice'] = i.xpath('.//div[@class=\"list-info-r\"]/h3/text()').extract()[0]\n item['village'] = i.xpath('.//ul[@class=\"list-info-l\"]/li/a/h3/text()').extract()[0].strip()\n try:\n item['position'] = i.xpath('.//ul[@class=\"list-info-l\"]/li/a/text()').extract()[1]\n except IndexError:\n pass\n item['houseInfo'] = ' '.join(i.xpath('.//li[@class=\"font-balck\"]/span/text()').extract())\n yield item\n","sub_path":"zl/lj/lj/spiders/5i5j.py","file_name":"5i5j.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"469081485","text":"#!/usr/bin/python\n#\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"The MeasurementManager class, which conducts and reports on measurements\nwhich may be measured at varying intervals through training.\"\"\"\n\nfrom typing import Dict, Iterable, List, Any, Union, Callable, Optional\nfrom uv.reporter import AbstractReporter\n\n\nclass MeasurementManager:\n \"\"\"Class for a MeasurementManager. This manager is instantiated with a\n particular reporter, which it will use to report whatever measurements is\n conducts.\n\n The MeasurementManager has a set of measurements---stored in a dictionary\n (described below)---which are to be carried out at a certain frequency, which\n is specific to each measurement.\n\n Whenever the MeasurementManager's process() function is called, with a\n step number provided as an argument, the MeasurementManager decides which\n measurements are required at each step, carries them out, and reports them.\n\n In order to carry out the measurement, the Manager has a state (Dict) at each\n step, which contains all the information necessary to perform the measurement.\n Part of this state is static (e.g., the training dataset), while part can\n change at each step. This dynamic state is passed along in the process() call.\n\n Args:\n static_state: Dict of variables which will be used to compute the measured\n values. The static_state is fixed throughout time.\n reporter: Reporter which will report the measurements.\n \"\"\"\n\n def __init__(self, static_state: Dict[str, Any], reporter: AbstractReporter):\n self.static_state = static_state\n self.reporter = reporter\n self.measurements = {}\n\n def add_measurement(self, measurement_spec: Dict[str, Union[str, int,\n Callable]]):\n \"\"\"Adds a measurement specification to the MeasurementManager.\n Args:\n measurement_spec: Dict containing keys:\n 'name': String, the name of the measurement, e.g. 
'training_loss'\n        'interval': Integer, how often to measure\n        'function': Function, taking in the state_dictionary, and returning the\n          measured value\n    \"\"\"\n    name = measurement_spec['name']\n\n    if measurement_spec['interval'] <= 0:\n      raise ValueError(\n          'measurement_spec[\\'interval\\'] must be greater than zero')\n\n    self.measurements[name] = {\n        'interval': measurement_spec['interval'],\n        'function': measurement_spec['function']\n    }\n\n  def measure(self,\n              step: int,\n              dynamic_state: Dict[str, Any],\n              measurement_list: Optional[Iterable[str]] = None):\n    \"\"\"At a given step, decide which measurements need to be performed at\n    this step, and perform them.\n\n    Args:\n      step: Integer, the number of the current step\n      dynamic_state: Dict, the state variables necessary to perform the\n        measurement\n      measurement_list: Iterable[str], the list of measurements to make this\n        step. If not provided (the more common behavior), report the\n        measurements required by the measurements' intervals.\n    \"\"\"\n\n    # Add measurements required by this step to the specified measurements\n    if measurement_list is None:\n      measurement_list = self.triggered(step)\n\n    full_state = {**self.static_state, **dynamic_state}\n\n    measurements = {}\n    for name in measurement_list:\n      measurement_fn = self.measurements[name]['function']\n      measured_value = measurement_fn(full_state)\n\n      measurements[name] = measured_value\n\n    if len(measurements) > 0:\n      self.reporter.report_all(step, measurements)\n\n  def triggered(self, step: int) -> Iterable[str]:\n    \"\"\"Returns an iterable of measurement names which are to be\n    performed at the given step\"\"\"\n\n    for name, spec in self.measurements.items():\n      if step % spec['interval'] == 0:\n        yield name\n
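\n\n# Minimal usage sketch (the names 'train_loss', data and reporter below are\n# illustrative, not part of this module):\n#\n#   manager = MeasurementManager(static_state={'data': data}, reporter=reporter)\n#   manager.add_measurement({'name': 'train_loss', 'interval': 10,\n#                            'function': lambda state: state['loss']})\n#   manager.measure(step, dynamic_state={'loss': current_loss})\n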
","sub_path":"uv/manager/impl.py","file_name":"impl.py","file_ext":"py","file_size_in_byte":4475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"360071773","text":"\nimport numpy as np\nimport sys\nimport argparse\nimport pickle # for handling the new data source\nimport h5py # for saving the model\nimport keras\nimport tensorflow as tf\nfrom keras.layers import Conv2D, MaxPooling2D, AveragePooling2D, Dropout, Flatten, Dense, Input\nfrom keras.layers.merge import concatenate\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.layers.advanced_activations import LeakyReLU\nfrom keras.models import Model\nfrom keras.preprocessing.image import ImageDataGenerator # used by multiinput_generator below\nfrom datetime import datetime # for filename conventions\n\nfrom tensorflow.python.lib.io import file_io # for better file I/O\n\n\ndef multiinput_generator(full, med, low, label):\n    '''custom generator to be passed to main training\n    note samplewise std normalization + batch size\n    '''\n    while True:\n        # shuffled indices\n        idx = np.random.permutation(full.shape[0])\n        # create image generator\n        datagen = ImageDataGenerator(\n            featurewise_center=False,  # set input mean to 0 over the dataset\n            samplewise_center=False,  # set each sample mean to 0\n            featurewise_std_normalization=False,  # divide inputs by std of the dataset\n            samplewise_std_normalization=True,  # divide each input by its std\n            zca_whitening=False)  # apply ZCA whitening\n        batches = datagen.flow(full[idx], label[idx], batch_size=16, shuffle=False)\n        idx0 = 0\n        for batch in batches:\n            idx1 = idx0 + batch[0].shape[0]\n            yield [batch[0], med[idx[idx0:idx1]], low[idx[idx0:idx1]]], batch[1]\n            idx0 = idx1\n            if idx1 >= full.shape[0]:\n                break\n\n\ndef resizer(arrays, size, method):\n    return tf.map_fn(lambda array: \n                     tf.image.resize_images(array,\n                                            [size, size],\n                                            method=method), \n                     arrays)\n\n\ndef singleres_to_multires(arrays, size1=64, size2=32, \n                          method=tf.image.ResizeMethod.BILINEAR):\n    with tf.Session() as session:\n        size1_arrays = resizer(arrays, size1, method).eval()\n        size2_arrays = resizer(arrays, size2, method).eval()\n    return [arrays, size1_arrays, size2_arrays]\n
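\n\n# Shape sketch (assuming, say, 128x128 RGB inputs): singleres_to_multires\n# returns [(N,128,128,3), (N,64,64,3), (N,32,32,3)] via bilinear resizing.\n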
\n\ndef load_multires(images, labels):\n    images_reshape = reshape(images)\n    multires_images = singleres_to_multires(images_reshape)\n    return multires_images, labels\n\n\ndef get_input_shape(data):\n    num_samples = data.shape[0]\n    channels = 3\n    img_rows = data.shape[2]\n    img_cols = data.shape[3]\n    return (num_samples, img_rows, img_cols, channels)\n\n\ndef reshape(data):\n    return np.reshape(data, get_input_shape(data))\n\n\ndef train_test_split(array, proportion=0.8):\n    '''non randomised train split\n    '''\n    index = int(len(array) * proportion)\n    train = array[:index]\n    test = array[index:]\n    return train, test\n\n\ndef radian_to_angle(radian_array):\n    '''converts original radian to angle which\n    will be error metric\n    '''\n    return (radian_array * 180 / np.pi) - 90\n\n\ndef reverse_mean_std(standardized_array, prev_mean, prev_std):\n    '''undo transformation in order to calculate\n    angle loss\n    '''\n    de_std = standardized_array * prev_std\n    de_mean = de_std + prev_mean\n    return de_mean\n\n\ndef generator_train(images, labels):\n    '''main entry point\n    calls customised multiinput generator\n    and tests angle loss\n    '''\n    multires_data, labels = load_multires(images, labels)\n    multires_data = [x.astype('float32') for x in multires_data]\n    multires_data = [x / 255 for x in multires_data]\n    model = multires_CNN(16, 5, multires_data)\n    full = multires_data[0]\n    med = multires_data[1]\n    low = multires_data[2]\n    train_full, test_full = train_test_split(full)\n    train_med, test_med = train_test_split(med)\n    train_low, test_low = train_test_split(low)\n    labels_angles = radian_to_angle(labels)\n    train_orig_lab, test_orig_lab = train_test_split(labels_angles)\n    labels_standardised, mean_, std_ = mean_std_norm(labels_angles)\n    train_labels, test_labels = train_test_split(labels_standardised)\n    model.fit_generator(multiinput_generator(train_full, train_med, train_low, train_labels),\n                        steps_per_epoch=16,\n                        epochs=50)\n    # return the angle-space test labels plus the normalisation stats needed\n    # to undo the standardisation of the model's predictions\n    return model, test_full, test_med, test_low, test_orig_lab, mean_, std_\n\n\ndef calculate_error(model, test_full, test_med, test_low, test_labels, mean_, std_):\n    std_angles = model.predict([test_full, test_med, test_low])\n    unstd_angles = reverse_mean_std(std_angles, mean_, std_)\n    error = unstd_angles - test_labels\n    mean_error_elevation = np.mean(abs(error[:, 0]))\n    mean_error_zenith = np.mean(abs(error[:, 1]))\n    print(mean_error_elevation)\n    print(mean_error_zenith)\n    return mean_error_elevation, mean_error_zenith\n\n\ndef mean_std_norm(array):\n    '''standardization for labels\n    '''\n    mean_ = np.mean(array)\n    std_ = np.std(array)\n    standardized = (array - mean_) / std_\n    return standardized, mean_, std_\n\n\ndef multires_CNN(filters, kernel_size, multires_data):\n    '''uses Functional API for Keras 2.x support.\n    multires data is output from load_multires()\n    '''\n    input_fullres = Input(multires_data[0].shape[1:], name = 'input_fullres')\n    fullres_branch = Conv2D(filters, (kernel_size, kernel_size),\n                            activation = LeakyReLU())(input_fullres)\n    fullres_branch = MaxPooling2D(pool_size = (2,2))(fullres_branch)\n    fullres_branch = BatchNormalization()(fullres_branch)\n    fullres_branch = Flatten()(fullres_branch)\n\n    input_medres = Input(multires_data[1].shape[1:], name = 'input_medres')\n    medres_branch = Conv2D(filters, (kernel_size, kernel_size),\n                           activation=LeakyReLU())(input_medres)\n    medres_branch = MaxPooling2D(pool_size = (2,2))(medres_branch)\n    medres_branch = BatchNormalization()(medres_branch)\n    medres_branch = Flatten()(medres_branch)\n\n    input_lowres = Input(multires_data[2].shape[1:], name = 'input_lowres')\n    lowres_branch = Conv2D(filters, (kernel_size, kernel_size),\n                           activation = LeakyReLU())(input_lowres)\n    lowres_branch = MaxPooling2D(pool_size = (2,2))(lowres_branch)\n    lowres_branch = BatchNormalization()(lowres_branch)\n    lowres_branch = Flatten()(lowres_branch)\n\n    merged_branches = concatenate([fullres_branch, medres_branch, lowres_branch])\n    merged_branches = Dense(128, activation=LeakyReLU())(merged_branches)\n    merged_branches = Dropout(0.5)(merged_branches)\n    merged_branches = Dense(2,activation='linear')(merged_branches)\n\n    model = Model(inputs=[input_fullres, input_medres ,input_lowres],\n                  outputs=[merged_branches])\n    model.compile(loss='mean_absolute_error', optimizer='adam')\n\n    return model\n\n\ndef train_model():\n\n    images = np.load('AllImages.npy')\n    #labels_ = np.load(labelsio)\n    #labels = labels_[:500]\n    labels = np.load('AllAngles.npy')\n\n    model, test_full, test_med, test_low, test_labels, mean_, std_ = generator_train(images, labels)\n\n    error = calculate_error(model, test_full, test_med, test_low, test_labels, mean_, std_)\n    #file_stream_images = file_io.FileIO(train_files+'/AllImages.npy', mode='r')\n    #file_stream_labels = file_io.FileIO(train_files+'/AllAngles.npy', mode='r')\n    #images = np.load(file_stream_images)\n    #labels = np.load(file_stream_labels)\n\n\n\nif __name__ == '__main__':\n    train_model()\n\n","sub_path":"modelling/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":7404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"375769281","text":"# Copyright 2020 Tensorforce Team. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nimport importlib\nimport json\nimport os\nimport random\nimport time\nfrom collections import OrderedDict\n\nimport numpy as np\n\nimport tensorforce.agents\nfrom tensorforce import util, TensorforceError\nfrom tensorforce.core import ArrayDict, ListDict, TensorDict, TensorforceConfig, TensorSpec\n\n\nclass Agent(object):\n \"\"\"\n Tensorforce agent interface.\n \"\"\"\n\n @staticmethod\n def create(agent='tensorforce', environment=None, **kwargs):\n \"\"\"\n Creates an agent from a specification.\n\n Args:\n agent (specification | Agent class/object): JSON file, specification key, configuration\n dictionary, library module, or `Agent` class/object\n (default: Policy agent).\n environment (Environment object): Environment which the agent is supposed to be trained\n on, environment-related arguments like state/action space specifications and\n maximum episode length will be extract if given\n (recommended).\n kwargs: Additional agent arguments.\n \"\"\"\n if isinstance(agent, Agent):\n if environment is not None:\n # TODO:\n # assert agent.spec['states'] == environment.states()\n # assert agent.spec['actions'] == environment.actions()\n # assert environment.max_episode_timesteps() is None or \\\n # agent.spec['max_episode_timesteps'] >= environment.max_episode_timesteps()\n pass\n\n for key, value in kwargs.items():\n if key == 'parallel_interactions':\n assert agent.spec[key] >= value\n else:\n assert agent.spec[key] == value\n\n if agent.is_initialized:\n agent.reset()\n else:\n agent.initialize()\n\n return agent\n\n elif isinstance(agent, type) and issubclass(agent, Agent):\n if environment is not None:\n if 'states' in kwargs:\n # TODO:\n # assert kwargs['states'] == environment.states()\n pass\n else:\n kwargs['states'] = environment.states()\n if 'actions' in kwargs:\n # assert kwargs['actions'] == environment.actions()\n pass\n else:\n kwargs['actions'] = environment.actions()\n if environment.max_episode_timesteps() is None:\n pass\n elif 'max_episode_timesteps' in kwargs:\n # assert kwargs['max_episode_timesteps'] >= environment.max_episode_timesteps()\n pass\n else:\n kwargs['max_episode_timesteps'] = environment.max_episode_timesteps()\n\n agent = agent(**kwargs)\n assert isinstance(agent, Agent)\n return Agent.create(agent=agent, environment=environment)\n\n elif isinstance(agent, dict):\n # Dictionary specification\n agent.update(kwargs)\n kwargs = dict(agent)\n agent = kwargs.pop('agent', kwargs.pop('type', 'default'))\n\n return Agent.create(agent=agent, environment=environment, **kwargs)\n\n elif isinstance(agent, str):\n if os.path.isfile(agent):\n # JSON file specification\n with open(agent, 'r') as fp:\n agent = json.load(fp=fp)\n return Agent.create(agent=agent, environment=environment, **kwargs)\n\n elif '.' 
in agent:\n # Library specification\n library_name, module_name = agent.rsplit('.', 1)\n library = importlib.import_module(name=library_name)\n agent = getattr(library, module_name)\n return Agent.create(agent=agent, environment=environment, **kwargs)\n\n elif agent in tensorforce.agents.agents:\n # Keyword specification\n agent = tensorforce.agents.agents[agent]\n return Agent.create(agent=agent, environment=environment, **kwargs)\n\n else:\n raise TensorforceError.value(name='Agent.create', argument='agent', value=agent)\n\n else:\n raise TensorforceError.type(name='Agent.create', argument='agent', dtype=type(agent))\n\n @staticmethod\n def load(directory=None, filename=None, format=None, environment=None, **kwargs):\n \"\"\"\n Restores an agent from a directory/file.\n\n Args:\n directory (str): Checkpoint directory\n (required, unless saver is specified).\n filename (str): Checkpoint filename, with or without append and extension\n (default: \"agent\").\n format (\"checkpoint\" | \"saved-model\" | \"numpy\" | \"hdf5\"): File format, \"saved-model\" loads\n an act-only agent based on a Protobuf model\n (default: format matching directory and\n filename, required to be unambiguous).\n environment (Environment object): Environment which the agent is supposed to be trained\n on, environment-related arguments like state/action space specifications and\n maximum episode length will be extract if given\n (recommended).\n kwargs: Additional agent arguments.\n \"\"\"\n if directory is not None:\n if filename is None:\n filename = 'agent'\n agent = os.path.join(directory, os.path.splitext(filename)[0] + '.json')\n if not os.path.isfile(agent) and agent[agent.rfind('-') + 1: -5].isdigit():\n agent = agent[:agent.rindex('-')] + '.json'\n if os.path.isfile(agent):\n with open(agent, 'r') as fp:\n agent = json.load(fp=fp)\n if 'agent' in kwargs:\n if 'agent' in agent and agent['agent'] != kwargs['agent']:\n raise TensorforceError.value(\n name='Agent.load', argument='agent', value=kwargs['agent']\n )\n agent['agent'] = kwargs.pop('agent')\n else:\n agent = kwargs\n kwargs = dict()\n else:\n agent = kwargs\n kwargs = dict()\n\n # Overwrite values\n if environment is not None and environment.max_episode_timesteps() is not None:\n if 'max_episode_timesteps' in kwargs:\n assert kwargs['max_episode_timesteps'] >= environment.max_episode_timesteps()\n agent['max_episode_timesteps'] = kwargs['max_episode_timesteps']\n else:\n agent['max_episode_timesteps'] = environment.max_episode_timesteps()\n if 'parallel_interactions' in kwargs and kwargs['parallel_interactions'] > 1:\n agent['parallel_interactions'] = kwargs['parallel_interactions']\n\n agent.pop('internals', None)\n agent.pop('initial_internals', None)\n saver_restore = False\n if 'saver' in agent and isinstance(agent['saver'], dict):\n if not agent.get('load', True):\n raise TensorforceError.value(\n name='Agent.load', argument='saver[load]', value=agent['saver']['load']\n )\n agent['saver'] = dict(agent['saver'])\n agent['saver']['load'] = True\n saver_restore = True\n elif 'saver' in kwargs and isinstance(kwargs['saver'], dict):\n if not kwargs.get('load', True):\n raise TensorforceError.value(\n name='Agent.load', argument='saver[load]', value=kwargs['saver']['load']\n )\n kwargs['saver'] = dict(kwargs['saver'])\n kwargs['saver']['load'] = True\n saver_restore = True\n agent = Agent.create(agent=agent, environment=environment, **kwargs)\n if not saver_restore:\n agent.restore(directory=directory, filename=filename, format=format)\n\n return 
agent\n\n def __init__(\n self, states, actions, max_episode_timesteps=None, parallel_interactions=1, config=None,\n recorder=None\n ):\n util.overwrite_staticmethod(obj=self, function='create')\n util.overwrite_staticmethod(obj=self, function='load')\n\n self.is_initialized = False\n\n # Check whether spec attribute exists\n if not hasattr(self, 'spec'):\n raise TensorforceError.required_attribute(name='Agent', attribute='spec')\n\n # States/actions, plus single state/action flag\n if 'shape' in states:\n self.states_spec = dict(state=states)\n self.single_state = True\n else:\n self.states_spec = states\n self.single_state = False\n if 'type' in actions:\n self.actions_spec = dict(action=actions)\n self.single_action = True\n else:\n self.actions_spec = actions\n self.single_action = False\n\n # Max episode timesteps\n self.max_episode_timesteps = max_episode_timesteps\n\n # Parallel interactions\n if isinstance(parallel_interactions, int):\n if parallel_interactions <= 0:\n raise TensorforceError.value(\n name='Agent', argument='parallel_interactions', value=parallel_interactions,\n hint='<= 0'\n )\n self.parallel_interactions = parallel_interactions\n else:\n raise TensorforceError.type(\n name='Agent', argument='parallel_interactions', dtype=type(parallel_interactions)\n )\n\n # Config\n if config is None:\n config = dict()\n self.config = TensorforceConfig(**config)\n\n os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(self.config.tf_log_level)\n\n # Import tensorflow after setting log level\n import tensorflow as tf\n\n # Eager mode\n if self.config.eager_mode:\n tf.config.experimental_run_functions_eagerly(run_eagerly=True)\n\n # Random seed\n if self.config.seed is not None:\n random.seed(a=self.config.seed)\n np.random.seed(seed=self.config.seed)\n tf.random.set_seed(seed=self.config.seed)\n\n # Recorder\n if recorder is None:\n pass\n elif not all(key in ('directory', 'frequency', 'max-traces', 'start') for key in recorder):\n raise TensorforceError.value(\n name='Agent', argument='recorder values', value=list(recorder),\n hint='not from {directory,frequency,max-traces,start}'\n )\n self.recorder_spec = recorder if recorder is None else dict(recorder)\n\n def __str__(self):\n return self.__class__.__name__\n\n def initialize(self):\n \"\"\"\n Initialize the agent. 
Automatically triggered as part of Agent.create/load.\n \"\"\"\n # Check whether already initialized\n if self.is_initialized:\n raise TensorforceError(\n message=\"Agent is already initialized, possibly as part of Agent.create().\"\n )\n self.is_initialized = True\n\n # Initialize model\n if not hasattr(self, 'model'):\n raise TensorforceError.required_attribute(name='Agent', attribute='model')\n self.model.initialize()\n\n # Value space specifications\n self.states_spec = self.model.states_spec\n self.internals_spec = self.model.internals_spec\n self.auxiliaries_spec = self.model.auxiliaries_spec\n self.actions_spec = self.model.actions_spec\n self.terminal_spec = self.model.terminal_spec\n self.reward_spec = self.model.reward_spec\n self.parallel_spec = self.model.parallel_spec\n\n # Act-observe timestep completed check\n self.timestep_completed = np.ones(\n shape=(self.parallel_interactions,), dtype=util.np_dtype(dtype='bool')\n )\n\n # Parallel terminal/reward buffers\n self.buffers = ListDict()\n self.buffers['terminal'] = [list() for _ in range(self.parallel_interactions)]\n self.buffers['reward'] = [list() for _ in range(self.parallel_interactions)]\n\n # Recorder buffers if required\n if self.recorder_spec is not None:\n self.num_episodes = 0\n\n def function(spec):\n return [list() for _ in range(self.parallel_interactions)]\n\n self.buffers['states'] = self.states_spec.fmap(function=function, cls=ListDict)\n self.buffers['auxiliaries'] = self.auxiliaries_spec.fmap(\n function=function, cls=ListDict\n )\n self.buffers['actions'] = self.actions_spec.fmap(function=function, cls=ListDict)\n\n function = (lambda x: list())\n\n self.recorded = ListDict()\n self.recorded['states'] = self.states_spec.fmap(function=function, cls=ListDict)\n self.recorded['auxiliaries'] = self.auxiliaries_spec.fmap(\n function=function, cls=ListDict\n )\n self.recorded['actions'] = self.actions_spec.fmap(function=function, cls=ListDict)\n self.recorded['terminal'] = list()\n self.recorded['reward'] = list()\n\n if self.model.saver is not None:\n path = os.path.join(self.model.saver_directory, self.model.saver_filename + '.json')\n try:\n with open(path, 'w') as fp:\n spec = OrderedDict(self.spec)\n spec['internals'] = self.internals_spec\n spec['initial_internals'] = self.initial_internals()\n json.dump(obj=spec, fp=fp, cls=TensorforceJSONEncoder)\n except BaseException:\n try:\n with open(path, 'w') as fp:\n spec = OrderedDict()\n spec['states'] = self.spec['states']\n spec['actions'] = self.spec['actions']\n spec['internals'] = self.internals_spec\n spec['initial_internals'] = self.initial_internals()\n json.dump(obj=spec, fp=fp, cls=TensorforceJSONEncoder)\n except BaseException:\n os.remove(path)\n raise\n\n # Reset model\n timesteps, episodes, updates = self.model.reset()\n self.timesteps = timesteps.numpy().item()\n self.episodes = episodes.numpy().item()\n self.updates = updates.numpy().item()\n\n def close(self):\n \"\"\"\n Closes the agent.\n \"\"\"\n self.model.close()\n del self.model\n\n def reset(self):\n \"\"\"\n Resets possibly inconsistent internal values, for instance, after saving and restoring an\n agent. 
Automatically triggered as part of Agent.create/load/initialize/restore.\n \"\"\"\n # Reset timestep completed\n self.timestep_completed = np.ones(\n shape=(self.parallel_interactions,), dtype=util.np_dtype(dtype='bool')\n )\n\n # Reset buffers\n for buffer in self.buffers.values():\n for x in buffer:\n x.clear()\n if self.recorder_spec is not None:\n for x in self.recorded.values():\n x.clear()\n\n # Reset model\n timesteps, episodes, updates = self.model.reset()\n self.timesteps = timesteps.numpy().item()\n self.episodes = episodes.numpy().item()\n self.updates = updates.numpy().item()\n\n if self.model.saver is not None:\n self.model.save()\n\n def initial_internals(self):\n \"\"\"\n Returns the initial internal agent state(s), to be used at the beginning of an episode as\n `internals` argument for `act()` in independent mode\n\n Returns:\n dict[internal]: Dictionary containing initial internal agent state(s).\n \"\"\"\n return self.model.internals_init.fmap(function=(lambda x: x), cls=OrderedDict)\n\n def act(\n self, states, internals=None, parallel=0, independent=False,\n # Deprecated\n deterministic=None, evaluation=None\n ):\n \"\"\"\n Returns action(s) for the given state(s), needs to be followed by `observe()` unless\n independent mode.\n\n Args:\n states (dict[state] | iter[dict[state]]): Dictionary containing state(s) to be acted on\n (required).\n internals (dict[internal] | iter[dict[internal]]): Dictionary containing current\n internal agent state(s), either given by `initial_internals()` at the beginning of\n an episode or as return value of the preceding `act()` call\n (required if independent mode and agent\n has internal states).\n parallel (int | iter[int]): Parallel execution index\n (default: 0).\n independent (bool): Whether act is not part of the main agent-environment interaction,\n and this call is thus not followed by observe\n (default: false).\n\n Returns:\n dict[action] | iter[dict[action]], dict[internal] | iter[dict[internal]] if `internals`\n argument given: Dictionary containing action(s), dictionary containing next internal\n agent state(s) if independent mode.\n \"\"\"\n if deterministic is not None:\n raise TensorforceError.deprecated(\n name='Agent.act', argument='deterministic', replacement='independent'\n )\n if evaluation is not None:\n raise TensorforceError.deprecated(\n name='Agent.act', argument='evaluation', replacement='independent'\n )\n\n # Independent and internals\n if independent:\n if parallel != 0:\n raise TensorforceError.invalid(\n name='Agent.act', argument='parallel', condition='independent is true'\n )\n is_internals_none = (internals is None)\n if is_internals_none and len(self.internals_spec) > 0:\n raise TensorforceError.required(\n name='Agent.act', argument='internals', condition='independent is true'\n )\n else:\n if internals is not None:\n raise TensorforceError.invalid(\n name='Agent.act', argument='internals', condition='independent is false'\n )\n\n # Process states input and infer batching structure\n states, batched, num_parallel, is_iter_of_dicts, input_type = self._process_states_input(\n states=states, function_name='Agent.act'\n )\n\n if independent:\n # Independent mode: handle internals argument\n\n if is_internals_none:\n # Default input internals=None\n pass\n\n elif is_iter_of_dicts:\n # Input structure iter[dict[internal]]\n if not isinstance(internals, (tuple, list)):\n raise TensorforceError.type(\n name='Agent.act', argument='internals', dtype=type(internals),\n hint='is not tuple/list'\n )\n internals = 
[ArrayDict(internal) for internal in internals]\n internals = internals[0].fmap(\n function=(lambda *xs: np.stack(xs, axis=0)), zip_values=internals[1:]\n )\n\n else:\n # Input structure dict[iter[internal]]\n if not isinstance(internals, dict):\n raise TensorforceError.type(\n name='Agent.act', argument='internals', dtype=type(internals),\n hint='is not dict'\n )\n internals = ArrayDict(internals)\n\n if not independent or not is_internals_none:\n # Expand inputs if not batched\n if not batched:\n internals = internals.fmap(function=(lambda x: np.expand_dims(x, axis=0)))\n\n # Check number of inputs\n for name, internal in internals.items():\n if internal.shape[0] != num_parallel:\n raise TensorforceError.value(\n name='Agent.act', argument='len(internals[{}])'.format(name),\n value=internal.shape[0], hint='!= len(states)'\n )\n\n else:\n # Non-independent mode: handle parallel input\n\n if parallel == 0:\n # Default input parallel=0\n if batched:\n assert num_parallel == self.parallel_interactions\n parallel = np.asarray(list(range(num_parallel)))\n else:\n parallel = np.asarray([parallel])\n\n elif batched:\n # Batched input\n parallel = np.asarray(parallel)\n\n else:\n # Expand input if not batched\n parallel = np.asarray([parallel])\n\n # Check number of inputs\n if parallel.shape[0] != num_parallel:\n raise TensorforceError.value(\n name='Agent.act', argument='len(parallel)', value=len(parallel),\n hint='!= len(states)'\n )\n\n def function(name, spec):\n auxiliary = ArrayDict()\n if self.config.enable_int_action_masking and spec.type == 'int' and \\\n spec.num_values is not None:\n # Mask, either part of states or default all true\n auxiliary['mask'] = states.pop(name + '_mask', np.ones(\n shape=(num_parallel,) + spec.shape + (spec.num_values,), dtype=spec.np_type()\n ))\n return auxiliary\n\n auxiliaries = self.actions_spec.fmap(function=function, cls=ArrayDict, with_names=True)\n\n # If not independent, check whether previous timesteps were completed\n if not independent:\n if not self.timestep_completed[parallel].all():\n raise TensorforceError(\n message=\"Calling agent.act must be preceded by agent.observe.\"\n )\n self.timestep_completed[parallel] = False\n\n # Buffer inputs for recording\n if self.recorder_spec is not None and not independent and \\\n self.episodes >= self.recorder_spec.get('start', 0):\n for n in range(num_parallel):\n for name in self.states_spec:\n self.buffers['states'][name][parallel[n]].append(states[name][n])\n for name in self.auxiliaries_spec:\n self.buffers['auxiliaries'][name][parallel[n]].append(auxiliaries[name][n])\n\n # Inputs to tensors\n states = self.states_spec.to_tensor(value=states, batched=True)\n if independent and not is_internals_none:\n internals = self.internals_spec.to_tensor(value=internals, batched=True)\n auxiliaries = self.auxiliaries_spec.to_tensor(value=auxiliaries, batched=True)\n parallel_tensor = self.parallel_spec.to_tensor(value=parallel, batched=True)\n\n # Model.act()\n if not independent:\n actions, timesteps = self.model.act(\n states=states, auxiliaries=auxiliaries, parallel=parallel_tensor\n )\n self.timesteps = timesteps.numpy().item()\n\n elif len(self.internals_spec) > 0:\n if len(self.auxiliaries_spec) > 0:\n actions_internals = self.model.independent_act(\n states=states, internals=internals, auxiliaries=auxiliaries\n )\n else:\n assert len(auxiliaries) == 0\n actions_internals = self.model.independent_act(states=states, internals=internals)\n actions_internals = TensorDict(actions_internals)\n actions = 
actions_internals['actions']\n internals = actions_internals['internals']\n\n else:\n if len(self.auxiliaries_spec) > 0:\n actions = self.model.independent_act(states=states, auxiliaries=auxiliaries)\n else:\n assert len(auxiliaries) == 0\n actions = self.model.independent_act(states=states)\n actions = TensorDict(actions)\n\n # Outputs from tensors\n # print(actions)\n actions = self.actions_spec.from_tensor(tensor=actions, batched=True)\n\n # Buffer outputs for recording\n if self.recorder_spec is not None and not independent and \\\n self.episodes >= self.recorder_spec.get('start', 0):\n for n in range(num_parallel):\n for name in self.actions_spec:\n self.buffers['actions'][name][parallel[n]].append(actions[name][n])\n\n # Unbatch actions\n if batched:\n # If inputs were batched, turn list of dicts into dict of lists\n function = (lambda x: x.item() if x.shape == () else x)\n if self.single_action:\n actions = input_type(function(actions['action'][n]) for n in range(num_parallel))\n else:\n # TODO: recursive\n actions = input_type(\n OrderedDict(((name, function(x[n])) for name, x in actions.items()))\n for n in range(num_parallel)\n )\n\n if independent and not is_internals_none and is_iter_of_dicts:\n # TODO: recursive\n internals = input_type(\n OrderedDict(((name, function(x[n])) for name, x in internals.items()))\n for n in range(num_parallel)\n )\n\n else:\n # If inputs were not batched, unbatch outputs\n function = (lambda x: x.item() if x.shape == (1,) else x[0])\n if self.single_action:\n actions = function(actions['action'])\n else:\n actions = actions.fmap(function=function, cls=OrderedDict)\n if independent and not is_internals_none:\n internals = internals.fmap(function=function, cls=OrderedDict)\n\n if self.model.saver is not None:\n self.model.save()\n\n if independent and not is_internals_none:\n return actions, internals\n else:\n return actions\n\n def observe(self, reward=0.0, terminal=False, parallel=0):\n \"\"\"\n Observes reward and whether a terminal state is reached, needs to be preceded by `act()`.\n\n Args:\n reward (float | iter[float]): Reward\n (default: 0.0).\n terminal (bool | 0 | 1 | 2 | iter[...]): Whether a terminal state is reached, or 2 if\n the episode was aborted\n (default: false).\n parallel (int, iter[int]): Parallel execution index\n (default: 0).\n\n Returns:\n int: Number of performed updates.\n \"\"\"\n # Check whether inputs are batched\n if util.is_iterable(x=reward):\n reward = np.asarray(reward)\n num_parallel = reward.shape[0]\n if terminal is False:\n terminal = np.asarray([0 for _ in range(num_parallel)])\n else:\n terminal = np.asarray(terminal)\n if parallel == 0:\n assert num_parallel == self.parallel_interactions\n parallel = np.asarray(list(range(num_parallel)))\n else:\n parallel = np.asarray(parallel)\n\n elif util.is_iterable(x=terminal):\n terminal = np.asarray([int(t) for t in terminal])\n num_parallel = terminal.shape[0]\n if reward == 0.0:\n reward = np.asarray([0.0 for _ in range(num_parallel)])\n else:\n reward = np.asarray(reward)\n if parallel == 0:\n assert num_parallel == self.parallel_interactions\n parallel = np.asarray(list(range(num_parallel)))\n else:\n parallel = np.asarray(parallel)\n\n elif util.is_iterable(x=parallel):\n parallel = np.asarray(parallel)\n num_parallel = parallel.shape[0]\n if reward == 0.0:\n reward = np.asarray([0.0 for _ in range(num_parallel)])\n else:\n reward = np.asarray(reward)\n if terminal is False:\n terminal = np.asarray([0 for _ in range(num_parallel)])\n else:\n terminal = 
np.asarray(terminal)\n\n else:\n reward = np.asarray([float(reward)])\n terminal = np.asarray([int(terminal)])\n parallel = np.asarray([int(parallel)])\n num_parallel = 1\n\n # Check whether shapes/lengths are consistent\n if parallel.shape[0] == 0:\n raise TensorforceError.value(\n name='Agent.observe', argument='len(parallel)', value=parallel.shape[0], hint='= 0'\n )\n if reward.shape != parallel.shape:\n raise TensorforceError.value(\n name='Agent.observe', argument='len(reward)', value=reward.shape,\n hint='!= parallel length'\n )\n if terminal.shape != parallel.shape:\n raise TensorforceError.value(\n name='Agent.observe', argument='len(terminal)', value=terminal.shape,\n hint='!= parallel length'\n )\n\n # Convert terminal to int if necessary\n if terminal.dtype is util.np_dtype(dtype='bool'):\n zeros = np.zeros_like(terminal, dtype=util.np_dtype(dtype='int'))\n ones = np.ones_like(terminal, dtype=util.np_dtype(dtype='int'))\n terminal = np.where(terminal, ones, zeros)\n\n # Check whether current timesteps are not completed\n if self.timestep_completed[parallel].any():\n raise TensorforceError(message=\"Calling agent.observe must be preceded by agent.act.\")\n self.timestep_completed[parallel] = True\n\n # Process per parallel interaction\n num_updates = 0\n for n in range(num_parallel):\n\n # Buffer inputs\n p = parallel[n]\n self.buffers['terminal'][p].append(terminal[n])\n self.buffers['reward'][p].append(reward[n])\n\n # Check whether episode is too long\n if self.max_episode_timesteps is not None and \\\n len(self.buffers['terminal'][p]) > self.max_episode_timesteps:\n raise TensorforceError(message=\"Episode longer than max_episode_timesteps.\")\n\n # Continue if not terminal and buffer_observe\n if terminal[n].item() == 0 and (\n self.config.buffer_observe == 'episode' or\n len(self.buffers['terminal'][p]) < self.config.buffer_observe\n ):\n continue\n\n # Buffered terminal/reward inputs\n t = np.asarray(self.buffers['terminal'][p], dtype=self.terminal_spec.np_type())\n r = np.asarray(self.buffers['reward'][p], dtype=self.reward_spec.np_type())\n self.buffers['terminal'][p].clear()\n self.buffers['reward'][p].clear()\n\n # Recorder\n if self.recorder_spec is not None and \\\n self.episodes >= self.recorder_spec.get('start', 0):\n\n # Store buffered values\n for name in self.states_spec:\n self.recorded['states'][name].append(\n np.stack(self.buffers['states'][name][p], axis=0)\n )\n self.buffers['states'][name][p].clear()\n for name in self.auxiliaries_spec:\n self.recorded['auxiliaries'][name].append(\n np.stack(self.buffers['auxiliaries'][name][p], axis=0)\n )\n self.buffers['auxiliaries'][name][p].clear()\n for name, spec in self.actions_spec.items():\n self.recorded['actions'][name].append(\n np.stack(self.buffers['actions'][name][p], axis=0)\n )\n self.buffers['actions'][name][p].clear()\n self.recorded['terminal'].append(t.copy())\n self.recorded['reward'].append(r.copy())\n\n # If terminal\n if t[-1] > 0:\n self.num_episodes += 1\n\n # Check whether recording step\n if self.num_episodes == self.recorder_spec.get('frequency', 1):\n self.num_episodes = 0\n\n # Manage recorder directory\n directory = self.recorder_spec['directory']\n if os.path.isdir(directory):\n files = sorted(\n f for f in os.listdir(directory)\n if os.path.isfile(os.path.join(directory, f))\n and os.path.splitext(f)[1] == '.npz'\n )\n else:\n os.makedirs(directory)\n files = list()\n max_traces = self.recorder_spec.get('max-traces')\n if max_traces is not None and len(files) > max_traces - 1:\n for 
filename in files[:-max_traces + 1]:\n filename = os.path.join(directory, filename)\n os.remove(filename)\n\n # Write recording file\n filename = os.path.join(directory, 'trace-{:09d}.npz'.format(self.episodes))\n # time.strftime('%Y%m%d-%H%M%S')\n kwargs = self.recorded.fmap(function=np.concatenate, cls=ArrayDict).items()\n np.savez_compressed(file=filename, **dict(kwargs))\n\n # Clear recorded values\n for recorded in self.recorded.values():\n recorded.clear()\n\n # Inputs to tensors\n terminal_tensor = self.terminal_spec.to_tensor(value=t, batched=True)\n reward_tensor = self.reward_spec.to_tensor(value=r, batched=True)\n parallel_tensor = self.parallel_spec.to_tensor(value=p, batched=False)\n\n # Model.observe()\n updated, episodes, updates = self.model.observe(\n terminal=terminal_tensor, reward=reward_tensor, parallel=parallel_tensor\n )\n num_updates += int(updated.numpy().item())\n self.episodes = episodes.numpy().item()\n self.updates = updates.numpy().item()\n\n if self.model.saver is not None:\n self.model.save()\n\n return num_updates\n\n def _process_states_input(self, states, function_name):\n if self.single_state and not isinstance(states, dict) and not (\n util.is_iterable(x=states) and isinstance(states[0], dict)\n ):\n # Single state\n input_type = type(states)\n states = np.asarray(states)\n\n if states.shape == self.states_spec['state'].shape:\n # Single state is not batched\n states = ArrayDict(state=np.expand_dims(states, axis=0))\n batched = False\n num_instances = 1\n is_iter_of_dicts = None\n input_type = None\n\n else:\n # Single state is batched, iter[state]\n assert states.shape[1:] == self.states_spec['state'].shape\n assert input_type in (tuple, list, np.ndarray)\n num_instances = states.shape[0]\n states = ArrayDict(state=states)\n batched = True\n is_iter_of_dicts = True # Default\n\n elif util.is_iterable(x=states):\n # States is batched, iter[dict[state]]\n batched = True\n num_instances = len(states)\n is_iter_of_dicts = True\n input_type = type(states)\n assert input_type in (tuple, list)\n if num_instances == 0:\n raise TensorforceError.value(\n name=function_name, argument='len(states)', value=num_instances, hint='= 0'\n )\n for n, state in enumerate(states):\n if not isinstance(state, dict):\n raise TensorforceError.type(\n name=function_name, argument='states[{}]'.format(n), dtype=type(state),\n hint='is not dict'\n )\n # Turn iter of dicts into dict of arrays\n # (Doesn't use self.states_spec since states also contains auxiliaries)\n states = [ArrayDict(state) for state in states]\n states = states[0].fmap(\n function=(lambda *xs: np.stack(xs, axis=0)), zip_values=states[1:]\n )\n\n elif isinstance(states, dict):\n # States is dict, turn into arrays\n some_state = next(iter(states.values()))\n input_type = type(some_state)\n\n states = ArrayDict(states)\n\n name, spec = self.states_spec.item()\n if states[name].shape == spec.shape:\n # States is not batched, dict[state]\n states = states.fmap(function=(lambda state: np.expand_dims(state, axis=0)))\n batched = False\n num_instances = 1\n is_iter_of_dicts = None\n input_type = None\n\n else:\n # States is batched, dict[iter[state]]\n assert states[name].shape[1:] == spec.shape\n assert input_type in (tuple, list, np.ndarray)\n batched = True\n num_instances = states[name].shape[0]\n is_iter_of_dicts = False\n if num_instances == 0:\n raise TensorforceError.value(\n name=function_name, argument='len(states)', value=num_instances, hint='= 0'\n )\n\n else:\n raise TensorforceError.type(\n 
name=function_name, argument='states', dtype=type(states),\n hint='is not array/tuple/list/dict'\n )\n\n # Check number of inputs\n if any(state.shape[0] != num_instances for state in states.values()):\n raise TensorforceError.value(\n name=function_name, argument='len(states)',\n value=[state.shape[0] for state in states.values()], hint='inconsistent'\n )\n\n return states, batched, num_instances, is_iter_of_dicts, input_type\n\n def save(self, directory, filename=None, format='checkpoint', append=None):\n \"\"\"\n Saves the agent to a checkpoint.\n\n Args:\n directory (str): Checkpoint directory\n (required).\n filename (str): Checkpoint filename, without extension\n (required, unless \"saved-model\" format).\n format (\"checkpoint\" | \"saved-model\" | \"numpy\" | \"hdf5\"): File format, \"checkpoint\" uses\n TensorFlow Checkpoint to save model, \"saved-model\" uses TensorFlow SavedModel to\n save an optimized act-only model, whereas the others store only variables as\n NumPy/HDF5 file\n (default: TensorFlow Checkpoint).\n append (\"timesteps\" | \"episodes\" | \"updates\"): Append timestep/episode/update to\n checkpoint filename\n (default: none).\n\n Returns:\n str: Checkpoint path.\n \"\"\"\n # TODO: Messes with required parallel disentangling, better to remove unfinished episodes\n # from memory, but currently entire episode buffered anyway...\n # Empty buffers before saving\n # for parallel in range(self.parallel_interactions):\n # if self.buffer_indices[parallel] > 0:\n # self.model_observe(parallel=parallel)\n\n path = self.model.save(directory=directory, filename=filename, format=format, append=append)\n\n if filename is None:\n filename = self.model.name\n spec_path = os.path.join(directory, filename + '.json')\n try:\n with open(spec_path, 'w') as fp:\n spec = OrderedDict(self.spec)\n spec['internals'] = self.internals_spec\n spec['initial_internals'] = self.initial_internals()\n json.dump(obj=spec, fp=fp, cls=TensorforceJSONEncoder)\n except BaseException:\n try:\n with open(spec_path, 'w') as fp:\n spec = OrderedDict()\n spec['states'] = self.spec['states']\n spec['actions'] = self.spec['actions']\n spec['internals'] = self.internals_spec\n spec['initial_internals'] = self.initial_internals()\n json.dump(obj=spec, fp=fp, cls=TensorforceJSONEncoder)\n except BaseException:\n os.remove(spec_path)\n\n return path\n\n def restore(self, directory=None, filename=None, format=None):\n \"\"\"\n Restores the agent from a checkpoint.\n\n Args:\n directory (str): Checkpoint directory\n (required, unless \"saved-model\" format and\n saver specified).\n filename (str): Checkpoint filename, with or without append and extension\n (required, unless \"saved-model\" format and\n saver specified).\n format (\"checkpoint\" | \"numpy\" | \"hdf5\"): File format\n (default: format matching directory and\n filename, required to be unambiguous).\n \"\"\"\n if not hasattr(self, 'model'):\n raise TensorforceError(message=\"Missing agent attribute model.\")\n\n if not self.is_initialized:\n self.initialize()\n\n # format implicitly given if file exists\n if format is None and os.path.isfile(os.path.join(directory, filename)):\n if '.data-' in filename:\n filename = filename[:filename.index('.data-')]\n format = 'checkpoint'\n elif filename.endswith('.npz'):\n filename = filename[:-4]\n format = 'numpy'\n elif filename.endswith('.hdf5'):\n filename = filename[:-5]\n format = 'hdf5'\n elif filename.endswith('.h5'):\n filename = filename[:-3]\n format = 'hdf5'\n else:\n assert False\n elif format is None 
and os.path.isfile(os.path.join(directory, filename + '.index')):\n format = 'checkpoint'\n elif format is None and os.path.isfile(os.path.join(directory, filename + '.npz')):\n format = 'numpy'\n elif format is None and (\n os.path.isfile(os.path.join(directory, filename + '.hdf5')) or\n os.path.isfile(os.path.join(directory, filename + '.h5'))\n ):\n format = 'hdf5'\n\n else:\n # infer format from directory\n found = None\n latest = -1\n for name in os.listdir(directory):\n if format in (None, 'numpy') and name == filename + '.npz':\n assert found is None\n found = 'numpy'\n latest = None\n elif format in (None, 'numpy') and name.startswith(filename) and \\\n name.endswith('.npz'):\n assert found is None or found == 'numpy'\n found = 'numpy'\n n = int(name[len(filename) + 1: -4])\n if n > latest:\n latest = n\n elif format in (None, 'hdf5') and \\\n (name == filename + '.hdf5' or name == filename + '.h5'):\n assert found is None\n found = 'hdf5'\n latest = None\n elif format in (None, 'hdf5') and name.startswith(filename) and \\\n (name.endswith('.hdf5') or name.endswith('.h5')):\n assert found is None or found == 'hdf5'\n found = 'hdf5'\n n = int(name[len(filename) + 1: -5])\n if n > latest:\n latest = n\n\n if latest == -1:\n if format is None:\n format = 'checkpoint'\n else:\n assert format == 'checkpoint'\n if filename is None or \\\n not os.path.isfile(os.path.join(directory, filename + '.index')):\n import tensorflow as tf\n path = tf.train.latest_checkpoint(checkpoint_dir=directory)\n if not path:\n raise TensorforceError.exists_not(name='Checkpoint', value=directory)\n filename = os.path.basename(path)\n\n else:\n if format is None:\n format = found\n else:\n assert format == found\n if latest is not None:\n filename = filename + '-' + str(latest)\n\n self.timesteps, self.episodes, self.updates = self.model.restore(\n directory=directory, filename=filename, format=format\n )\n\n def get_variables(self):\n \"\"\"\n Returns the names of all agent variables.\n\n Returns:\n list[str]: Names of variables.\n \"\"\"\n return [\n variable.name[len(self.model.name) + 1: -2] for variable in self.model.get_variables()\n ]\n\n def get_variable(self, variable):\n \"\"\"\n Returns the value of the variable with the given name.\n\n Args:\n variable (string): Variable name\n (required).\n\n Returns:\n numpy-array: Variable value.\n \"\"\"\n return self.model.get_variable(variable=variable)\n\n def assign_variable(self, variable, value):\n \"\"\"\n Assigns the given value to the variable with the given name.\n\n Args:\n variable (string): Variable name\n (required).\n value (variable-compatible value): Value to assign to variable\n (required).\n \"\"\"\n self.model.assign_variable(variable=variable, value=value)\n\n def summarize(self, summary, value, step=None):\n \"\"\"\n Records a value for the given custom summary label (as specified via summarizer[custom]).\n\n Args:\n variable (string): Custom summary label\n (required).\n value (summary-compatible value): Summary value to record\n (required).\n step (int): Summary recording step\n (default: current timestep).\n \"\"\"\n self.model.summarize(summary=summary, value=value, step=step)\n\n def get_output_tensors(self, function):\n \"\"\"\n Returns the names of output tensors for the given function.\n\n Args:\n function (str): Function name\n (required).\n\n Returns:\n list[str]: Names of output tensors.\n \"\"\"\n if function in self.model.output_tensors:\n return self.model.output_tensors[function]\n else:\n raise TensorforceError.value(\n 
name='agent.get_output_tensors', argument='function', value=function\n        )\n\n    def get_available_summaries(self):\n        \"\"\"\n        Returns the summary labels provided by the agent.\n\n        Returns:\n            list[str]: Available summary labels.\n        \"\"\"\n        return self.model.get_available_summaries()\n\n    def get_query_tensors(self, function):\n        \"\"\"\n        Returns the names of queryable tensors for the given function.\n\n        Args:\n            function (str): Function name\n                (required).\n\n        Returns:\n            list[str]: Names of queryable tensors.\n        \"\"\"\n        if function in self.model.query_tensors:\n            return self.model.query_tensors[function]\n        else:\n            raise TensorforceError.value(\n                name='agent.get_query_tensors', argument='function', value=function\n            )\n\n\nclass TensorforceJSONEncoder(json.JSONEncoder):\n    \"\"\"\n    Custom JSON encoder which is NumPy-compatible.\n    \"\"\"\n\n    def default(self, obj):\n        if isinstance(obj, np.integer):\n            return int(obj)\n        elif isinstance(obj, np.floating):\n            return float(obj)\n        elif isinstance(obj, np.ndarray):\n            return obj.tolist()\n        elif isinstance(obj, TensorSpec):\n            return obj.json()\n        else:\n            return super().default(obj)\n","sub_path":"tensorforce/agents/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":50703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"278060879","text":"# -*- coding: utf-8 -*-\n\nfrom flask import Blueprint, render_template, request, url_for, g, abort\nfrom flask import redirect, flash, current_app\n\nfrom ..models.category import Category\nfrom ..forms.category import CategoryForm\nfrom ..utils.user import request_login\n\n__author__ = 'run'\n\n__all__ = ['bp']\nbp = Blueprint('category', __name__)\n\n\n@bp.route('/', methods=['GET'])\n@request_login\ndef index():\n    page = int(request.args.get('page', 1))\n    if not page:\n        return abort(404)\n    paginator = Category.query.all()\n    return render_template('category/index.html', paginator=paginator)\n\n\n@bp.route('/create', methods=['GET', 'POST'])\n@request_login\ndef create():\n    next_url = request.args.get('next', url_for('category.index'))\n\n    form = CategoryForm()\n    if form.validate():\n        form.save()\n        flash(\"Category saved successfully\", \"warning\")\n        return redirect(next_url)\n\n    return render_template('category/create.html', form=form)\n\n\n@bp.route('/delete', methods=['GET', 'POST'])\n@request_login\ndef delete():\n    next_url = request.args.get('next', url_for('category.index'))\n    id = request.args.get('id', None)\n    if id is None:\n        flash(\"Failed to delete category\", \"warning\")\n        return redirect(next_url)\n    category = Category.query.filter_by(id=id).first()\n    category.delete()\n    return redirect(next_url)","sub_path":"runyu/controllers/category.py","file_name":"category.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"191151053","text":"import lxml.etree\nimport os\ndef getQW(path):\n    tree = lxml.etree.parse(path)\n    root = tree.getroot()\n    for qw in root:\n        return qw\n\ndef getRDSS(path):\n    content = ''\n    qw = getQW(path)\n    for qwchild in qw:\n        if qwchild.tag == 'AJJBQK':\n            for ajjbqkchild in qwchild:\n                if ajjbqkchild.tag == 'BSSLD':\n                    for bssldchild in ajjbqkchild:\n                        if bssldchild.tag == 'ZJXX':\n                            for zjxxchild in bssldchild:\n                                if zjxxchild.tag == 'ZJFZ':\n                                    for zjfzchild in zjxxchild:\n                                        if zjfzchild.tag == 'RDSS':\n                                            content = zjfzchild.attrib['value']\n    return content\n# Facts of the accusation\ndef getZKSS(path):\n    content = ''\n    qw = getQW(path)\n    for qwchild in qw:\n        if qwchild.tag == 'AJJBQK':\n            for ajjbqkchild in qwchild:\n                if ajjbqkchild.tag == 'ZKDL':\n                    for zkdlchild in ajjbqkchild:\n                        if zkdlchild.tag == 'ZKSS':\n                            content = zkdlchild.attrib['value']\n\n    return content\n\n\n# Accusation paragraph\ndef getZKDL(path):\n    content = ''\n    qw = getQW(path)\n    for qwchild in qw:\n        if qwchild.tag == 'AJJBQK':\n            for ajjbqkchild in qwchild:\n                if ajjbqkchild.tag == 'ZKDL':\n                    content = ajjbqkchild.attrib['value']\n\n    return content\n\n# Extract the statute list from a document whose statute contents have been re-filled\ndef getFTList(path):\n    ftnamelist = []\n    ftnrlist = []\n    qw = getQW(path)\n    for qwchild in qw:\n        if qwchild.tag == 'YYFLNR':\n            for yyflfzchild in qwchild:\n                if yyflfzchild.tag == 'FLNRFZ':\n                    # initialise so an FLNR child seen before any FLMC cannot raise NameError\n                    flmc = ''\n                    flnr = ''\n                    for flnrfzchild in yyflfzchild:\n                        flag = 0\n                        if flnrfzchild.tag == 'FLMC':\n                            flmc = flnrfzchild.attrib['value']\n                            flag += 1\n                        if flnrfzchild.tag == 'FLNR':\n                            flnr = flnrfzchild.attrib['value']\n                            flag += 2\n                        if flag == 2 and flmc and flnr and flnr != 'NOT FOUND':\n                            if flmc not in ftnamelist:\n                                ftnamelist.append(flmc)\n                                ftnrlist.append(flnr)\n\n    return ftnamelist,ftnrlist\n\n# Get the content of a child node under the document's QW node, e.g. the value of the header, litigation details, basic case facts, adjudication analysis or judgment result\n\ndef getQWChildContent(path,childname):\n    content = ''\n    qw = getQW(path)\n    for qwchild in qw:\n        if qwchild.tag == childname:\n            content += qwchild.attrib['value']\n\n    return content\n\n\n\ndef getFTfromQW(path):\n    ftls = []\n    qw = getQW(path)\n    for qwchild in qw:\n        if qwchild.tag == 'CPFXGC':\n            for cpfxgcchild in qwchild:\n                if cpfxgcchild.tag == 'CUS_FLFT_FZ_RY':\n                    for fz in cpfxgcchild:\n                        if fz.tag == 'CUS_FLFT_RY':\n                            ftls.append(fz.attrib['value'])\n    return ftls\n\n\n\n\n# Get the facts content\ndef getSSMatchObject(wspath):\n    return getRDSS(wspath) + getZKDL(wspath)\n\n\n# Get the conclusion content\ndef getJLMatchObject(wspath):\n    return getQWChildContent(wspath, 'CPFXGC') + getQWChildContent(wspath, 'PJJG')\n\n# Get the list of evidence records for a traffic-offence case\ndef getZJ(wspath):\n    zjlist = []\n    qw = getQW(wspath)\n    for qwchild in qw:\n        if qwchild.tag == 'AJJBQK':\n            for ajjbqkchild in qwchild:\n                if ajjbqkchild.tag == 'BSSLD':\n                    for bssldchid in ajjbqkchild:\n                        if bssldchid.tag == 'ZJXX':\n                            for zjxxchild in bssldchid:\n                                if zjxxchild.tag == 'ZJFZ':\n                                    for zjfzchild in zjxxchild:\n                                        if zjfzchild.tag == 'ZJJL':\n                                            zjlist.append(zjfzchild.attrib['value'])\n    return zjlist\n\n\n# Get the value(s) at an arbitrary xml path\ndef getnodecontent(wspath,xmlpath):\n    pathlist = xmlpath.split('/')\n    print(pathlist)\n    tree = lxml.etree.parse(wspath)\n    root = tree.getroot()\n    point = root\n    index = 0\n    while(index < len(pathlist)):\n        for child in point:\n            if child.tag == pathlist[index]:\n                point = child\n                index += 1\n                break\n        else:\n            # no child matched this path segment: stop instead of looping forever\n            break\n    valuelist = []\n    parent = point.getparent()\n    for p in parent:\n        if p.tag == pathlist[-1]:\n            valuelist.append(p.attrib['value'])\n    return valuelist","sub_path":"util/ws_fun.py","file_name":"ws_fun.py","file_ext":"py","file_size_in_byte":4678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"130102590","text":"import pymongo\nfrom pymongo import MongoClient\nimport simplejson\nimport urllib3\nimport urllib.request\nfrom operator import itemgetter\n\nclient = MongoClient()\ndb = client.yelp\n#db_backup = client.YelpDB\nr_scores = db.reviewScores1\n#r_backup = db.review\n\n#cname: Name of the city- all information related to user,review is segregated based on this.\ndef operation(cName, word):\n\tquery = \"http://localhost:8983/solr/\" + cName + \"/select?q=\" + word + \"&start=0&rows=1000&fl=review_id,score,business_id,stars,includes&wt=json\"\n\twith urllib.request.urlopen(query) as url:\n\t\tsolr_response = simplejson.load(url)\n\t\tword_score = 0\n\t\tstars = 1\n\t\tnumOfDocs = solr_response['response']['numFound']\n\t\t#print('word:',word,' total docs: ',numOfDocs)\n\t\tmaxLength = len(solr_response['response']['docs'])\n\t\tif numOfDocs == 0 or maxLength==0:\n\t\t\t\t#print('inside doclen')\n\t\t\t\treturn (word, 0)\n\t\tfor doc in solr_response['response']['docs']:\n\t\t\t\tstars += doc['stars'][0]\n\t\t\t\treview = r_scores.find_one({'review_id': doc['review_id'][0]})\n\t\t\t\t#print(doc['review_id'][0], doc['score'],review['review_score'])\n\t\t\t\tif review is not None:\n\t\t\t\t\t#print(review['review_id'],review['review_weight'],doc['score'])\n\t\t\t\t\tword_score += (review['review_weight'] * doc['score'])\n\t\t\t\telse:\n\t\t\t\t\tpass\n\t\t# return only after accumulating the score over all matching docs\n\t\treturn (word, word_score)\n\n#words=[\"waitress\",\"restaurant\",\"burger\"]\nwords = []\nwith open('input-features-by-city.txt', 'r') as f2:\n    words = f2.read().splitlines()\n\nval = map(lambda x: operation('Pheonix', x),words)\n\n#for item in val:\n#\tprint(item)\n\n#Prints a sorted output based on the word score obtained from the model.\nprint(sorted(val,key=itemgetter(1),reverse=True))\n","sub_path":"Task-2.2/Evaluation Metric/scripts/review_relevance_score.py","file_name":"review_relevance_score.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"151923594","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# Copyright (c) 2022, Arcangelo Massari \n#\n# Permission to use, copy, modify, and/or distribute this software for any purpose\n# with or without fee is hereby granted, provided that the above copyright notice\n# and this permission notice appear in all copies.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH\n# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND\n# FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT,\n# OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,\n# DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS\n# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS\n# SOFTWARE.\n\n\nfrom typing import Union\n\nfrom time_agnostic_library.agnostic_entity import AgnosticEntity\nfrom time_agnostic_library.support import convert_to_datetime\n\n\nclass Statistics:\n def __init__(self, data:Union[dict,tuple]):\n self.data = data\n\n def get_overhead(self):\n if type(self.data) is tuple:\n entity_snapshots, other_snapshots = self.data\n if entity_snapshots:\n entity_snapshots = sorted([convert_to_datetime(data['generatedAtTime']) for _, data in entity_snapshots.items()], reverse=True)\n other_snapshots = [data['generatedAtTime'] for _, data in other_snapshots.items() if convert_to_datetime(data['generatedAtTime']) >= entity_snapshots[0]]\n return len(entity_snapshots + other_snapshots)\n else:\n return 0\n elif type(self.data) is dict:\n return sum(len(se) for _, se in self.data.items())\n elif type(self.data) is set:\n return len(self.data) \n","sub_path":"src/time_agnostic_library/statistics.py","file_name":"statistics.py","file_ext":"py","file_size_in_byte":1830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"597196506","text":"# https://programmers.co.kr/learn/courses/30/lessons/12911\n\n\ndef solution(n):\n n_1 = bin(n).count(\"1\")\n\n while True:\n n += 1\n if n_1 == bin(n).count(\"1\"):\n return n\n\n\nprint(solution(78))\nprint(solution(15))","sub_path":"programmers/lv2/next-large-number.py","file_name":"next-large-number.py","file_ext":"py","file_size_in_byte":237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"520598609","text":"import json\nimport os\nimport sys\nimport time\nfrom collections import OrderedDict, namedtuple\nfrom tempfile import NamedTemporaryFile\n\nfrom contracts import contract, new_contract\nfrom contracts.utils import check_isinstance, raise_wrapped, indent\nfrom system_cmd import CmdException, system_cmd_result\n\nfrom .spawn_ipfs import get_ipfs_executable\nfrom .utils import memoize_simple\n\n\nclass Timeout(Exception):\n pass\n\n\nclass InvalidHash(Exception):\n pass\n\n\nclass CouldNotPin(Exception):\n pass\n\n\nIPFS_ls_entry = namedtuple('IPFS_ls_entry', 'name hash size')\n\n\n@new_contract\ndef multihash(mh):\n check_isinstance(mh, str)\n return mh.startswith('Qm')\n\n\nclass IPFSInterface(object):\n\n def __init__(self, ipfs_path):\n if ipfs_path is None:\n d = os.path.expanduser('~/shared-logs/.cache/.ipfs')\n if os.path.exists(d):\n ipfs_path = d\n else:\n if 'IPFS_PATH' in os.environ:\n ipfs_path = os.environ['IPFS_PATH']\n else:\n ipfs_path = os.path.expanduser('~/.ipfs')\n self.ipfs_path = ipfs_path\n self.providers_ttl = 120\n self.providers_cache = {}\n self.debug = False\n\n @memoize_simple\n def get_executable(self):\n return get_ipfs_executable(self.ipfs_path)\n\n @memoize_simple\n def version(self):\n cmd = ['ipfs', '--version']\n res = self._cmd(cmd)\n version = res.stdout\n version = version.replace('ipfs', '')\n version = version.replace('version', '')\n version = version.strip()\n print('Detected version %r' % version)\n return version\n\n def _get_env(self):\n env = os.environ.copy()\n if self.ipfs_path is not None:\n env['IPFS_PATH'] = self.ipfs_path\n return env\n\n def 
_cmd(self, cmd):\n if cmd[0] == 'ipfs':\n cmd[0] = self.get_executable()\n\n if self.debug:\n print('$ %s' % \" \".join(cmd))\n else:\n sys.stderr.write('.')\n t0 = time.time()\n res = system_cmd_result('.', cmd, raise_on_error=True, env=self._get_env())\n\n delta = time.time() - t0\n if self.debug:\n print('took %5.2f s: $ %s' % (delta, \" \".join(cmd)))\n return res\n\n def get_keys(self):\n ipfs = self.get_executable()\n cmd = [ipfs, 'key', 'list', '-l']\n res = self._cmd(cmd)\n # print res.stdout.__repr__()\n lines = res.stdout.strip().split('\\n')\n res = OrderedDict()\n for l in lines:\n tokens = l.strip().split(' ')\n assert len(tokens) == 2, tokens\n res[tokens[1]] = tokens[0]\n return res\n\n def gen_key(self, name, key_type, key_size):\n cmd = ['ipfs', 'key', 'gen', '--type', key_type, '--size', str(key_size), name]\n res = self._cmd(cmd)\n ipfs_hash = res.stdout.strip()\n return ipfs_hash\n\n def publish(self, key_name, ipfs_hash, timeout='60s'):\n cmd = ['ipfs', 'name', 'publish', '--timeout', timeout, '--key', key_name, ipfs_hash]\n _res = self._cmd(cmd)\n\n def pin_add(self, mh, recursive=True, timeout=None):\n cmd = ['ipfs', 'pin', 'add']\n if recursive:\n cmd.append('-r')\n if timeout is not None:\n cmd.extend(['--timeout', timeout])\n cmd.append(mh)\n res = self._cmd(cmd)\n if res.ret != 0:\n raise CouldNotPin(str(res))\n\n def pin_ls(self):\n cmd = ['ipfs', 'pin', 'ls']\n res = self._cmd(cmd)\n recursive = set()\n indirect = set()\n for line in res.stdout.strip().split('\\n'):\n tokens = line.split(' ')\n hashed = tokens[0]\n if tokens[1] == 'recursive':\n recursive.add(hashed)\n elif tokens[1] == 'indirect':\n indirect.add(hashed)\n else:\n assert False, line\n\n return recursive, indirect\n\n def dht_findprovs(self, ipfs_hash, timeout=\"1s\"):\n if ipfs_hash in self.providers_cache:\n t, result = self.providers_cache[ipfs_hash]\n elapsed = time.time() - t\n if elapsed < self.providers_ttl:\n return result\n result = self._dht_findprovs(ipfs_hash, timeout)\n self.providers_cache[ipfs_hash] = time.time(), result\n return result\n\n def _dht_findprovs(self, ipfs_hash, timeout):\n cmd = ['ipfs', 'dht', 'findprovs', '--timeout', timeout, ipfs_hash]\n try:\n res = self._cmd(cmd)\n except CmdException as e:\n res = e.res\n options = res.stdout.strip().split('\\n')\n options = [x for x in options if x]\n return options\n\n # @memoize_simple\n def object_get(self, h, timeout=None):\n cmd = ['ipfs', 'object']\n if timeout is not None:\n cmd.extend(['--timeout', timeout])\n cmd.extend(['get', h])\n\n try:\n res = self._cmd(cmd)\n except CmdException as e:\n raise InvalidHash(e.res.stderr)\n return res.stdout\n\n # @memoize_simple\n def get(self, mh, timeout=\"1h\"):\n temp_file = NamedTemporaryFile(suffix='ipfs_get', delete=False)\n temp_file.close()\n\n cmd = ['ipfs', 'get', '--timeout', timeout, '-o', temp_file.name, mh]\n try:\n _res = self._cmd(cmd)\n except CmdException as e:\n if e.res.ret == 1 and 'request canceled' in e.res.stderr:\n msg = 'Could not get %s with timeout %s' % (mh, timeout)\n raise_wrapped(Timeout, e, msg)\n print('exc %s %r' % (e.res.ret, e.res.stderr))\n raise\n\n with open(temp_file.name, 'r') as f:\n data = f.read()\n os.unlink(temp_file.name)\n return data\n\n # @memoize_simple\n def cat(self, mh, timeout=\"1h\"):\n cmd = ['ipfs', 'cat', '--timeout', timeout, mh]\n try:\n res = self._cmd(cmd)\n except CmdException as e:\n if e.res.ret == 1 and 'request canceled' in e.res.stderr:\n msg = 'Could not get %s with timeout %s' % (mh, timeout)\n 
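# descriptive note: raise_wrapped (from contracts.utils, imported above) re-raises the CmdException as a Timeout while keeping the original error message as context\n                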
raise_wrapped(Timeout, e, msg)\n            print('exc %s %r' % (e.res.ret, e.res.stderr))\n            raise\n        data = res.stdout\n        return data\n\n    def ls(self, h, timeout=\"10s\"):\n        # {\"Links\":[{\"Name\":\"FILE2\",\"Hash\":\"QmUtkGLvPf63NwVzLPKPUYgwhn8ZYPWF6vKWN3fZ2amfJF\",\"Size\":14},\n        # {\"Name\":\"upload_info1.yaml\",\"Hash\":\"QmeiuS7VWRaUQTmva3UMgggCCEDisgtLWPwxFnJ1kCGaDJ\",\"Size\":214}],\"Data\":\"\\u0008\\u0001\"}\n        data = self.object_get(h, timeout=timeout)\n        d = json.loads(data)\n        entries = OrderedDict()\n        for entry in d['Links']:\n            entries[str(entry['Name'])] = \\\n                IPFS_ls_entry(str(entry['Name']), str(entry['Hash']), entry['Size'])\n        return entries\n\n    def add_bytes(self, s):\n        return self.get_hash_for_bytes(s)\n\n    def block_put(self, s):\n        ipfs = self.get_executable()\n        cmd = [ipfs, 'block', 'put']\n        res = system_cmd_result(cwd='.', cmd=cmd, raise_on_error=True, display_stdout=False, display_stderr=False,\n                                write_stdin=s, env=self._get_env())\n        token = res.stdout.strip()\n        return token\n\n    @memoize_simple\n    def _get_ipfs_id_data(self):\n        cmd = ['ipfs', 'id']\n        res = self._cmd(cmd)\n\n        data = json.loads(res.stdout.strip())\n        return data\n\n    def stats_bw(self):\n        cmd = ['ipfs', 'stats', 'bw']\n        res = self._cmd(cmd)\n        return res.stdout.strip()\n\n    @memoize_simple\n    def ipfs_id(self):\n        data = self._get_ipfs_id_data()\n        return str(data['ID'])\n\n    # @memoize_simple\n    @contract(returns='list(str)')\n    def get_addresses(self):\n        data = self._get_ipfs_id_data()\n        if data['Addresses']:\n            # list() so the result satisfies the list(str) contract under Python 3, where map() returns an iterator\n            return list(map(str, data['Addresses']))\n        else:\n            return []\n\n    def p2p_listener_open(self, name, address):\n        cmd = ['ipfs', 'p2p', 'listener', 'ls']\n        res = self._cmd(cmd)\n        already = name in res.stdout\n        if already:\n            cmd = ['ipfs', 'p2p', 'listener', 'close', name]\n            res = self._cmd(cmd)\n\n        cmd = ['ipfs', 'p2p', 'listener', 'open', name, address]\n        res = self._cmd(cmd)\n        return json.loads(res.stdout.strip())\n\n    def swarm_connect(self, peer, timeout='10s'):\n        cmd = ['ipfs', 'swarm', '--timeout', timeout, 'connect', peer]\n        res = self._cmd(cmd)\n        print(res.stdout)\n\n    def swarm_peers(self):\n        cmd = ['ipfs', 'swarm', 'peers']\n        res = self._cmd(cmd)\n        return res.stdout.strip().split()\n\n    def object_put(self, data):\n        ipfs = self.get_executable()\n        cmd = [ipfs, 'object', 'put']\n        cwd = '.'\n        res = system_cmd_result(cwd, cmd, raise_on_error=True,\n                                write_stdin=data, env=self._get_env())\n        hashed = res.stdout.split()[1]\n        assert 'Qm' in hashed, hashed\n        return hashed\n\n    def add(self, s, pin=False):\n        print('adding string of len %s' % len(s))\n        ipfs = self.get_executable()\n        cmd = [ipfs, 'add', '-Q']\n        if not pin:\n            cmd.append('--pin=false')\n        cwd = '.'\n        res = system_cmd_result(cwd, cmd, raise_on_error=True,\n                                write_stdin=s, env=self._get_env())\n        hashed = res.stdout.strip().split()[0]\n        assert 'Qm' in hashed, hashed\n        print('returning %s' % hashed)\n        return hashed\n\n    get_hash_for_bytes = add\n\n    def add_ipfs_dir(self, dirname):\n        ipfs = self.get_executable()\n        cmd = [ipfs, 'add', '-r', dirname]\n        res = system_cmd_result(cwd='.', cmd=cmd, raise_on_error=True,\n                                env=self._get_env())\n        s = res.stdout.strip()\n        lines = s.split('\\n')\n        out = lines[-1].split(' ')\n        if (len(out) < 3 or out[0] != 'added' or not out[1].startswith('Qm')):\n            msg = 'Invalid output for ipfs:\\n%s' % indent(res.stdout, ' > ')\n            raise Exception(msg)\n        hashed = out[1]\n        return hashed\n\n    def get_tree_builder(self):\n        return MakeIPFS3(self)\n\n\nclass MakeIPFS3(object):\n\n    def __init__(self, ipfsi):\n        self.ipfsi = ipfsi\n        self.links = []\n        
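# running total in bytes of the files added to this DAG node (see total_size below)\n        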
self.total_file_size = 0\n\n    def add_file(self, filename, mh, size):\n        x = {'Name': filename, 'Hash': mh, \"Size\": size}\n        self.links.append(x)\n        self.total_file_size += size\n\n    @contract(filename=str, s=str)\n    def add_file_content(self, filename, s):\n        # TODO: check filename\n        mh = self.ipfsi.get_hash_for_bytes(s)\n        self.add_file(filename, mh, len(s))\n\n    def get_dag(self):\n        result = {'Data': u\"\\u0008\\u0001\", 'Links': self.links}\n        return result\n\n    def total_size(self):\n        return self.total_file_size\n\n    def as_json(self):\n        dag = self.get_dag()\n        dag_json = json.dumps(dag)\n        return dag_json\n\n    @contract(returns='multihash')\n    def get_hash(self):\n        return self.ipfsi.object_put(self.as_json())\n","sub_path":"src/duckietown_swarm/ipfs_utils.py","file_name":"ipfs_utils.py","file_ext":"py","file_size_in_byte":11125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"281191750","text":"from morph import *\nimport morph\n\ntries = ['cpu']\nif (fcv.cudaEnabled()): tries.append('cuda')\n\nfor i in tries:\n\tif i == 'cuda':\n\t\tmmneg = lambda x: fcv.fromCuda(fcv.neg(fcv.toCuda(x)))\n\telif i == 'cpu':\n\t\tmmneg = lambda x: fcv.neg(x)\n\n\ta = uint8(ones((5,5)));\n\tmmisequal(mmneg(a),\n\t\t\tmmaddm(a,253),\n\t\t\t'mmneg (%s): uint8' % i);\n\t\t\n\tmmisequal(mmneg(uint16(a)),\n\t\t\tmmaddm(uint16(a),65533), \n\t\t\t'mmneg (%s): uint16' % i);\n\t\t\n\ta = mmreadgray('drawing.pgm');\n\tmmisequal(a,\n\t\t\tmmneg(mmneg(a)), \n\t\t\t'mmneg (%s): uint8 binary' % i);\n\n\tmmisequal(mmneg(uint8([1,0,3])),\n\t\t uint8([254,255,252]),\n\t\t 'mmneg (%s): uint8 case' % i)\n\n\tmmisequal(mmneg(int32([1,0,3])), \n\t\t\tint32([-1,0,-3]),\n\t\t\t'mmneg (%s): int32 case' % i)\n\n","sub_path":"test/fcvNeg.py","file_name":"fcvNeg.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"588587553","text":"# -*- coding: utf-8 -*-\n\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.http import HttpResponse\nfrom util import Res\nfrom error import PersonaError, AuthError\nfrom functools import wraps\n\ndef ensure_login(session):\n    \"\"\"Checks, before any other request handling, that the user has passed valid authentication\n\n    :param dict session: the session dictionary included in the request\n\n    .. note:: if a request arrives without going through the auth module, the session\n        lacks the people info and an error is raised\n\n    :return: int, int. The return values are the int people_id and vehicle_id::\n\n        people_id--the user's unique id stored in the database\n        vehicle_id--the unique number of the user's vehicle stored in the database.\n\n        The return values look like - 1, 1\n\n    :raises AuthError: raised when the session has no people_id or vehicle_id.\n\n    It can be used as follows.\n\n    >>> print ensure_login(request.session)\n    1, 125123513651\n\n    \"\"\"\n    people_id = session.get('people_id')\n    vehicle_id = session.get('vehicle_id')\n    if not people_id or not vehicle_id:\n        raise AuthError('client send action without auth')\n    return people_id, vehicle_id\n\ndef action(func):\n    \"\"\"Decorator handled at the front of every incoming request. It checks that\n    authentication was performed, builds a consistent dictionary of variables,\n    runs the function mapped to the request, and returns the result set on the response.\n\n    :param func func: the function mapped to the requested url. Using @action as a decorator above a function in the code maps it automatically\n\n    :local env: bundles the information needed to process func into a dictionary and passes it to func as a parameter.\n\n    :local res: a class that builds the JSON-format return code. See: class: 'Res'\n\n    :return: returns a JSON-format string value built via the Res class\n\n    :raises PersonaError: handled when an exception of a class inheriting from PersonaError is raised while func runs. Sets each error's own error number and error message via the Res class.\n\n    :raises Exception: handled when an exception not defined as a PersonaError is raised. Returns an UnknownError message with the unique error number 100.\n\n    It can be used as follows.\n\n    >>> @action\n    ... def viewRecentCanData(env):\n    ...     return 0\n\n    \"\"\"\n    @csrf_exempt\n    @wraps(func)\n    def deco(request):\n        try:\n            res = Res()\n            people_id, vehicle_id = ensure_login(request.session)\n            env = {\n                'req' : request.GET if request.method == 'GET' else request.POST,\n                'session' : request.session,\n                'people_id' : people_id,\n                'vehicle_id' : vehicle_id\n            }\n\n            res.setData(func(env))\n\n        except PersonaError as e:\n            e.error(res)\n\n        except:\n            res.setError(100, 'UnknownError')\n\n        return HttpResponse(res.toJson())\n\n    return deco\n","sub_path":"server/src/util/action.py","file_name":"action.py","file_ext":"py","file_size_in_byte":3325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"317747594","text":"from django.db import models\n\n\nclass HistoryDescriptor(object):\n    def __init__(self, model):\n        self.model = model\n\n    def __get__(self, instance, owner):\n        return HistoryManager(self.model, owner, instance)\n\n\nclass HistoryManager(models.Manager):\n    def __init__(self, model, primary_model, instance=None):\n        super(HistoryManager, self).__init__()\n        self.model = model\n        self.primary_model = primary_model\n        self.instance = instance\n\n    def get_query_set(self):\n        qs = super(HistoryManager, self).get_query_set()\n        if self.instance:\n            qs = self._filter_queryset_by_pk(qs, self.instance.pk)\n        return qs\n\n    def _filter_queryset_by_pk(self, qs, pk):\n        return qs.filter(**{self.primary_model._meta.pk.name: pk})\n\n    def most_recent(self, pk=None):\n        \"\"\"\n        If called with an instance, returns the most recent copy of the instance\n        available in the history.\n\n        >>> obj = Obj.objects.get(pk=1)\n        >>> obj.history.most_recent()\n        \n\n        If called without an instance, returns the most recent copy of an\n        instance matching pk.\n\n        >>> Obj.history.most_recent(pk=1)\n        \n        \"\"\"\n        pk = self.instance.pk if self.instance else pk\n        qs = self._filter_queryset_by_pk(self.get_query_set(), pk)\n\n        try:\n            version = qs[0]\n        except IndexError:\n            message = \"%s(pk=%s) has no historical record.\" % \\\n                (self.primary_model.__name__, pk)\n            raise self.primary_model.DoesNotExist(message)\n        else:\n            return version.history_object\n\n    def as_of(self, date, pk=None, restore=False):\n        \"\"\"\n        If called with an instance, returns an instance of the original model\n        with all the attributes set to what was present on the object on the\n        date provided.\n\n        >>> obj = Obj.objects.get(pk=1)\n        >>> obj.history.as_of(datetime.datetime(2000, 1, 1))\n        \n\n        If called without an instance, has similar behavior but does its lookup\n        based on the pk provided.\n\n        >>> Obj.history.as_of(datetime.datetime(2000, 1, 1), pk=1)\n        \n        \"\"\"\n        pk = self.instance.pk if self.instance else pk\n        qs = self._filter_queryset_by_pk(self.get_query_set(), pk)\n\n        try:\n            version = qs.filter(history_date__lte=date)[0]\n        except IndexError:\n            message = \"%s(pk=%s) had not yet been created.\" % \\\n                (self.primary_model.__name__, pk)\n            raise self.primary_model.DoesNotExist(message)\n        else:\n            from history.models import DELETED\n            if version.history_type == DELETED and not restore:\n                message = \"%s(pk=%s) had already been deleted.\" % \\\n                    (self.primary_model.__name__, pk)\n                raise self.primary_model.DoesNotExist(message)\n            return version.history_object\n\n    @property\n    def created_date(self):\n        if not self.instance:\n            raise TypeError(\"Can't use created_date() without a %s instance.\" % \\\n                self.primary_model._meta.object_name)\n        return 
self.aggregate(created=models.Min('history_date'))['created']\n\n    @property\n    def created_by(self):\n        if not self.instance:\n            raise TypeError(\"Can't use created_by() without a %s instance.\" % \\\n                self.primary_model._meta.object_name)\n        return self.order_by('history_date')[0].history_editor\n\n    @property\n    def last_modified_date(self):\n        if not self.instance:\n            raise TypeError(\"Can't use last_modified_date() without a %s instance.\" % \\\n                self.primary_model._meta.object_name)\n\n        return self.aggregate(modified=models.Max('history_date'))['modified']\n\n    @property\n    def last_modified_by(self):\n        if not self.instance:\n            raise TypeError(\"Can't use last_modified_by() without a %s instance.\" % \\\n                self.primary_model._meta.object_name)\n        return self.order_by('-history_date')[0].history_editor\n\n    def get_or_restore(self, pk):\n        '''\n        Looks for an existing item with the given primary key in the primary\n        object table - return it if it exists, otherwise try to 'restore' the\n        most recent version of the item.\n        '''\n        if self.instance:\n            raise TypeError(\"Can't use get_or_restore() with a %s instance.\" %\\\n                self.instance._meta.object_name)\n        try:\n            return self.primary_model._default_manager.get(pk=pk)\n        except self.primary_model.DoesNotExist:\n            return self.most_recent(pk=pk)\n\n\nclass HistoricalAnnotatingManager(models.Manager):\n\n    def get_query_set(self):\n        '''\n        Annotate the queryset with historical information:\n        - created_date - the history_date of the earliest version\n        - last_modified_date - the history_date of the most recent version\n        - count - the number of historical versions\n        '''\n        return super(HistoricalAnnotatingManager, self)\\\n            .get_query_set()\\\n            .annotate(created_date=models.Min('history__history_date'))\\\n            .annotate(last_modified_date=models.Max('history__history_date'))\\\n            .annotate(count=models.Count('history'))\n","sub_path":"src/history/manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":5428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"267093583","text":"from exchange.order_books.book_price_q import BookPriceQ\nfrom exchange.order_books.list_elements import SortedIndexedDefaultList\nimport heapq\nimport math\nimport logging as log\n\nMIN_BID = 0\nMAX_ASK = 2147483647\n\ndef merge(ait, bit, key):\n    \"\"\"\n    >>> [a for a in merge(iter([1]), iter([4]), lambda i:i)]\n    [1, 4]\n    >>> [a for a in merge(iter([4]), iter([1]), lambda i:i)]\n    [1, 4]\n    >>> [a for a in merge(iter([1,2,3, 10]), iter([2,4,7]), lambda i:i)]\n    [1, 2, 2, 3, 4, 7, 10]\n    \"\"\"\n    a=None\n    b=None\n    try:\n        a = next(ait)\n    except StopIteration:\n        yield from bit\n        return\n    if a is not None:\n        try:\n            b = next(bit)\n        except StopIteration:\n            yield a\n            yield from ait\n            return\n\n    while b is not None:\n        try:\n            try:\n                while(key(a) <= key(b)):\n                    yield a\n                    a = next(ait)\n            except StopIteration:\n                yield b\n                yield from bit\n                return\n\n            yield b\n            b = next(bit)\n        except StopIteration:\n            yield a\n            yield from ait \n            return\n\nclass FBABook:\n    def __init__(self):\n        self.bids = SortedIndexedDefaultList(index_func = lambda bq: bq.price, \n                        initializer = lambda p: BookPriceQ(p),\n                        index_multiplier = -1)\n        self.asks = SortedIndexedDefaultList(index_func = lambda bq: bq.price, \n                        initializer = lambda p: BookPriceQ(p))\n\n    def __str__(self):\n        return \"\"\"\n  Bids:\n{}\n\n  Asks:\n{}\"\"\".format(self.bids, self.asks)\n\n    def reset_book(self):\t\t\t\t\t\t#jason\n        log.info('Clearing All Entries from Order Book')\n        self.bid = MIN_BID\n        self.ask = 
MAX_ASK\n        for id in list(self.asks.index):\t\t# force as list because we can't iterate a dict and delete keys at the same time\n            self.asks.remove(id)\n        for id in list(self.bids.index):\n            self.bids.remove(id)\n\n\n\n    def cancel_order(self, id, price, volume, buy_sell_indicator):\n        '''\n        Cancel all or part of an order. Volume refers to the desired remaining shares to be executed: if it is 0, the order is\n        fully cancelled, otherwise an order of volume volume remains.\n        '''\n        orders = self.bids if buy_sell_indicator == b'B' else self.asks\n\n        if price not in orders or id not in orders[price].order_q:\n            log.debug('No order in the book to cancel, cancel ignored.')\n            return []\n        else:\n            amount_canceled=0\n            current_volume=orders[price].order_q[id]\n            if volume==0: #fully cancel\n                orders[price].cancel_order(id)\n                amount_canceled = current_volume\n                if orders[price].interest == 0:\n                    orders.remove(price)\n            elif volume < current_volume:\n                orders[price].reduce_order(id, volume) \n                amount_canceled = current_volume - volume\n            else:\n                amount_canceled = 0\n\n            return [(id, amount_canceled)]\n\n    def enter_buy(self, id, price, volume, enter_into_book = True):\n        '''\n        Enter a limit order to buy at price price: do NOT try and match\n        '''\n        if enter_into_book:\n            self.bids[price].add_order(id, volume)\n            entered_order = (id, price, volume)\n            return ([], entered_order)\n        else:\n            return ([], None)\n\n    def enter_sell(self, id, price, volume, enter_into_book):\n        '''\n        Enter a limit order to sell at price price: do NOT try and match\n        '''\n        if enter_into_book:\n            self.asks[price].add_order(id, volume)\n            entered_order = (id, price, volume)\n            return ([], entered_order) \n        else:\n            return ([], None)\n\n    def batch_process(self):\n        log.debug('Running batch auction..')\n        log.debug('Order book=%s', self)\n        asks_volume = sum([price_book.interest for price_book in self.asks.ascending_items()])\n        all_orders_descending = merge(\n            self.asks.descending_items(),\n            self.bids.ascending_items(), \n            key= lambda bpq: -bpq.price)\n        log.debug('Ask prices=%s:%s, bid prices=%s:%s', \n            [(p.price, p.interest) for p in self.asks.ascending_items()],\n            [(p.price, p.interest) for p in self.asks.descending_items()], \n            [(p.price, p.interest) for p in self.bids.ascending_items()],\n            [(p.price, p.interest) for p in self.bids.descending_items()])\n        assert len([p.price for p in self.asks.descending_items()])==len([p.price for p in self.asks.ascending_items()]) \n\n        orders_volume = prior_orders_volume = 0\n        clearing_price=None\n        log.debug('Calculating clearing price..')\n        bpq=prior_bpq=None\n\n        min_real_price = None\n        max_real_price = None\n\n        log.debug('all orders descending: %s', [(b.price,b.interest) for b in merge(\n            self.asks.descending_items(),\n            self.bids.ascending_items(), \n            key= lambda bpq: -bpq.price)])\n\n        for bpq in all_orders_descending:\n            #update min/max prices\n            if MIN_BID<bpq.price and bpq.price<MAX_ASK:\n                if min_real_price is None or bpq.price<min_real_price:\n                    min_real_price = bpq.price\n                if max_real_price is None or max_real_price<bpq.price:\n                    max_real_price = bpq.price\n            prior_orders_volume = orders_volume\n            orders_volume += bpq.interest\n            if orders_volume > asks_volume:\n                break\n            prior_bpq=bpq\n\n        #if bpq.price is still at MAX_ASK, loop until price is less than max_ask and use that price\n        if bpq is not None and bpq.price==MAX_ASK:\n            for bpq in all_orders_descending:\n                if max_real_price is None or max_real_price<bpq.price:\n                    continue\n                break\n\n        if bpq is not None and orders_volume > asks_volume:\n            clearing_price = max(bpq.price, min_real_price)\n\n\n\n        log.debug('Clearing price: %s', clearing_price)\n\n        matches = []\n        ask_it = self.asks.ascending_items()\n        if clearing_price is not None:\n            try:\n                ask_node = next(ask_it)\n                ask_price = ask_node.price\n\n                #iterate over bids starting with highest\n                for bid_node in self.bids.ascending_items():\n                    bid_price = bid_node.price\n                    log.info('bid price {}, ask price {}, clearing price {}'.format(bid_price, ask_price, clearing_price))\n                    if bid_price<clearing_price or ask_price>clearing_price:\n                        log.debug('no cross at {}'.format(ask_price))\n                        break\n                    else:\n                        for (bid_id, volume) in list(bid_node.order_q.items()):\n                            volume_filled = 0\n                            while volume_filled < volume and ask_price <= clearing_price:\n                                (filled, fulfilling_orders) = ask_node.fill_order(volume-volume_filled)\n                                volume_filled += filled\n                                matches.extend([((bid_id, ask_id), clearing_price, volume) for (ask_id, volume) in fulfilling_orders])\n                                log.debug('matches: {}'.format(matches))\n                                if volume_filled <= volume:\n                                    log.info('volume filled: {}, volume:{} at {}'.format(volume_filled, volume, ask_price))\n                                    self.asks.remove(ask_price)\n                                    try: \n                                        ask_node = next(ask_it)\n                                        ask_price = ask_node.price\n                                    except StopIteration as e:\n                                        log.debug('stopped iteration at {}'.format(ask_price)) \n                                        break\n                            #update bid in book\n                            assert volume_filled<=volume\n                            if volume_filled==volume:\n                                log.debug('filled {} out of {}: bid id {} '.format(volume_filled, volume, bid_id))\n                                bid_node.cancel_order(bid_id)\n                                if bid_node.interest == 0:\n                                    self.bids.remove(bid_node.price)\n                            elif volume_filled >0:\n                                log.debug('reducing {} out of {}, bid id: {}'.format(volume_filled, volume, bid_id))\n                                bid_node.reduce_order(bid_id, volume - volume_filled)\n            except StopIteration as e:\n                log.debug(e)\n                # this is a terrible idea. why didn't you even log it ?\n                # there are many iterations that can throw this inside here.\n                # this let an exception pass silently, before bid node is updated\n                # after line 224 throws StopIteration.\n                pass\n        return matches\n\n\n\n","sub_path":"exchange_server/exchange/order_books/fba_book.py","file_name":"fba_book.py","file_ext":"py","file_size_in_byte":10627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"644246502","text":"\"\"\"\nAPI App Management\n\"\"\"\n\n# stdlib\nfrom datetime import date\nfrom ssl import SSLContext\n\n# library\nfrom quart.json import JSONEncoder\nfrom quart_openapi import Pint\n\n\nclass CustomJSONEncoder(JSONEncoder):\n    \"\"\"\n    Customize the JSON date format\n    \"\"\"\n\n    # pylint: disable=method-hidden\n    def default(self, obj):\n        try:\n            if isinstance(obj, date):\n                return obj.isoformat() + \"Z\"\n            iterable = iter(obj)\n        except TypeError:\n            pass\n        else:\n            return list(iterable)\n        return JSONEncoder.default(self, obj)\n\n\nCORS_HEADERS = [\"Authorization\", \"Content-Type\"]\n\n\ndef add_cors(response):\n    \"\"\"\n    Add missing CORS headers\n\n    Fixes CORS bug where headers are not included in OPTIONS\n    \"\"\"\n    for key, value in (\n        (\"Access-Control-Allow-Origin\", \"*\"),\n        (\"Access-Control-Allow-Headers\", CORS_HEADERS),\n        (\"Access-Control-Allow-Methods\", list(response.allow)),\n    ):\n        if key not in response.headers:\n            if isinstance(value, list):\n                value = \",\".join(value)\n            response.headers.add(key, value)\n    return response\n\n\ndef create_app(\n    name: str, psql_uri: str = None, mongo_uri: str = None, psql_pool_args: dict = None\n) -> Pint:\n    \"\"\" Create the core API app. 
Supply URIs as necessary\n \"\"\"\n app = Pint(name)\n\n @app.before_serving\n async def _startup():\n if psql_uri:\n import asyncpg\n\n kwargs = {\"min_size\": 3, \"max_size\": 8, \"command_timeout\": 5}\n if \"localhost\" not in psql_uri:\n kwargs[\"ssl\"] = SSLContext()\n if psql_pool_args:\n kwargs.update(psql_pool_args)\n app.db = await asyncpg.create_pool(psql_uri, **kwargs)\n else:\n app.db = None\n\n if mongo_uri:\n from motor.motor_asyncio import AsyncIOMotorClient\n\n app.mdb = AsyncIOMotorClient(mongo_uri)\n else:\n app.mdb = None\n\n app.json_encoder = CustomJSONEncoder\n app.after_request(add_cors)\n return app\n","sub_path":"avwx_api_core/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"179175213","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\nimport cython\nfrom typing import Optional, Union\nimport random\nfrom math import log10, pow\n\n\n@cython.cclass\nclass NumericEncoder(object):\n \"\"\"\n NumericEncoder encodes both integers and floats and ensures that those numerics close to each other on the number line have a certain level of similarity\n The encoder employs a random distribution algorithm that requires state (knowledge of previously encoded numerics) in order to guarantee the correct level\n of similarity\n \"\"\"\n\n # ***************************************\n # cython declarations for the class attributes\n #\n type = cython.declare(str, visibility='public')\n name = cython.declare(str, visibility='public')\n encodings = cython.declare(dict, visibility='public')\n bits = cython.declare(dict, visibility='public')\n n_bits = cython.declare(cython.int, visibility='public')\n enc_size = cython.declare(cython.int, visibility='public')\n bit_offset = cython.declare(cython.int, visibility='public')\n rand_state = cython.declare(object, visibility='public')\n\n log = cython.declare(cython.bint, visibility='public')\n min_step = cython.declare(float, visibility='public')\n min_bucket = cython.declare(cython.int, visibility='public')\n min_next_idx = cython.declare(cython.int, visibility='public')\n max_bucket = cython.declare(cython.int, visibility='public')\n max_next_idx = cython.declare(cython.int, visibility='public')\n zero_bucket = cython.declare(float, visibility='public')\n\n def __init__(self,\n name: str = 'numeric',\n min_step: float = 1.0,\n n_bits: cython.int = 40,\n enc_size: cython.int = 2048,\n bit_offset: cython.int = 0,\n log: cython.bint = False,\n seed=12345):\n \"\"\"\n Creates a Numeric Encoder using either a linear or log random distributed algorithm\n :param name: different instances of encoders can be identified by a given name.\n :param min_step: the minimum step amount that can be encoded. For example if set to 1.0 the encoder encodes to the nearest integer\n :param n_bits: the number of bits in the encoding - this should be at least 20 and should be roughly 2% (ie sparsity of 2%) of the enc_size\n :param enc_size: the total possible number of bits in the sparse encoding - this should ensure a sparsity of around 2%\n :param bit_offset: this factor offsets the starting bit - set a 0 means bits start from 0 up to enc_size-1. 
Set to 1 and bits start from enc_size and range up to (enc_size * 2) - 1\n :param log: flag to indicate if the log of the numeric is encoded - if set to true then the min_step applies to a log scale - meaning that\n values such as 1.0 and 2.0 will have an encoded similarity similar to 1000 and 2000\n :param seed: each encoder instance can have a different random seed to provide deterministic sequence of random bits\n \"\"\"\n\n self.type = 'numeric'\n \"\"\" indicates the type of encoder \"\"\"\n\n self.name = name\n \"\"\" the specific instance name of this encoder \"\"\"\n\n self.encodings = {}\n \"\"\" a map between the bucket and set of bits that represent its encoding \"\"\"\n\n self.bits = {}\n \"\"\" a map between each bit and the bucket encodings it is a part of \"\"\"\n\n self.n_bits = n_bits\n \"\"\" the maximum number of ON bits an encoding consists of \"\"\"\n\n self.enc_size = enc_size\n \"\"\" the maximum possible bits an encoding a sparse encoding can be created from \"\"\"\n\n self.bit_offset = bit_offset\n \"\"\" a factor of enc_size that defines the starting bit number \"\"\"\n\n # seed the generator if required\n #\n if seed is not None:\n random.seed(seed)\n\n self.rand_state = random.getstate()\n \"\"\" the state of the random number generator for this encoder \"\"\"\n\n self.log: cython.bint = log\n \"\"\" flag to indicate if the numeric is transformed by the log function \"\"\"\n\n self.zero_bucket: float\n \"\"\" the numeric value that bucket 0 represents \"\"\"\n\n self.min_step: float = min_step\n \"\"\" the minimum numerical amount that can be encoded\"\"\"\n\n self.min_bucket: cython.int = 0\n \"\"\" the current minimum bucket that has been encoded \"\"\"\n\n self.min_next_idx: cython.int = 0\n \"\"\" the next bit index (a number between 0 and n_bits) into the minimum bucket encoding that will be replaced with a random bit \"\"\"\n\n self.max_bucket: cython.int = 0\n \"\"\" the current maximum bucket that has been encoded \"\"\"\n\n self.max_next_idx: cython.int = 0\n \"\"\" the next bit index (a number between 0 and n_bits) into the maximum bucket encoding that will be replaced with a random bit \"\"\"\n\n def create_encoding(self, target_bucket: Optional[int]):\n \"\"\"\n creates encodings between the current min or max_bucket up to and including the target bucket\n :param target_bucket: the target bucket to encode\n :return: None\n \"\"\"\n\n # ************************\n # help cython type variables\n #\n offset: cython.int\n bit: cython.int\n bit_population: list\n new_enc: list\n prev_enc: list\n bucket: cython.int\n\n # set the state of the random generator\n #\n random.setstate(self.rand_state)\n\n # calculate the bit population, offset as required\n #\n offset = self.bit_offset * self.enc_size\n bit_population = [bit for bit in range(offset, offset + self.enc_size)]\n\n # the value none will have a specific value\n #\n if target_bucket is None:\n\n # create a list of random numbers to represent the bits set\n #\n new_enc = list(random.sample(population=bit_population, k=self.n_bits))\n\n # map the bucket to the encoding\n #\n self.encodings[target_bucket] = new_enc\n\n # maintain the mapping of bits to bucket to allow for easy decoding\n #\n for bit in new_enc:\n if bit not in self.bits:\n self.bits[bit] = {target_bucket}\n else:\n self.bits[bit].add(target_bucket)\n\n # the first encoded value is special\n #\n elif target_bucket == 0:\n\n # create a list of random numbers to represent the bits set\n #\n new_enc = 
list(random.sample(population=bit_population, k=self.n_bits))\n\n # map the 0 bucket to the encoding\n #\n self.encodings[target_bucket] = new_enc\n\n # set the max bucket that exists along with the next offset in list of bits to change\n #\n self.max_bucket = target_bucket\n self.max_next_idx = 0\n\n # set the min bucket that exists along with the next offset in list of bits to change\n #\n self.min_bucket = target_bucket\n self.min_next_idx = 0\n\n # maintain the mapping of bits to bucket to allow for easy decoding\n #\n for bit in new_enc:\n if bit not in self.bits:\n self.bits[bit] = {target_bucket}\n else:\n self.bits[bit].add(target_bucket)\n\n # if target bucket is larger than current max_bucket so fill in the gaps\n #\n elif target_bucket > self.max_bucket:\n\n # will need the bits from the current largest bucket encoding\n #\n prev_enc = self.encodings[self.max_bucket]\n\n # from the current max bucket + 1 up to and including the target_bucket\n #\n for bucket in range(self.max_bucket + 1, target_bucket + self.n_bits + 1):\n\n # create the new encoding as a copy of the last max bucket\n #\n new_enc = [bit for bit in prev_enc]\n\n # get another bit chosen at random that's not in previous bucket encoding\n #\n new_bit = random.sample(population=[bit for bit in bit_population if bit not in prev_enc], k=1)\n\n # replace one bit at the max_next_idx slot which guarantees no clashes\n #\n new_enc[self.max_next_idx] = new_bit[0]\n\n # update the next idx to replace, remembering to wrap around if necessary\n #\n self.max_next_idx += 1\n if self.max_next_idx >= self.n_bits:\n self.max_next_idx = 0\n\n # save new encoding\n #\n self.encodings[bucket] = new_enc\n\n # maintain the mapping of bits to buckets\n #\n for bit in new_enc:\n if bit not in self.bits:\n self.bits[bit] = {bucket}\n else:\n self.bits[bit].add(bucket)\n\n # remember the previous encoding\n #\n prev_enc = new_enc\n\n # we now have a new max bucket\n #\n self.max_bucket = bucket\n\n # else must be below minimum\n #\n else:\n prev_enc = self.encodings[self.min_bucket]\n for bucket in range(self.min_bucket - 1, target_bucket - self.n_bits - 1, -1):\n\n # create the new encoding as a copy of the last max bucket\n #\n new_enc = [i for i in prev_enc]\n\n # get another bit chosen at random that's not in previous bucket encoding\n #\n new_bit = random.sample(population=[i for i in bit_population if i not in prev_enc], k=1)\n\n # replace one bit at the max_next_idx slot which guarantees no clashes\n #\n new_enc[self.min_next_idx] = new_bit[0]\n\n # update the next idx to replace, remembering to wrap around if necessary\n #\n self.min_next_idx += 1\n if self.min_next_idx >= self.n_bits:\n self.min_next_idx = 0\n\n # save new encoding\n #\n self.encodings[bucket] = new_enc\n\n # maintain the mapping of bits to buckets\n #\n for bit in new_enc:\n if bit not in self.bits:\n self.bits[bit] = {bucket}\n else:\n self.bits[bit].add(bucket)\n\n # remember the previous encoding\n #\n prev_enc = new_enc\n\n # we now have a new min bucket\n #\n self.min_bucket = bucket\n\n # remember the state of the random generator\n #\n self.rand_state = random.getstate()\n\n def encode(self, numeric: Optional[float]) -> set:\n \"\"\"\n encodes a numeric\n :param numeric: the numeric to encode - can be a None\n :return: a set of encoded bits\n \"\"\"\n\n # ************************\n # help cython type variables\n #\n enc: set\n round_numeric: float\n target_bucket: cython.int\n\n # if its none then create special encoding that has no similarity to other 
\n\n def encode(self, numeric: Optional[float]) -> set:\n \"\"\"\n encodes a numeric\n :param numeric: the numeric to encode - can be None\n :return: a set of encoded bits\n \"\"\"\n\n # ************************\n # declarations to help cython type the variables\n #\n enc: set\n round_numeric: float\n target_bucket: cython.int\n\n # if it's None then create a special encoding that has no similarity to other numbers\n #\n if numeric is None:\n\n # if None hasn't already been encoded then encode\n #\n if None not in self.encodings:\n self.create_encoding(None)\n\n enc = set(self.encodings[None])\n\n # else assume it's a numeric\n #\n else:\n\n # if configured to use the log then take the log to base 10 of the numeric and scale by min_step\n # values between -1.0 and 1.0 are scaled linearly as log10 is undefined for zero and negative inputs\n #\n if self.log:\n if numeric >= 1.0:\n round_numeric = round(log10(numeric) / self.min_step)\n elif numeric <= -1.0:\n # use the magnitude as log10 of a negative number is undefined\n round_numeric = -1 * round(log10(-numeric) / self.min_step)\n else:\n round_numeric = round(numeric / self.min_step)\n\n # round the numeric to the minimum step\n #\n else:\n round_numeric = round(numeric / self.min_step)\n\n # if no existing encodings then create the first one for the zero bucket\n #\n if len(self.encodings) == 0 or (len(self.encodings) == 1 and None in self.encodings):\n\n # set up the zero bucket association with the real number\n #\n self.zero_bucket = round_numeric\n\n # create the 0 bucket encoding\n #\n self.create_encoding(0)\n\n # remember the encoding required\n #\n enc = set(self.encodings[0])\n\n # also create preceding and succeeding buckets\n #\n self.create_encoding(1)\n self.create_encoding(-1)\n else:\n\n # calculate the bucket associated with the encoding\n #\n target_bucket = int(round_numeric - self.zero_bucket)\n\n # just return encoding if bucket exists\n #\n if target_bucket in self.encodings:\n enc = set(self.encodings[target_bucket])\n else:\n\n self.create_encoding(target_bucket)\n\n # the encoding required\n #\n enc = set(self.encodings[target_bucket])\n\n return enc\n\n def decode(self, enc: Union[set, dict], max_bit_weight: float = 1.0):\n \"\"\"\n decodes a set of bits into a numeric\n :param enc: can be either a set of bits or a dictionary of bits in which the bits are the keys and the values are the weight of each bit\n :param max_bit_weight: the maximum a bit weight can be\n :return: a weighted average numeric - where the weights are based on the bit weights\n \"\"\"\n\n # ************************\n # declarations to help cython type the variables\n #\n buckets: dict = {}\n bucket_list: list\n bit: cython.int\n bucket: cython.int\n n: cython.int\n best_weight: float\n total_weight: float\n idx: cython.int\n\n # assign the default weight of max_bit_weight to every bit if given a plain set\n #\n if isinstance(enc, set):\n enc = {bit: max_bit_weight for bit in enc}\n\n # sum the weights for the buckets associated with the bits in the encoding\n #\n for bit in enc:\n # only process bits for this encoder\n #\n if bit in self.bits:\n for bucket in self.bits[bit]:\n if bucket not in buckets:\n buckets[bucket] = enc[bit]\n else:\n buckets[bucket] += enc[bit]\n\n if len(buckets) > 0:\n\n # create a list of buckets so we can sort in descending order of bit weight\n #\n bucket_list = [(n, buckets[n] / max_bit_weight) for n in buckets]\n bucket_list.sort(key=lambda x: x[1], reverse=True)\n\n # get weighted average of bucket values if the best weight is less than n_bits\n #\n best_weight = bucket_list[0][1]\n if best_weight < self.n_bits:\n\n value = None\n total_weight = 0.0\n\n # look only at the top 3 buckets which should contain the most relevant values,\n # skipping the None bucket if it is present\n #\n for idx in range(min(3, len(bucket_list))):\n if bucket_list[idx][0] is not None:\n if value is None:\n value = 0\n value += (bucket_list[idx][0] + self.zero_bucket) * self.min_step * bucket_list[idx][1]\n total_weight += bucket_list[idx][1]\n\n if value is not None:\n value = value / total_weight\n\n # return to linear scale if log previously applied\n #\n if self.log:\n value = pow(10, value)\n # the weight of the best bucket is at its maximum of n_bits so no weighted average is needed\n #\n else:\n\n # it's possible the bucket value is actually None\n #\n if bucket_list[0][0] is not None:\n\n # calc value of best bucket using min_step and offset from zero bucket value\n #\n value = (bucket_list[0][0] * self.min_step) + (self.zero_bucket * self.min_step)\n\n # return to linear scale if log previously applied\n #\n if self.log:\n value = pow(10, value)\n else:\n value = None\n else:\n value = None\n return value\n\n\nif __name__ == '__main__':\n\n # simple smoke test of encode and decode round trips\n #\n encoder = NumericEncoder(min_step=0.5, n_bits=40, enc_size=2048, bit_offset=1)\n\n enc_n = encoder.encode(None)\n val_n = encoder.decode(enc_n)\n\n enc_n = encoder.encode(None)\n\n enc_1 = encoder.encode(100)\n enc_4 = encoder.encode(120.0)\n enc_5 = encoder.encode(99)\n\n val_1 = encoder.decode(enc_1)\n\n enc_2 = encoder.encode(102)\n\n val_2 = encoder.decode(enc_2)\n\n enc_3 = encoder.encode(100.5)\n val_3 = encoder.decode(enc_3)\n\n val_4 = encoder.decode(enc_4)
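\n\n # illustrative overlap check (an added sketch, not part of the original smoke test): by construction\n # adjacent buckets differ in exactly one bit, so with n_bits = 40 and min_step = 0.5 the overlaps below\n # hold with overwhelming probability\n #\n print('overlap 100 vs 100.5:', len(enc_1 & enc_3)) # typically n_bits - 1 = 39\n print('overlap 100 vs 102:', len(enc_1 & enc_2)) # typically n_bits - 4 = 36\n print('overlap 100 vs 120:', len(enc_1 & enc_4)) # typically close to 0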
\n\n print('finished')\n","sub_path":"src/numeric_encoder.py","file_name":"numeric_encoder.py","file_ext":"py","file_size_in_byte":17702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"413148731","text":"import os\nimport time\nfrom datetime import timezone as tz\n\nfrom astropy import units as u\nfrom astropy.time import Time\n\nfrom .logger import logger\n\n\ndef current_time(flatten=False, datetime=False, pretty=False):\n \"\"\" Convenience method to return the \"current\" time according to the system.\n\n Note:\n If the ``$POCSTIME`` environment variable is set then this will return\n the time given in the variable. This is used for setting specific times\n during testing. After checking the value of POCSTIME the environment\n variable will also be incremented by one second so that subsequent\n calls to this function will generate monotonically increasing times.\n\n **Operation of POCS from $POCS/bin/pocs_shell will clear the POCSTIME variable.**\n\n .. doctest::\n\n >>> os.environ['POCSTIME'] = '1999-12-31 23:59:59'\n >>> party_time = current_time(pretty=True)\n >>> party_time\n '1999-12-31 23:59:59'\n\n # Next call is one second later when using $POCSTIME.\n >>> y2k = current_time(pretty=True)\n >>> y2k\n '2000-01-01 00:00:00'\n\n\n Note:\n The time returned from this function is **not** timezone aware. All times\n are UTC.\n\n .. doctest::\n\n >>> from panoptes.utils import current_time\n >>> current_time() # doctest: +SKIP\n